s5p_mfc.c revision f9f715a95d07d3868bb30aeb20252b6b05d35d8f
1/*
2 * Samsung S5P Multi Format Codec v 5.1
3 *
4 * Copyright (c) 2011 Samsung Electronics Co., Ltd.
5 * Kamil Debski, <k.debski@samsung.com>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
11 */
12
13#include <linux/clk.h>
14#include <linux/delay.h>
15#include <linux/interrupt.h>
16#include <linux/io.h>
17#include <linux/module.h>
18#include <linux/platform_device.h>
19#include <linux/sched.h>
20#include <linux/slab.h>
21#include <linux/videodev2.h>
22#include <media/v4l2-event.h>
23#include <linux/workqueue.h>
24#include <media/videobuf2-core.h>
25#include "regs-mfc.h"
26#include "s5p_mfc_ctrl.h"
27#include "s5p_mfc_debug.h"
28#include "s5p_mfc_dec.h"
29#include "s5p_mfc_enc.h"
30#include "s5p_mfc_intr.h"
31#include "s5p_mfc_opr.h"
32#include "s5p_mfc_pm.h"
33#include "s5p_mfc_shm.h"
34
35#define S5P_MFC_NAME		"s5p-mfc"
36#define S5P_MFC_DEC_NAME	"s5p-mfc-dec"
37#define S5P_MFC_ENC_NAME	"s5p-mfc-enc"
38
/* Module-wide debug verbosity; higher values enable more mfc_debug()
 * output.  Readable by all, writable by root at runtime via sysfs. */
int debug;
module_param(debug, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(debug, "Debug level - higher value produces more verbose messages");
42
43/* Helper functions for interrupt processing */
44/* Remove from hw execution round robin */
45static void clear_work_bit(struct s5p_mfc_ctx *ctx)
46{
47	struct s5p_mfc_dev *dev = ctx->dev;
48
49	spin_lock(&dev->condlock);
50	clear_bit(ctx->num, &dev->ctx_work_bits);
51	spin_unlock(&dev->condlock);
52}
53
54/* Wake up context wait_queue */
static void wake_up_ctx(struct s5p_mfc_ctx *ctx, unsigned int reason,
			unsigned int err)
{
	/* Publish the interrupt reason/error code for this context, then
	 * wake any thread sleeping on ctx->queue (e.g. in
	 * s5p_mfc_wait_for_done_ctx()).
	 * NOTE(review): int_cond is set before int_type/int_err with no
	 * explicit barrier — assumes waiters re-read after wake_up();
	 * confirm if the ordering ever matters. */
	ctx->int_cond = 1;
	ctx->int_type = reason;
	ctx->int_err = err;
	wake_up(&ctx->queue);
}
63
64/* Wake up device wait_queue */
static void wake_up_dev(struct s5p_mfc_dev *dev, unsigned int reason,
			unsigned int err)
{
	/* Device-level counterpart of wake_up_ctx(): publish the interrupt
	 * reason/error and wake threads sleeping on dev->queue.
	 * NOTE(review): same unbarriered int_cond-first store order as
	 * wake_up_ctx() — confirm waiters tolerate it. */
	dev->int_cond = 1;
	dev->int_type = reason;
	dev->int_err = err;
	wake_up(&dev->queue);
}
73
74static void s5p_mfc_watchdog(unsigned long arg)
75{
76	struct s5p_mfc_dev *dev = (struct s5p_mfc_dev *)arg;
77
78	if (test_bit(0, &dev->hw_lock))
79		atomic_inc(&dev->watchdog_cnt);
80	if (atomic_read(&dev->watchdog_cnt) >= MFC_WATCHDOG_CNT) {
81		/* This means that hw is busy and no interrupts were
82		 * generated by hw for the Nth time of running this
83		 * watchdog timer. This usually means a serious hw
84		 * error. Now it is time to kill all instances and
85		 * reset the MFC. */
86		mfc_err("Time out during waiting for HW\n");
87		queue_work(dev->watchdog_workqueue, &dev->watchdog_work);
88	}
89	dev->watchdog_timer.expires = jiffies +
90					msecs_to_jiffies(MFC_WATCHDOG_INTERVAL);
91	add_timer(&dev->watchdog_timer);
92}
93
/*
 * Workqueue handler for a hardware timeout: marks every open context as
 * errored, returns all queued buffers, drops the hw lock and — if any
 * instance is still open — reloads the firmware and re-initializes the
 * hardware.
 */
static void s5p_mfc_watchdog_worker(struct work_struct *work)
{
	struct s5p_mfc_dev *dev;
	struct s5p_mfc_ctx *ctx;
	unsigned long flags;
	int mutex_locked;
	int i, ret;

	dev = container_of(work, struct s5p_mfc_dev, watchdog_work);

	mfc_err("Driver timeout error handling\n");
	/* Lock the mutex that protects open and release.
	 * This is necessary as they may load and unload firmware. */
	mutex_locked = mutex_trylock(&dev->mfc_mutex);
	if (!mutex_locked)
		mfc_err("Error: some instance may be closing/opening\n");
	spin_lock_irqsave(&dev->irqlock, flags);

	s5p_mfc_clock_off();

	/* Force every open context into the error state and give all of
	 * its queued buffers back so blocked userspace calls can return. */
	for (i = 0; i < MFC_NUM_CONTEXTS; i++) {
		ctx = dev->ctx[i];
		if (!ctx)
			continue;
		ctx->state = MFCINST_ERROR;
		s5p_mfc_cleanup_queue(&ctx->dst_queue, &ctx->vq_dst);
		s5p_mfc_cleanup_queue(&ctx->src_queue, &ctx->vq_src);
		clear_work_bit(ctx);
		wake_up_ctx(ctx, S5P_FIMV_R2H_CMD_ERR_RET, 0);
	}
	/* Release the hardware lock the stuck operation was holding */
	clear_bit(0, &dev->hw_lock);
	spin_unlock_irqrestore(&dev->irqlock, flags);
	/* Double check if there is at least one instance running.
	 * If no instance is in memory than no firmware should be present */
	if (dev->num_inst > 0) {
		ret = s5p_mfc_reload_firmware(dev);
		if (ret) {
			mfc_err("Failed to reload FW\n");
			goto unlock;
		}
		s5p_mfc_clock_on();
		ret = s5p_mfc_init_hw(dev);
		if (ret)
			mfc_err("Failed to reinit FW\n");
	}
unlock:
	if (mutex_locked)
		mutex_unlock(&dev->mfc_mutex);
}
143
144static enum s5p_mfc_node_type s5p_mfc_get_node_type(struct file *file)
145{
146	struct video_device *vdev = video_devdata(file);
147
148	if (!vdev) {
149		mfc_err("failed to get video_device");
150		return MFCNODE_INVALID;
151	}
152	if (vdev->index == 0)
153		return MFCNODE_DECODER;
154	else if (vdev->index == 1)
155		return MFCNODE_ENCODER;
156	return MFCNODE_INVALID;
157}
158
static void s5p_mfc_clear_int_flags(struct s5p_mfc_dev *dev)
{
	/* Acknowledge the interrupt towards the RISC firmware by clearing
	 * the host interrupt and the firmware-to-host command register.
	 * 0xffff written to SI_RTN_CHID is presumably a "no channel"
	 * sentinel — confirm against the MFC v5.1 datasheet. */
	mfc_write(dev, 0, S5P_FIMV_RISC_HOST_INT);
	mfc_write(dev, 0, S5P_FIMV_RISC2HOST_CMD);
	mfc_write(dev, 0xffff, S5P_FIMV_SI_RTN_CHID);
}
165
/*
 * End-of-stream / flush completion: the hardware has no more frames to
 * display, so return every remaining destination buffer to userspace
 * with zero payload and mark the context finished.
 * Caller context: invoked from s5p_mfc_handle_frame() with dev->irqlock
 * held (the list manipulation below relies on that).
 */
static void s5p_mfc_handle_frame_all_extracted(struct s5p_mfc_ctx *ctx)
{
	struct s5p_mfc_buf *dst_buf;

	ctx->state = MFCINST_FINISHED;
	ctx->sequence++;
	while (!list_empty(&ctx->dst_queue)) {
		dst_buf = list_entry(ctx->dst_queue.next,
				     struct s5p_mfc_buf, list);
		mfc_debug(2, "Cleaning up buffer: %d\n",
					  dst_buf->b->v4l2_buf.index);
		/* Zero payload: these buffers carry no decoded data */
		vb2_set_plane_payload(dst_buf->b, 0, 0);
		vb2_set_plane_payload(dst_buf->b, 1, 0);
		list_del(&dst_buf->list);
		ctx->dst_queue_cnt--;
		dst_buf->b->v4l2_buf.sequence = (ctx->sequence++);

		/* Equal top/bottom field timestamps in shared memory mean
		 * progressive content, otherwise report interlaced */
		if (s5p_mfc_read_shm(ctx, PIC_TIME_TOP) ==
			s5p_mfc_read_shm(ctx, PIC_TIME_BOT))
			dst_buf->b->v4l2_buf.field = V4L2_FIELD_NONE;
		else
			dst_buf->b->v4l2_buf.field = V4L2_FIELD_INTERLACED;

		ctx->dec_dst_flag &= ~(1 << dst_buf->b->v4l2_buf.index);
		vb2_buffer_done(dst_buf->b, VB2_BUF_STATE_DONE);
	}
}
193
/*
 * Propagate the timestamp/timecode of the just-decoded source buffer to
 * the destination buffer whose luma address matches the one the hardware
 * reports as decoded, and set the key/P/B frame flag accordingly.
 * Caller context: called from s5p_mfc_handle_frame() with dev->irqlock
 * held.
 */
static void s5p_mfc_handle_frame_copy_time(struct s5p_mfc_ctx *ctx)
{
	/* NOTE(review): 'dev' looks unused, but the s5p_mfc_get_* register
	 * macros below presumably expand to reads through it — confirm in
	 * s5p_mfc_opr.h before removing. */
	struct s5p_mfc_dev *dev = ctx->dev;
	struct s5p_mfc_buf  *dst_buf, *src_buf;
	size_t dec_y_addr = s5p_mfc_get_dec_y_adr();
	unsigned int frame_type = s5p_mfc_get_frame_type();

	/* Copy timestamp / timecode from decoded src to dst and set
	   appropraite flags */
	src_buf = list_entry(ctx->src_queue.next, struct s5p_mfc_buf, list);
	list_for_each_entry(dst_buf, &ctx->dst_queue, list) {
		if (vb2_dma_contig_plane_dma_addr(dst_buf->b, 0) == dec_y_addr) {
			memcpy(&dst_buf->b->v4l2_buf.timecode,
				&src_buf->b->v4l2_buf.timecode,
				sizeof(struct v4l2_timecode));
			memcpy(&dst_buf->b->v4l2_buf.timestamp,
				&src_buf->b->v4l2_buf.timestamp,
				sizeof(struct timeval));
			switch (frame_type) {
			case S5P_FIMV_DECODE_FRAME_I_FRAME:
				dst_buf->b->v4l2_buf.flags |=
						V4L2_BUF_FLAG_KEYFRAME;
				break;
			case S5P_FIMV_DECODE_FRAME_P_FRAME:
				dst_buf->b->v4l2_buf.flags |=
						V4L2_BUF_FLAG_PFRAME;
				break;
			case S5P_FIMV_DECODE_FRAME_B_FRAME:
				dst_buf->b->v4l2_buf.flags |=
						V4L2_BUF_FLAG_BFRAME;
				break;
			}
			/* Only one destination buffer can match */
			break;
		}
	}
}
230
231static void s5p_mfc_handle_frame_new(struct s5p_mfc_ctx *ctx, unsigned int err)
232{
233	struct s5p_mfc_dev *dev = ctx->dev;
234	struct s5p_mfc_buf  *dst_buf;
235	size_t dspl_y_addr = s5p_mfc_get_dspl_y_adr();
236	unsigned int frame_type = s5p_mfc_get_frame_type();
237	unsigned int index;
238
239	/* If frame is same as previous then skip and do not dequeue */
240	if (frame_type == S5P_FIMV_DECODE_FRAME_SKIPPED) {
241		if (!ctx->after_packed_pb)
242			ctx->sequence++;
243		ctx->after_packed_pb = 0;
244		return;
245	}
246	ctx->sequence++;
247	/* The MFC returns address of the buffer, now we have to
248	 * check which videobuf does it correspond to */
249	list_for_each_entry(dst_buf, &ctx->dst_queue, list) {
250		/* Check if this is the buffer we're looking for */
251		if (vb2_dma_contig_plane_dma_addr(dst_buf->b, 0) == dspl_y_addr) {
252			list_del(&dst_buf->list);
253			ctx->dst_queue_cnt--;
254			dst_buf->b->v4l2_buf.sequence = ctx->sequence;
255			if (s5p_mfc_read_shm(ctx, PIC_TIME_TOP) ==
256				s5p_mfc_read_shm(ctx, PIC_TIME_BOT))
257				dst_buf->b->v4l2_buf.field = V4L2_FIELD_NONE;
258			else
259				dst_buf->b->v4l2_buf.field =
260							V4L2_FIELD_INTERLACED;
261			vb2_set_plane_payload(dst_buf->b, 0, ctx->luma_size);
262			vb2_set_plane_payload(dst_buf->b, 1, ctx->chroma_size);
263			clear_bit(dst_buf->b->v4l2_buf.index,
264							&ctx->dec_dst_flag);
265
266			vb2_buffer_done(dst_buf->b,
267				err ? VB2_BUF_STATE_ERROR : VB2_BUF_STATE_DONE);
268
269			index = dst_buf->b->v4l2_buf.index;
270			break;
271		}
272	}
273}
274
275/* Handle frame decoding interrupt */
/* Handle frame decoding interrupt */
static void s5p_mfc_handle_frame(struct s5p_mfc_ctx *ctx,
					unsigned int reason, unsigned int err)
{
	struct s5p_mfc_dev *dev = ctx->dev;
	unsigned int dst_frame_status;
	struct s5p_mfc_buf *src_buf;
	unsigned long flags;
	unsigned int res_change;

	/* NOTE(review): 'index' is assigned below but never read — it is
	 * a candidate for removal. */
	unsigned int index;

	dst_frame_status = s5p_mfc_get_dspl_status()
				& S5P_FIMV_DEC_STATUS_DECODING_STATUS_MASK;
	res_change = s5p_mfc_get_dspl_status()
				& S5P_FIMV_DEC_STATUS_RESOLUTION_MASK;
	mfc_debug(2, "Frame Status: %x\n", dst_frame_status);
	/* Second frame interrupt after a resolution change was signalled:
	 * start flushing out frames decoded at the old resolution. */
	if (ctx->state == MFCINST_RES_CHANGE_INIT)
		ctx->state = MFCINST_RES_CHANGE_FLUSH;
	if (res_change) {
		/* Hardware reported a resolution change: remember it, ack
		 * the interrupt and reschedule; the flush happens on the
		 * following frame interrupts. */
		ctx->state = MFCINST_RES_CHANGE_INIT;
		s5p_mfc_clear_int_flags(dev);
		wake_up_ctx(ctx, reason, err);
		if (test_and_clear_bit(0, &dev->hw_lock) == 0)
			BUG();
		s5p_mfc_clock_off();
		s5p_mfc_try_run(dev);
		return;
	}
	if (ctx->dpb_flush_flag)
		ctx->dpb_flush_flag = 0;

	spin_lock_irqsave(&dev->irqlock, flags);
	/* All frames remaining in the buffer have been extracted  */
	if (dst_frame_status == S5P_FIMV_DEC_STATUS_DECODING_EMPTY) {
		if (ctx->state == MFCINST_RES_CHANGE_FLUSH) {
			s5p_mfc_handle_frame_all_extracted(ctx);
			ctx->state = MFCINST_RES_CHANGE_END;
			goto leave_handle_frame;
		} else {
			s5p_mfc_handle_frame_all_extracted(ctx);
		}
	}

	if (dst_frame_status == S5P_FIMV_DEC_STATUS_DECODING_DISPLAY ||
		dst_frame_status == S5P_FIMV_DEC_STATUS_DECODING_ONLY)
		s5p_mfc_handle_frame_copy_time(ctx);

	/* A frame has been decoded and is in the buffer  */
	if (dst_frame_status == S5P_FIMV_DEC_STATUS_DISPLAY_ONLY ||
	    dst_frame_status == S5P_FIMV_DEC_STATUS_DECODING_DISPLAY) {
		s5p_mfc_handle_frame_new(ctx, err);
	} else {
		mfc_debug(2, "No frame decode\n");
	}
	/* Mark source buffer as complete */
	if (dst_frame_status != S5P_FIMV_DEC_STATUS_DISPLAY_ONLY
		&& !list_empty(&ctx->src_queue)) {
		src_buf = list_entry(ctx->src_queue.next, struct s5p_mfc_buf,
								list);
		ctx->consumed_stream += s5p_mfc_get_consumed_stream();
		/* Packed PB: a non-H264 P-frame that did not consume the
		 * whole bitstream buffer leaves a second frame in it, so the
		 * same source buffer must be decoded again on the next run. */
		if (ctx->codec_mode != S5P_FIMV_CODEC_H264_DEC &&
			s5p_mfc_get_frame_type() == S5P_FIMV_DECODE_FRAME_P_FRAME
					&& ctx->consumed_stream + STUFF_BYTE <
					src_buf->b->v4l2_planes[0].bytesused) {
			/* Run MFC again on the same buffer */
			mfc_debug(2, "Running again the same buffer\n");
			ctx->after_packed_pb = 1;
		} else {
			index = src_buf->b->v4l2_buf.index;
			mfc_debug(2, "MFC needs next buffer\n");
			ctx->consumed_stream = 0;
			list_del(&src_buf->list);
			ctx->src_queue_cnt--;
			if (s5p_mfc_err_dec(err) > 0)
				vb2_buffer_done(src_buf->b, VB2_BUF_STATE_ERROR);
			else
				vb2_buffer_done(src_buf->b, VB2_BUF_STATE_DONE);
		}
	}
leave_handle_frame:
	spin_unlock_irqrestore(&dev->irqlock, flags);
	/* Deschedule the context when it has nothing to do: no pending
	 * input (unless finishing) or not enough free display buffers. */
	if ((ctx->src_queue_cnt == 0 && ctx->state != MFCINST_FINISHING)
				    || ctx->dst_queue_cnt < ctx->dpb_count)
		clear_work_bit(ctx);
	s5p_mfc_clear_int_flags(dev);
	wake_up_ctx(ctx, reason, err);
	/* hw_lock must be held here — losing it indicates driver state
	 * corruption, hence the BUG() */
	if (test_and_clear_bit(0, &dev->hw_lock) == 0)
		BUG();
	s5p_mfc_clock_off();
	s5p_mfc_try_run(dev);
}
367
368/* Error handling for interrupt */
/* Error handling for interrupt */
static void s5p_mfc_handle_error(struct s5p_mfc_ctx *ctx,
				 unsigned int reason, unsigned int err)
{
	struct s5p_mfc_dev *dev;
	unsigned long flags;

	/* If no context is available then all necessary
	 * processing has been done. */
	if (ctx == NULL)
		return;

	dev = ctx->dev;
	mfc_err("Interrupt Error: %08x\n", err);
	s5p_mfc_clear_int_flags(dev);
	wake_up_dev(dev, reason, err);

	/* Error recovery is dependent on the state of context */
	switch (ctx->state) {
	case MFCINST_INIT:
		/* This error had to happen while acquireing instance */
	case MFCINST_GOT_INST:
		/* This error had to happen while parsing the header */
	case MFCINST_HEAD_PARSED:
		/* This error had to happen while setting dst buffers */
	case MFCINST_RETURN_INST:
		/* This error had to happen while releasing instance */
		clear_work_bit(ctx);
		wake_up_ctx(ctx, reason, err);
		/* hw_lock must still be held at this point; losing it means
		 * the driver state is corrupted */
		if (test_and_clear_bit(0, &dev->hw_lock) == 0)
			BUG();
		s5p_mfc_clock_off();
		ctx->state = MFCINST_ERROR;
		break;
	case MFCINST_FINISHING:
	case MFCINST_FINISHED:
	case MFCINST_RUNNING:
		/* It is higly probable that an error occured
		 * while decoding a frame */
		clear_work_bit(ctx);
		ctx->state = MFCINST_ERROR;
		/* Mark all dst buffers as having an error */
		spin_lock_irqsave(&dev->irqlock, flags);
		s5p_mfc_cleanup_queue(&ctx->dst_queue, &ctx->vq_dst);
		/* Mark all src buffers as having an error */
		s5p_mfc_cleanup_queue(&ctx->src_queue, &ctx->vq_src);
		spin_unlock_irqrestore(&dev->irqlock, flags);
		if (test_and_clear_bit(0, &dev->hw_lock) == 0)
			BUG();
		s5p_mfc_clock_off();
		break;
	default:
		mfc_err("Encountered an error interrupt which had not been handled\n");
		break;
	}
	return;
}
425
426/* Header parsing interrupt handling */
/* Header parsing interrupt handling */
static void s5p_mfc_handle_seq_done(struct s5p_mfc_ctx *ctx,
				 unsigned int reason, unsigned int err)
{
	struct s5p_mfc_dev *dev;
	unsigned int guard_width, guard_height;

	if (ctx == NULL)
		return;
	dev = ctx->dev;
	if (ctx->c_ops->post_seq_start) {
		/* Codec-specific hook takes over all geometry setup */
		if (ctx->c_ops->post_seq_start(ctx))
			mfc_err("post_seq_start() failed\n");
	} else {
		/* Read the parsed stream geometry from the hardware and
		 * derive the tiled (NV12MT-aligned) buffer sizes. */
		ctx->img_width = s5p_mfc_get_img_width();
		ctx->img_height = s5p_mfc_get_img_height();

		ctx->buf_width = ALIGN(ctx->img_width,
						S5P_FIMV_NV12MT_HALIGN);
		ctx->buf_height = ALIGN(ctx->img_height,
						S5P_FIMV_NV12MT_VALIGN);
		mfc_debug(2, "SEQ Done: Movie dimensions %dx%d, "
			"buffer dimensions: %dx%d\n", ctx->img_width,
				ctx->img_height, ctx->buf_width,
						ctx->buf_height);
		if (ctx->codec_mode == S5P_FIMV_CODEC_H264_DEC) {
			/* H264 also needs a motion-vector buffer */
			ctx->luma_size = ALIGN(ctx->buf_width *
				ctx->buf_height, S5P_FIMV_DEC_BUF_ALIGN);
			ctx->chroma_size = ALIGN(ctx->buf_width *
					 ALIGN((ctx->img_height >> 1),
					       S5P_FIMV_NV12MT_VALIGN),
					       S5P_FIMV_DEC_BUF_ALIGN);
			ctx->mv_size = ALIGN(ctx->buf_width *
					ALIGN((ctx->buf_height >> 2),
					S5P_FIMV_NV12MT_VALIGN),
					S5P_FIMV_DEC_BUF_ALIGN);
		} else {
			/* Non-H264 codecs use padded "guard" dimensions.
			 * NOTE(review): the +24/+16/+4 margins presumably
			 * come from the MFC v5.1 datasheet — confirm there. */
			guard_width = ALIGN(ctx->img_width + 24,
					S5P_FIMV_NV12MT_HALIGN);
			guard_height = ALIGN(ctx->img_height + 16,
						S5P_FIMV_NV12MT_VALIGN);
			ctx->luma_size = ALIGN(guard_width *
				guard_height, S5P_FIMV_DEC_BUF_ALIGN);
			guard_width = ALIGN(ctx->img_width + 16,
						S5P_FIMV_NV12MT_HALIGN);
			guard_height = ALIGN((ctx->img_height >> 1) + 4,
						S5P_FIMV_NV12MT_VALIGN);
			ctx->chroma_size = ALIGN(guard_width *
				guard_height, S5P_FIMV_DEC_BUF_ALIGN);
			ctx->mv_size = 0;
		}
		ctx->dpb_count = s5p_mfc_get_dpb_count();
		/* Zero-sized dimensions mean header parsing failed */
		if (ctx->img_width == 0 || ctx->img_height == 0)
			ctx->state = MFCINST_ERROR;
		else
			ctx->state = MFCINST_HEAD_PARSED;
	}
	s5p_mfc_clear_int_flags(dev);
	clear_work_bit(ctx);
	if (test_and_clear_bit(0, &dev->hw_lock) == 0)
		BUG();
	s5p_mfc_clock_off();
	s5p_mfc_try_run(dev);
	wake_up_ctx(ctx, reason, err);
}
491
492/* Header parsing interrupt handling */
493static void s5p_mfc_handle_init_buffers(struct s5p_mfc_ctx *ctx,
494				 unsigned int reason, unsigned int err)
495{
496	struct s5p_mfc_buf *src_buf;
497	struct s5p_mfc_dev *dev;
498	unsigned long flags;
499
500	if (ctx == NULL)
501		return;
502	dev = ctx->dev;
503	s5p_mfc_clear_int_flags(dev);
504	ctx->int_type = reason;
505	ctx->int_err = err;
506	ctx->int_cond = 1;
507	spin_lock(&dev->condlock);
508	clear_bit(ctx->num, &dev->ctx_work_bits);
509	spin_unlock(&dev->condlock);
510	if (err == 0) {
511		ctx->state = MFCINST_RUNNING;
512		if (!ctx->dpb_flush_flag) {
513			spin_lock_irqsave(&dev->irqlock, flags);
514			if (!list_empty(&ctx->src_queue)) {
515				src_buf = list_entry(ctx->src_queue.next,
516					     struct s5p_mfc_buf, list);
517				list_del(&src_buf->list);
518				ctx->src_queue_cnt--;
519				vb2_buffer_done(src_buf->b,
520						VB2_BUF_STATE_DONE);
521			}
522			spin_unlock_irqrestore(&dev->irqlock, flags);
523		} else {
524			ctx->dpb_flush_flag = 0;
525		}
526		if (test_and_clear_bit(0, &dev->hw_lock) == 0)
527			BUG();
528
529		s5p_mfc_clock_off();
530
531		wake_up(&ctx->queue);
532		s5p_mfc_try_run(dev);
533	} else {
534		if (test_and_clear_bit(0, &dev->hw_lock) == 0)
535			BUG();
536
537		s5p_mfc_clock_off();
538
539		wake_up(&ctx->queue);
540	}
541}
542
/*
 * Encoder stream-complete interrupt: mark the context finished and
 * return the first destination (bitstream) buffer with zero payload.
 * NOTE(review): plain spin_lock() on irqlock here (vs. _irqsave
 * elsewhere) presumably relies on this running in hard-irq context —
 * confirm if this path can ever run with interrupts enabled.
 */
static void s5p_mfc_handle_stream_complete(struct s5p_mfc_ctx *ctx,
				 unsigned int reason, unsigned int err)
{
	struct s5p_mfc_dev *dev = ctx->dev;
	struct s5p_mfc_buf *mb_entry;

	mfc_debug(2, "Stream completed");

	s5p_mfc_clear_int_flags(dev);
	ctx->int_type = reason;
	ctx->int_err = err;
	ctx->state = MFCINST_FINISHED;

	spin_lock(&dev->irqlock);
	if (!list_empty(&ctx->dst_queue)) {
		mb_entry = list_entry(ctx->dst_queue.next, struct s5p_mfc_buf,
									list);
		list_del(&mb_entry->list);
		ctx->dst_queue_cnt--;
		vb2_set_plane_payload(mb_entry->b, 0, 0);
		vb2_buffer_done(mb_entry->b, VB2_BUF_STATE_DONE);
	}
	spin_unlock(&dev->irqlock);

	clear_work_bit(ctx);

	/* Unlike the decode paths this only warns instead of BUG()ing
	 * when the hw lock was already clear */
	if (test_and_clear_bit(0, &dev->hw_lock) == 0)
		WARN_ON(1);

	s5p_mfc_clock_off();
	wake_up(&ctx->queue);
	s5p_mfc_try_run(dev);
}
576
577/* Interrupt processing */
/* Interrupt processing */
static irqreturn_t s5p_mfc_irq(int irq, void *priv)
{
	struct s5p_mfc_dev *dev = priv;
	struct s5p_mfc_ctx *ctx;
	unsigned int reason;
	unsigned int err;

	mfc_debug_enter();
	/* Reset the timeout watchdog */
	atomic_set(&dev->watchdog_cnt, 0);
	/* NOTE(review): ctx may be NULL here; the ERR_RET branch below
	 * dereferences ctx->state without a NULL check while
	 * s5p_mfc_handle_error() guards against NULL — confirm curr_ctx
	 * is always valid when ERR_RET fires. */
	ctx = dev->ctx[dev->curr_ctx];
	/* Get the reason of interrupt and the error code */
	reason = s5p_mfc_get_int_reason();
	err = s5p_mfc_get_int_err();
	mfc_debug(1, "Int reason: %d (err: %08x)\n", reason, err);
	switch (reason) {
	case S5P_FIMV_R2H_CMD_ERR_RET:
		/* An error has occured */
		if (ctx->state == MFCINST_RUNNING &&
			s5p_mfc_err_dec(err) >= S5P_FIMV_ERR_WARNINGS_START)
			/* Warning-level errors during decode are treated as
			 * a normal frame-done */
			s5p_mfc_handle_frame(ctx, reason, err);
		else
			s5p_mfc_handle_error(ctx, reason, err);
		clear_bit(0, &dev->enter_suspend);
		break;

	case S5P_FIMV_R2H_CMD_SLICE_DONE_RET:
	case S5P_FIMV_R2H_CMD_FRAME_DONE_RET:
		if (ctx->c_ops->post_frame_start) {
			/* Codec-specific hook replaces the generic frame
			 * handling (used by the encoder) */
			if (ctx->c_ops->post_frame_start(ctx))
				mfc_err("post_frame_start() failed\n");
			s5p_mfc_clear_int_flags(dev);
			wake_up_ctx(ctx, reason, err);
			if (test_and_clear_bit(0, &dev->hw_lock) == 0)
				BUG();
			s5p_mfc_clock_off();
			s5p_mfc_try_run(dev);
		} else {
			s5p_mfc_handle_frame(ctx, reason, err);
		}
		break;

	case S5P_FIMV_R2H_CMD_SEQ_DONE_RET:
		s5p_mfc_handle_seq_done(ctx, reason, err);
		break;

	case S5P_FIMV_R2H_CMD_OPEN_INSTANCE_RET:
		ctx->inst_no = s5p_mfc_get_inst_no();
		ctx->state = MFCINST_GOT_INST;
		clear_work_bit(ctx);
		wake_up(&ctx->queue);
		goto irq_cleanup_hw;

	case S5P_FIMV_R2H_CMD_CLOSE_INSTANCE_RET:
		clear_work_bit(ctx);
		ctx->state = MFCINST_FREE;
		wake_up(&ctx->queue);
		goto irq_cleanup_hw;

	case S5P_FIMV_R2H_CMD_SYS_INIT_RET:
	case S5P_FIMV_R2H_CMD_FW_STATUS_RET:
	case S5P_FIMV_R2H_CMD_SLEEP_RET:
	case S5P_FIMV_R2H_CMD_WAKEUP_RET:
		/* Device-level (not per-context) completions */
		if (ctx)
			clear_work_bit(ctx);
		s5p_mfc_clear_int_flags(dev);
		wake_up_dev(dev, reason, err);
		clear_bit(0, &dev->hw_lock);
		clear_bit(0, &dev->enter_suspend);
		break;

	case S5P_FIMV_R2H_CMD_INIT_BUFFERS_RET:
		s5p_mfc_handle_init_buffers(ctx, reason, err);
		break;

	case S5P_FIMV_R2H_CMD_ENC_COMPLETE_RET:
		s5p_mfc_handle_stream_complete(ctx, reason, err);
		break;

	default:
		mfc_debug(2, "Unknown int reason\n");
		s5p_mfc_clear_int_flags(dev);
	}
	mfc_debug_leave();
	return IRQ_HANDLED;
irq_cleanup_hw:
	/* Shared tail for open/close-instance completions: ack, publish
	 * the result, drop the hw lock and reschedule. */
	s5p_mfc_clear_int_flags(dev);
	ctx->int_type = reason;
	ctx->int_err = err;
	ctx->int_cond = 1;
	if (test_and_clear_bit(0, &dev->hw_lock) == 0)
		mfc_err("Failed to unlock hw\n");

	s5p_mfc_clock_off();

	s5p_mfc_try_run(dev);
	mfc_debug(2, "Exit via irq_cleanup_hw\n");
	return IRQ_HANDLED;
}
677
678/* Open an MFC node */
679static int s5p_mfc_open(struct file *file)
680{
681	struct s5p_mfc_dev *dev = video_drvdata(file);
682	struct s5p_mfc_ctx *ctx = NULL;
683	struct vb2_queue *q;
684	unsigned long flags;
685	int ret = 0;
686
687	mfc_debug_enter();
688	if (mutex_lock_interruptible(&dev->mfc_mutex))
689		return -ERESTARTSYS;
690	dev->num_inst++;	/* It is guarded by mfc_mutex in vfd */
691	/* Allocate memory for context */
692	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
693	if (!ctx) {
694		mfc_err("Not enough memory\n");
695		ret = -ENOMEM;
696		goto err_alloc;
697	}
698	v4l2_fh_init(&ctx->fh, video_devdata(file));
699	file->private_data = &ctx->fh;
700	v4l2_fh_add(&ctx->fh);
701	ctx->dev = dev;
702	INIT_LIST_HEAD(&ctx->src_queue);
703	INIT_LIST_HEAD(&ctx->dst_queue);
704	ctx->src_queue_cnt = 0;
705	ctx->dst_queue_cnt = 0;
706	/* Get context number */
707	ctx->num = 0;
708	while (dev->ctx[ctx->num]) {
709		ctx->num++;
710		if (ctx->num >= MFC_NUM_CONTEXTS) {
711			mfc_err("Too many open contexts\n");
712			ret = -EBUSY;
713			goto err_no_ctx;
714		}
715	}
716	/* Mark context as idle */
717	spin_lock_irqsave(&dev->condlock, flags);
718	clear_bit(ctx->num, &dev->ctx_work_bits);
719	spin_unlock_irqrestore(&dev->condlock, flags);
720	dev->ctx[ctx->num] = ctx;
721	if (s5p_mfc_get_node_type(file) == MFCNODE_DECODER) {
722		ctx->type = MFCINST_DECODER;
723		ctx->c_ops = get_dec_codec_ops();
724		/* Setup ctrl handler */
725		ret = s5p_mfc_dec_ctrls_setup(ctx);
726		if (ret) {
727			mfc_err("Failed to setup mfc controls\n");
728			goto err_ctrls_setup;
729		}
730	} else if (s5p_mfc_get_node_type(file) == MFCNODE_ENCODER) {
731		ctx->type = MFCINST_ENCODER;
732		ctx->c_ops = get_enc_codec_ops();
733		/* only for encoder */
734		INIT_LIST_HEAD(&ctx->ref_queue);
735		ctx->ref_queue_cnt = 0;
736		/* Setup ctrl handler */
737		ret = s5p_mfc_enc_ctrls_setup(ctx);
738		if (ret) {
739			mfc_err("Failed to setup mfc controls\n");
740			goto err_ctrls_setup;
741		}
742	} else {
743		ret = -ENOENT;
744		goto err_bad_node;
745	}
746	ctx->fh.ctrl_handler = &ctx->ctrl_handler;
747	ctx->inst_no = -1;
748	/* Load firmware if this is the first instance */
749	if (dev->num_inst == 1) {
750		dev->watchdog_timer.expires = jiffies +
751					msecs_to_jiffies(MFC_WATCHDOG_INTERVAL);
752		add_timer(&dev->watchdog_timer);
753		ret = s5p_mfc_power_on();
754		if (ret < 0) {
755			mfc_err("power on failed\n");
756			goto err_pwr_enable;
757		}
758		s5p_mfc_clock_on();
759		ret = s5p_mfc_alloc_and_load_firmware(dev);
760		if (ret)
761			goto err_alloc_fw;
762		/* Init the FW */
763		ret = s5p_mfc_init_hw(dev);
764		if (ret)
765			goto err_init_hw;
766		s5p_mfc_clock_off();
767	}
768	/* Init videobuf2 queue for CAPTURE */
769	q = &ctx->vq_dst;
770	q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
771	q->drv_priv = &ctx->fh;
772	if (s5p_mfc_get_node_type(file) == MFCNODE_DECODER) {
773		q->io_modes = VB2_MMAP;
774		q->ops = get_dec_queue_ops();
775	} else if (s5p_mfc_get_node_type(file) == MFCNODE_ENCODER) {
776		q->io_modes = VB2_MMAP | VB2_USERPTR;
777		q->ops = get_enc_queue_ops();
778	} else {
779		ret = -ENOENT;
780		goto err_queue_init;
781	}
782	q->mem_ops = (struct vb2_mem_ops *)&vb2_dma_contig_memops;
783	ret = vb2_queue_init(q);
784	if (ret) {
785		mfc_err("Failed to initialize videobuf2 queue(capture)\n");
786		goto err_queue_init;
787	}
788	/* Init videobuf2 queue for OUTPUT */
789	q = &ctx->vq_src;
790	q->type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
791	q->io_modes = VB2_MMAP;
792	q->drv_priv = &ctx->fh;
793	if (s5p_mfc_get_node_type(file) == MFCNODE_DECODER) {
794		q->io_modes = VB2_MMAP;
795		q->ops = get_dec_queue_ops();
796	} else if (s5p_mfc_get_node_type(file) == MFCNODE_ENCODER) {
797		q->io_modes = VB2_MMAP | VB2_USERPTR;
798		q->ops = get_enc_queue_ops();
799	} else {
800		ret = -ENOENT;
801		goto err_queue_init;
802	}
803	q->mem_ops = (struct vb2_mem_ops *)&vb2_dma_contig_memops;
804	ret = vb2_queue_init(q);
805	if (ret) {
806		mfc_err("Failed to initialize videobuf2 queue(output)\n");
807		goto err_queue_init;
808	}
809	init_waitqueue_head(&ctx->queue);
810	mutex_unlock(&dev->mfc_mutex);
811	mfc_debug_leave();
812	return ret;
813	/* Deinit when failure occured */
814err_queue_init:
815err_init_hw:
816	s5p_mfc_release_firmware(dev);
817err_alloc_fw:
818	dev->ctx[ctx->num] = NULL;
819	del_timer_sync(&dev->watchdog_timer);
820	s5p_mfc_clock_off();
821err_pwr_enable:
822	if (dev->num_inst == 1) {
823		if (s5p_mfc_power_off() < 0)
824			mfc_err("power off failed\n");
825		s5p_mfc_release_firmware(dev);
826	}
827err_ctrls_setup:
828	s5p_mfc_dec_ctrls_delete(ctx);
829err_bad_node:
830err_no_ctx:
831	v4l2_fh_del(&ctx->fh);
832	v4l2_fh_exit(&ctx->fh);
833	kfree(ctx);
834err_alloc:
835	dev->num_inst--;
836	mutex_unlock(&dev->mfc_mutex);
837	mfc_debug_leave();
838	return ret;
839}
840
841/* Release MFC context */
/* Release MFC context */
static int s5p_mfc_release(struct file *file)
{
	struct s5p_mfc_ctx *ctx = fh_to_ctx(file->private_data);
	struct s5p_mfc_dev *dev = ctx->dev;
	unsigned long flags;

	mfc_debug_enter();
	mutex_lock(&dev->mfc_mutex);
	s5p_mfc_clock_on();
	vb2_queue_release(&ctx->vq_src);
	vb2_queue_release(&ctx->vq_dst);
	/* Mark context as idle */
	spin_lock_irqsave(&dev->condlock, flags);
	clear_bit(ctx->num, &dev->ctx_work_bits);
	spin_unlock_irqrestore(&dev->condlock, flags);
	/* If instance was initialised then
	 * return instance and free reosurces */
	/* NOTE(review): open() initializes inst_no to literal -1; assumes
	 * MFC_NO_INSTANCE_SET == -1 — confirm in the header. */
	if (ctx->inst_no != MFC_NO_INSTANCE_SET) {
		mfc_debug(2, "Has to free instance\n");
		/* Schedule a CLOSE_INSTANCE command and wait for the
		 * firmware to return the instance */
		ctx->state = MFCINST_RETURN_INST;
		spin_lock_irqsave(&dev->condlock, flags);
		set_bit(ctx->num, &dev->ctx_work_bits);
		spin_unlock_irqrestore(&dev->condlock, flags);
		s5p_mfc_clean_ctx_int_flags(ctx);
		s5p_mfc_try_run(dev);
		/* Wait until instance is returned or timeout occured */
		if (s5p_mfc_wait_for_done_ctx
		    (ctx, S5P_FIMV_R2H_CMD_CLOSE_INSTANCE_RET, 0)) {
			s5p_mfc_clock_off();
			mfc_err("Err returning instance\n");
		}
		mfc_debug(2, "After free instance\n");
		/* Free resources */
		s5p_mfc_release_codec_buffers(ctx);
		s5p_mfc_release_instance_buffer(ctx);
		if (ctx->type == MFCINST_DECODER)
			s5p_mfc_release_dec_desc_buffer(ctx);

		ctx->inst_no = MFC_NO_INSTANCE_SET;
	}
	/* hardware locking scheme */
	if (dev->curr_ctx == ctx->num)
		clear_bit(0, &dev->hw_lock);
	dev->num_inst--;
	if (dev->num_inst == 0) {
		mfc_debug(2, "Last instance - release firmware\n");
		/* reset <-> F/W release */
		s5p_mfc_reset(dev);
		s5p_mfc_release_firmware(dev);
		del_timer_sync(&dev->watchdog_timer);
		if (s5p_mfc_power_off() < 0)
			mfc_err("Power off failed\n");
	}
	mfc_debug(2, "Shutting down clock\n");
	s5p_mfc_clock_off();
	dev->ctx[ctx->num] = NULL;
	/* NOTE(review): dec ctrls delete is used for encoder contexts too,
	 * mirroring the error path in open() — confirm it is type-agnostic */
	s5p_mfc_dec_ctrls_delete(ctx);
	v4l2_fh_del(&ctx->fh);
	v4l2_fh_exit(&ctx->fh);
	kfree(ctx);
	mfc_debug_leave();
	mutex_unlock(&dev->mfc_mutex);
	return 0;
}
906
907/* Poll */
/* Poll */
static unsigned int s5p_mfc_poll(struct file *file,
				 struct poll_table_struct *wait)
{
	struct s5p_mfc_ctx *ctx = fh_to_ctx(file->private_data);
	struct s5p_mfc_dev *dev = ctx->dev;
	struct vb2_queue *src_q, *dst_q;
	struct vb2_buffer *src_vb = NULL, *dst_vb = NULL;
	unsigned int rc = 0;
	unsigned long flags;

	mutex_lock(&dev->mfc_mutex);
	src_q = &ctx->vq_src;
	dst_q = &ctx->vq_dst;
	/*
	 * There has to be at least one buffer queued on each queued_list, which
	 * means either in driver already or waiting for driver to claim it
	 * and start processing.
	 */
	if ((!src_q->streaming || list_empty(&src_q->queued_list))
		&& (!dst_q->streaming || list_empty(&dst_q->queued_list))) {
		rc = POLLERR;
		goto end;
	}
	/* Drop the mutex around poll_wait() so other file operations can
	 * make progress while this thread is registered on the waitqueues,
	 * then retake it before inspecting the done lists. */
	mutex_unlock(&dev->mfc_mutex);
	poll_wait(file, &ctx->fh.wait, wait);
	poll_wait(file, &src_q->done_wq, wait);
	poll_wait(file, &dst_q->done_wq, wait);
	mutex_lock(&dev->mfc_mutex);
	if (v4l2_event_pending(&ctx->fh))
		rc |= POLLPRI;
	/* Writable when the OUTPUT queue has a finished buffer */
	spin_lock_irqsave(&src_q->done_lock, flags);
	if (!list_empty(&src_q->done_list))
		src_vb = list_first_entry(&src_q->done_list, struct vb2_buffer,
								done_entry);
	if (src_vb && (src_vb->state == VB2_BUF_STATE_DONE
				|| src_vb->state == VB2_BUF_STATE_ERROR))
		rc |= POLLOUT | POLLWRNORM;
	spin_unlock_irqrestore(&src_q->done_lock, flags);
	/* Readable when the CAPTURE queue has a finished buffer */
	spin_lock_irqsave(&dst_q->done_lock, flags);
	if (!list_empty(&dst_q->done_list))
		dst_vb = list_first_entry(&dst_q->done_list, struct vb2_buffer,
								done_entry);
	if (dst_vb && (dst_vb->state == VB2_BUF_STATE_DONE
				|| dst_vb->state == VB2_BUF_STATE_ERROR))
		rc |= POLLIN | POLLRDNORM;
	spin_unlock_irqrestore(&dst_q->done_lock, flags);
end:
	mutex_unlock(&dev->mfc_mutex);
	return rc;
}
958
959/* Mmap */
960static int s5p_mfc_mmap(struct file *file, struct vm_area_struct *vma)
961{
962	struct s5p_mfc_ctx *ctx = fh_to_ctx(file->private_data);
963	struct s5p_mfc_dev *dev = ctx->dev;
964	unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
965	int ret;
966
967	if (mutex_lock_interruptible(&dev->mfc_mutex))
968		return -ERESTARTSYS;
969	if (offset < DST_QUEUE_OFF_BASE) {
970		mfc_debug(2, "mmaping source\n");
971		ret = vb2_mmap(&ctx->vq_src, vma);
972	} else {		/* capture */
973		mfc_debug(2, "mmaping destination\n");
974		vma->vm_pgoff -= (DST_QUEUE_OFF_BASE >> PAGE_SHIFT);
975		ret = vb2_mmap(&ctx->vq_dst, vma);
976	}
977	mutex_unlock(&dev->mfc_mutex);
978	return ret;
979}
980
/* v4l2 file operations shared by the decoder and encoder video nodes */
static const struct v4l2_file_operations s5p_mfc_fops = {
	.owner = THIS_MODULE,
	.open = s5p_mfc_open,
	.release = s5p_mfc_release,
	.poll = s5p_mfc_poll,
	.unlocked_ioctl = video_ioctl2,
	.mmap = s5p_mfc_mmap,
};
990
/*
 * device_find_child() callback: non-zero when the child's device name
 * equals the string passed in 'data'.
 */
static int match_child(struct device *dev, void *data)
{
	const char *name = dev_name(dev);

	return name && strcmp(name, (char *)data) == 0;
}
997
998/* MFC probe function */
999static int s5p_mfc_probe(struct platform_device *pdev)
1000{
1001	struct s5p_mfc_dev *dev;
1002	struct video_device *vfd;
1003	struct resource *res;
1004	int ret;
1005
1006	pr_debug("%s++\n", __func__);
1007	dev = devm_kzalloc(&pdev->dev, sizeof(*dev), GFP_KERNEL);
1008	if (!dev) {
1009		dev_err(&pdev->dev, "Not enough memory for MFC device\n");
1010		return -ENOMEM;
1011	}
1012
1013	spin_lock_init(&dev->irqlock);
1014	spin_lock_init(&dev->condlock);
1015	dev->plat_dev = pdev;
1016	if (!dev->plat_dev) {
1017		dev_err(&pdev->dev, "No platform data specified\n");
1018		return -ENODEV;
1019	}
1020
1021	ret = s5p_mfc_init_pm(dev);
1022	if (ret < 0) {
1023		dev_err(&pdev->dev, "failed to get mfc clock source\n");
1024		return ret;
1025	}
1026
1027	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1028
1029	dev->regs_base = devm_request_and_ioremap(&pdev->dev, res);
1030	if (dev->regs_base == NULL) {
1031		dev_err(&pdev->dev, "Failed to obtain io memory\n");
1032		return -ENOENT;
1033	}
1034
1035	res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
1036	if (res == NULL) {
1037		dev_err(&pdev->dev, "failed to get irq resource\n");
1038		ret = -ENOENT;
1039		goto err_res;
1040	}
1041	dev->irq = res->start;
1042	ret = devm_request_irq(&pdev->dev, dev->irq, s5p_mfc_irq,
1043					IRQF_DISABLED, pdev->name, dev);
1044	if (ret) {
1045		dev_err(&pdev->dev, "Failed to install irq (%d)\n", ret);
1046		goto err_res;
1047	}
1048
1049	dev->mem_dev_l = device_find_child(&dev->plat_dev->dev, "s5p-mfc-l",
1050					   match_child);
1051	if (!dev->mem_dev_l) {
1052		mfc_err("Mem child (L) device get failed\n");
1053		ret = -ENODEV;
1054		goto err_res;
1055	}
1056	dev->mem_dev_r = device_find_child(&dev->plat_dev->dev, "s5p-mfc-r",
1057					   match_child);
1058	if (!dev->mem_dev_r) {
1059		mfc_err("Mem child (R) device get failed\n");
1060		ret = -ENODEV;
1061		goto err_res;
1062	}
1063
1064	dev->alloc_ctx[0] = vb2_dma_contig_init_ctx(dev->mem_dev_l);
1065	if (IS_ERR_OR_NULL(dev->alloc_ctx[0])) {
1066		ret = PTR_ERR(dev->alloc_ctx[0]);
1067		goto err_res;
1068	}
1069	dev->alloc_ctx[1] = vb2_dma_contig_init_ctx(dev->mem_dev_r);
1070	if (IS_ERR_OR_NULL(dev->alloc_ctx[1])) {
1071		ret = PTR_ERR(dev->alloc_ctx[1]);
1072		goto err_mem_init_ctx_1;
1073	}
1074
1075	mutex_init(&dev->mfc_mutex);
1076
1077	ret = v4l2_device_register(&pdev->dev, &dev->v4l2_dev);
1078	if (ret)
1079		goto err_v4l2_dev_reg;
1080	init_waitqueue_head(&dev->queue);
1081
1082	/* decoder */
1083	vfd = video_device_alloc();
1084	if (!vfd) {
1085		v4l2_err(&dev->v4l2_dev, "Failed to allocate video device\n");
1086		ret = -ENOMEM;
1087		goto err_dec_alloc;
1088	}
1089	vfd->fops	= &s5p_mfc_fops,
1090	vfd->ioctl_ops	= get_dec_v4l2_ioctl_ops();
1091	vfd->release	= video_device_release,
1092	vfd->lock	= &dev->mfc_mutex;
1093	vfd->v4l2_dev	= &dev->v4l2_dev;
1094	vfd->vfl_dir	= VFL_DIR_M2M;
1095	snprintf(vfd->name, sizeof(vfd->name), "%s", S5P_MFC_DEC_NAME);
1096	dev->vfd_dec	= vfd;
1097	ret = video_register_device(vfd, VFL_TYPE_GRABBER, 0);
1098	if (ret) {
1099		v4l2_err(&dev->v4l2_dev, "Failed to register video device\n");
1100		video_device_release(vfd);
1101		goto err_dec_reg;
1102	}
1103	v4l2_info(&dev->v4l2_dev,
1104		  "decoder registered as /dev/video%d\n", vfd->num);
1105	video_set_drvdata(vfd, dev);
1106
1107	/* encoder */
1108	vfd = video_device_alloc();
1109	if (!vfd) {
1110		v4l2_err(&dev->v4l2_dev, "Failed to allocate video device\n");
1111		ret = -ENOMEM;
1112		goto err_enc_alloc;
1113	}
1114	vfd->fops	= &s5p_mfc_fops,
1115	vfd->ioctl_ops	= get_enc_v4l2_ioctl_ops();
1116	vfd->release	= video_device_release,
1117	vfd->lock	= &dev->mfc_mutex;
1118	vfd->v4l2_dev	= &dev->v4l2_dev;
1119	snprintf(vfd->name, sizeof(vfd->name), "%s", S5P_MFC_ENC_NAME);
1120	dev->vfd_enc	= vfd;
1121	ret = video_register_device(vfd, VFL_TYPE_GRABBER, 0);
1122	if (ret) {
1123		v4l2_err(&dev->v4l2_dev, "Failed to register video device\n");
1124		video_device_release(vfd);
1125		goto err_enc_reg;
1126	}
1127	v4l2_info(&dev->v4l2_dev,
1128		  "encoder registered as /dev/video%d\n", vfd->num);
1129	video_set_drvdata(vfd, dev);
1130	platform_set_drvdata(pdev, dev);
1131
1132	dev->hw_lock = 0;
1133	dev->watchdog_workqueue = create_singlethread_workqueue(S5P_MFC_NAME);
1134	INIT_WORK(&dev->watchdog_work, s5p_mfc_watchdog_worker);
1135	atomic_set(&dev->watchdog_cnt, 0);
1136	init_timer(&dev->watchdog_timer);
1137	dev->watchdog_timer.data = (unsigned long)dev;
1138	dev->watchdog_timer.function = s5p_mfc_watchdog;
1139
1140	pr_debug("%s--\n", __func__);
1141	return 0;
1142
1143/* Deinit MFC if probe had failed */
1144err_enc_reg:
1145	video_device_release(dev->vfd_enc);
1146err_enc_alloc:
1147	video_unregister_device(dev->vfd_dec);
1148err_dec_reg:
1149	video_device_release(dev->vfd_dec);
1150err_dec_alloc:
1151	v4l2_device_unregister(&dev->v4l2_dev);
1152err_v4l2_dev_reg:
1153	vb2_dma_contig_cleanup_ctx(dev->alloc_ctx[1]);
1154err_mem_init_ctx_1:
1155	vb2_dma_contig_cleanup_ctx(dev->alloc_ctx[0]);
1156err_res:
1157	s5p_mfc_final_pm(dev);
1158
1159	pr_debug("%s-- with error\n", __func__);
1160	return ret;
1161
1162}
1163
/* Remove the driver */
static int __devexit s5p_mfc_remove(struct platform_device *pdev)
{
	struct s5p_mfc_dev *dev = platform_get_drvdata(pdev);

	v4l2_info(&dev->v4l2_dev, "Removing %s\n", pdev->name);

	/* Quiesce the watchdog first so it cannot fire or queue more work
	 * while the device is being torn down. */
	del_timer_sync(&dev->watchdog_timer);
	flush_workqueue(dev->watchdog_workqueue);
	destroy_workqueue(dev->watchdog_workqueue);

	/* Unregister both video nodes and the v4l2 device before freeing
	 * the vb2 DMA allocator contexts they were using. */
	video_unregister_device(dev->vfd_enc);
	video_unregister_device(dev->vfd_dec);
	v4l2_device_unregister(&dev->v4l2_dev);
	vb2_dma_contig_cleanup_ctx(dev->alloc_ctx[0]);
	vb2_dma_contig_cleanup_ctx(dev->alloc_ctx[1]);

	/* Release clocks / power domain acquired in probe */
	s5p_mfc_final_pm(dev);
	return 0;
}
1184
1185#ifdef CONFIG_PM_SLEEP
1186
1187static int s5p_mfc_suspend(struct device *dev)
1188{
1189	struct platform_device *pdev = to_platform_device(dev);
1190	struct s5p_mfc_dev *m_dev = platform_get_drvdata(pdev);
1191	int ret;
1192
1193	if (m_dev->num_inst == 0)
1194		return 0;
1195	return s5p_mfc_sleep(m_dev);
1196	if (test_and_set_bit(0, &m_dev->enter_suspend) != 0) {
1197		mfc_err("Error: going to suspend for a second time\n");
1198		return -EIO;
1199	}
1200
1201	/* Check if we're processing then wait if it necessary. */
1202	while (test_and_set_bit(0, &m_dev->hw_lock) != 0) {
1203		/* Try and lock the HW */
1204		/* Wait on the interrupt waitqueue */
1205		ret = wait_event_interruptible_timeout(m_dev->queue,
1206			m_dev->int_cond || m_dev->ctx[m_dev->curr_ctx]->int_cond,
1207			msecs_to_jiffies(MFC_INT_TIMEOUT));
1208
1209		if (ret == 0) {
1210			mfc_err("Waiting for hardware to finish timed out\n");
1211			return -EIO;
1212		}
1213	}
1214	return 0;
1215}
1216
1217static int s5p_mfc_resume(struct device *dev)
1218{
1219	struct platform_device *pdev = to_platform_device(dev);
1220	struct s5p_mfc_dev *m_dev = platform_get_drvdata(pdev);
1221
1222	if (m_dev->num_inst == 0)
1223		return 0;
1224	return s5p_mfc_wakeup(m_dev);
1225}
1226#endif
1227
1228#ifdef CONFIG_PM_RUNTIME
1229static int s5p_mfc_runtime_suspend(struct device *dev)
1230{
1231	struct platform_device *pdev = to_platform_device(dev);
1232	struct s5p_mfc_dev *m_dev = platform_get_drvdata(pdev);
1233
1234	atomic_set(&m_dev->pm.power, 0);
1235	return 0;
1236}
1237
1238static int s5p_mfc_runtime_resume(struct device *dev)
1239{
1240	struct platform_device *pdev = to_platform_device(dev);
1241	struct s5p_mfc_dev *m_dev = platform_get_drvdata(pdev);
1242	int pre_power;
1243
1244	if (!m_dev->alloc_ctx)
1245		return 0;
1246	pre_power = atomic_read(&m_dev->pm.power);
1247	atomic_set(&m_dev->pm.power, 1);
1248	return 0;
1249}
1250#endif
1251
/* Power management */
static const struct dev_pm_ops s5p_mfc_pm_ops = {
	/* The sleep hooks exist only under CONFIG_PM_SLEEP and the runtime
	 * hooks only under CONFIG_PM_RUNTIME (see the #ifdef blocks above);
	 * the SET_* macros expand to nothing when the option is off. */
	SET_SYSTEM_SLEEP_PM_OPS(s5p_mfc_suspend, s5p_mfc_resume)
	SET_RUNTIME_PM_OPS(s5p_mfc_runtime_suspend, s5p_mfc_runtime_resume,
			   NULL)
};
1258
/* Platform driver glue: binds to the "s5p-mfc" platform device */
static struct platform_driver s5p_mfc_driver = {
	.probe	= s5p_mfc_probe,
	.remove	= __devexit_p(s5p_mfc_remove),
	.driver	= {
		.name	= S5P_MFC_NAME,
		.owner	= THIS_MODULE,
		.pm	= &s5p_mfc_pm_ops
	},
};
1268
/* Generates the module init/exit boilerplate that (un)registers the driver */
module_platform_driver(s5p_mfc_driver);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Kamil Debski <k.debski@samsung.com>");
MODULE_DESCRIPTION("Samsung S5P Multi Format Codec V4L2 driver");
1274
1275