1/* drivers/video/msm_fb/mdp.c
2 *
3 * MSM MDP Interface (used by framebuffer core)
4 *
5 * Copyright (C) 2007 QUALCOMM Incorporated
6 * Copyright (C) 2007 Google Incorporated
7 *
8 * This software is licensed under the terms of the GNU General Public
9 * License version 2, as published by the Free Software Foundation, and
10 * may be copied, distributed, and modified under those terms.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
15 * GNU General Public License for more details.
16 */
17
18#include <linux/kernel.h>
19#include <linux/fb.h>
20#include <linux/msm_mdp.h>
21#include <linux/interrupt.h>
22#include <linux/wait.h>
23#include <linux/clk.h>
24#include <linux/file.h>
25#include <linux/major.h>
26#include <linux/slab.h>
27
28#include <mach/msm_iomap.h>
29#include <mach/msm_fb.h>
30#include <linux/platform_device.h>
31#include <linux/export.h>
32
33#include "mdp_hw.h"
34
/* sysfs class under which MDP devices and their clients register */
struct class *mdp_class;

/* base offset of the debug-access aperture used to program DMA/PPP registers */
#define MDP_CMD_DEBUG_ACCESS_BASE (0x10000)

/* default color-conversion coefficients programmed by mdp_probe():
 * presumably 9 matrix entries followed by 3 offset values — confirm
 * against the MDP register documentation */
static uint16_t mdp_default_ccs[] = {
	0x254, 0x000, 0x331, 0x254, 0xF38, 0xE61, 0x254, 0x409, 0x000,
	0x010, 0x080, 0x080
};

/* woken by mdp_isr() when DMA2 / PPP(ROI) operations complete */
static DECLARE_WAIT_QUEUE_HEAD(mdp_dma2_waitqueue);
static DECLARE_WAIT_QUEUE_HEAD(mdp_ppp_waitqueue);
/* completion callback for the in-flight DMA2 transfer, if any */
static struct msmfb_callback *dma_callback;
/* MDP core clock; enabled only while at least one irq source is enabled */
static struct clk *clk;
/* bitmask of currently-enabled MDP interrupt sources; guarded by mdp_lock */
static unsigned int mdp_irq_mask;
static DEFINE_SPINLOCK(mdp_lock);
/* serializes blit (PPP) operations in mdp_blit() */
DEFINE_MUTEX(mdp_mutex);
51
52static int enable_mdp_irq(struct mdp_info *mdp, uint32_t mask)
53{
54	unsigned long irq_flags;
55	int ret = 0;
56
57	BUG_ON(!mask);
58
59	spin_lock_irqsave(&mdp_lock, irq_flags);
60	/* if the mask bits are already set return an error, this interrupt
61	 * is already enabled */
62	if (mdp_irq_mask & mask) {
63		printk(KERN_ERR "mdp irq already on already on %x %x\n",
64		       mdp_irq_mask, mask);
65		ret = -1;
66	}
67	/* if the mdp irq is not already enabled enable it */
68	if (!mdp_irq_mask) {
69		if (clk)
70			clk_enable(clk);
71		enable_irq(mdp->irq);
72	}
73
74	/* update the irq mask to reflect the fact that the interrupt is
75	 * enabled */
76	mdp_irq_mask |= mask;
77	spin_unlock_irqrestore(&mdp_lock, irq_flags);
78	return ret;
79}
80
/*
 * Disable the MDP interrupt source(s) in @mask.
 *
 * Caller must hold mdp_lock (or be in the ISR with the lock held) —
 * this routine touches mdp_irq_mask without taking it.
 *
 * Returns 0 on success, -1 if none of the bits in @mask were enabled.
 */
static int locked_disable_mdp_irq(struct mdp_info *mdp, uint32_t mask)
{
	/* this interrupt is already disabled! */
	if (!(mdp_irq_mask & mask)) {
		printk(KERN_ERR "mdp irq already off %x %x\n",
		       mdp_irq_mask, mask);
		return -1;
	}
	/* update the irq mask to reflect the fact that the interrupt is
	 * disabled */
	mdp_irq_mask &= ~(mask);
	/* if no one is waiting on the interrupt, disable it */
	if (!mdp_irq_mask) {
		/* _nosync: this can run from the ISR itself */
		disable_irq_nosync(mdp->irq);
		if (clk)
			clk_disable(clk);
	}
	return 0;
}
100
101static int disable_mdp_irq(struct mdp_info *mdp, uint32_t mask)
102{
103	unsigned long irq_flags;
104	int ret;
105
106	spin_lock_irqsave(&mdp_lock, irq_flags);
107	ret = locked_disable_mdp_irq(mdp, mask);
108	spin_unlock_irqrestore(&mdp_lock, irq_flags);
109	return ret;
110}
111
/*
 * MDP interrupt handler.
 *
 * Reads and acknowledges the raw interrupt status, then — for the
 * sources we actually have enabled — fires the DMA2 completion
 * callback, wakes any waiters, and disables the now-complete sources
 * so the clock can be dropped when nothing is pending.
 */
static irqreturn_t mdp_isr(int irq, void *data)
{
	uint32_t status;
	unsigned long irq_flags;
	struct mdp_info *mdp = data;

	spin_lock_irqsave(&mdp_lock, irq_flags);

	/* ack everything the hardware reported */
	status = mdp_readl(mdp, MDP_INTR_STATUS);
	mdp_writel(mdp, status, MDP_INTR_CLEAR);

	/* only act on sources that are currently enabled */
	status &= mdp_irq_mask;
	if (status & DL0_DMA2_TERM_DONE) {
		/* consume the one-shot completion callback, if set */
		if (dma_callback) {
			dma_callback->func(dma_callback);
			dma_callback = NULL;
		}
		wake_up(&mdp_dma2_waitqueue);
	}

	if (status & DL0_ROI_DONE)
		wake_up(&mdp_ppp_waitqueue);

	/* completed sources are one-shot: disable them again */
	if (status)
		locked_disable_mdp_irq(mdp, status);

	spin_unlock_irqrestore(&mdp_lock, irq_flags);
	return IRQ_HANDLED;
}
141
142static uint32_t mdp_check_mask(uint32_t mask)
143{
144	uint32_t ret;
145	unsigned long irq_flags;
146
147	spin_lock_irqsave(&mdp_lock, irq_flags);
148	ret = mdp_irq_mask & mask;
149	spin_unlock_irqrestore(&mdp_lock, irq_flags);
150	return ret;
151}
152
153static int mdp_wait(struct mdp_info *mdp, uint32_t mask, wait_queue_head_t *wq)
154{
155	int ret = 0;
156	unsigned long irq_flags;
157
158	wait_event_timeout(*wq, !mdp_check_mask(mask), HZ);
159
160	spin_lock_irqsave(&mdp_lock, irq_flags);
161	if (mdp_irq_mask & mask) {
162		locked_disable_mdp_irq(mdp, mask);
163		printk(KERN_WARNING "timeout waiting for mdp to complete %x\n",
164		       mask);
165		ret = -ETIMEDOUT;
166	}
167	spin_unlock_irqrestore(&mdp_lock, irq_flags);
168
169	return ret;
170}
171
172void mdp_dma_wait(struct mdp_device *mdp_dev)
173{
174#define MDP_MAX_TIMEOUTS 20
175	static int timeout_count;
176	struct mdp_info *mdp = container_of(mdp_dev, struct mdp_info, mdp_dev);
177
178	if (mdp_wait(mdp, DL0_DMA2_TERM_DONE, &mdp_dma2_waitqueue) == -ETIMEDOUT)
179		timeout_count++;
180	else
181		timeout_count = 0;
182
183	if (timeout_count > MDP_MAX_TIMEOUTS) {
184		printk(KERN_ERR "mdp: dma failed %d times, somethings wrong!\n",
185		       MDP_MAX_TIMEOUTS);
186		BUG();
187	}
188}
189
/* Block until the in-flight PPP (blit) operation signals ROI done;
 * returns 0 on completion or -ETIMEDOUT. */
static int mdp_ppp_wait(struct mdp_info *mdp)
{
	return mdp_wait(mdp, DL0_ROI_DONE, &mdp_ppp_waitqueue);
}
194
/*
 * Program and kick a DMA2 transfer of an RGB565 frame region to the
 * primary MDDI display.
 *
 * @addr/@stride: source buffer address and line stride
 * @width/@height: size of the region; @x/@y: destination offset
 * @callback: invoked from the ISR when the transfer completes (one-shot)
 *
 * If a DMA2 transfer is already in flight (its irq source is still
 * enabled) the request is dropped with an error message.
 */
void mdp_dma_to_mddi(struct mdp_info *mdp, uint32_t addr, uint32_t stride,
		     uint32_t width, uint32_t height, uint32_t x, uint32_t y,
		     struct msmfb_callback *callback)
{
	uint32_t dma2_cfg;
	uint16_t ld_param = 0; /* 0=PRIM, 1=SECD, 2=EXT */

	if (enable_mdp_irq(mdp, DL0_DMA2_TERM_DONE)) {
		printk(KERN_ERR "mdp_dma_to_mddi: busy\n");
		return;
	}

	/* consumed (and cleared) by mdp_isr() on DMA2 completion */
	dma_callback = callback;

	dma2_cfg = DMA_PACK_TIGHT |
		DMA_PACK_ALIGN_LSB |
		DMA_PACK_PATTERN_RGB |
		DMA_OUT_SEL_AHB |
		DMA_IBUF_NONCONTIGUOUS;

	dma2_cfg |= DMA_IBUF_FORMAT_RGB565;

	dma2_cfg |= DMA_OUT_SEL_MDDI;

	dma2_cfg |= DMA_MDDI_DMAOUT_LCD_SEL_PRIMARY;

	dma2_cfg |= DMA_DITHER_EN;

	/* setup size, address, and stride */
	mdp_writel(mdp, (height << 16) | (width),
		   MDP_CMD_DEBUG_ACCESS_BASE + 0x0184);
	mdp_writel(mdp, addr, MDP_CMD_DEBUG_ACCESS_BASE + 0x0188);
	mdp_writel(mdp, stride, MDP_CMD_DEBUG_ACCESS_BASE + 0x018C);

	/* 666 18BPP */
	dma2_cfg |= DMA_DSTC0G_6BITS | DMA_DSTC1B_6BITS | DMA_DSTC2R_6BITS;

	/* set y & x offset and MDDI transaction parameters */
	mdp_writel(mdp, (y << 16) | (x), MDP_CMD_DEBUG_ACCESS_BASE + 0x0194);
	mdp_writel(mdp, ld_param, MDP_CMD_DEBUG_ACCESS_BASE + 0x01a0);
	mdp_writel(mdp, (MDDI_VDO_PACKET_DESC << 16) | MDDI_VDO_PACKET_PRIM,
		   MDP_CMD_DEBUG_ACCESS_BASE + 0x01a4);

	/* write the assembled config last, then kick the transfer */
	mdp_writel(mdp, dma2_cfg, MDP_CMD_DEBUG_ACCESS_BASE + 0x0180);

	/* start DMA2 */
	mdp_writel(mdp, 0, MDP_CMD_DEBUG_ACCESS_BASE + 0x0044);
}
243
244void mdp_dma(struct mdp_device *mdp_dev, uint32_t addr, uint32_t stride,
245	     uint32_t width, uint32_t height, uint32_t x, uint32_t y,
246	     struct msmfb_callback *callback, int interface)
247{
248	struct mdp_info *mdp = container_of(mdp_dev, struct mdp_info, mdp_dev);
249
250	if (interface == MSM_MDDI_PMDH_INTERFACE) {
251		mdp_dma_to_mddi(mdp, addr, stride, width, height, x, y,
252				callback);
253	}
254}
255
256int get_img(struct mdp_img *img, struct fb_info *info,
257	    unsigned long *start, unsigned long *len,
258	    struct file **filep)
259{
260	int put_needed, ret = 0;
261	struct file *file;
262
263	file = fget_light(img->memory_id, &put_needed);
264	if (file == NULL)
265		return -1;
266
267	if (MAJOR(file->f_dentry->d_inode->i_rdev) == FB_MAJOR) {
268		*start = info->fix.smem_start;
269		*len = info->fix.smem_len;
270	} else
271		ret = -1;
272	fput_light(file, put_needed);
273
274	return ret;
275}
276
/* Intentionally empty: get_img() drops its file reference before
 * returning, so there is nothing to release here.  Kept so callers
 * can follow the usual get/put pairing. */
void put_img(struct file *src_file, struct file *dst_file)
{
}
280
/*
 * Perform a PPP blit described by @req from a source to a destination
 * image (both must be framebuffer memory — see get_img()).
 *
 * Serialized by mdp_mutex.  For blits that hit a hardware bug in BG
 * tile fetch (rotated + destination narrower than a 16-pixel tile but
 * taller than one), the request is split into a series of 16-line
 * tile blits plus a remainder.
 *
 * Returns 0 on success or a negative errno.
 */
int mdp_blit(struct mdp_device *mdp_dev, struct fb_info *fb,
	     struct mdp_blit_req *req)
{
	int ret;
	unsigned long src_start = 0, src_len = 0, dst_start = 0, dst_len = 0;
	struct mdp_info *mdp = container_of(mdp_dev, struct mdp_info, mdp_dev);
	struct file *src_file = 0, *dst_file = 0;

	/* WORKAROUND FOR HARDWARE BUG IN BG TILE FETCH */
	if (unlikely(req->src_rect.h == 0 ||
		     req->src_rect.w == 0)) {
		printk(KERN_ERR "mpd_ppp: src img of zero size!\n");
		return -EINVAL;
	}
	if (unlikely(req->dst_rect.h == 0 ||
		     req->dst_rect.w == 0))
		return -EINVAL;

	/* do this first so that if this fails, the caller can always
	 * safely call put_img */
	if (unlikely(get_img(&req->src, fb, &src_start, &src_len, &src_file))) {
		printk(KERN_ERR "mpd_ppp: could not retrieve src image from "
				"memory\n");
		return -EINVAL;
	}

	if (unlikely(get_img(&req->dst, fb, &dst_start, &dst_len, &dst_file))) {
		printk(KERN_ERR "mpd_ppp: could not retrieve dst image from "
				"memory\n");
		return -EINVAL;
	}
	mutex_lock(&mdp_mutex);

	/* transp_masking unimplemented */
	req->transp_mask = MDP_TRANSP_NOP;
	/* blend/rotate blits into a narrow-but-tall destination trip the
	 * BG tile fetch bug: emit them 16 destination lines at a time */
	if (unlikely((req->transp_mask != MDP_TRANSP_NOP ||
		      req->alpha != MDP_ALPHA_NOP ||
		      HAS_ALPHA(req->src.format)) &&
		     (req->flags & MDP_ROT_90 &&
		      req->dst_rect.w <= 16 && req->dst_rect.h >= 16))) {
		int i;
		unsigned int tiles = req->dst_rect.h / 16;
		unsigned int remainder = req->dst_rect.h % 16;
		/* scale the source width to what one 16-line tile consumes */
		req->src_rect.w = 16*req->src_rect.w / req->dst_rect.h;
		req->dst_rect.h = 16;
		for (i = 0; i < tiles; i++) {
			enable_mdp_irq(mdp, DL0_ROI_DONE);
			ret = mdp_ppp_blit(mdp, req, src_file, src_start,
					   src_len, dst_file, dst_start,
					   dst_len);
			if (ret)
				goto err_bad_blit;
			ret = mdp_ppp_wait(mdp);
			if (ret)
				goto err_wait_failed;
			/* advance to the next tile in both images */
			req->dst_rect.y += 16;
			req->src_rect.x += req->src_rect.w;
		}
		if (!remainder)
			goto end;
		/* final partial tile */
		req->src_rect.w = remainder*req->src_rect.w / req->dst_rect.h;
		req->dst_rect.h = remainder;
	}
	enable_mdp_irq(mdp, DL0_ROI_DONE);
	ret = mdp_ppp_blit(mdp, req, src_file, src_start, src_len, dst_file,
			   dst_start,
			   dst_len);
	if (ret)
		goto err_bad_blit;
	ret = mdp_ppp_wait(mdp);
	if (ret)
		goto err_wait_failed;
end:
	put_img(src_file, dst_file);
	mutex_unlock(&mdp_mutex);
	return 0;
err_bad_blit:
	/* blit never started, so the irq source must be disabled by hand;
	 * on a wait failure mdp_wait() already disabled it */
	disable_mdp_irq(mdp, DL0_ROI_DONE);
err_wait_failed:
	put_img(src_file, dst_file);
	mutex_unlock(&mdp_mutex);
	return ret;
}
364
365void mdp_set_grp_disp(struct mdp_device *mdp_dev, unsigned disp_id)
366{
367	struct mdp_info *mdp = container_of(mdp_dev, struct mdp_info, mdp_dev);
368
369	disp_id &= 0xf;
370	mdp_writel(mdp, disp_id, MDP_FULL_BYPASS_WORD43);
371}
372
373int register_mdp_client(struct class_interface *cint)
374{
375	if (!mdp_class) {
376		pr_err("mdp: no mdp_class when registering mdp client\n");
377		return -ENODEV;
378	}
379	cint->class = mdp_class;
380	return class_interface_register(cint);
381}
382
383#include "mdp_csc_table.h"
384#include "mdp_scale_tables.h"
385
386int mdp_probe(struct platform_device *pdev)
387{
388	struct resource *resource;
389	int ret;
390	int n;
391	struct mdp_info *mdp;
392
393	resource = platform_get_resource(pdev, IORESOURCE_MEM, 0);
394	if (!resource) {
395		pr_err("mdp: can not get mdp mem resource!\n");
396		return -ENOMEM;
397	}
398
399	mdp = kzalloc(sizeof(struct mdp_info), GFP_KERNEL);
400	if (!mdp)
401		return -ENOMEM;
402
403	mdp->irq = platform_get_irq(pdev, 0);
404	if (mdp->irq < 0) {
405		pr_err("mdp: can not get mdp irq\n");
406		ret = mdp->irq;
407		goto error_get_irq;
408	}
409
410	mdp->base = ioremap(resource->start, resource_size(resource));
411	if (mdp->base == 0) {
412		printk(KERN_ERR "msmfb: cannot allocate mdp regs!\n");
413		ret = -ENOMEM;
414		goto error_ioremap;
415	}
416
417	mdp->mdp_dev.dma = mdp_dma;
418	mdp->mdp_dev.dma_wait = mdp_dma_wait;
419	mdp->mdp_dev.blit = mdp_blit;
420	mdp->mdp_dev.set_grp_disp = mdp_set_grp_disp;
421
422	clk = clk_get(&pdev->dev, "mdp_clk");
423	if (IS_ERR(clk)) {
424		printk(KERN_INFO "mdp: failed to get mdp clk");
425		ret = PTR_ERR(clk);
426		goto error_get_clk;
427	}
428
429	ret = request_irq(mdp->irq, mdp_isr, 0, "msm_mdp", mdp);
430	if (ret)
431		goto error_request_irq;
432	disable_irq(mdp->irq);
433	mdp_irq_mask = 0;
434
435	/* debug interface write access */
436	mdp_writel(mdp, 1, 0x60);
437
438	mdp_writel(mdp, MDP_ANY_INTR_MASK, MDP_INTR_ENABLE);
439	mdp_writel(mdp, 1, MDP_EBI2_PORTMAP_MODE);
440
441	mdp_writel(mdp, 0, MDP_CMD_DEBUG_ACCESS_BASE + 0x01f8);
442	mdp_writel(mdp, 0, MDP_CMD_DEBUG_ACCESS_BASE + 0x01fc);
443
444	for (n = 0; n < ARRAY_SIZE(csc_table); n++)
445		mdp_writel(mdp, csc_table[n].val, csc_table[n].reg);
446
447	/* clear up unused fg/main registers */
448	/* comp.plane 2&3 ystride */
449	mdp_writel(mdp, 0, MDP_CMD_DEBUG_ACCESS_BASE + 0x0120);
450
451	/* unpacked pattern */
452	mdp_writel(mdp, 0, MDP_CMD_DEBUG_ACCESS_BASE + 0x012c);
453	mdp_writel(mdp, 0, MDP_CMD_DEBUG_ACCESS_BASE + 0x0130);
454	mdp_writel(mdp, 0, MDP_CMD_DEBUG_ACCESS_BASE + 0x0134);
455	mdp_writel(mdp, 0, MDP_CMD_DEBUG_ACCESS_BASE + 0x0158);
456	mdp_writel(mdp, 0, MDP_CMD_DEBUG_ACCESS_BASE + 0x015c);
457	mdp_writel(mdp, 0, MDP_CMD_DEBUG_ACCESS_BASE + 0x0160);
458	mdp_writel(mdp, 0, MDP_CMD_DEBUG_ACCESS_BASE + 0x0170);
459	mdp_writel(mdp, 0, MDP_CMD_DEBUG_ACCESS_BASE + 0x0174);
460	mdp_writel(mdp, 0, MDP_CMD_DEBUG_ACCESS_BASE + 0x017c);
461
462	/* comp.plane 2 & 3 */
463	mdp_writel(mdp, 0, MDP_CMD_DEBUG_ACCESS_BASE + 0x0114);
464	mdp_writel(mdp, 0, MDP_CMD_DEBUG_ACCESS_BASE + 0x0118);
465
466	/* clear unused bg registers */
467	mdp_writel(mdp, 0, MDP_CMD_DEBUG_ACCESS_BASE + 0x01c8);
468	mdp_writel(mdp, 0, MDP_CMD_DEBUG_ACCESS_BASE + 0x01d0);
469	mdp_writel(mdp, 0, MDP_CMD_DEBUG_ACCESS_BASE + 0x01dc);
470	mdp_writel(mdp, 0, MDP_CMD_DEBUG_ACCESS_BASE + 0x01e0);
471	mdp_writel(mdp, 0, MDP_CMD_DEBUG_ACCESS_BASE + 0x01e4);
472
473	for (n = 0; n < ARRAY_SIZE(mdp_upscale_table); n++)
474		mdp_writel(mdp, mdp_upscale_table[n].val,
475		       mdp_upscale_table[n].reg);
476
477	for (n = 0; n < 9; n++)
478		mdp_writel(mdp, mdp_default_ccs[n], 0x40440 + 4 * n);
479	mdp_writel(mdp, mdp_default_ccs[9], 0x40500 + 4 * 0);
480	mdp_writel(mdp, mdp_default_ccs[10], 0x40500 + 4 * 0);
481	mdp_writel(mdp, mdp_default_ccs[11], 0x40500 + 4 * 0);
482
483	/* register mdp device */
484	mdp->mdp_dev.dev.parent = &pdev->dev;
485	mdp->mdp_dev.dev.class = mdp_class;
486	dev_set_name(&mdp->mdp_dev.dev, "mdp%d", pdev->id);
487
488	/* if you can remove the platform device you'd have to implement
489	 * this:
490	mdp_dev.release = mdp_class; */
491
492	ret = device_register(&mdp->mdp_dev.dev);
493	if (ret)
494		goto error_device_register;
495	return 0;
496
497error_device_register:
498	free_irq(mdp->irq, mdp);
499error_request_irq:
500error_get_clk:
501	iounmap(mdp->base);
502error_get_irq:
503error_ioremap:
504	kfree(mdp);
505	return ret;
506}
507
508static struct platform_driver msm_mdp_driver = {
509	.probe = mdp_probe,
510	.driver = {.name = "msm_mdp"},
511};
512
/* Create the msm_mdp device class, then register the platform driver.
 * Returns 0 on success or the class_create() error. */
static int __init mdp_init(void)
{
	mdp_class = class_create(THIS_MODULE, "msm_mdp");
	if (IS_ERR(mdp_class)) {
		printk(KERN_ERR "Error creating mdp class\n");
		return PTR_ERR(mdp_class);
	}
	return platform_driver_register(&msm_mdp_driver);
}

/* subsys level: the class must exist before fb clients register */
subsys_initcall(mdp_init);
524