/*
 * Linux driver for the digital TV devices equipped with B2C2 FlexcopII(b)/III
 * flexcop-pci.c - covers the PCI part including DMA transfers
 * see flexcop.c for copyright information
 */

#define FC_LOG_PREFIX "flexcop-pci"
#include "flexcop-common.h"

static int enable_pid_filtering = 1;
module_param(enable_pid_filtering, int, 0444);
MODULE_PARM_DESC(enable_pid_filtering,
	"enable hardware pid filtering: supported values: 0 (fullts), 1");

static int irq_chk_intv = 100;
module_param(irq_chk_intv, int, 0644);
MODULE_PARM_DESC(irq_chk_intv,
	"set the interval (in msecs) for the IRQ streaming watchdog.");

#ifdef CONFIG_DVB_B2C2_FLEXCOP_DEBUG
#define dprintk(level,args...) \
	do { if ((debug & level)) printk(args); } while (0)
#define DEBSTATUS ""
#else
#define dprintk(level,args...)
#define DEBSTATUS " (debugging is not enabled)"
#endif

#define deb_info(args...) dprintk(0x01, args)
#define deb_reg(args...) dprintk(0x02, args)
#define deb_ts(args...) dprintk(0x04, args)
#define deb_irq(args...) dprintk(0x08, args)
#define deb_chk(args...) dprintk(0x10, args)

static int debug;
module_param(debug, int, 0644);
MODULE_PARM_DESC(debug,
	"set debug level (1=info,2=regs,4=TS,8=irqdma,16=check (|-able))."
	DEBSTATUS);

#define DRIVER_VERSION "0.1"
#define DRIVER_NAME "flexcop-pci"
#define DRIVER_AUTHOR "Patrick Boettcher <patrick.boettcher@desy.de>"

struct flexcop_pci {
	struct pci_dev *pdev;

#define FC_PCI_INIT     0x01
#define FC_PCI_DMA_INIT 0x02
	int init_state;

	void __iomem *io_mem;
	u32 irq;
	/* buffer size (at least for DMA1) needs to be % 188 == 0;
	 * the logic below relies on this */
#define FC_DEFAULT_DMA1_BUFSIZE (1280 * 188)
#define FC_DEFAULT_DMA2_BUFSIZE (10 * 188)
	struct flexcop_dma dma[2];

	int active_dma1_addr; /* 0 = addr0 of dma1; 1 = addr1 of dma1 */
	u32 last_dma1_cur_pos;
	/* position of the pointer last time the timer/packet irq occurred */
	int count;
	int count_prev;
	int stream_problem;

	spinlock_t irq_lock;
	unsigned long last_irq;

	struct delayed_work irq_check_work;
	struct flexcop_device *fc_dev;
};

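/* cache the last read and written register/value pairs so that repeated
 * identical accesses do not flood the register debug log */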
static int lastwreg, lastwval, lastrreg, lastrval;

static flexcop_ibi_value flexcop_pci_read_ibi_reg(struct flexcop_device *fc,
		flexcop_ibi_register r)
{
	struct flexcop_pci *fc_pci = fc->bus_specific;
	flexcop_ibi_value v;
	v.raw = readl(fc_pci->io_mem + r);

	if (lastrreg != r || lastrval != v.raw) {
		lastrreg = r; lastrval = v.raw;
		deb_reg("new rd: %3x: %08x\n", r, v.raw);
	}

	return v;
}

static int flexcop_pci_write_ibi_reg(struct flexcop_device *fc,
		flexcop_ibi_register r, flexcop_ibi_value v)
{
	struct flexcop_pci *fc_pci = fc->bus_specific;

	if (lastwreg != r || lastwval != v.raw) {
		lastwreg = r; lastwval = v.raw;
		deb_reg("new wr: %3x: %08x\n", r, v.raw);
	}

	writel(v.raw, fc_pci->io_mem + r);
	return 0;
}

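/* streaming watchdog: runs every irq_chk_intv milliseconds (100 ms minimum);
 * if feeds are active but fc_pci->count has not changed since the last check,
 * the stream is assumed stuck and, after a few silent checks, all PID feeds
 * are stopped and restarted to reset the hardware PID filter */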
static void flexcop_pci_irq_check_work(struct work_struct *work)
{
	struct flexcop_pci *fc_pci =
		container_of(work, struct flexcop_pci, irq_check_work.work);
	struct flexcop_device *fc = fc_pci->fc_dev;

	if (fc->feedcount) {

		if (fc_pci->count == fc_pci->count_prev) {
			deb_chk("no IRQ since the last check\n");
			if (fc_pci->stream_problem++ == 3) {
				struct dvb_demux_feed *feed;
				deb_info("flexcop-pci: stream problem, resetting pid filter\n");

				spin_lock_irq(&fc->demux.lock);
				list_for_each_entry(feed, &fc->demux.feed_list,
						list_head) {
					flexcop_pid_feed_control(fc, feed, 0);
				}

				list_for_each_entry(feed, &fc->demux.feed_list,
						list_head) {
					flexcop_pid_feed_control(fc, feed, 1);
				}
				spin_unlock_irq(&fc->demux.lock);

				fc_pci->stream_problem = 0;
			}
		} else {
			fc_pci->stream_problem = 0;
			fc_pci->count_prev = fc_pci->count;
		}
	}

	schedule_delayed_work(&fc_pci->irq_check_work,
			msecs_to_jiffies(irq_chk_intv < 100 ? 100 : irq_chk_intv));
}

/* When PID filtering is turned on, we use the timer IRQ, because small amounts
 * of data need to be passed to the user space instantly as well. When PID
 * filtering is turned off, we use the page-change-IRQ */
static irqreturn_t flexcop_pci_isr(int irq, void *dev_id)
{
	struct flexcop_pci *fc_pci = dev_id;
	struct flexcop_device *fc = fc_pci->fc_dev;
	unsigned long flags;
	flexcop_ibi_value v;
	irqreturn_t ret = IRQ_HANDLED;

	spin_lock_irqsave(&fc_pci->irq_lock, flags);
	v = fc->read_ibi_reg(fc, irq_20c);

	/* errors */
	if (v.irq_20c.Data_receiver_error)
		deb_chk("data receiver error\n");
	if (v.irq_20c.Continuity_error_flag)
		deb_chk("Continuity error flag is set\n");
	if (v.irq_20c.LLC_SNAP_FLAG_set)
		deb_chk("LLC_SNAP_FLAG_set is set\n");
	if (v.irq_20c.Transport_Error)
		deb_chk("Transport error\n");

	if ((fc_pci->count % 1000) == 0)
		deb_chk("%d valid irqs took place so far\n", fc_pci->count);

	if (v.irq_20c.DMA1_IRQ_Status == 1) {
		if (fc_pci->active_dma1_addr == 0)
			flexcop_pass_dmx_packets(fc_pci->fc_dev,
					fc_pci->dma[0].cpu_addr0,
					fc_pci->dma[0].size / 188);
		else
			flexcop_pass_dmx_packets(fc_pci->fc_dev,
					fc_pci->dma[0].cpu_addr1,
					fc_pci->dma[0].size / 188);

		deb_irq("page change to page: %d\n", !fc_pci->active_dma1_addr);
		fc_pci->active_dma1_addr = !fc_pci->active_dma1_addr;
		/* for the timer IRQ we can only use buffer dmx feeding,
		 * because we don't have complete TS packets when reading from
		 * the DMA memory */
	} else if (v.irq_20c.DMA1_Timer_Status == 1) {
		/* dma_cur_addr holds the address without its two lowest bits,
		 * hence the shift to get the byte address */
		dma_addr_t cur_addr =
			fc->read_ibi_reg(fc, dma1_008).dma_0x8.dma_cur_addr << 2;
		u32 cur_pos = cur_addr - fc_pci->dma[0].dma_addr0;

		deb_irq("%u irq: %08x cur_addr: %llx: cur_pos: %08x, "
			"last_cur_pos: %08x ",
				jiffies_to_usecs(jiffies - fc_pci->last_irq),
				v.raw, (unsigned long long)cur_addr, cur_pos,
				fc_pci->last_dma1_cur_pos);
		fc_pci->last_irq = jiffies;

		/* the buffer end was reached and the DMA restarted from the
		 * beginning; pass the data from last_cur_pos up to the buffer
		 * end to the demux */
		if (cur_pos < fc_pci->last_dma1_cur_pos) {
			deb_irq(" end was reached: passing %d bytes ",
				(fc_pci->dma[0].size*2 - 1) -
				fc_pci->last_dma1_cur_pos);
			flexcop_pass_dmx_data(fc_pci->fc_dev,
				fc_pci->dma[0].cpu_addr0 +
					fc_pci->last_dma1_cur_pos,
				(fc_pci->dma[0].size*2) -
					fc_pci->last_dma1_cur_pos);
			fc_pci->last_dma1_cur_pos = 0;
		}

		if (cur_pos > fc_pci->last_dma1_cur_pos) {
			deb_irq(" passing %d bytes ",
				cur_pos - fc_pci->last_dma1_cur_pos);
			flexcop_pass_dmx_data(fc_pci->fc_dev,
				fc_pci->dma[0].cpu_addr0 +
					fc_pci->last_dma1_cur_pos,
				cur_pos - fc_pci->last_dma1_cur_pos);
		}
		deb_irq("\n");

		fc_pci->last_dma1_cur_pos = cur_pos;
		fc_pci->count++;
	} else {
		deb_irq("isr for flexcop called, "
			"apparently without reason (%08x)\n", v.raw);
		ret = IRQ_NONE;
	}

	spin_unlock_irqrestore(&fc_pci->irq_lock, flags);
	return ret;
}

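/* start/stop streaming: configure both DMA channels, then switch the DMA1
 * transfer (both sub-buffers) and its timer IRQ on or off */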
static int flexcop_pci_stream_control(struct flexcop_device *fc, int onoff)
{
	struct flexcop_pci *fc_pci = fc->bus_specific;
	if (onoff) {
		flexcop_dma_config(fc, &fc_pci->dma[0], FC_DMA_1);
		flexcop_dma_config(fc, &fc_pci->dma[1], FC_DMA_2);
		flexcop_dma_config_timer(fc, FC_DMA_1, 0);
		flexcop_dma_xfer_control(fc, FC_DMA_1,
				FC_DMA_SUBADDR_0 | FC_DMA_SUBADDR_1, 1);
		deb_irq("DMA xfer enabled\n");

		fc_pci->last_dma1_cur_pos = 0;
		flexcop_dma_control_timer_irq(fc, FC_DMA_1, 1);
		deb_irq("IRQ enabled\n");
		fc_pci->count_prev = fc_pci->count;
	} else {
		flexcop_dma_control_timer_irq(fc, FC_DMA_1, 0);
		deb_irq("IRQ disabled\n");

		flexcop_dma_xfer_control(fc, FC_DMA_1,
			FC_DMA_SUBADDR_0 | FC_DMA_SUBADDR_1, 0);
		deb_irq("DMA xfer disabled\n");
	}
	return 0;
}

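/* allocate the two DMA buffers and route the FlexCop SRAM destinations:
 * media and net data go to DMA1, CAO and CAI data go to DMA2 */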
static int flexcop_pci_dma_init(struct flexcop_pci *fc_pci)
{
	int ret;
	ret = flexcop_dma_allocate(fc_pci->pdev, &fc_pci->dma[0],
			FC_DEFAULT_DMA1_BUFSIZE);
	if (ret != 0)
		return ret;

	ret = flexcop_dma_allocate(fc_pci->pdev, &fc_pci->dma[1],
			FC_DEFAULT_DMA2_BUFSIZE);
	if (ret != 0) {
		flexcop_dma_free(&fc_pci->dma[0]);
		return ret;
	}

	flexcop_sram_set_dest(fc_pci->fc_dev, FC_SRAM_DEST_MEDIA |
			FC_SRAM_DEST_NET, FC_SRAM_DEST_TARGET_DMA1);
	flexcop_sram_set_dest(fc_pci->fc_dev, FC_SRAM_DEST_CAO |
			FC_SRAM_DEST_CAI, FC_SRAM_DEST_TARGET_DMA2);
	fc_pci->init_state |= FC_PCI_DMA_INIT;
	return ret;
}

static void flexcop_pci_dma_exit(struct flexcop_pci *fc_pci)
{
	if (fc_pci->init_state & FC_PCI_DMA_INIT) {
		flexcop_dma_free(&fc_pci->dma[0]);
		flexcop_dma_free(&fc_pci->dma[1]);
	}
	fc_pci->init_state &= ~FC_PCI_DMA_INIT;
}

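/* basic PCI bring-up: enable the device, request its regions, map 0x800
 * bytes of BAR0 and install the shared interrupt handler */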
static int flexcop_pci_init(struct flexcop_pci *fc_pci)
{
	int ret;

	info("card revision %x", fc_pci->pdev->revision);

	if ((ret = pci_enable_device(fc_pci->pdev)) != 0)
		return ret;
	pci_set_master(fc_pci->pdev);

	if ((ret = pci_request_regions(fc_pci->pdev, DRIVER_NAME)) != 0)
		goto err_pci_disable_device;

	fc_pci->io_mem = pci_iomap(fc_pci->pdev, 0, 0x800);

	if (!fc_pci->io_mem) {
		err("cannot map io memory\n");
		ret = -EIO;
		goto err_pci_release_regions;
	}

	pci_set_drvdata(fc_pci->pdev, fc_pci);
	spin_lock_init(&fc_pci->irq_lock);
	if ((ret = request_irq(fc_pci->pdev->irq, flexcop_pci_isr,
					IRQF_SHARED, DRIVER_NAME, fc_pci)) != 0)
		goto err_pci_iounmap;

	fc_pci->init_state |= FC_PCI_INIT;
	return ret;

err_pci_iounmap:
	pci_iounmap(fc_pci->pdev, fc_pci->io_mem);
err_pci_release_regions:
	pci_release_regions(fc_pci->pdev);
err_pci_disable_device:
	pci_disable_device(fc_pci->pdev);
	return ret;
}

static void flexcop_pci_exit(struct flexcop_pci *fc_pci)
{
	if (fc_pci->init_state & FC_PCI_INIT) {
		free_irq(fc_pci->pdev->irq, fc_pci);
		pci_iounmap(fc_pci->pdev, fc_pci->io_mem);
		pci_release_regions(fc_pci->pdev);
		pci_disable_device(fc_pci->pdev);
	}
	fc_pci->init_state &= ~FC_PCI_INIT;
}

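/* probe: allocate the combined flexcop/flexcop-pci state, register the
 * bus-specific callbacks, initialize the PCI and DMA parts and, if
 * irq_chk_intv > 0, kick off the streaming watchdog */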
static int flexcop_pci_probe(struct pci_dev *pdev,
		const struct pci_device_id *ent)
{
	struct flexcop_device *fc;
	struct flexcop_pci *fc_pci;
	int ret = -ENOMEM;

	if ((fc = flexcop_device_kmalloc(sizeof(struct flexcop_pci))) == NULL) {
		err("out of memory\n");
		return -ENOMEM;
	}

	/* general flexcop init */
	fc_pci = fc->bus_specific;
	fc_pci->fc_dev = fc;

	fc->read_ibi_reg = flexcop_pci_read_ibi_reg;
	fc->write_ibi_reg = flexcop_pci_write_ibi_reg;
	fc->i2c_request = flexcop_i2c_request;
	fc->get_mac_addr = flexcop_eeprom_check_mac_addr;
	fc->stream_control = flexcop_pci_stream_control;

	if (enable_pid_filtering)
		info("will use the HW PID filter.");
	else
		info("will pass the complete TS to the demuxer.");

	fc->pid_filtering = enable_pid_filtering;
	fc->bus_type = FC_PCI;
	fc->dev = &pdev->dev;
	fc->owner = THIS_MODULE;

	/* bus specific part */
	fc_pci->pdev = pdev;
	if ((ret = flexcop_pci_init(fc_pci)) != 0)
		goto err_kfree;

	/* init flexcop */
	if ((ret = flexcop_device_initialize(fc)) != 0)
		goto err_pci_exit;

	/* init dma */
	if ((ret = flexcop_pci_dma_init(fc_pci)) != 0)
		goto err_fc_exit;

	INIT_DELAYED_WORK(&fc_pci->irq_check_work, flexcop_pci_irq_check_work);

	if (irq_chk_intv > 0)
		schedule_delayed_work(&fc_pci->irq_check_work,
				msecs_to_jiffies(irq_chk_intv < 100 ?
					100 :
					irq_chk_intv));
	return ret;

err_fc_exit:
	flexcop_device_exit(fc);
err_pci_exit:
	flexcop_pci_exit(fc_pci);
err_kfree:
	flexcop_device_kfree(fc);
	return ret;
}

/* in theory every _exit function should be called from exactly two places:
 * here and from the bail-out path of the _probe function
 */
static void flexcop_pci_remove(struct pci_dev *pdev)
{
	struct flexcop_pci *fc_pci = pci_get_drvdata(pdev);

	if (irq_chk_intv > 0)
		cancel_delayed_work(&fc_pci->irq_check_work);

	flexcop_pci_dma_exit(fc_pci);
	flexcop_device_exit(fc_pci->fc_dev);
	flexcop_pci_exit(fc_pci);
	flexcop_device_kfree(fc_pci->fc_dev);
}

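/* PCI ID of the B2C2 FlexCop PCI bridge handled by this driver */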
static struct pci_device_id flexcop_pci_tbl[] = {
	{ PCI_DEVICE(0x13d0, 0x2103) },
	{ },
};

MODULE_DEVICE_TABLE(pci, flexcop_pci_tbl);

static struct pci_driver flexcop_pci_driver = {
	.name     = "b2c2_flexcop_pci",
	.id_table = flexcop_pci_tbl,
	.probe    = flexcop_pci_probe,
	.remove   = flexcop_pci_remove,
};

module_pci_driver(flexcop_pci_driver);

MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_NAME);
MODULE_LICENSE("GPL");
