1/*
2 *
3 * Author	Karsten Keil <kkeil@novell.com>
4 *
5 * Copyright 2008  by Karsten Keil <kkeil@novell.com>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14 * GNU General Public License for more details.
15 *
16 */
17
18#include <linux/gfp.h>
19#include <linux/module.h>
20#include <linux/mISDNhw.h>
21
22static void
23dchannel_bh(struct work_struct *ws)
24{
25	struct dchannel	*dch  = container_of(ws, struct dchannel, workq);
26	struct sk_buff	*skb;
27	int		err;
28
29	if (test_and_clear_bit(FLG_RECVQUEUE, &dch->Flags)) {
30		while ((skb = skb_dequeue(&dch->rqueue))) {
31			if (likely(dch->dev.D.peer)) {
32				err = dch->dev.D.recv(dch->dev.D.peer, skb);
33				if (err)
34					dev_kfree_skb(skb);
35			} else
36				dev_kfree_skb(skb);
37		}
38	}
39	if (test_and_clear_bit(FLG_PHCHANGE, &dch->Flags)) {
40		if (dch->phfunc)
41			dch->phfunc(dch);
42	}
43}
44
45static void
46bchannel_bh(struct work_struct *ws)
47{
48	struct bchannel	*bch  = container_of(ws, struct bchannel, workq);
49	struct sk_buff	*skb;
50	int		err;
51
52	if (test_and_clear_bit(FLG_RECVQUEUE, &bch->Flags)) {
53		while ((skb = skb_dequeue(&bch->rqueue))) {
54			bch->rcount--;
55			if (likely(bch->ch.peer)) {
56				err = bch->ch.recv(bch->ch.peer, skb);
57				if (err)
58					dev_kfree_skb(skb);
59			} else
60				dev_kfree_skb(skb);
61		}
62	}
63}
64
65int
66mISDN_initdchannel(struct dchannel *ch, int maxlen, void *phf)
67{
68	test_and_set_bit(FLG_HDLC, &ch->Flags);
69	ch->maxlen = maxlen;
70	ch->hw = NULL;
71	ch->rx_skb = NULL;
72	ch->tx_skb = NULL;
73	ch->tx_idx = 0;
74	ch->phfunc = phf;
75	skb_queue_head_init(&ch->squeue);
76	skb_queue_head_init(&ch->rqueue);
77	INIT_LIST_HEAD(&ch->dev.bchannels);
78	INIT_WORK(&ch->workq, dchannel_bh);
79	return 0;
80}
81EXPORT_SYMBOL(mISDN_initdchannel);
82
83int
84mISDN_initbchannel(struct bchannel *ch, unsigned short maxlen,
85		   unsigned short minlen)
86{
87	ch->Flags = 0;
88	ch->minlen = minlen;
89	ch->next_minlen = minlen;
90	ch->init_minlen = minlen;
91	ch->maxlen = maxlen;
92	ch->next_maxlen = maxlen;
93	ch->init_maxlen = maxlen;
94	ch->hw = NULL;
95	ch->rx_skb = NULL;
96	ch->tx_skb = NULL;
97	ch->tx_idx = 0;
98	skb_queue_head_init(&ch->rqueue);
99	ch->rcount = 0;
100	ch->next_skb = NULL;
101	INIT_WORK(&ch->workq, bchannel_bh);
102	return 0;
103}
104EXPORT_SYMBOL(mISDN_initbchannel);
105
106int
107mISDN_freedchannel(struct dchannel *ch)
108{
109	if (ch->tx_skb) {
110		dev_kfree_skb(ch->tx_skb);
111		ch->tx_skb = NULL;
112	}
113	if (ch->rx_skb) {
114		dev_kfree_skb(ch->rx_skb);
115		ch->rx_skb = NULL;
116	}
117	skb_queue_purge(&ch->squeue);
118	skb_queue_purge(&ch->rqueue);
119	flush_work(&ch->workq);
120	return 0;
121}
122EXPORT_SYMBOL(mISDN_freedchannel);
123
124void
125mISDN_clear_bchannel(struct bchannel *ch)
126{
127	if (ch->tx_skb) {
128		dev_kfree_skb(ch->tx_skb);
129		ch->tx_skb = NULL;
130	}
131	ch->tx_idx = 0;
132	if (ch->rx_skb) {
133		dev_kfree_skb(ch->rx_skb);
134		ch->rx_skb = NULL;
135	}
136	if (ch->next_skb) {
137		dev_kfree_skb(ch->next_skb);
138		ch->next_skb = NULL;
139	}
140	test_and_clear_bit(FLG_TX_BUSY, &ch->Flags);
141	test_and_clear_bit(FLG_TX_NEXT, &ch->Flags);
142	test_and_clear_bit(FLG_ACTIVE, &ch->Flags);
143	test_and_clear_bit(FLG_FILLEMPTY, &ch->Flags);
144	test_and_clear_bit(FLG_TX_EMPTY, &ch->Flags);
145	test_and_clear_bit(FLG_RX_OFF, &ch->Flags);
146	ch->dropcnt = 0;
147	ch->minlen = ch->init_minlen;
148	ch->next_minlen = ch->init_minlen;
149	ch->maxlen = ch->init_maxlen;
150	ch->next_maxlen = ch->init_maxlen;
151	skb_queue_purge(&ch->rqueue);
152	ch->rcount = 0;
153}
154EXPORT_SYMBOL(mISDN_clear_bchannel);
155
/*
 * Tear down a B-channel: stop the deferred worker, then drop every
 * buffered skb and reset the channel state.
 */
void
mISDN_freebchannel(struct bchannel *ch)
{
	/* bchannel_bh() must not run while we purge its queues */
	cancel_work_sync(&ch->workq);
	mISDN_clear_bchannel(ch);
}
EXPORT_SYMBOL(mISDN_freebchannel);
163
164int
165mISDN_ctrl_bchannel(struct bchannel *bch, struct mISDN_ctrl_req *cq)
166{
167	int ret = 0;
168
169	switch (cq->op) {
170	case MISDN_CTRL_GETOP:
171		cq->op = MISDN_CTRL_RX_BUFFER | MISDN_CTRL_FILL_EMPTY |
172			 MISDN_CTRL_RX_OFF;
173		break;
174	case MISDN_CTRL_FILL_EMPTY:
175		if (cq->p1) {
176			memset(bch->fill, cq->p2 & 0xff, MISDN_BCH_FILL_SIZE);
177			test_and_set_bit(FLG_FILLEMPTY, &bch->Flags);
178		} else {
179			test_and_clear_bit(FLG_FILLEMPTY, &bch->Flags);
180		}
181		break;
182	case MISDN_CTRL_RX_OFF:
183		/* read back dropped byte count */
184		cq->p2 = bch->dropcnt;
185		if (cq->p1)
186			test_and_set_bit(FLG_RX_OFF, &bch->Flags);
187		else
188			test_and_clear_bit(FLG_RX_OFF, &bch->Flags);
189		bch->dropcnt = 0;
190		break;
191	case MISDN_CTRL_RX_BUFFER:
192		if (cq->p2 > MISDN_CTRL_RX_SIZE_IGNORE)
193			bch->next_maxlen = cq->p2;
194		if (cq->p1 > MISDN_CTRL_RX_SIZE_IGNORE)
195			bch->next_minlen = cq->p1;
196		/* we return the old values */
197		cq->p1 = bch->minlen;
198		cq->p2 = bch->maxlen;
199		break;
200	default:
201		pr_info("mISDN unhandled control %x operation\n", cq->op);
202		ret = -EINVAL;
203		break;
204	}
205	return ret;
206}
207EXPORT_SYMBOL(mISDN_ctrl_bchannel);
208
209static inline u_int
210get_sapi_tei(u_char *p)
211{
212	u_int	sapi, tei;
213
214	sapi = *p >> 2;
215	tei = p[1] >> 1;
216	return sapi | (tei << 8);
217}
218
219void
220recv_Dchannel(struct dchannel *dch)
221{
222	struct mISDNhead *hh;
223
224	if (dch->rx_skb->len < 2) { /* at least 2 for sapi / tei */
225		dev_kfree_skb(dch->rx_skb);
226		dch->rx_skb = NULL;
227		return;
228	}
229	hh = mISDN_HEAD_P(dch->rx_skb);
230	hh->prim = PH_DATA_IND;
231	hh->id = get_sapi_tei(dch->rx_skb->data);
232	skb_queue_tail(&dch->rqueue, dch->rx_skb);
233	dch->rx_skb = NULL;
234	schedule_event(dch, FLG_RECVQUEUE);
235}
236EXPORT_SYMBOL(recv_Dchannel);
237
238void
239recv_Echannel(struct dchannel *ech, struct dchannel *dch)
240{
241	struct mISDNhead *hh;
242
243	if (ech->rx_skb->len < 2) { /* at least 2 for sapi / tei */
244		dev_kfree_skb(ech->rx_skb);
245		ech->rx_skb = NULL;
246		return;
247	}
248	hh = mISDN_HEAD_P(ech->rx_skb);
249	hh->prim = PH_DATA_E_IND;
250	hh->id = get_sapi_tei(ech->rx_skb->data);
251	skb_queue_tail(&dch->rqueue, ech->rx_skb);
252	ech->rx_skb = NULL;
253	schedule_event(dch, FLG_RECVQUEUE);
254}
255EXPORT_SYMBOL(recv_Echannel);
256
257void
258recv_Bchannel(struct bchannel *bch, unsigned int id, bool force)
259{
260	struct mISDNhead *hh;
261
262	/* if allocation did fail upper functions still may call us */
263	if (unlikely(!bch->rx_skb))
264		return;
265	if (unlikely(!bch->rx_skb->len)) {
266		/* we have no data to send - this may happen after recovery
267		 * from overflow or too small allocation.
268		 * We need to free the buffer here */
269		dev_kfree_skb(bch->rx_skb);
270		bch->rx_skb = NULL;
271	} else {
272		if (test_bit(FLG_TRANSPARENT, &bch->Flags) &&
273		    (bch->rx_skb->len < bch->minlen) && !force)
274				return;
275		hh = mISDN_HEAD_P(bch->rx_skb);
276		hh->prim = PH_DATA_IND;
277		hh->id = id;
278		if (bch->rcount >= 64) {
279			printk(KERN_WARNING
280			       "B%d receive queue overflow - flushing!\n",
281			       bch->nr);
282			skb_queue_purge(&bch->rqueue);
283		}
284		bch->rcount++;
285		skb_queue_tail(&bch->rqueue, bch->rx_skb);
286		bch->rx_skb = NULL;
287		schedule_event(bch, FLG_RECVQUEUE);
288	}
289}
290EXPORT_SYMBOL(recv_Bchannel);
291
/*
 * Queue an already-prepared skb on the D-channel receive queue and
 * kick the deferred worker. Ownership of @skb passes to the queue.
 */
void
recv_Dchannel_skb(struct dchannel *dch, struct sk_buff *skb)
{
	skb_queue_tail(&dch->rqueue, skb);
	schedule_event(dch, FLG_RECVQUEUE);
}
EXPORT_SYMBOL(recv_Dchannel_skb);
299
300void
301recv_Bchannel_skb(struct bchannel *bch, struct sk_buff *skb)
302{
303	if (bch->rcount >= 64) {
304		printk(KERN_WARNING "B-channel %p receive queue overflow, "
305		       "flushing!\n", bch);
306		skb_queue_purge(&bch->rqueue);
307		bch->rcount = 0;
308	}
309	bch->rcount++;
310	skb_queue_tail(&bch->rqueue, skb);
311	schedule_event(bch, FLG_RECVQUEUE);
312}
313EXPORT_SYMBOL(recv_Bchannel_skb);
314
315static void
316confirm_Dsend(struct dchannel *dch)
317{
318	struct sk_buff	*skb;
319
320	skb = _alloc_mISDN_skb(PH_DATA_CNF, mISDN_HEAD_ID(dch->tx_skb),
321			       0, NULL, GFP_ATOMIC);
322	if (!skb) {
323		printk(KERN_ERR "%s: no skb id %x\n", __func__,
324		       mISDN_HEAD_ID(dch->tx_skb));
325		return;
326	}
327	skb_queue_tail(&dch->rqueue, skb);
328	schedule_event(dch, FLG_RECVQUEUE);
329}
330
331int
332get_next_dframe(struct dchannel *dch)
333{
334	dch->tx_idx = 0;
335	dch->tx_skb = skb_dequeue(&dch->squeue);
336	if (dch->tx_skb) {
337		confirm_Dsend(dch);
338		return 1;
339	}
340	dch->tx_skb = NULL;
341	test_and_clear_bit(FLG_TX_BUSY, &dch->Flags);
342	return 0;
343}
344EXPORT_SYMBOL(get_next_dframe);
345
346static void
347confirm_Bsend(struct bchannel *bch)
348{
349	struct sk_buff	*skb;
350
351	if (bch->rcount >= 64) {
352		printk(KERN_WARNING "B-channel %p receive queue overflow, "
353		       "flushing!\n", bch);
354		skb_queue_purge(&bch->rqueue);
355		bch->rcount = 0;
356	}
357	skb = _alloc_mISDN_skb(PH_DATA_CNF, mISDN_HEAD_ID(bch->tx_skb),
358			       0, NULL, GFP_ATOMIC);
359	if (!skb) {
360		printk(KERN_ERR "%s: no skb id %x\n", __func__,
361		       mISDN_HEAD_ID(bch->tx_skb));
362		return;
363	}
364	bch->rcount++;
365	skb_queue_tail(&bch->rqueue, skb);
366	schedule_event(bch, FLG_RECVQUEUE);
367}
368
369int
370get_next_bframe(struct bchannel *bch)
371{
372	bch->tx_idx = 0;
373	if (test_bit(FLG_TX_NEXT, &bch->Flags)) {
374		bch->tx_skb = bch->next_skb;
375		if (bch->tx_skb) {
376			bch->next_skb = NULL;
377			test_and_clear_bit(FLG_TX_NEXT, &bch->Flags);
378			/* confirm imediately to allow next data */
379			confirm_Bsend(bch);
380			return 1;
381		} else {
382			test_and_clear_bit(FLG_TX_NEXT, &bch->Flags);
383			printk(KERN_WARNING "B TX_NEXT without skb\n");
384		}
385	}
386	bch->tx_skb = NULL;
387	test_and_clear_bit(FLG_TX_BUSY, &bch->Flags);
388	return 0;
389}
390EXPORT_SYMBOL(get_next_bframe);
391
392void
393queue_ch_frame(struct mISDNchannel *ch, u_int pr, int id, struct sk_buff *skb)
394{
395	struct mISDNhead *hh;
396
397	if (!skb) {
398		_queue_data(ch, pr, id, 0, NULL, GFP_ATOMIC);
399	} else {
400		if (ch->peer) {
401			hh = mISDN_HEAD_P(skb);
402			hh->prim = pr;
403			hh->id = id;
404			if (!ch->recv(ch->peer, skb))
405				return;
406		}
407		dev_kfree_skb(skb);
408	}
409}
410EXPORT_SYMBOL(queue_ch_frame);
411
412int
413dchannel_senddata(struct dchannel *ch, struct sk_buff *skb)
414{
415	/* check oversize */
416	if (skb->len <= 0) {
417		printk(KERN_WARNING "%s: skb too small\n", __func__);
418		return -EINVAL;
419	}
420	if (skb->len > ch->maxlen) {
421		printk(KERN_WARNING "%s: skb too large(%d/%d)\n",
422		       __func__, skb->len, ch->maxlen);
423		return -EINVAL;
424	}
425	/* HW lock must be obtained */
426	if (test_and_set_bit(FLG_TX_BUSY, &ch->Flags)) {
427		skb_queue_tail(&ch->squeue, skb);
428		return 0;
429	} else {
430		/* write to fifo */
431		ch->tx_skb = skb;
432		ch->tx_idx = 0;
433		return 1;
434	}
435}
436EXPORT_SYMBOL(dchannel_senddata);
437
438int
439bchannel_senddata(struct bchannel *ch, struct sk_buff *skb)
440{
441
442	/* check oversize */
443	if (skb->len <= 0) {
444		printk(KERN_WARNING "%s: skb too small\n", __func__);
445		return -EINVAL;
446	}
447	if (skb->len > ch->maxlen) {
448		printk(KERN_WARNING "%s: skb too large(%d/%d)\n",
449		       __func__, skb->len, ch->maxlen);
450		return -EINVAL;
451	}
452	/* HW lock must be obtained */
453	/* check for pending next_skb */
454	if (ch->next_skb) {
455		printk(KERN_WARNING
456		       "%s: next_skb exist ERROR (skb->len=%d next_skb->len=%d)\n",
457		       __func__, skb->len, ch->next_skb->len);
458		return -EBUSY;
459	}
460	if (test_and_set_bit(FLG_TX_BUSY, &ch->Flags)) {
461		test_and_set_bit(FLG_TX_NEXT, &ch->Flags);
462		ch->next_skb = skb;
463		return 0;
464	} else {
465		/* write to fifo */
466		ch->tx_skb = skb;
467		ch->tx_idx = 0;
468		confirm_Bsend(ch);
469		return 1;
470	}
471}
472EXPORT_SYMBOL(bchannel_senddata);
473
474/* The function allocates a new receive skb on demand with a size for the
475 * requirements of the current protocol. It returns the tailroom of the
476 * receive skb or an error.
477 */
478int
479bchannel_get_rxbuf(struct bchannel *bch, int reqlen)
480{
481	int len;
482
483	if (bch->rx_skb) {
484		len = skb_tailroom(bch->rx_skb);
485		if (len < reqlen) {
486			pr_warning("B%d no space for %d (only %d) bytes\n",
487				   bch->nr, reqlen, len);
488			if (test_bit(FLG_TRANSPARENT, &bch->Flags)) {
489				/* send what we have now and try a new buffer */
490				recv_Bchannel(bch, 0, true);
491			} else {
492				/* on HDLC we have to drop too big frames */
493				return -EMSGSIZE;
494			}
495		} else {
496			return len;
497		}
498	}
499	/* update current min/max length first */
500	if (unlikely(bch->maxlen != bch->next_maxlen))
501		bch->maxlen = bch->next_maxlen;
502	if (unlikely(bch->minlen != bch->next_minlen))
503		bch->minlen = bch->next_minlen;
504	if (unlikely(reqlen > bch->maxlen))
505		return -EMSGSIZE;
506	if (test_bit(FLG_TRANSPARENT, &bch->Flags)) {
507		if (reqlen >= bch->minlen) {
508			len = reqlen;
509		} else {
510			len = 2 * bch->minlen;
511			if (len > bch->maxlen)
512				len = bch->maxlen;
513		}
514	} else {
515		/* with HDLC we do not know the length yet */
516		len = bch->maxlen;
517	}
518	bch->rx_skb = mI_alloc_skb(len, GFP_ATOMIC);
519	if (!bch->rx_skb) {
520		pr_warning("B%d receive no memory for %d bytes\n",
521			   bch->nr, len);
522		len = -ENOMEM;
523	}
524	return len;
525}
526EXPORT_SYMBOL(bchannel_get_rxbuf);
527