llc_shdlc.c revision 916082b073ebb7f4e064cebce0768e34cacde508
/*
 * shdlc Link Layer Control
 *
 * Copyright (C) 2012  Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the
 * Free Software Foundation, Inc.,
 * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 */

#define pr_fmt(fmt) "shdlc: %s: " fmt, __func__

#include <linux/types.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/slab.h>
#include <linux/skbuff.h>

#include "llc.h"

enum shdlc_state {
	SHDLC_DISCONNECTED = 0,
	SHDLC_CONNECTING = 1,
	SHDLC_NEGOTIATING = 2,
	SHDLC_HALF_CONNECTED = 3,
	SHDLC_CONNECTED = 4
};

struct llc_shdlc {
	struct nfc_hci_dev *hdev;
	xmit_to_drv_t xmit_to_drv;
	rcv_to_hci_t rcv_to_hci;

	struct mutex state_mutex;
	enum shdlc_state state;
	int hard_fault;

	wait_queue_head_t *connect_wq;
	int connect_tries;
	int connect_result;
	struct timer_list connect_timer;	/* aka T3 in spec 10.6.1 */

	u8 w;				/* window size */
	bool srej_support;

	struct timer_list t1_timer;	/* send ack timeout */
	bool t1_active;

	struct timer_list t2_timer;	/* guard/retransmit timeout */
	bool t2_active;

	int ns;				/* next seq num for send */
	int nr;				/* next expected seq num for receive */
	int dnr;			/* oldest sent unacked seq num */

	struct sk_buff_head rcv_q;

	struct sk_buff_head send_q;
	bool rnr;			/* other side is not ready to receive */

	struct sk_buff_head ack_pending_q;

	struct work_struct sm_work;

	int tx_headroom;
	int tx_tailroom;

	llc_failure_t llc_failure;
};

#define SHDLC_LLC_HEAD_ROOM	2

#define SHDLC_MAX_WINDOW	4
#define SHDLC_SREJ_SUPPORT	false

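/*
 * Control byte layout (the single byte in front of every SHDLC frame),
 * as encoded by the masks below:
 *   I-frame: bits 7-6 = 10, bits 5-3 = N(S), bits 2-0 = N(R)
 *            (HEAD_I and HEAD_I2 both match since bit 5 belongs to N(S))
 *   S-frame: bits 7-5 = 110, bits 4-3 = type (RR/REJ/RNR/SREJ), bits 2-0 = N(R)
 *   U-frame: bits 7-5 = 111, bits 4-0 = modifier (UA/RSET)
 */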
#define SHDLC_CONTROL_HEAD_MASK	0xe0
#define SHDLC_CONTROL_HEAD_I	0x80
#define SHDLC_CONTROL_HEAD_I2	0xa0
#define SHDLC_CONTROL_HEAD_S	0xc0
#define SHDLC_CONTROL_HEAD_U	0xe0

#define SHDLC_CONTROL_NS_MASK	0x38
#define SHDLC_CONTROL_NR_MASK	0x07
#define SHDLC_CONTROL_TYPE_MASK	0x18

#define SHDLC_CONTROL_M_MASK	0x1f

enum sframe_type {
	S_FRAME_RR = 0x00,
	S_FRAME_REJ = 0x01,
	S_FRAME_RNR = 0x02,
	S_FRAME_SREJ = 0x03
};

enum uframe_modifier {
	U_FRAME_UA = 0x06,
	U_FRAME_RSET = 0x19
};

#define SHDLC_CONNECT_VALUE_MS	5
#define SHDLC_T1_VALUE_MS(w)	((5 * w) / 4)
#define SHDLC_T2_VALUE_MS	300

#define SHDLC_DUMP_SKB(info, skb)				  \
do {								  \
	pr_debug("%s:\n", info);				  \
	print_hex_dump(KERN_DEBUG, "shdlc: ", DUMP_PREFIX_OFFSET, \
		       16, 1, skb->data, skb->len, 0);		  \
} while (0)

/* checks x < y <= z modulo 8 */
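/* e.g. x = 6, y = 0, z = 2 stands for 6 < 8 <= 10 once unwrapped, hence true */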
static bool llc_shdlc_x_lt_y_lteq_z(int x, int y, int z)
{
	if (x < z)
		return ((x < y) && (y <= z)) ? true : false;
	else
		return ((y > x) || (y <= z)) ? true : false;
}

/* checks x <= y < z modulo 8 */
static bool llc_shdlc_x_lteq_y_lt_z(int x, int y, int z)
{
	if (x <= z)
		return ((x <= y) && (y < z)) ? true : false;
	else			/* x > z -> z+8 > x */
		return ((y >= x) || (y < z)) ? true : false;
}

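/*
 * Allocate a TX skb and reserve the driver headroom plus the SHDLC LLC
 * headroom, so that callers can push the control byte later without
 * reallocating.
 */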
static struct sk_buff *llc_shdlc_alloc_skb(struct llc_shdlc *shdlc,
					   int payload_len)
{
	struct sk_buff *skb;

	skb = alloc_skb(shdlc->tx_headroom + SHDLC_LLC_HEAD_ROOM +
			shdlc->tx_tailroom + payload_len, GFP_KERNEL);
	if (skb)
		skb_reserve(skb, shdlc->tx_headroom + SHDLC_LLC_HEAD_ROOM);

	return skb;
}

/* immediately sends an S frame. */
static int llc_shdlc_send_s_frame(struct llc_shdlc *shdlc,
				  enum sframe_type sframe_type, int nr)
{
	int r;
	struct sk_buff *skb;

	pr_debug("sframe_type=%d nr=%d\n", sframe_type, nr);

	skb = llc_shdlc_alloc_skb(shdlc, 0);
	if (skb == NULL)
		return -ENOMEM;

	*skb_push(skb, 1) = SHDLC_CONTROL_HEAD_S | (sframe_type << 3) | nr;

	r = shdlc->xmit_to_drv(shdlc->hdev, skb);

	kfree_skb(skb);

	return r;
}

/* immediately sends a U frame. skb may contain optional payload */
static int llc_shdlc_send_u_frame(struct llc_shdlc *shdlc,
				  struct sk_buff *skb,
				  enum uframe_modifier uframe_modifier)
{
	int r;

	pr_debug("uframe_modifier=%d\n", uframe_modifier);

	*skb_push(skb, 1) = SHDLC_CONTROL_HEAD_U | uframe_modifier;

	r = shdlc->xmit_to_drv(shdlc->hdev, skb);

	kfree_skb(skb);

	return r;
}

/*
 * Free ack_pending frames up to y_nr - 1 and reset T2 according to the
 * send time of the oldest remaining ack_pending frame.
 */
static void llc_shdlc_reset_t2(struct llc_shdlc *shdlc, int y_nr)
{
	struct sk_buff *skb;
	int dnr = shdlc->dnr;	/* MUST initially be < y_nr */

	pr_debug("release ack pending up to frame %d excluded\n", y_nr);

	while (dnr != y_nr) {
		pr_debug("release ack pending frame %d\n", dnr);

		skb = skb_dequeue(&shdlc->ack_pending_q);
		kfree_skb(skb);

		dnr = (dnr + 1) % 8;
	}

	if (skb_queue_empty(&shdlc->ack_pending_q)) {
		if (shdlc->t2_active) {
			del_timer_sync(&shdlc->t2_timer);
			shdlc->t2_active = false;

			pr_debug
			    ("All sent frames acked. Stopped T2(retransmit)\n");
		}
	} else {
		skb = skb_peek(&shdlc->ack_pending_q);

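		/*
		 * skb->cb holds the jiffies value recorded when this frame
		 * was sent (set in llc_shdlc_handle_send_queue).
		 */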
		mod_timer(&shdlc->t2_timer, *(unsigned long *)skb->cb +
			  msecs_to_jiffies(SHDLC_T2_VALUE_MS));
		shdlc->t2_active = true;

		pr_debug
		    ("Start T2(retransmit) for remaining unacked sent frames\n");
	}
}

/*
 * Receive validated frames from lower layer. skb contains HCI payload only.
 * Handle according to algorithm at spec:10.8.2
 */
static void llc_shdlc_rcv_i_frame(struct llc_shdlc *shdlc,
				  struct sk_buff *skb, int ns, int nr)
{
	int x_ns = ns;
	int y_nr = nr;

	pr_debug("recvd I-frame %d, remote waiting frame %d\n", ns, nr);

	if (shdlc->state != SHDLC_CONNECTED)
		goto exit;

	if (x_ns != shdlc->nr) {
		llc_shdlc_send_s_frame(shdlc, S_FRAME_REJ, shdlc->nr);
		goto exit;
	}

	if (shdlc->t1_active == false) {
		shdlc->t1_active = true;
		mod_timer(&shdlc->t1_timer, jiffies +
			  msecs_to_jiffies(SHDLC_T1_VALUE_MS(shdlc->w)));
		pr_debug("(re)Start T1(send ack)\n");
	}

	if (skb->len) {
		shdlc->rcv_to_hci(shdlc->hdev, skb);
		skb = NULL;
	}

	shdlc->nr = (shdlc->nr + 1) % 8;

	if (llc_shdlc_x_lt_y_lteq_z(shdlc->dnr, y_nr, shdlc->ns)) {
		llc_shdlc_reset_t2(shdlc, y_nr);

		shdlc->dnr = y_nr;
	}

exit:
	kfree_skb(skb);
}

static void llc_shdlc_rcv_ack(struct llc_shdlc *shdlc, int y_nr)
{
	pr_debug("remote acked up to frame %d excluded\n", y_nr);

	if (llc_shdlc_x_lt_y_lteq_z(shdlc->dnr, y_nr, shdlc->ns)) {
		llc_shdlc_reset_t2(shdlc, y_nr);
		shdlc->dnr = y_nr;
	}
}

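/*
 * Put every sent-but-unacked frame back at the head of send_q for
 * retransmission. The control byte is stripped here and rebuilt with the
 * current N(R) when the frame is sent again.
 */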
static void llc_shdlc_requeue_ack_pending(struct llc_shdlc *shdlc)
{
	struct sk_buff *skb;

	pr_debug("ns reset to %d\n", shdlc->dnr);

	while ((skb = skb_dequeue_tail(&shdlc->ack_pending_q))) {
		skb_pull(skb, 1);	/* remove control field */
		skb_queue_head(&shdlc->send_q, skb);
	}
	shdlc->ns = shdlc->dnr;
}

static void llc_shdlc_rcv_rej(struct llc_shdlc *shdlc, int y_nr)
{
	struct sk_buff *skb;

	pr_debug("remote asks retransmission from frame %d\n", y_nr);

	if (llc_shdlc_x_lteq_y_lt_z(shdlc->dnr, y_nr, shdlc->ns)) {
		if (shdlc->t2_active) {
			del_timer_sync(&shdlc->t2_timer);
			shdlc->t2_active = false;
			pr_debug("Stopped T2(retransmit)\n");
		}

		if (shdlc->dnr != y_nr) {
			while ((shdlc->dnr = ((shdlc->dnr + 1) % 8)) != y_nr) {
				skb = skb_dequeue(&shdlc->ack_pending_q);
				kfree_skb(skb);
			}
		}

		llc_shdlc_requeue_ack_pending(shdlc);
	}
}

/* See spec RR:10.8.3 REJ:10.8.4 */
static void llc_shdlc_rcv_s_frame(struct llc_shdlc *shdlc,
				  enum sframe_type s_frame_type, int nr)
{
	struct sk_buff *skb;

	if (shdlc->state != SHDLC_CONNECTED)
		return;

	switch (s_frame_type) {
	case S_FRAME_RR:
		llc_shdlc_rcv_ack(shdlc, nr);
		if (shdlc->rnr == true) {	/* see SHDLC 10.7.7 */
			shdlc->rnr = false;
			if (shdlc->send_q.qlen == 0) {
				skb = llc_shdlc_alloc_skb(shdlc, 0);
				if (skb)
					skb_queue_tail(&shdlc->send_q, skb);
			}
		}
		break;
	case S_FRAME_REJ:
		llc_shdlc_rcv_rej(shdlc, nr);
		break;
	case S_FRAME_RNR:
		llc_shdlc_rcv_ack(shdlc, nr);
		shdlc->rnr = true;
		break;
	default:
		break;
	}
}

static void llc_shdlc_connect_complete(struct llc_shdlc *shdlc, int r)
{
	pr_debug("result=%d\n", r);

	del_timer_sync(&shdlc->connect_timer);

	if (r == 0) {
		shdlc->ns = 0;
		shdlc->nr = 0;
		shdlc->dnr = 0;

		shdlc->state = SHDLC_HALF_CONNECTED;
	} else {
		shdlc->state = SHDLC_DISCONNECTED;
	}

	shdlc->connect_result = r;

	wake_up(shdlc->connect_wq);
}

static int llc_shdlc_connect_initiate(struct llc_shdlc *shdlc)
{
	struct sk_buff *skb;

	pr_debug("\n");

	skb = llc_shdlc_alloc_skb(shdlc, 2);
	if (skb == NULL)
		return -ENOMEM;

	*skb_put(skb, 1) = SHDLC_MAX_WINDOW;
	*skb_put(skb, 1) = SHDLC_SREJ_SUPPORT ? 1 : 0;

	return llc_shdlc_send_u_frame(shdlc, skb, U_FRAME_RSET);
}

static int llc_shdlc_connect_send_ua(struct llc_shdlc *shdlc)
{
	struct sk_buff *skb;

	pr_debug("\n");

	skb = llc_shdlc_alloc_skb(shdlc, 0);
	if (skb == NULL)
		return -ENOMEM;

	return llc_shdlc_send_u_frame(shdlc, skb, U_FRAME_UA);
}


static void llc_shdlc_rcv_u_frame(struct llc_shdlc *shdlc,
				  struct sk_buff *skb,
				  enum uframe_modifier u_frame_modifier)
{
	u8 w = SHDLC_MAX_WINDOW;
	bool srej_support = SHDLC_SREJ_SUPPORT;
	int r;

	pr_debug("u_frame_modifier=%d\n", u_frame_modifier);

	switch (u_frame_modifier) {
	case U_FRAME_RSET:
		switch (shdlc->state) {
		case SHDLC_NEGOTIATING:
		case SHDLC_CONNECTING:
			/*
			 * We sent RSET, but the chip wants to negotiate, or
			 * we got RSET before we managed to send out ours.
			 */
			if (skb->len > 0)
				w = skb->data[0];

			if (skb->len > 1)
				srej_support = skb->data[1] & 0x01 ? true :
					       false;

			if ((w <= SHDLC_MAX_WINDOW) &&
			    (SHDLC_SREJ_SUPPORT || (srej_support == false))) {
				shdlc->w = w;
				shdlc->srej_support = srej_support;
				r = llc_shdlc_connect_send_ua(shdlc);
				llc_shdlc_connect_complete(shdlc, r);
			}
			break;
		case SHDLC_HALF_CONNECTED:
			/*
			 * Chip resent RSET due to its timeout - Ignore it
			 * as we already sent UA.
			 */
			break;
		case SHDLC_CONNECTED:
			/*
			 * Chip wants to reset link. This is unexpected and
			 * unsupported.
			 */
			shdlc->hard_fault = -ECONNRESET;
			break;
		default:
			break;
		}
		break;
	case U_FRAME_UA:
		if ((shdlc->state == SHDLC_CONNECTING &&
		     shdlc->connect_tries > 0) ||
		    (shdlc->state == SHDLC_NEGOTIATING)) {
			llc_shdlc_connect_complete(shdlc, 0);
			shdlc->state = SHDLC_CONNECTED;
		}
		break;
	default:
		break;
	}

	kfree_skb(skb);
}


static void llc_shdlc_handle_rcv_queue(struct llc_shdlc *shdlc)
{
	struct sk_buff *skb;
	u8 control;
	int nr;
	int ns;
	enum sframe_type s_frame_type;
	enum uframe_modifier u_frame_modifier;

	if (shdlc->rcv_q.qlen)
		pr_debug("rcvQlen=%d\n", shdlc->rcv_q.qlen);

	while ((skb = skb_dequeue(&shdlc->rcv_q)) != NULL) {
		control = skb->data[0];
		skb_pull(skb, 1);
		switch (control & SHDLC_CONTROL_HEAD_MASK) {
		case SHDLC_CONTROL_HEAD_I:
		case SHDLC_CONTROL_HEAD_I2:
			if (shdlc->state == SHDLC_HALF_CONNECTED)
				shdlc->state = SHDLC_CONNECTED;

			ns = (control & SHDLC_CONTROL_NS_MASK) >> 3;
			nr = control & SHDLC_CONTROL_NR_MASK;
			llc_shdlc_rcv_i_frame(shdlc, skb, ns, nr);
			break;
		case SHDLC_CONTROL_HEAD_S:
			if (shdlc->state == SHDLC_HALF_CONNECTED)
				shdlc->state = SHDLC_CONNECTED;

			s_frame_type = (control & SHDLC_CONTROL_TYPE_MASK) >> 3;
			nr = control & SHDLC_CONTROL_NR_MASK;
			llc_shdlc_rcv_s_frame(shdlc, s_frame_type, nr);
			kfree_skb(skb);
			break;
		case SHDLC_CONTROL_HEAD_U:
			u_frame_modifier = control & SHDLC_CONTROL_M_MASK;
			llc_shdlc_rcv_u_frame(shdlc, skb, u_frame_modifier);
			break;
		default:
			pr_err("UNKNOWN Control=%d\n", control);
			kfree_skb(skb);
			break;
		}
	}
}

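/* e.g. ns = 1, dnr = 6: frames 6, 7 and 0 are still unacked, so this returns 3 */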
static int llc_shdlc_w_used(int ns, int dnr)
{
	int unack_count;

	if (dnr <= ns)
		unack_count = ns - dnr;
	else
		unack_count = 8 - dnr + ns;

	return unack_count;
}

/* Send frames according to algorithm at spec:10.8.1 */
static void llc_shdlc_handle_send_queue(struct llc_shdlc *shdlc)
{
	struct sk_buff *skb;
	int r;
	unsigned long time_sent;

	if (shdlc->send_q.qlen)
		pr_debug
		    ("sendQlen=%d ns=%d dnr=%d rnr=%s w_room=%d unackQlen=%d\n",
		     shdlc->send_q.qlen, shdlc->ns, shdlc->dnr,
		     shdlc->rnr == false ? "false" : "true",
		     shdlc->w - llc_shdlc_w_used(shdlc->ns, shdlc->dnr),
		     shdlc->ack_pending_q.qlen);

	while (shdlc->send_q.qlen && shdlc->ack_pending_q.qlen < shdlc->w &&
	       (shdlc->rnr == false)) {

		if (shdlc->t1_active) {
			del_timer_sync(&shdlc->t1_timer);
			shdlc->t1_active = false;
			pr_debug("Stopped T1(send ack)\n");
		}

		skb = skb_dequeue(&shdlc->send_q);

		*skb_push(skb, 1) = SHDLC_CONTROL_HEAD_I | (shdlc->ns << 3) |
				    shdlc->nr;

		pr_debug("Sending I-Frame %d, waiting to rcv %d\n", shdlc->ns,
			 shdlc->nr);
		SHDLC_DUMP_SKB("shdlc frame written", skb);

		r = shdlc->xmit_to_drv(shdlc->hdev, skb);
		if (r < 0) {
			shdlc->hard_fault = r;
			break;
		}

		shdlc->ns = (shdlc->ns + 1) % 8;

		time_sent = jiffies;
		*(unsigned long *)skb->cb = time_sent;

		skb_queue_tail(&shdlc->ack_pending_q, skb);

		if (shdlc->t2_active == false) {
			shdlc->t2_active = true;
			mod_timer(&shdlc->t2_timer, time_sent +
				  msecs_to_jiffies(SHDLC_T2_VALUE_MS));
			pr_debug("Started T2 (retransmit)\n");
		}
	}
}

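/*
 * The three timer callbacks below only schedule the state machine work.
 * The actual timeout handling happens in llc_shdlc_sm_work(), which checks
 * timer_pending() under state_mutex.
 */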
static void llc_shdlc_connect_timeout(unsigned long data)
{
	struct llc_shdlc *shdlc = (struct llc_shdlc *)data;

	pr_debug("\n");

	schedule_work(&shdlc->sm_work);
}

static void llc_shdlc_t1_timeout(unsigned long data)
{
	struct llc_shdlc *shdlc = (struct llc_shdlc *)data;

	pr_debug("SoftIRQ: need to send ack\n");

	schedule_work(&shdlc->sm_work);
}

static void llc_shdlc_t2_timeout(unsigned long data)
{
	struct llc_shdlc *shdlc = (struct llc_shdlc *)data;

	pr_debug("SoftIRQ: need to retransmit\n");

	schedule_work(&shdlc->sm_work);
}

static void llc_shdlc_sm_work(struct work_struct *work)
{
	struct llc_shdlc *shdlc = container_of(work, struct llc_shdlc, sm_work);
	int r;

	pr_debug("\n");

	mutex_lock(&shdlc->state_mutex);

	switch (shdlc->state) {
	case SHDLC_DISCONNECTED:
		skb_queue_purge(&shdlc->rcv_q);
		skb_queue_purge(&shdlc->send_q);
		skb_queue_purge(&shdlc->ack_pending_q);
		break;
	case SHDLC_CONNECTING:
		if (shdlc->hard_fault) {
			llc_shdlc_connect_complete(shdlc, shdlc->hard_fault);
			break;
		}

		if (shdlc->connect_tries++ < 5)
			r = llc_shdlc_connect_initiate(shdlc);
		else
			r = -ETIME;
		if (r < 0)
			llc_shdlc_connect_complete(shdlc, r);
		else {
			mod_timer(&shdlc->connect_timer, jiffies +
				  msecs_to_jiffies(SHDLC_CONNECT_VALUE_MS));

			shdlc->state = SHDLC_NEGOTIATING;
		}
		break;
	case SHDLC_NEGOTIATING:
		if (timer_pending(&shdlc->connect_timer) == 0) {
			shdlc->state = SHDLC_CONNECTING;
			schedule_work(&shdlc->sm_work);
		}

		llc_shdlc_handle_rcv_queue(shdlc);

		if (shdlc->hard_fault) {
			llc_shdlc_connect_complete(shdlc, shdlc->hard_fault);
			break;
		}
		break;
	case SHDLC_HALF_CONNECTED:
	case SHDLC_CONNECTED:
		llc_shdlc_handle_rcv_queue(shdlc);
		llc_shdlc_handle_send_queue(shdlc);

		if (shdlc->t1_active && timer_pending(&shdlc->t1_timer) == 0) {
			pr_debug
			    ("Handle T1(send ack) elapsed (T1 now inactive)\n");

			shdlc->t1_active = false;
			r = llc_shdlc_send_s_frame(shdlc, S_FRAME_RR,
						   shdlc->nr);
			if (r < 0)
				shdlc->hard_fault = r;
		}

		if (shdlc->t2_active && timer_pending(&shdlc->t2_timer) == 0) {
			pr_debug
			    ("Handle T2(retransmit) elapsed (T2 inactive)\n");

			shdlc->t2_active = false;

			llc_shdlc_requeue_ack_pending(shdlc);
			llc_shdlc_handle_send_queue(shdlc);
		}

		if (shdlc->hard_fault) {
			shdlc->llc_failure(shdlc->hdev, shdlc->hard_fault);
		}
		break;
	default:
		break;
	}
	mutex_unlock(&shdlc->state_mutex);
}

/*
 * Called from syscall context to establish shdlc link. Sleeps until the
 * link is ready or the connection has failed.
 */
static int llc_shdlc_connect(struct llc_shdlc *shdlc)
{
	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(connect_wq);

	pr_debug("\n");

	mutex_lock(&shdlc->state_mutex);

	shdlc->state = SHDLC_CONNECTING;
	shdlc->connect_wq = &connect_wq;
	shdlc->connect_tries = 0;
	shdlc->connect_result = 1;
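	/* 1 means "connect still pending"; llc_shdlc_connect_complete() will
	 * overwrite it with 0 or a negative error before waking us up.
	 */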

	mutex_unlock(&shdlc->state_mutex);

	schedule_work(&shdlc->sm_work);

	wait_event(connect_wq, shdlc->connect_result != 1);

	return shdlc->connect_result;
}

static void llc_shdlc_disconnect(struct llc_shdlc *shdlc)
{
	pr_debug("\n");

	mutex_lock(&shdlc->state_mutex);

	shdlc->state = SHDLC_DISCONNECTED;

	mutex_unlock(&shdlc->state_mutex);

	schedule_work(&shdlc->sm_work);
}

/*
 * Receive an incoming shdlc frame. Frame has already been crc-validated.
 * skb contains only LLC header and payload.
 * If skb == NULL, it is a notification that the link below is dead.
 */
static void llc_shdlc_recv_frame(struct llc_shdlc *shdlc, struct sk_buff *skb)
{
	if (skb == NULL) {
		pr_err("NULL Frame -> link is dead\n");
		shdlc->hard_fault = -EREMOTEIO;
	} else {
		SHDLC_DUMP_SKB("incoming frame", skb);
		skb_queue_tail(&shdlc->rcv_q, skb);
	}

	schedule_work(&shdlc->sm_work);
}


static void *llc_shdlc_init(struct nfc_hci_dev *hdev, xmit_to_drv_t xmit_to_drv,
			    rcv_to_hci_t rcv_to_hci, int tx_headroom,
			    int tx_tailroom, int *rx_headroom, int *rx_tailroom,
			    llc_failure_t llc_failure)
{
	struct llc_shdlc *shdlc;

	*rx_headroom = SHDLC_LLC_HEAD_ROOM;
	*rx_tailroom = 0;

	shdlc = kzalloc(sizeof(struct llc_shdlc), GFP_KERNEL);
	if (shdlc == NULL)
		return NULL;

	mutex_init(&shdlc->state_mutex);
	shdlc->state = SHDLC_DISCONNECTED;

	init_timer(&shdlc->connect_timer);
	shdlc->connect_timer.data = (unsigned long)shdlc;
	shdlc->connect_timer.function = llc_shdlc_connect_timeout;

	init_timer(&shdlc->t1_timer);
	shdlc->t1_timer.data = (unsigned long)shdlc;
	shdlc->t1_timer.function = llc_shdlc_t1_timeout;

	init_timer(&shdlc->t2_timer);
	shdlc->t2_timer.data = (unsigned long)shdlc;
	shdlc->t2_timer.function = llc_shdlc_t2_timeout;

	shdlc->w = SHDLC_MAX_WINDOW;
	shdlc->srej_support = SHDLC_SREJ_SUPPORT;

	skb_queue_head_init(&shdlc->rcv_q);
	skb_queue_head_init(&shdlc->send_q);
	skb_queue_head_init(&shdlc->ack_pending_q);

	INIT_WORK(&shdlc->sm_work, llc_shdlc_sm_work);

	shdlc->hdev = hdev;
	shdlc->xmit_to_drv = xmit_to_drv;
	shdlc->rcv_to_hci = rcv_to_hci;
	shdlc->tx_headroom = tx_headroom;
	shdlc->tx_tailroom = tx_tailroom;
	shdlc->llc_failure = llc_failure;

	return shdlc;
}

static void llc_shdlc_deinit(struct nfc_llc *llc)
{
	struct llc_shdlc *shdlc = nfc_llc_get_data(llc);

	skb_queue_purge(&shdlc->rcv_q);
	skb_queue_purge(&shdlc->send_q);
	skb_queue_purge(&shdlc->ack_pending_q);

	kfree(shdlc);
}

static int llc_shdlc_start(struct nfc_llc *llc)
{
	struct llc_shdlc *shdlc = nfc_llc_get_data(llc);

	return llc_shdlc_connect(shdlc);
}

static int llc_shdlc_stop(struct nfc_llc *llc)
{
	struct llc_shdlc *shdlc = nfc_llc_get_data(llc);

	llc_shdlc_disconnect(shdlc);

	return 0;
}

static void llc_shdlc_rcv_from_drv(struct nfc_llc *llc, struct sk_buff *skb)
{
	struct llc_shdlc *shdlc = nfc_llc_get_data(llc);

	llc_shdlc_recv_frame(shdlc, skb);
}

static int llc_shdlc_xmit_from_hci(struct nfc_llc *llc, struct sk_buff *skb)
{
	struct llc_shdlc *shdlc = nfc_llc_get_data(llc);

	skb_queue_tail(&shdlc->send_q, skb);

	schedule_work(&shdlc->sm_work);

	return 0;
}

static struct nfc_llc_ops llc_shdlc_ops = {
	.init = llc_shdlc_init,
	.deinit = llc_shdlc_deinit,
	.start = llc_shdlc_start,
	.stop = llc_shdlc_stop,
	.rcv_from_drv = llc_shdlc_rcv_from_drv,
	.xmit_from_hci = llc_shdlc_xmit_from_hci,
};

int nfc_llc_shdlc_register(void)
{
	return nfc_llc_register(LLC_SHDLC_NAME, &llc_shdlc_ops);
}