/*
 * Ultra Wide Band
 * Dynamic Reservation Protocol handling
 *
 * Copyright (C) 2005-2006 Intel Corporation
 * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
 * Copyright (C) 2008 Cambridge Silicon Radio Ltd.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include "uwb-internal.h"


/* DRP Conflict Actions ([ECMA-368 2nd Edition] 17.4.6) */
enum uwb_drp_conflict_action {
	/* Reservation is maintained, no action needed */
	UWB_DRP_CONFLICT_MANTAIN = 0,

	/* the device shall not transmit frames in conflicting MASs in
	 * the following superframe. If the device is the reservation
	 * target, it shall also set the Reason Code in its DRP IE to
	 * Conflict in its beacon in the following superframe.
	 */
	UWB_DRP_CONFLICT_ACT1,

	/* the device shall not set the Reservation Status bit to ONE
	 * and shall not transmit frames in conflicting MASs. If the
	 * device is the reservation target, it shall also set the
	 * Reason Code in its DRP IE to Conflict.
	 */
	UWB_DRP_CONFLICT_ACT2,

	/* the device shall not transmit frames in conflicting MASs in
	 * the following superframe. It shall remove the conflicting
	 * MASs from the reservation or set the Reservation Status to
	 * ZERO in its beacon in the following superframe. If the
	 * device is the reservation target, it shall also set the
	 * Reason Code in its DRP IE to Conflict.
	 */
	UWB_DRP_CONFLICT_ACT3,
};

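/*
 * Completion callback for the asynchronous SET-DRP-IE command: log
 * any failure and, if another DRP IE update was requested while this
 * command was in flight, queue a further reservation update.
 */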
static void uwb_rc_set_drp_cmd_done(struct uwb_rc *rc, void *arg,
				    struct uwb_rceb *reply, ssize_t reply_size)
{
	struct uwb_rc_evt_set_drp_ie *r = (struct uwb_rc_evt_set_drp_ie *)reply;
	unsigned long flags;

	if (r != NULL) {
		if (r->bResultCode != UWB_RC_RES_SUCCESS)
			dev_err(&rc->uwb_dev.dev, "SET-DRP-IE failed: %s (%d)\n",
				uwb_rc_strerror(r->bResultCode), r->bResultCode);
	} else
		dev_err(&rc->uwb_dev.dev, "SET-DRP-IE: timeout\n");

	spin_lock_irqsave(&rc->rsvs_lock, flags);
	if (rc->set_drp_ie_pending > 1) {
		rc->set_drp_ie_pending = 0;
		uwb_rsv_queue_update(rc);
	} else {
		rc->set_drp_ie_pending = 0;
	}
	spin_unlock_irqrestore(&rc->rsvs_lock, flags);
}

/**
 * Construct and send the SET DRP IE
 *
 * @rc:         UWB Host controller
 * @returns:    >= 0 number of bytes still available in the beacon
 *              < 0 errno code on error.
 *
 * See WUSB[8.6.2.7]: The host must set all the DRP IEs that it wants the
 * device to include in its beacon at the same time. We thus have to
 * traverse all reservations and include the DRP IEs of all PENDING
 * and NEGOTIATED reservations in a SET DRP command for transmission.
 *
 * A DRP Availability IE is appended.
 *
 * rc->rsvs_mutex is held
 *
 * FIXME We currently ignore the returned value indicating the remaining space
 * in beacon. This could be used to deny reservation requests earlier if
 * determined that they would cause the beacon space to be exceeded.
 */
int uwb_rc_send_all_drp_ie(struct uwb_rc *rc)
{
	int result;
	struct uwb_rc_cmd_set_drp_ie *cmd;
	struct uwb_rsv *rsv;
	struct uwb_rsv_move *mv;
	int num_bytes = 0;
	u8 *IEDataptr;

	result = -ENOMEM;
	/* First traverse all reservations to determine memory needed. */
	list_for_each_entry(rsv, &rc->reservations, rc_node) {
		if (rsv->drp_ie != NULL) {
			num_bytes += rsv->drp_ie->hdr.length + 2;
			if (uwb_rsv_has_two_drp_ies(rsv) &&
				(rsv->mv.companion_drp_ie != NULL)) {
				mv = &rsv->mv;
				num_bytes +=
					mv->companion_drp_ie->hdr.length + 2;
			}
		}
	}
	num_bytes += sizeof(rc->drp_avail.ie);
	cmd = kzalloc(sizeof(*cmd) + num_bytes, GFP_KERNEL);
	if (cmd == NULL)
		goto error;
	cmd->rccb.bCommandType = UWB_RC_CET_GENERAL;
	cmd->rccb.wCommand = cpu_to_le16(UWB_RC_CMD_SET_DRP_IE);
	cmd->wIELength = num_bytes;
	IEDataptr = (u8 *)&cmd->IEData[0];

	/* FIXME: DRP Availability IE is not always needed */
	/* put DRP avail IE first */
	memcpy(IEDataptr, &rc->drp_avail.ie, sizeof(rc->drp_avail.ie));
	IEDataptr += sizeof(struct uwb_ie_drp_avail);

	/* Next traverse all reservations to place IEs in allocated memory. */
	list_for_each_entry(rsv, &rc->reservations, rc_node) {
		if (rsv->drp_ie != NULL) {
			memcpy(IEDataptr, rsv->drp_ie,
			       rsv->drp_ie->hdr.length + 2);
			IEDataptr += rsv->drp_ie->hdr.length + 2;

			if (uwb_rsv_has_two_drp_ies(rsv) &&
				(rsv->mv.companion_drp_ie != NULL)) {
				mv = &rsv->mv;
				memcpy(IEDataptr, mv->companion_drp_ie,
				       mv->companion_drp_ie->hdr.length + 2);
				IEDataptr +=
					mv->companion_drp_ie->hdr.length + 2;
			}
		}
	}

	result = uwb_rc_cmd_async(rc, "SET-DRP-IE",
				&cmd->rccb, sizeof(*cmd) + num_bytes,
				UWB_RC_CET_GENERAL, UWB_RC_CMD_SET_DRP_IE,
				uwb_rc_set_drp_cmd_done, NULL);

	rc->set_drp_ie_pending = 1;

	kfree(cmd);
error:
	return result;
}

/*
 * Evaluate the action to perform using conflict resolution rules
 *
 * Return a uwb_drp_conflict_action.
 */
static int evaluate_conflict_action(struct uwb_ie_drp *ext_drp_ie, int ext_beacon_slot,
				    struct uwb_rsv *rsv, int our_status)
{
	int our_tie_breaker = rsv->tiebreaker;
	int our_type        = rsv->type;
	int our_beacon_slot = rsv->rc->uwb_dev.beacon_slot;

	int ext_tie_breaker = uwb_ie_drp_tiebreaker(ext_drp_ie);
	int ext_status      = uwb_ie_drp_status(ext_drp_ie);
	int ext_type        = uwb_ie_drp_type(ext_drp_ie);


	/* [ECMA-368 2nd Edition] 17.4.6 */
	if (ext_type == UWB_DRP_TYPE_PCA && our_type == UWB_DRP_TYPE_PCA) {
		return UWB_DRP_CONFLICT_MANTAIN;
	}

	/* [ECMA-368 2nd Edition] 17.4.6-1 */
	if (our_type == UWB_DRP_TYPE_ALIEN_BP) {
		return UWB_DRP_CONFLICT_MANTAIN;
	}

	/* [ECMA-368 2nd Edition] 17.4.6-2 */
	if (ext_type == UWB_DRP_TYPE_ALIEN_BP) {
		/* here we know our_type != UWB_DRP_TYPE_ALIEN_BP */
		return UWB_DRP_CONFLICT_ACT1;
	}

	/* [ECMA-368 2nd Edition] 17.4.6-3 */
	if (our_status == 0 && ext_status == 1) {
		return UWB_DRP_CONFLICT_ACT2;
	}

	/* [ECMA-368 2nd Edition] 17.4.6-4 */
	if (our_status == 1 && ext_status == 0) {
		return UWB_DRP_CONFLICT_MANTAIN;
	}

	/* [ECMA-368 2nd Edition] 17.4.6-5a */
	if (our_tie_breaker == ext_tie_breaker &&
	    our_beacon_slot <  ext_beacon_slot) {
		return UWB_DRP_CONFLICT_MANTAIN;
	}

	/* [ECMA-368 2nd Edition] 17.4.6-5b */
	if (our_tie_breaker != ext_tie_breaker &&
	    our_beacon_slot >  ext_beacon_slot) {
		return UWB_DRP_CONFLICT_MANTAIN;
	}

	if (our_status == 0) {
		if (our_tie_breaker == ext_tie_breaker) {
			/* [ECMA-368 2nd Edition] 17.4.6-6a */
			if (our_beacon_slot > ext_beacon_slot) {
				return UWB_DRP_CONFLICT_ACT2;
			}
		} else  {
			/* [ECMA-368 2nd Edition] 17.4.6-6b */
			if (our_beacon_slot < ext_beacon_slot) {
				return UWB_DRP_CONFLICT_ACT2;
			}
		}
	} else {
		if (our_tie_breaker == ext_tie_breaker) {
			/* [ECMA-368 2nd Edition] 17.4.6-7a */
			if (our_beacon_slot > ext_beacon_slot) {
				return UWB_DRP_CONFLICT_ACT3;
			}
		} else {
			/* [ECMA-368 2nd Edition] 17.4.6-7b */
			if (our_beacon_slot < ext_beacon_slot) {
				return UWB_DRP_CONFLICT_ACT3;
			}
		}
	}
	return UWB_DRP_CONFLICT_MANTAIN;
}

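/*
 * Resolve a conflict with a reservation that has a single DRP IE,
 * applying the action chosen by evaluate_conflict_action() from the
 * owner's or the target's point of view.
 */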
static void handle_conflict_normal(struct uwb_ie_drp *drp_ie,
				   int ext_beacon_slot,
				   struct uwb_rsv *rsv,
				   struct uwb_mas_bm *conflicting_mas)
{
	struct uwb_rc *rc = rsv->rc;
	struct uwb_rsv_move *mv = &rsv->mv;
	struct uwb_drp_backoff_win *bow = &rc->bow;
	int action;

	action = evaluate_conflict_action(drp_ie, ext_beacon_slot, rsv, uwb_rsv_status(rsv));

	if (uwb_rsv_is_owner(rsv)) {
		switch(action) {
		case UWB_DRP_CONFLICT_ACT2:
			/* try move */
			uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_TO_BE_MOVED);
			if (bow->can_reserve_extra_mases == false)
				uwb_rsv_backoff_win_increment(rc);

			break;
		case UWB_DRP_CONFLICT_ACT3:
			uwb_rsv_backoff_win_increment(rc);
			/* drop some mases with reason modified */
			/* put in the companion the mases to be dropped */
			bitmap_and(mv->companion_mas.bm, rsv->mas.bm, conflicting_mas->bm, UWB_NUM_MAS);
			uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_MODIFIED);
		default:
			break;
		}
	} else {
		switch(action) {
		case UWB_DRP_CONFLICT_ACT2:
		case UWB_DRP_CONFLICT_ACT3:
			uwb_rsv_set_state(rsv, UWB_RSV_STATE_T_CONFLICT);
		default:
			break;
		}

	}

}

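/*
 * Resolve a conflict with a reservation that also has a companion
 * (move/expansion) DRP IE.  If only the companion MASs conflict, the
 * expansion may be abandoned (owner) or marked as an expanding
 * conflict (target), depending on the conflict action; if the
 * established MASs conflict as well, the reservation is modified
 * (owner) or marked as conflicting (target).
 */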
static void handle_conflict_expanding(struct uwb_ie_drp *drp_ie, int ext_beacon_slot,
				      struct uwb_rsv *rsv, bool companion_only,
				      struct uwb_mas_bm *conflicting_mas)
{
	struct uwb_rc *rc = rsv->rc;
	struct uwb_drp_backoff_win *bow = &rc->bow;
	struct uwb_rsv_move *mv = &rsv->mv;
	int action;

	if (companion_only) {
		/* status of companion is 0 at this point */
		action = evaluate_conflict_action(drp_ie, ext_beacon_slot, rsv, 0);
		if (uwb_rsv_is_owner(rsv)) {
			switch(action) {
			case UWB_DRP_CONFLICT_ACT2:
			case UWB_DRP_CONFLICT_ACT3:
				uwb_rsv_set_state(rsv,
						UWB_RSV_STATE_O_ESTABLISHED);
				rsv->needs_release_companion_mas = false;
				if (bow->can_reserve_extra_mases == false)
					uwb_rsv_backoff_win_increment(rc);
				uwb_drp_avail_release(rsv->rc,
						&rsv->mv.companion_mas);
			}
		} else { /* rsv is target */
			switch(action) {
			case UWB_DRP_CONFLICT_ACT2:
			case UWB_DRP_CONFLICT_ACT3:
				uwb_rsv_set_state(rsv,
					UWB_RSV_STATE_T_EXPANDING_CONFLICT);
				/* send_drp_avail_ie = true; */
			}
		}
	} else { /* also base part of the reservation is conflicting */
		if (uwb_rsv_is_owner(rsv)) {
			uwb_rsv_backoff_win_increment(rc);
			/* remove companion part */
			uwb_drp_avail_release(rsv->rc, &rsv->mv.companion_mas);

			/* drop some mases with reason modified */

			/* put in the companion the mases to be dropped */
			bitmap_andnot(mv->companion_mas.bm, rsv->mas.bm,
					conflicting_mas->bm, UWB_NUM_MAS);
			uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_MODIFIED);
		} else { /* it is a target rsv */
			uwb_rsv_set_state(rsv, UWB_RSV_STATE_T_CONFLICT);
			/* send_drp_avail_ie = true; */
		}
	}
}

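/*
 * Resolve a conflict between a local reservation and the conflicting
 * MAS bitmap, dispatching to the normal or expanding handler
 * depending on whether the reservation's established or companion
 * MASs are affected.
 */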
static void uwb_drp_handle_conflict_rsv(struct uwb_rc *rc, struct uwb_rsv *rsv,
					struct uwb_rc_evt_drp *drp_evt,
					struct uwb_ie_drp *drp_ie,
					struct uwb_mas_bm *conflicting_mas)
{
	struct uwb_rsv_move *mv;

	/* check if the conflicting reservation has two drp_ies */
	if (uwb_rsv_has_two_drp_ies(rsv)) {
		mv = &rsv->mv;
		if (bitmap_intersects(rsv->mas.bm, conflicting_mas->bm,
								UWB_NUM_MAS)) {
			handle_conflict_expanding(drp_ie,
						drp_evt->beacon_slot_number,
						rsv, false, conflicting_mas);
		} else {
			if (bitmap_intersects(mv->companion_mas.bm,
					conflicting_mas->bm, UWB_NUM_MAS)) {
				handle_conflict_expanding(
					drp_ie, drp_evt->beacon_slot_number,
					rsv, true, conflicting_mas);
			}
		}
	} else if (bitmap_intersects(rsv->mas.bm, conflicting_mas->bm,
							UWB_NUM_MAS)) {
		handle_conflict_normal(drp_ie, drp_evt->beacon_slot_number,
					rsv, conflicting_mas);
	}
}

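/*
 * Check every local reservation against the conflicting MAS bitmap
 * and resolve any conflicts found.
 */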
static void uwb_drp_handle_all_conflict_rsv(struct uwb_rc *rc,
					    struct uwb_rc_evt_drp *drp_evt,
					    struct uwb_ie_drp *drp_ie,
					    struct uwb_mas_bm *conflicting_mas)
{
	struct uwb_rsv *rsv;

	list_for_each_entry(rsv, &rc->reservations, rc_node) {
		uwb_drp_handle_conflict_rsv(rc, rsv, drp_evt, drp_ie,
							conflicting_mas);
	}
}

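/*
 * Handle a DRP IE with reason code ACCEPTED for a reservation in
 * which we are the target: restroke the current state, accept a
 * proposed companion (expansion) if its MASs are available, or start
 * conflict resolution if they are not.
 */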
static void uwb_drp_process_target_accepted(struct uwb_rc *rc,
	struct uwb_rsv *rsv, struct uwb_rc_evt_drp *drp_evt,
	struct uwb_ie_drp *drp_ie, struct uwb_mas_bm *mas)
{
	struct uwb_rsv_move *mv = &rsv->mv;
	int status;

	status = uwb_ie_drp_status(drp_ie);

	if (rsv->state == UWB_RSV_STATE_T_CONFLICT) {
		uwb_rsv_set_state(rsv, UWB_RSV_STATE_T_CONFLICT);
		return;
	}

	if (rsv->state == UWB_RSV_STATE_T_EXPANDING_ACCEPTED) {
		/* drp_ie is companion */
		if (!bitmap_equal(rsv->mas.bm, mas->bm, UWB_NUM_MAS)) {
			/* stroke companion */
			uwb_rsv_set_state(rsv,
				UWB_RSV_STATE_T_EXPANDING_ACCEPTED);
		}
	} else {
		if (!bitmap_equal(rsv->mas.bm, mas->bm, UWB_NUM_MAS)) {
			if (uwb_drp_avail_reserve_pending(rc, mas) == -EBUSY) {
				/* FIXME: there is a conflict, find
				 * the conflicting reservations and
				 * take a sensible action. Consider
				 * that in drp_ie there is the
				 * "neighbour" */
				uwb_drp_handle_all_conflict_rsv(rc, drp_evt,
						drp_ie, mas);
			} else {
				/* accept the extra reservation */
				bitmap_copy(mv->companion_mas.bm, mas->bm,
								UWB_NUM_MAS);
				uwb_rsv_set_state(rsv,
					UWB_RSV_STATE_T_EXPANDING_ACCEPTED);
			}
		} else {
			if (status) {
				uwb_rsv_set_state(rsv,
						UWB_RSV_STATE_T_ACCEPTED);
			}
		}

	}
}

/*
 * Based on the DRP IE, transition a target reservation to a new
 * state.
 */
static void uwb_drp_process_target(struct uwb_rc *rc, struct uwb_rsv *rsv,
		   struct uwb_ie_drp *drp_ie, struct uwb_rc_evt_drp *drp_evt)
{
	struct device *dev = &rc->uwb_dev.dev;
	struct uwb_rsv_move *mv = &rsv->mv;
	int status;
	enum uwb_drp_reason reason_code;
	struct uwb_mas_bm mas;

	status = uwb_ie_drp_status(drp_ie);
	reason_code = uwb_ie_drp_reason_code(drp_ie);
	uwb_drp_ie_to_bm(&mas, drp_ie);

	switch (reason_code) {
	case UWB_DRP_REASON_ACCEPTED:
		uwb_drp_process_target_accepted(rc, rsv, drp_evt, drp_ie, &mas);
		break;

	case UWB_DRP_REASON_MODIFIED:
		/* check to see if we have already modified the reservation */
		if (bitmap_equal(rsv->mas.bm, mas.bm, UWB_NUM_MAS)) {
			uwb_rsv_set_state(rsv, UWB_RSV_STATE_T_ACCEPTED);
			break;
		}

		/* find if the owner wants to expand or reduce */
		if (bitmap_subset(mas.bm, rsv->mas.bm, UWB_NUM_MAS)) {
			/* owner is reducing */
			bitmap_andnot(mv->companion_mas.bm, rsv->mas.bm, mas.bm,
				UWB_NUM_MAS);
			uwb_drp_avail_release(rsv->rc, &mv->companion_mas);
		}

		bitmap_copy(rsv->mas.bm, mas.bm, UWB_NUM_MAS);
		uwb_rsv_set_state(rsv, UWB_RSV_STATE_T_RESIZED);
		break;
	default:
		dev_warn(dev, "ignoring invalid DRP IE state (%d/%d)\n",
			 reason_code, status);
	}
}

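/*
 * Handle a DRP IE with reason code ACCEPTED for a reservation we own,
 * advancing multi-step operations (modify, move) once the target's
 * DRP IE reports the expected MAS bitmap.
 */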
static void uwb_drp_process_owner_accepted(struct uwb_rsv *rsv,
						struct uwb_mas_bm *mas)
{
	struct uwb_rsv_move *mv = &rsv->mv;

	switch (rsv->state) {
	case UWB_RSV_STATE_O_PENDING:
	case UWB_RSV_STATE_O_INITIATED:
	case UWB_RSV_STATE_O_ESTABLISHED:
		uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_ESTABLISHED);
		break;
	case UWB_RSV_STATE_O_MODIFIED:
		if (bitmap_equal(mas->bm, rsv->mas.bm, UWB_NUM_MAS))
			uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_ESTABLISHED);
		else
			uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_MODIFIED);
		break;

	case UWB_RSV_STATE_O_MOVE_REDUCING: /* shouldn't be a problem */
		if (bitmap_equal(mas->bm, rsv->mas.bm, UWB_NUM_MAS))
			uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_ESTABLISHED);
		else
			uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_MOVE_REDUCING);
		break;
	case UWB_RSV_STATE_O_MOVE_EXPANDING:
		if (bitmap_equal(mas->bm, mv->companion_mas.bm, UWB_NUM_MAS)) {
			/* Companion reservation accepted */
			uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_MOVE_COMBINING);
		} else {
			uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_MOVE_EXPANDING);
		}
		break;
	case UWB_RSV_STATE_O_MOVE_COMBINING:
		if (bitmap_equal(mas->bm, rsv->mas.bm, UWB_NUM_MAS))
			uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_MOVE_REDUCING);
		else
			uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_MOVE_COMBINING);
		break;
	default:
		break;
	}
}
/*
 * Based on the DRP IE, transition an owner reservation to a new
 * state.
 */
static void uwb_drp_process_owner(struct uwb_rc *rc, struct uwb_rsv *rsv,
				  struct uwb_dev *src, struct uwb_ie_drp *drp_ie,
				  struct uwb_rc_evt_drp *drp_evt)
{
	struct device *dev = &rc->uwb_dev.dev;
	int status;
	enum uwb_drp_reason reason_code;
	struct uwb_mas_bm mas;

	status = uwb_ie_drp_status(drp_ie);
	reason_code = uwb_ie_drp_reason_code(drp_ie);
	uwb_drp_ie_to_bm(&mas, drp_ie);

	if (status) {
		switch (reason_code) {
		case UWB_DRP_REASON_ACCEPTED:
			uwb_drp_process_owner_accepted(rsv, &mas);
			break;
		default:
			dev_warn(dev, "ignoring invalid DRP IE state (%d/%d)\n",
				 reason_code, status);
		}
	} else {
		switch (reason_code) {
		case UWB_DRP_REASON_PENDING:
			uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_PENDING);
			break;
		case UWB_DRP_REASON_DENIED:
			uwb_rsv_set_state(rsv, UWB_RSV_STATE_NONE);
			break;
		case UWB_DRP_REASON_CONFLICT:
			/* resolve the conflict */
			bitmap_complement(mas.bm, src->last_availability_bm,
					  UWB_NUM_MAS);
			uwb_drp_handle_conflict_rsv(rc, rsv, drp_evt, drp_ie, &mas);
			break;
		default:
			dev_warn(dev, "ignoring invalid DRP IE state (%d/%d)\n",
				 reason_code, status);
		}
	}
}

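/* (Re)arm the expiry timer of an alien BP conflict entry. */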
static void uwb_cnflt_alien_stroke_timer(struct uwb_cnflt_alien *cnflt)
{
	unsigned timeout_us = UWB_MAX_LOST_BEACONS * UWB_SUPERFRAME_LENGTH_US;
	mod_timer(&cnflt->timer, jiffies + usecs_to_jiffies(timeout_us));
}

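/*
 * An alien BP conflict entry has expired: remove it, recompute the
 * radio controller's global conflicting alien MAS bitmap and schedule
 * the alien BP reservation work.
 */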
static void uwb_cnflt_update_work(struct work_struct *work)
{
	struct uwb_cnflt_alien *cnflt = container_of(work,
						     struct uwb_cnflt_alien,
						     cnflt_update_work);
	struct uwb_cnflt_alien *c;
	struct uwb_rc *rc = cnflt->rc;

	unsigned long delay_us = UWB_MAS_LENGTH_US * UWB_MAS_PER_ZONE;

	mutex_lock(&rc->rsvs_mutex);

	list_del(&cnflt->rc_node);

	/* update rc global conflicting alien bitmap */
	bitmap_zero(rc->cnflt_alien_bitmap.bm, UWB_NUM_MAS);

	list_for_each_entry(c, &rc->cnflt_alien_list, rc_node) {
		bitmap_or(rc->cnflt_alien_bitmap.bm, rc->cnflt_alien_bitmap.bm,
						c->mas.bm, UWB_NUM_MAS);
	}

	queue_delayed_work(rc->rsv_workq, &rc->rsv_alien_bp_work,
					usecs_to_jiffies(delay_us));

	kfree(cnflt);
	mutex_unlock(&rc->rsvs_mutex);
}

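/* Timer callback: defer the expiry handling to the workqueue. */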
static void uwb_cnflt_timer(unsigned long arg)
{
	struct uwb_cnflt_alien *cnflt = (struct uwb_cnflt_alien *)arg;

	queue_work(cnflt->rc->rsv_workq, &cnflt->cnflt_update_work);
}

/*
 * We have received a DRP IE of type Alien BP and we need to make
 * sure we do not transmit in conflicting MASs.
 */
static void uwb_drp_handle_alien_drp(struct uwb_rc *rc, struct uwb_ie_drp *drp_ie)
{
	struct device *dev = &rc->uwb_dev.dev;
	struct uwb_mas_bm mas;
	struct uwb_cnflt_alien *cnflt;
	char buf[72];
	unsigned long delay_us = UWB_MAS_LENGTH_US * UWB_MAS_PER_ZONE;

	uwb_drp_ie_to_bm(&mas, drp_ie);
	bitmap_scnprintf(buf, sizeof(buf), mas.bm, UWB_NUM_MAS);

	list_for_each_entry(cnflt, &rc->cnflt_alien_list, rc_node) {
		if (bitmap_equal(cnflt->mas.bm, mas.bm, UWB_NUM_MAS)) {
			/* Existing alien BP reservation conflicting
			 * bitmap, just reset the timer */
			uwb_cnflt_alien_stroke_timer(cnflt);
			return;
		}
	}

	/* New alien BP reservation conflicting bitmap */

	/* alloc and initialize new uwb_cnflt_alien */
	cnflt = kzalloc(sizeof(struct uwb_cnflt_alien), GFP_KERNEL);
	if (!cnflt) {
		dev_err(dev, "failed to alloc uwb_cnflt_alien struct\n");
		return;
	}

	INIT_LIST_HEAD(&cnflt->rc_node);
	init_timer(&cnflt->timer);
	cnflt->timer.function = uwb_cnflt_timer;
	cnflt->timer.data     = (unsigned long)cnflt;

	cnflt->rc = rc;
	INIT_WORK(&cnflt->cnflt_update_work, uwb_cnflt_update_work);

	bitmap_copy(cnflt->mas.bm, mas.bm, UWB_NUM_MAS);

	list_add_tail(&cnflt->rc_node, &rc->cnflt_alien_list);

	/* update rc global conflicting alien bitmap */
	bitmap_or(rc->cnflt_alien_bitmap.bm, rc->cnflt_alien_bitmap.bm, mas.bm, UWB_NUM_MAS);

	queue_delayed_work(rc->rsv_workq, &rc->rsv_alien_bp_work, usecs_to_jiffies(delay_us));

	/* start the timer */
	uwb_cnflt_alien_stroke_timer(cnflt);
}

static void uwb_drp_process_not_involved(struct uwb_rc *rc,
					 struct uwb_rc_evt_drp *drp_evt,
					 struct uwb_ie_drp *drp_ie)
{
	struct uwb_mas_bm mas;

	uwb_drp_ie_to_bm(&mas, drp_ie);
	uwb_drp_handle_all_conflict_rsv(rc, drp_evt, drp_ie, &mas);
}

static void uwb_drp_process_involved(struct uwb_rc *rc, struct uwb_dev *src,
				     struct uwb_rc_evt_drp *drp_evt,
				     struct uwb_ie_drp *drp_ie)
{
	struct uwb_rsv *rsv;

	rsv = uwb_rsv_find(rc, src, drp_ie);
	if (!rsv) {
		/*
		 * No reservation? The DRP IE is either for a recently
		 * terminated reservation, or it couldn't be processed
		 * (e.g., an invalid IE or out of memory).
		 */
		return;
	}

	/*
	 * Do nothing with DRP IEs for reservations that have been
	 * terminated.
	 */
	if (rsv->state == UWB_RSV_STATE_NONE) {
		uwb_rsv_set_state(rsv, UWB_RSV_STATE_NONE);
		return;
	}

	if (uwb_ie_drp_owner(drp_ie))
		uwb_drp_process_target(rc, rsv, drp_ie, drp_evt);
	else
		uwb_drp_process_owner(rc, rsv, src, drp_ie, drp_evt);

}


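/*
 * Does this DRP IE address the local radio controller, i.e. are we
 * the owner or the target of the reservation it describes?
 */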
static bool uwb_drp_involves_us(struct uwb_rc *rc, struct uwb_ie_drp *drp_ie)
{
	return uwb_dev_addr_cmp(&rc->uwb_dev.dev_addr, &drp_ie->dev_addr) == 0;
}

/*
 * Process a received DRP IE.
 */
static void uwb_drp_process(struct uwb_rc *rc, struct uwb_rc_evt_drp *drp_evt,
			    struct uwb_dev *src, struct uwb_ie_drp *drp_ie)
{
	if (uwb_ie_drp_type(drp_ie) == UWB_DRP_TYPE_ALIEN_BP)
		uwb_drp_handle_alien_drp(rc, drp_ie);
	else if (uwb_drp_involves_us(rc, drp_ie))
		uwb_drp_process_involved(rc, src, drp_evt, drp_ie);
	else
		uwb_drp_process_not_involved(rc, drp_evt, drp_ie);
}

/*
 * Process a received DRP Availability IE
 */
static void uwb_drp_availability_process(struct uwb_rc *rc, struct uwb_dev *src,
					 struct uwb_ie_drp_avail *drp_availability_ie)
{
	bitmap_copy(src->last_availability_bm,
		    drp_availability_ie->bmp, UWB_NUM_MAS);
}

/*
 * Process all the DRP IEs (both DRP IEs and the DRP Availability IE)
 * from a device.
 */
static
void uwb_drp_process_all(struct uwb_rc *rc, struct uwb_rc_evt_drp *drp_evt,
			 size_t ielen, struct uwb_dev *src_dev)
{
	struct device *dev = &rc->uwb_dev.dev;
	struct uwb_ie_hdr *ie_hdr;
	void *ptr;

	ptr = drp_evt->ie_data;
	for (;;) {
		ie_hdr = uwb_ie_next(&ptr, &ielen);
		if (!ie_hdr)
			break;

		switch (ie_hdr->element_id) {
		case UWB_IE_DRP_AVAILABILITY:
			uwb_drp_availability_process(rc, src_dev, (struct uwb_ie_drp_avail *)ie_hdr);
			break;
		case UWB_IE_DRP:
			uwb_drp_process(rc, drp_evt, src_dev, (struct uwb_ie_drp *)ie_hdr);
			break;
		default:
			dev_warn(dev, "unexpected IE in DRP notification\n");
			break;
		}
	}

	if (ielen > 0)
		dev_warn(dev, "%d octets remaining in DRP notification\n",
			 (int)ielen);
}

/**
 * uwbd_evt_handle_rc_drp - handle a DRP_IE event
 * @evt: the DRP_IE event from the radio controller
 *
 * This processes DRP notifications from the radio controller, either
 * initiating a new reservation or transitioning an existing
 * reservation into a different state.
 *
 * DRP notifications can occur for three different reasons:
 *
 * - UWB_DRP_NOTIF_DRP_IE_RECVD: one or more DRP IEs with the RC as
 *   the target or source have been received.
 *
 *   These DRP IEs could be new or for an existing reservation.
 *
 *   If the DRP IE for an existing reservation ceases to be
 *   received for at least mMaxLostBeacons, the reservation should be
 *   considered to be terminated.  Note that the TERMINATE reason (see
 *   below) may not always be signalled (e.g., the remote device has
 *   two or more reservations established with the RC).
 *
 * - UWB_DRP_NOTIF_CONFLICT: DRP IEs from any device in the beacon
 *   group conflict with the RC's reservations.
 *
 * - UWB_DRP_NOTIF_TERMINATE: DRP IEs are no longer being received
 *   from a device (i.e., it's terminated all reservations).
 *
 * Only the software state of the reservations is changed; the setting
 * of the radio controller's DRP IEs is done after all the events in
 * an event buffer are processed.  This saves waiting multiple times
 * for the SET_DRP_IE command to complete.
 */
int uwbd_evt_handle_rc_drp(struct uwb_event *evt)
{
	struct device *dev = &evt->rc->uwb_dev.dev;
	struct uwb_rc *rc = evt->rc;
	struct uwb_rc_evt_drp *drp_evt;
	size_t ielength, bytes_left;
	struct uwb_dev_addr src_addr;
	struct uwb_dev *src_dev;

	/* Is there enough data to decode the event (and any IEs in
	   its payload)? */
	if (evt->notif.size < sizeof(*drp_evt)) {
		dev_err(dev, "DRP event: Not enough data to decode event "
			"[%zu bytes left, %zu needed]\n",
			evt->notif.size, sizeof(*drp_evt));
		return 0;
	}
	bytes_left = evt->notif.size - sizeof(*drp_evt);
	drp_evt = container_of(evt->notif.rceb, struct uwb_rc_evt_drp, rceb);
	ielength = le16_to_cpu(drp_evt->ie_length);
	if (bytes_left != ielength) {
		dev_err(dev, "DRP event: Not enough data in payload [%zu "
			"bytes left, %zu declared in the event]\n",
			bytes_left, ielength);
		return 0;
	}

	memcpy(src_addr.data, &drp_evt->src_addr, sizeof(src_addr));
	src_dev = uwb_dev_get_by_devaddr(rc, &src_addr);
	if (!src_dev) {
		/*
		 * A DRP notification from an unrecognized device.
		 *
		 * This is probably from a WUSB device that doesn't
		 * have an EUI-48 and therefore doesn't show up in the
		 * UWB device database.  It's safe to simply ignore
		 * these.
		 */
		return 0;
	}

	mutex_lock(&rc->rsvs_mutex);

	/* We do not distinguish between the notification reasons */
	uwb_drp_process_all(rc, drp_evt, ielength, src_dev);

	mutex_unlock(&rc->rsvs_mutex);

	uwb_dev_put(src_dev);
	return 0;
}