/*
 * Copyright (c) 2008, 2009 open80211s Ltd.
 * Author:     Luis Carlos Cobo <luisca@cozybit.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/etherdevice.h>
#include <linux/list.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <net/mac80211.h>
#include "wme.h"
#include "ieee80211_i.h"
#include "mesh.h"

/* There will initially be 2^INIT_PATHS_SIZE_ORDER buckets */
#define INIT_PATHS_SIZE_ORDER	2

/* Keep the mean chain length below this constant */
#define MEAN_CHAIN_LEN		2
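
/*
 * Illustrative arithmetic (added commentary, not in the original source):
 * with INIT_PATHS_SIZE_ORDER 2 the initial table has 1 << 2 = 4 buckets,
 * and with MEAN_CHAIN_LEN 2 a grow is requested once the table holds
 * 2 * 4 = 8 entries; see the threshold checks in mesh_path_add() and
 * mesh_table_grow() below.
 */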

static inline bool mpath_expired(struct mesh_path *mpath)
{
	return (mpath->flags & MESH_PATH_ACTIVE) &&
	       time_after(jiffies, mpath->exp_time) &&
	       !(mpath->flags & MESH_PATH_FIXED);
}

struct mpath_node {
	struct hlist_node list;
	struct rcu_head rcu;
	/* This indirection allows two different tables to point to the same
	 * mesh_path structure, useful when resizing
	 */
	struct mesh_path *mpath;
};

static struct mesh_table __rcu *mesh_paths;
static struct mesh_table __rcu *mpp_paths; /* Store paths for MPP&MAP */

int mesh_paths_generation;

/* This lock will have the grow table function as writer and add / delete nodes
 * as readers. RCU provides sufficient protection only when reading the table
 * (i.e. doing lookups).  Adding or removing nodes requires we take the read
 * lock or we risk operating on an old table.  The write lock is only needed
 * when modifying the number of buckets of a table.
 */
static DEFINE_RWLOCK(pathtbl_resize_lock);


static inline struct mesh_table *resize_dereference_mesh_paths(void)
{
	return rcu_dereference_protected(mesh_paths,
		lockdep_is_held(&pathtbl_resize_lock));
}

static inline struct mesh_table *resize_dereference_mpp_paths(void)
{
	return rcu_dereference_protected(mpp_paths,
		lockdep_is_held(&pathtbl_resize_lock));
}

/*
 * CAREFUL -- "tbl" must not be an expression,
 * in particular not an rcu_dereference(), since
 * it's used twice. So it is illegal to do
 *	for_each_mesh_entry(rcu_dereference(...), ...)
 */
#define for_each_mesh_entry(tbl, node, i) \
	for (i = 0; i <= tbl->hash_mask; i++) \
		hlist_for_each_entry_rcu(node, &tbl->hash_buckets[i], list)
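
/*
 * Illustrative usage sketch (added commentary, not in the original
 * source): dereference the table into a local variable first, exactly
 * as the walkers below (e.g. mesh_plink_broken()) do, so the macro's
 * double evaluation of "tbl" stays safe:
 *
 *	rcu_read_lock();
 *	tbl = rcu_dereference(mesh_paths);
 *	for_each_mesh_entry(tbl, node, i)
 *		process(node->mpath);	// process() is a placeholder
 *	rcu_read_unlock();
 */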


static struct mesh_table *mesh_table_alloc(int size_order)
{
	int i;
	struct mesh_table *newtbl;

	newtbl = kmalloc(sizeof(struct mesh_table), GFP_ATOMIC);
	if (!newtbl)
		return NULL;

	newtbl->hash_buckets = kzalloc(sizeof(struct hlist_head) *
			(1 << size_order), GFP_ATOMIC);

	if (!newtbl->hash_buckets) {
		kfree(newtbl);
		return NULL;
	}

	newtbl->hashwlock = kmalloc(sizeof(spinlock_t) *
			(1 << size_order), GFP_ATOMIC);
	if (!newtbl->hashwlock) {
		kfree(newtbl->hash_buckets);
		kfree(newtbl);
		return NULL;
	}

	newtbl->size_order = size_order;
	newtbl->hash_mask = (1 << size_order) - 1;
	atomic_set(&newtbl->entries, 0);
	get_random_bytes(&newtbl->hash_rnd,
			sizeof(newtbl->hash_rnd));
	for (i = 0; i <= newtbl->hash_mask; i++)
		spin_lock_init(&newtbl->hashwlock[i]);
	spin_lock_init(&newtbl->gates_lock);

	return newtbl;
}

static void __mesh_table_free(struct mesh_table *tbl)
{
	kfree(tbl->hash_buckets);
	kfree(tbl->hashwlock);
	kfree(tbl);
}

static void mesh_table_free(struct mesh_table *tbl, bool free_leafs)
{
	struct hlist_head *mesh_hash;
	struct hlist_node *p, *q;
	struct mpath_node *gate;
	int i;

	mesh_hash = tbl->hash_buckets;
	for (i = 0; i <= tbl->hash_mask; i++) {
		spin_lock_bh(&tbl->hashwlock[i]);
		hlist_for_each_safe(p, q, &mesh_hash[i]) {
			tbl->free_node(p, free_leafs);
			atomic_dec(&tbl->entries);
		}
		spin_unlock_bh(&tbl->hashwlock[i]);
	}
	/* known_gates may still be NULL when tearing down a table whose
	 * gate list allocation failed in mesh_pathtbl_init()
	 */
	if (free_leafs && tbl->known_gates) {
		spin_lock_bh(&tbl->gates_lock);
		hlist_for_each_entry_safe(gate, q,
					 tbl->known_gates, list) {
			hlist_del(&gate->list);
			kfree(gate);
		}
		kfree(tbl->known_gates);
		spin_unlock_bh(&tbl->gates_lock);
	}
	__mesh_table_free(tbl);
}

static int mesh_table_grow(struct mesh_table *oldtbl,
			   struct mesh_table *newtbl)
{
	struct hlist_head *oldhash;
	struct hlist_node *p, *q;
	int i;

	if (atomic_read(&oldtbl->entries)
			< oldtbl->mean_chain_len * (oldtbl->hash_mask + 1))
		return -EAGAIN;

	newtbl->free_node = oldtbl->free_node;
	newtbl->mean_chain_len = oldtbl->mean_chain_len;
	newtbl->copy_node = oldtbl->copy_node;
	newtbl->known_gates = oldtbl->known_gates;
	atomic_set(&newtbl->entries, atomic_read(&oldtbl->entries));

	oldhash = oldtbl->hash_buckets;
	for (i = 0; i <= oldtbl->hash_mask; i++)
		hlist_for_each(p, &oldhash[i])
			if (oldtbl->copy_node(p, newtbl) < 0)
				goto errcopy;

	return 0;

errcopy:
	for (i = 0; i <= newtbl->hash_mask; i++) {
		hlist_for_each_safe(p, q, &newtbl->hash_buckets[i])
			oldtbl->free_node(p, 0);
	}
	return -ENOMEM;
}

static u32 mesh_table_hash(const u8 *addr, struct ieee80211_sub_if_data *sdata,
			   struct mesh_table *tbl)
{
	/* Use last four bytes of hw addr and interface index as hash index */
	return jhash_2words(*(u32 *)(addr+2), sdata->dev->ifindex,
			    tbl->hash_rnd) & tbl->hash_mask;
}


/**
 * mesh_path_assign_nexthop - update mesh path next hop
 *
 * @mpath: mesh path to update
 * @sta: next hop to assign
 *
 * Locking: mpath->state_lock must be held when calling this function
 */
void mesh_path_assign_nexthop(struct mesh_path *mpath, struct sta_info *sta)
{
	struct sk_buff *skb;
	struct ieee80211_hdr *hdr;
	unsigned long flags;

	rcu_assign_pointer(mpath->next_hop, sta);

	spin_lock_irqsave(&mpath->frame_queue.lock, flags);
	skb_queue_walk(&mpath->frame_queue, skb) {
		hdr = (struct ieee80211_hdr *) skb->data;
		memcpy(hdr->addr1, sta->sta.addr, ETH_ALEN);
		memcpy(hdr->addr2, mpath->sdata->vif.addr, ETH_ALEN);
		ieee80211_mps_set_frame_flags(sta->sdata, sta, hdr);
	}

	spin_unlock_irqrestore(&mpath->frame_queue.lock, flags);
}

static void prepare_for_gate(struct sk_buff *skb, char *dst_addr,
			     struct mesh_path *gate_mpath)
{
	struct ieee80211_hdr *hdr;
	struct ieee80211s_hdr *mshdr;
	int mesh_hdrlen, hdrlen;
	char *next_hop;

	hdr = (struct ieee80211_hdr *) skb->data;
	hdrlen = ieee80211_hdrlen(hdr->frame_control);
	mshdr = (struct ieee80211s_hdr *) (skb->data + hdrlen);

	if (!(mshdr->flags & MESH_FLAGS_AE)) {
		/* size of the fixed part of the mesh header */
		mesh_hdrlen = 6;

		/* make room for the two extended addresses */
		skb_push(skb, 2 * ETH_ALEN);
		memmove(skb->data, hdr, hdrlen + mesh_hdrlen);

		hdr = (struct ieee80211_hdr *) skb->data;

		/* we preserve the previous mesh header and only add
		 * the new addresses */
		mshdr = (struct ieee80211s_hdr *) (skb->data + hdrlen);
		mshdr->flags = MESH_FLAGS_AE_A5_A6;
		memcpy(mshdr->eaddr1, hdr->addr3, ETH_ALEN);
		memcpy(mshdr->eaddr2, hdr->addr4, ETH_ALEN);
	}

	/* update next hop */
	hdr = (struct ieee80211_hdr *) skb->data;
	rcu_read_lock();
	next_hop = rcu_dereference(gate_mpath->next_hop)->sta.addr;
	memcpy(hdr->addr1, next_hop, ETH_ALEN);
	rcu_read_unlock();
	memcpy(hdr->addr2, gate_mpath->sdata->vif.addr, ETH_ALEN);
	memcpy(hdr->addr3, dst_addr, ETH_ALEN);
}
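/*
 * Background note (added commentary, not in the original source): the
 * fixed part of the 802.11s mesh header is 6 bytes (flags, TTL and a
 * 4-byte sequence number), which is why prepare_for_gate() uses
 * mesh_hdrlen = 6.  Setting MESH_FLAGS_AE_A5_A6 appends two extended
 * addresses (eaddr1/eaddr2, 2 * ETH_ALEN bytes), which is exactly the
 * room made by the skb_push() above.
 */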
/**
 * mesh_path_move_to_queue - Move or copy frames from one mpath queue to another
 *
 * This function is used to transfer or copy frames from an unresolved mpath to
 * a gate mpath.  The function also adds the Address Extension field and
 * updates the next hop.
 *
 * If a frame already has an Address Extension field, only the next hop and
 * destination addresses are updated.
 *
 * The gate mpath must be an active mpath with a valid mpath->next_hop.
 *
 * @gate_mpath: An active mpath the frames will be sent to (i.e. the gate)
 * @from_mpath: The failed mpath
 * @copy: When true, copy all the frames to the new mpath queue.  When false,
 * move them.
 */
static void mesh_path_move_to_queue(struct mesh_path *gate_mpath,
				    struct mesh_path *from_mpath,
				    bool copy)
{
	struct sk_buff *skb, *fskb, *tmp;
	struct sk_buff_head failq;
	unsigned long flags;

	if (WARN_ON(gate_mpath == from_mpath))
		return;
	if (WARN_ON(!gate_mpath->next_hop))
		return;

	__skb_queue_head_init(&failq);

	spin_lock_irqsave(&from_mpath->frame_queue.lock, flags);
	skb_queue_splice_init(&from_mpath->frame_queue, &failq);
	spin_unlock_irqrestore(&from_mpath->frame_queue.lock, flags);

	skb_queue_walk_safe(&failq, fskb, tmp) {
		if (skb_queue_len(&gate_mpath->frame_queue) >=
				  MESH_FRAME_QUEUE_LEN) {
			mpath_dbg(gate_mpath->sdata, "mpath queue full!\n");
			break;
		}

		skb = skb_copy(fskb, GFP_ATOMIC);
		if (WARN_ON(!skb))
			break;

		prepare_for_gate(skb, gate_mpath->dst, gate_mpath);
		skb_queue_tail(&gate_mpath->frame_queue, skb);

		if (copy)
			continue;

		__skb_unlink(fskb, &failq);
		kfree_skb(fskb);
	}

	mpath_dbg(gate_mpath->sdata, "Mpath queue for gate %pM has %d frames\n",
		  gate_mpath->dst, skb_queue_len(&gate_mpath->frame_queue));

	if (!copy)
		return;

	spin_lock_irqsave(&from_mpath->frame_queue.lock, flags);
	skb_queue_splice(&failq, &from_mpath->frame_queue);
	spin_unlock_irqrestore(&from_mpath->frame_queue.lock, flags);
}


static struct mesh_path *mpath_lookup(struct mesh_table *tbl, const u8 *dst,
				      struct ieee80211_sub_if_data *sdata)
{
	struct mesh_path *mpath;
	struct hlist_head *bucket;
	struct mpath_node *node;

	bucket = &tbl->hash_buckets[mesh_table_hash(dst, sdata, tbl)];
	hlist_for_each_entry_rcu(node, bucket, list) {
		mpath = node->mpath;
		if (mpath->sdata == sdata &&
		    ether_addr_equal(dst, mpath->dst)) {
			if (mpath_expired(mpath)) {
				spin_lock_bh(&mpath->state_lock);
				mpath->flags &= ~MESH_PATH_ACTIVE;
				spin_unlock_bh(&mpath->state_lock);
			}
			return mpath;
		}
	}
	return NULL;
}

/**
 * mesh_path_lookup - look up a path in the mesh path table
 * @sdata: local subif
 * @dst: hardware address (ETH_ALEN length) of destination
 *
 * Returns: pointer to the mesh path structure, or NULL if not found
 *
 * Locking: must be called within an RCU read-side critical section.
 */
struct mesh_path *
mesh_path_lookup(struct ieee80211_sub_if_data *sdata, const u8 *dst)
{
	return mpath_lookup(rcu_dereference(mesh_paths), dst, sdata);
}
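/*
 * Illustrative caller sketch (added commentary, not in the original
 * source): the returned mesh_path is only guaranteed valid inside the
 * RCU read-side section, so a typical caller looks like:
 *
 *	rcu_read_lock();
 *	mpath = mesh_path_lookup(sdata, dst);
 *	if (mpath && (mpath->flags & MESH_PATH_ACTIVE))
 *		sta = rcu_dereference(mpath->next_hop);
 *	rcu_read_unlock();
 */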
struct mesh_path *
mpp_path_lookup(struct ieee80211_sub_if_data *sdata, const u8 *dst)
{
	return mpath_lookup(rcu_dereference(mpp_paths), dst, sdata);
}


/**
 * mesh_path_lookup_by_idx - look up a path in the mesh path table by its index
 * @sdata: local subif, or NULL for all entries
 * @idx: index
 *
 * Returns: pointer to the mesh path structure, or NULL if not found.
 *
 * Locking: must be called within an RCU read-side critical section.
 */
struct mesh_path *
mesh_path_lookup_by_idx(struct ieee80211_sub_if_data *sdata, int idx)
{
	struct mesh_table *tbl = rcu_dereference(mesh_paths);
	struct mpath_node *node;
	int i;
	int j = 0;

	for_each_mesh_entry(tbl, node, i) {
		if (sdata && node->mpath->sdata != sdata)
			continue;
		if (j++ == idx) {
			if (mpath_expired(node->mpath)) {
				spin_lock_bh(&node->mpath->state_lock);
				node->mpath->flags &= ~MESH_PATH_ACTIVE;
				spin_unlock_bh(&node->mpath->state_lock);
			}
			return node->mpath;
		}
	}

	return NULL;
}
/**
 * mesh_path_add_gate - mark the given mpath as a path to a mesh gate and
 * record it in the table's list of known gates
 * @mpath: gate path to add to table
 */
int mesh_path_add_gate(struct mesh_path *mpath)
{
	struct mesh_table *tbl;
	struct mpath_node *gate, *new_gate;
	int err;

	rcu_read_lock();
	tbl = rcu_dereference(mesh_paths);

	hlist_for_each_entry_rcu(gate, tbl->known_gates, list)
		if (gate->mpath == mpath) {
			err = -EEXIST;
			goto err_rcu;
		}

	new_gate = kzalloc(sizeof(struct mpath_node), GFP_ATOMIC);
	if (!new_gate) {
		err = -ENOMEM;
		goto err_rcu;
	}

	mpath->is_gate = true;
	mpath->sdata->u.mesh.num_gates++;
	new_gate->mpath = mpath;
	spin_lock_bh(&tbl->gates_lock);
	hlist_add_head_rcu(&new_gate->list, tbl->known_gates);
	spin_unlock_bh(&tbl->gates_lock);
	mpath_dbg(mpath->sdata,
		  "Mesh path: Recorded new gate: %pM. %d known gates\n",
		  mpath->dst, mpath->sdata->u.mesh.num_gates);
	err = 0;
err_rcu:
	rcu_read_unlock();
	return err;
}
/**
 * mesh_gate_del - remove a mesh gate from the list of known gates
 * @tbl: table which holds our list of known gates
 * @mpath: gate mpath
 *
 * Locking: must be called inside rcu_read_lock() section
 */
static void mesh_gate_del(struct mesh_table *tbl, struct mesh_path *mpath)
{
	struct mpath_node *gate;
	struct hlist_node *q;

	hlist_for_each_entry_safe(gate, q, tbl->known_gates, list) {
		if (gate->mpath != mpath)
			continue;
		spin_lock_bh(&tbl->gates_lock);
		hlist_del_rcu(&gate->list);
		kfree_rcu(gate, rcu);
		spin_unlock_bh(&tbl->gates_lock);
		mpath->sdata->u.mesh.num_gates--;
		mpath->is_gate = false;
		mpath_dbg(mpath->sdata,
			  "Mesh path: Deleted gate: %pM. %d known gates\n",
			  mpath->dst, mpath->sdata->u.mesh.num_gates);
		break;
	}
}

/**
 * mesh_gate_num - number of gates known to this interface
 * @sdata: subif data
 */
int mesh_gate_num(struct ieee80211_sub_if_data *sdata)
{
	return sdata->u.mesh.num_gates;
}
/**
 * mesh_path_add - allocate and add a new path to the mesh path table
 * @sdata: local subif
 * @dst: destination address of the path (ETH_ALEN length)
 *
 * Returns: the mesh path (new or already existing) on success, or an
 * ERR_PTR() on failure.
 *
 * State: the initial state of the new path is set to 0
 */
struct mesh_path *mesh_path_add(struct ieee80211_sub_if_data *sdata,
				const u8 *dst)
{
	struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
	struct ieee80211_local *local = sdata->local;
	struct mesh_table *tbl;
	struct mesh_path *mpath, *new_mpath;
	struct mpath_node *node, *new_node;
	struct hlist_head *bucket;
	int grow = 0;
	int err;
	u32 hash_idx;

	if (ether_addr_equal(dst, sdata->vif.addr))
		/* never add ourselves as neighbours */
		return ERR_PTR(-ENOTSUPP);

	if (is_multicast_ether_addr(dst))
		return ERR_PTR(-ENOTSUPP);

	if (atomic_add_unless(&sdata->u.mesh.mpaths, 1, MESH_MAX_MPATHS) == 0)
		return ERR_PTR(-ENOSPC);

	read_lock_bh(&pathtbl_resize_lock);
	tbl = resize_dereference_mesh_paths();

	hash_idx = mesh_table_hash(dst, sdata, tbl);
	bucket = &tbl->hash_buckets[hash_idx];

	spin_lock(&tbl->hashwlock[hash_idx]);

	hlist_for_each_entry(node, bucket, list) {
		mpath = node->mpath;
		if (mpath->sdata == sdata &&
		    ether_addr_equal(dst, mpath->dst))
			goto found;
	}

	err = -ENOMEM;
	new_mpath = kzalloc(sizeof(struct mesh_path), GFP_ATOMIC);
	if (!new_mpath)
		goto err_path_alloc;

	new_node = kmalloc(sizeof(struct mpath_node), GFP_ATOMIC);
	if (!new_node)
		goto err_node_alloc;

	memcpy(new_mpath->dst, dst, ETH_ALEN);
	eth_broadcast_addr(new_mpath->rann_snd_addr);
	new_mpath->is_root = false;
	new_mpath->sdata = sdata;
	new_mpath->flags = 0;
	skb_queue_head_init(&new_mpath->frame_queue);
	new_node->mpath = new_mpath;
	new_mpath->timer.data = (unsigned long) new_mpath;
	new_mpath->timer.function = mesh_path_timer;
	new_mpath->exp_time = jiffies;
	spin_lock_init(&new_mpath->state_lock);
	init_timer(&new_mpath->timer);

	hlist_add_head_rcu(&new_node->list, bucket);
	if (atomic_inc_return(&tbl->entries) >=
	    tbl->mean_chain_len * (tbl->hash_mask + 1))
		grow = 1;

	mesh_paths_generation++;

	if (grow) {
		set_bit(MESH_WORK_GROW_MPATH_TABLE, &ifmsh->wrkq_flags);
		ieee80211_queue_work(&local->hw, &sdata->work);
	}
	mpath = new_mpath;
found:
	spin_unlock(&tbl->hashwlock[hash_idx]);
	read_unlock_bh(&pathtbl_resize_lock);
	return mpath;

err_node_alloc:
	kfree(new_mpath);
err_path_alloc:
	atomic_dec(&sdata->u.mesh.mpaths);
	spin_unlock(&tbl->hashwlock[hash_idx]);
	read_unlock_bh(&pathtbl_resize_lock);
	return ERR_PTR(err);
}
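/*
 * Illustrative caller sketch (added commentary, not in the original
 * source): mesh_path_add() returns the new (or already existing) path
 * or an ERR_PTR(), so callers are expected to check it with IS_ERR():
 *
 *	mpath = mesh_path_add(sdata, dst);
 *	if (IS_ERR(mpath))
 *		return PTR_ERR(mpath);
 *	mesh_queue_preq(mpath, PREQ_Q_F_START);
 *
 * (Kicking off path discovery with mesh_queue_preq() is one plausible
 * follow-up, mirroring what the HWMP path resolution code does.)
 */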
static void mesh_table_free_rcu(struct rcu_head *rcu)
{
	struct mesh_table *tbl = container_of(rcu, struct mesh_table, rcu_head);

	mesh_table_free(tbl, false);
}

void mesh_mpath_table_grow(void)
{
	struct mesh_table *oldtbl, *newtbl;

	write_lock_bh(&pathtbl_resize_lock);
	oldtbl = resize_dereference_mesh_paths();
	newtbl = mesh_table_alloc(oldtbl->size_order + 1);
	if (!newtbl)
		goto out;
	if (mesh_table_grow(oldtbl, newtbl) < 0) {
		__mesh_table_free(newtbl);
		goto out;
	}
	rcu_assign_pointer(mesh_paths, newtbl);

	call_rcu(&oldtbl->rcu_head, mesh_table_free_rcu);

 out:
	write_unlock_bh(&pathtbl_resize_lock);
}
void mesh_mpp_table_grow(void)
{
	struct mesh_table *oldtbl, *newtbl;

	write_lock_bh(&pathtbl_resize_lock);
	oldtbl = resize_dereference_mpp_paths();
	newtbl = mesh_table_alloc(oldtbl->size_order + 1);
	if (!newtbl)
		goto out;
	if (mesh_table_grow(oldtbl, newtbl) < 0) {
		__mesh_table_free(newtbl);
		goto out;
	}
	rcu_assign_pointer(mpp_paths, newtbl);
	call_rcu(&oldtbl->rcu_head, mesh_table_free_rcu);

 out:
	write_unlock_bh(&pathtbl_resize_lock);
}
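/*
 * Added commentary (not in the original source): both grow functions
 * above follow the classic RCU replace-and-reclaim sequence: allocate
 * a larger table, rehash every node into it while holding
 * pathtbl_resize_lock for writing, publish the new table with
 * rcu_assign_pointer(), and free the old one only after a grace
 * period via call_rcu(), so lookups still traversing the old table
 * remain safe.
 */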
int mpp_path_add(struct ieee80211_sub_if_data *sdata,
		 const u8 *dst, const u8 *mpp)
{
	struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
	struct ieee80211_local *local = sdata->local;
	struct mesh_table *tbl;
	struct mesh_path *mpath, *new_mpath;
	struct mpath_node *node, *new_node;
	struct hlist_head *bucket;
	int grow = 0;
	int err = 0;
	u32 hash_idx;

	if (ether_addr_equal(dst, sdata->vif.addr))
		/* never add ourselves as neighbours */
		return -ENOTSUPP;

	if (is_multicast_ether_addr(dst))
		return -ENOTSUPP;

	err = -ENOMEM;
	new_mpath = kzalloc(sizeof(struct mesh_path), GFP_ATOMIC);
	if (!new_mpath)
		goto err_path_alloc;

	new_node = kmalloc(sizeof(struct mpath_node), GFP_ATOMIC);
	if (!new_node)
		goto err_node_alloc;

	read_lock_bh(&pathtbl_resize_lock);
	memcpy(new_mpath->dst, dst, ETH_ALEN);
	memcpy(new_mpath->mpp, mpp, ETH_ALEN);
	new_mpath->sdata = sdata;
	new_mpath->flags = 0;
	skb_queue_head_init(&new_mpath->frame_queue);
	new_node->mpath = new_mpath;
	init_timer(&new_mpath->timer);
	new_mpath->exp_time = jiffies;
	spin_lock_init(&new_mpath->state_lock);

	tbl = resize_dereference_mpp_paths();

	hash_idx = mesh_table_hash(dst, sdata, tbl);
	bucket = &tbl->hash_buckets[hash_idx];

	spin_lock(&tbl->hashwlock[hash_idx]);

	err = -EEXIST;
	hlist_for_each_entry(node, bucket, list) {
		mpath = node->mpath;
		if (mpath->sdata == sdata &&
		    ether_addr_equal(dst, mpath->dst))
			goto err_exists;
	}

	hlist_add_head_rcu(&new_node->list, bucket);
	if (atomic_inc_return(&tbl->entries) >=
	    tbl->mean_chain_len * (tbl->hash_mask + 1))
		grow = 1;

	spin_unlock(&tbl->hashwlock[hash_idx]);
	read_unlock_bh(&pathtbl_resize_lock);
	if (grow) {
		set_bit(MESH_WORK_GROW_MPP_TABLE, &ifmsh->wrkq_flags);
		ieee80211_queue_work(&local->hw, &sdata->work);
	}
	return 0;

err_exists:
	spin_unlock(&tbl->hashwlock[hash_idx]);
	read_unlock_bh(&pathtbl_resize_lock);
	kfree(new_node);
err_node_alloc:
	kfree(new_mpath);
err_path_alloc:
	return err;
}

/**
 * mesh_plink_broken - deactivates paths and sends perr when a link breaks
 *
 * @sta: broken peer link
 *
 * This function must be called from the rate control algorithm if enough
 * delivery errors suggest that a peer link is no longer usable.
 */
void mesh_plink_broken(struct sta_info *sta)
{
	struct mesh_table *tbl;
	static const u8 bcast[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
	struct mesh_path *mpath;
	struct mpath_node *node;
	struct ieee80211_sub_if_data *sdata = sta->sdata;
	int i;

	rcu_read_lock();
	tbl = rcu_dereference(mesh_paths);
	for_each_mesh_entry(tbl, node, i) {
		mpath = node->mpath;
		if (rcu_access_pointer(mpath->next_hop) == sta &&
		    mpath->flags & MESH_PATH_ACTIVE &&
		    !(mpath->flags & MESH_PATH_FIXED)) {
			spin_lock_bh(&mpath->state_lock);
			mpath->flags &= ~MESH_PATH_ACTIVE;
			++mpath->sn;
			spin_unlock_bh(&mpath->state_lock);
			mesh_path_error_tx(sdata,
				sdata->u.mesh.mshcfg.element_ttl,
				mpath->dst, mpath->sn,
				WLAN_REASON_MESH_PATH_DEST_UNREACHABLE, bcast);
		}
	}
	rcu_read_unlock();
}
static void mesh_path_node_reclaim(struct rcu_head *rp)
{
	struct mpath_node *node = container_of(rp, struct mpath_node, rcu);
	struct ieee80211_sub_if_data *sdata = node->mpath->sdata;

	del_timer_sync(&node->mpath->timer);
	atomic_dec(&sdata->u.mesh.mpaths);
	kfree(node->mpath);
	kfree(node);
}

/* needs to be called with the corresponding hashwlock taken */
static void __mesh_path_del(struct mesh_table *tbl, struct mpath_node *node)
{
	struct mesh_path *mpath;

	mpath = node->mpath;
	spin_lock(&mpath->state_lock);
	mpath->flags |= MESH_PATH_RESOLVING;
	if (mpath->is_gate)
		mesh_gate_del(tbl, mpath);
	hlist_del_rcu(&node->list);
	call_rcu(&node->rcu, mesh_path_node_reclaim);
	spin_unlock(&mpath->state_lock);
	atomic_dec(&tbl->entries);
}
/**
 * mesh_path_flush_by_nexthop - Deletes mesh paths if their next hop matches
 *
 * @sta: mesh peer to match
 *
 * RCU notes: this function is called when a mesh plink transitions from
 * PLINK_ESTAB to any other state, since PLINK_ESTAB state is the only one that
 * allows path creation. This will happen before the sta can be freed (because
 * sta_info_destroy() calls this) so any reader in a rcu read block will be
 * protected against the plink disappearing.
 */
void mesh_path_flush_by_nexthop(struct sta_info *sta)
{
	struct mesh_table *tbl;
	struct mesh_path *mpath;
	struct mpath_node *node;
	int i;

	rcu_read_lock();
	read_lock_bh(&pathtbl_resize_lock);
	tbl = resize_dereference_mesh_paths();
	for_each_mesh_entry(tbl, node, i) {
		mpath = node->mpath;
		if (rcu_access_pointer(mpath->next_hop) == sta) {
			spin_lock(&tbl->hashwlock[i]);
			__mesh_path_del(tbl, node);
			spin_unlock(&tbl->hashwlock[i]);
		}
	}
	read_unlock_bh(&pathtbl_resize_lock);
	rcu_read_unlock();
}
static void table_flush_by_iface(struct mesh_table *tbl,
				 struct ieee80211_sub_if_data *sdata)
{
	struct mesh_path *mpath;
	struct mpath_node *node;
	int i;

	WARN_ON(!rcu_read_lock_held());
	for_each_mesh_entry(tbl, node, i) {
		mpath = node->mpath;
		if (mpath->sdata != sdata)
			continue;
		spin_lock_bh(&tbl->hashwlock[i]);
		__mesh_path_del(tbl, node);
		spin_unlock_bh(&tbl->hashwlock[i]);
	}
}
/**
 * mesh_path_flush_by_iface - Deletes all mesh paths associated with a given iface
 *
 * This function deletes both mesh paths and mesh portal paths.
 *
 * @sdata: interface data to match
 */
void mesh_path_flush_by_iface(struct ieee80211_sub_if_data *sdata)
{
	struct mesh_table *tbl;

	rcu_read_lock();
	read_lock_bh(&pathtbl_resize_lock);
	tbl = resize_dereference_mesh_paths();
	table_flush_by_iface(tbl, sdata);
	tbl = resize_dereference_mpp_paths();
	table_flush_by_iface(tbl, sdata);
	read_unlock_bh(&pathtbl_resize_lock);
	rcu_read_unlock();
}
/**
 * mesh_path_del - delete a mesh path from the table
 *
 * @sdata: local subif
 * @addr: dst address (ETH_ALEN length)
 *
 * Returns: 0 if successful
 */
int mesh_path_del(struct ieee80211_sub_if_data *sdata, const u8 *addr)
{
	struct mesh_table *tbl;
	struct mesh_path *mpath;
	struct mpath_node *node;
	struct hlist_head *bucket;
	int hash_idx;
	int err = 0;

	read_lock_bh(&pathtbl_resize_lock);
	tbl = resize_dereference_mesh_paths();
	hash_idx = mesh_table_hash(addr, sdata, tbl);
	bucket = &tbl->hash_buckets[hash_idx];

	spin_lock(&tbl->hashwlock[hash_idx]);
	hlist_for_each_entry(node, bucket, list) {
		mpath = node->mpath;
		if (mpath->sdata == sdata &&
		    ether_addr_equal(addr, mpath->dst)) {
			__mesh_path_del(tbl, node);
			goto enddel;
		}
	}

	err = -ENXIO;
enddel:
	mesh_paths_generation++;
	spin_unlock(&tbl->hashwlock[hash_idx]);
	read_unlock_bh(&pathtbl_resize_lock);
	return err;
}
/**
 * mesh_path_tx_pending - sends pending frames in a mesh path queue
 *
 * @mpath: mesh path to activate
 *
 * Locking: the state_lock of the mpath structure must NOT be held when calling
 * this function.
 */
void mesh_path_tx_pending(struct mesh_path *mpath)
{
	if (mpath->flags & MESH_PATH_ACTIVE)
		ieee80211_add_pending_skbs(mpath->sdata->local,
				&mpath->frame_queue);
}
/**
 * mesh_path_send_to_gates - sends pending frames to all known mesh gates
 *
 * @mpath: mesh path whose queue will be emptied
 *
 * If there is only one gate, the frames are transferred from the failed mpath
 * queue to that gate's queue.  If there is more than one gate, the frames
 * are copied from each gate to the next.  After frames are copied, the
 * mpath queues are emptied onto the transmission queue.
 */
int mesh_path_send_to_gates(struct mesh_path *mpath)
{
	struct ieee80211_sub_if_data *sdata = mpath->sdata;
	struct mesh_table *tbl;
	struct mesh_path *from_mpath = mpath;
	struct mpath_node *gate = NULL;
	bool copy = false;
	struct hlist_head *known_gates;

	rcu_read_lock();
	tbl = rcu_dereference(mesh_paths);
	known_gates = tbl->known_gates;
	rcu_read_unlock();

	if (!known_gates)
		return -EHOSTUNREACH;

	hlist_for_each_entry_rcu(gate, known_gates, list) {
		if (gate->mpath->sdata != sdata)
			continue;

		if (gate->mpath->flags & MESH_PATH_ACTIVE) {
			mpath_dbg(sdata, "Forwarding to %pM\n", gate->mpath->dst);
			mesh_path_move_to_queue(gate->mpath, from_mpath, copy);
			from_mpath = gate->mpath;
			copy = true;
		} else {
			mpath_dbg(sdata,
				  "Not forwarding %p (flags %#x)\n",
				  gate->mpath, gate->mpath->flags);
		}
	}

	hlist_for_each_entry_rcu(gate, known_gates, list)
		if (gate->mpath->sdata == sdata) {
			mpath_dbg(sdata, "Sending to %pM\n", gate->mpath->dst);
			mesh_path_tx_pending(gate->mpath);
		}

	return (from_mpath == mpath) ? -EHOSTUNREACH : 0;
}
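/*
 * Worked example (added commentary, not in the original source): with
 * three active gates G1, G2 and G3, mesh_path_send_to_gates() first
 * *moves* the failed queue to G1 (copy == false), then *copies* G1's
 * queue to G2 and G2's queue to G3 (copy == true splices the frames
 * back onto the source queue), so every gate ends up holding the
 * pending frames.  The second walk then flushes each gate's queue via
 * mesh_path_tx_pending().
 */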
/**
 * mesh_path_discard_frame - discard a frame whose path could not be resolved
 *
 * @sdata: network subif the frame was to be sent through
 * @skb: frame to discard
 *
 * Locking: the function must be called within a rcu_read_lock region
 */
void mesh_path_discard_frame(struct ieee80211_sub_if_data *sdata,
			     struct sk_buff *skb)
{
	kfree_skb(skb);
	sdata->u.mesh.mshstats.dropped_frames_no_route++;
}
/**
 * mesh_path_flush_pending - free the pending queue of a mesh path
 *
 * @mpath: mesh path whose queue has to be freed
 *
 * Locking: the function must be called within a rcu_read_lock region
 */
void mesh_path_flush_pending(struct mesh_path *mpath)
{
	struct sk_buff *skb;

	while ((skb = skb_dequeue(&mpath->frame_queue)) != NULL)
		mesh_path_discard_frame(mpath->sdata, skb);
}
/**
 * mesh_path_fix_nexthop - force a specific next hop for a mesh path
 *
 * @mpath: the mesh path to modify
 * @next_hop: the next hop to force
 *
 * Locking: this function takes mpath->state_lock itself, so it must NOT be
 * called with that lock already held.
 */
void mesh_path_fix_nexthop(struct mesh_path *mpath, struct sta_info *next_hop)
{
	spin_lock_bh(&mpath->state_lock);
	mesh_path_assign_nexthop(mpath, next_hop);
	mpath->sn = 0xffff;
	mpath->metric = 0;
	mpath->hop_count = 0;
	mpath->exp_time = 0;
	mpath->flags |= MESH_PATH_FIXED;
	mesh_path_activate(mpath);
	spin_unlock_bh(&mpath->state_lock);
	mesh_path_tx_pending(mpath);
}
static void mesh_path_node_free(struct hlist_node *p, bool free_leafs)
{
	struct mesh_path *mpath;
	struct mpath_node *node = hlist_entry(p, struct mpath_node, list);

	mpath = node->mpath;
	hlist_del_rcu(p);
	if (free_leafs) {
		del_timer_sync(&mpath->timer);
		kfree(mpath);
	}
	kfree(node);
}

static int mesh_path_node_copy(struct hlist_node *p, struct mesh_table *newtbl)
{
	struct mesh_path *mpath;
	struct mpath_node *node, *new_node;
	u32 hash_idx;

	new_node = kmalloc(sizeof(struct mpath_node), GFP_ATOMIC);
	if (new_node == NULL)
		return -ENOMEM;

	node = hlist_entry(p, struct mpath_node, list);
	mpath = node->mpath;
	new_node->mpath = mpath;
	hash_idx = mesh_table_hash(mpath->dst, mpath->sdata, newtbl);
	hlist_add_head(&new_node->list,
			&newtbl->hash_buckets[hash_idx]);
	return 0;
}
int mesh_pathtbl_init(void)
{
	struct mesh_table *tbl_path, *tbl_mpp;
	int ret;

	tbl_path = mesh_table_alloc(INIT_PATHS_SIZE_ORDER);
	if (!tbl_path)
		return -ENOMEM;
	tbl_path->free_node = &mesh_path_node_free;
	tbl_path->copy_node = &mesh_path_node_copy;
	tbl_path->mean_chain_len = MEAN_CHAIN_LEN;
	tbl_path->known_gates = kzalloc(sizeof(struct hlist_head), GFP_ATOMIC);
	if (!tbl_path->known_gates) {
		ret = -ENOMEM;
		goto free_path;
	}
	INIT_HLIST_HEAD(tbl_path->known_gates);

	tbl_mpp = mesh_table_alloc(INIT_PATHS_SIZE_ORDER);
	if (!tbl_mpp) {
		ret = -ENOMEM;
		goto free_path;
	}
	tbl_mpp->free_node = &mesh_path_node_free;
	tbl_mpp->copy_node = &mesh_path_node_copy;
	tbl_mpp->mean_chain_len = MEAN_CHAIN_LEN;
	tbl_mpp->known_gates = kzalloc(sizeof(struct hlist_head), GFP_ATOMIC);
	if (!tbl_mpp->known_gates) {
		ret = -ENOMEM;
		goto free_mpp;
	}
	INIT_HLIST_HEAD(tbl_mpp->known_gates);

	/* Need no locking since this is during init */
	RCU_INIT_POINTER(mesh_paths, tbl_path);
	RCU_INIT_POINTER(mpp_paths, tbl_mpp);

	return 0;

free_mpp:
	mesh_table_free(tbl_mpp, true);
free_path:
	mesh_table_free(tbl_path, true);
	return ret;
}
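/*
 * Added commentary (not in the original source): the two global tables
 * created here back all mesh interfaces and are torn down again by
 * mesh_pathtbl_unregister() below.
 */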
void mesh_path_expire(struct ieee80211_sub_if_data *sdata)
{
	struct mesh_table *tbl;
	struct mesh_path *mpath;
	struct mpath_node *node;
	int i;

	rcu_read_lock();
	tbl = rcu_dereference(mesh_paths);
	for_each_mesh_entry(tbl, node, i) {
		if (node->mpath->sdata != sdata)
			continue;
		mpath = node->mpath;
		if ((!(mpath->flags & MESH_PATH_RESOLVING)) &&
		    (!(mpath->flags & MESH_PATH_FIXED)) &&
		     time_after(jiffies, mpath->exp_time + MESH_PATH_EXPIRE))
			mesh_path_del(mpath->sdata, mpath->dst);
	}
	rcu_read_unlock();
}

void mesh_pathtbl_unregister(void)
{
	/* no need for locking during exit path */
	mesh_table_free(rcu_dereference_protected(mesh_paths, 1), true);
	mesh_table_free(rcu_dereference_protected(mpp_paths, 1), true);
}