/* Copyright (C) 2007-2014 B.A.T.M.A.N. contributors:
 *
 * Marek Lindner, Simon Wunderlich
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 */

#include "main.h"
#include "distributed-arp-table.h"
#include "send.h"
#include "routing.h"
#include "translation-table.h"
#include "soft-interface.h"
#include "hard-interface.h"
#include "gateway_common.h"
#include "gateway_client.h"
#include "originator.h"
#include "network-coding.h"
#include "fragmentation.h"
#include "multicast.h"

static void batadv_send_outstanding_bcast_packet(struct work_struct *work);

/* send out an already prepared packet to the given address via the
 * specified batman interface
 */
int batadv_send_skb_packet(struct sk_buff *skb,
			   struct batadv_hard_iface *hard_iface,
			   const uint8_t *dst_addr)
{
	struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
	struct ethhdr *ethhdr;

	if (hard_iface->if_status != BATADV_IF_ACTIVE)
		goto send_skb_err;

	if (unlikely(!hard_iface->net_dev))
		goto send_skb_err;

	if (!(hard_iface->net_dev->flags & IFF_UP)) {
		pr_warn("Interface %s is not up - can't send packet via that interface!\n",
			hard_iface->net_dev->name);
		goto send_skb_err;
	}

	/* make room for the ethernet header */
	if (batadv_skb_head_push(skb, ETH_HLEN) < 0)
		goto send_skb_err;

	skb_reset_mac_header(skb);

	ethhdr = eth_hdr(skb);
	ether_addr_copy(ethhdr->h_source, hard_iface->net_dev->dev_addr);
	ether_addr_copy(ethhdr->h_dest, dst_addr);
	ethhdr->h_proto = htons(ETH_P_BATMAN);

	skb_set_network_header(skb, ETH_HLEN);
	skb->protocol = htons(ETH_P_BATMAN);

	skb->dev = hard_iface->net_dev;

	/* Save a clone of the skb to use when decoding coded packets */
	batadv_nc_skb_store_for_decoding(bat_priv, skb);

	/* dev_queue_xmit() returns a negative result on error. However on
	 * congestion and traffic shaping, it drops and returns NET_XMIT_DROP
	 * (which is > 0). This will not be treated as an error.
	 */
	return dev_queue_xmit(skb);
send_skb_err:
	kfree_skb(skb);
	return NET_XMIT_DROP;
}
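
/* Usage sketch (hedged; this mirrors batadv_send_skb_to_orig() below
 * rather than defining a new caller): a fully prepared skb is handed over
 * together with the outgoing hard interface and the next hop's MAC:
 *
 *	batadv_send_skb_packet(skb, neigh_node->if_incoming,
 *			       neigh_node->addr);
 *
 * The skb is always consumed - freed on error, queued for transmit by
 * dev_queue_xmit() otherwise - so callers must not touch it afterwards.
 */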

/**
 * batadv_send_skb_to_orig - Lookup next-hop and transmit skb.
 * @skb: Packet to be transmitted.
 * @orig_node: Final destination of the packet.
 * @recv_if: Interface used when receiving the packet (can be NULL).
 *
 * Looks up the best next-hop towards the passed originator and passes the
 * skb on for preparation of the MAC header. If the packet originated from
 * this host, NULL can be passed as recv_if and no interface alternating is
 * attempted.
 *
 * Returns NET_XMIT_SUCCESS on success, NET_XMIT_DROP on failure, or
 * NET_XMIT_POLICED if the skb is buffered for later transmit.
 */
int batadv_send_skb_to_orig(struct sk_buff *skb,
			    struct batadv_orig_node *orig_node,
			    struct batadv_hard_iface *recv_if)
{
	struct batadv_priv *bat_priv = orig_node->bat_priv;
	struct batadv_neigh_node *neigh_node;
	int ret = NET_XMIT_DROP;

	/* batadv_find_router() increases neigh_node's refcount if found. */
	neigh_node = batadv_find_router(bat_priv, orig_node, recv_if);
	if (!neigh_node)
		goto out;

	/* Check if the skb is too large to send in one piece and fragment
	 * it if needed.
	 */
	if (atomic_read(&bat_priv->fragmentation) &&
	    skb->len > neigh_node->if_incoming->net_dev->mtu) {
		/* Fragment and send packet. */
		if (batadv_frag_send_packet(skb, orig_node, neigh_node))
			ret = NET_XMIT_SUCCESS;

		goto out;
	}

	/* try to network code the packet, if it is received on an interface
	 * (i.e. being forwarded). If the packet originates from this node or
	 * if network coding fails, then send the packet as usual.
	 */
	if (recv_if && batadv_nc_skb_forward(skb, neigh_node)) {
		ret = NET_XMIT_POLICED;
	} else {
		batadv_send_skb_packet(skb, neigh_node->if_incoming,
				       neigh_node->addr);
		ret = NET_XMIT_SUCCESS;
	}

out:
	if (neigh_node)
		batadv_neigh_node_free_ref(neigh_node);

	return ret;
}
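
/* Example (hedged; this is how batadv_send_skb_unicast() below handles
 * the result): the skb is not freed when no router is found, so a caller
 * that still owns it must release it on NET_XMIT_DROP:
 *
 *	if (batadv_send_skb_to_orig(skb, orig_node, NULL) == NET_XMIT_DROP)
 *		kfree_skb(skb);
 */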

/**
 * batadv_send_skb_push_fill_unicast - extend the buffer and initialize the
 *  common fields for unicast packets
 * @skb: the skb carrying the unicast header to initialize
 * @hdr_size: number of bytes to push at the beginning of the skb
 * @orig_node: the destination node
 *
 * Returns false if the buffer extension was not possible or true otherwise.
 */
static bool
batadv_send_skb_push_fill_unicast(struct sk_buff *skb, int hdr_size,
				  struct batadv_orig_node *orig_node)
{
	struct batadv_unicast_packet *unicast_packet;
	uint8_t ttvn = (uint8_t)atomic_read(&orig_node->last_ttvn);

	if (batadv_skb_head_push(skb, hdr_size) < 0)
		return false;

	unicast_packet = (struct batadv_unicast_packet *)skb->data;
	unicast_packet->version = BATADV_COMPAT_VERSION;
	/* batman packet type: unicast */
	unicast_packet->packet_type = BATADV_UNICAST;
	/* set unicast ttl */
	unicast_packet->ttl = BATADV_TTL;
	/* copy the destination for faster routing */
	ether_addr_copy(unicast_packet->dest, orig_node->orig);
	/* set the destination tt version number */
	unicast_packet->ttvn = ttvn;

	return true;
}

/**
 * batadv_send_skb_prepare_unicast - encapsulate an skb with a unicast header
 * @skb: the skb containing the payload to encapsulate
 * @orig_node: the destination node
 *
 * Returns false if the payload could not be encapsulated or true otherwise.
 */
static bool batadv_send_skb_prepare_unicast(struct sk_buff *skb,
					    struct batadv_orig_node *orig_node)
{
	size_t uni_size = sizeof(struct batadv_unicast_packet);

	return batadv_send_skb_push_fill_unicast(skb, uni_size, orig_node);
}

/**
 * batadv_send_skb_prepare_unicast_4addr - encapsulate an skb with a
 *  unicast 4addr header
 * @bat_priv: the bat priv with all the soft interface information
 * @skb: the skb containing the payload to encapsulate
 * @orig: the destination node
 * @packet_subtype: the unicast 4addr packet subtype to use
 *
 * Returns false if the payload could not be encapsulated or true otherwise.
 */
bool batadv_send_skb_prepare_unicast_4addr(struct batadv_priv *bat_priv,
					   struct sk_buff *skb,
					   struct batadv_orig_node *orig,
					   int packet_subtype)
{
	struct batadv_hard_iface *primary_if;
	struct batadv_unicast_4addr_packet *uc_4addr_packet;
	bool ret = false;

	primary_if = batadv_primary_if_get_selected(bat_priv);
	if (!primary_if)
		goto out;

	/* Push the header space and fill the unicast_packet substructure.
	 * We can do that because the first member of the uc_4addr_packet
	 * is of type struct unicast_packet.
	 */
	if (!batadv_send_skb_push_fill_unicast(skb, sizeof(*uc_4addr_packet),
					       orig))
		goto out;

	uc_4addr_packet = (struct batadv_unicast_4addr_packet *)skb->data;
	uc_4addr_packet->u.packet_type = BATADV_UNICAST_4ADDR;
	ether_addr_copy(uc_4addr_packet->src, primary_if->net_dev->dev_addr);
	uc_4addr_packet->subtype = packet_subtype;
	uc_4addr_packet->reserved = 0;

	ret = true;
out:
	if (primary_if)
		batadv_hardif_free_ref(primary_if);
	return ret;
}
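
/* Invocation sketch (hedged assumption about the caller; the distributed
 * ARP table code is one user of the 4addr subtypes such as BATADV_P_DATA
 * defined in packet.h):
 *
 *	if (!batadv_send_skb_prepare_unicast_4addr(bat_priv, skb, orig_node,
 *						   BATADV_P_DATA))
 *		goto err;
 */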

/**
 * batadv_send_skb_unicast - encapsulate and send an skb via unicast
 * @bat_priv: the bat priv with all the soft interface information
 * @skb: payload to send
 * @packet_type: the batman unicast packet type to use
 * @packet_subtype: the unicast 4addr packet subtype (only relevant for unicast
 *  4addr packets)
 * @orig_node: the originator to send the packet to
 * @vid: the vid to be used to search the translation table
 *
 * Wrap the given skb into a batman-adv unicast or unicast-4addr header
 * depending on whether BATADV_UNICAST or BATADV_UNICAST_4ADDR was supplied
 * as packet_type. Then send this frame to the given orig_node and release a
 * reference to this orig_node.
 *
 * Returns NET_XMIT_DROP in case of error or NET_XMIT_SUCCESS otherwise.
 */
int batadv_send_skb_unicast(struct batadv_priv *bat_priv,
			    struct sk_buff *skb, int packet_type,
			    int packet_subtype,
			    struct batadv_orig_node *orig_node,
			    unsigned short vid)
{
	struct ethhdr *ethhdr;
	struct batadv_unicast_packet *unicast_packet;
	int ret = NET_XMIT_DROP;

	if (!orig_node)
		goto out;

	switch (packet_type) {
	case BATADV_UNICAST:
		if (!batadv_send_skb_prepare_unicast(skb, orig_node))
			goto out;
		break;
	case BATADV_UNICAST_4ADDR:
		if (!batadv_send_skb_prepare_unicast_4addr(bat_priv, skb,
							   orig_node,
							   packet_subtype))
			goto out;
		break;
	default:
		/* this function supports UNICAST and UNICAST_4ADDR only. It
		 * should never be invoked with any other packet type
		 */
		goto out;
	}

	/* skb->data might have been reallocated by
	 * batadv_send_skb_prepare_unicast{,_4addr}()
	 */
	ethhdr = eth_hdr(skb);
	unicast_packet = (struct batadv_unicast_packet *)skb->data;

	/* inform the destination node that we are still missing a correct route
	 * for this client. The destination will receive this packet and will
	 * try to reroute it because the ttvn contained in the header is less
	 * than the current one
	 */
	if (batadv_tt_global_client_is_roaming(bat_priv, ethhdr->h_dest, vid))
		unicast_packet->ttvn = unicast_packet->ttvn - 1;

	if (batadv_send_skb_to_orig(skb, orig_node, NULL) != NET_XMIT_DROP)
		ret = NET_XMIT_SUCCESS;

out:
	if (orig_node)
		batadv_orig_node_free_ref(orig_node);
	if (ret == NET_XMIT_DROP)
		kfree_skb(skb);
	return ret;
}
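
/* Caller-side note (hedged; batadv_send_skb_via_tt_generic() below is the
 * in-file example): both the orig_node reference and, on error, the skb
 * are consumed here, so a lookup/send pair needs no extra cleanup:
 *
 *	orig_node = batadv_transtable_search(bat_priv, src, dst, vid);
 *	return batadv_send_skb_unicast(bat_priv, skb, BATADV_UNICAST, 0,
 *				       orig_node, vid);
 */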

/**
 * batadv_send_skb_via_tt_generic - send an skb via TT lookup
 * @bat_priv: the bat priv with all the soft interface information
 * @skb: payload to send
 * @packet_type: the batman unicast packet type to use
 * @packet_subtype: the unicast 4addr packet subtype (only relevant for unicast
 *  4addr packets)
 * @dst_hint: can be used to override the destination contained in the skb
 * @vid: the vid to be used to search the translation table
 *
 * Look up the recipient node for the destination address in the ethernet
 * header via the translation table. Wrap the given skb into a batman-adv
 * unicast or unicast-4addr header depending on whether BATADV_UNICAST or
 * BATADV_UNICAST_4ADDR was supplied as packet_type. Then send this frame
 * to the corresponding destination node.
 *
 * Returns NET_XMIT_DROP in case of error or NET_XMIT_SUCCESS otherwise.
 */
int batadv_send_skb_via_tt_generic(struct batadv_priv *bat_priv,
				   struct sk_buff *skb, int packet_type,
				   int packet_subtype, uint8_t *dst_hint,
				   unsigned short vid)
{
	struct ethhdr *ethhdr = (struct ethhdr *)skb->data;
	struct batadv_orig_node *orig_node;
	uint8_t *src, *dst;

	src = ethhdr->h_source;
	dst = ethhdr->h_dest;

	/* if we got a hint, let's send the packet to this client (if any) */
	if (dst_hint) {
		src = NULL;
		dst = dst_hint;
	}
	orig_node = batadv_transtable_search(bat_priv, src, dst, vid);

	return batadv_send_skb_unicast(bat_priv, skb, packet_type,
				       packet_subtype, orig_node, vid);
}
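
/* The usual entry points are the thin wrappers declared in send.h,
 * batadv_send_skb_via_tt() and batadv_send_skb_via_tt_4addr(); a sketch:
 *
 *	ret = batadv_send_skb_via_tt(bat_priv, skb, NULL, vid);
 */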

/**
 * batadv_send_skb_via_gw - send an skb via gateway lookup
 * @bat_priv: the bat priv with all the soft interface information
 * @skb: payload to send
 * @vid: the vid to be used to search the translation table
 *
 * Look up the currently selected gateway. Wrap the given skb into a batman-adv
 * unicast header and send this frame to this gateway node.
 *
 * Returns NET_XMIT_DROP in case of error or NET_XMIT_SUCCESS otherwise.
 */
int batadv_send_skb_via_gw(struct batadv_priv *bat_priv, struct sk_buff *skb,
			   unsigned short vid)
{
	struct batadv_orig_node *orig_node;

	orig_node = batadv_gw_get_selected_orig(bat_priv);
	return batadv_send_skb_unicast(bat_priv, skb, BATADV_UNICAST, 0,
				       orig_node, vid);
}
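
/* Sketch (assumption about the caller, which lives in soft-interface.c):
 * traffic whose destination cannot be resolved via the translation table
 * can be handed to the currently selected gateway instead:
 *
 *	ret = batadv_send_skb_via_gw(bat_priv, skb, vid);
 */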

void batadv_schedule_bat_ogm(struct batadv_hard_iface *hard_iface)
{
	struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface);

	if ((hard_iface->if_status == BATADV_IF_NOT_IN_USE) ||
	    (hard_iface->if_status == BATADV_IF_TO_BE_REMOVED))
		return;

	/* the interface gets activated here instead of in
	 * batadv_hardif_activate_interface() (where the originator mac is
	 * set) to avoid a race with outdated packets (especially ones
	 * carrying uninitialized mac addresses) still sitting in the packet
	 * queue
	 */
	if (hard_iface->if_status == BATADV_IF_TO_BE_ACTIVATED)
		hard_iface->if_status = BATADV_IF_ACTIVE;

	bat_priv->bat_algo_ops->bat_ogm_schedule(hard_iface);
}

static void batadv_forw_packet_free(struct batadv_forw_packet *forw_packet)
{
	if (forw_packet->skb)
		kfree_skb(forw_packet->skb);
	if (forw_packet->if_incoming)
		batadv_hardif_free_ref(forw_packet->if_incoming);
	if (forw_packet->if_outgoing)
		batadv_hardif_free_ref(forw_packet->if_outgoing);
	kfree(forw_packet);
}

static void
_batadv_add_bcast_packet_to_list(struct batadv_priv *bat_priv,
				 struct batadv_forw_packet *forw_packet,
				 unsigned long send_time)
{
	/* add new packet to packet list */
	spin_lock_bh(&bat_priv->forw_bcast_list_lock);
	hlist_add_head(&forw_packet->list, &bat_priv->forw_bcast_list);
	spin_unlock_bh(&bat_priv->forw_bcast_list_lock);

	/* start timer for this packet */
	queue_delayed_work(batadv_event_workqueue, &forw_packet->delayed_work,
			   send_time);
}

/* add a broadcast packet to the queue and setup timers. broadcast packets
 * are sent multiple times to increase the probability of being received.
 *
 * This function returns NETDEV_TX_OK on success and NETDEV_TX_BUSY on
 * errors.
 *
 * The skb is not consumed, so the caller should make sure that the
 * skb is freed.
 */
int batadv_add_bcast_packet_to_list(struct batadv_priv *bat_priv,
				    const struct sk_buff *skb,
				    unsigned long delay)
{
	struct batadv_hard_iface *primary_if = NULL;
	struct batadv_forw_packet *forw_packet;
	struct batadv_bcast_packet *bcast_packet;
	struct sk_buff *newskb;

	if (!batadv_atomic_dec_not_zero(&bat_priv->bcast_queue_left)) {
		batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
			   "bcast packet queue full\n");
		goto out;
	}

	primary_if = batadv_primary_if_get_selected(bat_priv);
	if (!primary_if)
		goto out_and_inc;

	forw_packet = kmalloc(sizeof(*forw_packet), GFP_ATOMIC);

	if (!forw_packet)
		goto out_and_inc;

	newskb = skb_copy(skb, GFP_ATOMIC);
	if (!newskb)
		goto packet_free;

	/* as we have a copy now, it is safe to decrease the TTL */
	bcast_packet = (struct batadv_bcast_packet *)newskb->data;
	bcast_packet->ttl--;

	skb_reset_mac_header(newskb);

	forw_packet->skb = newskb;
	forw_packet->if_incoming = primary_if;
	forw_packet->if_outgoing = NULL;

	/* how often did we send the bcast packet? */
	forw_packet->num_packets = 0;

	INIT_DELAYED_WORK(&forw_packet->delayed_work,
			  batadv_send_outstanding_bcast_packet);

	_batadv_add_bcast_packet_to_list(bat_priv, forw_packet, delay);
	return NETDEV_TX_OK;

packet_free:
	kfree(forw_packet);
out_and_inc:
	atomic_inc(&bat_priv->bcast_queue_left);
out:
	if (primary_if)
		batadv_hardif_free_ref(primary_if);
	return NETDEV_TX_BUSY;
}
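
/* Caller-side sketch (hedged; modelled on the broadcast path in
 * soft-interface.c): because the skb is copied rather than consumed, the
 * caller releases its own reference after queueing:
 *
 *	if (batadv_add_bcast_packet_to_list(bat_priv, skb, 1) != NETDEV_TX_OK)
 *		goto dropped;
 *	consume_skb(skb);
 */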

static void batadv_send_outstanding_bcast_packet(struct work_struct *work)
{
	struct batadv_hard_iface *hard_iface;
	struct delayed_work *delayed_work;
	struct batadv_forw_packet *forw_packet;
	struct sk_buff *skb1;
	struct net_device *soft_iface;
	struct batadv_priv *bat_priv;

	delayed_work = container_of(work, struct delayed_work, work);
	forw_packet = container_of(delayed_work, struct batadv_forw_packet,
				   delayed_work);
	soft_iface = forw_packet->if_incoming->soft_iface;
	bat_priv = netdev_priv(soft_iface);

	spin_lock_bh(&bat_priv->forw_bcast_list_lock);
	hlist_del(&forw_packet->list);
	spin_unlock_bh(&bat_priv->forw_bcast_list_lock);

	if (atomic_read(&bat_priv->mesh_state) == BATADV_MESH_DEACTIVATING)
		goto out;

	if (batadv_dat_drop_broadcast_packet(bat_priv, forw_packet))
		goto out;

	/* rebroadcast packet */
	rcu_read_lock();
	list_for_each_entry_rcu(hard_iface, &batadv_hardif_list, list) {
		if (hard_iface->soft_iface != soft_iface)
			continue;

		if (forw_packet->num_packets >= hard_iface->num_bcasts)
			continue;

		/* send a copy of the saved skb */
		skb1 = skb_clone(forw_packet->skb, GFP_ATOMIC);
		if (skb1)
			batadv_send_skb_packet(skb1, hard_iface,
					       batadv_broadcast_addr);
	}
	rcu_read_unlock();

	forw_packet->num_packets++;

	/* if we still have some more bcasts to send */
	if (forw_packet->num_packets < BATADV_NUM_BCASTS_MAX) {
		_batadv_add_bcast_packet_to_list(bat_priv, forw_packet,
						 msecs_to_jiffies(5));
		return;
	}

out:
	batadv_forw_packet_free(forw_packet);
	atomic_inc(&bat_priv->bcast_queue_left);
}

void batadv_send_outstanding_bat_ogm_packet(struct work_struct *work)
{
	struct delayed_work *delayed_work;
	struct batadv_forw_packet *forw_packet;
	struct batadv_priv *bat_priv;

	delayed_work = container_of(work, struct delayed_work, work);
	forw_packet = container_of(delayed_work, struct batadv_forw_packet,
				   delayed_work);
	bat_priv = netdev_priv(forw_packet->if_incoming->soft_iface);
	spin_lock_bh(&bat_priv->forw_bat_list_lock);
	hlist_del(&forw_packet->list);
	spin_unlock_bh(&bat_priv->forw_bat_list_lock);

	if (atomic_read(&bat_priv->mesh_state) == BATADV_MESH_DEACTIVATING)
		goto out;

	bat_priv->bat_algo_ops->bat_ogm_emit(forw_packet);

	/* we have to have at least one packet in the queue to determine the
	 * queue's wake-up time unless we are shutting down.
	 *
	 * only re-schedule if this is the "original" copy, e.g. the OGM of the
	 * primary interface should only be rescheduled once per period, but
	 * this function will be called for the forw_packet instances of the
	 * other secondary interfaces as well.
	 */
	if (forw_packet->own &&
	    forw_packet->if_incoming == forw_packet->if_outgoing)
		batadv_schedule_bat_ogm(forw_packet->if_incoming);

out:
	/* don't count own packet */
	if (!forw_packet->own)
		atomic_inc(&bat_priv->batman_queue_left);

	batadv_forw_packet_free(forw_packet);
}

void
batadv_purge_outstanding_packets(struct batadv_priv *bat_priv,
				 const struct batadv_hard_iface *hard_iface)
{
	struct batadv_forw_packet *forw_packet;
	struct hlist_node *safe_tmp_node;
	bool pending;

	if (hard_iface)
		batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
			   "purge_outstanding_packets(): %s\n",
			   hard_iface->net_dev->name);
	else
		batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
			   "purge_outstanding_packets()\n");

	/* free bcast list */
	spin_lock_bh(&bat_priv->forw_bcast_list_lock);
	hlist_for_each_entry_safe(forw_packet, safe_tmp_node,
				  &bat_priv->forw_bcast_list, list) {
		/* if purge_outstanding_packets() was called with an argument
		 * we delete only packets belonging to the given interface
		 */
		if ((hard_iface) &&
		    (forw_packet->if_incoming != hard_iface))
			continue;

		spin_unlock_bh(&bat_priv->forw_bcast_list_lock);

		/* batadv_send_outstanding_bcast_packet() will lock the list to
		 * delete the item from the list
		 */
		pending = cancel_delayed_work_sync(&forw_packet->delayed_work);
		spin_lock_bh(&bat_priv->forw_bcast_list_lock);

		if (pending) {
			hlist_del(&forw_packet->list);
			batadv_forw_packet_free(forw_packet);
		}
	}
	spin_unlock_bh(&bat_priv->forw_bcast_list_lock);

	/* free batman packet list */
	spin_lock_bh(&bat_priv->forw_bat_list_lock);
	hlist_for_each_entry_safe(forw_packet, safe_tmp_node,
				  &bat_priv->forw_bat_list, list) {
		/* if purge_outstanding_packets() was called with an argument
		 * we delete only packets belonging to the given interface
		 */
		if ((hard_iface) &&
		    (forw_packet->if_incoming != hard_iface) &&
		    (forw_packet->if_outgoing != hard_iface))
			continue;

		spin_unlock_bh(&bat_priv->forw_bat_list_lock);

		/* batadv_send_outstanding_bat_ogm_packet() will lock the list
		 * to delete the item from the list
		 */
		pending = cancel_delayed_work_sync(&forw_packet->delayed_work);
		spin_lock_bh(&bat_priv->forw_bat_list_lock);

		if (pending) {
			hlist_del(&forw_packet->list);
			batadv_forw_packet_free(forw_packet);
		}
	}
	spin_unlock_bh(&bat_priv->forw_bat_list_lock);
}