Lines Matching refs:bat_priv

95 struct batadv_priv *bat_priv = netdev_priv(dev);
96 struct net_device_stats *stats = &bat_priv->stats;
98 stats->tx_packets = batadv_sum_counter(bat_priv, BATADV_CNT_TX);
99 stats->tx_bytes = batadv_sum_counter(bat_priv, BATADV_CNT_TX_BYTES);
100 stats->tx_dropped = batadv_sum_counter(bat_priv, BATADV_CNT_TX_DROPPED);
101 stats->rx_packets = batadv_sum_counter(bat_priv, BATADV_CNT_RX);
102 stats->rx_bytes = batadv_sum_counter(bat_priv, BATADV_CNT_RX_BYTES);
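
The stats callback above (lines 95-102) folds per-CPU counters into a single struct net_device_stats. A minimal sketch of how such a summation helper can look; in batman-adv the real batadv_sum_counter lives outside this file, so this is an approximation, not the verbatim kernel code:

```c
#include <linux/percpu.h>

/* Sketch: fold one per-CPU counter slot into a single 64-bit total.
 * Assumes bat_priv->bat_counters was allocated with __alloc_percpu()
 * as an array of uint64_t, one slot per BATADV_CNT_* index.
 */
static uint64_t sketch_sum_counter(struct batadv_priv *bat_priv, size_t idx)
{
	uint64_t *counters, sum = 0;
	int cpu;

	for_each_possible_cpu(cpu) {
		counters = per_cpu_ptr(bat_priv->bat_counters, cpu);
		sum += counters[idx];
	}

	return sum;
}
```
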
108 struct batadv_priv *bat_priv = netdev_priv(dev);
119 if (atomic_read(&bat_priv->mesh_state) == BATADV_MESH_ACTIVE) {
120 batadv_tt_local_remove(bat_priv, old_addr, BATADV_NO_FLAGS,
156 struct batadv_priv *bat_priv = netdev_priv(soft_iface);
177 if (atomic_read(&bat_priv->mesh_state) != BATADV_MESH_ACTIVE)
196 if (batadv_bla_tx(bat_priv, skb, vid))
223 gw_mode = atomic_read(&bat_priv->gw_mode);
256 forw_mode = batadv_mcast_forw_mode(bat_priv, skb,
270 primary_if = batadv_primary_if_get_selected(bat_priv);
278 if (batadv_dat_snoop_outgoing_arp_request(bat_priv, skb))
299 seqno = atomic_inc_return(&bat_priv->bcast_seqno);
302 batadv_add_bcast_packet_to_list(bat_priv, skb, brd_delay);
313 ret = batadv_gw_out_of_range(bat_priv, skb);
316 ret = batadv_send_skb_via_gw(bat_priv, skb, vid);
318 ret = batadv_send_skb_unicast(bat_priv, skb,
322 if (batadv_dat_snoop_outgoing_arp_request(bat_priv,
326 batadv_dat_snoop_outgoing_arp_reply(bat_priv, skb);
328 ret = batadv_send_skb_via_tt(bat_priv, skb, dst_hint,
335 batadv_inc_counter(bat_priv, BATADV_CNT_TX);
336 batadv_add_counter(bat_priv, BATADV_CNT_TX_BYTES, data_len);
342 batadv_inc_counter(bat_priv, BATADV_CNT_TX_DROPPED);
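
Lines 335, 336 and 342 account each transmitted or dropped frame on the per-CPU counters. A hedged sketch of the increment helpers; the names mirror the calls above, the bodies are an assumption modeled on batman-adv's inline helpers:

```c
#include <linux/percpu.h>

/* Sketch: lock-free per-CPU accounting; readers later sum over all
 * CPUs (see the summation sketch near the top of this listing).
 */
static inline void sketch_add_counter(struct batadv_priv *bat_priv,
				      size_t idx, size_t count)
{
	this_cpu_add(bat_priv->bat_counters[idx], count);
}

#define sketch_inc_counter(b, i) sketch_add_counter(b, i, 1)
```
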
354 struct batadv_priv *bat_priv = netdev_priv(soft_iface);
403 batadv_inc_counter(bat_priv, BATADV_CNT_RX);
404 batadv_add_counter(bat_priv, BATADV_CNT_RX_BYTES,
412 if (batadv_bla_rx(bat_priv, skb, vid, is_bcast))
416 batadv_tt_add_temporary_global_entry(bat_priv, orig_node,
423 if (batadv_vlan_ap_isola_get(bat_priv, vid) &&
424 batadv_tt_global_is_isolated(bat_priv, ethhdr->h_source,
429 skb->mark &= ~bat_priv->isolation_mark_mask;
430 skb->mark |= bat_priv->isolation_mark;
432 } else if (batadv_is_ap_isolated(bat_priv, ethhdr->h_source,
454 spin_lock_bh(&vlan->bat_priv->softif_vlan_list_lock);
456 spin_unlock_bh(&vlan->bat_priv->softif_vlan_list_lock);
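
Lines 454/456 are the lock/unlock pair in the VLAN reference-release path; the list removal between them is not shown because it does not mention bat_priv. A sketch of the surrounding release logic; the refcount field, the rcu head name and the kfree_rcu() use are assumptions modeled on batman-adv code of this vintage:

```c
#include <linux/rculist.h>
#include <linux/rcupdate.h>
#include <linux/spinlock.h>

/* Sketch: once the last reference is gone, unlink the VLAN from
 * bat_priv->softif_vlan_list under the list lock and free it after an
 * RCU grace period.
 */
static void sketch_softif_vlan_free_ref(struct batadv_softif_vlan *vlan)
{
	if (!atomic_dec_and_test(&vlan->refcount))
		return;

	spin_lock_bh(&vlan->bat_priv->softif_vlan_list_lock);
	hlist_del_rcu(&vlan->list);
	spin_unlock_bh(&vlan->bat_priv->softif_vlan_list_lock);

	kfree_rcu(vlan, rcu);
}
```
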
464 * @bat_priv: the bat priv with all the soft interface information
470 struct batadv_softif_vlan *batadv_softif_vlan_get(struct batadv_priv *bat_priv,
476 hlist_for_each_entry_rcu(vlan_tmp, &bat_priv->softif_vlan_list, list) {
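
batadv_softif_vlan_get (lines 470/476) walks bat_priv->softif_vlan_list under RCU and takes a reference on the matching entry. A sketch of that lookup pattern; the refcount field name is an assumption consistent with the release sketch above:

```c
#include <linux/rculist.h>
#include <linux/atomic.h>

/* Sketch: RCU-protected lookup that only returns a VLAN whose
 * refcount could still be raised (atomic_inc_not_zero).
 */
static struct batadv_softif_vlan *
sketch_softif_vlan_get(struct batadv_priv *bat_priv, unsigned short vid)
{
	struct batadv_softif_vlan *vlan_tmp, *vlan = NULL;

	rcu_read_lock();
	hlist_for_each_entry_rcu(vlan_tmp, &bat_priv->softif_vlan_list, list) {
		if (vlan_tmp->vid != vid)
			continue;

		if (!atomic_inc_not_zero(&vlan_tmp->refcount))
			continue;

		vlan = vlan_tmp;
		break;
	}
	rcu_read_unlock();

	return vlan;
}
```
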
493 * @bat_priv: the bat priv with all the soft interface information
498 int batadv_softif_create_vlan(struct batadv_priv *bat_priv, unsigned short vid)
503 vlan = batadv_softif_vlan_get(bat_priv, vid);
513 vlan->bat_priv = bat_priv;
519 err = batadv_sysfs_add_vlan(bat_priv->soft_iface, vlan);
525 spin_lock_bh(&bat_priv->softif_vlan_list_lock);
526 hlist_add_head_rcu(&vlan->list, &bat_priv->softif_vlan_list);
527 spin_unlock_bh(&bat_priv->softif_vlan_list_lock);
532 batadv_tt_local_add(bat_priv->soft_iface,
533 bat_priv->soft_iface->dev_addr, vid,
541 * @bat_priv: the bat priv with all the soft interface information
544 static void batadv_softif_destroy_vlan(struct batadv_priv *bat_priv,
550 batadv_tt_local_remove(bat_priv, bat_priv->soft_iface->dev_addr,
553 batadv_sysfs_del_vlan(bat_priv, vlan);
570 struct batadv_priv *bat_priv = netdev_priv(dev);
588 vlan = batadv_softif_vlan_get(bat_priv, vid);
590 return batadv_softif_create_vlan(bat_priv, vid);
596 ret = batadv_sysfs_add_vlan(bat_priv->soft_iface, vlan);
607 batadv_tt_local_add(bat_priv->soft_iface,
608 bat_priv->soft_iface->dev_addr, vid,
628 struct batadv_priv *bat_priv = netdev_priv(dev);
637 vlan = batadv_softif_vlan_get(bat_priv, vid | BATADV_VLAN_HAS_TAG);
641 batadv_softif_destroy_vlan(bat_priv, vlan);
689 struct batadv_priv *bat_priv;
692 bat_priv = container_of(work, struct batadv_priv,
694 soft_iface = bat_priv->soft_iface;
697 vlan = batadv_softif_vlan_get(bat_priv, BATADV_NO_FLAGS);
699 batadv_softif_destroy_vlan(bat_priv, vlan);
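
The destroy-finish handler (lines 689-699) runs from batadv_event_workqueue (queued at line 955 below) and recovers bat_priv from the embedded work_struct. A sketch of that container_of() pattern; the teardown steps after the VLAN removal are an approximation, not the exact sequence of the real handler:

```c
#include <linux/workqueue.h>
#include <linux/rtnetlink.h>

/* Sketch: the work item carries no argument of its own; the owning
 * bat_priv is recovered via container_of() on cleanup_work.
 */
static void sketch_softif_destroy_finish(struct work_struct *work)
{
	struct batadv_softif_vlan *vlan;
	struct batadv_priv *bat_priv;
	struct net_device *soft_iface;

	bat_priv = container_of(work, struct batadv_priv, cleanup_work);
	soft_iface = bat_priv->soft_iface;

	/* drop the untagged VLAN created at interface setup time */
	vlan = batadv_softif_vlan_get(bat_priv, BATADV_NO_FLAGS);
	if (vlan) {
		batadv_softif_destroy_vlan(bat_priv, vlan);
		/* a matching reference release would follow here */
	}

	rtnl_lock();
	unregister_netdevice(soft_iface);
	rtnl_unlock();
}
```
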
715 struct batadv_priv *bat_priv;
722 bat_priv = netdev_priv(dev);
723 bat_priv->soft_iface = dev;
724 INIT_WORK(&bat_priv->cleanup_work, batadv_softif_destroy_finish);
729 bat_priv->bat_counters = __alloc_percpu(cnt_len, __alignof__(uint64_t));
730 if (!bat_priv->bat_counters)
733 atomic_set(&bat_priv->aggregated_ogms, 1);
734 atomic_set(&bat_priv->bonding, 0);
736 atomic_set(&bat_priv->bridge_loop_avoidance, 0);
739 atomic_set(&bat_priv->distributed_arp_table, 1);
742 bat_priv->mcast.flags = BATADV_NO_FLAGS;
743 atomic_set(&bat_priv->multicast_mode, 1);
744 atomic_set(&bat_priv->mcast.num_disabled, 0);
745 atomic_set(&bat_priv->mcast.num_want_all_unsnoopables, 0);
746 atomic_set(&bat_priv->mcast.num_want_all_ipv4, 0);
747 atomic_set(&bat_priv->mcast.num_want_all_ipv6, 0);
749 atomic_set(&bat_priv->gw_mode, BATADV_GW_MODE_OFF);
750 atomic_set(&bat_priv->gw_sel_class, 20);
751 atomic_set(&bat_priv->gw.bandwidth_down, 100);
752 atomic_set(&bat_priv->gw.bandwidth_up, 20);
753 atomic_set(&bat_priv->orig_interval, 1000);
754 atomic_set(&bat_priv->hop_penalty, 30);
756 atomic_set(&bat_priv->log_level, 0);
758 atomic_set(&bat_priv->fragmentation, 1);
759 atomic_set(&bat_priv->packet_size_max, ETH_DATA_LEN);
760 atomic_set(&bat_priv->bcast_queue_left, BATADV_BCAST_QUEUE_LEN);
761 atomic_set(&bat_priv->batman_queue_left, BATADV_BATMAN_QUEUE_LEN);
763 atomic_set(&bat_priv->mesh_state, BATADV_MESH_INACTIVE);
764 atomic_set(&bat_priv->bcast_seqno, 1);
765 atomic_set(&bat_priv->tt.vn, 0);
766 atomic_set(&bat_priv->tt.local_changes, 0);
767 atomic_set(&bat_priv->tt.ogm_append_cnt, 0);
769 atomic_set(&bat_priv->bla.num_requests, 0);
771 bat_priv->tt.last_changeset = NULL;
772 bat_priv->tt.last_changeset_len = 0;
773 bat_priv->isolation_mark = 0;
774 bat_priv->isolation_mark_mask = 0;
778 atomic_set(&bat_priv->frag_seqno, random_seqno);
780 bat_priv->primary_if = NULL;
781 bat_priv->num_ifaces = 0;
783 batadv_nc_init_bat_priv(bat_priv);
785 ret = batadv_algo_select(bat_priv, batadv_routing_algo);
802 free_percpu(bat_priv->bat_counters);
803 bat_priv->bat_counters = NULL;
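
The counter array used by the increment and summation sketches above is allocated per CPU during late init (lines 729/730) and released again on the error path (lines 802/803). A minimal sketch of that allocate/cleanup pair, assuming BATADV_CNT_NUM counter slots as elsewhere in the file:

```c
#include <linux/percpu.h>

static int sketch_alloc_counters(struct batadv_priv *bat_priv)
{
	size_t cnt_len = sizeof(uint64_t) * BATADV_CNT_NUM;

	/* one uint64_t slot per BATADV_CNT_* value, replicated per CPU */
	bat_priv->bat_counters = __alloc_percpu(cnt_len,
						__alignof__(uint64_t));
	if (!bat_priv->bat_counters)
		return -ENOMEM;

	return 0;
}

static void sketch_free_counters(struct batadv_priv *bat_priv)
{
	free_percpu(bat_priv->bat_counters);
	bat_priv->bat_counters = NULL;
}
```
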
885 /* some scheduled RCU callbacks need the bat_priv struct to accomplish
887 * netdev and its private data (bat_priv)
953 struct batadv_priv *bat_priv = netdev_priv(soft_iface);
955 queue_work(batadv_event_workqueue, &bat_priv->cleanup_work);
1095 struct batadv_priv *bat_priv = netdev_priv(dev);
1099 data[i] = batadv_sum_counter(bat_priv, i);
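
The ethtool hook at lines 1095/1099 fills the exported statistics array by summing each per-CPU counter. A sketch of that loop, assuming BATADV_CNT_NUM bounds the counter indices as in the rest of the file:

```c
#include <linux/netdevice.h>
#include <linux/ethtool.h>

/* Sketch: one 64-bit value per BATADV_CNT_* index, each obtained by
 * folding the per-CPU slots (see the summation sketch near the top).
 */
static void sketch_get_ethtool_stats(struct net_device *dev,
				     struct ethtool_stats *stats,
				     uint64_t *data)
{
	struct batadv_priv *bat_priv = netdev_priv(dev);
	int i;

	for (i = 0; i < BATADV_CNT_NUM; i++)
		data[i] = batadv_sum_counter(bat_priv, i);
}
```
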