Lines Matching defs:queue

64 	u_int16_t queue_num;			/* number of this queue */
74 	struct list_head queue_list;		/* packets in queue */
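
The two fields matched above (lines 64 and 74) live in the per-queue instance structure. As orientation, a sketch of that structure follows; every field other than queue_num and queue_list is an assumption inferred from the accesses matched further down (peer_portid, queue_maxlen, copy_range, flags, lock, queue_total, id_sequence), not a verbatim quote of the source.

	struct nfqnl_instance {
		struct hlist_node hlist;	/* entry in the per-net instance table (assumed) */
		struct rcu_head rcu;

		u32 peer_portid;		/* netlink port of the bound listener */
		unsigned int queue_maxlen;
		unsigned int copy_range;
		unsigned int queue_dropped;
		unsigned int queue_user_dropped;

		u_int16_t queue_num;		/* number of this queue */
		u_int8_t copy_mode;
		u_int32_t flags;		/* NFQA_CFG_F_* bits */

		spinlock_t lock;		/* protects queue_list and the counters */
		unsigned int queue_total;
		unsigned int id_sequence;	/* source of per-packet ids */
		struct list_head queue_list;	/* packets in queue */
	};
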
158 static void nfqnl_flush(struct nfqnl_instance *queue, nfqnl_cmpfn cmpfn,
188 __enqueue_entry(struct nfqnl_instance *queue, struct nf_queue_entry *entry)
190 list_add_tail(&entry->list, &queue->queue_list);
191 queue->queue_total++;
195 __dequeue_entry(struct nfqnl_instance *queue, struct nf_queue_entry *entry)
198 queue->queue_total--;
202 find_dequeue_entry(struct nfqnl_instance *queue, unsigned int id)
206 spin_lock_bh(&queue->lock);
208 list_for_each_entry(i, &queue->queue_list, list) {
216 __dequeue_entry(queue, entry);
218 spin_unlock_bh(&queue->lock);
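
Lines 188-218 are the core list discipline: __enqueue_entry and __dequeue_entry expect the caller to hold queue->lock, while find_dequeue_entry takes the lock itself and unlinks the entry it finds. The following sketch joins the matched fragments; the unmatched lines (list_del, the id comparison, the return) are assumptions based on the standard <linux/list.h> API.

	static inline void
	__enqueue_entry(struct nfqnl_instance *queue, struct nf_queue_entry *entry)
	{
		list_add_tail(&entry->list, &queue->queue_list);
		queue->queue_total++;
	}

	static void
	__dequeue_entry(struct nfqnl_instance *queue, struct nf_queue_entry *entry)
	{
		list_del(&entry->list);
		queue->queue_total--;
	}

	/* Look up a queued packet by id and unlink it atomically. */
	static struct nf_queue_entry *
	find_dequeue_entry(struct nfqnl_instance *queue, unsigned int id)
	{
		struct nf_queue_entry *entry = NULL, *i;

		spin_lock_bh(&queue->lock);
		list_for_each_entry(i, &queue->queue_list, list) {
			if (i->id == id) {
				entry = i;
				break;
			}
		}
		if (entry)
			__dequeue_entry(queue, entry);
		spin_unlock_bh(&queue->lock);

		return entry;
	}
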
224 nfqnl_flush(struct nfqnl_instance *queue, nfqnl_cmpfn cmpfn, unsigned long data)
228 spin_lock_bh(&queue->lock);
229 list_for_each_entry_safe(entry, next, &queue->queue_list, list) {
232 queue->queue_total--;
236 spin_unlock_bh(&queue->lock);
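
nfqnl_flush (lines 224-236) uses the _safe iterator because matching entries are unlinked during the walk; each entry that cmpfn accepts is dropped and handed back to the stack. A sketch with the unmatched lines filled in as assumptions:

	static void
	nfqnl_flush(struct nfqnl_instance *queue, nfqnl_cmpfn cmpfn, unsigned long data)
	{
		struct nf_queue_entry *entry, *next;

		spin_lock_bh(&queue->lock);
		list_for_each_entry_safe(entry, next, &queue->queue_list, list) {
			if (!cmpfn || cmpfn(entry, data)) {
				list_del(&entry->list);
				queue->queue_total--;
				nf_reinject(entry, NF_DROP);	/* terminal verdict for the packet */
			}
		}
		spin_unlock_bh(&queue->lock);
	}
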
282 nfqnl_build_packet_message(struct net *net, struct nfqnl_instance *queue,
325 switch ((enum nfqnl_config_mode)ACCESS_ONCE(queue->copy_mode)) {
331 if (!(queue->flags & NFQA_CFG_F_GSO) &&
336 data_len = ACCESS_ONCE(queue->copy_range);
347 if (queue->flags & NFQA_CFG_F_CONNTRACK)
350 if (queue->flags & NFQA_CFG_F_UID_GID) {
355 skb = nfnetlink_alloc_skb(net, size, queue->peer_portid,
373 nfmsg->res_id = htons(queue->queue_num);
470 if ((queue->flags & NFQA_CFG_F_UID_GID) && entskb->sk &&
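
Within nfqnl_build_packet_message, the ACCESS_ONCE read at line 325 picks how much payload is copied to userspace. A plausible shape for that switch, reconstructed around the matched fragments; the NFQNL_COPY_META/NONE arms and the length clamp are assumptions:

	switch ((enum nfqnl_config_mode)ACCESS_ONCE(queue->copy_mode)) {
	case NFQNL_COPY_META:
	case NFQNL_COPY_NONE:
		break;				/* metadata only, no payload attribute */

	case NFQNL_COPY_PACKET:
		/* without NFQA_CFG_F_GSO, partial checksums must be
		 * completed before payload is exposed to userspace */
		if (!(queue->flags & NFQA_CFG_F_GSO) &&
		    entskb->ip_summed == CHECKSUM_PARTIAL &&
		    skb_checksum_help(entskb))
			return NULL;

		data_len = ACCESS_ONCE(queue->copy_range);
		if (data_len > entskb->len)
			data_len = entskb->len;
		break;
	}
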
509 __nfqnl_enqueue_packet(struct net *net, struct nfqnl_instance *queue,
517 nskb = nfqnl_build_packet_message(net, queue, entry, &packet_id_ptr);
522 spin_lock_bh(&queue->lock);
524 if (queue->queue_total >= queue->queue_maxlen) {
525 if (queue->flags & NFQA_CFG_F_FAIL_OPEN) {
529 queue->queue_dropped++;
531 queue->queue_total);
535 entry->id = ++queue->id_sequence;
539 err = nfnetlink_unicast(nskb, net, queue->peer_portid, MSG_DONTWAIT);
541 queue->queue_user_dropped++;
545 __enqueue_entry(queue, entry);
547 spin_unlock_bh(&queue->lock);
553 spin_unlock_bh(&queue->lock);
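
__nfqnl_enqueue_packet (lines 509-553) is where the length cap and fail-open policy apply: once queue_total reaches queue_maxlen, NFQA_CFG_F_FAIL_OPEN converts the drop into an accept. A sketch of the locked section; the error labels and the err initialization are assumptions stitched around the matched lines:

	/* earlier: nskb built by nfqnl_build_packet_message(), err = -ENOBUFS (assumed) */
	spin_lock_bh(&queue->lock);

	if (queue->queue_total >= queue->queue_maxlen) {
		if (queue->flags & NFQA_CFG_F_FAIL_OPEN) {
			err = 0;		/* fail open: caller accepts instead of dropping */
		} else {
			queue->queue_dropped++;
			net_warn_ratelimited("nf_queue: full at %d entries, dropping packet(s)\n",
					     queue->queue_total);
		}
		goto err_out_free_nskb;
	}
	entry->id = ++queue->id_sequence;
	*packet_id_ptr = htonl(entry->id);

	/* nfnetlink_unicast() consumes nskb on both success and failure */
	err = nfnetlink_unicast(nskb, net, queue->peer_portid, MSG_DONTWAIT);
	if (err < 0) {
		queue->queue_user_dropped++;
		goto err_out_unlock;
	}

	__enqueue_entry(queue, entry);
	spin_unlock_bh(&queue->lock);
	return 0;

err_out_free_nskb:
	kfree_skb(nskb);
err_out_unlock:
	spin_unlock_bh(&queue->lock);
	return err;
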
600 __nfqnl_enqueue_packet_gso(struct net *net, struct nfqnl_instance *queue,
611 ret = __nfqnl_enqueue_packet(net, queue, entry);
622 ret = __nfqnl_enqueue_packet(net, queue, entry_seg);
633 struct nfqnl_instance *queue;
641 queue = instance_lookup(q, queuenum);
642 if (!queue)
645 if (queue->copy_mode == NFQNL_COPY_NONE)
659 if ((queue->flags & NFQA_CFG_F_GSO) || !skb_is_gso(skb))
660 return __nfqnl_enqueue_packet(net, queue, entry);
675 err = __nfqnl_enqueue_packet_gso(net, queue,
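
Lines 600-675 handle GSO: when the queue set NFQA_CFG_F_GSO, or the skb is not GSO at all, the packet is enqueued unsegmented; otherwise it is software-segmented and each segment enqueued separately via __nfqnl_enqueue_packet_gso. The decision point, sketched with assumed surrounding lines (skb_gso_segment and the error path are inferred, not quoted):

	struct sk_buff *segs;
	struct sk_buff *skb = entry->skb;

	/* queue accepts GSO skbs, or there is nothing to segment */
	if ((queue->flags & NFQA_CFG_F_GSO) || !skb_is_gso(skb))
		return __nfqnl_enqueue_packet(net, queue, entry);

	/* otherwise segment in software; each resulting skb is then
	 * handed to __nfqnl_enqueue_packet_gso() in a walk over the
	 * segs->next chain (loop elided in this sketch) */
	segs = skb_gso_segment(skb, 0);
	if (IS_ERR_OR_NULL(segs))
		return -EINVAL;		/* assumed error code */
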
727 nfqnl_set_mode(struct nfqnl_instance *queue,
732 spin_lock_bh(&queue->lock);
736 queue->copy_mode = mode;
737 queue->copy_range = 0;
741 queue->copy_mode = mode;
743 queue->copy_range = NFQNL_MAX_COPY_RANGE;
745 queue->copy_range = range;
752 spin_unlock_bh(&queue->lock);
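
nfqnl_set_mode (lines 727-752) normalizes the (mode, range) pair under the queue lock; for NFQNL_COPY_PACKET a zero or oversized range is clamped to NFQNL_MAX_COPY_RANGE. A reconstruction from the fragments above; the switch skeleton and the -EINVAL default are assumptions:

	static int
	nfqnl_set_mode(struct nfqnl_instance *queue,
		       unsigned char mode, unsigned int range)
	{
		int status = 0;

		spin_lock_bh(&queue->lock);
		switch (mode) {
		case NFQNL_COPY_NONE:
		case NFQNL_COPY_META:
			queue->copy_mode = mode;
			queue->copy_range = 0;		/* no payload copied */
			break;

		case NFQNL_COPY_PACKET:
			queue->copy_mode = mode;
			if (range == 0 || range > NFQNL_MAX_COPY_RANGE)
				queue->copy_range = NFQNL_MAX_COPY_RANGE;
			else
				queue->copy_range = range;
			break;

		default:
			status = -EINVAL;
		}
		spin_unlock_bh(&queue->lock);

		return status;
	}
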
779 /* drop all packets with either indev or outdev == ifindex from all queue
865 struct nfqnl_instance *queue;
867 queue = instance_lookup(q, queue_num);
868 if (!queue)
871 if (queue->peer_portid != nlportid)
874 return queue;
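
verdict_instance_lookup (lines 865-874, named at its call site on line 914) wraps instance_lookup with an ownership check: only the netlink socket that bound the queue (peer_portid) may issue verdicts on it. A sketch; the parameter types and the specific ERR_PTR error codes are assumptions:

	static struct nfqnl_instance *
	verdict_instance_lookup(struct nfqnl_queue_net *q, u16 queue_num, u32 nlportid)
	{
		struct nfqnl_instance *queue;

		queue = instance_lookup(q, queue_num);
		if (!queue)
			return ERR_PTR(-ENODEV);

		if (queue->peer_portid != nlportid)
			return ERR_PTR(-EPERM);	/* only the owning socket may set verdicts */

		return queue;
	}
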
907 struct nfqnl_instance *queue;
914 queue = verdict_instance_lookup(q, queue_num,
916 if (IS_ERR(queue))
917 return PTR_ERR(queue);
926 spin_lock_bh(&queue->lock);
928 list_for_each_entry_safe(entry, tmp, &queue->queue_list, list) {
931 __dequeue_entry(queue, entry);
935 spin_unlock_bh(&queue->lock);
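
The batch-verdict path (lines 907-935) moves every queued entry up to a userspace-supplied maximum id onto a private list under the lock, then reinjects outside the lock. A sketch of that loop; the batch_list, the maxid variable, and the nfq_id_after() comparison helper are assumptions consistent with the fragments:

	LIST_HEAD(batch_list);
	struct nf_queue_entry *entry, *tmp;

	spin_lock_bh(&queue->lock);
	list_for_each_entry_safe(entry, tmp, &queue->queue_list, list) {
		if (nfq_id_after(entry->id, maxid))	/* assumed id-window helper */
			break;
		__dequeue_entry(queue, entry);
		list_add_tail(&entry->list, &batch_list);
	}
	spin_unlock_bh(&queue->lock);

	/* reinject after dropping the lock; nf_reinject() may re-enter nf_queue */
	list_for_each_entry_safe(entry, tmp, &batch_list, list)
		nf_reinject(entry, verdict);
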
957 struct nfqnl_instance *queue;
966 queue = instance_lookup(q, queue_num);
967 if (!queue)
968 queue = verdict_instance_lookup(q, queue_num,
970 if (IS_ERR(queue))
971 return PTR_ERR(queue);
979 entry = find_dequeue_entry(queue, ntohl(vhdr->id));
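
For a single verdict (lines 957-979) the id carried in the verdict header selects exactly one entry; if that entry was already flushed or verdicted, the lookup fails. A brief sketch of the matched call in context; the surrounding lines, including the verdicthdr_get() helper, are assumptions:

	struct nfqnl_msg_verdict_hdr *vhdr = verdicthdr_get(nfqa);	/* assumed helper */
	unsigned int verdict = ntohl(vhdr->verdict);
	struct nf_queue_entry *entry;

	entry = find_dequeue_entry(queue, ntohl(vhdr->id));
	if (entry == NULL)
		return -ENOENT;		/* already flushed or already verdicted */

	/* NFQA_MARK / NFQA_PAYLOAD attributes would be applied here */
	nf_reinject(entry, verdict);
	return 0;
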
1035 struct nfqnl_instance *queue;
1044 /* Obsolete commands without queue context */
1052 queue = instance_lookup(q, queue_num);
1053 if (queue && queue->peer_portid != NETLINK_CB(skb).portid) {
1061 if (queue) {
1065 queue = instance_create(q, queue_num,
1067 if (IS_ERR(queue)) {
1068 ret = PTR_ERR(queue);
1073 if (!queue) {
1077 instance_destroy(q, queue);
1091 if (!queue) {
1096 nfqnl_set_mode(queue, params->copy_mode,
1103 if (!queue) {
1108 spin_lock_bh(&queue->lock);
1109 queue->queue_maxlen = ntohl(*queue_maxlen);
1110 spin_unlock_bh(&queue->lock);
1116 if (!queue) {
1137 spin_lock_bh(&queue->lock);
1138 queue->flags &= ~mask;
1139 queue->flags |= flags & mask;
1140 spin_unlock_bh(&queue->lock);
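
The NFQA_CFG_* handling above (copy mode at line 1096, queue_maxlen at lines 1108-1110, flag mask at lines 1137-1140) is normally driven from userspace through libnetfilter_queue. As a usage illustration, a minimal sketch that binds queue 0, sets the copy mode and maximum length, and accepts every packet; error handling is omitted and the queue number is arbitrary:

	#include <stdio.h>
	#include <arpa/inet.h>
	#include <sys/socket.h>
	#include <linux/netfilter.h>		/* NF_ACCEPT */
	#include <libnetfilter_queue/libnetfilter_queue.h>

	static int cb(struct nfq_q_handle *qh, struct nfgenmsg *nfmsg,
		      struct nfq_data *nfa, void *data)
	{
		struct nfqnl_msg_packet_hdr *ph = nfq_get_msg_packet_hdr(nfa);

		/* the id echoes entry->id assigned by the kernel enqueue path */
		return nfq_set_verdict(qh, ntohl(ph->packet_id), NF_ACCEPT, 0, NULL);
	}

	int main(void)
	{
		struct nfq_handle *h = nfq_open();
		struct nfq_q_handle *qh = nfq_create_queue(h, 0, &cb, NULL);
		char buf[4096];
		int fd, n;

		nfq_set_mode(qh, NFQNL_COPY_PACKET, 0xffff);	/* -> NFQA_CFG_PARAMS */
		nfq_set_queue_maxlen(qh, 4096);			/* -> NFQA_CFG_QUEUE_MAXLEN */

		fd = nfq_fd(h);
		while ((n = recv(fd, buf, sizeof(buf), 0)) > 0)
			nfq_handle_packet(h, buf, n);

		nfq_destroy_queue(qh);
		nfq_close(h);
		return 0;
	}
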
1346 MODULE_DESCRIPTION("netfilter packet queue handler");