Lines Matching refs:queue

35 static void tx_start(struct b43legacy_pioqueue *queue)
37 b43legacy_pio_write(queue, B43legacy_PIO_TXCTL,
41 static void tx_octet(struct b43legacy_pioqueue *queue,
44 if (queue->need_workarounds) {
45 b43legacy_pio_write(queue, B43legacy_PIO_TXDATA, octet);
46 b43legacy_pio_write(queue, B43legacy_PIO_TXCTL,
49 b43legacy_pio_write(queue, B43legacy_PIO_TXCTL,
51 b43legacy_pio_write(queue, B43legacy_PIO_TXDATA, octet);
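The refs at 35-51 cover tx_start() and tx_octet() (this listing is apparently from the b43legacy wireless driver's PIO code). The need_workarounds branch, which ref 342 sets for old core revisions, swaps the order of the TXDATA and TXCTL writes. A minimal sketch of that ordering, assuming the driver's own declarations from pio.h; the B43legacy_PIO_TXCTL_WRITELO flag name is an assumption, not shown in the refs:

/* Illustrative sketch only; B43legacy_PIO_TXCTL_WRITELO is assumed from pio.h. */
static void tx_octet(struct b43legacy_pioqueue *queue, u8 octet)
{
        if (queue->need_workarounds) {
                /* Old cores: push the data octet first, then strobe the
                 * low-byte write through the control register. */
                b43legacy_pio_write(queue, B43legacy_PIO_TXDATA, octet);
                b43legacy_pio_write(queue, B43legacy_PIO_TXCTL,
                                    B43legacy_PIO_TXCTL_WRITELO);
        } else {
                /* Newer cores: arm the control register first, then
                 * write the data octet. */
                b43legacy_pio_write(queue, B43legacy_PIO_TXCTL,
                                    B43legacy_PIO_TXCTL_WRITELO);
                b43legacy_pio_write(queue, B43legacy_PIO_TXDATA, octet);
        }
}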
76 static void tx_data(struct b43legacy_pioqueue *queue,
84 if (queue->need_workarounds) {
87 b43legacy_pio_write(queue, B43legacy_PIO_TXDATA, data);
89 b43legacy_pio_write(queue, B43legacy_PIO_TXCTL,
95 b43legacy_pio_write(queue, B43legacy_PIO_TXDATA, data);
98 tx_octet(queue, packet[octets -
102 static void tx_complete(struct b43legacy_pioqueue *queue,
105 if (queue->need_workarounds) {
106 b43legacy_pio_write(queue, B43legacy_PIO_TXDATA,
108 b43legacy_pio_write(queue, B43legacy_PIO_TXCTL,
112 b43legacy_pio_write(queue, B43legacy_PIO_TXCTL,
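Refs 102-112 are tx_complete(), which finishes a frame. A hedged sketch, assuming the B43legacy_PIO_TXCTL_COMPLETE and _WRITELO flag names from pio.h; on workaround cores the last payload octet is pushed together with the completion strobe:

static void tx_complete(struct b43legacy_pioqueue *queue,
                        struct sk_buff *skb)
{
        if (queue->need_workarounds) {
                /* Old cores: write the final payload octet along with
                 * the COMPLETE strobe. */
                b43legacy_pio_write(queue, B43legacy_PIO_TXDATA,
                                    skb->data[skb->len - 1]);
                b43legacy_pio_write(queue, B43legacy_PIO_TXCTL,
                                    B43legacy_PIO_TXCTL_WRITELO |
                                    B43legacy_PIO_TXCTL_COMPLETE);
        } else {
                b43legacy_pio_write(queue, B43legacy_PIO_TXCTL,
                                    B43legacy_PIO_TXCTL_COMPLETE);
        }
}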
116 static u16 generate_cookie(struct b43legacy_pioqueue *queue,
126 switch (queue->mmio_base) {
154 struct b43legacy_pioqueue *queue = NULL;
159 queue = pio->queue0;
162 queue = pio->queue1;
165 queue = pio->queue2;
168 queue = pio->queue3;
176 *packet = &(queue->tx_packets_cache[packetindex]);
178 return queue;
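Refs 116-178 are the cookie round trip: generate_cookie() folds the queue identity (selected by mmio_base at ref 126) and the packet's index in tx_packets_cache into a 16-bit TX status cookie, and parse_cookie() maps it back to pio->queue0..queue3 and the cached packet (refs 154-178). A sketch of the encode side; the concrete nibble values and the index computation are assumptions, only the queue-nibble plus packet-index layout is taken from the refs:

/* Sketch: nibble values and index computation are illustrative assumptions. */
static u16 generate_cookie(struct b43legacy_pioqueue *queue,
                           struct b43legacy_pio_txpacket *packet)
{
        u16 cookie = 0x0000;

        switch (queue->mmio_base) {
        case B43legacy_MMIO_PIO1_BASE:
                cookie = 0x0000;
                break;
        case B43legacy_MMIO_PIO2_BASE:
                cookie = 0x1000;
                break;
        case B43legacy_MMIO_PIO3_BASE:
                cookie = 0x2000;
                break;
        case B43legacy_MMIO_PIO4_BASE:
                cookie = 0x3000;
                break;
        }
        /* Low bits: index of the packet in queue->tx_packets_cache[]. */
        cookie |= (u16)(packet - queue->tx_packets_cache);

        return cookie;
}

The decode side simply switches on the queue nibble to pick pio->queue0..queue3 and uses the remaining bits as packetindex into queue->tx_packets_cache[] (ref 176).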
185 static int pio_tx_write_fragment(struct b43legacy_pioqueue *queue,
198 err = b43legacy_generate_txhdr(queue->dev,
201 generate_cookie(queue, packet));
205 tx_start(queue);
207 if (queue->need_workarounds)
209 tx_data(queue, txhdr, (u8 *)skb->data, octets);
210 tx_complete(queue, skb);
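Refs 185-210 show the per-fragment write path: build the firmware TX header carrying the cookie, start the hardware transfer, stream header plus payload, then complete. A condensed sketch; the b43legacy_generate_txhdr() arguments beyond what refs 198-201 show, the header buffer, and the fw3 header struct name are assumptions:

static int pio_tx_write_fragment(struct b43legacy_pioqueue *queue,
                                 struct sk_buff *skb,
                                 struct b43legacy_pio_txpacket *packet,
                                 size_t txhdr_size)
{
        u8 txhdr[sizeof(struct b43legacy_txhdr_fw3)];   /* header buffer; layout assumed */
        unsigned int octets;
        int err;

        err = b43legacy_generate_txhdr(queue->dev,
                                       txhdr, skb->data, skb->len,
                                       IEEE80211_SKB_CB(skb),  /* middle arguments assumed */
                                       generate_cookie(queue, packet));
        if (err)
                return err;

        tx_start(queue);
        octets = skb->len + txhdr_size;
        if (queue->need_workarounds)
                octets++;               /* ref 207: old cores clock one extra octet */
        tx_data(queue, txhdr, (u8 *)skb->data, octets);
        tx_complete(queue, skb);

        return 0;
}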
218 struct b43legacy_pioqueue *queue = packet->queue;
226 list_move(&packet->list, &queue->txfree);
227 queue->nr_txfree++;
232 struct b43legacy_pioqueue *queue = packet->queue;
238 if (queue->tx_devq_size < octets) {
239 b43legacywarn(queue->dev->wl, "PIO queue too small. "
245 B43legacy_WARN_ON(queue->tx_devq_packets >
247 B43legacy_WARN_ON(queue->tx_devq_used > queue->tx_devq_size);
249 * TX queue. If not, return and let the TX tasklet
252 if (queue->tx_devq_packets == B43legacy_PIO_MAXTXDEVQPACKETS)
254 if (queue->tx_devq_used + octets > queue->tx_devq_size)
257 err = pio_tx_write_fragment(queue, skb, packet,
267 * (We must not overflow the device TX queue)
269 queue->tx_devq_packets++;
270 queue->tx_devq_used += octets;
275 list_move_tail(&packet->list, &queue->txrunning);
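Refs 218-275 cover descriptor recycling (the free path back to txfree at refs 226-227) and pio_tx_packet(), which gatekeeps the device-side queue: a frame larger than tx_devq_size is dropped with a warning, and a frame that does not fit right now is left on txqueue for the tasklet to retry. A sketch of those checks; the return-value convention, the header size constant, and the free_txpacket() signature are assumptions:

static int pio_tx_packet(struct b43legacy_pio_txpacket *packet)
{
        struct b43legacy_pioqueue *queue = packet->queue;
        struct sk_buff *skb = packet->skb;
        u16 octets;
        int err;

        octets = (u16)skb->len + sizeof(struct b43legacy_txhdr_fw3);
        if (queue->tx_devq_size < octets) {
                /* Can never fit into the device queue: warn and drop (ref 239). */
                b43legacywarn(queue->dev->wl, "PIO queue too small. "
                              "Dropping packet.\n");
                free_txpacket(packet, 1);
                return 0;
        }
        B43legacy_WARN_ON(queue->tx_devq_packets > B43legacy_PIO_MAXTXDEVQPACKETS);
        B43legacy_WARN_ON(queue->tx_devq_used > queue->tx_devq_size);
        /* Check for free space on the device TX queue. If there is none,
         * return and let the TX tasklet retry later (refs 249-254). */
        if (queue->tx_devq_packets == B43legacy_PIO_MAXTXDEVQPACKETS)
                return -EBUSY;
        if (queue->tx_devq_used + octets > queue->tx_devq_size)
                return -EBUSY;

        err = pio_tx_write_fragment(queue, skb, packet,
                                    sizeof(struct b43legacy_txhdr_fw3));
        if (err)
                return err;

        /* Account for the consumed space; we must not overflow the device
         * TX queue (refs 267-270). */
        queue->tx_devq_packets++;
        queue->tx_devq_used += octets;
        list_move_tail(&packet->list, &queue->txrunning);

        return 0;
}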
282 struct b43legacy_pioqueue *queue = (struct b43legacy_pioqueue *)d;
283 struct b43legacy_wldev *dev = queue->dev;
290 if (queue->tx_frozen)
292 txctl = b43legacy_pio_read(queue, B43legacy_PIO_TXCTL);
296 list_for_each_entry_safe(packet, tmp_packet, &queue->txqueue, list) {
298 * the device queue is full. In case of failure, the
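Refs 282-298 are the TX tasklet. It bails out while the queue is frozen or TXCTL still reports a suspended state, then walks txqueue and hands packets to pio_tx_packet() until one of them cannot be placed. A sketch; the lock name and the SUSPEND flag name are assumptions:

static void tx_tasklet(unsigned long d)
{
        struct b43legacy_pioqueue *queue = (struct b43legacy_pioqueue *)d;
        struct b43legacy_wldev *dev = queue->dev;
        struct b43legacy_pio_txpacket *packet, *tmp_packet;
        unsigned long flags;
        u16 txctl;
        int err;

        spin_lock_irqsave(&dev->wl->irq_lock, flags);   /* lock name assumed */
        if (queue->tx_frozen)
                goto out_unlock;
        txctl = b43legacy_pio_read(queue, B43legacy_PIO_TXCTL);
        if (txctl & B43legacy_PIO_TXCTL_SUSPEND)        /* flag name assumed */
                goto out_unlock;

        list_for_each_entry_safe(packet, tmp_packet, &queue->txqueue, list) {
                /* Try to push the packet to the device queue; this may not
                 * completely succeed if the device queue is full (ref 298). */
                err = pio_tx_packet(packet);
                if (err)
                        break;
        }
out_unlock:
        spin_unlock_irqrestore(&dev->wl->irq_lock, flags);
}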
312 static void setup_txqueues(struct b43legacy_pioqueue *queue)
317 queue->nr_txfree = B43legacy_PIO_MAXTXPACKETS;
319 packet = &(queue->tx_packets_cache[i]);
321 packet->queue = queue;
324 list_add(&packet->list, &queue->txfree);
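Refs 312-324, setup_txqueues(): the packet descriptors are a fixed array (tx_packets_cache) and all B43legacy_PIO_MAXTXPACKETS of them start out on the txfree list. This sketch follows those refs closely:

static void setup_txqueues(struct b43legacy_pioqueue *queue)
{
        struct b43legacy_pio_txpacket *packet;
        int i;

        queue->nr_txfree = B43legacy_PIO_MAXTXPACKETS;
        for (i = 0; i < B43legacy_PIO_MAXTXPACKETS; i++) {
                packet = &(queue->tx_packets_cache[i]);
                packet->queue = queue;
                INIT_LIST_HEAD(&packet->list);
                list_add(&packet->list, &queue->txfree);
        }
}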
332 struct b43legacy_pioqueue *queue;
336 queue = kzalloc(sizeof(*queue), GFP_KERNEL);
337 if (!queue)
340 queue->dev = dev;
341 queue->mmio_base = pio_mmio_base;
342 queue->need_workarounds = (dev->dev->id.revision < 3);
344 INIT_LIST_HEAD(&queue->txfree);
345 INIT_LIST_HEAD(&queue->txqueue);
346 INIT_LIST_HEAD(&queue->txrunning);
347 tasklet_init(&queue->txtask, tx_tasklet,
348 (unsigned long)queue);
354 qsize = b43legacy_read16(dev, queue->mmio_base
363 b43legacyerr(dev->wl, "PIO tx device-queue too small (%u)\n",
368 queue->tx_devq_size = qsize;
370 setup_txqueues(queue);
373 return queue;
376 kfree(queue);
377 queue = NULL;
381 static void cancel_transfers(struct b43legacy_pioqueue *queue)
385 tasklet_disable(&queue->txtask);
387 list_for_each_entry_safe(packet, tmp_packet, &queue->txrunning, list)
389 list_for_each_entry_safe(packet, tmp_packet, &queue->txqueue, list)
393 static void b43legacy_destroy_pioqueue(struct b43legacy_pioqueue *queue)
395 if (!queue)
398 cancel_transfers(queue);
399 kfree(queue);
423 struct b43legacy_pioqueue *queue;
426 queue = b43legacy_setup_pioqueue(dev, B43legacy_MMIO_PIO1_BASE);
427 if (!queue)
429 pio->queue0 = queue;
431 queue = b43legacy_setup_pioqueue(dev, B43legacy_MMIO_PIO2_BASE);
432 if (!queue)
434 pio->queue1 = queue;
436 queue = b43legacy_setup_pioqueue(dev, B43legacy_MMIO_PIO3_BASE);
437 if (!queue)
439 pio->queue2 = queue;
441 queue = b43legacy_setup_pioqueue(dev, B43legacy_MMIO_PIO4_BASE);
442 if (!queue)
444 pio->queue3 = queue;
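Refs 423-444, b43legacy_pio_init(): the four PIO queues are set up against the four MMIO register blocks in order, each stored in dev->pio. A sketch of the cascade for the first two queues; the error-unwind labels are assumptions, and queue2/queue3 (PIO3/PIO4, refs 436-444) follow the same pattern:

int b43legacy_pio_init(struct b43legacy_wldev *dev)
{
        struct b43legacy_pio *pio = &dev->pio;
        struct b43legacy_pioqueue *queue;
        int err = -ENOMEM;

        queue = b43legacy_setup_pioqueue(dev, B43legacy_MMIO_PIO1_BASE);
        if (!queue)
                goto out;
        pio->queue0 = queue;

        queue = b43legacy_setup_pioqueue(dev, B43legacy_MMIO_PIO2_BASE);
        if (!queue)
                goto err_destroy0;
        pio->queue1 = queue;

        /* ... queue2 (PIO3) and queue3 (PIO4) are set up the same way,
         * each failure unwinding the queues created before it ... */

        err = 0;
out:
        return err;
err_destroy0:
        b43legacy_destroy_pioqueue(pio->queue0);
        pio->queue0 = NULL;
        goto out;
}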
469 struct b43legacy_pioqueue *queue = dev->pio.queue1;
472 B43legacy_WARN_ON(queue->tx_suspended);
473 B43legacy_WARN_ON(list_empty(&queue->txfree));
475 packet = list_entry(queue->txfree.next, struct b43legacy_pio_txpacket,
479 list_move_tail(&packet->list, &queue->txqueue);
480 queue->nr_txfree--;
481 B43legacy_WARN_ON(queue->nr_txfree >= B43legacy_PIO_MAXTXPACKETS);
483 tasklet_schedule(&queue->txtask);
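Refs 469-483, b43legacy_pio_tx(): data frames are funneled through dev->pio.queue1. A free descriptor is taken from txfree, given the skb, moved to txqueue, and the tasklet is scheduled to push it out. A sketch close to what the refs show; the function signature and return value are assumptions:

int b43legacy_pio_tx(struct b43legacy_wldev *dev, struct sk_buff *skb)
{
        struct b43legacy_pioqueue *queue = dev->pio.queue1;
        struct b43legacy_pio_txpacket *packet;

        B43legacy_WARN_ON(queue->tx_suspended);
        B43legacy_WARN_ON(list_empty(&queue->txfree));

        packet = list_entry(queue->txfree.next,
                            struct b43legacy_pio_txpacket, list);
        packet->skb = skb;

        list_move_tail(&packet->list, &queue->txqueue);
        queue->nr_txfree--;
        B43legacy_WARN_ON(queue->nr_txfree >= B43legacy_PIO_MAXTXPACKETS);

        tasklet_schedule(&queue->txtask);

        return 0;
}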
491 struct b43legacy_pioqueue *queue;
496 queue = parse_cookie(dev, status->cookie, &packet);
497 B43legacy_WARN_ON(!queue);
502 queue->tx_devq_packets--;
503 queue->tx_devq_used -= (packet->skb->len +
545 if (!list_empty(&queue->txqueue))
546 tasklet_schedule(&queue->txtask);
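Refs 491-546 are the TX status path: the cookie from the hardware status report is parsed back into its queue and packet, the device-queue accounting done in pio_tx_packet() is reversed, the descriptor is recycled, and the tasklet is rescheduled if more frames are waiting. A sketch; status reporting to mac80211 is elided, and the status struct, header size and free_txpacket() call are assumptions:

void b43legacy_pio_handle_txstatus(struct b43legacy_wldev *dev,
                                   const struct b43legacy_txstatus *status)
{
        struct b43legacy_pioqueue *queue;
        struct b43legacy_pio_txpacket *packet;

        queue = parse_cookie(dev, status->cookie, &packet);
        B43legacy_WARN_ON(!queue);

        /* Give the consumed space back to the device queue. */
        queue->tx_devq_packets--;
        queue->tx_devq_used -= (packet->skb->len +
                                sizeof(struct b43legacy_txhdr_fw3));

        /* ... report TX status to mac80211, then recycle the descriptor ... */
        free_txpacket(packet, 1);

        if (!list_empty(&queue->txqueue))
                tasklet_schedule(&queue->txtask);
}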
549 static void pio_rx_error(struct b43legacy_pioqueue *queue,
555 b43legacyerr(queue->dev->wl, "PIO RX error: %s\n", error);
556 b43legacy_pio_write(queue, B43legacy_PIO_RXCTL,
559 B43legacy_WARN_ON(queue->mmio_base != B43legacy_MMIO_PIO1_BASE);
562 b43legacy_pio_read(queue, B43legacy_PIO_RXDATA);
567 void b43legacy_pio_rx(struct b43legacy_pioqueue *queue)
578 tmp = b43legacy_pio_read(queue, B43legacy_PIO_RXCTL);
581 b43legacy_pio_write(queue, B43legacy_PIO_RXCTL,
585 tmp = b43legacy_pio_read(queue, B43legacy_PIO_RXCTL);
590 b43legacydbg(queue->dev->wl, "PIO RX timed out\n");
594 len = b43legacy_pio_read(queue, B43legacy_PIO_RXDATA);
596 pio_rx_error(queue, 0, "len > 0x700");
599 if (unlikely(len == 0 && queue->mmio_base !=
601 pio_rx_error(queue, 0, "len == 0");
605 if (queue->mmio_base == B43legacy_MMIO_PIO4_BASE)
610 tmp = b43legacy_pio_read(queue, B43legacy_PIO_RXDATA);
616 pio_rx_error(queue,
617 (queue->mmio_base == B43legacy_MMIO_PIO1_BASE),
621 if (queue->mmio_base == B43legacy_MMIO_PIO4_BASE) {
626 b43legacy_handle_hwtxstatus(queue->dev, hw);
633 pio_rx_error(queue, 1, "OOM");
638 tmp = b43legacy_pio_read(queue, B43legacy_PIO_RXDATA);
642 tmp = b43legacy_pio_read(queue, B43legacy_PIO_RXDATA);
645 b43legacy_rx(queue->dev, skb, rxhdr);
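Refs 549-645 are the RX side: pio_rx_error() logs the error, resets RXCTL and, on the data queue, drains RXDATA; b43legacy_pio_rx() acknowledges the data-available bit, polls RXCTL until the frame is ready, reads the length word, reads the header words, routes queue3 (PIO4) frames to the hardware TX-status handler, and copies everything else into an skb for b43legacy_rx(). A condensed sketch of that loop; the RXCTL flag names, the poll count and delay, and the header handling are assumptions:

void b43legacy_pio_rx(struct b43legacy_pioqueue *queue)
{
        u16 tmp, len;
        int i;

        tmp = b43legacy_pio_read(queue, B43legacy_PIO_RXCTL);
        if (!(tmp & B43legacy_PIO_RXCTL_DATAAVAILABLE))         /* flag assumed */
                return;
        /* Acknowledge the pending frame. */
        b43legacy_pio_write(queue, B43legacy_PIO_RXCTL,
                            B43legacy_PIO_RXCTL_DATAAVAILABLE);

        /* Poll until the hardware marks the data as ready (refs 585-590). */
        for (i = 0; i < 10; i++) {
                tmp = b43legacy_pio_read(queue, B43legacy_PIO_RXCTL);
                if (tmp & B43legacy_PIO_RXCTL_READY)            /* flag assumed */
                        goto data_ready;
                udelay(10);
        }
        b43legacydbg(queue->dev->wl, "PIO RX timed out\n");
        return;
data_ready:
        len = b43legacy_pio_read(queue, B43legacy_PIO_RXDATA);
        if (unlikely(len > 0x700)) {
                pio_rx_error(queue, 0, "len > 0x700");
                return;
        }
        /* ... read the RX header words from RXDATA; queue3 (PIO4) frames carry
         * hardware TX status and go to b43legacy_handle_hwtxstatus() (refs
         * 621-626); otherwise allocate an skb, read the payload words (refs
         * 633-642) and hand the frame to b43legacy_rx() ... */
}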
648 void b43legacy_pio_tx_suspend(struct b43legacy_pioqueue *queue)
650 b43legacy_power_saving_ctl_bits(queue->dev, -1, 1);
651 b43legacy_pio_write(queue, B43legacy_PIO_TXCTL,
652 b43legacy_pio_read(queue, B43legacy_PIO_TXCTL)
656 void b43legacy_pio_tx_resume(struct b43legacy_pioqueue *queue)
658 b43legacy_pio_write(queue, B43legacy_PIO_TXCTL,
659 b43legacy_pio_read(queue, B43legacy_PIO_TXCTL)
661 b43legacy_power_saving_ctl_bits(queue->dev, -1, -1);
662 tasklet_schedule(&queue->txtask);
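Refs 648-662, suspend/resume: both toggle a suspend bit in TXCTL with a read-modify-write and bracket it with power-saving hints; resume also kicks the tasklet so queued frames go out again. A sketch, with the SUSPEND flag name assumed from pio.h:

void b43legacy_pio_tx_suspend(struct b43legacy_pioqueue *queue)
{
        b43legacy_power_saving_ctl_bits(queue->dev, -1, 1);
        b43legacy_pio_write(queue, B43legacy_PIO_TXCTL,
                            b43legacy_pio_read(queue, B43legacy_PIO_TXCTL)
                            | B43legacy_PIO_TXCTL_SUSPEND);
}

void b43legacy_pio_tx_resume(struct b43legacy_pioqueue *queue)
{
        b43legacy_pio_write(queue, B43legacy_PIO_TXCTL,
                            b43legacy_pio_read(queue, B43legacy_PIO_TXCTL)
                            & ~B43legacy_PIO_TXCTL_SUSPEND);
        b43legacy_power_saving_ctl_bits(queue->dev, -1, -1);
        tasklet_schedule(&queue->txtask);
}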