Searched refs:queue (Results 76 - 100 of 742) sorted by relevance

/drivers/watchdog/
cpu5wdt.c 65 int queue; member in struct:__anon7257
85 if (cpu5wdt_device.queue && ticks)
109 if (!cpu5wdt_device.queue) {
110 cpu5wdt_device.queue = 1;
226 cpu5wdt_device.queue = 0;
265 if (cpu5wdt_device.queue) {
266 cpu5wdt_device.queue = 0;
rdc321x_wdt.c 60 int queue; member in struct:__anon7287
88 if (rdc321x_wdt_device.queue && ticks)
107 if (!rdc321x_wdt_device.queue) {
108 rdc321x_wdt_device.queue = 1;
261 rdc321x_wdt_device.queue = 0;
276 if (rdc321x_wdt_device.queue) {
277 rdc321x_wdt_device.queue = 0;
/drivers/ptp/
ptp_chardev.c 274 struct timestamp_event_queue *queue = &ptp->tsevq; local
292 ptp->defunct || queue_cnt(queue))) {
308 spin_lock_irqsave(&queue->lock, flags);
310 qcnt = queue_cnt(queue);
316 event[i] = queue->buf[queue->head];
317 queue->head = (queue->head + 1) % PTP_MAX_TIMESTAMPS;
320 spin_unlock_irqrestore(&queue->lock, flags);
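
Note on the ptp_chardev.c hits above: they show the classic single-consumer ring-buffer pop, done under a spinlock with the head index advanced modulo the buffer size (PTP_MAX_TIMESTAMPS). A minimal userspace sketch of the same pattern, with illustrative names and a pthread mutex standing in for the irqsave spinlock:

#include <pthread.h>
#include <stdbool.h>

#define RING_SIZE 128                         /* stand-in for PTP_MAX_TIMESTAMPS */

struct event_ring {
        int buf[RING_SIZE];
        int head;                             /* next slot to read */
        int cnt;                              /* events currently queued */
        pthread_mutex_t lock;                 /* stands in for queue->lock */
};

/* Pop one event; returns false if the ring is empty. */
static bool ring_pop(struct event_ring *q, int *out)
{
        bool ok = false;

        pthread_mutex_lock(&q->lock);
        if (q->cnt > 0) {
                *out = q->buf[q->head];
                q->head = (q->head + 1) % RING_SIZE;  /* wrap like queue->head */
                q->cnt--;
                ok = true;
        }
        pthread_mutex_unlock(&q->lock);
        return ok;
}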
/drivers/misc/vmw_vmci/
vmci_queue_pair.c 69 * In more detail. When a VMCI queue pair is first created, it will be in the
78 * vmci_qp_broker_set_page_store to specify the UVAs of the queue pair at
83 * - VMCIQPB_CREATED_MEM: this state is the result when the queue pair
84 * is created by a VMX using the queue pair device backend that
85 * sets the UVAs of the queue pair immediately and stores the
89 * Once the queue pair is in one of the created states (with the exception of
91 * queue pair. Again we have two new states possible:
96 * - from VMCIQPB_CREATED_NO_MEM when a new-style VMX allocates a queue
97 * pair, and attaches to a queue pair previously created by the host side.
99 * - from VMCIQPB_CREATED_MEM when the host side attaches to a queue pai
272 struct vmci_queue *queue = q; local
296 struct vmci_queue *queue; local
342 __qp_memcpy_to_queue(struct vmci_queue *queue, u64 queue_offset, const void *src, size_t size, bool is_iovec) argument
402 __qp_memcpy_from_queue(void *dest, const struct vmci_queue *queue, u64 queue_offset, size_t size, bool is_iovec) argument
560 qp_memcpy_to_queue(struct vmci_queue *queue, u64 queue_offset, const void *src, size_t src_offset, size_t size) argument
568 qp_memcpy_from_queue(void *dest, size_t dest_offset, const struct vmci_queue *queue, u64 queue_offset, size_t size) argument
580 qp_memcpy_to_queue_iov(struct vmci_queue *queue, u64 queue_offset, const void *src, size_t src_offset, size_t size) argument
596 qp_memcpy_from_queue_iov(void *dest, size_t dest_offset, const struct vmci_queue *queue, u64 queue_offset, size_t size) argument
616 struct vmci_queue *queue; local
643 qp_host_free_queue(struct vmci_queue *queue, u64 queue_size) argument
687 qp_acquire_queue_mutex(struct vmci_queue *queue) argument
698 qp_release_queue_mutex(struct vmci_queue *queue) argument
[all...]
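
The long comment block quoted from vmci_queue_pair.c above describes a small state machine: a queue pair starts in a CREATED state (with or without backing memory) and moves to an ATTACHED state when the peer attaches. A hedged, simplified C model of those transitions; the enum and function names here are illustrative, not the driver's:

enum qp_broker_state {
        QPB_CREATED_NO_MEM,   /* created, page store supplied later via set_page_store */
        QPB_CREATED_MEM,      /* created with UVAs set immediately */
        QPB_ATTACHED_NO_MEM,
        QPB_ATTACHED_MEM,
};

/* Attach a peer to an existing queue pair, per the comment above:
 * CREATED_NO_MEM -> ATTACHED_NO_MEM (new-style VMX attaching),
 * CREATED_MEM    -> ATTACHED_MEM    (host attaches to a backed pair). */
static int qp_attach(enum qp_broker_state *state)
{
        switch (*state) {
        case QPB_CREATED_NO_MEM:
                *state = QPB_ATTACHED_NO_MEM;
                return 0;
        case QPB_CREATED_MEM:
                *state = QPB_ATTACHED_MEM;
                return 0;
        default:
                return -1;    /* already attached or invalid */
        }
}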
/drivers/net/ethernet/ibm/ehea/
ehea_qmr.c 39 static void *hw_qpageit_get_inc(struct hw_queue *queue) argument
41 void *retvalue = hw_qeit_get(queue);
43 queue->current_q_offset += queue->pagesize;
44 if (queue->current_q_offset > queue->queue_length) {
45 queue->current_q_offset -= queue->pagesize;
54 static int hw_queue_ctor(struct hw_queue *queue, const u32 nr_of_pages, argument
66 queue
104 hw_queue_dtor(struct hw_queue *queue) argument
[all...]
/drivers/usb/musb/
musb_gadget.h 134 struct list_head *queue = &ep->req_list; local
136 if (list_empty(queue))
138 return container_of(queue->next, struct musb_request, list);
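
The musb_gadget.h hit above is the standard intrusive-list "first entry" idiom: bail out on an empty list, otherwise map the first list node back to its enclosing request with container_of. A self-contained userspace sketch of the idiom (the kernel provides container_of and list_head; the minimal re-implementations and names below are illustrative):

#include <stddef.h>

#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct list_node { struct list_node *next, *prev; };

struct request {
        int id;
        struct list_node list;        /* embedded queue linkage */
};

/* First request on the queue, or NULL; an empty kernel-style list
 * head points back at itself, which is what list_empty() tests. */
static struct request *next_request(struct list_node *queue)
{
        if (queue->next == queue)     /* list_empty() */
                return NULL;
        return container_of(queue->next, struct request, list);
}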
/drivers/usb/gadget/udc/
s3c-hsudc.c 105 * @queue: Transfer request queue for the endpoint.
114 struct list_head queue; member in struct:s3c_hsudc_ep
124 * @queue: Used for inserting this request to the endpoint request queue.
128 struct list_head queue; member in struct:s3c_hsudc_req
251 list_del_init(&hsreq->queue);
275 while (!list_empty(&hsep->queue)) {
276 hsreq = list_entry(hsep->queue.next,
277 struct s3c_hsudc_req, queue);
[all...]
goku_udc.c 276 INIT_LIST_HEAD(&req->queue);
289 WARN_ON(!list_empty(&req->queue));
301 list_del_init(&req->queue);
320 /* don't modify queue heads during completion callback */
487 if (dbuff && !list_empty(&ep->queue)) {
488 req = list_entry(ep->queue.next,
489 struct goku_request, queue);
521 if (unlikely(list_empty (&ep->queue)))
523 req = list_entry(ep->queue.next, struct goku_request, queue);
[all...]
omap_udc.c 276 INIT_LIST_HEAD(&req->queue);
297 list_del_init(&req->queue);
315 /* don't modify queue heads during completion callback */
419 /* return: 0 = still running, 1 = queue empty, negative = errno */
648 if (!list_empty(&ep->queue)) {
649 req = container_of(ep->queue.next,
650 struct omap_req, queue);
655 if (!list_empty(&ep->queue)) {
656 req = container_of(ep->queue.next,
657 struct omap_req, queue);
[all...]
/drivers/block/zram/
zram_drv.h 103 struct request_queue *queue; member in struct:zram
/drivers/infiniband/hw/cxgb4/
t4.h 148 #define T4_ERR_DDP_QUEUE_NUM 0x17 /* invalid ddp queue number */
287 union t4_wr *queue; member in struct:t4_sq
313 union t4_recv_wr *queue; member in struct:t4_rq
380 return wq->rq.queue[wq->rq.size].status.host_wq_pidx;
430 return wq->sq.queue[wq->sq.size].status.host_wq_pidx;
458 /* Flush host queue memory writes. */
482 /* Flush host queue memory writes. */
504 return wq->rq.queue[wq->rq.size].status.qp_err;
509 wq->rq.queue[wq->rq.size].status.qp_err = 1;
514 wq->rq.queue[w
532 struct t4_cqe *queue; member in struct:t4_cq
[all...]
/drivers/net/fddi/skfp/h/
hwmtm.h 111 SMbuf *mb_free ; /* free queue */
150 struct s_smt_tx_queue *tx_p ; /* pointer to the transmit queue */
230 * txd *HWM_GET_TX_USED(smc,queue)
234 * number of used TxDs for the queue, specified by the index.
236 * para queue the number of the send queue: Can be specified by
239 * return number of used TxDs for this send queue
243 #define HWM_GET_TX_USED(smc,queue) (int) (smc)->hw.fp.tx_q[queue].tx_used
247 * txd *HWM_GET_CURR_TXD(smc,queue)
[all...]
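
The hwmtm.h documentation above specifies HWM_GET_TX_USED(smc,queue) as an O(1) read of the used-TxD count for the send queue selected by index, and line 243 shows the macro itself. A self-contained miniature of that accessor pattern (types and names are stand-ins for the skfp ones):

#define NUM_TX_QUEUES 2

struct tx_queue { int tx_used; /* TxDs currently in flight */ };
struct smc_mini { struct tx_queue tx_q[NUM_TX_QUEUES]; };

/* Same shape as HWM_GET_TX_USED(smc,queue) quoted above. */
#define GET_TX_USED(smc, queue) ((int)(smc)->tx_q[(queue)].tx_used)

A caller would test, e.g., GET_TX_USED(&smc, 0) == 0 before tearing a send queue down.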
/drivers/net/wireless/iwlwifi/
iwl-trans.h 149 * 0:7 tfd index - position within TX queue
150 * 8:12 TX queue id
198 * command queue, but after other high priority commands. valid only
368 * @cmd_queue: the index of the command queue.
380 * @scd_set_active: should the transport configure the SCD for HCMD queue
450 * @txq_enable: setup a queue. To setup an AC queue, use the
452 * this one. The op_mode must not configure the HCMD queue. The scheduler
455 * @txq_disable: de-configure a Tx queue to send AMPDUs
505 struct iwl_device_cmd *dev_cmd, int queue);
765 iwl_trans_tx(struct iwl_trans *trans, struct sk_buff *skb, struct iwl_device_cmd *dev_cmd, int queue) argument
777 iwl_trans_reclaim(struct iwl_trans *trans, int queue, int ssn, struct sk_buff_head *skbs) argument
786 iwl_trans_txq_disable(struct iwl_trans *trans, int queue, bool configure_scd) argument
793 iwl_trans_txq_enable_cfg(struct iwl_trans *trans, int queue, u16 ssn, const struct iwl_trans_txq_scd_cfg *cfg) argument
804 iwl_trans_txq_enable(struct iwl_trans *trans, int queue, int fifo, int sta_id, int tid, int frame_limit, u16 ssn) argument
819 iwl_trans_ac_txq_enable(struct iwl_trans *trans, int queue, int fifo) argument
[all...]
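
The iwl-trans.h comment above documents a packed status word: bits 0:7 hold the TFD index within the TX queue and bits 8:12 the TX queue id. A hedged sketch of the corresponding field extraction (mask and helper names are illustrative, not iwlwifi's):

#include <stdint.h>

#define TFD_INDEX_MASK 0x00ffu        /* bits 0:7  */
#define TXQ_ID_SHIFT   8
#define TXQ_ID_MASK    0x1f00u        /* bits 8:12 */

static inline uint16_t tfd_index(uint16_t status)
{
        return status & TFD_INDEX_MASK;
}

static inline uint16_t txq_id(uint16_t status)
{
        return (status & TXQ_ID_MASK) >> TXQ_ID_SHIFT;
}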
/drivers/net/wireless/prism54/
isl_38xx.c 57 /* data tx queue not empty */
61 /* management tx queue not empty */
66 /* data rx queue not empty */
70 /* management rx queue not empty */
102 /* either data or management transmit queue has a frame pending
223 isl38xx_in_queue(isl38xx_control_block *cb, int queue) argument
225 const s32 delta = (le32_to_cpu(cb->driver_curr_frag[queue]) -
226 le32_to_cpu(cb->device_curr_frag[queue]));
228 /* determine the amount of fragments in the queue depending on the type
229 * of the queue, eithe
[all...]
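
isl38xx_in_queue() above measures queue occupancy as the difference between a driver-side fragment counter and a device-side one. A simplified standalone version (struct and field names are stand-ins; the real driver also converts from little-endian and adjusts the result per queue type):

#include <stdint.h>

struct ctrl_block_mini {
        uint32_t driver_curr_frag[4]; /* fragments the driver has queued   */
        uint32_t device_curr_frag[4]; /* fragments the device has consumed */
};

static int frags_in_queue(const struct ctrl_block_mini *cb, int queue)
{
        /* Unsigned subtraction stays correct across counter wraparound. */
        return (int)(cb->driver_curr_frag[queue] -
                     cb->device_curr_frag[queue]);
}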
/drivers/scsi/aic7xxx/aicasm/
aicasm.h 46 #include "../queue.h"
48 #include <sys/queue.h>
/drivers/net/ethernet/freescale/
ucc_geth_ethtool.c 9 * Can only get/set settings of the first queue.
219 int queue = 0; local
226 ring->rx_pending = ug_info->bdRingLenRx[queue];
227 ring->rx_mini_pending = ug_info->bdRingLenRx[queue];
228 ring->rx_jumbo_pending = ug_info->bdRingLenRx[queue];
229 ring->tx_pending = ug_info->bdRingLenTx[queue];
238 int queue = 0, ret = 0; local
256 ug_info->bdRingLenRx[queue] = ring->rx_pending;
257 ug_info->bdRingLenTx[queue] = ring->tx_pending;
/drivers/staging/rtl8712/
osdep_service.h 46 struct list_head queue; member in struct:__queue
56 INIT_LIST_HEAD(&((pqueue)->queue)); \
/drivers/ide/
ide-pm.c 21 rq = blk_get_request(drive->queue, READ, __GFP_WAIT);
29 ret = blk_execute_rq(drive->queue, NULL, rq, 0);
61 rq = blk_get_request(drive->queue, READ, __GFP_WAIT);
68 err = blk_execute_rq(drive->queue, NULL, rq, 1);
179 * This function cleans up the current PM request and stops the queue
184 struct request_queue *q = drive->queue;
229 struct request_queue *q = drive->queue;
/drivers/media/pci/cx23885/
cx23885-vbi.c 214 list_add_tail(&buf->queue, &q->active);
222 queue);
224 list_add_tail(&buf->queue, &q->active);
237 struct cx23885_buffer, queue);
253 struct cx23885_buffer, queue);
255 list_del(&buf->queue);
/drivers/media/platform/omap3isp/
ispvideo.c 320 * Video queue operations
323 static int isp_video_queue_setup(struct vb2_queue *queue, argument
328 struct isp_video_fh *vfh = vb2_get_drv_priv(queue);
354 * queue handler, which can't return an error, this check is just a best
374 * isp_video_buffer_queue - Add buffer to streaming queue
414 video->ops->queue(video, buffer);
438 * Remove the current video buffer from the DMA queue and fill its timestamp and
445 * The DMA queue is expected to contain at least one buffer.
447 * Return a pointer to the next buffer in the DMA queue, or NULL if the queue i
1233 struct vb2_queue *queue; local
[all...]
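
The ispvideo.c comments above describe the IRQ-time step of the DMA queue: remove the just-completed buffer from the head, fill in its timestamp, and return the next queued buffer (or NULL when the queue has run dry). A minimal singly-linked model of that step, with illustrative names rather than the driver's vb2 structures:

#include <stddef.h>

struct buf {
        struct buf *next;
        long timestamp;
};

/* Complete the head buffer: stamp it, unlink it from the DMA queue,
 * and return the new head (the next buffer to receive DMA), or NULL. */
static struct buf *complete_head(struct buf **queue, long now)
{
        struct buf *done = *queue;

        if (!done)
                return NULL;          /* queue unexpectedly empty */
        done->timestamp = now;
        *queue = done->next;
        return *queue;
}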
/drivers/md/
raid0.c 198 if (rdev1->bdev->bd_disk->queue->merge_bvec_fn)
266 mddev->queue->backing_dev_info.congested_fn = raid0_congested;
267 mddev->queue->backing_dev_info.congested_data = mddev;
273 if ((mddev->chunk_sectors << 9) % queue_logical_block_size(mddev->queue)) {
280 blk_queue_io_min(mddev->queue, mddev->chunk_sectors << 9);
281 blk_queue_io_opt(mddev->queue,
285 queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, mddev->queue);
287 queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, mddev->queue);
359 * @q: request queue
439 blk_queue_max_hw_sectors(mddev->queue, mdde
[all...]
/drivers/media/platform/vsp1/
vsp1_video.c 566 * Return the next queued buffer or NULL if the queue is empty.
585 struct vsp1_video_buffer, queue);
593 list_del(&done->queue);
597 struct vsp1_video_buffer, queue);
622 video->ops->queue(video, buf);
773 list_add_tail(&buf->queue, &video->irqqueue);
781 video->ops->queue(video, buf);
784 if (vb2_is_streaming(&video->queue) &&
875 /* Remove all buffers from the IRQ queue. */
877 list_for_each_entry(buffer, &video->irqqueue, queue)
[all...]
/drivers/s390/net/
qeth_core_main.c 64 static void qeth_notify_skbs(struct qeth_qdio_out_q *queue,
68 static void qeth_clear_output_buffer(struct qeth_qdio_out_q *queue,
394 dev_err(&card->gdev->dev, "Failed to create completion queue\n");
512 static inline int qeth_is_cq(struct qeth_card *card, unsigned int queue) argument
516 queue != 0 &&
517 queue == card->qdio.no_in_queues - 1;
1240 static void qeth_clear_output_buffer(struct qeth_qdio_out_q *queue, argument
1248 atomic_dec(&queue->set_pci_flags_count);
1253 for (i = 0; i < QETH_MAX_BUFFER_ELEMENTS(queue->card); ++i) {
1360 /* CHPP field bit 6 == 1 -> single queue */
3246 struct qeth_qdio_q *queue = card->qdio.in_q; local
3351 qeth_switch_to_packing_if_needed(struct qeth_qdio_out_q *queue) argument
3371 qeth_switch_to_nonpacking_if_needed(struct qeth_qdio_out_q *queue) argument
3407 qeth_flush_buffers_on_no_pci(struct qeth_qdio_out_q *queue) argument
3423 qeth_flush_buffers(struct qeth_qdio_out_q *queue, int index, int count) argument
3505 qeth_check_outbound_queue(struct qeth_qdio_out_q *queue) argument
3545 qeth_qdio_start_poll(struct ccw_device *ccwdev, int queue, unsigned long card_ptr) argument
3585 qeth_qdio_cq_handler(struct qeth_card *card, unsigned int qdio_err, unsigned int queue, int first_element, int count) argument
3654 qeth_qdio_input_handler(struct ccw_device *ccwdev, unsigned int qdio_err, unsigned int queue, int first_elem, int count, unsigned long card_ptr) argument
3677 struct qeth_qdio_out_q *queue = card->qdio.out_qs[__queue]; local
3936 qeth_fill_buffer(struct qeth_qdio_out_q *queue, struct qeth_qdio_out_buffer *buf, struct sk_buff *skb, struct qeth_hdr *hdr, int offset, int hd_len) argument
3998 qeth_do_send_packet_fast(struct qeth_card *card, struct qeth_qdio_out_q *queue, struct sk_buff *skb, struct qeth_hdr *hdr, int elements_needed, int offset, int hd_len) argument
4030 qeth_do_send_packet(struct qeth_card *card, struct qeth_qdio_out_q *queue, struct sk_buff *skb, struct qeth_hdr *hdr, int elements_needed) argument
[all...]
/drivers/message/i2o/
i2o_block.c 26 * Added a queue depth.
29 * Removed queue walk, fixed for 64bitness.
34 * Heavily chop down the queue depths
88 * Frees the request queue, gendisk and the i2o_block_device structure.
92 blk_cleanup_queue(dev->gd->queue);
289 INIT_LIST_HEAD(&ireq->queue);
356 * @q: request queue for the request
393 * i2o_block_delayed_request_fn - delayed request queue function
394 * @work: the delayed request with the queue to start
396 * If the request queue i
963 struct request_queue *queue; local
1030 struct request_queue *queue; local
[all...]
/drivers/media/pci/cx88/
cx88-vbi.c 105 buf = list_entry(q->active.next, struct cx88_buffer, vb.queue);
109 list_for_each_entry(buf, &q->active, vb.queue)
130 buf = list_entry(q->active.next, struct cx88_buffer, vb.queue);
131 list_del(&buf->vb.queue);
207 list_add_tail(&buf->vb.queue,&q->active);
216 prev = list_entry(q->active.prev, struct cx88_buffer, vb.queue);
217 list_add_tail(&buf->vb.queue,&q->active);

Completed in 1074 milliseconds
