Searched defs:mq (Results 1 - 25 of 25) sorted by relevance

/drivers/misc/sgi-gru/
grukservices.h 45 void *mq; /* message queue vaddress */ member in struct:gru_message_queue_desc
46 unsigned long mq_gpa; /* global address of mq */
grukservices.c 146 #define HSTATUS(mq, h) ((mq) + offsetof(struct message_queue, hstatus[h]))
555 struct message_queue *mq = p; local
559 memset(mq, 0, bytes);
560 mq->start = &mq->data;
561 mq->start2 = &mq->data + (qlines / 2 - 1) * GRU_CACHE_LINE_BYTES;
562 mq->next = &mq
840 struct message_queue *mq = mqd->mq; local
872 struct message_queue *mq = mqd->mq; local
999 void *p, *mq; local
[all...]
/drivers/scsi/arm/
msgqueue.c 27 struct msgqueue_entry *mq; local
29 if ((mq = msgq->free) != NULL)
30 msgq->free = mq->next;
32 return mq;
36 * Function: void mqe_free(MsgQueue_t *msgq, struct msgqueue_entry *mq)
39 * mq - message queue entry to free
41 static void mqe_free(MsgQueue_t *msgq, struct msgqueue_entry *mq) argument
43 if (mq) {
44 mq->next = msgq->free;
45 msgq->free = mq;
85 struct msgqueue_entry *mq = msgq->qe; local
103 struct msgqueue_entry *mq; local
120 struct msgqueue_entry *mq = mqe_alloc(msgq); local
153 struct msgqueue_entry *mq, *mqnext; local
[all...]
/drivers/infiniband/hw/amso1100/
c2_ae.c 149 struct c2_mq *mq = c2dev->qptr_array[mq_index]; local
164 wr = c2_mq_consume(mq);
326 c2_mq_free(mq);
c2_cq.c 92 q = &cq->mq;
138 ce = c2_mq_consume(&cq->mq);
150 c2_mq_free(&cq->mq);
151 ce = c2_mq_consume(&cq->mq);
196 c2_mq_free(&cq->mq);
230 shared = cq->mq.peer;
250 ret = !c2_mq_empty(&cq->mq);
257 static void c2_free_cq_buf(struct c2_dev *c2dev, struct c2_mq *mq) argument
259 dma_free_coherent(&c2dev->pcidev->dev, mq->q_size * mq
263 c2_alloc_cq_buf(struct c2_dev *c2dev, struct c2_mq *mq, size_t q_size, size_t msg_size) argument
[all...]
c2_provider.h 100 struct c2_mq mq; member in struct:c2_cq
/drivers/input/joystick/
maplecontrol.c 26 static void dc_pad_callback(struct mapleq *mq) argument
29 struct maple_device *mapledev = mq->dev;
32 unsigned char *res = mq->recvbuf->buf;
/drivers/input/keyboard/
maple_keyb.c 139 static void dc_kbd_callback(struct mapleq *mq) argument
141 struct maple_device *mapledev = mq->dev;
143 unsigned long *buf = (unsigned long *)(mq->recvbuf->buf);
/drivers/input/mouse/
maplemouse.c 26 static void dc_mouse_callback(struct mapleq *mq) argument
29 struct maple_device *mapledev = mq->dev;
32 unsigned char *res = mq->recvbuf->buf;
/drivers/isdn/capi/
capilib.c 47 struct capilib_msgidqueue *mq; local
48 if ((mq = np->msgidfree) == NULL)
50 np->msgidfree = mq->next;
51 mq->msgid = msgid;
52 mq->next = NULL;
54 np->msgidlast->next = mq;
55 np->msgidlast = mq;
57 np->msgidqueue = mq;
67 struct capilib_msgidqueue *mq = *pp; local
68 *pp = mq
[all...]
/drivers/mmc/card/
queue.c 31 struct mmc_queue *mq = q->queuedata; local
41 if (mq && mmc_card_removed(mq->card))
51 struct mmc_queue *mq = d; local
52 struct request_queue *q = mq->queue;
56 down(&mq->thread_sem);
65 mq->mqrq_cur->req = req;
68 if (req || mq->mqrq_prev->req) {
71 mq->issue_fn(mq, re
115 struct mmc_queue *mq = q->queuedata; local
190 mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card, spinlock_t *lock, const char *subname) argument
321 mmc_cleanup_queue(struct mmc_queue *mq) argument
362 mmc_packed_init(struct mmc_queue *mq, struct mmc_card *card) argument
394 mmc_packed_clean(struct mmc_queue *mq) argument
413 mmc_queue_suspend(struct mmc_queue *mq) argument
433 mmc_queue_resume(struct mmc_queue *mq) argument
449 mmc_queue_packed_map_sg(struct mmc_queue *mq, struct mmc_packed *packed, struct scatterlist *sg, enum mmc_packed_type cmd_type) argument
487 mmc_queue_map_sg(struct mmc_queue *mq, struct mmc_queue_req *mqrq) argument
[all...]
block.c 1033 static int mmc_blk_issue_discard_rq(struct mmc_queue *mq, struct request *req) argument
1035 struct mmc_blk_data *md = mq->data;
1076 static int mmc_blk_issue_secdiscard_rq(struct mmc_queue *mq, argument
1079 struct mmc_blk_data *md = mq->data;
1143 static int mmc_blk_issue_flush(struct mmc_queue *mq, struct request *req) argument
1145 struct mmc_blk_data *md = mq->data;
1356 struct mmc_queue *mq)
1361 struct mmc_blk_data *md = mq->data;
1490 brq->data.sg_len = mmc_queue_map_sg(mq, mqrq);
1533 static u8 mmc_blk_prep_packed_list(struct mmc_queue *mq, struc argument
1353 mmc_blk_rw_rq_prep(struct mmc_queue_req *mqrq, struct mmc_card *card, int disable_multi, struct mmc_queue *mq) argument
1641 mmc_blk_packed_hdr_wrq_prep(struct mmc_queue_req *mqrq, struct mmc_card *card, struct mmc_queue *mq) argument
1802 mmc_blk_revert_packed_req(struct mmc_queue *mq, struct mmc_queue_req *mq_rq) argument
1826 mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *rqc) argument
2014 mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req) argument
[all...]
/drivers/mtd/maps/
vmu-flash.c 91 static void vmu_blockread(struct mapleq *mq) argument
96 mdev = mq->dev;
103 memcpy(card->blockread, mq->recvbuf->buf + 12,
193 list_del_init(&(mdev->mq->list));
194 kfree(mdev->mq->sendbuf);
195 mdev->mq->sendbuf = NULL;
285 kfree(mdev->mq->sendbuf);
286 mdev->mq->sendbuf = NULL;
287 list_del_init(&(mdev->mq->list));
501 static void vmu_queryblocks(struct mapleq *mq) argument
[all...]
/drivers/net/wireless/iwlwifi/dvm/
tx.c 483 static int iwlagn_alloc_agg_txq(struct iwl_priv *priv, int mq) argument
490 priv->queue_to_mac80211[q] = mq;
main.c 1971 int mq = priv->queue_to_mac80211[queue]; local
1973 if (WARN_ON_ONCE(mq == IWL_INVALID_MAC80211_QUEUE))
1976 if (atomic_inc_return(&priv->queue_stop_count[mq]) > 1) {
1979 queue, mq);
1983 set_bit(mq, &priv->transport_queue_stop);
1984 ieee80211_stop_queue(priv->hw, mq);
1990 int mq = priv->queue_to_mac80211[queue]; local
1992 if (WARN_ON_ONCE(mq == IWL_INVALID_MAC80211_QUEUE))
1995 if (atomic_dec_return(&priv->queue_stop_count[mq]) > 0) {
1998 queue, mq);
2010 int mq; local
[all...]
/drivers/scsi/qla2xxx/
qla_dbg.c 629 struct qla2xxx_mq_chain *mq = ptr; local
635 mq = ptr;
636 *last_chain = &mq->type;
637 mq->type = __constant_htonl(DUMP_CHAIN_MQ);
638 mq->chain_size = __constant_htonl(sizeof(struct qla2xxx_mq_chain));
642 mq->count = htonl(que_cnt);
646 mq->qregs[que_idx] =
648 mq->qregs[que_idx+1] =
650 mq->qregs[que_idx+2] =
652 mq
[all...]
/drivers/sh/maple/
maple.c 122 void (*callback) (struct mapleq *mq),
140 struct mapleq *mq; local
143 mq = mdev->mq;
144 kmem_cache_free(maple_queue_cache, mq->recvbuf);
145 kfree(mq);
172 mdev->mq->command = command;
173 mdev->mq->length = length;
176 mdev->mq->sendbuf = sendbuf;
179 list_add_tail(&mdev->mq
121 maple_getcond_callback(struct maple_device *dev, void (*callback) (struct mapleq *mq), unsigned long interval, unsigned long function) argument
188 struct mapleq *mq; local
247 maple_build_block(struct mapleq *mq) argument
273 struct mapleq *mq, *nmq; local
633 struct mapleq *mq, *nmq; local
[all...]
/drivers/mailbox/
omap-mailbox.c 202 struct omap_mbox_queue *mq = mbox->txq; local
205 spin_lock_bh(&mq->lock);
207 if (kfifo_avail(&mq->fifo) < sizeof(msg)) {
212 if (kfifo_is_empty(&mq->fifo) && !mbox_fifo_full(mbox)) {
217 len = kfifo_in(&mq->fifo, (unsigned char *)&msg, sizeof(msg));
223 spin_unlock_bh(&mq->lock);
299 struct omap_mbox_queue *mq = mbox->txq; local
303 while (kfifo_len(&mq->fifo)) {
309 ret = kfifo_out(&mq->fifo, (unsigned char *)&msg,
322 struct omap_mbox_queue *mq local
354 struct omap_mbox_queue *mq = mbox->rxq; local
394 struct omap_mbox_queue *mq; local
425 struct omap_mbox_queue *mq; local
[all...]
/drivers/misc/sgi-xp/
xpc_uv.c 109 xpc_get_gru_mq_irq_uv(struct xpc_gru_mq_uv *mq, int cpu, char *irq_name) argument
111 int mmr_pnode = uv_blade_to_pnode(mq->mmr_blade);
114 mq->irq = uv_setup_irq(irq_name, cpu, mq->mmr_blade, mq->mmr_offset,
116 if (mq->irq < 0)
117 return mq->irq;
119 mq->mmr_value = uv_read_global_mmr64(mmr_pnode, mq->mmr_offset);
123 mq
139 xpc_release_gru_mq_irq_uv(struct xpc_gru_mq_uv *mq) argument
158 xpc_gru_mq_watchlist_alloc_uv(struct xpc_gru_mq_uv *mq) argument
189 xpc_gru_mq_watchlist_free_uv(struct xpc_gru_mq_uv *mq) argument
215 struct xpc_gru_mq_uv *mq; local
308 xpc_destroy_gru_mq_uv(struct xpc_gru_mq_uv *mq) argument
[all...]
/drivers/net/wireless/iwlwifi/mvm/
ops.c 710 int mq = mvm->queue_to_mac80211[queue]; local
712 if (WARN_ON_ONCE(mq == IWL_INVALID_MAC80211_QUEUE))
715 if (atomic_inc_return(&mvm->mac80211_queue_stop_count[mq]) > 1) {
718 queue, mq);
722 ieee80211_stop_queue(mvm->hw, mq);
728 int mq = mvm->queue_to_mac80211[queue]; local
730 if (WARN_ON_ONCE(mq == IWL_INVALID_MAC80211_QUEUE))
733 if (atomic_dec_return(&mvm->mac80211_queue_stop_count[mq]) > 0) {
736 queue, mq);
740 ieee80211_wake_queue(mvm->hw, mq);
[all...]
/drivers/infiniband/hw/ocrdma/
ocrdma.h 228 struct ocrdma_mq mq; member in struct:ocrdma_dev
ocrdma_hw.c 107 (dev->mq.cq.va + (dev->mq.cq.tail * sizeof(struct ocrdma_mcqe)));
116 dev->mq.cq.tail = (dev->mq.cq.tail + 1) & (OCRDMA_MQ_CQ_LEN - 1);
121 return dev->mq.sq.va + (dev->mq.sq.head * sizeof(struct ocrdma_mqe));
126 dev->mq.sq.head = (dev->mq.sq.head + 1) & (OCRDMA_MQ_LEN - 1);
131 return dev->mq.sq.va + (dev->mqe_ctx.tag * sizeof(struct ocrdma_mqe));
305 val |= dev->mq
545 ocrdma_mbx_create_mq(struct ocrdma_dev *dev, struct ocrdma_queue_info *mq, struct ocrdma_queue_info *cq) argument
[all...]
/drivers/md/
dm-cache-policy-mq.c 16 #define DM_MSG_PREFIX "cache-policy-mq"
416 static void hash_insert(struct mq_policy *mq, struct entry *e) argument
418 unsigned h = hash_64(from_oblock(e->oblock), mq->hash_bits);
420 hlist_add_head(&e->hlist, mq->table + h);
423 static struct entry *hash_lookup(struct mq_policy *mq, dm_oblock_t oblock) argument
425 unsigned h = hash_64(from_oblock(oblock), mq->hash_bits);
426 struct hlist_head *bucket = mq->table + h;
446 static bool any_free_cblocks(struct mq_policy *mq) argument
448 return !epool_empty(&mq->cache_pool);
451 static bool any_clean_cblocks(struct mq_policy *mq) argument
472 in_cache(struct mq_policy *mq, struct entry *e) argument
482 push(struct mq_policy *mq, struct entry *e) argument
497 del(struct mq_policy *mq, struct entry *e) argument
507 pop(struct mq_policy *mq, struct queue *q) argument
524 updated_this_tick(struct mq_policy *mq, struct entry *e) argument
544 check_generation(struct mq_policy *mq) argument
584 requeue_and_update_tick(struct mq_policy *mq, struct entry *e) argument
617 demote_cblock(struct mq_policy *mq, dm_oblock_t *oblock) argument
652 adjusted_promote_threshold(struct mq_policy *mq, bool discarded_oblock, int data_dir) argument
669 should_promote(struct mq_policy *mq, struct entry *e, bool discarded_oblock, int data_dir) argument
676 cache_entry_found(struct mq_policy *mq, struct entry *e, struct policy_result *result) argument
694 pre_cache_to_cache(struct mq_policy *mq, struct entry *e, struct policy_result *result) argument
729 pre_cache_entry_found(struct mq_policy *mq, struct entry *e, bool can_migrate, bool discarded_oblock, int data_dir, struct policy_result *result) argument
752 insert_in_pre_cache(struct mq_policy *mq, dm_oblock_t oblock) argument
776 insert_in_cache(struct mq_policy *mq, dm_oblock_t oblock, struct policy_result *result) argument
811 no_entry_found(struct mq_policy *mq, dm_oblock_t oblock, bool can_migrate, bool discarded_oblock, int data_dir, struct policy_result *result) argument
832 map(struct mq_policy *mq, dm_oblock_t oblock, bool can_migrate, bool discarded_oblock, int data_dir, struct policy_result *result) argument
873 struct mq_policy *mq = to_mq_policy(p); local
881 copy_tick(struct mq_policy *mq) argument
895 struct mq_policy *mq = to_mq_policy(p); local
918 struct mq_policy *mq = to_mq_policy(p); local
936 __mq_set_clear_dirty(struct mq_policy *mq, dm_oblock_t oblock, bool set) argument
950 struct mq_policy *mq = to_mq_policy(p); local
959 struct mq_policy *mq = to_mq_policy(p); local
970 struct mq_policy *mq = to_mq_policy(p); local
983 mq_save_hints(struct mq_policy *mq, struct queue *q, policy_walk_fn fn, void *context) argument
1004 struct mq_policy *mq = to_mq_policy(p); local
1018 __remove_mapping(struct mq_policy *mq, dm_oblock_t oblock) argument
1031 struct mq_policy *mq = to_mq_policy(p); local
1038 __remove_cblock(struct mq_policy *mq, dm_cblock_t cblock) argument
1054 struct mq_policy *mq = to_mq_policy(p); local
1063 __mq_writeback_work(struct mq_policy *mq, dm_oblock_t *oblock, dm_cblock_t *cblock) argument
1083 struct mq_policy *mq = to_mq_policy(p); local
1092 __force_mapping(struct mq_policy *mq, dm_oblock_t current_oblock, dm_oblock_t new_oblock) argument
1108 struct mq_policy *mq = to_mq_policy(p); local
1118 struct mq_policy *mq = to_mq_policy(p); local
1129 struct mq_policy *mq = to_mq_policy(p); local
1140 struct mq_policy *mq = to_mq_policy(p); local
1170 struct mq_policy *mq = to_mq_policy(p); local
1187 init_policy_functions(struct mq_policy *mq) argument
1210 struct mq_policy *mq = kzalloc(sizeof(*mq), GFP_KERNEL); local
[all...]
/drivers/scsi/
scsi_lib.c 24 #include <linux/blk-mq.h>
594 static void scsi_free_sgtable(struct scsi_data_buffer *sdb, bool mq) argument
596 if (mq && sdb->table.nents <= SCSI_MAX_SG_SEGMENTS)
598 __sg_free_table(&sdb->table, SCSI_MAX_SG_SEGMENTS, mq, scsi_sg_free);
602 gfp_t gfp_mask, bool mq)
609 if (mq) {
621 scsi_free_sgtable(sdb, mq);
601 scsi_alloc_sgtable(struct scsi_data_buffer *sdb, int nents, gfp_t gfp_mask, bool mq) argument
/drivers/scsi/lpfc/
lpfc_sli.c 13161 * @mq: The queue structure to use to create the mailbox queue.
13173 lpfc_mq_create_fb_init(struct lpfc_hba *phba, struct lpfc_queue *mq, argument
13187 mq->page_count);
13191 switch (mq->entry_count) {
13209 list_for_each_entry(dmabuf, &mq->page_list, list) {
13220 * @mq: The queue structure to use to create the mailbox queue.
13224 * This function creates a mailbox queue, as detailed in @mq, on a port,
13239 lpfc_mq_create(struct lpfc_hba *phba, struct lpfc_queue *mq, argument
13252 if (!mq || !cq)
13269 &mq_create_ext->u.request, mq
14005 lpfc_mq_destroy(struct lpfc_hba *phba, struct lpfc_queue *mq) argument
[all...]

Completed in 1126 milliseconds