blk-mq.h revision feb71dae1f9e0aeb056f7f639a21e620d327fc66
#ifndef BLK_MQ_H
#define BLK_MQ_H

#include <linux/blkdev.h>

struct blk_mq_tags;

struct blk_mq_cpu_notifier {
	struct list_head list;
	void *data;
	void (*notify)(void *data, unsigned long action, unsigned int cpu);
};

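/*
 * One blk_mq_hw_ctx is allocated per hardware dispatch queue. The ctxs[]
 * array lists the per-CPU software queues that feed this hardware queue;
 * a driver typically hangs its own per-queue state off ->driver_data from
 * its init_hctx callback and reads it back in queue_rq.
 */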
struct blk_mq_hw_ctx {
	struct {
		spinlock_t		lock;
		struct list_head	dispatch;
	} ____cacheline_aligned_in_smp;

	unsigned long		state;		/* BLK_MQ_S_* flags */
	struct delayed_work	delayed_work;

	unsigned long		flags;		/* BLK_MQ_F_* flags */

	struct request_queue	*queue;
	unsigned int		queue_num;

	void			*driver_data;

	unsigned int		nr_ctx;
	struct blk_mq_ctx	**ctxs;
	unsigned int		nr_ctx_map;
	unsigned long		*ctx_map;

	struct request		**rqs;
	struct list_head	page_list;
	struct blk_mq_tags	*tags;

	unsigned long		queued;
	unsigned long		run;
#define BLK_MQ_MAX_DISPATCH_ORDER	10
	unsigned long		dispatched[BLK_MQ_MAX_DISPATCH_ORDER];

	unsigned int		queue_depth;
	unsigned int		numa_node;
	unsigned int		cmd_size;	/* per-request extra data */

	struct blk_mq_cpu_notifier	cpu_notifier;
	struct kobject		kobj;
};

struct blk_mq_reg {
	struct blk_mq_ops	*ops;
	unsigned int		nr_hw_queues;
	unsigned int		queue_depth;
	unsigned int		reserved_tags;
	unsigned int		cmd_size;	/* per-request extra data */
	int			numa_node;
	unsigned int		timeout;
	unsigned int		flags;		/* BLK_MQ_F_* */
};
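
/*
 * Illustrative sketch, not part of this header: a driver fills in a
 * blk_mq_reg and hands it to blk_mq_init_queue() to get a request_queue.
 * my_mq_ops, my_dev and struct my_cmd are made-up names, and the error
 * check assumes an ERR_PTR-style return.
 *
 *	static struct blk_mq_reg my_reg = {
 *		.ops		= &my_mq_ops,
 *		.nr_hw_queues	= 1,
 *		.queue_depth	= 64,
 *		.cmd_size	= sizeof(struct my_cmd),
 *		.numa_node	= NUMA_NO_NODE,
 *		.flags		= BLK_MQ_F_SHOULD_MERGE,
 *	};
 *
 *	q = blk_mq_init_queue(&my_reg, my_dev);
 *	if (IS_ERR(q))
 *		return PTR_ERR(q);
 */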

typedef int (queue_rq_fn)(struct blk_mq_hw_ctx *, struct request *);
typedef struct blk_mq_hw_ctx *(map_queue_fn)(struct request_queue *, const int);
typedef struct blk_mq_hw_ctx *(alloc_hctx_fn)(struct blk_mq_reg *, unsigned int);
typedef void (free_hctx_fn)(struct blk_mq_hw_ctx *, unsigned int);
typedef int (init_hctx_fn)(struct blk_mq_hw_ctx *, void *, unsigned int);
typedef void (exit_hctx_fn)(struct blk_mq_hw_ctx *, unsigned int);

struct blk_mq_ops {
	/*
	 * Queue request
	 */
	queue_rq_fn		*queue_rq;

	/*
	 * Map to specific hardware queue
	 */
	map_queue_fn		*map_queue;

	/*
	 * Called on request timeout
	 */
	rq_timed_out_fn		*timeout;

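	/*
	 * Called to complete a request; see blk_mq_complete_request().
	 */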
	softirq_done_fn		*complete;

	/*
	 * Override for hctx allocations (should probably go)
	 */
	alloc_hctx_fn		*alloc_hctx;
	free_hctx_fn		*free_hctx;

	/*
	 * Called when the block layer side of a hardware queue has been
	 * set up, allowing the driver to allocate/init matching structures.
	 * Ditto for exit/teardown.
	 */
	init_hctx_fn		*init_hctx;
	exit_hctx_fn		*exit_hctx;
};
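
/*
 * Illustrative sketch, not part of this header: the shape of a minimal
 * ->queue_rq handler. my_queue_data and my_issue() are made-up names;
 * the return values are the BLK_MQ_RQ_QUEUE_* codes defined below.
 *
 *	static int my_queue_rq(struct blk_mq_hw_ctx *hctx, struct request *rq)
 *	{
 *		struct my_queue_data *d = hctx->driver_data;
 *
 *		if (!my_issue(d, rq))
 *			return BLK_MQ_RQ_QUEUE_BUSY;
 *		return BLK_MQ_RQ_QUEUE_OK;
 *	}
 */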

enum {
	BLK_MQ_RQ_QUEUE_OK	= 0,	/* queued fine */
	BLK_MQ_RQ_QUEUE_BUSY	= 1,	/* requeue IO for later */
	BLK_MQ_RQ_QUEUE_ERROR	= 2,	/* end IO with error */

	BLK_MQ_F_SHOULD_MERGE	= 1 << 0,
	BLK_MQ_F_SHOULD_SORT	= 1 << 1,
	BLK_MQ_F_SHOULD_IPI	= 1 << 2,

	BLK_MQ_S_STOPPED	= 1 << 0,

	BLK_MQ_MAX_DEPTH	= 2048,
};

struct request_queue *blk_mq_init_queue(struct blk_mq_reg *, void *);
int blk_mq_register_disk(struct gendisk *);
void blk_mq_unregister_disk(struct gendisk *);
void blk_mq_init_commands(struct request_queue *,
			  void (*init)(void *data, struct blk_mq_hw_ctx *,
				       struct request *, unsigned int),
			  void *data);
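
/*
 * Illustrative sketch, not part of this header: the init callback passed
 * to blk_mq_init_commands() runs once per preallocated request, which is
 * a natural place to set up the per-request PDU. my_init_cmd, struct
 * my_cmd and its rq member are made-up names.
 *
 *	static void my_init_cmd(void *data, struct blk_mq_hw_ctx *hctx,
 *				struct request *rq, unsigned int i)
 *	{
 *		struct my_cmd *cmd = blk_mq_rq_to_pdu(rq);
 *
 *		cmd->rq = rq;
 *	}
 */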

void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule);

void blk_mq_insert_request(struct request *, bool, bool, bool);
void blk_mq_run_queues(struct request_queue *q, bool async);
void blk_mq_free_request(struct request *rq);
bool blk_mq_can_queue(struct blk_mq_hw_ctx *);
struct request *blk_mq_alloc_request(struct request_queue *q, int rw, gfp_t gfp);
struct request *blk_mq_alloc_reserved_request(struct request_queue *q, int rw, gfp_t gfp);
struct request *blk_mq_rq_from_tag(struct request_queue *q, unsigned int tag);

struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *, const int ctx_index);
struct blk_mq_hw_ctx *blk_mq_alloc_single_hw_queue(struct blk_mq_reg *, unsigned int);
void blk_mq_free_single_hw_queue(struct blk_mq_hw_ctx *, unsigned int);

void blk_mq_end_io(struct request *rq, int error);

void blk_mq_complete_request(struct request *rq);

void blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx);
void blk_mq_start_hw_queue(struct blk_mq_hw_ctx *hctx);
void blk_mq_stop_hw_queues(struct request_queue *q);
void blk_mq_start_stopped_hw_queues(struct request_queue *q);
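
/*
 * Illustrative sketch, not part of this header: a common flow-control
 * pattern is to stop the hardware queue when the device runs out of
 * resources and restart it from the completion path.
 *
 *	In ->queue_rq, on a full device ring:
 *		blk_mq_stop_hw_queue(hctx);
 *		return BLK_MQ_RQ_QUEUE_BUSY;
 *
 *	In the completion handler, once space is available again:
 *		blk_mq_start_stopped_hw_queues(q);
 */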

/*
 * Driver command data is immediately after the request. So subtract request
 * size to get back to the original request.
 */
static inline struct request *blk_mq_rq_from_pdu(void *pdu)
{
	return pdu - sizeof(struct request);
}

static inline void *blk_mq_rq_to_pdu(struct request *rq)
{
	return (void *) rq + sizeof(*rq);
}
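
/*
 * Illustrative sketch, not part of this header: with cmd_size set to
 * sizeof(struct my_cmd) at registration time (struct my_cmd is a made-up
 * name), the two helpers above convert between a request and its PDU:
 *
 *	struct my_cmd *cmd = blk_mq_rq_to_pdu(rq);
 *	struct request *back = blk_mq_rq_from_pdu(cmd);
 *
 * where back == rq, since the PDU sits immediately after the request.
 */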

static inline struct request *blk_mq_tag_to_rq(struct blk_mq_hw_ctx *hctx,
					       unsigned int tag)
{
	return hctx->rqs[tag];
}

#define queue_for_each_hw_ctx(q, hctx, i)				\
	for ((i) = 0; (i) < (q)->nr_hw_queues &&			\
	     ({ hctx = (q)->queue_hw_ctx[i]; 1; }); (i)++)

#define queue_for_each_ctx(q, ctx, i)					\
	for ((i) = 0; (i) < (q)->nr_queues &&				\
	     ({ ctx = per_cpu_ptr((q)->queue_ctx, (i)); 1; }); (i)++)

#define hctx_for_each_ctx(hctx, ctx, i)					\
	for ((i) = 0; (i) < (hctx)->nr_ctx &&				\
	     ({ ctx = (hctx)->ctxs[(i)]; 1; }); (i)++)
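
/*
 * Illustrative sketch, not part of this header: iterating all hardware
 * queues of a request_queue, e.g. to tear down per-queue driver state.
 * my_cleanup() is a made-up name.
 *
 *	struct blk_mq_hw_ctx *hctx;
 *	unsigned int i;
 *
 *	queue_for_each_hw_ctx(q, hctx, i)
 *		my_cleanup(hctx->driver_data);
 */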

#define blk_ctx_sum(q, sum)						\
({									\
	struct blk_mq_ctx *__x;						\
	unsigned int __ret = 0, __i;					\
									\
	queue_for_each_ctx((q), __x, __i)				\
		__ret += sum;						\
	__ret;								\
})
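
/*
 * Note that the summed expression refers to the macro's internal iterator
 * __x, e.g. blk_ctx_sum(q, __x->some_counter) adds up a per-ctx field
 * across all software queues. some_counter stands in for a real
 * blk_mq_ctx field, which is private to block/.
 */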

#endif