#ifndef SCM_BLK_H
#define SCM_BLK_H

#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/blkdev.h>
#include <linux/genhd.h>
#include <linux/list.h>

#include <asm/debug.h>
#include <asm/eadm.h>

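/*
 * SCM_NR_PARTS limits the minors (and thus partitions) per SCM block
 * device; SCM_QUEUE_DELAY is the delay used when requests have to be
 * deferred and retried later.
 */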
#define SCM_NR_PARTS 8
#define SCM_QUEUE_DELAY 5

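/*
 * Per-device state: the request queue and gendisk presented to the block
 * layer, the underlying SCM device, and bookkeeping for queued/finished
 * requests.  SCM_WR_PROHIBIT marks a device that currently rejects writes.
 */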
struct scm_blk_dev {
	struct tasklet_struct tasklet;
	struct request_queue *rq;
	struct gendisk *gendisk;
	struct scm_device *scmdev;
	spinlock_t rq_lock;	/* guard the request queue */
	spinlock_t lock;	/* guard the rest of the blockdev */
	atomic_t queued_reqs;
	enum {SCM_OPER, SCM_WR_PROHIBIT} state;
	struct list_head finished_requests;
#ifdef CONFIG_SCM_BLOCK_CLUSTER_WRITE
	struct list_head cluster_list;
#endif
};

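/*
 * Per-request state: the originating block layer request together with
 * the AOB and AIDAW list describing it to the EADM subchannel, a retry
 * counter and the error reported on completion.
 */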
struct scm_request {
	struct scm_blk_dev *bdev;
	struct request *request;
	struct aidaw *aidaw;
	struct aob *aob;
	struct list_head list;
	u8 retries;
	int error;
#ifdef CONFIG_SCM_BLOCK_CLUSTER_WRITE
	struct {
		enum {CLUSTER_NONE, CLUSTER_READ, CLUSTER_WRITE} state;
		struct list_head list;
		void **buf;
	} cluster;
#endif
};

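/*
 * An scm_request lives in the data area of its aob_rq_header; to_aobrq()
 * maps the request pointer back to the enclosing header.
 */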
#define to_aobrq(rq) container_of((void *) rq, struct aob_rq_header, data)

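/*
 * Device setup/teardown, request and interrupt handling (scm_blk.c);
 * driver registration (scm_drv.c).
 */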
int scm_blk_dev_setup(struct scm_blk_dev *, struct scm_device *);
void scm_blk_dev_cleanup(struct scm_blk_dev *);
void scm_blk_set_available(struct scm_blk_dev *);
void scm_blk_irq(struct scm_device *, void *, int);

void scm_request_finish(struct scm_request *);
void scm_request_requeue(struct scm_request *);

int scm_drv_init(void);
void scm_drv_cleanup(void);

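/*
 * Cluster write support: writes that only partially cover a cluster are
 * handled as read-modify-write so that the device only sees
 * cluster-aligned writes.
 */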
#ifdef CONFIG_SCM_BLOCK_CLUSTER_WRITE
void __scm_free_rq_cluster(struct scm_request *);
int __scm_alloc_rq_cluster(struct scm_request *);
void scm_request_cluster_init(struct scm_request *);
bool scm_reserve_cluster(struct scm_request *);
void scm_release_cluster(struct scm_request *);
void scm_blk_dev_cluster_setup(struct scm_blk_dev *);
bool scm_need_cluster_request(struct scm_request *);
void scm_initiate_cluster_request(struct scm_request *);
void scm_cluster_request_irq(struct scm_request *);
bool scm_test_cluster_request(struct scm_request *);
bool scm_cluster_size_valid(void);
#else /* CONFIG_SCM_BLOCK_CLUSTER_WRITE */
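/* No-op stubs so callers need no #ifdefs when cluster writes are disabled. */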
static inline void __scm_free_rq_cluster(struct scm_request *scmrq) {}
static inline int __scm_alloc_rq_cluster(struct scm_request *scmrq)
{
	return 0;
}
static inline void scm_request_cluster_init(struct scm_request *scmrq) {}
static inline bool scm_reserve_cluster(struct scm_request *scmrq)
{
	return true;
}
static inline void scm_release_cluster(struct scm_request *scmrq) {}
static inline void scm_blk_dev_cluster_setup(struct scm_blk_dev *bdev) {}
static inline bool scm_need_cluster_request(struct scm_request *scmrq)
{
	return false;
}
static inline void scm_initiate_cluster_request(struct scm_request *scmrq) {}
static inline void scm_cluster_request_irq(struct scm_request *scmrq) {}
static inline bool scm_test_cluster_request(struct scm_request *scmrq)
{
	return false;
}
static inline bool scm_cluster_size_valid(void)
{
	return true;
}
#endif /* CONFIG_SCM_BLOCK_CLUSTER_WRITE */

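/* s390 debug feature entry used by the SCM_LOG* helpers below. */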
extern debug_info_t *scm_debug;

#define SCM_LOG(imp, txt) do {					\
		debug_text_event(scm_debug, imp, txt);		\
	} while (0)

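/*
 * Log a binary blob; debug_event() records at most buf_size bytes per
 * entry, so larger blobs are split into buf_size sized chunks.
 */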
static inline void SCM_LOG_HEX(int level, void *data, int length)
{
	if (!debug_level_enabled(scm_debug, level))
		return;
	while (length > 0) {
		debug_event(scm_debug, level, data, length);
		length -= scm_debug->buf_size;
		data += scm_debug->buf_size;
	}
}

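/* Log address, operational state and rank of the given SCM device. */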
static inline void SCM_LOG_STATE(int level, struct scm_device *scmdev)
{
	struct {
		u64 address;
		u8 oper_state;
		u8 rank;
	} __packed data = {
		.address = scmdev->address,
		.oper_state = scmdev->attrs.oper_state,
		.rank = scmdev->attrs.rank,
	};

	SCM_LOG_HEX(level, &data, sizeof(data));
}

#endif /* SCM_BLK_H */