1#ifndef _FIREWIRE_CORE_H
2#define _FIREWIRE_CORE_H
3
4#include <linux/compiler.h>
5#include <linux/device.h>
6#include <linux/dma-mapping.h>
7#include <linux/fs.h>
8#include <linux/list.h>
9#include <linux/idr.h>
10#include <linux/mm_types.h>
11#include <linux/rwsem.h>
12#include <linux/slab.h>
13#include <linux/types.h>
14
15#include <linux/atomic.h>
16
/*
 * Forward declarations for types used only via pointers in this header.
 * struct fw_request is added here because fw_get_response_length() below
 * takes a struct fw_request *; without a file-scope declaration the struct
 * would be declared with prototype scope, which compilers warn about and
 * which is a distinct, incompatible type.
 */
struct device;
struct fw_card;
struct fw_device;
struct fw_iso_buffer;
struct fw_iso_context;
struct fw_iso_packet;
struct fw_node;
struct fw_packet;
struct fw_request;
25
26
/* -card */

/* printf-style diagnostics; format strings are compile-time checked. */
extern __printf(2, 3)
void fw_err(const struct fw_card *card, const char *fmt, ...);
extern __printf(2, 3)
void fw_notice(const struct fw_card *card, const char *fmt, ...);

/*
 * bitfields within the PHY registers
 *
 * NOTE(review): several values repeat (0x40 and 0xe0 each appear more than
 * once).  These are presumably bits of *different* PHY registers/pages per
 * the IEEE 1394a PHY register map — confirm against the register accessors
 * before changing any of them.
 */
#define PHY_LINK_ACTIVE		0x80
#define PHY_CONTENDER		0x40
#define PHY_BUS_RESET		0x40
#define PHY_EXTENDED_REGISTERS	0xe0
#define PHY_BUS_SHORT_RESET	0x40
#define PHY_INT_STATUS_BITS	0x3c
#define PHY_ENABLE_ACCEL	0x02
#define PHY_ENABLE_MULTI	0x01
#define PHY_PAGE_SELECT		0xe0

/* Reset values for bus-management CSRs (BANDWIDTH_AVAILABLE, BROADCAST_CHANNEL). */
#define BANDWIDTH_AVAILABLE_INITIAL	4915
#define BROADCAST_CHANNEL_INITIAL	(1 << 31 | 31)
#define BROADCAST_CHANNEL_VALID		(1 << 30)

/* Bits within the STATE_CLEAR/STATE_SET CSRs. */
#define CSR_STATE_BIT_CMSTR	(1 << 8)
#define CSR_STATE_BIT_ABDICATE	(1 << 10)
51
/*
 * Hook table through which the core drives a controller implementation
 * (e.g. the OHCI driver).  All hooks receive the card they operate on,
 * either directly or via the iso context.
 */
struct fw_card_driver {
	/*
	 * Enable the given card with the given initial config rom.
	 * This function is expected to activate the card, and either
	 * enable the PHY or set the link_on bit and initiate a bus
	 * reset.
	 */
	int (*enable)(struct fw_card *card,
		      const __be32 *config_rom, size_t length);

	/*
	 * Read a PHY register, or clear clear_bits and set set_bits in
	 * one.  Presumably return a negative errno on failure — confirm
	 * against the controller implementations.
	 */
	int (*read_phy_reg)(struct fw_card *card, int address);
	int (*update_phy_reg)(struct fw_card *card, int address,
			      int clear_bits, int set_bits);

	/*
	 * Update the config rom for an enabled card.  This function
	 * should change the config rom that is presented on the bus
	 * and initiate a bus reset.
	 */
	int (*set_config_rom)(struct fw_card *card,
			      const __be32 *config_rom, size_t length);

	/* Queue an asynchronous request or response packet for transmission. */
	void (*send_request)(struct fw_card *card, struct fw_packet *packet);
	void (*send_response)(struct fw_card *card, struct fw_packet *packet);
	/* Calling cancel is valid once a packet has been submitted. */
	int (*cancel_packet)(struct fw_card *card, struct fw_packet *packet);

	/*
	 * Allow the specified node ID to do direct DMA out and in of
	 * host memory.  The card will disable this for all nodes when
	 * a bus reset happens, so drivers need to reenable this after
	 * bus reset.  Returns 0 on success, -ENODEV if the card
	 * doesn't support this, -ESTALE if the generation doesn't
	 * match.
	 */
	int (*enable_phys_dma)(struct fw_card *card,
			       int node_id, int generation);

	/* Access the controller's CSR registers at csr_offset. */
	u32 (*read_csr)(struct fw_card *card, int csr_offset);
	void (*write_csr)(struct fw_card *card, int csr_offset, u32 value);

	/*
	 * Isochronous context lifecycle: allocate/free a context of the
	 * given type on a channel, start/stop streaming, queue packets
	 * into a DMA-mapped buffer, and flush queued work/completions.
	 */
	struct fw_iso_context *
	(*allocate_iso_context)(struct fw_card *card,
				int type, int channel, size_t header_size);
	void (*free_iso_context)(struct fw_iso_context *ctx);

	int (*start_iso)(struct fw_iso_context *ctx,
			 s32 cycle, u32 sync, u32 tags);

	int (*set_iso_channels)(struct fw_iso_context *ctx, u64 *channels);

	int (*queue_iso)(struct fw_iso_context *ctx,
			 struct fw_iso_packet *packet,
			 struct fw_iso_buffer *buffer,
			 unsigned long payload);

	void (*flush_queue_iso)(struct fw_iso_context *ctx);

	int (*flush_iso_completions)(struct fw_iso_context *ctx);

	int (*stop_iso)(struct fw_iso_context *ctx);
};
114
/* Card registration with the core, plus bus-management helpers. */
void fw_card_initialize(struct fw_card *card,
		const struct fw_card_driver *driver, struct device *device);
int fw_card_add(struct fw_card *card,
		u32 max_receive, u32 link_speed, u64 guid);
void fw_core_remove_card(struct fw_card *card);
/* CRC-16 over a config-ROM block; see the implementation for the exact contract. */
int fw_compute_block_crc(__be32 *block);
/* (Re)schedule the bus-manager workqueue item after @delay jiffies. */
void fw_schedule_bm_work(struct fw_card *card, unsigned long delay);
122
/* -cdev */

/* file_operations backing the firewire character device interface. */
extern const struct file_operations fw_device_ops;

void fw_device_cdev_update(struct fw_device *device);
void fw_device_cdev_remove(struct fw_device *device);
void fw_cdev_handle_phy_packet(struct fw_card *card, struct fw_packet *p);
130
131
/* -device */

/* NOTE(review): rwsem presumably serializes access to fw_device_idr — confirm in core-device.c. */
extern struct rw_semaphore fw_device_rwsem;
extern struct idr fw_device_idr;
extern int fw_cdev_major;
137
/* Take a reference on the embedded struct device; returns @device for chaining. */
static inline struct fw_device *fw_device_get(struct fw_device *device)
{
	get_device(&device->device);

	return device;
}
144
/* Drop a reference taken with fw_device_get(). */
static inline void fw_device_put(struct fw_device *device)
{
	put_device(&device->device);
}
149
/* Look up a device by char-dev number; presumably returns a new reference — confirm at the definition. */
struct fw_device *fw_device_get_by_devt(dev_t devt);
int fw_device_set_broadcast_channel(struct device *dev, void *gen);
void fw_node_event(struct fw_card *card, struct fw_node *node, int event);
153
154
/* -iso */

/* Isochronous buffer helpers: allocate page-backed buffers, then map them
 * for device DMA or into a userspace VMA. */
int fw_iso_buffer_alloc(struct fw_iso_buffer *buffer, int page_count);
int fw_iso_buffer_map_dma(struct fw_iso_buffer *buffer, struct fw_card *card,
			  enum dma_data_direction direction);
int fw_iso_buffer_map_vma(struct fw_iso_buffer *buffer,
			  struct vm_area_struct *vma);
162
163
164/* -topology */
165
/* Topology change events, passed as the @event argument of fw_node_event(). */
enum {
	FW_NODE_CREATED,
	FW_NODE_UPDATED,
	FW_NODE_DESTROYED,
	FW_NODE_LINK_ON,
	FW_NODE_LINK_OFF,
	FW_NODE_INITIATED_RESET,
};
174
175struct fw_node {
176	u16 node_id;
177	u8 color;
178	u8 port_count;
179	u8 link_on:1;
180	u8 initiated_reset:1;
181	u8 b_path:1;
182	u8 phy_speed:2;	/* As in the self ID packet. */
183	u8 max_speed:2;	/* Minimum of all phy-speeds on the path from the
184			 * local node to this node. */
185	u8 max_depth:4;	/* Maximum depth to any leaf node */
186	u8 max_hops:4;	/* Max hops in this sub tree */
187	atomic_t ref_count;
188
189	/* For serializing node topology into a list. */
190	struct list_head link;
191
192	/* Upper layer specific data. */
193	void *data;
194
195	struct fw_node *ports[0];
196};
197
/* Take a reference on @node; pairs with fw_node_put().  Returns @node for chaining. */
static inline struct fw_node *fw_node_get(struct fw_node *node)
{
	atomic_inc(&node->ref_count);

	return node;
}
204
205static inline void fw_node_put(struct fw_node *node)
206{
207	if (atomic_dec_and_test(&node->ref_count))
208		kfree(node);
209}
210
/* Rebuild the topology from a bus reset's self-ID data; tear all nodes down. */
void fw_core_handle_bus_reset(struct fw_card *card, int node_id,
	int generation, int self_id_count, u32 *self_ids, bool bm_abdicate);
void fw_destroy_nodes(struct fw_card *card);
214
/*
 * Tell whether new_generation immediately follows old_generation,
 * honoring the 8-bit generation counter roll-over at 255 (as per OHCI).
 */
static inline bool is_next_generation(int new_generation, int old_generation)
{
	int expected = (old_generation + 1) & 0xff;

	return (new_generation & 0xff) == expected;
}
223
224
/* -transaction */

#define TCODE_LINK_INTERNAL		0xe

/*
 * Transaction-code predicates, derived from the bit layout of the tcode
 * value: bit 1 distinguishes responses from requests, bit 0 block from
 * quadlet forms.
 */
#define TCODE_IS_READ_REQUEST(tcode)	(((tcode) & ~1) == 4)	/* tcodes 4 and 5 */
#define TCODE_IS_BLOCK_PACKET(tcode)	(((tcode) &  1) != 0)
#define TCODE_IS_LINK_INTERNAL(tcode)	((tcode) == TCODE_LINK_INTERNAL)
#define TCODE_IS_REQUEST(tcode)		(((tcode) &  2) == 0)
#define TCODE_IS_RESPONSE(tcode)	(((tcode) &  2) != 0)
#define TCODE_HAS_REQUEST_DATA(tcode)	(((tcode) & 12) != 4)
#define TCODE_HAS_RESPONSE_DATA(tcode)	(((tcode) & 12) != 0)

/* Bus-number portion of a node ID addressing the local bus. */
#define LOCAL_BUS 0xffc0

/* OHCI-1394's default upper bound for physical DMA: 4 GB */
#define FW_MAX_PHYSICAL_RANGE		(1ULL << 32)

/* Core entry points for incoming async request/response packets. */
void fw_core_handle_request(struct fw_card *card, struct fw_packet *request);
void fw_core_handle_response(struct fw_card *card, struct fw_packet *packet);
int fw_get_response_length(struct fw_request *request);
void fw_fill_response(struct fw_packet *response, u32 *request_header,
		      int rcode, void *payload, size_t length);

/* Sentinel arguments for fw_send_phy_config(): keep current node ID / gap count. */
#define FW_PHY_CONFIG_NO_NODE_ID	-1
#define FW_PHY_CONFIG_CURRENT_GAP_COUNT	-1
void fw_send_phy_config(struct fw_card *card,
			int node_id, int generation, int gap_count);
252
253static inline bool is_ping_packet(u32 *data)
254{
255	return (data[0] & 0xc0ffffff) == 0 && ~data[0] == data[1];
256}
257
258#endif /* _FIREWIRE_CORE_H */
259