/*
 *  linux/include/linux/sunrpc/xprt.h
 *
 *  Declarations for the RPC transport interface.
 *
 *  Copyright (C) 1995, 1996 Olaf Kirch <okir@monad.swb.de>
 */

#ifndef _LINUX_SUNRPC_XPRT_H
#define _LINUX_SUNRPC_XPRT_H

#include <linux/uio.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/sunrpc/sched.h>
#include <linux/sunrpc/xdr.h>

extern unsigned int xprt_udp_slot_table_entries;
extern unsigned int xprt_tcp_slot_table_entries;

#define RPC_MIN_SLOT_TABLE	(2U)
#define RPC_DEF_SLOT_TABLE	(16U)
#define RPC_MAX_SLOT_TABLE	(128U)

/*
 * RPC call and reply header size as number of 32-bit words (verifier
 * size computed separately)
 */
#define RPC_CALLHDRSIZE		6
#define RPC_REPHDRSIZE		4
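/*
 * Per the ONC RPC wire format, the six call header words above are the
 * xid, message direction, RPC version, program, version and procedure;
 * the four reply words are the xid, direction, reply status and accept
 * status.  The variable-sized authentication fields (credential and
 * verifier) are, as the comment above notes, sized separately.
 */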

/*
 * Parameters for choosing a free port
 */
extern unsigned int xprt_min_resvport;
extern unsigned int xprt_max_resvport;

#define RPC_MIN_RESVPORT	(1U)
#define RPC_MAX_RESVPORT	(65535U)
#define RPC_DEF_MIN_RESVPORT	(665U)
#define RPC_DEF_MAX_RESVPORT	(1023U)

/*
 * This describes a timeout strategy
 */
struct rpc_timeout {
	unsigned long		to_initval,		/* initial timeout */
				to_maxval,		/* max timeout */
				to_increment;		/* if !exponential */
	unsigned int		to_retries;		/* max # of retries */
	unsigned char		to_exponential;
};
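/*
 * A sketch of how these fields are typically applied on a retransmit
 * (the actual logic lives in the transport code, not in this header):
 * with to_exponential set, the current timeout roughly doubles on each
 * retry; otherwise to_increment is added, and either way the result is
 * clamped to to_maxval, e.g.
 *
 *	if (to->to_exponential)
 *		req->rq_timeout <<= 1;
 *	else
 *		req->rq_timeout += to->to_increment;
 *	if (to->to_maxval && req->rq_timeout >= to->to_maxval)
 *		req->rq_timeout = to->to_maxval;
 *
 * xprt_set_timeout() (declared below) fills in a non-exponential
 * strategy from a retry count and an increment in jiffies.
 */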

struct rpc_task;
struct rpc_xprt;
struct seq_file;

/*
 * This describes a complete RPC request
 */
struct rpc_rqst {
	/*
	 * This is the user-visible part
	 */
	struct rpc_xprt *	rq_xprt;		/* RPC client */
	struct xdr_buf		rq_snd_buf;		/* send buffer */
	struct xdr_buf		rq_rcv_buf;		/* recv buffer */

	/*
	 * This is the private part
	 */
	struct rpc_task *	rq_task;	/* RPC task data */
	__u32			rq_xid;		/* request XID */
	int			rq_cong;	/* has incremented xprt->cong */
	int			rq_received;	/* receive completed */
	u32			rq_seqno;	/* gss seq no. used on req. */
	int			rq_enc_pages_num;
	struct page		**rq_enc_pages;	/* scratch pages for use by
						   gss privacy code */
	void (*rq_release_snd_buf)(struct rpc_rqst *); /* release rq_enc_pages */
	struct list_head	rq_list;

	__u32 *			rq_buffer;	/* XDR encode buffer */
	size_t			rq_bufsize;

	struct xdr_buf		rq_private_buf;		/* The receive buffer
							 * used in the softirq.
							 */
	unsigned long		rq_majortimeo;	/* major timeout alarm */
	unsigned long		rq_timeout;	/* Current timeout value */
	unsigned int		rq_retries;	/* # of retries */

	/*
	 * Partial send handling
	 */
	u32			rq_bytes_sent;	/* Bytes we have sent */

	unsigned long		rq_xtime;	/* when transmitted */
	int			rq_ntrans;
};
#define rq_svec			rq_snd_buf.head
#define rq_slen			rq_snd_buf.len

struct rpc_xprt_ops {
	void		(*set_buffer_size)(struct rpc_xprt *xprt, size_t sndsize, size_t rcvsize);
	int		(*reserve_xprt)(struct rpc_task *task);
	void		(*release_xprt)(struct rpc_xprt *xprt, struct rpc_task *task);
	void		(*set_port)(struct rpc_xprt *xprt, unsigned short port);
	void		(*connect)(struct rpc_task *task);
	void *		(*buf_alloc)(struct rpc_task *task, size_t size);
	void		(*buf_free)(struct rpc_task *task);
	int		(*send_request)(struct rpc_task *task);
	void		(*set_retrans_timeout)(struct rpc_task *task);
	void		(*timer)(struct rpc_task *task);
	void		(*release_request)(struct rpc_task *task);
	void		(*close)(struct rpc_xprt *xprt);
	void		(*destroy)(struct rpc_xprt *xprt);
	void		(*print_stats)(struct rpc_xprt *xprt, struct seq_file *seq);
};
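/*
 * A transport implementation supplies this table.  As an illustrative
 * sketch only (the xs_* names below are hypothetical stand-ins for a
 * transport's own handlers; the xprt_* entries are the generic helpers
 * declared later in this header), a datagram transport might use:
 *
 *	static struct rpc_xprt_ops xs_udp_ops = {
 *		.set_buffer_size	= xs_udp_set_buffer_size,
 *		.reserve_xprt		= xprt_reserve_xprt_cong,
 *		.release_xprt		= xprt_release_xprt_cong,
 *		.connect		= xs_connect,
 *		.send_request		= xs_udp_send_request,
 *		.set_retrans_timeout	= xprt_set_retrans_timeout_rtt,
 *		.timer			= xs_udp_timer,
 *		.release_request	= xprt_release_rqst_cong,
 *		.close			= xs_close,
 *		.destroy		= xs_destroy,
 *		.print_stats		= xs_udp_print_stats,
 *	};
 */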

struct rpc_xprt {
	struct rpc_xprt_ops *	ops;		/* transport methods */
	struct socket *		sock;		/* BSD socket layer */
	struct sock *		inet;		/* INET layer */

	struct rpc_timeout	timeout;	/* timeout parms */
	struct sockaddr_in	addr;		/* server address */
	int			prot;		/* IP protocol */

	unsigned long		cong;		/* current congestion */
	unsigned long		cwnd;		/* congestion window */

	size_t			rcvsize,	/* transport rcv buffer size */
				sndsize;	/* transport send buffer size */

	size_t			max_payload;	/* largest RPC payload size,
						   in bytes */
	unsigned int		tsh_size;	/* size of transport specific
						   header */

	struct rpc_wait_queue	sending;	/* requests waiting to send */
	struct rpc_wait_queue	resend;		/* requests waiting to resend */
	struct rpc_wait_queue	pending;	/* requests in flight */
	struct rpc_wait_queue	backlog;	/* waiting for slot */
	struct list_head	free;		/* free slots */
	struct rpc_rqst *	slot;		/* slot table storage */
	unsigned int		max_reqs;	/* total slots */
	unsigned long		state;		/* transport state */
	unsigned char		shutdown   : 1,	/* being shut down */
				resvport   : 1; /* use a reserved port */

	/*
	 * XID
	 */
	__u32			xid;		/* Next XID value to use */

	/*
	 * State of TCP reply receive stuff
	 */
	u32			tcp_recm,	/* Fragment header */
				tcp_xid,	/* Current XID */
				tcp_reclen,	/* fragment length */
				tcp_offset;	/* fragment offset */
	unsigned long		tcp_copied,	/* copied to request */
				tcp_flags;
	/*
	 * Connection of transports
	 */
	unsigned long		connect_timeout,
				bind_timeout,
				reestablish_timeout;
	struct work_struct	connect_worker;
	unsigned short		port;

	/*
	 * Disconnection of idle transports
	 */
	struct work_struct	task_cleanup;
	struct timer_list	timer;
	unsigned long		last_used,
				idle_timeout;

	/*
	 * Send stuff
	 */
	spinlock_t		transport_lock;	/* lock transport info */
	spinlock_t		reserve_lock;	/* lock slot table */
	struct rpc_task *	snd_task;	/* Task blocked in send */

	struct list_head	recv;

	struct {
		unsigned long		bind_count,	/* total number of binds */
					connect_count,	/* total number of connects */
					connect_start,	/* connect start timestamp */
					connect_time,	/* jiffies waiting for connect */
					sends,		/* how many complete requests */
					recvs,		/* how many complete replies */
					bad_xids;	/* lookup_rqst didn't find XID */

		unsigned long long	req_u,		/* average requests on the wire */
					bklog_u;	/* backlog queue utilization */
	} stat;

	void			(*old_data_ready)(struct sock *, int);
	void			(*old_state_change)(struct sock *);
	void			(*old_write_space)(struct sock *);
};

#define XPRT_LAST_FRAG		(1 << 0)
#define XPRT_COPY_RECM		(1 << 1)
#define XPRT_COPY_XID		(1 << 2)
#define XPRT_COPY_DATA		(1 << 3)
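/*
 * These flags record where the TCP receive code is within the current
 * RPC record.  Roughly (a sketch; the receive state machine itself is
 * in the transport code): XPRT_COPY_RECM means the four-byte record
 * marker is still being copied into tcp_recm, XPRT_COPY_XID means the
 * XID is being copied into tcp_xid, and XPRT_COPY_DATA means the rest
 * of the fragment is being copied into the matching request's receive
 * buffer, with tcp_offset and tcp_reclen tracking progress through the
 * fragment.  XPRT_LAST_FRAG marks the final fragment of a record.
 */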

#ifdef __KERNEL__

/*
 * Transport operations used by ULPs
 */
struct rpc_xprt *	xprt_create_proto(int proto, struct sockaddr_in *addr, struct rpc_timeout *to);
void			xprt_set_timeout(struct rpc_timeout *to, unsigned int retr, unsigned long incr);
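/*
 * Typical use by an upper-layer protocol (illustrative only; server_addr
 * stands for whatever sockaddr_in the caller has resolved):
 *
 *	struct rpc_timeout timeparms;
 *	struct rpc_xprt *xprt;
 *
 *	xprt_set_timeout(&timeparms, 5, 5 * HZ);
 *	xprt = xprt_create_proto(IPPROTO_UDP, &server_addr, &timeparms);
 *	if (IS_ERR(xprt))
 *		return PTR_ERR(xprt);
 */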

/*
 * Generic internal transport functions
 */
void			xprt_connect(struct rpc_task *task);
void			xprt_reserve(struct rpc_task *task);
int			xprt_reserve_xprt(struct rpc_task *task);
int			xprt_reserve_xprt_cong(struct rpc_task *task);
int			xprt_prepare_transmit(struct rpc_task *task);
void			xprt_transmit(struct rpc_task *task);
void			xprt_end_transmit(struct rpc_task *task);
int			xprt_adjust_timeout(struct rpc_rqst *req);
void			xprt_release_xprt(struct rpc_xprt *xprt, struct rpc_task *task);
void			xprt_release_xprt_cong(struct rpc_xprt *xprt, struct rpc_task *task);
void			xprt_release(struct rpc_task *task);
int			xprt_destroy(struct rpc_xprt *xprt);

static inline u32 *xprt_skip_transport_header(struct rpc_xprt *xprt, u32 *p)
{
	return p + xprt->tsh_size;
}

/*
 * Transport switch helper functions
 */
void			xprt_set_retrans_timeout_def(struct rpc_task *task);
void			xprt_set_retrans_timeout_rtt(struct rpc_task *task);
void			xprt_wake_pending_tasks(struct rpc_xprt *xprt, int status);
void			xprt_wait_for_buffer_space(struct rpc_task *task);
void			xprt_write_space(struct rpc_xprt *xprt);
void			xprt_update_rtt(struct rpc_task *task);
void			xprt_adjust_cwnd(struct rpc_task *task, int result);
struct rpc_rqst *	xprt_lookup_rqst(struct rpc_xprt *xprt, u32 xid);
void			xprt_complete_rqst(struct rpc_task *task, int copied);
void			xprt_release_rqst_cong(struct rpc_task *task);
void			xprt_disconnect(struct rpc_xprt *xprt);
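/*
 * A rough sketch of how a transport's reply handler typically drives
 * the helpers above (the real handlers live in the transport code, and
 * details such as RTT updates vary by transport):
 *
 *	spin_lock(&xprt->transport_lock);
 *	rovr = xprt_lookup_rqst(xprt, xid);
 *	if (rovr != NULL) {
 *		... copy the reply into rovr->rq_private_buf ...
 *		xprt_adjust_cwnd(rovr->rq_task, copied);
 *		xprt_complete_rqst(rovr->rq_task, copied);
 *	}
 *	spin_unlock(&xprt->transport_lock);
 */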

/*
 * Socket transport setup operations
 */
int			xs_setup_udp(struct rpc_xprt *xprt, struct rpc_timeout *to);
int			xs_setup_tcp(struct rpc_xprt *xprt, struct rpc_timeout *to);

/*
 * Reserved bit positions in xprt->state
 */
#define XPRT_LOCKED		(0)
#define XPRT_CONNECTED		(1)
#define XPRT_CONNECTING		(2)
#define XPRT_CLOSE_WAIT		(3)

static inline void xprt_set_connected(struct rpc_xprt *xprt)
{
	set_bit(XPRT_CONNECTED, &xprt->state);
}

static inline void xprt_clear_connected(struct rpc_xprt *xprt)
{
	clear_bit(XPRT_CONNECTED, &xprt->state);
}

static inline int xprt_connected(struct rpc_xprt *xprt)
{
	return test_bit(XPRT_CONNECTED, &xprt->state);
}

static inline int xprt_test_and_set_connected(struct rpc_xprt *xprt)
{
	return test_and_set_bit(XPRT_CONNECTED, &xprt->state);
}

static inline int xprt_test_and_clear_connected(struct rpc_xprt *xprt)
{
	return test_and_clear_bit(XPRT_CONNECTED, &xprt->state);
}

static inline void xprt_clear_connecting(struct rpc_xprt *xprt)
{
	smp_mb__before_clear_bit();
	clear_bit(XPRT_CONNECTING, &xprt->state);
	smp_mb__after_clear_bit();
}

static inline int xprt_connecting(struct rpc_xprt *xprt)
{
	return test_bit(XPRT_CONNECTING, &xprt->state);
}

static inline int xprt_test_and_set_connecting(struct rpc_xprt *xprt)
{
	return test_and_set_bit(XPRT_CONNECTING, &xprt->state);
}

#endif /* __KERNEL__ */

#endif /* _LINUX_SUNRPC_XPRT_H */