gss_krb5_wrap.c revision 67f97d83bfcca9d9f8fbeeb14e7c644a82b24e12
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/jiffies.h>
#include <linux/sunrpc/gss_krb5.h>
#include <linux/random.h>
#include <linux/pagemap.h>
#include <asm/scatterlist.h>
#include <linux/crypto.h>

#ifdef RPC_DEBUG
# define RPCDBG_FACILITY	RPCDBG_AUTH
#endif

static inline int
gss_krb5_padding(int blocksize, int length)
{
	/* Most of the code is block-size independent but currently we
	 * use only 8: */
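	/* E.g. length 21 gets 3 bytes of padding, and an already aligned
	 * length 24 gets a full 8-byte block, so there is always at
	 * least one pad byte. */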
	BUG_ON(blocksize != 8);
	return 8 - (length & 7);
}

static inline void
gss_krb5_add_padding(struct xdr_buf *buf, int offset, int blocksize)
{
	int padding = gss_krb5_padding(blocksize, buf->len - offset);
	char *p;
	struct kvec *iov;

	if (buf->page_len || buf->tail[0].iov_len)
		iov = &buf->tail[0];
	else
		iov = &buf->head[0];
	p = iov->iov_base + iov->iov_len;
	iov->iov_len += padding;
	buf->len += padding;
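	/* Each pad byte holds the pad count itself (PKCS#5-style), so
	 * gss_krb5_remove_padding() can recover it from the last byte: */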
	memset(p, padding, padding);
}

static inline int
gss_krb5_remove_padding(struct xdr_buf *buf, int blocksize)
{
	u8 *ptr;
	u8 pad;
	size_t len = buf->len;

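	/* The pad count lives in the very last byte of the buffer,
	 * which may fall in the head, the page array, or the tail: */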
	if (len <= buf->head[0].iov_len) {
		pad = *(u8 *)(buf->head[0].iov_base + len - 1);
		if (pad > buf->head[0].iov_len)
			return -EINVAL;
		buf->head[0].iov_len -= pad;
		goto out;
	} else
		len -= buf->head[0].iov_len;
	if (len <= buf->page_len) {
		unsigned int last = (buf->page_base + len - 1)
					>> PAGE_CACHE_SHIFT;
		unsigned int offset = (buf->page_base + len - 1)
					& (PAGE_CACHE_SIZE - 1);
		ptr = kmap_atomic(buf->pages[last], KM_USER0);
		pad = *(ptr + offset);
		kunmap_atomic(ptr, KM_USER0);
		goto out;
	} else
		len -= buf->page_len;
	BUG_ON(len > buf->tail[0].iov_len);
	pad = *(u8 *)(buf->tail[0].iov_base + len - 1);
out:
	/* XXX: NOTE: we do not adjust the page lengths--they represent
	 * a range of data in the real filesystem page cache, and we need
	 * to know that range so the xdr code can properly place read data.
	 * However adjusting the head length, as we do above, is harmless.
	 * In the case of a request that fits into a single page, the server
	 * also uses length and head length together to determine the original
	 * start of the request to copy the request for deferral; so it's
	 * easier on the server if we adjust head and total length in tandem.
	 * It's not really a problem that we don't fool with the page and
	 * tail lengths, though--at worst badly formed xdr might lead the
	 * server to attempt to parse the padding.
	 * XXX: Document all these weird requirements for gss mechanism
	 * wrap/unwrap functions. */
	if (pad > blocksize)
		return -EINVAL;
	if (buf->len > pad)
		buf->len -= pad;
	else
		return -EINVAL;
	return 0;
}

static inline void
make_confounder(char *p, int blocksize)
{
	static u64 i = 0;
	u64 *q = (u64 *)p;

	/* rfc1964 claims this should be "random".  But all that's really
	 * necessary is that it be unique.  And not even that is necessary in
	 * our case since our "gssapi" implementation exists only to support
	 * rpcsec_gss, so we know that the only buffers we will ever encrypt
	 * already begin with a unique sequence number.  Just to hedge my bets
	 * I'll make a half-hearted attempt at something unique, but ensuring
	 * uniqueness would mean worrying about atomicity and rollover, and I
	 * don't care enough. */

	BUG_ON(blocksize != 8);
	*q = i++;
}

/* Assumptions: the head and tail of inbuf are ours to play with.
 * The pages, however, may be real pages in the page cache and we replace
 * them with scratch pages from **pages before writing to them. */
/* XXX: obviously the above should be documentation of wrap interface,
 * and shouldn't be in this kerberos-specific file. */

/* XXX factor out common code with seal/unseal. */

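/* The wrap token emitted below follows rfc 1964, section 1.2.2 (offsets
 * relative to krb5_hdr):
 *
 *	 0..1	TOK_ID		0x02 0x01 (KG_TOK_WRAP_MSG)
 *	 2..3	SGN_ALG		DES MAC MD5
 *	 4..5	SEAL_ALG	DES
 *	 6..7	filler		0xff 0xff
 *	 8..15	SND_SEQ		encrypted sequence number
 *	16..23	SGN_CKSUM	checksum of the first 8 header bytes
 *				plus the plaintext
 *	24..	confounder followed by the padded, encrypted data
 */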
u32
gss_wrap_kerberos(struct gss_ctx *ctx, int offset,
		struct xdr_buf *buf, struct page **pages)
{
	struct krb5_ctx		*kctx = ctx->internal_ctx_id;
	char			cksumdata[16];
	struct xdr_netobj	md5cksum = {.len = 0, .data = cksumdata};
	int			blocksize = 0, plainlen;
	unsigned char		*ptr, *krb5_hdr, *msg_start;
	s32			now;
	int			headlen;
	struct page		**tmp_pages;
	u32			seq_send;

	dprintk("RPC:       gss_wrap_kerberos\n");

	now = get_seconds();

	blocksize = crypto_blkcipher_blocksize(kctx->enc);
	gss_krb5_add_padding(buf, offset, blocksize);
	BUG_ON((buf->len - offset) % blocksize);
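	/* The plaintext to be encrypted is one block of confounder
	 * followed by the padded message: */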
	plainlen = blocksize + buf->len - offset;

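	/* headlen is the room needed in front of the data: the generic
	 * token framing plus the 24-byte krb5 header.  g_token_size()
	 * itself accounts for the two TOK_ID bytes, hence the 22: */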
	headlen = g_token_size(&kctx->mech_used, 22 + plainlen) -
						(buf->len - offset);

	ptr = buf->head[0].iov_base + offset;
	/* shift data to make room for header. */
	/* XXX Would be cleverer to encrypt while copying. */
	/* XXX bounds checking, slack, etc. */
	memmove(ptr + headlen, ptr, buf->head[0].iov_len - offset);
	buf->head[0].iov_len += headlen;
	buf->len += headlen;
	BUG_ON((buf->len - offset - headlen) % blocksize);

	g_make_token_header(&kctx->mech_used, 22 + plainlen, &ptr);

	*ptr++ = (unsigned char) ((KG_TOK_WRAP_MSG>>8)&0xff);
	*ptr++ = (unsigned char) (KG_TOK_WRAP_MSG&0xff);

	/* ptr now at byte 2 of header described in rfc 1964, section 1.2.2: */
	krb5_hdr = ptr - 2;
	msg_start = krb5_hdr + 24;

	*(__be16 *)(krb5_hdr + 2) = htons(SGN_ALG_DES_MAC_MD5);
	memset(krb5_hdr + 4, 0xff, 4);
	*(__be16 *)(krb5_hdr + 4) = htons(SEAL_ALG_DES);

	make_confounder(msg_start, blocksize);

	/* XXXJBF: UGH!: */
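	/* The checksum must cover the plaintext, which is still in the
	 * caller's **pages; buf->pages point at the scratch pages that
	 * will receive the ciphertext, so swap them in just while
	 * computing the checksum: */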
	tmp_pages = buf->pages;
	buf->pages = pages;
	if (make_checksum("md5", krb5_hdr, 8, buf,
				offset + headlen - blocksize, &md5cksum))
		return GSS_S_FAILURE;
	buf->pages = tmp_pages;

	if (krb5_encrypt(kctx->seq, NULL, md5cksum.data,
			  md5cksum.data, md5cksum.len))
		return GSS_S_FAILURE;
	memcpy(krb5_hdr + 16,
	       md5cksum.data + md5cksum.len - KRB5_CKSUM_LENGTH,
	       KRB5_CKSUM_LENGTH);

	spin_lock(&krb5_seq_lock);
	seq_send = kctx->seq_send++;
	spin_unlock(&krb5_seq_lock);

	/* XXX would probably be more efficient to compute checksum
	 * and encrypt at the same time: */
	if ((krb5_make_seq_num(kctx->seq, kctx->initiate ? 0 : 0xff,
			       seq_send, krb5_hdr + 16, krb5_hdr + 8)))
		return GSS_S_FAILURE;

	if (gss_encrypt_xdr_buf(kctx->enc, buf, offset + headlen - blocksize,
									pages))
		return GSS_S_FAILURE;

	return (kctx->endtime < now) ? GSS_S_CONTEXT_EXPIRED : GSS_S_COMPLETE;
}

u32
gss_unwrap_kerberos(struct gss_ctx *ctx, int offset, struct xdr_buf *buf)
{
	struct krb5_ctx		*kctx = ctx->internal_ctx_id;
	int			signalg;
	int			sealalg;
	char			cksumdata[16];
	struct xdr_netobj	md5cksum = {.len = 0, .data = cksumdata};
	s32			now;
	int			direction;
	s32			seqnum;
	unsigned char		*ptr;
	int			bodysize;
	void			*data_start, *orig_start;
	int			data_len;
	int			blocksize;

	dprintk("RPC:       gss_unwrap_kerberos\n");

	ptr = (u8 *)buf->head[0].iov_base + offset;
	if (g_verify_token_header(&kctx->mech_used, &bodysize, &ptr,
					buf->len - offset))
		return GSS_S_DEFECTIVE_TOKEN;

	if ((*ptr++ != ((KG_TOK_WRAP_MSG>>8)&0xff)) ||
	    (*ptr++ !=  (KG_TOK_WRAP_MSG    &0xff))   )
		return GSS_S_DEFECTIVE_TOKEN;

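	/* ptr now points at the SGN_ALG field, i.e. byte 2 of the krb5
	 * header, so SND_SEQ sits at ptr + 6, SGN_CKSUM at ptr + 14,
	 * and the confounder at ptr + 22. */
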
	/* XXX sanity-check bodysize?? */

	/* get the sign and seal algorithms */

	signalg = ptr[0] + (ptr[1] << 8);
	if (signalg != SGN_ALG_DES_MAC_MD5)
		return GSS_S_DEFECTIVE_TOKEN;

	sealalg = ptr[2] + (ptr[3] << 8);
	if (sealalg != SEAL_ALG_DES)
		return GSS_S_DEFECTIVE_TOKEN;

	if ((ptr[4] != 0xff) || (ptr[5] != 0xff))
		return GSS_S_DEFECTIVE_TOKEN;

	if (gss_decrypt_xdr_buf(kctx->enc, buf,
			ptr + 22 - (unsigned char *)buf->head[0].iov_base))
		return GSS_S_DEFECTIVE_TOKEN;

	if (make_checksum("md5", ptr - 2, 8, buf,
		 ptr + 22 - (unsigned char *)buf->head[0].iov_base, &md5cksum))
		return GSS_S_FAILURE;

	if (krb5_encrypt(kctx->seq, NULL, md5cksum.data,
			   md5cksum.data, md5cksum.len))
		return GSS_S_FAILURE;

	if (memcmp(md5cksum.data + 8, ptr + 14, 8))
		return GSS_S_BAD_SIG;

	/* It got through unscathed.  Make sure the context is unexpired. */

	now = get_seconds();

	if (now > kctx->endtime)
		return GSS_S_CONTEXT_EXPIRED;

	/* do sequencing checks */

	if (krb5_get_seq_num(kctx->seq, ptr + 14, ptr + 6, &direction,
				    &seqnum))
		return GSS_S_BAD_SIG;

	if ((kctx->initiate && direction != 0xff) ||
	    (!kctx->initiate && direction != 0))
		return GSS_S_BAD_SIG;

	/* Copy the data back to the right position.  XXX: Would probably be
	 * better to copy and encrypt at the same time. */

	blocksize = crypto_blkcipher_blocksize(kctx->enc);
	data_start = ptr + 22 + blocksize;
	orig_start = buf->head[0].iov_base + offset;
	data_len = (buf->head[0].iov_base + buf->head[0].iov_len) - data_start;
	memmove(orig_start, data_start, data_len);
	buf->head[0].iov_len -= (data_start - orig_start);
	buf->len -= (data_start - orig_start);

	if (gss_krb5_remove_padding(buf, blocksize))
		return GSS_S_DEFECTIVE_TOKEN;

	return GSS_S_COMPLETE;
}