gss_krb5_wrap.c revision 4ab4b0bedda7d41c63cef98cd5d6cabada460936
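/*
 * Kerberos v5 (RFC 1964) per-message token wrap/unwrap for RPCSEC_GSS.
 */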
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/jiffies.h>
#include <linux/sunrpc/gss_krb5.h>
#include <linux/random.h>
#include <linux/pagemap.h>
#include <linux/crypto.h>

#ifdef RPC_DEBUG
# define RPCDBG_FACILITY	RPCDBG_AUTH
#endif

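/*
 * Number of pad bytes needed to bring (length) up to a multiple of the
 * cipher blocksize.  Note the result is always in the range 1..blocksize:
 * an already-aligned message gets a full block of padding, so the receiver
 * can always recover the pad length from the final byte (every pad byte
 * holds the pad length, PKCS#5-style).  E.g. length 13 -> 3 pad bytes
 * (16 total); length 16 -> 8 pad bytes (24 total).
 */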
static inline int
gss_krb5_padding(int blocksize, int length)
{
	/* Most of the code is block-size independent but currently we
	 * use only 8: */
	BUG_ON(blocksize != 8);
	return 8 - (length & 7);
}

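/*
 * Append the pad bytes to whichever kvec currently holds the end of the
 * message: the tail if the buffer has page or tail data, otherwise the
 * head.  Each pad byte is set to the pad length itself, which is what
 * gss_krb5_remove_padding() relies on when unwrapping.
 */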
static inline void
gss_krb5_add_padding(struct xdr_buf *buf, int offset, int blocksize)
{
	int padding = gss_krb5_padding(blocksize, buf->len - offset);
	char *p;
	struct kvec *iov;

	if (buf->page_len || buf->tail[0].iov_len)
		iov = &buf->tail[0];
	else
		iov = &buf->head[0];
	p = iov->iov_base + iov->iov_len;
	iov->iov_len += padding;
	buf->len += padding;
	memset(p, padding, padding);
}

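/*
 * Read the last byte of the decrypted message to find the pad length,
 * then shrink the buffer to drop the padding.  The last byte may live
 * in the head, in one of the pages, or in the tail, so each region is
 * checked in turn.
 */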
static inline int
gss_krb5_remove_padding(struct xdr_buf *buf, int blocksize)
{
	u8 *ptr;
	u8 pad;
	size_t len = buf->len;

	if (len <= buf->head[0].iov_len) {
		pad = *(u8 *)(buf->head[0].iov_base + len - 1);
		if (pad > buf->head[0].iov_len)
			return -EINVAL;
		buf->head[0].iov_len -= pad;
		goto out;
	} else
		len -= buf->head[0].iov_len;
	if (len <= buf->page_len) {
		unsigned int last = (buf->page_base + len - 1)
					>> PAGE_CACHE_SHIFT;
		unsigned int offset = (buf->page_base + len - 1)
					& (PAGE_CACHE_SIZE - 1);
		ptr = kmap_atomic(buf->pages[last], KM_USER0);
		pad = *(ptr + offset);
		kunmap_atomic(ptr, KM_USER0);
		goto out;
	} else
		len -= buf->page_len;
	BUG_ON(len > buf->tail[0].iov_len);
	pad = *(u8 *)(buf->tail[0].iov_base + len - 1);
out:
	/* XXX: NOTE: we do not adjust the page lengths--they represent
	 * a range of data in the real filesystem page cache, and we need
	 * to know that range so the xdr code can properly place read data.
	 * However, adjusting the head length, as we do above, is harmless.
	 * In the case of a request that fits into a single page, the server
	 * also uses length and head length together to determine the original
	 * start of the request when it copies the request for deferral; so
	 * it's easier on the server if we adjust head and tail length in
	 * tandem.  It's not really a problem that we don't fool with the page
	 * and tail lengths, though--at worst, badly formed xdr might lead the
	 * server to attempt to parse the padding.
	 * XXX: Document all these weird requirements for gss mechanism
	 * wrap/unwrap functions. */
	if (pad > blocksize)
		return -EINVAL;
	if (buf->len > pad)
		buf->len -= pad;
	else
		return -EINVAL;
	return 0;
}

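/*
 * The confounder fills the first cipher block of the plaintext.  Since
 * the DES-CBC encryption below chains every block off the previous
 * ciphertext block, a varying first block ensures that identical
 * payloads still produce different ciphertext (RFC 1964, section 1.2.2).
 */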
static inline void
make_confounder(char *p, int blocksize)
{
	static u64 i = 0;
	u64 *q = (u64 *)p;

	/* rfc1964 claims this should be "random".  But all that's really
	 * necessary is that it be unique.  And not even that is necessary in
	 * our case since our "gssapi" implementation exists only to support
	 * rpcsec_gss, so we know that the only buffers we will ever encrypt
	 * already begin with a unique sequence number.  Just to hedge my bets
	 * I'll make a half-hearted attempt at something unique, but ensuring
	 * uniqueness would mean worrying about atomicity and rollover, and I
	 * don't care enough. */

	BUG_ON(blocksize != 8);
	*q = i++;
}

/* Assumptions: the head and tail of inbuf are ours to play with.
 * The pages, however, may be real pages in the page cache, and we replace
 * them with scratch pages from **pages before writing to them. */
/* XXX: obviously the above should be documentation of the wrap interface,
 * and shouldn't be in this kerberos-specific file. */

/* XXX factor out common code with seal/unseal. */

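/*
 * Layout of the 24-byte krb5 header built below (RFC 1964):
 *
 *	krb5_hdr +  0: TOK_ID (0x02 0x01 for a wrap token)
 *	krb5_hdr +  2: SGN_ALG (DES MAC MD5)
 *	krb5_hdr +  4: SEAL_ALG (DES)
 *	krb5_hdr +  6: filler (0xff 0xff)
 *	krb5_hdr +  8: SND_SEQ (encrypted sequence number)
 *	krb5_hdr + 16: SGN_CKSUM (checksum over header and plaintext)
 *	krb5_hdr + 24: confounder, followed by the (padded) message
 *
 * The whole thing is preceded by the generic token framing added by
 * g_make_token_header().
 */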
u32
gss_wrap_kerberos(struct gss_ctx *ctx, int offset,
		struct xdr_buf *buf, struct page **pages)
{
	struct krb5_ctx		*kctx = ctx->internal_ctx_id;
	char			cksumdata[16];
	struct xdr_netobj	md5cksum = {.len = 0, .data = cksumdata};
	int			blocksize = 0, plainlen;
	unsigned char		*ptr, *krb5_hdr, *msg_start;
	s32			now;
	int			headlen;
	struct page		**tmp_pages;
	u32			seq_send;

	dprintk("RPC:       gss_wrap_kerberos\n");

	now = get_seconds();

	blocksize = crypto_blkcipher_blocksize(kctx->enc);
	gss_krb5_add_padding(buf, offset, blocksize);
	BUG_ON((buf->len - offset) % blocksize);
	plainlen = blocksize + buf->len - offset;

	headlen = g_token_size(&kctx->mech_used, 24 + plainlen) -
						(buf->len - offset);

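	/* headlen is everything that must be prepended to the padded
	 * payload: the generic token framing, the 24-byte krb5 header,
	 * and one blocksize of confounder.  plainlen is what will be
	 * encrypted: confounder + payload + padding. */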
	ptr = buf->head[0].iov_base + offset;
	/* shift data to make room for header. */
	/* XXX Would be cleverer to encrypt while copying. */
	/* XXX bounds checking, slack, etc. */
	memmove(ptr + headlen, ptr, buf->head[0].iov_len - offset);
	buf->head[0].iov_len += headlen;
	buf->len += headlen;
	BUG_ON((buf->len - offset - headlen) % blocksize);

	g_make_token_header(&kctx->mech_used, 24 + plainlen, &ptr);

	*ptr++ = (unsigned char) ((KG_TOK_WRAP_MSG >> 8) & 0xff);
	*ptr++ = (unsigned char) (KG_TOK_WRAP_MSG & 0xff);

	/* ptr now at byte 2 of header described in rfc 1964, section 1.2.1: */
	krb5_hdr = ptr - 2;
	msg_start = krb5_hdr + 24;

	*(__be16 *)(krb5_hdr + 2) = htons(SGN_ALG_DES_MAC_MD5);
	memset(krb5_hdr + 4, 0xff, 4);
	*(__be16 *)(krb5_hdr + 4) = htons(SEAL_ALG_DES);

	make_confounder(msg_start, blocksize);

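	/* Compute SGN_CKSUM: MD5 over the first 8 bytes of the krb5
	 * header plus the plaintext from the confounder on, then DES-CBC
	 * encrypt the digest (with a null IV, the final block is a
	 * CBC-MAC of the digest) and keep the last 8 bytes. */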
	/* XXXJBF: UGH!: */
	tmp_pages = buf->pages;
	buf->pages = pages;
	if (make_checksum("md5", krb5_hdr, 8, buf,
				offset + headlen - blocksize, &md5cksum))
		return GSS_S_FAILURE;
	buf->pages = tmp_pages;

	if (krb5_encrypt(kctx->seq, NULL, md5cksum.data,
			  md5cksum.data, md5cksum.len))
		return GSS_S_FAILURE;
	memcpy(krb5_hdr + 16, md5cksum.data + md5cksum.len - 8, 8);

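	/* Per RFC 1964, SND_SEQ is the sequence number plus direction
	 * bytes, DES-CBC encrypted using the SGN_CKSUM field
	 * (krb5_hdr + 16) as the IV -- which is why the checksum had to
	 * be written first. */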
	spin_lock(&krb5_seq_lock);
	seq_send = kctx->seq_send++;
	spin_unlock(&krb5_seq_lock);

	/* XXX would probably be more efficient to compute checksum
	 * and encrypt at the same time: */
	if (krb5_make_seq_num(kctx->seq, kctx->initiate ? 0 : 0xff,
			       seq_send, krb5_hdr + 16, krb5_hdr + 8))
		return GSS_S_FAILURE;

	if (gss_encrypt_xdr_buf(kctx->enc, buf, offset + headlen - blocksize,
									pages))
		return GSS_S_FAILURE;

	return (kctx->endtime < now) ? GSS_S_CONTEXT_EXPIRED : GSS_S_COMPLETE;
}

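/*
 * Reverse of gss_wrap_kerberos(): verify the token framing and header
 * fields, decrypt in place, check the signature and sequence number,
 * then slide the plaintext back to where the caller expects it and
 * strip the padding.
 */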
u32
gss_unwrap_kerberos(struct gss_ctx *ctx, int offset, struct xdr_buf *buf)
{
	struct krb5_ctx		*kctx = ctx->internal_ctx_id;
	int			signalg;
	int			sealalg;
	char			cksumdata[16];
	struct xdr_netobj	md5cksum = {.len = 0, .data = cksumdata};
	s32			now;
	int			direction;
	s32			seqnum;
	unsigned char		*ptr;
	int			bodysize;
	void			*data_start, *orig_start;
	int			data_len;
	int			blocksize;

	dprintk("RPC:       gss_unwrap_kerberos\n");

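	/* After g_verify_token_header(), ptr points at TOK_ID; once the
	 * two TOK_ID bytes are consumed, the header fields sit at fixed
	 * offsets from ptr: signalg at 0, sealalg at 2, filler at 4,
	 * SND_SEQ at 6, SGN_CKSUM at 14, and the confounder at 22. */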
	ptr = (u8 *)buf->head[0].iov_base + offset;
	if (g_verify_token_header(&kctx->mech_used, &bodysize, &ptr,
					buf->len - offset))
		return GSS_S_DEFECTIVE_TOKEN;

	if ((*ptr++ != ((KG_TOK_WRAP_MSG >> 8) & 0xff)) ||
	    (*ptr++ != (KG_TOK_WRAP_MSG & 0xff)))
		return GSS_S_DEFECTIVE_TOKEN;

	/* XXX sanity-check bodysize?? */

	/* get the sign and seal algorithms */

	signalg = ptr[0] + (ptr[1] << 8);
	if (signalg != SGN_ALG_DES_MAC_MD5)
		return GSS_S_DEFECTIVE_TOKEN;

	sealalg = ptr[2] + (ptr[3] << 8);
	if (sealalg != SEAL_ALG_DES)
		return GSS_S_DEFECTIVE_TOKEN;

	if ((ptr[4] != 0xff) || (ptr[5] != 0xff))
		return GSS_S_DEFECTIVE_TOKEN;

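	/* Decrypt everything from the confounder (ptr + 22) to the end
	 * of the buffer in place, then recompute the checksum over the
	 * 8 header bytes plus the decrypted plaintext and compare it
	 * against the SGN_CKSUM field. */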
	if (gss_decrypt_xdr_buf(kctx->enc, buf,
			ptr + 22 - (unsigned char *)buf->head[0].iov_base))
		return GSS_S_DEFECTIVE_TOKEN;

	if (make_checksum("md5", ptr - 2, 8, buf,
		 ptr + 22 - (unsigned char *)buf->head[0].iov_base, &md5cksum))
		return GSS_S_FAILURE;

	if (krb5_encrypt(kctx->seq, NULL, md5cksum.data,
			   md5cksum.data, md5cksum.len))
		return GSS_S_FAILURE;

	if (memcmp(md5cksum.data + 8, ptr + 14, 8))
		return GSS_S_BAD_SIG;

	/* it got through unscathed.  Make sure the context is unexpired. */

	now = get_seconds();

	if (now > kctx->endtime)
		return GSS_S_CONTEXT_EXPIRED;

	/* do sequencing checks */

	if (krb5_get_seq_num(kctx->seq, ptr + 14, ptr + 6, &direction,
				    &seqnum))
		return GSS_S_BAD_SIG;

	if ((kctx->initiate && direction != 0xff) ||
	    (!kctx->initiate && direction != 0))
		return GSS_S_BAD_SIG;

	/* Copy the data back to the right position.  XXX: Would probably be
	 * better to copy and decrypt at the same time. */

	blocksize = crypto_blkcipher_blocksize(kctx->enc);
	data_start = ptr + 22 + blocksize;
	orig_start = buf->head[0].iov_base + offset;
	data_len = (buf->head[0].iov_base + buf->head[0].iov_len) - data_start;
	memmove(orig_start, data_start, data_len);
	buf->head[0].iov_len -= (data_start - orig_start);
	buf->len -= (data_start - orig_start);

	if (gss_krb5_remove_padding(buf, blocksize))
		return GSS_S_DEFECTIVE_TOKEN;

	return GSS_S_COMPLETE;
}