gss_krb5_wrap.c revision 863a24882ed0a57ff25daaf39885f3a47b706e4b
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/jiffies.h>
#include <linux/sunrpc/gss_krb5.h>
#include <linux/random.h>
#include <linux/pagemap.h>
#include <linux/crypto.h>

#ifdef RPC_DEBUG
# define RPCDBG_FACILITY	RPCDBG_AUTH
#endif

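/*
 * The pad bytes appended by gss_krb5_add_padding() follow the usual CBC
 * convention: N bytes are added, each with value N, where 1 <= N <=
 * blocksize.  E.g. a 13-byte payload with an 8-byte block gets three
 * bytes of 0x03, and an exact multiple gets a full block of 0x08, so the
 * pad length can always be recovered from the last byte of the buffer.
 */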
static inline int
gss_krb5_padding(int blocksize, int length)
{
	/* Most of the code is block-size independent but currently we
	 * use only 8: */
	BUG_ON(blocksize != 8);
	return 8 - (length & 7);
}

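/* The padding must land at the very end of the XDR buffer, which is the
 * tail whenever any page or tail data is present; the caller is expected
 * to have left enough slack in the chosen kvec for a full block of pad. */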
static inline void
gss_krb5_add_padding(struct xdr_buf *buf, int offset, int blocksize)
{
	int padding = gss_krb5_padding(blocksize, buf->len - offset);
	char *p;
	struct kvec *iov;

	if (buf->page_len || buf->tail[0].iov_len)
		iov = &buf->tail[0];
	else
		iov = &buf->head[0];
	p = iov->iov_base + iov->iov_len;
	iov->iov_len += padding;
	buf->len += padding;
	memset(p, padding, padding);
}

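/* Read the pad count from the last byte of the buffer, which may live in
 * the head, the pages, or the tail, and shrink buf->len accordingly. */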
static inline int
gss_krb5_remove_padding(struct xdr_buf *buf, int blocksize)
{
	u8 *ptr;
	u8 pad;
	size_t len = buf->len;

	if (len <= buf->head[0].iov_len) {
		pad = *(u8 *)(buf->head[0].iov_base + len - 1);
		if (pad > buf->head[0].iov_len)
			return -EINVAL;
		buf->head[0].iov_len -= pad;
		goto out;
	} else
		len -= buf->head[0].iov_len;
	if (len <= buf->page_len) {
		unsigned int last = (buf->page_base + len - 1)
					>> PAGE_CACHE_SHIFT;
		unsigned int offset = (buf->page_base + len - 1)
					& (PAGE_CACHE_SIZE - 1);
		ptr = kmap_atomic(buf->pages[last], KM_USER0);
		pad = *(ptr + offset);
		kunmap_atomic(ptr, KM_USER0);
		goto out;
	} else
		len -= buf->page_len;
	BUG_ON(len > buf->tail[0].iov_len);
	pad = *(u8 *)(buf->tail[0].iov_base + len - 1);
out:
	/* XXX: NOTE: we do not adjust the page lengths--they represent
	 * a range of data in the real filesystem page cache, and we need
	 * to know that range so the xdr code can properly place read data.
	 * However adjusting the head length, as we do above, is harmless.
	 * In the case of a request that fits into a single page, the server
	 * also uses length and head length together to determine the original
	 * start of the request to copy the request for deferral; so it's
	 * easier on the server if we adjust head and tail length in tandem.
	 * It's not really a problem that we don't fool with the page and
	 * tail lengths, though--at worst badly formed xdr might lead the
	 * server to attempt to parse the padding.
	 * XXX: Document all these weird requirements for gss mechanism
	 * wrap/unwrap functions. */
	if (pad > blocksize)
		return -EINVAL;
	if (buf->len > pad)
		buf->len -= pad;
	else
		return -EINVAL;
	return 0;
}

static void
make_confounder(char *p, u32 conflen)
{
	static u64 i = 0;
	u64 *q = (u64 *)p;

	/* rfc1964 claims this should be "random".  But all that's really
	 * necessary is that it be unique.  And not even that is necessary in
	 * our case since our "gssapi" implementation exists only to support
	 * rpcsec_gss, so we know that the only buffers we will ever encrypt
	 * already begin with a unique sequence number.  Just to hedge my bets
	 * I'll make a half-hearted attempt at something unique, but ensuring
	 * uniqueness would mean worrying about atomicity and rollover, and I
	 * don't care enough. */

	/* initialize to random value */
	if (i == 0) {
		i = random32();
		i = (i << 32) | random32();
	}

	switch (conflen) {
	case 16:
		*q++ = i++;
		/* fall through */
	case 8:
		*q++ = i++;
		break;
	default:
		BUG();
	}
}

/* Assumptions: the head and tail of inbuf are ours to play with.
 * The pages, however, may be real pages in the page cache and we replace
 * them with scratch pages from **pages before writing to them. */
/* XXX: obviously the above should be documentation of the wrap interface,
 * and shouldn't be in this kerberos-specific file. */

/* XXX factor out common code with seal/unseal. */

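/*
 * Layout of the RFC 1964 (section 1.2.1) wrap token that
 * gss_wrap_kerberos() constructs, byte offsets relative to ptr below:
 *
 *	 0.. 1	TOK_ID		0x02 0x01 (KG_TOK_WRAP_MSG)
 *	 2.. 3	SGN_ALG		DES MAC MD5
 *	 4.. 5	SEAL_ALG	DES
 *	 6.. 7	filler		0xff 0xff
 *	 8..15	SND_SEQ		encrypted sequence number
 *	16..23	SGN_CKSUM	checksum over header and plaintext
 *	24..	confounder, data and padding, all DES-CBC encrypted
 */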
u32
gss_wrap_kerberos(struct gss_ctx *ctx, int offset,
		struct xdr_buf *buf, struct page **pages)
{
	struct krb5_ctx		*kctx = ctx->internal_ctx_id;
	char			cksumdata[16];
	struct xdr_netobj	md5cksum = {.len = 0, .data = cksumdata};
	int			blocksize = 0, plainlen;
	unsigned char		*ptr, *msg_start;
	s32			now;
	int			headlen;
	struct page		**tmp_pages;
	u32			seq_send;

	dprintk("RPC:       gss_wrap_kerberos\n");

	now = get_seconds();

	blocksize = crypto_blkcipher_blocksize(kctx->enc);
	gss_krb5_add_padding(buf, offset, blocksize);
	BUG_ON((buf->len - offset) % blocksize);
	plainlen = blocksize + buf->len - offset;

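	/* plainlen covers everything that will be encrypted: one block of
	 * confounder plus the (now padded) payload.  headlen is the room we
	 * must open up in the head for everything that precedes the caller's
	 * data: the generic gss token framing plus the 24 bytes of krb5
	 * header and checksum plus the confounder. */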
	headlen = g_token_size(&kctx->mech_used,
		GSS_KRB5_TOK_HDR_LEN + 8 + plainlen) - (buf->len - offset);

	ptr = buf->head[0].iov_base + offset;
	/* shift data to make room for header. */
	/* XXX Would be cleverer to encrypt while copying. */
	/* XXX bounds checking, slack, etc. */
	memmove(ptr + headlen, ptr, buf->head[0].iov_len - offset);
	buf->head[0].iov_len += headlen;
	buf->len += headlen;
	BUG_ON((buf->len - offset - headlen) % blocksize);

	g_make_token_header(&kctx->mech_used,
				GSS_KRB5_TOK_HDR_LEN + 8 + plainlen, &ptr);

	/* ptr now at header described in rfc 1964, section 1.2.1: */
	ptr[0] = (unsigned char) ((KG_TOK_WRAP_MSG >> 8) & 0xff);
	ptr[1] = (unsigned char) (KG_TOK_WRAP_MSG & 0xff);

	msg_start = ptr + GSS_KRB5_TOK_HDR_LEN + 8;

	*(__be16 *)(ptr + 2) = htons(SGN_ALG_DES_MAC_MD5);
	memset(ptr + 4, 0xff, 4);
	*(__be16 *)(ptr + 4) = htons(SEAL_ALG_DES);

	make_confounder(msg_start, blocksize);

	/* XXXJBF: UGH!: on the client the caller has already pointed
	 * buf->pages at the scratch pages that will receive the ciphertext,
	 * while the plaintext input still lives in *pages, so swap the
	 * plaintext in for the duration of the checksum: */
	tmp_pages = buf->pages;
	buf->pages = pages;
	if (make_checksum("md5", ptr, 8, buf,
				offset + headlen - blocksize, &md5cksum))
		return GSS_S_FAILURE;
	buf->pages = tmp_pages;

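	/* SGN_CKSUM for DES MAC MD5: DES-CBC encrypt the 16-byte MD5 with a
	 * zero IV and keep only the final 8-byte block (a DES CBC-MAC): */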
	if (krb5_encrypt(kctx->seq, NULL, md5cksum.data,
			  md5cksum.data, md5cksum.len))
		return GSS_S_FAILURE;
	memcpy(ptr + GSS_KRB5_TOK_HDR_LEN, md5cksum.data + md5cksum.len - 8, 8);

	spin_lock(&krb5_seq_lock);
	seq_send = kctx->seq_send++;
	spin_unlock(&krb5_seq_lock);

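	/* SND_SEQ: the sequence number and its direction bytes (0x00 from
	 * the initiator, 0xff from the acceptor) are DES-CBC encrypted into
	 * ptr + 8, using the SGN_CKSUM just written as the IV: */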
	/* XXX would probably be more efficient to compute checksum
	 * and encrypt at the same time: */
	if (krb5_make_seq_num(kctx->seq, kctx->initiate ? 0 : 0xff,
			      seq_send, ptr + GSS_KRB5_TOK_HDR_LEN, ptr + 8))
		return GSS_S_FAILURE;

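	/* Encrypt everything from the confounder (one block before the
	 * shifted data) to the end of the buffer: */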
	if (gss_encrypt_xdr_buf(kctx->enc, buf, offset + headlen - blocksize,
									pages))
		return GSS_S_FAILURE;

	return (kctx->endtime < now) ? GSS_S_CONTEXT_EXPIRED : GSS_S_COMPLETE;
}

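/*
 * The inverse of gss_wrap_kerberos(): verify the token header, decrypt
 * the body, check the checksum and sequence number, then slide the
 * plaintext back to where the caller expects it and strip the padding.
 */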
u32
gss_unwrap_kerberos(struct gss_ctx *ctx, int offset, struct xdr_buf *buf)
{
	struct krb5_ctx		*kctx = ctx->internal_ctx_id;
	int			signalg;
	int			sealalg;
	char			cksumdata[16];
	struct xdr_netobj	md5cksum = {.len = 0, .data = cksumdata};
	s32			now;
	int			direction;
	s32			seqnum;
	unsigned char		*ptr;
	int			bodysize;
	void			*data_start, *orig_start;
	int			data_len;
	int			blocksize;

	dprintk("RPC:       gss_unwrap_kerberos\n");

	ptr = (u8 *)buf->head[0].iov_base + offset;
	if (g_verify_token_header(&kctx->mech_used, &bodysize, &ptr,
					buf->len - offset))
		return GSS_S_DEFECTIVE_TOKEN;

	if ((ptr[0] != ((KG_TOK_WRAP_MSG >> 8) & 0xff)) ||
	    (ptr[1] !=  (KG_TOK_WRAP_MSG & 0xff)))
		return GSS_S_DEFECTIVE_TOKEN;

	/* XXX sanity-check bodysize?? */

	/* get the sign and seal algorithms */

	signalg = ptr[2] + (ptr[3] << 8);
	if (signalg != SGN_ALG_DES_MAC_MD5)
		return GSS_S_DEFECTIVE_TOKEN;

	sealalg = ptr[4] + (ptr[5] << 8);
	if (sealalg != SEAL_ALG_DES)
		return GSS_S_DEFECTIVE_TOKEN;

	if ((ptr[6] != 0xff) || (ptr[7] != 0xff))
		return GSS_S_DEFECTIVE_TOKEN;

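	/* The encrypted region (confounder + data + padding) starts right
	 * after the SGN_CKSUM field; convert that address to an offset into
	 * buf before decrypting and checksumming: */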
	if (gss_decrypt_xdr_buf(kctx->enc, buf,
			ptr + GSS_KRB5_TOK_HDR_LEN + 8 - (unsigned char *)buf->head[0].iov_base))
		return GSS_S_DEFECTIVE_TOKEN;

	if (make_checksum("md5", ptr, 8, buf,
		 ptr + GSS_KRB5_TOK_HDR_LEN + 8 - (unsigned char *)buf->head[0].iov_base, &md5cksum))
		return GSS_S_FAILURE;

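	/* Recompute the DES CBC-MAC of the MD5, as in wrap, and compare its
	 * final block against the SGN_CKSUM field: */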
	if (krb5_encrypt(kctx->seq, NULL, md5cksum.data,
			   md5cksum.data, md5cksum.len))
		return GSS_S_FAILURE;

	if (memcmp(md5cksum.data + 8, ptr + GSS_KRB5_TOK_HDR_LEN, 8))
		return GSS_S_BAD_SIG;

	/* it got through unscathed.  Make sure the context is unexpired */

	now = get_seconds();

	if (now > kctx->endtime)
		return GSS_S_CONTEXT_EXPIRED;

	/* do sequencing checks */

	if (krb5_get_seq_num(kctx->seq, ptr + GSS_KRB5_TOK_HDR_LEN, ptr + 8,
				    &direction, &seqnum))
		return GSS_S_BAD_SIG;

	if ((kctx->initiate && direction != 0xff) ||
	    (!kctx->initiate && direction != 0))
		return GSS_S_BAD_SIG;

	/* Copy the data back to the right position.  XXX: Would probably be
	 * better to copy and decrypt at the same time. */

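	/* Only the head needs to move; the page data was decrypted in
	 * place: */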
	blocksize = crypto_blkcipher_blocksize(kctx->enc);
	data_start = ptr + GSS_KRB5_TOK_HDR_LEN + 8 + blocksize;
	orig_start = buf->head[0].iov_base + offset;
	data_len = (buf->head[0].iov_base + buf->head[0].iov_len) - data_start;
	memmove(orig_start, data_start, data_len);
	buf->head[0].iov_len -= (data_start - orig_start);
	buf->len -= (data_start - orig_start);

	if (gss_krb5_remove_padding(buf, blocksize))
		return GSS_S_DEFECTIVE_TOKEN;

	return GSS_S_COMPLETE;
}