/*
 * COPYRIGHT (c) 2008
 * The Regents of the University of Michigan
 * ALL RIGHTS RESERVED
 *
 * Permission is granted to use, copy, create derivative works
 * and redistribute this software and such derivative works
 * for any purpose, so long as the name of The University of
 * Michigan is not used in any advertising or publicity
 * pertaining to the use or distribution of this software
 * without specific, written prior authorization.  If the
 * above copyright notice or any other identification of the
 * University of Michigan is included in any copy of any
 * portion of this software, then the disclaimer below must
 * also be included.
 *
 * THIS SOFTWARE IS PROVIDED AS IS, WITHOUT REPRESENTATION
 * FROM THE UNIVERSITY OF MICHIGAN AS TO ITS FITNESS FOR ANY
 * PURPOSE, AND WITHOUT WARRANTY BY THE UNIVERSITY OF
 * MICHIGAN OF ANY KIND, EITHER EXPRESS OR IMPLIED, INCLUDING
 * WITHOUT LIMITATION THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE
 * REGENTS OF THE UNIVERSITY OF MICHIGAN SHALL NOT BE LIABLE
 * FOR ANY DAMAGES, INCLUDING SPECIAL, INDIRECT, INCIDENTAL, OR
 * CONSEQUENTIAL DAMAGES, WITH RESPECT TO ANY CLAIM ARISING
 * OUT OF OR IN CONNECTION WITH THE USE OF THE SOFTWARE, EVEN
 * IF IT HAS BEEN OR IS HEREAFTER ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGES.
 */

#include <linux/types.h>
#include <linux/jiffies.h>
#include <linux/sunrpc/gss_krb5.h>
#include <linux/random.h>
#include <linux/pagemap.h>
#include <linux/crypto.h>

#ifdef RPC_DEBUG
# define RPCDBG_FACILITY	RPCDBG_AUTH
#endif

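/*
 * Compute the number of pad bytes needed to bring @length up to a
 * multiple of @blocksize.  Per RFC 1964, section 1.2.2.3, the result is
 * always between 1 and blocksize: data that is already block-aligned
 * still gets a full block of padding, and every pad byte carries the
 * pad count (e.g. blocksize 8, length 13 -> 3 pad bytes of 0x03, as
 * written by the memset() in gss_krb5_add_padding() below).
 */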
static inline int
gss_krb5_padding(int blocksize, int length)
{
	return blocksize - (length % blocksize);
}

static inline void
gss_krb5_add_padding(struct xdr_buf *buf, int offset, int blocksize)
{
	int padding = gss_krb5_padding(blocksize, buf->len - offset);
	char *p;
	struct kvec *iov;

	if (buf->page_len || buf->tail[0].iov_len)
		iov = &buf->tail[0];
	else
		iov = &buf->head[0];
	p = iov->iov_base + iov->iov_len;
	iov->iov_len += padding;
	buf->len += padding;
	memset(p, padding, padding);
}

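/*
 * Strip the RFC 1964 padding after decryption.  The pad count is read
 * from the last byte of the plaintext, which may live in the head, in
 * one of the pages, or in the tail of the xdr_buf, so each region is
 * checked in turn; an implausible pad count is rejected with -EINVAL.
 */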
static inline int
gss_krb5_remove_padding(struct xdr_buf *buf, int blocksize)
{
	u8 *ptr;
	u8 pad;
	size_t len = buf->len;

	if (len <= buf->head[0].iov_len) {
		pad = *(u8 *)(buf->head[0].iov_base + len - 1);
		if (pad > buf->head[0].iov_len)
			return -EINVAL;
		buf->head[0].iov_len -= pad;
		goto out;
	} else
		len -= buf->head[0].iov_len;
	if (len <= buf->page_len) {
		unsigned int last = (buf->page_base + len - 1)
					>> PAGE_CACHE_SHIFT;
		unsigned int offset = (buf->page_base + len - 1)
					& (PAGE_CACHE_SIZE - 1);
		ptr = kmap_atomic(buf->pages[last]);
		pad = *(ptr + offset);
		kunmap_atomic(ptr);
		goto out;
	} else
		len -= buf->page_len;
	BUG_ON(len > buf->tail[0].iov_len);
	pad = *(u8 *)(buf->tail[0].iov_base + len - 1);
out:
	/* XXX: NOTE: we do not adjust the page lengths--they represent
	 * a range of data in the real filesystem page cache, and we need
	 * to know that range so the xdr code can properly place read data.
	 * However, adjusting the head length, as we do above, is harmless.
	 * In the case of a request that fits into a single page, the server
	 * also uses the length and head length together to determine the
	 * original start of the request when copying the request for
	 * deferral; so it's easier on the server if we adjust head and tail
	 * length in tandem.
	 * It's not really a problem that we don't fool with the page and
	 * tail lengths, though--at worst badly formed xdr might lead the
	 * server to attempt to parse the padding.
	 * XXX: Document all these weird requirements for gss mechanism
	 * wrap/unwrap functions. */
	if (pad > blocksize)
		return -EINVAL;
	if (buf->len > pad)
		buf->len -= pad;
	else
		return -EINVAL;
	return 0;
}

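/*
 * Fill @p with @conflen bytes of confounder.  The confounder is a block
 * of (nominally random) data prepended to the plaintext before
 * encryption so that identical messages do not produce identical
 * ciphertext under the same key.
 */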
void
gss_krb5_make_confounder(char *p, u32 conflen)
{
	static u64 i = 0;
	u64 *q = (u64 *)p;

	/* rfc1964 claims this should be "random".  But all that's really
	 * necessary is that it be unique.  And not even that is necessary in
	 * our case since our "gssapi" implementation exists only to support
	 * rpcsec_gss, so we know that the only buffers we will ever encrypt
	 * already begin with a unique sequence number.  Just to hedge my bets
	 * I'll make a half-hearted attempt at something unique, but ensuring
	 * uniqueness would mean worrying about atomicity and rollover, and I
	 * don't care enough. */

	/* initialize to random value */
	if (i == 0) {
		i = prandom_u32();
		i = (i << 32) | prandom_u32();
	}

	switch (conflen) {
	case 16:
		*q++ = i++;
		/* fall through */
	case 8:
		*q++ = i++;
		break;
	default:
		BUG();
	}
}

/* Assumptions: the head and tail of inbuf are ours to play with.
 * The pages, however, may be real pages in the page cache and we replace
 * them with scratch pages from **pages before writing to them. */
/* XXX: obviously the above should be documentation of the wrap interface,
 * and shouldn't be in this kerberos-specific file. */

/* XXX factor out common code with seal/unseal. */

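/*
 * Build an RFC 1964 (v1) wrap token around the data in @buf starting at
 * @offset: the plaintext is padded and prefixed with a confounder, a
 * token header and checksum are constructed in the space opened up in
 * the head, and finally confounder + data + padding are encrypted in
 * place.
 */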
static u32
gss_wrap_kerberos_v1(struct krb5_ctx *kctx, int offset,
		struct xdr_buf *buf, struct page **pages)
{
	char			cksumdata[GSS_KRB5_MAX_CKSUM_LEN];
	struct xdr_netobj	md5cksum = {.len = sizeof(cksumdata),
					    .data = cksumdata};
	int			blocksize = 0, plainlen;
	unsigned char		*ptr, *msg_start;
	s32			now;
	int			headlen;
	struct page		**tmp_pages;
	u32			seq_send;
	u8			*cksumkey;
	u32			conflen = kctx->gk5e->conflen;

	dprintk("RPC:       %s\n", __func__);

	now = get_seconds();

	blocksize = crypto_blkcipher_blocksize(kctx->enc);
	gss_krb5_add_padding(buf, offset, blocksize);
	BUG_ON((buf->len - offset) % blocksize);
	plainlen = conflen + buf->len - offset;

	headlen = g_token_size(&kctx->mech_used,
		GSS_KRB5_TOK_HDR_LEN + kctx->gk5e->cksumlength + plainlen) -
		(buf->len - offset);

	ptr = buf->head[0].iov_base + offset;
	/* shift data to make room for header. */
	xdr_extend_head(buf, offset, headlen);

	/* XXX Would be cleverer to encrypt while copying. */
	BUG_ON((buf->len - offset - headlen) % blocksize);

	g_make_token_header(&kctx->mech_used,
				GSS_KRB5_TOK_HDR_LEN +
				kctx->gk5e->cksumlength + plainlen, &ptr);

	/* ptr now at header described in rfc 1964, section 1.2.1: */
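	/*
	 *	Byte 0..1	TOK_ID		0x02 0x01 (KG_TOK_WRAP_MSG)
	 *	Byte 2..3	SGN_ALG		little-endian
	 *	Byte 4..5	SEAL_ALG	little-endian
	 *	Byte 6..7	Filler		0xff 0xff
	 *	Byte 8..15	SND_SEQ		encrypted sequence number
	 * followed by the SGN_CKSUM (kctx->gk5e->cksumlength bytes).
	 */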
	ptr[0] = (unsigned char) ((KG_TOK_WRAP_MSG >> 8) & 0xff);
	ptr[1] = (unsigned char) (KG_TOK_WRAP_MSG & 0xff);

	msg_start = ptr + GSS_KRB5_TOK_HDR_LEN + kctx->gk5e->cksumlength;

	*(__le16 *)(ptr + 2) = cpu_to_le16(kctx->gk5e->signalg);
	memset(ptr + 4, 0xff, 4);
	*(__le16 *)(ptr + 4) = cpu_to_le16(kctx->gk5e->sealalg);

	gss_krb5_make_confounder(msg_start, conflen);

	if (kctx->gk5e->keyed_cksum)
		cksumkey = kctx->cksum;
	else
		cksumkey = NULL;

	/* XXXJBF: UGH!: */
	tmp_pages = buf->pages;
	buf->pages = pages;
	if (make_checksum(kctx, ptr, 8, buf, offset + headlen - conflen,
					cksumkey, KG_USAGE_SEAL, &md5cksum))
		return GSS_S_FAILURE;
	buf->pages = tmp_pages;

	memcpy(ptr + GSS_KRB5_TOK_HDR_LEN, md5cksum.data, md5cksum.len);

	spin_lock(&krb5_seq_lock);
	seq_send = kctx->seq_send++;
	spin_unlock(&krb5_seq_lock);

	/* XXX would probably be more efficient to compute checksum
	 * and encrypt at the same time: */
	if ((krb5_make_seq_num(kctx, kctx->seq, kctx->initiate ? 0 : 0xff,
			       seq_send, ptr + GSS_KRB5_TOK_HDR_LEN, ptr + 8)))
		return GSS_S_FAILURE;

	if (kctx->enctype == ENCTYPE_ARCFOUR_HMAC) {
		struct crypto_blkcipher *cipher;
		int err;

		cipher = crypto_alloc_blkcipher(kctx->gk5e->encrypt_name, 0,
						CRYPTO_ALG_ASYNC);
		if (IS_ERR(cipher))
			return GSS_S_FAILURE;

		krb5_rc4_setup_enc_key(kctx, cipher, seq_send);

		err = gss_encrypt_xdr_buf(cipher, buf,
					  offset + headlen - conflen, pages);
		crypto_free_blkcipher(cipher);
		if (err)
			return GSS_S_FAILURE;
	} else {
		if (gss_encrypt_xdr_buf(kctx->enc, buf,
					offset + headlen - conflen, pages))
			return GSS_S_FAILURE;
	}

	return (kctx->endtime < now) ? GSS_S_CONTEXT_EXPIRED : GSS_S_COMPLETE;
}

static u32
gss_unwrap_kerberos_v1(struct krb5_ctx *kctx, int offset, struct xdr_buf *buf)
{
	int			signalg;
	int			sealalg;
	char			cksumdata[GSS_KRB5_MAX_CKSUM_LEN];
	struct xdr_netobj	md5cksum = {.len = sizeof(cksumdata),
					    .data = cksumdata};
	s32			now;
	int			direction;
	s32			seqnum;
	unsigned char		*ptr;
	int			bodysize;
	void			*data_start, *orig_start;
	int			data_len;
	int			blocksize;
	u32			conflen = kctx->gk5e->conflen;
	int			crypt_offset;
	u8			*cksumkey;

	dprintk("RPC:       gss_unwrap_kerberos\n");

	ptr = (u8 *)buf->head[0].iov_base + offset;
	if (g_verify_token_header(&kctx->mech_used, &bodysize, &ptr,
					buf->len - offset))
		return GSS_S_DEFECTIVE_TOKEN;

	if ((ptr[0] != ((KG_TOK_WRAP_MSG >> 8) & 0xff)) ||
	    (ptr[1] !=  (KG_TOK_WRAP_MSG & 0xff)))
		return GSS_S_DEFECTIVE_TOKEN;

	/* XXX sanity-check bodysize?? */

	/* get the sign and seal algorithms */

	signalg = ptr[2] + (ptr[3] << 8);
	if (signalg != kctx->gk5e->signalg)
		return GSS_S_DEFECTIVE_TOKEN;

	sealalg = ptr[4] + (ptr[5] << 8);
	if (sealalg != kctx->gk5e->sealalg)
		return GSS_S_DEFECTIVE_TOKEN;

	if ((ptr[6] != 0xff) || (ptr[7] != 0xff))
		return GSS_S_DEFECTIVE_TOKEN;

	/*
	 * Data starts after token header and checksum.  ptr points
	 * to the beginning of the token header
	 */
	crypt_offset = ptr + (GSS_KRB5_TOK_HDR_LEN + kctx->gk5e->cksumlength) -
					(unsigned char *)buf->head[0].iov_base;

	/*
	 * Need plaintext seqnum to derive encryption key for arcfour-hmac
	 */
	if (krb5_get_seq_num(kctx, ptr + GSS_KRB5_TOK_HDR_LEN,
			     ptr + 8, &direction, &seqnum))
		return GSS_S_BAD_SIG;

	if ((kctx->initiate && direction != 0xff) ||
	    (!kctx->initiate && direction != 0))
		return GSS_S_BAD_SIG;

	if (kctx->enctype == ENCTYPE_ARCFOUR_HMAC) {
		struct crypto_blkcipher *cipher;
		int err;

		cipher = crypto_alloc_blkcipher(kctx->gk5e->encrypt_name, 0,
						CRYPTO_ALG_ASYNC);
		if (IS_ERR(cipher))
			return GSS_S_FAILURE;

		krb5_rc4_setup_enc_key(kctx, cipher, seqnum);

		err = gss_decrypt_xdr_buf(cipher, buf, crypt_offset);
		crypto_free_blkcipher(cipher);
		if (err)
			return GSS_S_DEFECTIVE_TOKEN;
	} else {
		if (gss_decrypt_xdr_buf(kctx->enc, buf, crypt_offset))
			return GSS_S_DEFECTIVE_TOKEN;
	}

	if (kctx->gk5e->keyed_cksum)
		cksumkey = kctx->cksum;
	else
		cksumkey = NULL;

	if (make_checksum(kctx, ptr, 8, buf, crypt_offset,
					cksumkey, KG_USAGE_SEAL, &md5cksum))
		return GSS_S_FAILURE;

	if (memcmp(md5cksum.data, ptr + GSS_KRB5_TOK_HDR_LEN,
						kctx->gk5e->cksumlength))
		return GSS_S_BAD_SIG;

	/* it got through unscathed.  Make sure the context is unexpired */

	now = get_seconds();

	if (now > kctx->endtime)
		return GSS_S_CONTEXT_EXPIRED;

	/* do sequencing checks */

	/* Copy the data back to the right position.  XXX: Would probably be
	 * better to copy and encrypt at the same time. */

	blocksize = crypto_blkcipher_blocksize(kctx->enc);
	data_start = ptr + (GSS_KRB5_TOK_HDR_LEN + kctx->gk5e->cksumlength) +
					conflen;
	orig_start = buf->head[0].iov_base + offset;
	data_len = (buf->head[0].iov_base + buf->head[0].iov_len) - data_start;
	memmove(orig_start, data_start, data_len);
	buf->head[0].iov_len -= (data_start - orig_start);
	buf->len -= (data_start - orig_start);

	if (gss_krb5_remove_padding(buf, blocksize))
		return GSS_S_DEFECTIVE_TOKEN;

	return GSS_S_COMPLETE;
}

/*
 * We can shift data by up to LOCAL_BUF_LEN bytes in a pass.  If we need
 * to do more than that, we shift repeatedly.  Kevin Coffman reports
 * seeing 28 bytes as the value used by Microsoft clients and servers
 * with AES, so this constant is chosen to allow handling 28 in one pass
 * without using too much stack space.
 *
 * If that proves to be a problem, perhaps we could use a more clever
 * algorithm.
 */
#define LOCAL_BUF_LEN 32u

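/*
 * Rotate the contents of @buf left by @shift bytes (shift <=
 * LOCAL_BUF_LEN): save the first @shift bytes, slide the remainder of
 * the buffer down, then append the saved bytes at the end.
 */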
static void rotate_buf_a_little(struct xdr_buf *buf, unsigned int shift)
{
	char head[LOCAL_BUF_LEN];
	char tmp[LOCAL_BUF_LEN];
	unsigned int this_len, i;

	BUG_ON(shift > LOCAL_BUF_LEN);

	read_bytes_from_xdr_buf(buf, 0, head, shift);
	for (i = 0; i + shift < buf->len; i += LOCAL_BUF_LEN) {
		this_len = min(LOCAL_BUF_LEN, buf->len - (i + shift));
		read_bytes_from_xdr_buf(buf, i + shift, tmp, this_len);
		write_bytes_to_xdr_buf(buf, i, tmp, this_len);
	}
	write_bytes_to_xdr_buf(buf, buf->len - shift, head, shift);
}

static void _rotate_left(struct xdr_buf *buf, unsigned int shift)
{
	unsigned int shifted = 0;
	unsigned int this_shift;

	shift %= buf->len;
	while (shifted < shift) {
		this_shift = min(shift - shifted, LOCAL_BUF_LEN);
		rotate_buf_a_little(buf, this_shift);
		shifted += this_shift;
	}
}

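/*
 * Rotate the token body left by @shift bytes, starting @base bytes into
 * @buf.  This undoes the "rotation count" (RRC, RFC 4121 section 4.2.5)
 * by which a sender may right-rotate the encrypted data to relocate the
 * trailing checksum.
 */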
static void rotate_left(u32 base, struct xdr_buf *buf, unsigned int shift)
{
	struct xdr_buf subbuf;

	xdr_buf_subsegment(buf, &subbuf, base, buf->len - base);
	_rotate_left(&subbuf, shift);
}

static u32
gss_wrap_kerberos_v2(struct krb5_ctx *kctx, u32 offset,
		     struct xdr_buf *buf, struct page **pages)
{
	int		blocksize;
	u8		*ptr, *plainhdr;
	s32		now;
	u8		flags = 0x00;
	__be16		*be16ptr, ec = 0;
	__be64		*be64ptr;
	u32		err;

	dprintk("RPC:       %s\n", __func__);

	if (kctx->gk5e->encrypt_v2 == NULL)
		return GSS_S_FAILURE;

	/* make room for gss token header */
	if (xdr_extend_head(buf, offset, GSS_KRB5_TOK_HDR_LEN))
		return GSS_S_FAILURE;

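	/*
	 * RFC 4121, section 4.2.6.2: the 16-byte wrap token header is
	 *
	 *	Byte 0..1	TOK_ID		0x05 0x04 (KG2_TOK_WRAP)
	 *	Byte 2		Flags
	 *	Byte 3		Filler		0xff
	 *	Byte 4..5	EC		extra count, big-endian
	 *	Byte 6..7	RRC		rotation count, big-endian
	 *	Byte 8..15	SND_SEQ		sequence number, big-endian
	 */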
	/* construct gss token header */
	ptr = plainhdr = buf->head[0].iov_base + offset;
	*ptr++ = (unsigned char) ((KG2_TOK_WRAP >> 8) & 0xff);
	*ptr++ = (unsigned char) (KG2_TOK_WRAP & 0xff);

	if ((kctx->flags & KRB5_CTX_FLAG_INITIATOR) == 0)
		flags |= KG2_TOKEN_FLAG_SENTBYACCEPTOR;
	if ((kctx->flags & KRB5_CTX_FLAG_ACCEPTOR_SUBKEY) != 0)
		flags |= KG2_TOKEN_FLAG_ACCEPTORSUBKEY;
	/* We always do confidentiality in wrap tokens */
	flags |= KG2_TOKEN_FLAG_SEALED;

	*ptr++ = flags;
	*ptr++ = 0xff;
	be16ptr = (__be16 *)ptr;

	blocksize = crypto_blkcipher_blocksize(kctx->acceptor_enc);
	*be16ptr++ = cpu_to_be16(ec);
	/* "inner" token header always uses 0 for RRC */
	*be16ptr++ = cpu_to_be16(0);

	be64ptr = (__be64 *)be16ptr;
	spin_lock(&krb5_seq_lock);
	*be64ptr = cpu_to_be64(kctx->seq_send64++);
	spin_unlock(&krb5_seq_lock);

	err = (*kctx->gk5e->encrypt_v2)(kctx, offset, buf, ec, pages);
	if (err)
		return err;

	now = get_seconds();
	return (kctx->endtime < now) ? GSS_S_CONTEXT_EXPIRED : GSS_S_COMPLETE;
}

static u32
gss_unwrap_kerberos_v2(struct krb5_ctx *kctx, int offset, struct xdr_buf *buf)
{
	s32		now;
	u64		seqnum;
	u8		*ptr;
	u8		flags = 0x00;
	u16		ec, rrc;
	int		err;
	u32		headskip, tailskip;
	u8		decrypted_hdr[GSS_KRB5_TOK_HDR_LEN];
	unsigned int	movelen;

	dprintk("RPC:       %s\n", __func__);

	if (kctx->gk5e->decrypt_v2 == NULL)
		return GSS_S_FAILURE;

	ptr = buf->head[0].iov_base + offset;

	if (be16_to_cpu(*((__be16 *)ptr)) != KG2_TOK_WRAP)
		return GSS_S_DEFECTIVE_TOKEN;

	flags = ptr[2];
	if ((!kctx->initiate && (flags & KG2_TOKEN_FLAG_SENTBYACCEPTOR)) ||
	    (kctx->initiate && !(flags & KG2_TOKEN_FLAG_SENTBYACCEPTOR)))
		return GSS_S_BAD_SIG;

	if ((flags & KG2_TOKEN_FLAG_SEALED) == 0) {
		dprintk("%s: token missing expected sealed flag\n", __func__);
		return GSS_S_DEFECTIVE_TOKEN;
	}

	if (ptr[3] != 0xff)
		return GSS_S_DEFECTIVE_TOKEN;

	ec = be16_to_cpup((__be16 *)(ptr + 4));
	rrc = be16_to_cpup((__be16 *)(ptr + 6));

	seqnum = be64_to_cpup((__be64 *)(ptr + 8));

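	/* undo the sender's right rotation (see rotate_left above) */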
	if (rrc != 0)
		rotate_left(offset + 16, buf, rrc);

	err = (*kctx->gk5e->decrypt_v2)(kctx, offset, buf,
					&headskip, &tailskip);
	if (err)
		return GSS_S_FAILURE;

	/*
	 * Retrieve the decrypted gss token header and verify
	 * it against the original
	 */
	err = read_bytes_from_xdr_buf(buf,
				buf->len - GSS_KRB5_TOK_HDR_LEN - tailskip,
				decrypted_hdr, GSS_KRB5_TOK_HDR_LEN);
	if (err) {
		dprintk("%s: error %u getting decrypted_hdr\n", __func__, err);
		return GSS_S_FAILURE;
	}
	if (memcmp(ptr, decrypted_hdr, 6) ||
	    memcmp(ptr + 8, decrypted_hdr + 8, 8)) {
		dprintk("%s: token hdr, plaintext hdr mismatch!\n", __func__);
		return GSS_S_FAILURE;
	}

	/* do sequencing checks */

	/* it got through unscathed.  Make sure the context is unexpired */
	now = get_seconds();
	if (now > kctx->endtime)
		return GSS_S_CONTEXT_EXPIRED;

	/*
	 * Move the head data back to the right position in xdr_buf.
	 * We ignore any "ec" data since it might be in the head or
	 * the tail, and we really don't need to deal with it.
	 * Note that buf->head[0].iov_len may indicate the available
	 * head buffer space rather than that actually occupied.
	 */
	movelen = min_t(unsigned int, buf->head[0].iov_len, buf->len);
	movelen -= offset + GSS_KRB5_TOK_HDR_LEN + headskip;
	BUG_ON(offset + GSS_KRB5_TOK_HDR_LEN + headskip + movelen >
							buf->head[0].iov_len);
	memmove(ptr, ptr + GSS_KRB5_TOK_HDR_LEN + headskip, movelen);
	buf->head[0].iov_len -= GSS_KRB5_TOK_HDR_LEN + headskip;
	buf->len -= GSS_KRB5_TOK_HDR_LEN + headskip;

	/* Trim off the checksum blob */
	xdr_buf_trim(buf, GSS_KRB5_TOK_HDR_LEN + tailskip);
	return GSS_S_COMPLETE;
}

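/*
 * gss_wrap_kerberos() and gss_unwrap_kerberos() dispatch on the
 * context's enctype: the DES, DES3 and RC4 (arcfour-hmac) contexts use
 * RFC 1964-style v1 tokens, while the AES contexts use the RFC 4121 v2
 * token format.
 */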
u32
gss_wrap_kerberos(struct gss_ctx *gctx, int offset,
		  struct xdr_buf *buf, struct page **pages)
{
	struct krb5_ctx	*kctx = gctx->internal_ctx_id;

	switch (kctx->enctype) {
	default:
		BUG();
	case ENCTYPE_DES_CBC_RAW:
	case ENCTYPE_DES3_CBC_RAW:
	case ENCTYPE_ARCFOUR_HMAC:
		return gss_wrap_kerberos_v1(kctx, offset, buf, pages);
	case ENCTYPE_AES128_CTS_HMAC_SHA1_96:
	case ENCTYPE_AES256_CTS_HMAC_SHA1_96:
		return gss_wrap_kerberos_v2(kctx, offset, buf, pages);
	}
}

u32
gss_unwrap_kerberos(struct gss_ctx *gctx, int offset, struct xdr_buf *buf)
{
	struct krb5_ctx	*kctx = gctx->internal_ctx_id;

	switch (kctx->enctype) {
	default:
		BUG();
	case ENCTYPE_DES_CBC_RAW:
	case ENCTYPE_DES3_CBC_RAW:
	case ENCTYPE_ARCFOUR_HMAC:
		return gss_unwrap_kerberos_v1(kctx, offset, buf);
	case ENCTYPE_AES128_CTS_HMAC_SHA1_96:
	case ENCTYPE_AES256_CTS_HMAC_SHA1_96:
		return gss_unwrap_kerberos_v2(kctx, offset, buf);
	}
}