gss_krb5_wrap.c revision de9c17eb4a912c9028f7b470eb80815144883b26
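The file implements the wrap (privacy) and unwrap operations for the kernel's RPCSEC_GSS Kerberos mechanism: gss_wrap_kerberos_v1()/gss_unwrap_kerberos_v1() produce and consume the rfc 1964-style tokens used with the DES and DES3 "raw" enctypes, gss_wrap_kerberos_v2()/gss_unwrap_kerberos_v2() handle the rfc 4121 tokens used with the AES enctypes, and gss_wrap_kerberos()/gss_unwrap_kerberos() dispatch between them on the context's enctype.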
/*
 * COPYRIGHT (c) 2008
 * The Regents of the University of Michigan
 * ALL RIGHTS RESERVED
 *
 * Permission is granted to use, copy, create derivative works
 * and redistribute this software and such derivative works
 * for any purpose, so long as the name of The University of
 * Michigan is not used in any advertising or publicity
 * pertaining to the use of distribution of this software
 * without specific, written prior authorization. If the
 * above copyright notice or any other identification of the
 * University of Michigan is included in any copy of any
 * portion of this software, then the disclaimer below must
 * also be included.
 *
 * THIS SOFTWARE IS PROVIDED AS IS, WITHOUT REPRESENTATION
 * FROM THE UNIVERSITY OF MICHIGAN AS TO ITS FITNESS FOR ANY
 * PURPOSE, AND WITHOUT WARRANTY BY THE UNIVERSITY OF
 * MICHIGAN OF ANY KIND, EITHER EXPRESS OR IMPLIED, INCLUDING
 * WITHOUT LIMITATION THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE
 * REGENTS OF THE UNIVERSITY OF MICHIGAN SHALL NOT BE LIABLE
 * FOR ANY DAMAGES, INCLUDING SPECIAL, INDIRECT, INCIDENTAL, OR
 * CONSEQUENTIAL DAMAGES, WITH RESPECT TO ANY CLAIM ARISING
 * OUT OF OR IN CONNECTION WITH THE USE OF THE SOFTWARE, EVEN
 * IF IT HAS BEEN OR IS HEREAFTER ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGES.
 */

#include <linux/types.h>
#include <linux/jiffies.h>
#include <linux/sunrpc/gss_krb5.h>
#include <linux/random.h>
#include <linux/pagemap.h>
#include <linux/crypto.h>

#ifdef RPC_DEBUG
# define RPCDBG_FACILITY	RPCDBG_AUTH
#endif

static inline int
gss_krb5_padding(int blocksize, int length)
{
	return blocksize - (length % blocksize);
}

static inline void
gss_krb5_add_padding(struct xdr_buf *buf, int offset, int blocksize)
{
	int padding = gss_krb5_padding(blocksize, buf->len - offset);
	char *p;
	struct kvec *iov;

	if (buf->page_len || buf->tail[0].iov_len)
		iov = &buf->tail[0];
	else
		iov = &buf->head[0];
	p = iov->iov_base + iov->iov_len;
	iov->iov_len += padding;
	buf->len += padding;
	memset(p, padding, padding);
}

static inline int
gss_krb5_remove_padding(struct xdr_buf *buf, int blocksize)
{
	u8 *ptr;
	u8 pad;
	size_t len = buf->len;

	if (len <= buf->head[0].iov_len) {
		pad = *(u8 *)(buf->head[0].iov_base + len - 1);
		if (pad > buf->head[0].iov_len)
			return -EINVAL;
		buf->head[0].iov_len -= pad;
		goto out;
	} else
		len -= buf->head[0].iov_len;
	if (len <= buf->page_len) {
		unsigned int last = (buf->page_base + len - 1)
					>> PAGE_CACHE_SHIFT;
		unsigned int offset = (buf->page_base + len - 1)
					& (PAGE_CACHE_SIZE - 1);
		ptr = kmap_atomic(buf->pages[last], KM_USER0);
		pad = *(ptr + offset);
		kunmap_atomic(ptr, KM_USER0);
		goto out;
	} else
		len -= buf->page_len;
	BUG_ON(len > buf->tail[0].iov_len);
	pad = *(u8 *)(buf->tail[0].iov_base + len - 1);
out:
	/* XXX: NOTE: we do not adjust the page lengths--they represent
	 * a range of data in the real filesystem page cache, and we need
	 * to know that range so the xdr code can properly place read data.
	 * However adjusting the head length, as we do above, is harmless.
	 * In the case of a request that fits into a single page, the server
	 * also uses length and head length together to determine the original
	 * start of the request to copy the request for deferal; so it's
	 * easier on the server if we adjust head and tail length in tandem.
	 * It's not really a problem that we don't fool with the page and
	 * tail lengths, though--at worst badly formed xdr might lead the
	 * server to attempt to parse the padding.
	 * XXX: Document all these weird requirements for gss mechanism
	 * wrap/unwrap functions. */
	if (pad > blocksize)
		return -EINVAL;
	if (buf->len > pad)
		buf->len -= pad;
	else
		return -EINVAL;
	return 0;
}

static void
make_confounder(char *p, u32 conflen)
{
	static u64 i = 0;
	u64 *q = (u64 *)p;

	/* rfc1964 claims this should be "random". But all that's really
	 * necessary is that it be unique. And not even that is necessary in
	 * our case since our "gssapi" implementation exists only to support
	 * rpcsec_gss, so we know that the only buffers we will ever encrypt
	 * already begin with a unique sequence number. Just to hedge my bets
	 * I'll make a half-hearted attempt at something unique, but ensuring
	 * uniqueness would mean worrying about atomicity and rollover, and I
	 * don't care enough. */

	/* initialize to random value */
	if (i == 0) {
		i = random32();
		i = (i << 32) | random32();
	}

	switch (conflen) {
	case 16:
		*q++ = i++;
		/* fall through */
	case 8:
		*q++ = i++;
		break;
	default:
		BUG();
	}
}

/* Assumptions: the head and tail of inbuf are ours to play with.
 * The pages, however, may be real pages in the page cache and we replace
 * them with scratch pages from **pages before writing to them. */
/* XXX: obviously the above should be documentation of wrap interface,
 * and shouldn't be in this kerberos-specific file. */

/* XXX factor out common code with seal/unseal. */

static u32
gss_wrap_kerberos_v1(struct krb5_ctx *kctx, int offset,
		struct xdr_buf *buf, struct page **pages)
{
	char cksumdata[GSS_KRB5_MAX_CKSUM_LEN];
	struct xdr_netobj md5cksum = {.len = sizeof(cksumdata),
				      .data = cksumdata};
	int blocksize = 0, plainlen;
	unsigned char *ptr, *msg_start;
	s32 now;
	int headlen;
	struct page **tmp_pages;
	u32 seq_send;
	u8 *cksumkey;

	dprintk("RPC: %s\n", __func__);

	now = get_seconds();

	blocksize = crypto_blkcipher_blocksize(kctx->enc);
	gss_krb5_add_padding(buf, offset, blocksize);
	BUG_ON((buf->len - offset) % blocksize);
	plainlen = blocksize + buf->len - offset;

	headlen = g_token_size(&kctx->mech_used,
		GSS_KRB5_TOK_HDR_LEN + kctx->gk5e->cksumlength + plainlen) -
		(buf->len - offset);

	ptr = buf->head[0].iov_base + offset;
	/* shift data to make room for header. */
	xdr_extend_head(buf, offset, headlen);

	/* XXX Would be cleverer to encrypt while copying. */
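	/*
	 * The v1 wrap token assembled below follows rfc 1964: the
	 * GSS_KRB5_TOK_HDR_LEN (16) octets at ptr hold TOK_ID (2),
	 * SGN_ALG (2), SEAL_ALG (2), filler 0xff 0xff (2) and SND_SEQ (8).
	 * They are followed by the checksum (gk5e->cksumlength octets),
	 * the confounder (one cipher block) at msg_start, and the padded
	 * payload; everything from the confounder onward is encrypted by
	 * gss_encrypt_xdr_buf() at the end of this function.
	 */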
	BUG_ON((buf->len - offset - headlen) % blocksize);

	g_make_token_header(&kctx->mech_used,
				GSS_KRB5_TOK_HDR_LEN +
				kctx->gk5e->cksumlength + plainlen, &ptr);


	/* ptr now at header described in rfc 1964, section 1.2.1: */
	ptr[0] = (unsigned char) ((KG_TOK_WRAP_MSG >> 8) & 0xff);
	ptr[1] = (unsigned char) (KG_TOK_WRAP_MSG & 0xff);

	msg_start = ptr + GSS_KRB5_TOK_HDR_LEN + kctx->gk5e->cksumlength;

	*(__be16 *)(ptr + 2) = cpu_to_le16(kctx->gk5e->signalg);
	memset(ptr + 4, 0xff, 4);
	*(__be16 *)(ptr + 4) = cpu_to_le16(kctx->gk5e->sealalg);

	make_confounder(msg_start, blocksize);

	if (kctx->gk5e->keyed_cksum)
		cksumkey = kctx->cksum;
	else
		cksumkey = NULL;

	/* XXXJBF: UGH!: */
	tmp_pages = buf->pages;
	buf->pages = pages;
	if (make_checksum(kctx, ptr, 8, buf, offset + headlen - blocksize,
					cksumkey, &md5cksum))
		return GSS_S_FAILURE;
	buf->pages = tmp_pages;

	memcpy(ptr + GSS_KRB5_TOK_HDR_LEN, md5cksum.data, md5cksum.len);

	spin_lock(&krb5_seq_lock);
	seq_send = kctx->seq_send++;
	spin_unlock(&krb5_seq_lock);

	/* XXX would probably be more efficient to compute checksum
	 * and encrypt at the same time: */
	if ((krb5_make_seq_num(kctx->seq, kctx->initiate ? 0 : 0xff,
			       seq_send, ptr + GSS_KRB5_TOK_HDR_LEN, ptr + 8)))
		return GSS_S_FAILURE;

	if (gss_encrypt_xdr_buf(kctx->enc, buf, offset + headlen - blocksize,
									pages))
		return GSS_S_FAILURE;

	return (kctx->endtime < now) ? GSS_S_CONTEXT_EXPIRED : GSS_S_COMPLETE;
}

static u32
gss_unwrap_kerberos_v1(struct krb5_ctx *kctx, int offset, struct xdr_buf *buf)
{
	int signalg;
	int sealalg;
	char cksumdata[GSS_KRB5_MAX_CKSUM_LEN];
	struct xdr_netobj md5cksum = {.len = sizeof(cksumdata),
				      .data = cksumdata};
	s32 now;
	int direction;
	s32 seqnum;
	unsigned char *ptr;
	int bodysize;
	void *data_start, *orig_start;
	int data_len;
	int blocksize;
	int crypt_offset;
	u8 *cksumkey;

	dprintk("RPC: gss_unwrap_kerberos\n");

	ptr = (u8 *)buf->head[0].iov_base + offset;
	if (g_verify_token_header(&kctx->mech_used, &bodysize, &ptr,
					buf->len - offset))
		return GSS_S_DEFECTIVE_TOKEN;

	if ((ptr[0] != ((KG_TOK_WRAP_MSG >> 8) & 0xff)) ||
	    (ptr[1] != (KG_TOK_WRAP_MSG & 0xff)))
		return GSS_S_DEFECTIVE_TOKEN;

	/* XXX sanity-check bodysize?? */

	/* get the sign and seal algorithms */

	signalg = ptr[2] + (ptr[3] << 8);
	if (signalg != kctx->gk5e->signalg)
		return GSS_S_DEFECTIVE_TOKEN;

	sealalg = ptr[4] + (ptr[5] << 8);
	if (sealalg != kctx->gk5e->sealalg)
		return GSS_S_DEFECTIVE_TOKEN;

	if ((ptr[6] != 0xff) || (ptr[7] != 0xff))
		return GSS_S_DEFECTIVE_TOKEN;

	/*
	 * Data starts after token header and checksum.  ptr points
	 * to the beginning of the token header
	 */
	crypt_offset = ptr + (GSS_KRB5_TOK_HDR_LEN + kctx->gk5e->cksumlength) -
					(unsigned char *)buf->head[0].iov_base;
	if (gss_decrypt_xdr_buf(kctx->enc, buf, crypt_offset))
		return GSS_S_DEFECTIVE_TOKEN;

	if (kctx->gk5e->keyed_cksum)
		cksumkey = kctx->cksum;
	else
		cksumkey = NULL;

	if (make_checksum(kctx, ptr, 8, buf, crypt_offset,
						cksumkey, &md5cksum))
		return GSS_S_FAILURE;

	if (memcmp(md5cksum.data, ptr + GSS_KRB5_TOK_HDR_LEN,
						kctx->gk5e->cksumlength))
		return GSS_S_BAD_SIG;

	/* it got through unscathed.  Make sure the context is unexpired */

	now = get_seconds();

	if (now > kctx->endtime)
		return GSS_S_CONTEXT_EXPIRED;

	/* do sequencing checks */

	if (krb5_get_seq_num(kctx->seq, ptr + GSS_KRB5_TOK_HDR_LEN, ptr + 8,
				&direction, &seqnum))
		return GSS_S_BAD_SIG;

	if ((kctx->initiate && direction != 0xff) ||
	    (!kctx->initiate && direction != 0))
		return GSS_S_BAD_SIG;

	/* Copy the data back to the right position.  XXX: Would probably be
	 * better to copy and encrypt at the same time. */

	blocksize = crypto_blkcipher_blocksize(kctx->enc);
	data_start = ptr + (GSS_KRB5_TOK_HDR_LEN + kctx->gk5e->cksumlength) +
					blocksize;
	orig_start = buf->head[0].iov_base + offset;
	data_len = (buf->head[0].iov_base + buf->head[0].iov_len) - data_start;
	memmove(orig_start, data_start, data_len);
	buf->head[0].iov_len -= (data_start - orig_start);
	buf->len -= (data_start - orig_start);

	if (gss_krb5_remove_padding(buf, blocksize))
		return GSS_S_DEFECTIVE_TOKEN;

	return GSS_S_COMPLETE;
}

/*
 * We cannot currently handle tokens with rotated data.  We need a
 * generalized routine to rotate the data in place.  It is anticipated
 * that we won't encounter rotated data in the general case.
 */
static u32
rotate_left(struct krb5_ctx *kctx, u32 offset, struct xdr_buf *buf, u16 rrc)
{
	unsigned int realrrc = rrc % (buf->len - offset - GSS_KRB5_TOK_HDR_LEN);

	if (realrrc == 0)
		return 0;

	dprintk("%s: cannot process token with rotated data: "
		"rrc %u, realrrc %u\n", __func__, rrc, realrrc);
	return 1;
}

static u32
gss_wrap_kerberos_v2(struct krb5_ctx *kctx, u32 offset,
		     struct xdr_buf *buf, struct page **pages)
{
	int blocksize;
	u8 *ptr, *plainhdr;
	s32 now;
	u8 flags = 0x00;
	__be16 *be16ptr, ec = 0;
	__be64 *be64ptr;
	u32 err;

	dprintk("RPC: %s\n", __func__);

	if (kctx->gk5e->encrypt_v2 == NULL)
		return GSS_S_FAILURE;

	/* make room for gss token header */
	if (xdr_extend_head(buf, offset, GSS_KRB5_TOK_HDR_LEN))
		return GSS_S_FAILURE;

	/* construct gss token header */
	ptr = plainhdr = buf->head[0].iov_base + offset;
	*ptr++ = (unsigned char) ((KG2_TOK_WRAP>>8) & 0xff);
	*ptr++ = (unsigned char) (KG2_TOK_WRAP & 0xff);

	if ((kctx->flags & KRB5_CTX_FLAG_INITIATOR) == 0)
		flags |= KG2_TOKEN_FLAG_SENTBYACCEPTOR;
	if ((kctx->flags & KRB5_CTX_FLAG_ACCEPTOR_SUBKEY) != 0)
		flags |= KG2_TOKEN_FLAG_ACCEPTORSUBKEY;
	/* We always do confidentiality in wrap tokens */
	flags |= KG2_TOKEN_FLAG_SEALED;

	*ptr++ = flags;
	*ptr++ = 0xff;
	be16ptr = (__be16 *)ptr;

	blocksize = crypto_blkcipher_blocksize(kctx->acceptor_enc);
	*be16ptr++ = cpu_to_be16(ec);
	/* "inner" token header always uses 0 for RRC */
	*be16ptr++ = cpu_to_be16(0);

	be64ptr = (__be64 *)be16ptr;
	spin_lock(&krb5_seq_lock);
	*be64ptr = cpu_to_be64(kctx->seq_send64++);
	spin_unlock(&krb5_seq_lock);

	err = (*kctx->gk5e->encrypt_v2)(kctx, offset, buf, ec, pages);
	if (err)
		return err;

	now = get_seconds();
	return (kctx->endtime < now) ? GSS_S_CONTEXT_EXPIRED : GSS_S_COMPLETE;
}

static u32
gss_unwrap_kerberos_v2(struct krb5_ctx *kctx, int offset, struct xdr_buf *buf)
{
	s32 now;
	u64 seqnum;
	u8 *ptr;
	u8 flags = 0x00;
	u16 ec, rrc;
	int err;
	u32 headskip, tailskip;
	u8 decrypted_hdr[GSS_KRB5_TOK_HDR_LEN];
	unsigned int movelen;


	dprintk("RPC: %s\n", __func__);

	if (kctx->gk5e->decrypt_v2 == NULL)
		return GSS_S_FAILURE;

	ptr = buf->head[0].iov_base + offset;

	if (be16_to_cpu(*((__be16 *)ptr)) != KG2_TOK_WRAP)
		return GSS_S_DEFECTIVE_TOKEN;

	flags = ptr[2];
	if ((!kctx->initiate && (flags & KG2_TOKEN_FLAG_SENTBYACCEPTOR)) ||
	    (kctx->initiate && !(flags & KG2_TOKEN_FLAG_SENTBYACCEPTOR)))
		return GSS_S_BAD_SIG;

	if ((flags & KG2_TOKEN_FLAG_SEALED) == 0) {
		dprintk("%s: token missing expected sealed flag\n", __func__);
		return GSS_S_DEFECTIVE_TOKEN;
	}

	if (ptr[3] != 0xff)
		return GSS_S_DEFECTIVE_TOKEN;

	ec = be16_to_cpup((__be16 *)(ptr + 4));
	rrc = be16_to_cpup((__be16 *)(ptr + 6));

	seqnum = be64_to_cpup((__be64 *)(ptr + 8));

	if (rrc != 0) {
		err = rotate_left(kctx, offset, buf, rrc);
		if (err)
			return GSS_S_FAILURE;
	}

	err = (*kctx->gk5e->decrypt_v2)(kctx, offset, buf,
					&headskip, &tailskip);
	if (err)
		return GSS_S_FAILURE;

	/*
	 * Retrieve the decrypted gss token header and verify
	 * it against the original
	 */
	err = read_bytes_from_xdr_buf(buf,
				buf->len - GSS_KRB5_TOK_HDR_LEN - tailskip,
				decrypted_hdr, GSS_KRB5_TOK_HDR_LEN);
	if (err) {
		dprintk("%s: error %u getting decrypted_hdr\n", __func__, err);
		return GSS_S_FAILURE;
	}
	if (memcmp(ptr, decrypted_hdr, 6)
				|| memcmp(ptr + 8, decrypted_hdr + 8, 8)) {
		dprintk("%s: token hdr, plaintext hdr mismatch!\n", __func__);
		return GSS_S_FAILURE;
	}

	/* do sequencing checks */

	/* it got through unscathed.  Make sure the context is unexpired */
	now = get_seconds();
	if (now > kctx->endtime)
		return GSS_S_CONTEXT_EXPIRED;

	/*
	 * Move the head data back to the right position in xdr_buf.
	 * We ignore any "ec" data since it might be in the head or
	 * the tail, and we really don't need to deal with it.
	 * Note that buf->head[0].iov_len may indicate the available
	 * head buffer space rather than that actually occupied.
	 */
	movelen = min_t(unsigned int, buf->head[0].iov_len, buf->len);
	movelen -= offset + GSS_KRB5_TOK_HDR_LEN + headskip;
	BUG_ON(offset + GSS_KRB5_TOK_HDR_LEN + headskip + movelen >
							buf->head[0].iov_len);
	memmove(ptr, ptr + GSS_KRB5_TOK_HDR_LEN + headskip, movelen);
	buf->head[0].iov_len -= GSS_KRB5_TOK_HDR_LEN + headskip;
	buf->len -= GSS_KRB5_TOK_HDR_LEN + headskip;

	return GSS_S_COMPLETE;
}

u32
gss_wrap_kerberos(struct gss_ctx *gctx, int offset,
		  struct xdr_buf *buf, struct page **pages)
{
	struct krb5_ctx	*kctx = gctx->internal_ctx_id;

	switch (kctx->enctype) {
	default:
		BUG();
	case ENCTYPE_DES_CBC_RAW:
	case ENCTYPE_DES3_CBC_RAW:
		return gss_wrap_kerberos_v1(kctx, offset, buf, pages);
	case ENCTYPE_AES128_CTS_HMAC_SHA1_96:
	case ENCTYPE_AES256_CTS_HMAC_SHA1_96:
		return gss_wrap_kerberos_v2(kctx, offset, buf, pages);
	}
}

u32
gss_unwrap_kerberos(struct gss_ctx *gctx, int offset, struct xdr_buf *buf)
{
	struct krb5_ctx	*kctx = gctx->internal_ctx_id;

	switch (kctx->enctype) {
	default:
		BUG();
	case ENCTYPE_DES_CBC_RAW:
	case ENCTYPE_DES3_CBC_RAW:
		return gss_unwrap_kerberos_v1(kctx, offset, buf);
	case ENCTYPE_AES128_CTS_HMAC_SHA1_96:
	case ENCTYPE_AES256_CTS_HMAC_SHA1_96:
		return gss_unwrap_kerberos_v2(kctx, offset, buf);
	}
}
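For reference, the two exported entry points above are plugged into the Kerberos mechanism's operations table in gss_krb5_mech.c. A minimal sketch, assuming the struct gss_api_ops member names from include/linux/sunrpc/gss_api.h; the table's name and its other members are illustrative only, not taken from this file:

static const struct gss_api_ops gss_kerberos_ops = {
	/* import_sec_context, get_mic, verify_mic, delete ops elided */
	.gss_wrap	= gss_wrap_kerberos,
	.gss_unwrap	= gss_unwrap_kerberos,
};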