/* gss_krb5_wrap.c — revision 54ec3d462f3c2a3fe48a7bd592160bee31360087 */
#include <linux/types.h>
#include <linux/jiffies.h>
#include <linux/sunrpc/gss_krb5.h>
#include <linux/random.h>
#include <linux/pagemap.h>
#include <linux/crypto.h>

#ifdef RPC_DEBUG
# define RPCDBG_FACILITY	RPCDBG_AUTH
#endif

/*
 * Number of pad bytes needed to bring (length) up to a multiple of
 * (blocksize).  Note: when length is already aligned this returns a
 * full blocksize, not 0 — a whole extra block of padding is added,
 * matching the RFC 1964 sec 1.2.2.3 convention that at least one pad
 * byte is always present.
 */
static inline int
gss_krb5_padding(int blocksize, int length)
{
	return blocksize - (length % blocksize);
}

/*
 * Append cipher-block padding to the xdr_buf.  Each pad byte holds the
 * pad length itself (memset(p, padding, padding)), so the receiver can
 * recover the count from the last byte of the message.  Padding goes
 * into the tail iovec when page or tail data exists, otherwise directly
 * after the head.  Caller must guarantee room past iov_len for the pad
 * bytes — nothing here checks the iovec's allocation size.
 */
static inline void
gss_krb5_add_padding(struct xdr_buf *buf, int offset, int blocksize)
{
	int padding = gss_krb5_padding(blocksize, buf->len - offset);
	char *p;
	struct kvec *iov;

	if (buf->page_len || buf->tail[0].iov_len)
		iov = &buf->tail[0];
	else
		iov = &buf->head[0];
	p = iov->iov_base + iov->iov_len;
	iov->iov_len += padding;
	buf->len += padding;
	memset(p, padding, padding);
}

/*
 * Strip the block-cipher padding from a decrypted xdr_buf.  The pad
 * count is read from the very last byte of the logical buffer, which
 * may live in the head iovec, in one of the pages, or in the tail —
 * the three branches below locate it.  Returns 0 on success or -EINVAL
 * if the claimed pad count is implausible.
 */
static inline int
gss_krb5_remove_padding(struct xdr_buf *buf, int blocksize)
{
	u8 *ptr;
	u8 pad;
	size_t len = buf->len;

	if (len <= buf->head[0].iov_len) {
		/* Last byte is still inside the head iovec. */
		pad = *(u8 *)(buf->head[0].iov_base + len - 1);
		if (pad > buf->head[0].iov_len)
			return -EINVAL;
		buf->head[0].iov_len -= pad;
		goto out;
	} else
		len -= buf->head[0].iov_len;
	if (len <= buf->page_len) {
		/* Last byte lives in the page array; map just the one
		 * page that contains it. */
		unsigned int last = (buf->page_base + len - 1)
					>>PAGE_CACHE_SHIFT;
		unsigned int offset = (buf->page_base + len - 1)
					& (PAGE_CACHE_SIZE - 1);
		ptr = kmap_atomic(buf->pages[last], KM_USER0);
		pad = *(ptr + offset);
		kunmap_atomic(ptr, KM_USER0);
		goto out;
	} else
		len -= buf->page_len;
	BUG_ON(len > buf->tail[0].iov_len);
	pad = *(u8 *)(buf->tail[0].iov_base + len - 1);
out:
	/* XXX: NOTE: we do not adjust the page lengths--they represent
	 * a range of data in the real filesystem page cache, and we need
	 * to know that range so the xdr code can properly place read data.
	 * However adjusting the head length, as we do above, is harmless.
	 * In the case of a request that fits into a single page, the server
	 * also uses length and head length together to determine the original
	 * start of the request to copy the request for deferal; so it's
	 * easier on the server if we adjust head and tail length in tandem.
	 * It's not really a problem that we don't fool with the page and
	 * tail lengths, though--at worst badly formed xdr might lead the
	 * server to attempt to parse the padding.
	 * XXX: Document all these weird requirements for gss mechanism
	 * wrap/unwrap functions. */
	if (pad > blocksize)
		return -EINVAL;
	if (buf->len > pad)
		buf->len -= pad;
	else
		return -EINVAL;
	return 0;
}

/*
 * Write a (conflen)-byte confounder at p.  conflen must be 8 or 16
 * (one or two u64 words); anything else is a programming error (BUG).
 * NOTE(review): the u64 store through (u64 *)p assumes p is suitably
 * aligned, and the non-atomic static counter tolerates races — both
 * are accepted trade-offs per the comment below; confirm before
 * reusing this helper elsewhere.
 */
static void
make_confounder(char *p, u32 conflen)
{
	static u64 i = 0;
	u64 *q = (u64 *)p;

	/* rfc1964 claims this should be "random".  But all that's really
	 * necessary is that it be unique.  And not even that is necessary in
	 * our case since our "gssapi" implementation exists only to support
	 * rpcsec_gss, so we know that the only buffers we will ever encrypt
	 * already begin with a unique sequence number.  Just to hedge my bets
	 * I'll make a half-hearted attempt at something unique, but ensuring
	 * uniqueness would mean worrying about atomicity and rollover, and I
	 * don't care enough. */

	/* initialize to random value */
	if (i == 0) {
		i = random32();
		i = (i << 32) | random32();
	}

	switch (conflen) {
	case 16:
		*q++ = i++;
		/* fall through */
	case 8:
		*q++ = i++;
		break;
	default:
		BUG();
	}
}

/* Assumptions: the head and tail of inbuf are ours to play with.
 * The pages, however, may be real pages in the page cache and we replace
 * them with scratch pages from **pages before writing to them. */
/* XXX: obviously the above should be documentation of wrap interface,
 * and shouldn't be in this kerberos-specific file. */

/* XXX factor out common code with seal/unseal.
*/ 126 127u32 128gss_wrap_kerberos(struct gss_ctx *ctx, int offset, 129 struct xdr_buf *buf, struct page **pages) 130{ 131 struct krb5_ctx *kctx = ctx->internal_ctx_id; 132 char cksumdata[16]; 133 struct xdr_netobj md5cksum = {.len = 0, .data = cksumdata}; 134 int blocksize = 0, plainlen; 135 unsigned char *ptr, *msg_start; 136 s32 now; 137 int headlen; 138 struct page **tmp_pages; 139 u32 seq_send; 140 141 dprintk("RPC: gss_wrap_kerberos\n"); 142 143 now = get_seconds(); 144 145 blocksize = crypto_blkcipher_blocksize(kctx->enc); 146 gss_krb5_add_padding(buf, offset, blocksize); 147 BUG_ON((buf->len - offset) % blocksize); 148 plainlen = blocksize + buf->len - offset; 149 150 headlen = g_token_size(&kctx->mech_used, 24 + plainlen) - 151 (buf->len - offset); 152 153 ptr = buf->head[0].iov_base + offset; 154 /* shift data to make room for header. */ 155 xdr_extend_head(buf, offset, headlen); 156 157 /* XXX Would be cleverer to encrypt while copying. */ 158 BUG_ON((buf->len - offset - headlen) % blocksize); 159 160 g_make_token_header(&kctx->mech_used, 161 GSS_KRB5_TOK_HDR_LEN + 8 + plainlen, &ptr); 162 163 164 /* ptr now at header described in rfc 1964, section 1.2.1: */ 165 ptr[0] = (unsigned char) ((KG_TOK_WRAP_MSG >> 8) & 0xff); 166 ptr[1] = (unsigned char) (KG_TOK_WRAP_MSG & 0xff); 167 168 msg_start = ptr + 24; 169 170 *(__be16 *)(ptr + 2) = htons(SGN_ALG_DES_MAC_MD5); 171 memset(ptr + 4, 0xff, 4); 172 *(__be16 *)(ptr + 4) = htons(SEAL_ALG_DES); 173 174 make_confounder(msg_start, blocksize); 175 176 /* XXXJBF: UGH!: */ 177 tmp_pages = buf->pages; 178 buf->pages = pages; 179 if (make_checksum("md5", ptr, 8, buf, 180 offset + headlen - blocksize, &md5cksum)) 181 return GSS_S_FAILURE; 182 buf->pages = tmp_pages; 183 184 if (krb5_encrypt(kctx->seq, NULL, md5cksum.data, 185 md5cksum.data, md5cksum.len)) 186 return GSS_S_FAILURE; 187 memcpy(ptr + GSS_KRB5_TOK_HDR_LEN, md5cksum.data + md5cksum.len - 8, 8); 188 189 spin_lock(&krb5_seq_lock); 190 seq_send = 
kctx->seq_send++; 191 spin_unlock(&krb5_seq_lock); 192 193 /* XXX would probably be more efficient to compute checksum 194 * and encrypt at the same time: */ 195 if ((krb5_make_seq_num(kctx->seq, kctx->initiate ? 0 : 0xff, 196 seq_send, ptr + GSS_KRB5_TOK_HDR_LEN, ptr + 8))) 197 return GSS_S_FAILURE; 198 199 if (gss_encrypt_xdr_buf(kctx->enc, buf, offset + headlen - blocksize, 200 pages)) 201 return GSS_S_FAILURE; 202 203 return (kctx->endtime < now) ? GSS_S_CONTEXT_EXPIRED : GSS_S_COMPLETE; 204} 205 206u32 207gss_unwrap_kerberos(struct gss_ctx *ctx, int offset, struct xdr_buf *buf) 208{ 209 struct krb5_ctx *kctx = ctx->internal_ctx_id; 210 int signalg; 211 int sealalg; 212 char cksumdata[16]; 213 struct xdr_netobj md5cksum = {.len = 0, .data = cksumdata}; 214 s32 now; 215 int direction; 216 s32 seqnum; 217 unsigned char *ptr; 218 int bodysize; 219 void *data_start, *orig_start; 220 int data_len; 221 int blocksize; 222 223 dprintk("RPC: gss_unwrap_kerberos\n"); 224 225 ptr = (u8 *)buf->head[0].iov_base + offset; 226 if (g_verify_token_header(&kctx->mech_used, &bodysize, &ptr, 227 buf->len - offset)) 228 return GSS_S_DEFECTIVE_TOKEN; 229 230 if ((ptr[0] != ((KG_TOK_WRAP_MSG >> 8) & 0xff)) || 231 (ptr[1] != (KG_TOK_WRAP_MSG & 0xff))) 232 return GSS_S_DEFECTIVE_TOKEN; 233 234 /* XXX sanity-check bodysize?? 
*/ 235 236 /* get the sign and seal algorithms */ 237 238 signalg = ptr[2] + (ptr[3] << 8); 239 if (signalg != SGN_ALG_DES_MAC_MD5) 240 return GSS_S_DEFECTIVE_TOKEN; 241 242 sealalg = ptr[4] + (ptr[5] << 8); 243 if (sealalg != SEAL_ALG_DES) 244 return GSS_S_DEFECTIVE_TOKEN; 245 246 if ((ptr[6] != 0xff) || (ptr[7] != 0xff)) 247 return GSS_S_DEFECTIVE_TOKEN; 248 249 if (gss_decrypt_xdr_buf(kctx->enc, buf, 250 ptr + GSS_KRB5_TOK_HDR_LEN + 8 - (unsigned char *)buf->head[0].iov_base)) 251 return GSS_S_DEFECTIVE_TOKEN; 252 253 if (make_checksum("md5", ptr, 8, buf, 254 ptr + GSS_KRB5_TOK_HDR_LEN + 8 - (unsigned char *)buf->head[0].iov_base, &md5cksum)) 255 return GSS_S_FAILURE; 256 257 if (krb5_encrypt(kctx->seq, NULL, md5cksum.data, 258 md5cksum.data, md5cksum.len)) 259 return GSS_S_FAILURE; 260 261 if (memcmp(md5cksum.data + 8, ptr + GSS_KRB5_TOK_HDR_LEN, 8)) 262 return GSS_S_BAD_SIG; 263 264 /* it got through unscathed. Make sure the context is unexpired */ 265 266 now = get_seconds(); 267 268 if (now > kctx->endtime) 269 return GSS_S_CONTEXT_EXPIRED; 270 271 /* do sequencing checks */ 272 273 if (krb5_get_seq_num(kctx->seq, ptr + GSS_KRB5_TOK_HDR_LEN, ptr + 8, 274 &direction, &seqnum)) 275 return GSS_S_BAD_SIG; 276 277 if ((kctx->initiate && direction != 0xff) || 278 (!kctx->initiate && direction != 0)) 279 return GSS_S_BAD_SIG; 280 281 /* Copy the data back to the right position. XXX: Would probably be 282 * better to copy and encrypt at the same time. */ 283 284 blocksize = crypto_blkcipher_blocksize(kctx->enc); 285 data_start = ptr + GSS_KRB5_TOK_HDR_LEN + 8 + blocksize; 286 orig_start = buf->head[0].iov_base + offset; 287 data_len = (buf->head[0].iov_base + buf->head[0].iov_len) - data_start; 288 memmove(orig_start, data_start, data_len); 289 buf->head[0].iov_len -= (data_start - orig_start); 290 buf->len -= (data_start - orig_start); 291 292 if (gss_krb5_remove_padding(buf, blocksize)) 293 return GSS_S_DEFECTIVE_TOKEN; 294 295 return GSS_S_COMPLETE; 296} 297