gss_krb5_wrap.c revision d922a84a8bf1d627810906d033223d4fa629fdbf
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/jiffies.h>
#include <linux/sunrpc/gss_krb5.h>
#include <linux/random.h>
#include <linux/pagemap.h>
#include <asm/scatterlist.h>
#include <linux/crypto.h>

#ifdef RPC_DEBUG
# define RPCDBG_FACILITY	RPCDBG_AUTH
#endif

static inline int
gss_krb5_padding(int blocksize, int length)
{
	/* Most of the code is block-size independent but currently we
	 * use only 8: */
	BUG_ON(blocksize != 8);
	return 8 - (length & 7);
}

static inline void
gss_krb5_add_padding(struct xdr_buf *buf, int offset, int blocksize)
{
	int padding = gss_krb5_padding(blocksize, buf->len - offset);
	char *p;
	struct kvec *iov;

	if (buf->page_len || buf->tail[0].iov_len)
		iov = &buf->tail[0];
	else
		iov = &buf->head[0];
	p = iov->iov_base + iov->iov_len;
	iov->iov_len += padding;
	buf->len += padding;
	memset(p, padding, padding);
}

static inline int
gss_krb5_remove_padding(struct xdr_buf *buf, int blocksize)
{
	u8 *ptr;
	u8 pad;
	int len = buf->len;

	if (len <= buf->head[0].iov_len) {
		pad = *(u8 *)(buf->head[0].iov_base + len - 1);
		if (pad > buf->head[0].iov_len)
			return -EINVAL;
		buf->head[0].iov_len -= pad;
		goto out;
	} else
		len -= buf->head[0].iov_len;
	if (len <= buf->page_len) {
		int last = (buf->page_base + len - 1) >> PAGE_CACHE_SHIFT;
		int offset = (buf->page_base + len - 1) & (PAGE_CACHE_SIZE - 1);

		ptr = kmap_atomic(buf->pages[last], KM_USER0);
		pad = *(ptr + offset);
		kunmap_atomic(ptr, KM_USER0);
		goto out;
	} else
		len -= buf->page_len;
	BUG_ON(len > buf->tail[0].iov_len);
	pad = *(u8 *)(buf->tail[0].iov_base + len - 1);
out:
	/* XXX: NOTE: we do not adjust the page lengths--they represent
	 * a range of data in the real filesystem page cache, and we need
	 * to know that range so the xdr code can properly place read data.
	 * However adjusting the head length, as we do above, is harmless.
	 * In the case of a request that fits into a single page, the server
	 * also uses length and head length together to determine the original
	 * start of the request to copy the request for deferral; so it's
	 * easier on the server if we adjust head and tail length in tandem.
	 * It's not really a problem that we don't fool with the page and
	 * tail lengths, though--at worst badly formed xdr might lead the
	 * server to attempt to parse the padding.
	 * XXX: Document all these weird requirements for gss mechanism
	 * wrap/unwrap functions. */
	if (pad > blocksize)
		return -EINVAL;
	if (buf->len > pad)
		buf->len -= pad;
	else
		return -EINVAL;
	return 0;
}
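
/*
 * Worked example of the padding scheme above, for illustration: with
 * blocksize 8 and 20 bytes of data past the offset, gss_krb5_padding(8, 20)
 * returns 4, and gss_krb5_add_padding() appends four octets each holding
 * the value 4:
 *
 *	before:	| 20 bytes of data |
 *	after:	| 20 bytes of data | 04 04 04 04 |
 *
 * A length already on a block boundary gets a full block of eight 0x08
 * octets, so there is always at least one pad octet.  Because every pad
 * octet carries the pad count, gss_krb5_remove_padding() needs only the
 * final octet to recover the original length, and a value greater than
 * blocksize there marks the buffer as corrupt.
 */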

static inline void
make_confounder(char *p, int blocksize)
{
	static u64 i = 0;
	u64 *q = (u64 *)p;

	/* rfc1964 claims this should be "random".  But all that's really
	 * necessary is that it be unique.  And not even that is necessary in
	 * our case since our "gssapi" implementation exists only to support
	 * rpcsec_gss, so we know that the only buffers we will ever encrypt
	 * already begin with a unique sequence number.  Just to hedge my bets
	 * I'll make a half-hearted attempt at something unique, but ensuring
	 * uniqueness would mean worrying about atomicity and rollover, and I
	 * don't care enough. */

	BUG_ON(blocksize != 8);
	*q = i++;
}

/* Assumptions: the head and tail of inbuf are ours to play with.
 * The pages, however, may be real pages in the page cache and we replace
 * them with scratch pages from **pages before writing to them. */
/* XXX: obviously the above should be documentation of the wrap interface,
 * and shouldn't be in this kerberos-specific file. */

/* XXX factor out common code with seal/unseal. */

u32
gss_wrap_kerberos(struct gss_ctx *ctx, int offset,
		struct xdr_buf *buf, struct page **pages)
{
	struct krb5_ctx *kctx = ctx->internal_ctx_id;
	char cksumdata[16];
	struct xdr_netobj md5cksum = {.len = 0, .data = cksumdata};
	int blocksize = 0, plainlen;
	unsigned char *ptr, *krb5_hdr, *msg_start;
	s32 now;
	int headlen;
	struct page **tmp_pages;
	u32 seq_send;

	dprintk("RPC: gss_wrap_kerberos\n");

	now = get_seconds();

	blocksize = crypto_blkcipher_blocksize(kctx->enc);
	gss_krb5_add_padding(buf, offset, blocksize);
	BUG_ON((buf->len - offset) % blocksize);
	plainlen = blocksize + buf->len - offset; /* confounder + padded data */

	/* everything in the token that will precede the (padded) data: */
	headlen = g_token_size(&kctx->mech_used, 22 + plainlen) -
						(buf->len - offset);

	ptr = buf->head[0].iov_base + offset;
	/* shift data to make room for header. */
	/* XXX Would be cleverer to encrypt while copying. */
	/* XXX bounds checking, slack, etc. */
	memmove(ptr + headlen, ptr, buf->head[0].iov_len - offset);
	buf->head[0].iov_len += headlen;
	buf->len += headlen;
	BUG_ON((buf->len - offset - headlen) % blocksize);

	g_make_token_header(&kctx->mech_used, 22 + plainlen, &ptr);

	*ptr++ = (unsigned char) ((KG_TOK_WRAP_MSG >> 8) & 0xff);
	*ptr++ = (unsigned char) (KG_TOK_WRAP_MSG & 0xff);

	/* ptr now at byte 2 of header described in rfc 1964, section 1.2.1: */
	krb5_hdr = ptr - 2;
	msg_start = krb5_hdr + 24;
	/* XXXJBF: */
	BUG_ON(buf->head[0].iov_base + offset + headlen !=
						msg_start + blocksize);

	*(__be16 *)(krb5_hdr + 2) = htons(SGN_ALG_DES_MAC_MD5);
	memset(krb5_hdr + 4, 0xff, 4);
	*(__be16 *)(krb5_hdr + 4) = htons(SEAL_ALG_DES);

	make_confounder(msg_start, blocksize);

	/* XXXJBF: UGH!: */
	tmp_pages = buf->pages;
	buf->pages = pages;
	if (make_checksum("md5", krb5_hdr, 8, buf,
				offset + headlen - blocksize, &md5cksum))
		return GSS_S_FAILURE;
	buf->pages = tmp_pages;

	if (krb5_encrypt(kctx->seq, NULL, md5cksum.data,
			 md5cksum.data, md5cksum.len))
		return GSS_S_FAILURE;
	memcpy(krb5_hdr + 16,
	       md5cksum.data + md5cksum.len - KRB5_CKSUM_LENGTH,
	       KRB5_CKSUM_LENGTH);

	spin_lock(&krb5_seq_lock);
	seq_send = kctx->seq_send++;
	spin_unlock(&krb5_seq_lock);

	/* XXX would probably be more efficient to compute checksum
	 * and encrypt at the same time: */
	if ((krb5_make_seq_num(kctx->seq, kctx->initiate ? 0 : 0xff,
			       seq_send, krb5_hdr + 16, krb5_hdr + 8)))
		return GSS_S_FAILURE;

	if (gss_encrypt_xdr_buf(kctx->enc, buf, offset + headlen - blocksize,
				pages))
		return GSS_S_FAILURE;

	return ((kctx->endtime < now) ?
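
/*
 * For reference (an illustrative summary derived from the code above and
 * rfc 1964, not a comment from the original revision): the wrap token that
 * gss_wrap_kerberos() emits and gss_unwrap_kerberos() parses looks like
 *
 *	krb5_hdr +  0..1	TOK_ID (KG_TOK_WRAP_MSG)
 *	krb5_hdr +  2..3	SGN_ALG (SGN_ALG_DES_MAC_MD5)
 *	krb5_hdr +  4..5	SEAL_ALG (SEAL_ALG_DES)
 *	krb5_hdr +  6..7	filler (0xff 0xff)
 *	krb5_hdr +  8..15	SND_SEQ, the encrypted sequence number
 *	krb5_hdr + 16..23	SGN_CKSUM, last 8 bytes of the encrypted
 *				MD5 checksum
 *	krb5_hdr + 24..		confounder (one cipher block), data, pad
 *
 * hence msg_start == krb5_hdr + 24 above, and the unwrap side's ptr + 6,
 * ptr + 14 and ptr + 22 below (ptr sits at krb5_hdr + 2 once the TOK_ID
 * bytes have been consumed).
 */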
		GSS_S_CONTEXT_EXPIRED : GSS_S_COMPLETE);
}

u32
gss_unwrap_kerberos(struct gss_ctx *ctx, int offset, struct xdr_buf *buf)
{
	struct krb5_ctx *kctx = ctx->internal_ctx_id;
	int signalg;
	int sealalg;
	char cksumdata[16];
	struct xdr_netobj md5cksum = {.len = 0, .data = cksumdata};
	s32 now;
	int direction;
	s32 seqnum;
	unsigned char *ptr;
	int bodysize;
	void *data_start, *orig_start;
	int data_len;
	int blocksize;

	dprintk("RPC: gss_unwrap_kerberos\n");

	ptr = (u8 *)buf->head[0].iov_base + offset;
	if (g_verify_token_header(&kctx->mech_used, &bodysize, &ptr,
					buf->len - offset))
		return GSS_S_DEFECTIVE_TOKEN;

	if ((*ptr++ != ((KG_TOK_WRAP_MSG >> 8) & 0xff)) ||
	    (*ptr++ != (KG_TOK_WRAP_MSG & 0xff)))
		return GSS_S_DEFECTIVE_TOKEN;

	/* XXX sanity-check bodysize?? */

	/* get the sign and seal algorithms */

	signalg = ptr[0] + (ptr[1] << 8);
	sealalg = ptr[2] + (ptr[3] << 8);

	/* Sanity checks */

	if ((ptr[4] != 0xff) || (ptr[5] != 0xff))
		return GSS_S_DEFECTIVE_TOKEN;

	if (sealalg != SEAL_ALG_DES)
		return GSS_S_DEFECTIVE_TOKEN;
	if (signalg != SGN_ALG_DES_MAC_MD5)
		return GSS_S_DEFECTIVE_TOKEN;

	if (gss_decrypt_xdr_buf(kctx->enc, buf,
			ptr + 22 - (unsigned char *)buf->head[0].iov_base))
		return GSS_S_DEFECTIVE_TOKEN;

	if (make_checksum("md5", ptr - 2, 8, buf,
			ptr + 22 - (unsigned char *)buf->head[0].iov_base,
			&md5cksum))
		return GSS_S_FAILURE;

	if (krb5_encrypt(kctx->seq, NULL, md5cksum.data,
			 md5cksum.data, md5cksum.len))
		return GSS_S_FAILURE;

	if (memcmp(md5cksum.data + 8, ptr + 14, 8))
		return GSS_S_BAD_SIG;

	/* it got through unscathed.  Make sure the context is unexpired. */

	now = get_seconds();

	if (now > kctx->endtime)
		return GSS_S_CONTEXT_EXPIRED;

	/* do sequencing checks */

	if (krb5_get_seq_num(kctx->seq, ptr + 14, ptr + 6, &direction,
			     &seqnum))
		return GSS_S_BAD_SIG;

	if ((kctx->initiate && direction != 0xff) ||
	    (!kctx->initiate && direction != 0))
		return GSS_S_BAD_SIG;

	/* Copy the data back to the right position.  XXX: Would probably be
	 * better to copy and decrypt at the same time. */

	blocksize = crypto_blkcipher_blocksize(kctx->enc);
	data_start = ptr + 22 + blocksize;
	orig_start = buf->head[0].iov_base + offset;
	data_len = (buf->head[0].iov_base + buf->head[0].iov_len) - data_start;
	memmove(orig_start, data_start, data_len);
	buf->head[0].iov_len -= (data_start - orig_start);
	buf->len -= (data_start - orig_start);

	if (gss_krb5_remove_padding(buf, blocksize))
		return GSS_S_DEFECTIVE_TOKEN;

	return GSS_S_COMPLETE;
}
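
/*
 * A minimal standalone sketch of the same pad-with-count scheme that
 * gss_krb5_add_padding() and gss_krb5_remove_padding() implement over a
 * struct xdr_buf, here reduced to a flat buffer so it can be compiled and
 * played with as plain userspace C.  The helper names are ours for
 * illustration; they are not part of the kernel source above.
 */
#include <stddef.h>
#include <string.h>

/* Append pad octets, each holding the pad count; always adds 1..blocksize. */
static size_t pad_add(unsigned char *buf, size_t len, unsigned blocksize)
{
	unsigned pad = blocksize - (len % blocksize);

	memset(buf + len, pad, pad);
	return len + pad;
}

/* Recover the unpadded length from the final octet; -1 on bad padding. */
static int pad_remove(const unsigned char *buf, size_t len,
		      unsigned blocksize, size_t *newlen)
{
	unsigned char pad;

	if (len == 0)
		return -1;
	pad = buf[len - 1];
	if (pad < 1 || pad > blocksize || pad > len)
		return -1;
	*newlen = len - pad;
	return 0;
}

/* E.g. pad_add() on a 20-byte buffer with blocksize 8 returns 24 and
 * writes 04 04 04 04; pad_remove() then restores the length 20. */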