#include <linux/types.h>
#include <linux/slab.h>
#include <linux/jiffies.h>
#include <linux/sunrpc/gss_krb5.h>
#include <linux/random.h>
#include <linux/pagemap.h>
#include <linux/crypto.h>

#ifdef RPC_DEBUG
# define RPCDBG_FACILITY	RPCDBG_AUTH
#endif

static inline int
gss_krb5_padding(int blocksize, int length)
{
	/* Most of the code is block-size independent but currently we
	 * use only 8: */
	BUG_ON(blocksize != 8);
	return 8 - (length & 7);
}
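
/*
 * Worked example (my gloss, not from the original source): with
 * blocksize 8 and 26 bytes of data past the offset, the expression
 * above yields 8 - (26 & 7) = 6, so gss_krb5_add_padding() below
 * appends six bytes each holding the value 0x06.  The receiver reads
 * the final byte to learn how much padding to strip, in the
 * PKCS#5-like style rfc 1964 uses for DES-CBC.
 */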

static inline void
gss_krb5_add_padding(struct xdr_buf *buf, int offset, int blocksize)
{
	int padding = gss_krb5_padding(blocksize, buf->len - offset);
	char *p;
	struct kvec *iov;

	/* Padding goes after the last byte of data: in the tail if any
	 * page or tail data exists, otherwise at the end of the head. */
	if (buf->page_len || buf->tail[0].iov_len)
		iov = &buf->tail[0];
	else
		iov = &buf->head[0];
	p = iov->iov_base + iov->iov_len;
	iov->iov_len += padding;
	buf->len += padding;
	memset(p, padding, padding);
}

static inline int
gss_krb5_remove_padding(struct xdr_buf *buf, int blocksize)
{
	u8 *ptr;
	u8 pad;
	size_t len = buf->len;

	/* The pad count is the last byte of the buffer; find which of
	 * head, pages, or tail holds it. */
	if (len <= buf->head[0].iov_len) {
		pad = *(u8 *)(buf->head[0].iov_base + len - 1);
		if (pad > buf->head[0].iov_len)
			return -EINVAL;
		buf->head[0].iov_len -= pad;
		goto out;
	} else
		len -= buf->head[0].iov_len;
	if (len <= buf->page_len) {
		unsigned int last = (buf->page_base + len - 1)
					>> PAGE_CACHE_SHIFT;
		unsigned int offset = (buf->page_base + len - 1)
					& (PAGE_CACHE_SIZE - 1);
		ptr = kmap_atomic(buf->pages[last], KM_USER0);
		pad = *(ptr + offset);
		kunmap_atomic(ptr, KM_USER0);
		goto out;
	} else
		len -= buf->page_len;
	BUG_ON(len > buf->tail[0].iov_len);
	pad = *(u8 *)(buf->tail[0].iov_base + len - 1);
out:
	/* XXX: NOTE: we do not adjust the page lengths--they represent
	 * a range of data in the real filesystem page cache, and we need
	 * to know that range so the xdr code can properly place read data.
	 * However adjusting the head length, as we do above, is harmless.
	 * In the case of a request that fits into a single page, the server
	 * also uses length and head length together to determine the original
	 * start of the request to copy the request for deferral; so it's
	 * easier on the server if we adjust head and tail length in tandem.
	 * It's not really a problem that we don't fool with the page and
	 * tail lengths, though--at worst badly formed xdr might lead the
	 * server to attempt to parse the padding.
	 * XXX: Document all these weird requirements for gss mechanism
	 * wrap/unwrap functions. */
	buf->len -= pad;
	return 0;
}
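
/*
 * A sketch of the xdr_buf layout the three cases above walk (my gloss,
 * not from the original source): buf->len spans head[0].iov_len bytes
 * of head, then page_len bytes of page data starting page_base bytes
 * into pages[], then the tail kvec.  The pad byte sits at absolute
 * offset buf->len - 1, so we subtract each region's length from len
 * until the remainder falls inside one of them.
 */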

static void
make_confounder(char *p, int blocksize)
{
	static u64 i = 0;
	u64 *q = (u64 *)p;

	/* rfc1964 claims this should be "random".  But all that's really
	 * necessary is that it be unique.  And not even that is necessary in
	 * our case since our "gssapi" implementation exists only to support
	 * rpcsec_gss, so we know that the only buffers we will ever encrypt
	 * already begin with a unique sequence number.  Just to hedge my bets
	 * I'll make a half-hearted attempt at something unique, but ensuring
	 * uniqueness would mean worrying about atomicity and rollover, and I
	 * don't care enough. */

	BUG_ON(blocksize != 8);
	*q = i++;
}
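
/*
 * Aside (my gloss, not from the original source): the confounder fills
 * the first cipher block of the plaintext, so under CBC it perturbs
 * every later block even though a bare counter is predictable; as the
 * comment above notes, the real uniqueness comes from the rpcsec_gss
 * sequence number already at the front of the data.
 */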

/* Assumptions: the head and tail of inbuf are ours to play with.
 * The pages, however, may be real pages in the page cache and we replace
 * them with scratch pages from **pages before writing to them. */
/* XXX: obviously the above should be documentation of wrap interface,
 * and shouldn't be in this kerberos-specific file. */

/* XXX factor out common code with seal/unseal. */

u32
gss_wrap_kerberos(struct gss_ctx *ctx, int offset,
		struct xdr_buf *buf, struct page **pages)
{
	struct krb5_ctx		*kctx = ctx->internal_ctx_id;
	char			cksumdata[16];
	struct xdr_netobj	md5cksum = {.len = 0, .data = cksumdata};
	int			blocksize = 0, plainlen;
	unsigned char		*ptr, *krb5_hdr, *msg_start;
	s32			now;
	int			headlen;
	struct page		**tmp_pages;
	u32			seq_send;

	dprintk("RPC:       gss_wrap_kerberos\n");

	now = get_seconds();

	blocksize = crypto_blkcipher_blocksize(kctx->enc);
	gss_krb5_add_padding(buf, offset, blocksize);
	BUG_ON((buf->len - offset) % blocksize);
	plainlen = blocksize + buf->len - offset;

	headlen = g_token_size(&kctx->mech_used, 22 + plainlen) -
						(buf->len - offset);
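
	/* Gloss (mine): the 22 counts the fixed krb5 fields that follow
	 * the two token-id bytes--signalg (2) + sealalg (2) + filler (2)
	 * + SND_SEQ (8) + SGN_CKSUM (8)--while plainlen already includes
	 * the confounder block plus the padded message, so headlen is
	 * exactly the token framing we must open up in front of the data. */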

	ptr = buf->head[0].iov_base + offset;
	/* shift data to make room for header. */
	/* XXX Would be cleverer to encrypt while copying. */
	/* XXX bounds checking, slack, etc. */
	memmove(ptr + headlen, ptr, buf->head[0].iov_len - offset);
	buf->head[0].iov_len += headlen;
	buf->len += headlen;
	BUG_ON((buf->len - offset - headlen) % blocksize);

	g_make_token_header(&kctx->mech_used, 22 + plainlen, &ptr);

	*ptr++ = (unsigned char) ((KG_TOK_WRAP_MSG>>8)&0xff);
	*ptr++ = (unsigned char) (KG_TOK_WRAP_MSG&0xff);

	/* ptr now at byte 2 of header described in rfc 1964, section 1.2.1: */
	krb5_hdr = ptr - 2;
	msg_start = krb5_hdr + 24;

	*(__be16 *)(krb5_hdr + 2) = htons(SGN_ALG_DES_MAC_MD5);
	memset(krb5_hdr + 4, 0xff, 4);
	*(__be16 *)(krb5_hdr + 4) = htons(SEAL_ALG_DES);
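
	/*
	 * Resulting header layout, per rfc 1964, section 1.2.1 (offsets
	 * relative to krb5_hdr; the seq and checksum fields are filled
	 * in below):
	 *
	 *	0..1	TOK_ID		0x02 0x01 (KG_TOK_WRAP_MSG)
	 *	2..3	SGN_ALG		DES MAC MD5
	 *	4..5	SEAL_ALG	DES
	 *	6..7	filler		0xff 0xff
	 *	8..15	SND_SEQ		encrypted sequence number
	 *	16..23	SGN_CKSUM	checksum of header + plaintext
	 *	24..	confounder, then the message itself
	 */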

	make_confounder(msg_start, blocksize);

	/* XXXJBF: UGH!: */
	tmp_pages = buf->pages;
	buf->pages = pages;
	if (make_checksum("md5", krb5_hdr, 8, buf,
				offset + headlen - blocksize, &md5cksum))
		return GSS_S_FAILURE;
	buf->pages = tmp_pages;

	if (krb5_encrypt(kctx->seq, NULL, md5cksum.data,
			  md5cksum.data, md5cksum.len))
		return GSS_S_FAILURE;
	memcpy(krb5_hdr + 16,
	       md5cksum.data + md5cksum.len - KRB5_CKSUM_LENGTH,
	       KRB5_CKSUM_LENGTH);
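
	/* Gloss (mine): encrypting the md5 digest with DES-CBC and keeping
	 * only the last KRB5_CKSUM_LENGTH bytes is a CBC-MAC, which is how
	 * rfc 1964 defines the DES MAC MD5 SGN_CKSUM. */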

	spin_lock(&krb5_seq_lock);
	seq_send = kctx->seq_send++;
	spin_unlock(&krb5_seq_lock);
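
	/* Note (mine): seq_send is shared by every concurrent wrap on this
	 * context; taking krb5_seq_lock around the post-increment keeps
	 * each token's sequence number unique. */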

	/* XXX would probably be more efficient to compute checksum
	 * and encrypt at the same time: */
	if ((krb5_make_seq_num(kctx->seq, kctx->initiate ? 0 : 0xff,
			       seq_send, krb5_hdr + 16, krb5_hdr + 8)))
		return GSS_S_FAILURE;

	if (gss_encrypt_xdr_buf(kctx->enc, buf, offset + headlen - blocksize,
								pages))
		return GSS_S_FAILURE;

	return (kctx->endtime < now) ? GSS_S_CONTEXT_EXPIRED : GSS_S_COMPLETE;
}

u32
gss_unwrap_kerberos(struct gss_ctx *ctx, int offset, struct xdr_buf *buf)
{
	struct krb5_ctx		*kctx = ctx->internal_ctx_id;
	int			signalg;
	int			sealalg;
	char			cksumdata[16];
	struct xdr_netobj	md5cksum = {.len = 0, .data = cksumdata};
	s32			now;
	int			direction;
	s32			seqnum;
	unsigned char		*ptr;
	int			bodysize;
	void			*data_start, *orig_start;
	int			data_len;
	int			blocksize;

	dprintk("RPC:       gss_unwrap_kerberos\n");

	ptr = (u8 *)buf->head[0].iov_base + offset;
	if (g_verify_token_header(&kctx->mech_used, &bodysize, &ptr,
					buf->len - offset))
		return GSS_S_DEFECTIVE_TOKEN;
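
	/* g_verify_token_header() has checked the ASN.1 application
	 * wrapper and mech OID and left ptr at the token body, whose
	 * first two bytes must be the wrap token id: */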
	if ((*ptr++ != ((KG_TOK_WRAP_MSG>>8)&0xff)) ||
	    (*ptr++ != (KG_TOK_WRAP_MSG&0xff)))
		return GSS_S_DEFECTIVE_TOKEN;

	/* XXX sanity-check bodysize?? */

	/* get the sign and seal algorithms */

	signalg = ptr[0] + (ptr[1] << 8);
	if (signalg != SGN_ALG_DES_MAC_MD5)
		return GSS_S_DEFECTIVE_TOKEN;

	sealalg = ptr[2] + (ptr[3] << 8);
	if (sealalg != SEAL_ALG_DES)
		return GSS_S_DEFECTIVE_TOKEN;

	if ((ptr[4] != 0xff) || (ptr[5] != 0xff))
		return GSS_S_DEFECTIVE_TOKEN;

	if (gss_decrypt_xdr_buf(kctx->enc, buf,
			ptr + 22 - (unsigned char *)buf->head[0].iov_base))
		return GSS_S_DEFECTIVE_TOKEN;

	if (make_checksum("md5", ptr - 2, 8, buf,
		ptr + 22 - (unsigned char *)buf->head[0].iov_base, &md5cksum))
		return GSS_S_FAILURE;

	if (krb5_encrypt(kctx->seq, NULL, md5cksum.data,
			 md5cksum.data, md5cksum.len))
		return GSS_S_FAILURE;

	if (memcmp(md5cksum.data + 8, ptr + 14, 8))
		return GSS_S_BAD_SIG;

	/* it got through unscathed.  Make sure the context is unexpired. */

	now = get_seconds();

	if (now > kctx->endtime)
		return GSS_S_CONTEXT_EXPIRED;

	/* do sequencing checks */

	if (krb5_get_seq_num(kctx->seq, ptr + 14, ptr + 6, &direction,
			     &seqnum))
		return GSS_S_BAD_SIG;

	if ((kctx->initiate && direction != 0xff) ||
	    (!kctx->initiate && direction != 0))
		return GSS_S_BAD_SIG;
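
	/* Note (mine): per rfc 1964, section 1.2.1.2, the direction bytes
	 * recovered alongside the sequence number are 0 when the sender is
	 * the initiator and 0xff when the sender is the acceptor, so each
	 * side checks for the opposite of what it sends. */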

	/* Copy the data back to the right position.  XXX: Would probably be
	 * better to copy and decrypt at the same time. */

	blocksize = crypto_blkcipher_blocksize(kctx->enc);
	data_start = ptr + 22 + blocksize;
	orig_start = buf->head[0].iov_base + offset;
	data_len = (buf->head[0].iov_base + buf->head[0].iov_len) - data_start;
	memmove(orig_start, data_start, data_len);
	buf->head[0].iov_len -= (data_start - orig_start);
	buf->len -= (data_start - orig_start);
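
	/* Gloss (mine): ptr points just past the two token-id bytes, so
	 * ptr + 22 skips the remaining fixed header fields (2 + 2 + 2 + 8
	 * + 8 bytes) and lands on the confounder; ptr + 22 + blocksize is
	 * therefore the first byte of real data, which slides back to the
	 * caller's offset. */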

	if (gss_krb5_remove_padding(buf, blocksize))
		return GSS_S_DEFECTIVE_TOKEN;

	return GSS_S_COMPLETE;
}
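
/*
 * Note (mine): these two entry points are exported through the krb5
 * mechanism's struct gss_api_ops (see gss_krb5_mech.c), so the generic
 * rpcsec_gss code reaches them via gss_wrap()/gss_unwrap() without any
 * kerberos-specific knowledge.
 */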