2 * linux/net/sunrpc/gss_krb5_crypto.c
4 * Copyright (c) 2000 The Regents of the University of Michigan.
7 * Andy Adamson <andros@umich.edu>
8 * Bruce Fields <bfields@umich.edu>
12 * Copyright (C) 1998 by the FundsXpress, INC.
14 * All rights reserved.
16 * Export of this software from the United States of America may require
17 * a specific license from the United States Government. It is the
18 * responsibility of any person or organization contemplating export to
19 * obtain such a license before exporting.
21 * WITHIN THAT CONSTRAINT, permission to use, copy, modify, and
22 * distribute this software and its documentation for any purpose and
23 * without fee is hereby granted, provided that the above copyright
24 * notice appear in all copies and that both that copyright notice and
25 * this permission notice appear in supporting documentation, and that
26 * the name of FundsXpress. not be used in advertising or publicity pertaining
27 * to distribution of the software without specific, written prior
28 * permission. FundsXpress makes no representations about the suitability of
29 * this software for any purpose. It is provided "as is" without express
30 * or implied warranty.
32 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
33 * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
34 * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
37 #include <linux/err.h>
38 #include <linux/types.h>
40 #include <linux/slab.h>
41 #include <linux/scatterlist.h>
42 #include <linux/crypto.h>
43 #include <linux/highmem.h>
44 #include <linux/pagemap.h>
45 #include <linux/sunrpc/gss_krb5.h>
48 # define RPCDBG_FACILITY RPCDBG_AUTH
/*
 * Fragment of krb5_encrypt() — the signature's first line and several
 * body lines are elided in this view (note the gaps in the embedded
 * original line numbers).  NOTE(review): do not infer the missing
 * lines; confirm against the full file before editing.
 */
53 struct crypto_blkcipher *tfm,
/* One-entry scatterlist and a zeroed 16-byte IV buffer for the cipher. */
60 struct scatterlist sg[1];
61 u8 local_iv[16] = {0};
62 struct blkcipher_desc desc = { .tfm = tfm, .info = local_iv };
/* Input must be a whole number of cipher blocks; no padding is done here. */
64 if (length % crypto_blkcipher_blocksize(tfm) != 0)
/* local_iv is only 16 bytes; reject ciphers whose IV would not fit. */
67 if (crypto_blkcipher_ivsize(tfm) > 16) {
68 dprintk("RPC: gss_k5encrypt: tfm iv size to large %d\n",
69 crypto_blkcipher_ivsize(tfm));
/* Seed the cipher with the caller-supplied IV.  NOTE(review): the guard
 * for a NULL 'iv' (if any) is elided in this view — confirm upstream. */
74 memcpy(local_iv, iv, crypto_blkcipher_ivsize(tfm))
76 memcpy(out, in, length);
/* Encrypt in place: plaintext was copied into 'out' just above. */
77 sg_set_buf(sg, out, length);
79 ret = crypto_blkcipher_encrypt_iv(&desc, sg, sg, length);
81 dprintk("RPC: krb5_encrypt returns %d\n",ret);
85 EXPORT_SYMBOL(krb5_encrypt);
/*
 * Fragment of krb5_decrypt() — mirror image of krb5_encrypt(); the
 * signature's first line and several body lines are elided in this view.
 * NOTE(review): do not infer the missing lines; confirm against the
 * full file before editing.
 */
89 struct crypto_blkcipher *tfm,
/* One-entry scatterlist and a zeroed 16-byte IV buffer for the cipher. */
96 struct scatterlist sg[1];
97 u8 local_iv[16] = {0};
98 struct blkcipher_desc desc = { .tfm = tfm, .info = local_iv };
/* Ciphertext must be block-aligned; no padding is handled here. */
100 if (length % crypto_blkcipher_blocksize(tfm) != 0)
/* local_iv is only 16 bytes; reject ciphers whose IV would not fit. */
103 if (crypto_blkcipher_ivsize(tfm) > 16) {
104 dprintk("RPC: gss_k5decrypt: tfm iv size to large %d\n",
105 crypto_blkcipher_ivsize(tfm));
/* Seed the cipher with the caller-supplied IV (NULL-guard elided here). */
109 memcpy(local_iv,iv, crypto_blkcipher_ivsize(tfm))
111 memcpy(out, in, length);
/* Decrypt in place: ciphertext was copied into 'out' just above. */
112 sg_set_buf(sg, out, length);
114 ret = crypto_blkcipher_decrypt_iv(&desc, sg, sg, length);
116 dprintk("RPC: gss_k5decrypt returns %d\n",ret);
120 EXPORT_SYMBOL(krb5_decrypt);
/*
 * Fragment of process_xdr_buf(): walks an xdr_buf (head iovec, page
 * array, tail iovec) across the range [offset, offset+len) and invokes
 * 'actor' with a one-entry scatterlist for each contiguous piece.
 * NOTE(review): many lines are elided in this view (error checks, loop
 * headers, early returns) — confirm against the full file.
 */
123 process_xdr_buf(struct xdr_buf *buf, int offset, int len,
124 int (*actor)(struct scatterlist *, void *), void *data)
126 int i, page_len, thislen, page_offset, ret = 0;
127 struct scatterlist sg[1];
/* Head: if offset lands past the head iovec, skip the head entirely. */
129 if (offset >= buf->head[0].iov_len) {
130 offset -= buf->head[0].iov_len;
132 thislen = buf->head[0].iov_len - offset;
135 sg_set_buf(sg, buf->head[0].iov_base + offset, thislen);
136 ret = actor(sg, data);
/* Pages: translate the running offset into (page index, in-page offset). */
145 if (offset >= buf->page_len) {
146 offset -= buf->page_len;
148 page_len = buf->page_len - offset;
152 page_offset = (offset + buf->page_base) & (PAGE_CACHE_SIZE - 1);
153 i = (offset + buf->page_base) >> PAGE_CACHE_SHIFT;
154 thislen = PAGE_CACHE_SIZE - page_offset;
156 if (thislen > page_len)
/* Scatterlist entry filled field-by-field (pre-sg_set_page era API). */
158 sg->page = buf->pages[i];
159 sg->offset = page_offset;
160 sg->length = thislen;
161 ret = actor(sg, data);
/* Subsequent iterations start at in-page offset 0, full-page sized. */
167 thislen = PAGE_CACHE_SIZE;
168 } while (page_len != 0);
/* Tail: whatever of the range remains falls into the tail iovec. */
174 if (offset < buf->tail[0].iov_len) {
175 thislen = buf->tail[0].iov_len - offset;
178 sg_set_buf(sg, buf->tail[0].iov_base + offset, thislen);
179 ret = actor(sg, data);
/*
 * process_xdr_buf() actor: folds one scatterlist fragment into the
 * running hash carried in 'data' (a struct hash_desc).  The return-type
 * line of this function is elided in this view.
 */
189 checksummer(struct scatterlist *sg, void *data)
191 struct hash_desc *desc = data;
193 return crypto_hash_update(desc, sg, sg->length);
196 /* checksum the plaintext data and hdrlen bytes of the token header */
/*
 * Fragment of make_checksum(): hashes 'header' (hdrlen bytes) and then
 * the xdr_buf 'body' from body_offset onward into cksum->data, using the
 * digest selected by cksumtype.  NOTE(review): switch arms, the
 * cksumname assignment, and error-goto lines are elided in this view —
 * confirm against the full file.
 */
198 make_checksum(s32 cksumtype, char *header, int hdrlen, struct xdr_buf *body,
199 int body_offset, struct xdr_netobj *cksum)
202 struct hash_desc desc; /* XXX add to ctx? */
203 struct scatterlist sg[1];
/* Only RSA-MD5 appears supported; any other cksumtype fails below. */
207 case CKSUMTYPE_RSA_MD5:
211 dprintk("RPC: krb5_make_checksum:"
212 " unsupported checksum %d", cksumtype);
213 return GSS_S_FAILURE;
/* Allocate a synchronous hash transform by algorithm name. */
215 desc.tfm = crypto_alloc_hash(cksumname, 0, CRYPTO_ALG_ASYNC);
216 if (IS_ERR(desc.tfm))
217 return GSS_S_FAILURE;
218 cksum->len = crypto_hash_digestsize(desc.tfm);
219 desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;
221 err = crypto_hash_init(&desc);
/* Hash the token header first... */
224 sg_set_buf(sg, header, hdrlen);
225 err = crypto_hash_update(&desc, sg, hdrlen);
/* ...then the message body, one scatterlist fragment at a time. */
228 err = process_xdr_buf(body, body_offset, body->len - body_offset,
232 err = crypto_hash_final(&desc, cksum->data);
/* Free the tfm whether or not an earlier step failed. */
235 crypto_free_hash(desc.tfm);
236 return err ? GSS_S_FAILURE : 0;
239 EXPORT_SYMBOL(make_checksum);
/*
 * Per-call state threaded through encryptor() by gss_encrypt_xdr_buf().
 * NOTE(review): some members referenced by encryptor() (fragno, fraglen,
 * pos, pages) are elided in this view — confirm against the full file.
 */
241 struct encryptor_desc {
242 u8 iv[8]; /* XXX hard-coded blocksize */
243 struct blkcipher_desc desc;
245 struct xdr_buf *outbuf;
/* Up to 4 pending fragments: head, end of one page, start of next, tail. */
247 struct scatterlist infrags[4];
248 struct scatterlist outfrags[4];
/*
 * Fragment of encryptor(): process_xdr_buf() actor for
 * gss_encrypt_xdr_buf().  Accumulates scatterlist fragments until a
 * whole number of cipher blocks is buffered, then encrypts them with a
 * chained IV.  NOTE(review): several lines are elided in this view
 * (loop/branch headers, returns) — confirm against the full file.
 */
254 encryptor(struct scatterlist *sg, void *data)
256 struct encryptor_desc *desc = data;
257 struct xdr_buf *outbuf = desc->outbuf;
258 struct page *in_page;
259 int thislen = desc->fraglen + sg->length;
263 /* Worst case is 4 fragments: head, end of page 1, start
264 * of page 2, tail. Anything more is a bug. */
265 BUG_ON(desc->fragno > 3);
266 desc->infrags[desc->fragno] = *sg;
267 desc->outfrags[desc->fragno] = *sg;
/* Fragments in the page area may read plaintext from a different page
 * (desc->pages) than the output page. */
269 page_pos = desc->pos - outbuf->head[0].iov_len;
270 if (page_pos >= 0 && page_pos < outbuf->page_len) {
271 /* pages are not in place: */
272 int i = (page_pos + outbuf->page_base) >> PAGE_CACHE_SHIFT;
273 in_page = desc->pages[i];
277 desc->infrags[desc->fragno].page = in_page;
279 desc->fraglen += sg->length;
280 desc->pos += sg->length;
/* Encrypt only whole 8-byte blocks; hold back the remainder. */
282 fraglen = thislen & 7; /* XXX hardcoded blocksize */
288 ret = crypto_blkcipher_encrypt_iv(&desc->desc, desc->outfrags,
289 desc->infrags, thislen);
/* Stash the sub-block leftover as fragment 0 for the next invocation. */
293 desc->outfrags[0].page = sg->page;
294 desc->outfrags[0].offset = sg->offset + sg->length - fraglen;
295 desc->outfrags[0].length = fraglen;
296 desc->infrags[0] = desc->outfrags[0];
297 desc->infrags[0].page = in_page;
299 desc->fraglen = fraglen;
/*
 * Fragment of gss_encrypt_xdr_buf(): in-place CBC-style encryption of
 * buf from 'offset' to the end, driven through process_xdr_buf() with
 * encryptor() as the actor.  NOTE(review): desc setup lines (tfm,
 * fragment counters, pages pointer) are elided in this view.
 */
308 gss_encrypt_xdr_buf(struct crypto_blkcipher *tfm, struct xdr_buf *buf,
309 int offset, struct page **pages)
312 struct encryptor_desc desc;
/* Caller must supply a whole number of cipher blocks. */
314 BUG_ON((buf->len - offset) % crypto_blkcipher_blocksize(tfm) != 0);
/* Start from an all-zero IV; chaining happens inside encryptor(). */
316 memset(desc.iv, 0, sizeof(desc.iv));
318 desc.desc.info = desc.iv;
326 ret = process_xdr_buf(buf, offset, buf->len - offset, encryptor, &desc);
330 EXPORT_SYMBOL(gss_encrypt_xdr_buf);
/*
 * Per-call state threaded through decryptor() by gss_decrypt_xdr_buf().
 * Decryption is in place, so only one fragment list is needed.
 * NOTE(review): some members referenced by decryptor() (fragno, fraglen)
 * are elided in this view — confirm against the full file.
 */
332 struct decryptor_desc {
333 u8 iv[8]; /* XXX hard-coded blocksize */
334 struct blkcipher_desc desc;
335 struct scatterlist frags[4];
/*
 * Fragment of decryptor(): process_xdr_buf() actor for
 * gss_decrypt_xdr_buf().  Same accumulate-then-process scheme as
 * encryptor(), but in place (single fragment list).  NOTE(review):
 * several lines are elided in this view — confirm against the full file.
 */
341 decryptor(struct scatterlist *sg, void *data)
343 struct decryptor_desc *desc = data;
344 int thislen = desc->fraglen + sg->length;
347 /* Worst case is 4 fragments: head, end of page 1, start
348 * of page 2, tail. Anything more is a bug. */
349 BUG_ON(desc->fragno > 3);
350 desc->frags[desc->fragno] = *sg;
352 desc->fraglen += sg->length;
/* Decrypt only whole 8-byte blocks; hold back the remainder. */
354 fraglen = thislen & 7; /* XXX hardcoded blocksize */
360 ret = crypto_blkcipher_decrypt_iv(&desc->desc, desc->frags,
361 desc->frags, thislen);
/* Carry the sub-block leftover into frags[0] for the next invocation. */
365 desc->frags[0].page = sg->page;
366 desc->frags[0].offset = sg->offset + sg->length - fraglen;
367 desc->frags[0].length = fraglen;
369 desc->fraglen = fraglen;
/*
 * Fragment of gss_decrypt_xdr_buf(): in-place decryption of buf from
 * 'offset' to the end via process_xdr_buf()/decryptor().  NOTE(review):
 * the second parameter line and desc setup lines (tfm, fragment
 * counters) are elided in this view — confirm against the full file.
 */
378 gss_decrypt_xdr_buf(struct crypto_blkcipher *tfm, struct xdr_buf *buf,
381 struct decryptor_desc desc;
/* Caller must supply a whole number of cipher blocks. */
384 BUG_ON((buf->len - offset) % crypto_blkcipher_blocksize(tfm) != 0);
/* Start from an all-zero IV; chaining happens inside decryptor(). */
386 memset(desc.iv, 0, sizeof(desc.iv));
388 desc.desc.info = desc.iv;
392 return process_xdr_buf(buf, offset, buf->len - offset, decryptor, &desc);
395 EXPORT_SYMBOL(gss_decrypt_xdr_buf);