1 /* $Id: cryptocop.c,v 1.13 2005/04/21 17:27:55 henriken Exp $
3 * Stream co-processor driver for the ETRAX FS
5 * Copyright (C) 2003-2005 Axis Communications AB
8 #include <linux/init.h>
9 #include <linux/sched.h>
10 #include <linux/module.h>
11 #include <linux/slab.h>
12 #include <linux/string.h>
15 #include <linux/spinlock.h>
16 #include <linux/stddef.h>
18 #include <asm/uaccess.h>
20 #include <asm/atomic.h>
22 #include <linux/list.h>
23 #include <linux/interrupt.h>
25 #include <asm/signal.h>
28 #include <asm/arch/dma.h>
29 #include <asm/arch/hwregs/dma.h>
30 #include <asm/arch/hwregs/reg_map.h>
31 #include <asm/arch/hwregs/reg_rdwr.h>
32 #include <asm/arch/hwregs/intr_vect_defs.h>
34 #include <asm/arch/hwregs/strcop.h>
35 #include <asm/arch/hwregs/strcop_defs.h>
36 #include <asm/arch/cryptocop.h>
40 #define DESCR_ALLOC_PAD (31)
42 struct cryptocop_dma_desc {
43 char *free_buf; /* If non-null will be kfreed in free_cdesc() */
44 dma_descr_data *dma_descr;
46 unsigned char dma_descr_buf[sizeof(dma_descr_data) + DESCR_ALLOC_PAD];
48 unsigned int from_pool:1; /* If 1 'allocated' from the descriptor pool. */
49 struct cryptocop_dma_desc *next;
53 struct cryptocop_int_operation{
55 cryptocop_session_id sid;
57 dma_descr_context ctx_out;
58 dma_descr_context ctx_in;
60 /* DMA descriptors allocated by driver. */
61 struct cryptocop_dma_desc *cdesc_out;
62 struct cryptocop_dma_desc *cdesc_in;
64 /* Strcop config to use. */
65 cryptocop_3des_mode tdes_mode;
66 cryptocop_csum_type csum_mode;
68 /* DMA descrs provided by consumer. */
69 dma_descr_data *ddesc_out;
70 dma_descr_data *ddesc_in;
74 struct cryptocop_tfrm_ctx {
75 cryptocop_tfrm_id tid;
76 unsigned int blocklength;
78 unsigned int start_ix;
80 struct cryptocop_tfrm_cfg *tcfg;
81 struct cryptocop_transform_ctx *tctx;
83 unsigned char previous_src;
84 unsigned char current_src;
86 /* Values to use in metadata out. */
87 unsigned char hash_conf;
88 unsigned char hash_mode;
89 unsigned char ciph_conf;
90 unsigned char cbcmode;
91 unsigned char decrypt;
93 unsigned int requires_padding:1;
94 unsigned int strict_block_length:1;
95 unsigned int active:1;
100 /* Pad (input) descriptors to put in the DMA out list when the transform
101 * output is put on the DMA in list. */
102 struct cryptocop_dma_desc *pad_descs;
104 struct cryptocop_tfrm_ctx *prev_src;
105 struct cryptocop_tfrm_ctx *curr_src;
108 unsigned char unit_no;
112 struct cryptocop_private{
113 cryptocop_session_id sid;
114 struct cryptocop_private *next;
119 struct cryptocop_transform_ctx{
120 struct cryptocop_transform_init init;
121 unsigned char dec_key[CRYPTOCOP_MAX_KEY_LENGTH];
122 unsigned int dec_key_set:1;
124 struct cryptocop_transform_ctx *next;
128 struct cryptocop_session{
129 cryptocop_session_id sid;
131 struct cryptocop_transform_ctx *tfrm_ctx;
133 struct cryptocop_session *next;
136 /* Priority levels for jobs sent to the cryptocop. Checksum operations from
137 kernel have highest priority since TCPIP stack processing must not
140 cryptocop_prio_kernel_csum = 0,
141 cryptocop_prio_kernel = 1,
142 cryptocop_prio_user = 2,
143 cryptocop_prio_no_prios = 3
144 } cryptocop_queue_priority;
146 struct cryptocop_prio_queue{
147 struct list_head jobs;
148 cryptocop_queue_priority prio;
151 struct cryptocop_prio_job{
152 struct list_head node;
153 cryptocop_queue_priority prio;
155 struct cryptocop_operation *oper;
156 struct cryptocop_int_operation *iop;
159 struct ioctl_job_cb_ctx {
160 unsigned int processed:1;
164 static struct cryptocop_session *cryptocop_sessions = NULL;
165 spinlock_t cryptocop_sessions_lock;
167 /* Next Session ID to assign. */
168 static cryptocop_session_id next_sid = 1;
170 /* Pad for checksum. */
171 static const char csum_zero_pad[1] = {0x00};
173 /* Trash buffer for mem2mem operations. */
174 #define MEM2MEM_DISCARD_BUF_LENGTH (512)
175 static unsigned char mem2mem_discard_buf[MEM2MEM_DISCARD_BUF_LENGTH];
177 /* Descriptor pool. */
178 /* FIXME Tweak this value. */
179 #define CRYPTOCOP_DESCRIPTOR_POOL_SIZE (100)
180 static struct cryptocop_dma_desc descr_pool[CRYPTOCOP_DESCRIPTOR_POOL_SIZE];
181 static struct cryptocop_dma_desc *descr_pool_free_list;
182 static int descr_pool_no_free;
183 static spinlock_t descr_pool_lock;
185 /* Lock to stop cryptocop to start processing of a new operation. The holder
186 of this lock MUST call cryptocop_start_job() after it is unlocked. */
187 spinlock_t cryptocop_process_lock;
189 static struct cryptocop_prio_queue cryptocop_job_queues[cryptocop_prio_no_prios];
190 static spinlock_t cryptocop_job_queue_lock;
191 static struct cryptocop_prio_job *cryptocop_running_job = NULL;
192 static spinlock_t running_job_lock;
194 /* The interrupt handler appends completed jobs to this list. The scehduled
195 * tasklet removes them upon sending the response to the crypto consumer. */
196 static struct list_head cryptocop_completed_jobs;
197 static spinlock_t cryptocop_completed_jobs_lock;
199 DECLARE_WAIT_QUEUE_HEAD(cryptocop_ioc_process_wq);
202 /** Local functions. **/
204 static int cryptocop_open(struct inode *, struct file *);
206 static int cryptocop_release(struct inode *, struct file *);
208 static int cryptocop_ioctl(struct inode *inode, struct file *file,
209 unsigned int cmd, unsigned long arg);
211 static void cryptocop_start_job(void);
213 static int cryptocop_job_queue_insert(cryptocop_queue_priority prio, struct cryptocop_operation *operation);
214 static int cryptocop_job_setup(struct cryptocop_prio_job **pj, struct cryptocop_operation *operation);
216 static int cryptocop_job_queue_init(void);
217 static void cryptocop_job_queue_close(void);
219 static int create_md5_pad(int alloc_flag, unsigned long long hashed_length, char **pad, size_t *pad_length);
221 static int create_sha1_pad(int alloc_flag, unsigned long long hashed_length, char **pad, size_t *pad_length);
223 static int transform_ok(struct cryptocop_transform_init *tinit);
225 static struct cryptocop_session *get_session(cryptocop_session_id sid);
227 static struct cryptocop_transform_ctx *get_transform_ctx(struct cryptocop_session *sess, cryptocop_tfrm_id tid);
229 static void delete_internal_operation(struct cryptocop_int_operation *iop);
231 static void get_aes_decrypt_key(unsigned char *dec_key, const unsigned char *key, unsigned int keylength);
233 static int init_stream_coprocessor(void);
235 static void __exit exit_stream_coprocessor(void);
240 #define DEBUG_API(s) s
241 static void print_cryptocop_operation(struct cryptocop_operation *cop);
242 static void print_dma_descriptors(struct cryptocop_int_operation *iop);
243 static void print_strcop_crypto_op(struct strcop_crypto_op *cop);
244 static void print_lock_status(void);
245 static void print_user_dma_lists(struct cryptocop_dma_list_operation *dma_op);
246 #define assert(s) do{if (!(s)) panic(#s);} while(0);
254 /* Transform constants. */
255 #define DES_BLOCK_LENGTH (8)
256 #define AES_BLOCK_LENGTH (16)
257 #define MD5_BLOCK_LENGTH (64)
258 #define SHA1_BLOCK_LENGTH (64)
259 #define CSUM_BLOCK_LENGTH (2)
260 #define MD5_STATE_LENGTH (16)
261 #define SHA1_STATE_LENGTH (20)
263 /* The device number. */
264 #define CRYPTOCOP_MAJOR (254)
265 #define CRYPTOCOP_MINOR (0)
269 struct file_operations cryptocop_fops = {
271 open: cryptocop_open,
272 release: cryptocop_release,
273 ioctl: cryptocop_ioctl
277 static void free_cdesc(struct cryptocop_dma_desc *cdesc)
279 DEBUG(printk("free_cdesc: cdesc 0x%p, from_pool=%d\n", cdesc, cdesc->from_pool));
280 kfree(cdesc->free_buf);
282 if (cdesc->from_pool) {
283 unsigned long int flags;
284 spin_lock_irqsave(&descr_pool_lock, flags);
285 cdesc->next = descr_pool_free_list;
286 descr_pool_free_list = cdesc;
287 ++descr_pool_no_free;
288 spin_unlock_irqrestore(&descr_pool_lock, flags);
295 static struct cryptocop_dma_desc *alloc_cdesc(int alloc_flag)
297 int use_pool = (alloc_flag & GFP_ATOMIC) ? 1 : 0;
298 struct cryptocop_dma_desc *cdesc;
301 unsigned long int flags;
302 spin_lock_irqsave(&descr_pool_lock, flags);
303 if (!descr_pool_free_list) {
304 spin_unlock_irqrestore(&descr_pool_lock, flags);
305 DEBUG_API(printk("alloc_cdesc: pool is empty\n"));
308 cdesc = descr_pool_free_list;
309 descr_pool_free_list = descr_pool_free_list->next;
310 --descr_pool_no_free;
311 spin_unlock_irqrestore(&descr_pool_lock, flags);
312 cdesc->from_pool = 1;
314 cdesc = kmalloc(sizeof(struct cryptocop_dma_desc), alloc_flag);
316 DEBUG_API(printk("alloc_cdesc: kmalloc\n"));
319 cdesc->from_pool = 0;
321 cdesc->dma_descr = (dma_descr_data*)(((unsigned long int)cdesc + offsetof(struct cryptocop_dma_desc, dma_descr_buf) + DESCR_ALLOC_PAD) & ~0x0000001F);
325 cdesc->free_buf = NULL;
326 cdesc->dma_descr->out_eop = 0;
327 cdesc->dma_descr->in_eop = 0;
328 cdesc->dma_descr->intr = 0;
329 cdesc->dma_descr->eol = 0;
330 cdesc->dma_descr->wait = 0;
331 cdesc->dma_descr->buf = NULL;
332 cdesc->dma_descr->after = NULL;
334 DEBUG_API(printk("alloc_cdesc: return 0x%p, cdesc->dma_descr=0x%p, from_pool=%d\n", cdesc, cdesc->dma_descr, cdesc->from_pool));
339 static void setup_descr_chain(struct cryptocop_dma_desc *cd)
341 DEBUG(printk("setup_descr_chain: entering\n"));
344 cd->dma_descr->next = (dma_descr_data*)virt_to_phys(cd->next->dma_descr);
346 cd->dma_descr->next = NULL;
350 DEBUG(printk("setup_descr_chain: exit\n"));
354 /* Create a pad descriptor for the transform.
355 * Return -1 for error, 0 if pad created. */
356 static int create_pad_descriptor(struct cryptocop_tfrm_ctx *tc, struct cryptocop_dma_desc **pad_desc, int alloc_flag)
358 struct cryptocop_dma_desc *cdesc = NULL;
360 struct strcop_meta_out mo = {
368 DEBUG(printk("create_pad_descriptor: start.\n"));
369 /* Setup pad descriptor. */
371 DEBUG(printk("create_pad_descriptor: setting up padding.\n"));
372 cdesc = alloc_cdesc(alloc_flag);
374 DEBUG_API(printk("create_pad_descriptor: alloc pad desc\n"));
377 switch (tc->unit_no) {
379 error = create_md5_pad(alloc_flag, tc->consumed, &pad, &plen);
381 DEBUG_API(printk("create_pad_descriptor: create_md5_pad_failed\n"));
384 cdesc->free_buf = pad;
385 mo.hashsel = src_dma;
386 mo.hashconf = tc->hash_conf;
387 mo.hashmode = tc->hash_mode;
390 error = create_sha1_pad(alloc_flag, tc->consumed, &pad, &plen);
392 DEBUG_API(printk("create_pad_descriptor: create_sha1_pad_failed\n"));
395 cdesc->free_buf = pad;
396 mo.hashsel = src_dma;
397 mo.hashconf = tc->hash_conf;
398 mo.hashmode = tc->hash_mode;
401 if (tc->consumed % tc->blocklength){
402 pad = (char*)csum_zero_pad;
405 pad = (char*)cdesc; /* Use any pointer. */
408 mo.csumsel = src_dma;
411 cdesc->dma_descr->wait = 1;
412 cdesc->dma_descr->out_eop = 1; /* Since this is a pad output is pushed. EOP is ok here since the padded unit is the only one active. */
413 cdesc->dma_descr->buf = (char*)virt_to_phys((char*)pad);
414 cdesc->dma_descr->after = cdesc->dma_descr->buf + plen;
416 cdesc->dma_descr->md = REG_TYPE_CONV(unsigned short int, struct strcop_meta_out, mo);
422 if (cdesc) free_cdesc(cdesc);
427 static int setup_key_dl_desc(struct cryptocop_tfrm_ctx *tc, struct cryptocop_dma_desc **kd, int alloc_flag)
429 struct cryptocop_dma_desc *key_desc = alloc_cdesc(alloc_flag);
430 struct strcop_meta_out mo = {0};
432 DEBUG(printk("setup_key_dl_desc\n"));
435 DEBUG_API(printk("setup_key_dl_desc: failed descriptor allocation.\n"));
440 if ((tc->tctx->init.alg == cryptocop_alg_aes) && (tc->tcfg->flags & CRYPTOCOP_DECRYPT)) {
441 /* Precook the AES decrypt key. */
442 if (!tc->tctx->dec_key_set){
443 get_aes_decrypt_key(tc->tctx->dec_key, tc->tctx->init.key, tc->tctx->init.keylen);
444 tc->tctx->dec_key_set = 1;
446 key_desc->dma_descr->buf = (char*)virt_to_phys(tc->tctx->dec_key);
447 key_desc->dma_descr->after = key_desc->dma_descr->buf + tc->tctx->init.keylen/8;
449 key_desc->dma_descr->buf = (char*)virt_to_phys(tc->tctx->init.key);
450 key_desc->dma_descr->after = key_desc->dma_descr->buf + tc->tctx->init.keylen/8;
452 /* Setup metadata. */
454 switch (tc->tctx->init.keylen) {
474 mo.ciphsel = mo.hashsel = mo.csumsel = src_none;
475 key_desc->dma_descr->md = REG_TYPE_CONV(unsigned short int, struct strcop_meta_out, mo);
477 key_desc->dma_descr->out_eop = 1;
478 key_desc->dma_descr->wait = 1;
479 key_desc->dma_descr->intr = 0;
485 static int setup_cipher_iv_desc(struct cryptocop_tfrm_ctx *tc, struct cryptocop_dma_desc **id, int alloc_flag)
487 struct cryptocop_dma_desc *iv_desc = alloc_cdesc(alloc_flag);
488 struct strcop_meta_out mo = {0};
490 DEBUG(printk("setup_cipher_iv_desc\n"));
493 DEBUG_API(printk("setup_cipher_iv_desc: failed CBC IV descriptor allocation.\n"));
497 iv_desc->dma_descr->buf = (char*)virt_to_phys(tc->tcfg->iv);
498 iv_desc->dma_descr->after = iv_desc->dma_descr->buf + tc->blocklength;
500 /* Setup metadata. */
501 mo.hashsel = mo.csumsel = src_none;
502 mo.ciphsel = src_dma;
503 mo.ciphconf = tc->ciph_conf;
504 mo.cbcmode = tc->cbcmode;
506 iv_desc->dma_descr->md = REG_TYPE_CONV(unsigned short int, struct strcop_meta_out, mo);
508 iv_desc->dma_descr->out_eop = 0;
509 iv_desc->dma_descr->wait = 1;
510 iv_desc->dma_descr->intr = 0;
516 /* Map the ouput length of the transform to operation output starting on the inject index. */
517 static int create_input_descriptors(struct cryptocop_operation *operation, struct cryptocop_tfrm_ctx *tc, struct cryptocop_dma_desc **id, int alloc_flag)
520 struct cryptocop_dma_desc head = {0};
521 struct cryptocop_dma_desc *outdesc = &head;
522 size_t iov_offset = 0;
525 struct strcop_meta_in mi = {0};
527 size_t out_length = tc->produced;
531 assert(out_length != 0);
532 if (((tc->produced + tc->tcfg->inject_ix) > operation->tfrm_op.outlen) || (tc->produced && (operation->tfrm_op.outlen == 0))) {
533 DEBUG_API(printk("create_input_descriptors: operation outdata too small\n"));
536 /* Traverse the out iovec until the result inject index is reached. */
537 while ((outiov_ix < operation->tfrm_op.outcount) && ((out_ix + operation->tfrm_op.outdata[outiov_ix].iov_len) <= tc->tcfg->inject_ix)){
538 out_ix += operation->tfrm_op.outdata[outiov_ix].iov_len;
541 if (outiov_ix >= operation->tfrm_op.outcount){
542 DEBUG_API(printk("create_input_descriptors: operation outdata too small\n"));
545 iov_offset = tc->tcfg->inject_ix - out_ix;
546 mi.dmasel = tc->unit_no;
548 /* Setup the output descriptors. */
549 while ((out_length > 0) && (outiov_ix < operation->tfrm_op.outcount)) {
550 outdesc->next = alloc_cdesc(alloc_flag);
551 if (!outdesc->next) {
552 DEBUG_API(printk("create_input_descriptors: alloc_cdesc\n"));
556 outdesc = outdesc->next;
557 rem_length = operation->tfrm_op.outdata[outiov_ix].iov_len - iov_offset;
558 dlength = (out_length < rem_length) ? out_length : rem_length;
560 DEBUG(printk("create_input_descriptors:\n"
561 "outiov_ix=%d, rem_length=%d, dlength=%d\n"
562 "iov_offset=%d, outdata[outiov_ix].iov_len=%d\n"
563 "outcount=%d, outiov_ix=%d\n",
564 outiov_ix, rem_length, dlength, iov_offset, operation->tfrm_op.outdata[outiov_ix].iov_len, operation->tfrm_op.outcount, outiov_ix));
566 outdesc->dma_descr->buf = (char*)virt_to_phys(operation->tfrm_op.outdata[outiov_ix].iov_base + iov_offset);
567 outdesc->dma_descr->after = outdesc->dma_descr->buf + dlength;
568 outdesc->dma_descr->md = REG_TYPE_CONV(unsigned short int, struct strcop_meta_in, mi);
570 out_length -= dlength;
571 iov_offset += dlength;
572 if (iov_offset >= operation->tfrm_op.outdata[outiov_ix].iov_len) {
578 DEBUG_API(printk("create_input_descriptors: not enough room for output, %d remained\n", out_length));
582 /* Set sync in last descriptor. */
584 outdesc->dma_descr->md = REG_TYPE_CONV(unsigned short int, struct strcop_meta_in, mi);
591 outdesc = head.next->next;
592 free_cdesc(head.next);
599 static int create_output_descriptors(struct cryptocop_operation *operation, int *iniov_ix, int *iniov_offset, size_t desc_len, struct cryptocop_dma_desc **current_out_cdesc, struct strcop_meta_out *meta_out, int alloc_flag)
601 while (desc_len != 0) {
602 struct cryptocop_dma_desc *cdesc;
603 int rem_length = operation->tfrm_op.indata[*iniov_ix].iov_len - *iniov_offset;
604 int dlength = (desc_len < rem_length) ? desc_len : rem_length;
606 cdesc = alloc_cdesc(alloc_flag);
608 DEBUG_API(printk("create_output_descriptors: alloc_cdesc\n"));
611 (*current_out_cdesc)->next = cdesc;
612 (*current_out_cdesc) = cdesc;
614 cdesc->free_buf = NULL;
616 cdesc->dma_descr->buf = (char*)virt_to_phys(operation->tfrm_op.indata[*iniov_ix].iov_base + *iniov_offset);
617 cdesc->dma_descr->after = cdesc->dma_descr->buf + dlength;
620 *iniov_offset += dlength;
621 assert(desc_len >= 0);
622 if (*iniov_offset >= operation->tfrm_op.indata[*iniov_ix].iov_len) {
625 if (*iniov_ix > operation->tfrm_op.incount) {
626 DEBUG_API(printk("create_output_descriptors: not enough indata in operation."));
630 cdesc->dma_descr->md = REG_TYPE_CONV(unsigned short int, struct strcop_meta_out, (*meta_out));
631 } /* while (desc_len != 0) */
632 /* Last DMA descriptor gets a 'wait' bit to signal expected change in metadata. */
633 (*current_out_cdesc)->dma_descr->wait = 1; /* This will set extraneous WAIT in some situations, e.g. when padding hashes and checksums. */
639 static int append_input_descriptors(struct cryptocop_operation *operation, struct cryptocop_dma_desc **current_in_cdesc, struct cryptocop_dma_desc **current_out_cdesc, struct cryptocop_tfrm_ctx *tc, int alloc_flag)
641 DEBUG(printk("append_input_descriptors, tc=0x%p, unit_no=%d\n", tc, tc->unit_no));
644 struct cryptocop_dma_desc *idescs = NULL;
645 DEBUG(printk("append_input_descriptors: pushing output, consumed %d produced %d bytes.\n", tc->consumed, tc->produced));
647 DEBUG(printk("append_input_descriptors: append pad descriptors to DMA out list.\n"));
648 while (tc->pad_descs) {
649 DEBUG(printk("append descriptor 0x%p\n", tc->pad_descs));
650 (*current_out_cdesc)->next = tc->pad_descs;
651 tc->pad_descs = tc->pad_descs->next;
652 (*current_out_cdesc) = (*current_out_cdesc)->next;
656 /* Setup and append output descriptors to DMA in list. */
657 if (tc->unit_no == src_dma){
658 /* mem2mem. Setup DMA in descriptors to discard all input prior to the requested mem2mem data. */
659 struct strcop_meta_in mi = {.sync = 0, .dmasel = src_dma};
660 unsigned int start_ix = tc->start_ix;
662 unsigned int desclen = start_ix < MEM2MEM_DISCARD_BUF_LENGTH ? start_ix : MEM2MEM_DISCARD_BUF_LENGTH;
663 (*current_in_cdesc)->next = alloc_cdesc(alloc_flag);
664 if (!(*current_in_cdesc)->next){
665 DEBUG_API(printk("append_input_descriptors: alloc_cdesc mem2mem discard failed\n"));
668 (*current_in_cdesc) = (*current_in_cdesc)->next;
669 (*current_in_cdesc)->dma_descr->buf = (char*)virt_to_phys(mem2mem_discard_buf);
670 (*current_in_cdesc)->dma_descr->after = (*current_in_cdesc)->dma_descr->buf + desclen;
671 (*current_in_cdesc)->dma_descr->md = REG_TYPE_CONV(unsigned short int, struct strcop_meta_in, mi);
675 (*current_in_cdesc)->dma_descr->md = REG_TYPE_CONV(unsigned short int, struct strcop_meta_in, mi);
678 failed = create_input_descriptors(operation, tc, &idescs, alloc_flag);
680 DEBUG_API(printk("append_input_descriptors: output descriptor setup failed\n"));
683 DEBUG(printk("append_input_descriptors: append output descriptors to DMA in list.\n"));
685 DEBUG(printk("append descriptor 0x%p\n", idescs));
686 (*current_in_cdesc)->next = idescs;
687 idescs = idescs->next;
688 (*current_in_cdesc) = (*current_in_cdesc)->next;
696 static int cryptocop_setup_dma_list(struct cryptocop_operation *operation, struct cryptocop_int_operation **int_op, int alloc_flag)
698 struct cryptocop_session *sess;
699 struct cryptocop_transform_ctx *tctx;
701 struct cryptocop_tfrm_ctx digest_ctx = {
702 .previous_src = src_none,
703 .current_src = src_none,
705 .requires_padding = 1,
706 .strict_block_length = 0,
720 struct cryptocop_tfrm_ctx cipher_ctx = {
721 .previous_src = src_none,
722 .current_src = src_none,
724 .requires_padding = 0,
725 .strict_block_length = 1,
739 struct cryptocop_tfrm_ctx csum_ctx = {
740 .previous_src = src_none,
741 .current_src = src_none,
744 .requires_padding = 1,
745 .strict_block_length = 0,
759 .unit_no = src_csum};
760 struct cryptocop_tfrm_cfg *tcfg = operation->tfrm_op.tfrm_cfg;
762 unsigned int indata_ix = 0;
764 /* iovec accounting. */
766 int iniov_offset = 0;
768 /* Operation descriptor cfg traversal pointer. */
769 struct cryptocop_desc *odsc;
772 /* List heads for allocated descriptors. */
773 struct cryptocop_dma_desc out_cdesc_head = {0};
774 struct cryptocop_dma_desc in_cdesc_head = {0};
776 struct cryptocop_dma_desc *current_out_cdesc = &out_cdesc_head;
777 struct cryptocop_dma_desc *current_in_cdesc = &in_cdesc_head;
779 struct cryptocop_tfrm_ctx *output_tc = NULL;
782 assert(operation != NULL);
783 assert(int_op != NULL);
785 DEBUG(printk("cryptocop_setup_dma_list: start\n"));
786 DEBUG(print_cryptocop_operation(operation));
788 sess = get_session(operation->sid);
790 DEBUG_API(printk("cryptocop_setup_dma_list: no session found for operation.\n"));
794 iop_alloc_ptr = kmalloc(DESCR_ALLOC_PAD + sizeof(struct cryptocop_int_operation), alloc_flag);
795 if (!iop_alloc_ptr) {
796 DEBUG_API(printk("cryptocop_setup_dma_list: kmalloc cryptocop_int_operation\n"));
800 (*int_op) = (struct cryptocop_int_operation*)(((unsigned long int)(iop_alloc_ptr + DESCR_ALLOC_PAD + offsetof(struct cryptocop_int_operation, ctx_out)) & ~0x0000001F) - offsetof(struct cryptocop_int_operation, ctx_out));
801 DEBUG(memset((*int_op), 0xff, sizeof(struct cryptocop_int_operation)));
802 (*int_op)->alloc_ptr = iop_alloc_ptr;
803 DEBUG(printk("cryptocop_setup_dma_list: *int_op=0x%p, alloc_ptr=0x%p\n", *int_op, (*int_op)->alloc_ptr));
805 (*int_op)->sid = operation->sid;
806 (*int_op)->cdesc_out = NULL;
807 (*int_op)->cdesc_in = NULL;
808 (*int_op)->tdes_mode = cryptocop_3des_ede;
809 (*int_op)->csum_mode = cryptocop_csum_le;
810 (*int_op)->ddesc_out = NULL;
811 (*int_op)->ddesc_in = NULL;
813 /* Scan operation->tfrm_op.tfrm_cfg for bad configuration and set up the local contexts. */
815 DEBUG_API(printk("cryptocop_setup_dma_list: no configured transforms in operation.\n"));
820 tctx = get_transform_ctx(sess, tcfg->tid);
822 DEBUG_API(printk("cryptocop_setup_dma_list: no transform id %d in session.\n", tcfg->tid));
826 if (tcfg->inject_ix > operation->tfrm_op.outlen){
827 DEBUG_API(printk("cryptocop_setup_dma_list: transform id %d inject_ix (%d) > operation->tfrm_op.outlen(%d)", tcfg->tid, tcfg->inject_ix, operation->tfrm_op.outlen));
831 switch (tctx->init.alg){
832 case cryptocop_alg_mem2mem:
833 if (cipher_ctx.tcfg != NULL){
834 DEBUG_API(printk("cryptocop_setup_dma_list: multiple ciphers in operation.\n"));
838 /* mem2mem is handled as a NULL cipher. */
839 cipher_ctx.cbcmode = 0;
840 cipher_ctx.decrypt = 0;
841 cipher_ctx.blocklength = 1;
842 cipher_ctx.ciph_conf = 0;
843 cipher_ctx.unit_no = src_dma;
844 cipher_ctx.tcfg = tcfg;
845 cipher_ctx.tctx = tctx;
847 case cryptocop_alg_des:
848 case cryptocop_alg_3des:
849 case cryptocop_alg_aes:
851 if (cipher_ctx.tcfg != NULL){
852 DEBUG_API(printk("cryptocop_setup_dma_list: multiple ciphers in operation.\n"));
856 cipher_ctx.tcfg = tcfg;
857 cipher_ctx.tctx = tctx;
858 if (cipher_ctx.tcfg->flags & CRYPTOCOP_DECRYPT){
859 cipher_ctx.decrypt = 1;
861 switch (tctx->init.cipher_mode) {
862 case cryptocop_cipher_mode_ecb:
863 cipher_ctx.cbcmode = 0;
865 case cryptocop_cipher_mode_cbc:
866 cipher_ctx.cbcmode = 1;
869 DEBUG_API(printk("cryptocop_setup_dma_list: cipher_ctx, bad cipher mode==%d\n", tctx->init.cipher_mode));
873 DEBUG(printk("cryptocop_setup_dma_list: cipher_ctx, set CBC mode==%d\n", cipher_ctx.cbcmode));
874 switch (tctx->init.alg){
875 case cryptocop_alg_des:
876 cipher_ctx.ciph_conf = 0;
877 cipher_ctx.unit_no = src_des;
878 cipher_ctx.blocklength = DES_BLOCK_LENGTH;
880 case cryptocop_alg_3des:
881 cipher_ctx.ciph_conf = 1;
882 cipher_ctx.unit_no = src_des;
883 cipher_ctx.blocklength = DES_BLOCK_LENGTH;
885 case cryptocop_alg_aes:
886 cipher_ctx.ciph_conf = 2;
887 cipher_ctx.unit_no = src_aes;
888 cipher_ctx.blocklength = AES_BLOCK_LENGTH;
891 panic("cryptocop_setup_dma_list: impossible algorithm %d\n", tctx->init.alg);
893 (*int_op)->tdes_mode = tctx->init.tdes_mode;
895 case cryptocop_alg_md5:
896 case cryptocop_alg_sha1:
898 if (digest_ctx.tcfg != NULL){
899 DEBUG_API(printk("cryptocop_setup_dma_list: multiple digests in operation.\n"));
903 digest_ctx.tcfg = tcfg;
904 digest_ctx.tctx = tctx;
905 digest_ctx.hash_mode = 0; /* Don't use explicit IV in this API. */
906 switch (tctx->init.alg){
907 case cryptocop_alg_md5:
908 digest_ctx.blocklength = MD5_BLOCK_LENGTH;
909 digest_ctx.unit_no = src_md5;
910 digest_ctx.hash_conf = 1; /* 1 => MD-5 */
912 case cryptocop_alg_sha1:
913 digest_ctx.blocklength = SHA1_BLOCK_LENGTH;
914 digest_ctx.unit_no = src_sha1;
915 digest_ctx.hash_conf = 0; /* 0 => SHA-1 */
918 panic("cryptocop_setup_dma_list: impossible digest algorithm\n");
921 case cryptocop_alg_csum:
923 if (csum_ctx.tcfg != NULL){
924 DEBUG_API(printk("cryptocop_setup_dma_list: multiple checksums in operation.\n"));
928 (*int_op)->csum_mode = tctx->init.csum_mode;
929 csum_ctx.tcfg = tcfg;
930 csum_ctx.tctx = tctx;
934 DEBUG_API(printk("cryptocop_setup_dma_list: invalid algorithm %d specified in tfrm %d.\n", tctx->init.alg, tcfg->tid));
940 /* Download key if a cipher is used. */
941 if (cipher_ctx.tcfg && (cipher_ctx.tctx->init.alg != cryptocop_alg_mem2mem)){
942 struct cryptocop_dma_desc *key_desc = NULL;
944 failed = setup_key_dl_desc(&cipher_ctx, &key_desc, alloc_flag);
946 DEBUG_API(printk("cryptocop_setup_dma_list: setup key dl\n"));
949 current_out_cdesc->next = key_desc;
950 current_out_cdesc = key_desc;
951 indata_ix += (unsigned int)(key_desc->dma_descr->after - key_desc->dma_descr->buf);
953 /* Download explicit IV if a cipher is used and CBC mode and explicit IV selected. */
954 if ((cipher_ctx.tctx->init.cipher_mode == cryptocop_cipher_mode_cbc) && (cipher_ctx.tcfg->flags & CRYPTOCOP_EXPLICIT_IV)) {
955 struct cryptocop_dma_desc *iv_desc = NULL;
957 DEBUG(printk("cryptocop_setup_dma_list: setup cipher CBC IV descriptor.\n"));
959 failed = setup_cipher_iv_desc(&cipher_ctx, &iv_desc, alloc_flag);
961 DEBUG_API(printk("cryptocop_setup_dma_list: CBC IV descriptor.\n"));
964 current_out_cdesc->next = iv_desc;
965 current_out_cdesc = iv_desc;
966 indata_ix += (unsigned int)(iv_desc->dma_descr->after - iv_desc->dma_descr->buf);
970 /* Process descriptors. */
971 odsc = operation->tfrm_op.desc;
973 struct cryptocop_desc_cfg *dcfg = odsc->cfg;
974 struct strcop_meta_out meta_out = {0};
975 size_t desc_len = odsc->length;
976 int active_count, eop_needed_count;
980 DEBUG(printk("cryptocop_setup_dma_list: parsing an operation descriptor\n"));
983 struct cryptocop_tfrm_ctx *tc = NULL;
985 DEBUG(printk("cryptocop_setup_dma_list: parsing an operation descriptor configuration.\n"));
986 /* Get the local context for the transform and mark it as the output unit if it produces output. */
987 if (digest_ctx.tcfg && (digest_ctx.tcfg->tid == dcfg->tid)){
989 } else if (cipher_ctx.tcfg && (cipher_ctx.tcfg->tid == dcfg->tid)){
991 } else if (csum_ctx.tcfg && (csum_ctx.tcfg->tid == dcfg->tid)){
995 DEBUG_API(printk("cryptocop_setup_dma_list: invalid transform %d specified in descriptor.\n", dcfg->tid));
1000 DEBUG_API(printk("cryptocop_setup_dma_list: completed transform %d reused.\n", dcfg->tid));
1005 tc->start_ix = indata_ix;
1009 tc->previous_src = tc->current_src;
1010 tc->prev_src = tc->curr_src;
1011 /* Map source unit id to DMA source config. */
1013 case cryptocop_source_dma:
1014 tc->current_src = src_dma;
1016 case cryptocop_source_des:
1017 tc->current_src = src_des;
1019 case cryptocop_source_3des:
1020 tc->current_src = src_des;
1022 case cryptocop_source_aes:
1023 tc->current_src = src_aes;
1025 case cryptocop_source_md5:
1026 case cryptocop_source_sha1:
1027 case cryptocop_source_csum:
1028 case cryptocop_source_none:
1030 /* We do not allow using accumulating style units (SHA-1, MD5, checksum) as sources to other units.
1032 DEBUG_API(printk("cryptocop_setup_dma_list: bad unit source configured %d.\n", dcfg->src));
1036 if (tc->current_src != src_dma) {
1037 /* Find the unit we are sourcing from. */
1038 if (digest_ctx.unit_no == tc->current_src){
1039 tc->curr_src = &digest_ctx;
1040 } else if (cipher_ctx.unit_no == tc->current_src){
1041 tc->curr_src = &cipher_ctx;
1042 } else if (csum_ctx.unit_no == tc->current_src){
1043 tc->curr_src = &csum_ctx;
1045 if ((tc->curr_src == tc) && (tc->unit_no != src_dma)){
1046 DEBUG_API(printk("cryptocop_setup_dma_list: unit %d configured to source from itself.\n", tc->unit_no));
1051 tc->curr_src = NULL;
1054 /* Detect source switch. */
1055 DEBUG(printk("cryptocop_setup_dma_list: tc->active=%d tc->unit_no=%d tc->current_src=%d tc->previous_src=%d, tc->curr_src=0x%p, tc->prev_srv=0x%p\n", tc->active, tc->unit_no, tc->current_src, tc->previous_src, tc->curr_src, tc->prev_src));
1056 if (tc->active && (tc->current_src != tc->previous_src)) {
1057 /* Only allow source switch when both the old source unit and the new one have
1058 * no pending data to process (i.e. the consumed length must be a multiple of the
1059 * transform blocklength). */
1060 /* Note: if the src == NULL we are actually sourcing from DMA out. */
1061 if (((tc->prev_src != NULL) && (tc->prev_src->consumed % tc->prev_src->blocklength)) ||
1062 ((tc->curr_src != NULL) && (tc->curr_src->consumed % tc->curr_src->blocklength)))
1064 DEBUG_API(printk("cryptocop_setup_dma_list: can only disconnect from or connect to a unit on a multiple of the blocklength, old: cons=%d, prod=%d, block=%d, new: cons=%d prod=%d, block=%d.\n", tc->prev_src ? tc->prev_src->consumed : INT_MIN, tc->prev_src ? tc->prev_src->produced : INT_MIN, tc->prev_src ? tc->prev_src->blocklength : INT_MIN, tc->curr_src ? tc->curr_src->consumed : INT_MIN, tc->curr_src ? tc->curr_src->produced : INT_MIN, tc->curr_src ? tc->curr_src->blocklength : INT_MIN));
1069 /* Detect unit deactivation. */
1071 /* Length check of this is handled below. */
1075 } /* while (dcfg) */
1076 DEBUG(printk("cryptocop_setup_dma_list: parsing operation descriptor configuration complete.\n"));
1078 if (cipher_ctx.active && (cipher_ctx.curr_src != NULL) && !cipher_ctx.curr_src->active){
1079 DEBUG_API(printk("cryptocop_setup_dma_list: cipher source from inactive unit %d\n", cipher_ctx.curr_src->unit_no));
1083 if (digest_ctx.active && (digest_ctx.curr_src != NULL) && !digest_ctx.curr_src->active){
1084 DEBUG_API(printk("cryptocop_setup_dma_list: digest source from inactive unit %d\n", digest_ctx.curr_src->unit_no));
1088 if (csum_ctx.active && (csum_ctx.curr_src != NULL) && !csum_ctx.curr_src->active){
1089 DEBUG_API(printk("cryptocop_setup_dma_list: cipher source from inactive unit %d\n", csum_ctx.curr_src->unit_no));
1094 /* Update consumed and produced lengths.
1096 The consumed length accounting here is actually cheating. If a unit source from DMA (or any
1097 other unit that process data in blocks of one octet) it is correct, but if it source from a
1098 block processing unit, i.e. a cipher, it will be temporarily incorrect at some times. However
1099 since it is only allowed--by the HW--to change source to or from a block processing unit at times where that
1100 unit has processed an exact multiple of its block length the end result will be correct.
1101 Beware that if the source change restriction change this code will need to be (much) reworked.
1103 DEBUG(printk("cryptocop_setup_dma_list: desc->length=%d, desc_len=%d.\n", odsc->length, desc_len));
1105 if (csum_ctx.active) {
1106 csum_ctx.consumed += desc_len;
1107 if (csum_ctx.done) {
1108 csum_ctx.produced = 2;
1110 DEBUG(printk("cryptocop_setup_dma_list: csum_ctx producing: consumed=%d, produced=%d, blocklength=%d.\n", csum_ctx.consumed, csum_ctx.produced, csum_ctx.blocklength));
1112 if (digest_ctx.active) {
1113 digest_ctx.consumed += desc_len;
1114 if (digest_ctx.done) {
1115 if (digest_ctx.unit_no == src_md5) {
1116 digest_ctx.produced = MD5_STATE_LENGTH;
1118 digest_ctx.produced = SHA1_STATE_LENGTH;
1121 DEBUG(printk("cryptocop_setup_dma_list: digest_ctx producing: consumed=%d, produced=%d, blocklength=%d.\n", digest_ctx.consumed, digest_ctx.produced, digest_ctx.blocklength));
1123 if (cipher_ctx.active) {
1124 /* Ciphers are allowed only to source from DMA out. That is filtered above. */
1125 assert(cipher_ctx.current_src == src_dma);
1126 cipher_ctx.consumed += desc_len;
1127 cipher_ctx.produced = cipher_ctx.blocklength * (cipher_ctx.consumed / cipher_ctx.blocklength);
1128 if (cipher_ctx.cbcmode && !(cipher_ctx.tcfg->flags & CRYPTOCOP_EXPLICIT_IV) && cipher_ctx.produced){
1129 cipher_ctx.produced -= cipher_ctx.blocklength; /* Compensate for CBC iv. */
1131 DEBUG(printk("cryptocop_setup_dma_list: cipher_ctx producing: consumed=%d, produced=%d, blocklength=%d.\n", cipher_ctx.consumed, cipher_ctx.produced, cipher_ctx.blocklength));
1134 /* Setup the DMA out descriptors. */
1135 /* Configure the metadata. */
1137 eop_needed_count = 0;
1138 if (cipher_ctx.active) {
1140 if (cipher_ctx.unit_no == src_dma){
1142 meta_out.ciphsel = src_none;
1144 meta_out.ciphsel = cipher_ctx.current_src;
1146 meta_out.ciphconf = cipher_ctx.ciph_conf;
1147 meta_out.cbcmode = cipher_ctx.cbcmode;
1148 meta_out.decrypt = cipher_ctx.decrypt;
1149 DEBUG(printk("set ciphsel=%d ciphconf=%d cbcmode=%d decrypt=%d\n", meta_out.ciphsel, meta_out.ciphconf, meta_out.cbcmode, meta_out.decrypt));
1150 if (cipher_ctx.done) ++eop_needed_count;
1152 meta_out.ciphsel = src_none;
1155 if (digest_ctx.active) {
1157 meta_out.hashsel = digest_ctx.current_src;
1158 meta_out.hashconf = digest_ctx.hash_conf;
1159 meta_out.hashmode = 0; /* Explicit mode is not used here. */
1160 DEBUG(printk("set hashsel=%d hashconf=%d hashmode=%d\n", meta_out.hashsel, meta_out.hashconf, meta_out.hashmode));
1161 if (digest_ctx.done) {
1162 assert(digest_ctx.pad_descs == NULL);
1163 failed = create_pad_descriptor(&digest_ctx, &digest_ctx.pad_descs, alloc_flag);
1165 DEBUG_API(printk("cryptocop_setup_dma_list: failed digest pad creation.\n"));
1170 meta_out.hashsel = src_none;
1173 if (csum_ctx.active) {
1175 meta_out.csumsel = csum_ctx.current_src;
1176 if (csum_ctx.done) {
1177 assert(csum_ctx.pad_descs == NULL);
1178 failed = create_pad_descriptor(&csum_ctx, &csum_ctx.pad_descs, alloc_flag);
1180 DEBUG_API(printk("cryptocop_setup_dma_list: failed csum pad creation.\n"));
1185 meta_out.csumsel = src_none;
1187 DEBUG(printk("cryptocop_setup_dma_list: %d eop needed, %d active units\n", eop_needed_count, active_count));
1188 /* Setup DMA out descriptors for the indata. */
1189 failed = create_output_descriptors(operation, &iniov_ix, &iniov_offset, desc_len, ¤t_out_cdesc, &meta_out, alloc_flag);
1191 DEBUG_API(printk("cryptocop_setup_dma_list: create_output_descriptors %d\n", failed));
1194 /* Setup out EOP. If there are active units that are not done here they cannot get an EOP
1195 * so we ust setup a zero length descriptor to DMA to signal EOP only to done units.
1196 * If there is a pad descriptor EOP for the padded unit will be EOPed by it.
1198 assert(active_count >= eop_needed_count);
1199 assert((eop_needed_count == 0) || (eop_needed_count == 1));
1200 if (eop_needed_count) {
1201 /* This means that the bulk operation (cipeher/m2m) is terminated. */
1202 if (active_count > 1) {
1203 /* Use zero length EOP descriptor. */
1204 struct cryptocop_dma_desc *ed = alloc_cdesc(alloc_flag);
1205 struct strcop_meta_out ed_mo = {0};
1207 DEBUG_API(printk("cryptocop_setup_dma_list: alloc EOP descriptor for cipher\n"));
1212 assert(cipher_ctx.active && cipher_ctx.done);
1214 if (cipher_ctx.unit_no == src_dma){
1216 ed_mo.ciphsel = src_none;
1218 ed_mo.ciphsel = cipher_ctx.current_src;
1220 ed_mo.ciphconf = cipher_ctx.ciph_conf;
1221 ed_mo.cbcmode = cipher_ctx.cbcmode;
1222 ed_mo.decrypt = cipher_ctx.decrypt;
1224 ed->free_buf = NULL;
1225 ed->dma_descr->wait = 1;
1226 ed->dma_descr->out_eop = 1;
1228 ed->dma_descr->buf = (char*)virt_to_phys(&ed); /* Use any valid physical address for zero length descriptor. */
1229 ed->dma_descr->after = ed->dma_descr->buf;
1230 ed->dma_descr->md = REG_TYPE_CONV(unsigned short int, struct strcop_meta_out, ed_mo);
1231 current_out_cdesc->next = ed;
1232 current_out_cdesc = ed;
1234 /* Set EOP in the current out descriptor since the only active module is
1235 * the one needing the EOP. */
1237 current_out_cdesc->dma_descr->out_eop = 1;
1241 if (cipher_ctx.done && cipher_ctx.active) cipher_ctx.active = 0;
1242 if (digest_ctx.done && digest_ctx.active) digest_ctx.active = 0;
1243 if (csum_ctx.done && csum_ctx.active) csum_ctx.active = 0;
1244 indata_ix += odsc->length;
1246 } /* while (odsc) */ /* Process descriptors. */
1247 DEBUG(printk("cryptocop_setup_dma_list: done parsing operation descriptors\n"));
1248 if (cipher_ctx.tcfg && (cipher_ctx.active || !cipher_ctx.done)){
1249 DEBUG_API(printk("cryptocop_setup_dma_list: cipher operation not terminated.\n"));
1253 if (digest_ctx.tcfg && (digest_ctx.active || !digest_ctx.done)){
1254 DEBUG_API(printk("cryptocop_setup_dma_list: digest operation not terminated.\n"));
1258 if (csum_ctx.tcfg && (csum_ctx.active || !csum_ctx.done)){
1259 DEBUG_API(printk("cryptocop_setup_dma_list: csum operation not terminated.\n"));
1264 failed = append_input_descriptors(operation, ¤t_in_cdesc, ¤t_out_cdesc, &cipher_ctx, alloc_flag);
1266 DEBUG_API(printk("cryptocop_setup_dma_list: append_input_descriptors cipher_ctx %d\n", failed));
1269 failed = append_input_descriptors(operation, ¤t_in_cdesc, ¤t_out_cdesc, &digest_ctx, alloc_flag);
1271 DEBUG_API(printk("cryptocop_setup_dma_list: append_input_descriptors cipher_ctx %d\n", failed));
1274 failed = append_input_descriptors(operation, ¤t_in_cdesc, ¤t_out_cdesc, &csum_ctx, alloc_flag);
1276 DEBUG_API(printk("cryptocop_setup_dma_list: append_input_descriptors cipher_ctx %d\n", failed));
1280 DEBUG(printk("cryptocop_setup_dma_list: int_op=0x%p, *int_op=0x%p\n", int_op, *int_op));
1281 (*int_op)->cdesc_out = out_cdesc_head.next;
1282 (*int_op)->cdesc_in = in_cdesc_head.next;
1283 DEBUG(printk("cryptocop_setup_dma_list: out_cdesc_head=0x%p in_cdesc_head=0x%p\n", (*int_op)->cdesc_out, (*int_op)->cdesc_in));
1285 setup_descr_chain(out_cdesc_head.next);
1286 setup_descr_chain(in_cdesc_head.next);
1288 /* Last but not least: mark the last DMA in descriptor for a INTR and EOL and the the
1289 * last DMA out descriptor for EOL.
1291 current_in_cdesc->dma_descr->intr = 1;
1292 current_in_cdesc->dma_descr->eol = 1;
1293 current_out_cdesc->dma_descr->eol = 1;
1295 /* Setup DMA contexts. */
1296 (*int_op)->ctx_out.next = NULL;
1297 (*int_op)->ctx_out.eol = 1;
1298 (*int_op)->ctx_out.intr = 0;
1299 (*int_op)->ctx_out.store_mode = 0;
1300 (*int_op)->ctx_out.en = 0;
1301 (*int_op)->ctx_out.dis = 0;
1302 (*int_op)->ctx_out.md0 = 0;
1303 (*int_op)->ctx_out.md1 = 0;
1304 (*int_op)->ctx_out.md2 = 0;
1305 (*int_op)->ctx_out.md3 = 0;
1306 (*int_op)->ctx_out.md4 = 0;
1307 (*int_op)->ctx_out.saved_data = (dma_descr_data*)virt_to_phys((*int_op)->cdesc_out->dma_descr);
1308 (*int_op)->ctx_out.saved_data_buf = (*int_op)->cdesc_out->dma_descr->buf; /* Already physical address. */
1310 (*int_op)->ctx_in.next = NULL;
1311 (*int_op)->ctx_in.eol = 1;
1312 (*int_op)->ctx_in.intr = 0;
1313 (*int_op)->ctx_in.store_mode = 0;
1314 (*int_op)->ctx_in.en = 0;
1315 (*int_op)->ctx_in.dis = 0;
1316 (*int_op)->ctx_in.md0 = 0;
1317 (*int_op)->ctx_in.md1 = 0;
1318 (*int_op)->ctx_in.md2 = 0;
1319 (*int_op)->ctx_in.md3 = 0;
1320 (*int_op)->ctx_in.md4 = 0;
1322 (*int_op)->ctx_in.saved_data = (dma_descr_data*)virt_to_phys((*int_op)->cdesc_in->dma_descr);
1323 (*int_op)->ctx_in.saved_data_buf = (*int_op)->cdesc_in->dma_descr->buf; /* Already physical address. */
1325 DEBUG(printk("cryptocop_setup_dma_list: done\n"));
1330 /* Free all allocated resources. */
1331 struct cryptocop_dma_desc *tmp_cdesc;
1332 while (digest_ctx.pad_descs){
1333 tmp_cdesc = digest_ctx.pad_descs->next;
1334 free_cdesc(digest_ctx.pad_descs);
1335 digest_ctx.pad_descs = tmp_cdesc;
1337 while (csum_ctx.pad_descs){
1338 tmp_cdesc = csum_ctx.pad_descs->next;
1339 free_cdesc(csum_ctx.pad_descs);
1340 csum_ctx.pad_descs = tmp_cdesc;
1342 assert(cipher_ctx.pad_descs == NULL); /* The ciphers are never padded. */
1344 if (*int_op != NULL) delete_internal_operation(*int_op);
1346 DEBUG_API(printk("cryptocop_setup_dma_list: done with error %d\n", failed));
1351 static void delete_internal_operation(struct cryptocop_int_operation *iop)
1353 void *ptr = iop->alloc_ptr;
1354 struct cryptocop_dma_desc *cd = iop->cdesc_out;
1355 struct cryptocop_dma_desc *next;
1357 DEBUG(printk("delete_internal_operation: iop=0x%p, alloc_ptr=0x%p\n", iop, ptr));
1373 #define MD5_MIN_PAD_LENGTH (9)
1374 #define MD5_PAD_LENGTH_FIELD_LENGTH (8)
1376 static int create_md5_pad(int alloc_flag, unsigned long long hashed_length, char **pad, size_t *pad_length)
1378 size_t padlen = MD5_BLOCK_LENGTH - (hashed_length % MD5_BLOCK_LENGTH);
1381 unsigned long long int bit_length = hashed_length << 3;
1383 if (padlen < MD5_MIN_PAD_LENGTH) padlen += MD5_BLOCK_LENGTH;
1385 p = kmalloc(padlen, alloc_flag);
1386 if (!pad) return -ENOMEM;
1389 memset(p+1, 0, padlen - 1);
1391 DEBUG(printk("create_md5_pad: hashed_length=%lld bits == %lld bytes\n", bit_length, hashed_length));
1393 i = padlen - MD5_PAD_LENGTH_FIELD_LENGTH;
1394 while (bit_length != 0){
1395 p[i++] = bit_length % 0x100;
1400 *pad_length = padlen;
1405 #define SHA1_MIN_PAD_LENGTH (9)
1406 #define SHA1_PAD_LENGTH_FIELD_LENGTH (8)
1408 static int create_sha1_pad(int alloc_flag, unsigned long long hashed_length, char **pad, size_t *pad_length)
1410 size_t padlen = SHA1_BLOCK_LENGTH - (hashed_length % SHA1_BLOCK_LENGTH);
1413 unsigned long long int bit_length = hashed_length << 3;
1415 if (padlen < SHA1_MIN_PAD_LENGTH) padlen += SHA1_BLOCK_LENGTH;
1417 p = kmalloc(padlen, alloc_flag);
1418 if (!pad) return -ENOMEM;
1421 memset(p+1, 0, padlen - 1);
1423 DEBUG(printk("create_sha1_pad: hashed_length=%lld bits == %lld bytes\n", bit_length, hashed_length));
1426 while (bit_length != 0){
1427 p[i--] = bit_length % 0x100;
1432 *pad_length = padlen;
1438 static int transform_ok(struct cryptocop_transform_init *tinit)
1440 switch (tinit->alg){
1441 case cryptocop_alg_csum:
1442 switch (tinit->csum_mode){
1443 case cryptocop_csum_le:
1444 case cryptocop_csum_be:
1447 DEBUG_API(printk("transform_ok: Bad mode set for csum transform\n"));
1450 case cryptocop_alg_mem2mem:
1451 case cryptocop_alg_md5:
1452 case cryptocop_alg_sha1:
1453 if (tinit->keylen != 0) {
1454 DEBUG_API(printk("transform_ok: non-zero keylength, %d, for a digest/csum algorithm\n", tinit->keylen));
1455 return -EINVAL; /* This check is a bit strict. */
1458 case cryptocop_alg_des:
1459 if (tinit->keylen != 64) {
1460 DEBUG_API(printk("transform_ok: keylen %d invalid for DES\n", tinit->keylen));
1464 case cryptocop_alg_3des:
1465 if (tinit->keylen != 192) {
1466 DEBUG_API(printk("transform_ok: keylen %d invalid for 3DES\n", tinit->keylen));
1470 case cryptocop_alg_aes:
1471 if (tinit->keylen != 128 && tinit->keylen != 192 && tinit->keylen != 256) {
1472 DEBUG_API(printk("transform_ok: keylen %d invalid for AES\n", tinit->keylen));
1476 case cryptocop_no_alg:
1478 DEBUG_API(printk("transform_ok: no such algorithm %d\n", tinit->alg));
1482 switch (tinit->alg){
1483 case cryptocop_alg_des:
1484 case cryptocop_alg_3des:
1485 case cryptocop_alg_aes:
1486 if (tinit->cipher_mode != cryptocop_cipher_mode_ecb && tinit->cipher_mode != cryptocop_cipher_mode_cbc) return -EINVAL;
1494 int cryptocop_new_session(cryptocop_session_id *sid, struct cryptocop_transform_init *tinit, int alloc_flag)
1496 struct cryptocop_session *sess;
1497 struct cryptocop_transform_init *tfrm_in = tinit;
1498 struct cryptocop_transform_init *tmp_in;
1501 unsigned long int flags;
1503 init_stream_coprocessor(); /* For safety if we are called early */
1508 if ((err = transform_ok(tfrm_in))) {
1509 DEBUG_API(printk("cryptocop_new_session, bad transform\n"));
1512 tfrm_in = tfrm_in->next;
1514 if (0 == no_tfrms) {
1515 DEBUG_API(printk("cryptocop_new_session, no transforms specified\n"));
1519 sess = kmalloc(sizeof(struct cryptocop_session), alloc_flag);
1521 DEBUG_API(printk("cryptocop_new_session, kmalloc cryptocop_session\n"));
1525 sess->tfrm_ctx = kmalloc(no_tfrms * sizeof(struct cryptocop_transform_ctx), alloc_flag);
1526 if (!sess->tfrm_ctx) {
1527 DEBUG_API(printk("cryptocop_new_session, kmalloc cryptocop_transform_ctx\n"));
1533 for (i = 0; i < no_tfrms; i++){
1534 tmp_in = tfrm_in->next;
1536 if (tmp_in->tid == tfrm_in->tid) {
1537 DEBUG_API(printk("cryptocop_new_session, duplicate transform ids\n"));
1538 kfree(sess->tfrm_ctx);
1542 tmp_in = tmp_in->next;
1544 memcpy(&sess->tfrm_ctx[i].init, tfrm_in, sizeof(struct cryptocop_transform_init));
1545 sess->tfrm_ctx[i].dec_key_set = 0;
1546 sess->tfrm_ctx[i].next = &sess->tfrm_ctx[i] + 1;
1548 tfrm_in = tfrm_in->next;
1550 sess->tfrm_ctx[i-1].next = NULL;
1552 spin_lock_irqsave(&cryptocop_sessions_lock, flags);
1553 sess->sid = next_sid;
1555 /* TODO If we are really paranoid we should do duplicate check to handle sid wraparound.
1556 * OTOH 2^64 is a really large number of session. */
1557 if (next_sid == 0) next_sid = 1;
1559 /* Prepend to session list. */
1560 sess->next = cryptocop_sessions;
1561 cryptocop_sessions = sess;
1562 spin_unlock_irqrestore(&cryptocop_sessions_lock, flags);
1568 int cryptocop_free_session(cryptocop_session_id sid)
1570 struct cryptocop_transform_ctx *tc;
1571 struct cryptocop_session *sess = NULL;
1572 struct cryptocop_session *psess = NULL;
1573 unsigned long int flags;
1575 LIST_HEAD(remove_list);
1576 struct list_head *node, *tmp;
1577 struct cryptocop_prio_job *pj;
1579 DEBUG(printk("cryptocop_free_session: sid=%lld\n", sid));
1581 spin_lock_irqsave(&cryptocop_sessions_lock, flags);
1582 sess = cryptocop_sessions;
1583 while (sess && sess->sid != sid){
1589 psess->next = sess->next;
1591 cryptocop_sessions = sess->next;
1594 spin_unlock_irqrestore(&cryptocop_sessions_lock, flags);
1596 if (!sess) return -EINVAL;
1598 /* Remove queued jobs. */
1599 spin_lock_irqsave(&cryptocop_job_queue_lock, flags);
1601 for (i = 0; i < cryptocop_prio_no_prios; i++){
1602 if (!list_empty(&(cryptocop_job_queues[i].jobs))){
1603 list_for_each_safe(node, tmp, &(cryptocop_job_queues[i].jobs)) {
1604 pj = list_entry(node, struct cryptocop_prio_job, node);
1605 if (pj->oper->sid == sid) {
1606 list_move_tail(node, &remove_list);
1611 spin_unlock_irqrestore(&cryptocop_job_queue_lock, flags);
1613 list_for_each_safe(node, tmp, &remove_list) {
1615 pj = list_entry(node, struct cryptocop_prio_job, node);
1616 pj->oper->operation_status = -EAGAIN; /* EAGAIN is not ideal for job/session terminated but it's the best choice I know of. */
1617 DEBUG(printk("cryptocop_free_session: pj=0x%p, pj->oper=0x%p, pj->iop=0x%p\n", pj, pj->oper, pj->iop));
1618 pj->oper->cb(pj->oper, pj->oper->cb_data);
1619 delete_internal_operation(pj->iop);
1623 tc = sess->tfrm_ctx;
1624 /* Erase keying data. */
1626 DEBUG(printk("cryptocop_free_session: memset keys, tfrm id=%d\n", tc->init.tid));
1627 memset(tc->init.key, 0xff, CRYPTOCOP_MAX_KEY_LENGTH);
1628 memset(tc->dec_key, 0xff, CRYPTOCOP_MAX_KEY_LENGTH);
1631 kfree(sess->tfrm_ctx);
1637 static struct cryptocop_session *get_session(cryptocop_session_id sid)
1639 struct cryptocop_session *sess;
1640 unsigned long int flags;
1642 spin_lock_irqsave(&cryptocop_sessions_lock, flags);
1643 sess = cryptocop_sessions;
1644 while (sess && (sess->sid != sid)){
1647 spin_unlock_irqrestore(&cryptocop_sessions_lock, flags);
1652 static struct cryptocop_transform_ctx *get_transform_ctx(struct cryptocop_session *sess, cryptocop_tfrm_id tid)
1654 struct cryptocop_transform_ctx *tc = sess->tfrm_ctx;
1656 DEBUG(printk("get_transform_ctx, sess=0x%p, tid=%d\n", sess, tid));
1657 assert(sess != NULL);
1658 while (tc && tc->init.tid != tid){
1659 DEBUG(printk("tc=0x%p, tc->next=0x%p\n", tc, tc->next));
1662 DEBUG(printk("get_transform_ctx, returning tc=0x%p\n", tc));
1668 /* The AES s-transform matrix (s-box). */
1669 static const u8 aes_sbox[256] = {
1670 99, 124, 119, 123, 242, 107, 111, 197, 48, 1, 103, 43, 254, 215, 171, 118,
1671 202, 130, 201, 125, 250, 89, 71, 240, 173, 212, 162, 175, 156, 164, 114, 192,
1672 183, 253, 147, 38, 54, 63, 247, 204, 52, 165, 229, 241, 113, 216, 49, 21,
1673 4, 199, 35, 195, 24, 150, 5, 154, 7, 18, 128, 226, 235, 39, 178, 117,
1674 9, 131, 44, 26, 27, 110, 90, 160, 82, 59, 214, 179, 41, 227, 47, 132,
1675 83, 209, 0, 237, 32, 252, 177, 91, 106, 203, 190, 57, 74, 76, 88, 207,
1676 208, 239, 170, 251, 67, 77, 51, 133, 69, 249, 2, 127, 80, 60, 159, 168,
1677 81, 163, 64, 143, 146, 157, 56, 245, 188, 182, 218, 33, 16, 255, 243, 210,
1678 205, 12, 19, 236, 95, 151, 68, 23, 196, 167, 126, 61, 100, 93, 25, 115,
1679 96, 129, 79, 220, 34, 42, 144, 136, 70, 238, 184, 20, 222, 94, 11, 219,
1680 224, 50, 58, 10, 73, 6, 36, 92, 194, 211, 172, 98, 145, 149, 228, 121,
1681 231, 200, 55, 109, 141, 213, 78, 169, 108, 86, 244, 234, 101, 122, 174, 8,
1682 186, 120, 37, 46, 28, 166, 180, 198, 232, 221, 116, 31, 75, 189, 139, 138,
1683 112, 62, 181, 102, 72, 3, 246, 14, 97, 53, 87, 185, 134, 193, 29, 158,
1684 225, 248, 152, 17, 105, 217, 142, 148, 155, 30, 135, 233, 206, 85, 40, 223,
1685 140, 161, 137, 13, 191, 230, 66, 104, 65, 153, 45, 15, 176, 84, 187, 22
1688 /* AES has a 32 bit word round constants for each round in the
1689 * key schedule. round_constant[i] is really Rcon[i+1] in FIPS187.
1691 static u32 round_constant[11] = {
1692 0x01000000, 0x02000000, 0x04000000, 0x08000000,
1693 0x10000000, 0x20000000, 0x40000000, 0x80000000,
1694 0x1B000000, 0x36000000, 0x6C000000
1697 /* Apply the s-box to each of the four occtets in w. */
1698 static u32 aes_ks_subword(const u32 w)
1702 *(u32*)(&bytes[0]) = w;
1703 bytes[0] = aes_sbox[bytes[0]];
1704 bytes[1] = aes_sbox[bytes[1]];
1705 bytes[2] = aes_sbox[bytes[2]];
1706 bytes[3] = aes_sbox[bytes[3]];
1707 return *(u32*)(&bytes[0]);
1710 /* The encrypt (forward) Rijndael key schedule algorithm pseudo code:
1711 * (Note that AES words are 32 bit long)
1713 * KeyExpansion(byte key[4*Nk], word w[Nb*(Nr+1)], Nk){
1717 * w[i] = word(key[4*i, 4*i + 1, 4*i + 2, 4*i + 3])
1722 * while (i < (Nb * (Nr + 1))) {
1724 * if ((i mod Nk) == 0) {
1725 * temp = SubWord(RotWord(temp)) xor Rcon[i/Nk]
1727 * else if ((Nk > 6) && ((i mod Nk) == 4)) {
1728 * temp = SubWord(temp)
1730 * w[i] = w[i - Nk] xor temp
1732 * RotWord(t) does a 8 bit cyclic shift left on a 32 bit word.
1733 * SubWord(t) applies the AES s-box individually to each octet
1736 * For AES Nk can have the values 4, 6, and 8 (corresponding to
1737 * values for Nr of 10, 12, and 14). Nb is always 4.
1739 * To construct w[i], w[i - 1] and w[i - Nk] must be
1740 * available. Consequently we must keep a state of the last Nk words
1741 * to be able to create the last round keys.
1743 static void get_aes_decrypt_key(unsigned char *dec_key, const unsigned char *key, unsigned int keylength)
1746 u32 w_ring[8]; /* nk is max 8, use elements 0..(nk - 1) as a ringbuffer */
1765 panic("stream co-processor: bad aes key length in get_aes_decrypt_key\n");
1768 /* Need to do host byte order correction here since key is byte oriented and the
1769 * kx algorithm is word (u32) oriented. */
1770 for (i = 0; i < nk; i+=1) {
1771 w_ring[i] = be32_to_cpu(*(u32*)&key[4*i]);
1776 while (i < (4 * (nr + 2))) {
1777 temp = w_ring[w_last_ix];
1780 temp = (temp << 8) | (temp >> 24);
1781 temp = aes_ks_subword(temp);
1782 temp ^= round_constant[i/nk - 1];
1783 } else if ((nk > 6) && ((i % nk) == 4)) {
1784 temp = aes_ks_subword(temp);
1786 w_last_ix = (w_last_ix + 1) % nk; /* This is the same as (i-Nk) mod Nk */
1787 temp ^= w_ring[w_last_ix];
1788 w_ring[w_last_ix] = temp;
1790 /* We need the round keys for round Nr+1 and Nr+2 (round key
1791 * Nr+2 is the round key beyond the last one used when
1792 * encrypting). Rounds are numbered starting from 0, Nr=10
1793 * implies 11 rounds are used in encryption/decryption.
1795 if (i >= (4 * nr)) {
1796 /* Need to do host byte order correction here, the key
1797 * is byte oriented. */
1798 *(u32*)dec_key = cpu_to_be32(temp);
1806 /**** Job/operation management. ****/
1808 int cryptocop_job_queue_insert_csum(struct cryptocop_operation *operation)
1810 return cryptocop_job_queue_insert(cryptocop_prio_kernel_csum, operation);
1813 int cryptocop_job_queue_insert_crypto(struct cryptocop_operation *operation)
1815 return cryptocop_job_queue_insert(cryptocop_prio_kernel, operation);
1818 int cryptocop_job_queue_insert_user_job(struct cryptocop_operation *operation)
1820 return cryptocop_job_queue_insert(cryptocop_prio_user, operation);
1823 static int cryptocop_job_queue_insert(cryptocop_queue_priority prio, struct cryptocop_operation *operation)
1826 struct cryptocop_prio_job *pj = NULL;
1827 unsigned long int flags;
1829 DEBUG(printk("cryptocop_job_queue_insert(%d, 0x%p)\n", prio, operation));
1831 if (!operation || !operation->cb){
1832 DEBUG_API(printk("cryptocop_job_queue_insert oper=0x%p, NULL operation or callback\n", operation));
1836 if ((ret = cryptocop_job_setup(&pj, operation)) != 0){
1837 DEBUG_API(printk("cryptocop_job_queue_insert: job setup failed\n"));
1842 spin_lock_irqsave(&cryptocop_job_queue_lock, flags);
1843 list_add_tail(&pj->node, &cryptocop_job_queues[prio].jobs);
1844 spin_unlock_irqrestore(&cryptocop_job_queue_lock, flags);
1846 /* Make sure a job is running */
1847 cryptocop_start_job();
1851 static void cryptocop_do_tasklet(unsigned long unused);
1852 DECLARE_TASKLET (cryptocop_tasklet, cryptocop_do_tasklet, 0);
1854 static void cryptocop_do_tasklet(unsigned long unused)
1856 struct list_head *node;
1857 struct cryptocop_prio_job *pj = NULL;
1858 unsigned long flags;
1860 DEBUG(printk("cryptocop_do_tasklet: entering\n"));
1863 spin_lock_irqsave(&cryptocop_completed_jobs_lock, flags);
1864 if (!list_empty(&cryptocop_completed_jobs)){
1865 node = cryptocop_completed_jobs.next;
1867 pj = list_entry(node, struct cryptocop_prio_job, node);
1871 spin_unlock_irqrestore(&cryptocop_completed_jobs_lock, flags);
1873 assert(pj->oper != NULL);
1875 /* Notify consumer of operation completeness. */
1876 DEBUG(printk("cryptocop_do_tasklet: callback 0x%p, data 0x%p\n", pj->oper->cb, pj->oper->cb_data));
1878 pj->oper->operation_status = 0; /* Job is completed. */
1879 pj->oper->cb(pj->oper, pj->oper->cb_data);
1880 delete_internal_operation(pj->iop);
1883 } while (pj != NULL);
1885 DEBUG(printk("cryptocop_do_tasklet: exiting\n"));
1889 dma_done_interrupt(int irq, void *dev_id, struct pt_regs * regs)
1891 struct cryptocop_prio_job *done_job;
1892 reg_dma_rw_ack_intr ack_intr = {
1896 REG_WR (dma, regi_dma9, rw_ack_intr, ack_intr);
1898 DEBUG(printk("cryptocop DMA done\n"));
1900 spin_lock(&running_job_lock);
1901 if (cryptocop_running_job == NULL){
1902 printk("stream co-processor got interrupt when not busy\n");
1903 spin_unlock(&running_job_lock);
1906 done_job = cryptocop_running_job;
1907 cryptocop_running_job = NULL;
1908 spin_unlock(&running_job_lock);
1910 /* Start processing a job. */
1911 if (!spin_trylock(&cryptocop_process_lock)){
1912 DEBUG(printk("cryptocop irq handler, not starting a job\n"));
1914 cryptocop_start_job();
1915 spin_unlock(&cryptocop_process_lock);
1918 done_job->oper->operation_status = 0; /* Job is completed. */
1919 if (done_job->oper->fast_callback){
1920 /* This operation wants callback from interrupt. */
1921 done_job->oper->cb(done_job->oper, done_job->oper->cb_data);
1922 delete_internal_operation(done_job->iop);
1925 spin_lock(&cryptocop_completed_jobs_lock);
1926 list_add_tail(&(done_job->node), &cryptocop_completed_jobs);
1927 spin_unlock(&cryptocop_completed_jobs_lock);
1928 tasklet_schedule(&cryptocop_tasklet);
1931 DEBUG(printk("cryptocop leave irq handler\n"));
1936 /* Setup interrupts and DMA channels. */
1937 static int init_cryptocop(void)
1939 unsigned long flags;
1940 reg_intr_vect_rw_mask intr_mask;
1941 reg_dma_rw_cfg dma_cfg = {.en = 1};
1942 reg_dma_rw_intr_mask intr_mask_in = {.data = regk_dma_yes}; /* Only want descriptor interrupts from the DMA in channel. */
1943 reg_dma_rw_ack_intr ack_intr = {.data = 1,.in_eop = 1 };
1944 reg_strcop_rw_cfg strcop_cfg = {
1945 .ipend = regk_strcop_little,
1946 .td1 = regk_strcop_e,
1947 .td2 = regk_strcop_d,
1948 .td3 = regk_strcop_e,
1953 if (request_irq(DMA9_INTR_VECT, dma_done_interrupt, 0, "stream co-processor DMA", NULL)) panic("request_irq stream co-processor irq dma9");
1955 (void)crisv32_request_dma(8, "strcop", DMA_PANIC_ON_ERROR, 0, dma_strp);
1956 (void)crisv32_request_dma(9, "strcop", DMA_PANIC_ON_ERROR, 0, dma_strp);
1958 local_irq_save(flags);
1960 /* Reset and enable the cryptocop. */
1962 REG_WR(strcop, regi_strcop, rw_cfg, strcop_cfg);
1964 REG_WR(strcop, regi_strcop, rw_cfg, strcop_cfg);
1966 /* Enable DMA9 interrupt */
1967 intr_mask = REG_RD(intr_vect, regi_irq, rw_mask);
1969 REG_WR(intr_vect, regi_irq, rw_mask, intr_mask);
1972 REG_WR(dma, regi_dma9, rw_cfg, dma_cfg); /* input DMA */
1973 REG_WR(dma, regi_dma8, rw_cfg, dma_cfg); /* output DMA */
1975 /* Set up wordsize = 4 for DMAs. */
1976 DMA_WR_CMD (regi_dma8, regk_dma_set_w_size4);
1977 DMA_WR_CMD (regi_dma9, regk_dma_set_w_size4);
1979 /* Enable interrupts. */
1980 REG_WR(dma, regi_dma9, rw_intr_mask, intr_mask_in);
1982 /* Clear intr ack. */
1983 REG_WR(dma, regi_dma9, rw_ack_intr, ack_intr);
1985 local_irq_restore(flags);
1990 /* Free used cryptocop hw resources (interrupt and DMA channels). */
1991 static void release_cryptocop(void)
1993 unsigned long flags;
1994 reg_intr_vect_rw_mask intr_mask;
1995 reg_dma_rw_cfg dma_cfg = {.en = 0};
1996 reg_dma_rw_intr_mask intr_mask_in = {0};
1997 reg_dma_rw_ack_intr ack_intr = {.data = 1,.in_eop = 1 };
1999 local_irq_save(flags);
2001 /* Clear intr ack. */
2002 REG_WR(dma, regi_dma9, rw_ack_intr, ack_intr);
2004 /* Disable DMA9 interrupt */
2005 intr_mask = REG_RD(intr_vect, regi_irq, rw_mask);
2007 REG_WR(intr_vect, regi_irq, rw_mask, intr_mask);
2010 REG_WR(dma, regi_dma9, rw_cfg, dma_cfg); /* input DMA */
2011 REG_WR(dma, regi_dma8, rw_cfg, dma_cfg); /* output DMA */
2013 /* Disable interrupts. */
2014 REG_WR(dma, regi_dma9, rw_intr_mask, intr_mask_in);
2016 local_irq_restore(flags);
2018 free_irq(DMA9_INTR_VECT, NULL);
2020 (void)crisv32_free_dma(8);
2021 (void)crisv32_free_dma(9);
2025 /* Init job queue. */
2026 static int cryptocop_job_queue_init(void)
2030 INIT_LIST_HEAD(&cryptocop_completed_jobs);
2032 for (i = 0; i < cryptocop_prio_no_prios; i++){
2033 cryptocop_job_queues[i].prio = (cryptocop_queue_priority)i;
2034 INIT_LIST_HEAD(&cryptocop_job_queues[i].jobs);
2040 static void cryptocop_job_queue_close(void)
2042 struct list_head *node, *tmp;
2043 struct cryptocop_prio_job *pj = NULL;
2044 unsigned long int process_flags, flags;
2047 /* FIXME: This is as yet untested code. */
2049 /* Stop strcop from getting an operation to process while we are closing the
2051 spin_lock_irqsave(&cryptocop_process_lock, process_flags);
2053 /* Empty the job queue. */
2054 spin_lock_irqsave(&cryptocop_process_lock, process_flags);
2055 for (i = 0; i < cryptocop_prio_no_prios; i++){
2056 if (!list_empty(&(cryptocop_job_queues[i].jobs))){
2057 list_for_each_safe(node, tmp, &(cryptocop_job_queues[i].jobs)) {
2058 pj = list_entry(node, struct cryptocop_prio_job, node);
2061 /* Call callback to notify consumer of job removal. */
2062 DEBUG(printk("cryptocop_job_queue_close: callback 0x%p, data 0x%p\n", pj->oper->cb, pj->oper->cb_data));
2063 pj->oper->operation_status = -EINTR; /* Job is terminated without completion. */
2064 pj->oper->cb(pj->oper, pj->oper->cb_data);
2066 delete_internal_operation(pj->iop);
2071 spin_unlock_irqrestore(&cryptocop_process_lock, process_flags);
2073 /* Remove the running job, if any. */
2074 spin_lock_irqsave(&running_job_lock, flags);
2075 if (cryptocop_running_job){
2076 reg_strcop_rw_cfg rw_cfg;
2077 reg_dma_rw_cfg dma_out_cfg, dma_in_cfg;
2080 dma_out_cfg = REG_RD(dma, regi_dma8, rw_cfg);
2081 dma_out_cfg.en = regk_dma_no;
2082 REG_WR(dma, regi_dma8, rw_cfg, dma_out_cfg);
2084 dma_in_cfg = REG_RD(dma, regi_dma9, rw_cfg);
2085 dma_in_cfg.en = regk_dma_no;
2086 REG_WR(dma, regi_dma9, rw_cfg, dma_in_cfg);
2088 /* Disble the cryptocop. */
2089 rw_cfg = REG_RD(strcop, regi_strcop, rw_cfg);
2091 REG_WR(strcop, regi_strcop, rw_cfg, rw_cfg);
2093 pj = cryptocop_running_job;
2094 cryptocop_running_job = NULL;
2096 /* Call callback to notify consumer of job removal. */
2097 DEBUG(printk("cryptocop_job_queue_close: callback 0x%p, data 0x%p\n", pj->oper->cb, pj->oper->cb_data));
2098 pj->oper->operation_status = -EINTR; /* Job is terminated without completion. */
2099 pj->oper->cb(pj->oper, pj->oper->cb_data);
2101 delete_internal_operation(pj->iop);
2104 spin_unlock_irqrestore(&running_job_lock, flags);
2106 /* Remove completed jobs, if any. */
2107 spin_lock_irqsave(&cryptocop_completed_jobs_lock, flags);
2109 list_for_each_safe(node, tmp, &cryptocop_completed_jobs) {
2110 pj = list_entry(node, struct cryptocop_prio_job, node);
2112 /* Call callback to notify consumer of job removal. */
2113 DEBUG(printk("cryptocop_job_queue_close: callback 0x%p, data 0x%p\n", pj->oper->cb, pj->oper->cb_data));
2114 pj->oper->operation_status = -EINTR; /* Job is terminated without completion. */
2115 pj->oper->cb(pj->oper, pj->oper->cb_data);
2117 delete_internal_operation(pj->iop);
2120 spin_unlock_irqrestore(&cryptocop_completed_jobs_lock, flags);
2124 static void cryptocop_start_job(void)
2127 struct cryptocop_prio_job *pj;
2128 unsigned long int flags;
2129 unsigned long int running_job_flags;
2130 reg_strcop_rw_cfg rw_cfg = {.en = 1, .ignore_sync = 0};
2132 DEBUG(printk("cryptocop_start_job: entering\n"));
2134 spin_lock_irqsave(&running_job_lock, running_job_flags);
2135 if (cryptocop_running_job != NULL){
2136 /* Already running. */
2137 DEBUG(printk("cryptocop_start_job: already running, exit\n"));
2138 spin_unlock_irqrestore(&running_job_lock, running_job_flags);
2141 spin_lock_irqsave(&cryptocop_job_queue_lock, flags);
2143 /* Check the queues in priority order. */
2144 for (i = cryptocop_prio_kernel_csum; (i < cryptocop_prio_no_prios) && list_empty(&cryptocop_job_queues[i].jobs); i++);
2145 if (i == cryptocop_prio_no_prios) {
2146 spin_unlock_irqrestore(&cryptocop_job_queue_lock, flags);
2147 spin_unlock_irqrestore(&running_job_lock, running_job_flags);
2148 DEBUG(printk("cryptocop_start_job: no jobs to run\n"));
2149 return; /* No jobs to run */
2151 DEBUG(printk("starting job for prio %d\n", i));
2153 /* TODO: Do not starve lower priority jobs. Let in a lower
2154 * prio job for every N-th processed higher prio job or some
2155 * other scheduling policy. This could reasonably be
2156 * tweakable since the optimal balance would depend on the
2157 * type of load on the system. */
2159 /* Pull the DMA lists from the job and start the DMA client. */
2160 pj = list_entry(cryptocop_job_queues[i].jobs.next, struct cryptocop_prio_job, node);
2161 list_del(&pj->node);
2162 spin_unlock_irqrestore(&cryptocop_job_queue_lock, flags);
2163 cryptocop_running_job = pj;
2165 /* Set config register (3DES and CSUM modes). */
2166 switch (pj->iop->tdes_mode){
2167 case cryptocop_3des_eee:
2168 rw_cfg.td1 = regk_strcop_e;
2169 rw_cfg.td2 = regk_strcop_e;
2170 rw_cfg.td3 = regk_strcop_e;
2172 case cryptocop_3des_eed:
2173 rw_cfg.td1 = regk_strcop_e;
2174 rw_cfg.td2 = regk_strcop_e;
2175 rw_cfg.td3 = regk_strcop_d;
2177 case cryptocop_3des_ede:
2178 rw_cfg.td1 = regk_strcop_e;
2179 rw_cfg.td2 = regk_strcop_d;
2180 rw_cfg.td3 = regk_strcop_e;
2182 case cryptocop_3des_edd:
2183 rw_cfg.td1 = regk_strcop_e;
2184 rw_cfg.td2 = regk_strcop_d;
2185 rw_cfg.td3 = regk_strcop_d;
2187 case cryptocop_3des_dee:
2188 rw_cfg.td1 = regk_strcop_d;
2189 rw_cfg.td2 = regk_strcop_e;
2190 rw_cfg.td3 = regk_strcop_e;
2192 case cryptocop_3des_ded:
2193 rw_cfg.td1 = regk_strcop_d;
2194 rw_cfg.td2 = regk_strcop_e;
2195 rw_cfg.td3 = regk_strcop_d;
2197 case cryptocop_3des_dde:
2198 rw_cfg.td1 = regk_strcop_d;
2199 rw_cfg.td2 = regk_strcop_d;
2200 rw_cfg.td3 = regk_strcop_e;
2202 case cryptocop_3des_ddd:
2203 rw_cfg.td1 = regk_strcop_d;
2204 rw_cfg.td2 = regk_strcop_d;
2205 rw_cfg.td3 = regk_strcop_d;
2208 DEBUG(printk("cryptocop_setup_dma_list: bad 3DES mode\n"));
2210 switch (pj->iop->csum_mode){
2211 case cryptocop_csum_le:
2212 rw_cfg.ipend = regk_strcop_little;
2214 case cryptocop_csum_be:
2215 rw_cfg.ipend = regk_strcop_big;
2218 DEBUG(printk("cryptocop_setup_dma_list: bad checksum mode\n"));
2220 REG_WR(strcop, regi_strcop, rw_cfg, rw_cfg);
2222 DEBUG(printk("cryptocop_start_job: starting DMA, new cryptocop_running_job=0x%p\n"
2223 "ctx_in: 0x%p, phys: 0x%p\n"
2224 "ctx_out: 0x%p, phys: 0x%p\n",
2226 &pj->iop->ctx_in, (char*)virt_to_phys(&pj->iop->ctx_in),
2227 &pj->iop->ctx_out, (char*)virt_to_phys(&pj->iop->ctx_out)));
2229 /* Start input DMA. */
2230 DMA_START_CONTEXT(regi_dma9, virt_to_phys(&pj->iop->ctx_in));
2232 /* Start output DMA. */
2233 DMA_START_CONTEXT(regi_dma8, virt_to_phys(&pj->iop->ctx_out));
2235 spin_unlock_irqrestore(&running_job_lock, running_job_flags);
2236 DEBUG(printk("cryptocop_start_job: exiting\n"));
2240 static int cryptocop_job_setup(struct cryptocop_prio_job **pj, struct cryptocop_operation *operation)
2243 int alloc_flag = operation->in_interrupt ? GFP_ATOMIC : GFP_KERNEL;
2244 void *iop_alloc_ptr = NULL;
2246 *pj = kmalloc(sizeof (struct cryptocop_prio_job), alloc_flag);
2247 if (!*pj) return -ENOMEM;
2249 DEBUG(printk("cryptocop_job_setup: operation=0x%p\n", operation));
2251 (*pj)->oper = operation;
2252 DEBUG(printk("cryptocop_job_setup, cb=0x%p cb_data=0x%p\n", (*pj)->oper->cb, (*pj)->oper->cb_data));
2254 if (operation->use_dmalists) {
2255 DEBUG(print_user_dma_lists(&operation->list_op));
2256 if (!operation->list_op.inlist || !operation->list_op.outlist || !operation->list_op.out_data_buf || !operation->list_op.in_data_buf){
2257 DEBUG_API(printk("cryptocop_job_setup: bad indata (use_dmalists)\n"));
2261 iop_alloc_ptr = kmalloc(DESCR_ALLOC_PAD + sizeof(struct cryptocop_int_operation), alloc_flag);
2262 if (!iop_alloc_ptr) {
2263 DEBUG_API(printk("cryptocop_job_setup: kmalloc cryptocop_int_operation\n"));
2267 (*pj)->iop = (struct cryptocop_int_operation*)(((unsigned long int)(iop_alloc_ptr + DESCR_ALLOC_PAD + offsetof(struct cryptocop_int_operation, ctx_out)) & ~0x0000001F) - offsetof(struct cryptocop_int_operation, ctx_out));
2268 DEBUG(memset((*pj)->iop, 0xff, sizeof(struct cryptocop_int_operation)));
2269 (*pj)->iop->alloc_ptr = iop_alloc_ptr;
2270 (*pj)->iop->sid = operation->sid;
2271 (*pj)->iop->cdesc_out = NULL;
2272 (*pj)->iop->cdesc_in = NULL;
2273 (*pj)->iop->tdes_mode = operation->list_op.tdes_mode;
2274 (*pj)->iop->csum_mode = operation->list_op.csum_mode;
2275 (*pj)->iop->ddesc_out = operation->list_op.outlist;
2276 (*pj)->iop->ddesc_in = operation->list_op.inlist;
2278 /* Setup DMA contexts. */
2279 (*pj)->iop->ctx_out.next = NULL;
2280 (*pj)->iop->ctx_out.eol = 1;
2281 (*pj)->iop->ctx_out.saved_data = operation->list_op.outlist;
2282 (*pj)->iop->ctx_out.saved_data_buf = operation->list_op.out_data_buf;
2284 (*pj)->iop->ctx_in.next = NULL;
2285 (*pj)->iop->ctx_in.eol = 1;
2286 (*pj)->iop->ctx_in.saved_data = operation->list_op.inlist;
2287 (*pj)->iop->ctx_in.saved_data_buf = operation->list_op.in_data_buf;
2289 if ((err = cryptocop_setup_dma_list(operation, &(*pj)->iop, alloc_flag))) {
2290 DEBUG_API(printk("cryptocop_job_setup: cryptocop_setup_dma_list failed %d\n", err));
2295 DEBUG(print_dma_descriptors((*pj)->iop));
2297 DEBUG(printk("cryptocop_job_setup, DMA list setup successful\n"));
2303 static int cryptocop_open(struct inode *inode, struct file *filp)
2305 int p = MINOR(inode->i_rdev);
2307 if (p != CRYPTOCOP_MINOR) return -EINVAL;
2309 filp->private_data = NULL;
2314 static int cryptocop_release(struct inode *inode, struct file *filp)
2316 struct cryptocop_private *dev = filp->private_data;
2317 struct cryptocop_private *dev_next;
2320 dev_next = dev->next;
2321 if (dev->sid != CRYPTOCOP_SESSION_ID_NONE) {
2322 (void)cryptocop_free_session(dev->sid);
2332 static int cryptocop_ioctl_close_session(struct inode *inode, struct file *filp,
2333 unsigned int cmd, unsigned long arg)
2335 struct cryptocop_private *dev = filp->private_data;
2336 struct cryptocop_private *prev_dev = NULL;
2337 struct strcop_session_op *sess_op = (struct strcop_session_op *)arg;
2338 struct strcop_session_op sop;
2341 DEBUG(printk("cryptocop_ioctl_close_session\n"));
2343 if (!access_ok(VERIFY_READ, sess_op, sizeof(struct strcop_session_op)))
2345 err = copy_from_user(&sop, sess_op, sizeof(struct strcop_session_op));
2346 if (err) return -EFAULT;
2348 while (dev && (dev->sid != sop.ses_id)) {
2354 prev_dev->next = dev->next;
2356 filp->private_data = dev->next;
2358 err = cryptocop_free_session(dev->sid);
2359 if (err) return -EFAULT;
2361 DEBUG_API(printk("cryptocop_ioctl_close_session: session %lld not found\n", sop.ses_id));
2368 static void ioctl_process_job_callback(struct cryptocop_operation *op, void*cb_data)
2370 struct ioctl_job_cb_ctx *jc = (struct ioctl_job_cb_ctx *)cb_data;
2372 DEBUG(printk("ioctl_process_job_callback: op=0x%p, cb_data=0x%p\n", op, cb_data));
2375 wake_up(&cryptocop_ioc_process_wq);
2379 #define CRYPTOCOP_IOCTL_CIPHER_TID (1)
2380 #define CRYPTOCOP_IOCTL_DIGEST_TID (2)
2381 #define CRYPTOCOP_IOCTL_CSUM_TID (3)
2383 static size_t first_cfg_change_ix(struct strcop_crypto_op *crp_op)
2387 if (crp_op->do_cipher) ch_ix = crp_op->cipher_start;
2388 if (crp_op->do_digest && (crp_op->digest_start < ch_ix)) ch_ix = crp_op->digest_start;
2389 if (crp_op->do_csum && (crp_op->csum_start < ch_ix)) ch_ix = crp_op->csum_start;
2391 DEBUG(printk("first_cfg_change_ix: ix=%d\n", ch_ix));
2396 static size_t next_cfg_change_ix(struct strcop_crypto_op *crp_op, size_t ix)
2398 size_t ch_ix = INT_MAX;
2401 if (crp_op->do_cipher && ((crp_op->cipher_start + crp_op->cipher_len) > ix)){
2402 if (crp_op->cipher_start > ix) {
2403 ch_ix = crp_op->cipher_start;
2405 ch_ix = crp_op->cipher_start + crp_op->cipher_len;
2408 if (crp_op->do_digest && ((crp_op->digest_start + crp_op->digest_len) > ix)){
2409 if (crp_op->digest_start > ix) {
2410 tmp_ix = crp_op->digest_start;
2412 tmp_ix = crp_op->digest_start + crp_op->digest_len;
2414 if (tmp_ix < ch_ix) ch_ix = tmp_ix;
2416 if (crp_op->do_csum && ((crp_op->csum_start + crp_op->csum_len) > ix)){
2417 if (crp_op->csum_start > ix) {
2418 tmp_ix = crp_op->csum_start;
2420 tmp_ix = crp_op->csum_start + crp_op->csum_len;
2422 if (tmp_ix < ch_ix) ch_ix = tmp_ix;
2424 if (ch_ix == INT_MAX) ch_ix = ix;
2425 DEBUG(printk("next_cfg_change_ix prev ix=%d, next ix=%d\n", ix, ch_ix));
2430 /* Map map_length bytes from the pages starting on *pageix and *pageoffset to iovecs starting on *iovix.
2431 * Return -1 for ok, 0 for fail. */
2432 static int map_pages_to_iovec(struct iovec *iov, int iovlen, int *iovix, struct page **pages, int nopages, int *pageix, int *pageoffset, int map_length )
2436 assert(iov != NULL);
2437 assert(iovix != NULL);
2438 assert(pages != NULL);
2439 assert(pageix != NULL);
2440 assert(pageoffset != NULL);
2442 DEBUG(printk("map_pages_to_iovec, map_length=%d, iovlen=%d, *iovix=%d, nopages=%d, *pageix=%d, *pageoffset=%d\n", map_length, iovlen, *iovix, nopages, *pageix, *pageoffset));
2444 while (map_length > 0){
2445 DEBUG(printk("map_pages_to_iovec, map_length=%d, iovlen=%d, *iovix=%d, nopages=%d, *pageix=%d, *pageoffset=%d\n", map_length, iovlen, *iovix, nopages, *pageix, *pageoffset));
2446 if (*iovix >= iovlen){
2447 DEBUG_API(printk("map_page_to_iovec: *iovix=%d >= iovlen=%d\n", *iovix, iovlen));
2450 if (*pageix >= nopages){
2451 DEBUG_API(printk("map_page_to_iovec: *pageix=%d >= nopages=%d\n", *pageix, nopages));
2454 iov[*iovix].iov_base = (unsigned char*)page_address(pages[*pageix]) + *pageoffset;
2455 tmplen = PAGE_SIZE - *pageoffset;
2456 if (tmplen < map_length){
2460 tmplen = map_length;
2461 (*pageoffset) += map_length;
2463 DEBUG(printk("mapping %d bytes from page %d (or %d) to iovec %d\n", tmplen, *pageix, *pageix-1, *iovix));
2464 iov[*iovix].iov_len = tmplen;
2465 map_length -= tmplen;
2468 DEBUG(printk("map_page_to_iovec, exit, *iovix=%d\n", *iovix));
2474 static int cryptocop_ioctl_process(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg)
2477 struct cryptocop_private *dev = filp->private_data;
2478 struct strcop_crypto_op *crp_oper = (struct strcop_crypto_op *)arg;
2479 struct strcop_crypto_op oper = {0};
2481 struct cryptocop_operation *cop = NULL;
2483 struct ioctl_job_cb_ctx *jc = NULL;
2485 struct page **inpages = NULL;
2486 struct page **outpages = NULL;
2490 struct cryptocop_desc descs[5]; /* Max 5 descriptors are needed, there are three transforms that
2491 * can get connected/disconnected on different places in the indata. */
2492 struct cryptocop_desc_cfg dcfgs[5*3];
2495 struct cryptocop_tfrm_cfg ciph_tcfg = {0};
2496 struct cryptocop_tfrm_cfg digest_tcfg = {0};
2497 struct cryptocop_tfrm_cfg csum_tcfg = {0};
2499 unsigned char *digest_result = NULL;
2500 int digest_length = 0;
2502 unsigned char csum_result[CSUM_BLOCK_LENGTH];
2503 struct cryptocop_session *sess;
2513 int cipher_active, digest_active, csum_active;
2514 int end_digest, end_csum;
2515 int digest_done = 0;
2516 int cipher_done = 0;
2519 DEBUG(printk("cryptocop_ioctl_process\n"));
2521 if (!access_ok(VERIFY_WRITE, crp_oper, sizeof(struct strcop_crypto_op))){
2522 DEBUG_API(printk("cryptocop_ioctl_process: !access_ok crp_oper!\n"));
2525 if (copy_from_user(&oper, crp_oper, sizeof(struct strcop_crypto_op))) {
2526 DEBUG_API(printk("cryptocop_ioctl_process: copy_from_user\n"));
2529 DEBUG(print_strcop_crypto_op(&oper));
2531 while (dev && dev->sid != oper.ses_id) dev = dev->next;
2533 DEBUG_API(printk("cryptocop_ioctl_process: session %lld not found\n", oper.ses_id));
2537 /* Check buffers. */
2538 if (((oper.indata + oper.inlen) < oper.indata) || ((oper.cipher_outdata + oper.cipher_outlen) < oper.cipher_outdata)){
2539 DEBUG_API(printk("cryptocop_ioctl_process: user buffers wrapped around, bad user!\n"));
2543 if (!access_ok(VERIFY_WRITE, oper.cipher_outdata, oper.cipher_outlen)){
2544 DEBUG_API(printk("cryptocop_ioctl_process: !access_ok out data!\n"));
2547 if (!access_ok(VERIFY_READ, oper.indata, oper.inlen)){
2548 DEBUG_API(printk("cryptocop_ioctl_process: !access_ok in data!\n"));
2552 cop = kmalloc(sizeof(struct cryptocop_operation), GFP_KERNEL);
2554 DEBUG_API(printk("cryptocop_ioctl_process: kmalloc\n"));
2557 jc = kmalloc(sizeof(struct ioctl_job_cb_ctx), GFP_KERNEL);
2559 DEBUG_API(printk("cryptocop_ioctl_process: kmalloc\n"));
2566 cop->cb = ioctl_process_job_callback;
2567 cop->operation_status = 0;
2568 cop->use_dmalists = 0;
2569 cop->in_interrupt = 0;
2570 cop->fast_callback = 0;
2571 cop->tfrm_op.tfrm_cfg = NULL;
2572 cop->tfrm_op.desc = NULL;
2573 cop->tfrm_op.indata = NULL;
2574 cop->tfrm_op.incount = 0;
2575 cop->tfrm_op.inlen = 0;
2576 cop->tfrm_op.outdata = NULL;
2577 cop->tfrm_op.outcount = 0;
2578 cop->tfrm_op.outlen = 0;
2580 sess = get_session(oper.ses_id);
2582 DEBUG_API(printk("cryptocop_ioctl_process: bad session id.\n"));
2588 if (oper.do_cipher) {
2589 unsigned int cipher_outlen = 0;
2590 struct cryptocop_transform_ctx *tc = get_transform_ctx(sess, CRYPTOCOP_IOCTL_CIPHER_TID);
2592 DEBUG_API(printk("cryptocop_ioctl_process: no cipher transform in session.\n"));
2596 ciph_tcfg.tid = CRYPTOCOP_IOCTL_CIPHER_TID;
2597 ciph_tcfg.inject_ix = 0;
2598 ciph_tcfg.flags = 0;
2599 if ((oper.cipher_start < 0) || (oper.cipher_len <= 0) || (oper.cipher_start > oper.inlen) || ((oper.cipher_start + oper.cipher_len) > oper.inlen)){
2600 DEBUG_API(printk("cryptocop_ioctl_process: bad cipher length\n"));
2605 cblocklen = tc->init.alg == cryptocop_alg_aes ? AES_BLOCK_LENGTH : DES_BLOCK_LENGTH;
2606 if (oper.cipher_len % cblocklen) {
2609 DEBUG_API(printk("cryptocop_ioctl_process: cipher inlength not multiple of block length.\n"));
2612 cipher_outlen = oper.cipher_len;
2613 if (tc->init.cipher_mode == cryptocop_cipher_mode_cbc){
2614 if (oper.cipher_explicit) {
2615 ciph_tcfg.flags |= CRYPTOCOP_EXPLICIT_IV;
2616 memcpy(ciph_tcfg.iv, oper.cipher_iv, cblocklen);
2618 cipher_outlen = oper.cipher_len - cblocklen;
2621 if (oper.cipher_explicit){
2624 DEBUG_API(printk("cryptocop_ioctl_process: explicit_iv when not CBC mode\n"));
2628 if (oper.cipher_outlen != cipher_outlen) {
2631 DEBUG_API(printk("cryptocop_ioctl_process: cipher_outlen incorrect, should be %d not %d.\n", cipher_outlen, oper.cipher_outlen));
2636 ciph_tcfg.flags |= CRYPTOCOP_DECRYPT;
2638 ciph_tcfg.flags |= CRYPTOCOP_ENCRYPT;
2640 ciph_tcfg.next = cop->tfrm_op.tfrm_cfg;
2641 cop->tfrm_op.tfrm_cfg = &ciph_tcfg;
2643 if (oper.do_digest){
2644 struct cryptocop_transform_ctx *tc = get_transform_ctx(sess, CRYPTOCOP_IOCTL_DIGEST_TID);
2646 DEBUG_API(printk("cryptocop_ioctl_process: no digest transform in session.\n"));
2650 digest_length = tc->init.alg == cryptocop_alg_md5 ? 16 : 20;
2651 digest_result = kmalloc(digest_length, GFP_KERNEL);
2652 if (!digest_result) {
2653 DEBUG_API(printk("cryptocop_ioctl_process: kmalloc digest_result\n"));
2657 DEBUG(memset(digest_result, 0xff, digest_length));
2659 digest_tcfg.tid = CRYPTOCOP_IOCTL_DIGEST_TID;
2660 digest_tcfg.inject_ix = 0;
2661 ciph_tcfg.inject_ix += digest_length;
2662 if ((oper.digest_start < 0) || (oper.digest_len <= 0) || (oper.digest_start > oper.inlen) || ((oper.digest_start + oper.digest_len) > oper.inlen)){
2663 DEBUG_API(printk("cryptocop_ioctl_process: bad digest length\n"));
2668 digest_tcfg.next = cop->tfrm_op.tfrm_cfg;
2669 cop->tfrm_op.tfrm_cfg = &digest_tcfg;
2672 csum_tcfg.tid = CRYPTOCOP_IOCTL_CSUM_TID;
2673 csum_tcfg.inject_ix = digest_length;
2674 ciph_tcfg.inject_ix += 2;
2676 if ((oper.csum_start < 0) || (oper.csum_len <= 0) || (oper.csum_start > oper.inlen) || ((oper.csum_start + oper.csum_len) > oper.inlen)){
2677 DEBUG_API(printk("cryptocop_ioctl_process: bad csum length\n"));
2683 csum_tcfg.next = cop->tfrm_op.tfrm_cfg;
2684 cop->tfrm_op.tfrm_cfg = &csum_tcfg;
2687 prev_ix = first_cfg_change_ix(&oper);
2688 if (prev_ix > oper.inlen) {
2689 DEBUG_API(printk("cryptocop_ioctl_process: length mismatch\n"));
2690 nooutpages = noinpages = 0;
2694 DEBUG(printk("cryptocop_ioctl_process: inlen=%d, cipher_outlen=%d\n", oper.inlen, oper.cipher_outlen));
2696 /* Map user pages for in and out data of the operation. */
2697 noinpages = (((unsigned long int)(oper.indata + prev_ix) & ~PAGE_MASK) + oper.inlen - 1 - prev_ix + ~PAGE_MASK) >> PAGE_SHIFT;
2698 DEBUG(printk("cryptocop_ioctl_process: noinpages=%d\n", noinpages));
2699 inpages = kmalloc(noinpages * sizeof(struct page*), GFP_KERNEL);
2701 DEBUG_API(printk("cryptocop_ioctl_process: kmalloc inpages\n"));
2702 nooutpages = noinpages = 0;
2706 if (oper.do_cipher){
2707 nooutpages = (((unsigned long int)oper.cipher_outdata & ~PAGE_MASK) + oper.cipher_outlen - 1 + ~PAGE_MASK) >> PAGE_SHIFT;
2708 DEBUG(printk("cryptocop_ioctl_process: nooutpages=%d\n", nooutpages));
2709 outpages = kmalloc(nooutpages * sizeof(struct page*), GFP_KERNEL);
2711 DEBUG_API(printk("cryptocop_ioctl_process: kmalloc outpages\n"));
2712 nooutpages = noinpages = 0;
2718 /* Acquire the mm page semaphore. */
2719 down_read(¤t->mm->mmap_sem);
2721 err = get_user_pages(current,
2723 (unsigned long int)(oper.indata + prev_ix),
2725 0, /* read access only for in data */
2731 up_read(¤t->mm->mmap_sem);
2732 nooutpages = noinpages = 0;
2733 DEBUG_API(printk("cryptocop_ioctl_process: get_user_pages indata\n"));
2737 if (oper.do_cipher){
2738 err = get_user_pages(current,
2740 (unsigned long int)oper.cipher_outdata,
2742 1, /* write access for out data */
2746 up_read(¤t->mm->mmap_sem);
2749 DEBUG_API(printk("cryptocop_ioctl_process: get_user_pages outdata\n"));
2754 up_read(¤t->mm->mmap_sem);
2757 /* Add 6 to nooutpages to make room for possibly inserted buffers for storing digest and
2758 * csum output and splits when units are (dis-)connected. */
2759 cop->tfrm_op.indata = kmalloc((noinpages) * sizeof(struct iovec), GFP_KERNEL);
2760 cop->tfrm_op.outdata = kmalloc((6 + nooutpages) * sizeof(struct iovec), GFP_KERNEL);
2761 if (!cop->tfrm_op.indata || !cop->tfrm_op.outdata) {
2762 DEBUG_API(printk("cryptocop_ioctl_process: kmalloc iovecs\n"));
2767 cop->tfrm_op.inlen = oper.inlen - prev_ix;
2768 cop->tfrm_op.outlen = 0;
2769 if (oper.do_cipher) cop->tfrm_op.outlen += oper.cipher_outlen;
2770 if (oper.do_digest) cop->tfrm_op.outlen += digest_length;
2771 if (oper.do_csum) cop->tfrm_op.outlen += 2;
2773 /* Setup the in iovecs. */
2774 cop->tfrm_op.incount = noinpages;
2776 size_t tmplen = cop->tfrm_op.inlen;
2778 cop->tfrm_op.indata[0].iov_len = PAGE_SIZE - ((unsigned long int)(oper.indata + prev_ix) & ~PAGE_MASK);
2779 cop->tfrm_op.indata[0].iov_base = (unsigned char*)page_address(inpages[0]) + ((unsigned long int)(oper.indata + prev_ix) & ~PAGE_MASK);
2780 tmplen -= cop->tfrm_op.indata[0].iov_len;
2781 for (i = 1; i<noinpages; i++){
2782 cop->tfrm_op.indata[i].iov_len = tmplen < PAGE_SIZE ? tmplen : PAGE_SIZE;
2783 cop->tfrm_op.indata[i].iov_base = (unsigned char*)page_address(inpages[i]);
2784 tmplen -= PAGE_SIZE;
2787 cop->tfrm_op.indata[0].iov_len = oper.inlen - prev_ix;
2788 cop->tfrm_op.indata[0].iov_base = (unsigned char*)page_address(inpages[0]) + ((unsigned long int)(oper.indata + prev_ix) & ~PAGE_MASK);
2791 iovlen = nooutpages + 6;
2792 pageoffset = oper.do_cipher ? ((unsigned long int)oper.cipher_outdata & ~PAGE_MASK) : 0;
2794 next_ix = next_cfg_change_ix(&oper, prev_ix);
2795 if (prev_ix == next_ix){
2796 DEBUG_API(printk("cryptocop_ioctl_process: length configuration broken.\n"));
2797 err = -EINVAL; /* This should be impossible barring bugs. */
2800 while (prev_ix != next_ix){
2801 end_digest = end_csum = cipher_active = digest_active = csum_active = 0;
2802 descs[desc_ix].cfg = NULL;
2803 descs[desc_ix].length = next_ix - prev_ix;
2805 if (oper.do_cipher && (oper.cipher_start < next_ix) && (prev_ix < (oper.cipher_start + oper.cipher_len))) {
2806 dcfgs[dcfg_ix].tid = CRYPTOCOP_IOCTL_CIPHER_TID;
2807 dcfgs[dcfg_ix].src = cryptocop_source_dma;
2810 if (next_ix == (oper.cipher_start + oper.cipher_len)){
2812 dcfgs[dcfg_ix].last = 1;
2814 dcfgs[dcfg_ix].last = 0;
2816 dcfgs[dcfg_ix].next = descs[desc_ix].cfg;
2817 descs[desc_ix].cfg = &dcfgs[dcfg_ix];
2820 if (oper.do_digest && (oper.digest_start < next_ix) && (prev_ix < (oper.digest_start + oper.digest_len))) {
2822 dcfgs[dcfg_ix].tid = CRYPTOCOP_IOCTL_DIGEST_TID;
2823 dcfgs[dcfg_ix].src = cryptocop_source_dma;
2824 if (next_ix == (oper.digest_start + oper.digest_len)){
2825 assert(!digest_done);
2827 dcfgs[dcfg_ix].last = 1;
2829 dcfgs[dcfg_ix].last = 0;
2831 dcfgs[dcfg_ix].next = descs[desc_ix].cfg;
2832 descs[desc_ix].cfg = &dcfgs[dcfg_ix];
2835 if (oper.do_csum && (oper.csum_start < next_ix) && (prev_ix < (oper.csum_start + oper.csum_len))){
2837 dcfgs[dcfg_ix].tid = CRYPTOCOP_IOCTL_CSUM_TID;
2838 dcfgs[dcfg_ix].src = cryptocop_source_dma;
2839 if (next_ix == (oper.csum_start + oper.csum_len)){
2841 dcfgs[dcfg_ix].last = 1;
2843 dcfgs[dcfg_ix].last = 0;
2845 dcfgs[dcfg_ix].next = descs[desc_ix].cfg;
2846 descs[desc_ix].cfg = &dcfgs[dcfg_ix];
2849 if (!descs[desc_ix].cfg){
2850 DEBUG_API(printk("cryptocop_ioctl_process: data segment %d (%d to %d) had no active transforms\n", desc_ix, prev_ix, next_ix));
2854 descs[desc_ix].next = &(descs[desc_ix]) + 1;
2857 next_ix = next_cfg_change_ix(&oper, prev_ix);
2860 descs[desc_ix-1].next = NULL;
2862 descs[0].next = NULL;
2864 if (oper.do_digest) {
2865 DEBUG(printk("cryptocop_ioctl_process: mapping %d byte digest output to iovec %d\n", digest_length, iovix));
2866 /* Add outdata iovec, length == <length of type of digest> */
2867 cop->tfrm_op.outdata[iovix].iov_base = digest_result;
2868 cop->tfrm_op.outdata[iovix].iov_len = digest_length;
2872 /* Add outdata iovec, length == 2, the length of csum. */
2873 DEBUG(printk("cryptocop_ioctl_process: mapping 2 byte csum output to iovec %d\n", iovix));
2874 /* Add outdata iovec, length == <length of type of digest> */
2875 cop->tfrm_op.outdata[iovix].iov_base = csum_result;
2876 cop->tfrm_op.outdata[iovix].iov_len = 2;
2879 if (oper.do_cipher) {
2880 if (!map_pages_to_iovec(cop->tfrm_op.outdata, iovlen, &iovix, outpages, nooutpages, &pageix, &pageoffset, oper.cipher_outlen)){
2881 DEBUG_API(printk("cryptocop_ioctl_process: failed to map pages to iovec.\n"));
2882 err = -ENOSYS; /* This should be impossible barring bugs. */
2886 DEBUG(printk("cryptocop_ioctl_process: setting cop->tfrm_op.outcount %d\n", iovix));
2887 cop->tfrm_op.outcount = iovix;
2888 assert(iovix <= (nooutpages + 6));
2890 cop->sid = oper.ses_id;
2891 cop->tfrm_op.desc = &descs[0];
2893 DEBUG(printk("cryptocop_ioctl_process: inserting job, cb_data=0x%p\n", cop->cb_data));
2895 if ((err = cryptocop_job_queue_insert_user_job(cop)) != 0) {
2896 DEBUG_API(printk("cryptocop_ioctl_process: insert job %d\n", err));
2901 DEBUG(printk("cryptocop_ioctl_process: begin wait for result\n"));
2903 wait_event(cryptocop_ioc_process_wq, (jc->processed != 0));
2904 DEBUG(printk("cryptocop_ioctl_process: end wait for result\n"));
2905 if (!jc->processed){
2906 printk(KERN_WARNING "cryptocop_ioctl_process: job not processed at completion\n");
2911 /* Job process done. Cipher output should already be correct in job so no post processing of outdata. */
2912 DEBUG(printk("cryptocop_ioctl_process: operation_status = %d\n", cop->operation_status));
2913 if (cop->operation_status == 0){
2914 if (oper.do_digest){
2915 DEBUG(printk("cryptocop_ioctl_process: copy %d bytes digest to user\n", digest_length));
2916 err = copy_to_user((unsigned char*)crp_oper + offsetof(struct strcop_crypto_op, digest), digest_result, digest_length);
2918 DEBUG_API(printk("cryptocop_ioctl_process: copy_to_user, digest length %d, err %d\n", digest_length, err));
2924 DEBUG(printk("cryptocop_ioctl_process: copy 2 bytes checksum to user\n"));
2925 err = copy_to_user((unsigned char*)crp_oper + offsetof(struct strcop_crypto_op, csum), csum_result, 2);
2927 DEBUG_API(printk("cryptocop_ioctl_process: copy_to_user, csum, err %d\n", err));
2934 DEBUG(printk("cryptocop_ioctl_process: returning err = operation_status = %d\n", cop->operation_status));
2935 err = cop->operation_status;
2939 /* Release page caches. */
2940 for (i = 0; i < noinpages; i++){
2941 put_page(inpages[i]);
2943 for (i = 0; i < nooutpages; i++){
2945 /* Mark output pages dirty. */
2946 spdl_err = set_page_dirty_lock(outpages[i]);
2947 DEBUG(if (spdl_err)printk("cryptocop_ioctl_process: set_page_dirty_lock returned %d\n", spdl_err));
2949 for (i = 0; i < nooutpages; i++){
2950 put_page(outpages[i]);
2953 kfree(digest_result);
2957 kfree(cop->tfrm_op.indata);
2958 kfree(cop->tfrm_op.outdata);
2963 DEBUG(print_lock_status());
2969 static int cryptocop_ioctl_create_session(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg)
2971 cryptocop_session_id sid;
2973 struct cryptocop_private *dev;
2974 struct strcop_session_op *sess_op = (struct strcop_session_op *)arg;
2975 struct strcop_session_op sop;
2976 struct cryptocop_transform_init *tis = NULL;
2977 struct cryptocop_transform_init ti_cipher = {0};
2978 struct cryptocop_transform_init ti_digest = {0};
2979 struct cryptocop_transform_init ti_csum = {0};
2981 if (!access_ok(VERIFY_WRITE, sess_op, sizeof(struct strcop_session_op)))
2983 err = copy_from_user(&sop, sess_op, sizeof(struct strcop_session_op));
2984 if (err) return -EFAULT;
2985 if (sop.cipher != cryptocop_cipher_none) {
2986 if (!access_ok(VERIFY_READ, sop.key, sop.keylen)) return -EFAULT;
2988 DEBUG(printk("cryptocop_ioctl_create_session, sess_op:\n"));
2990 DEBUG(printk("\tcipher:%d\n"
2991 "\tcipher_mode:%d\n"
2999 if (sop.cipher != cryptocop_cipher_none){
3000 /* Init the cipher. */
3001 switch (sop.cipher){
3002 case cryptocop_cipher_des:
3003 ti_cipher.alg = cryptocop_alg_des;
3005 case cryptocop_cipher_3des:
3006 ti_cipher.alg = cryptocop_alg_3des;
3008 case cryptocop_cipher_aes:
3009 ti_cipher.alg = cryptocop_alg_aes;
3012 DEBUG_API(printk("create session, bad cipher algorithm %d\n", sop.cipher));
3015 DEBUG(printk("setting cipher transform %d\n", ti_cipher.alg));
3016 copy_from_user(ti_cipher.key, sop.key, sop.keylen/8);
3017 ti_cipher.keylen = sop.keylen;
3019 case cryptocop_cipher_mode_cbc:
3020 case cryptocop_cipher_mode_ecb:
3021 ti_cipher.cipher_mode = sop.cmode;
3024 DEBUG_API(printk("create session, bad cipher mode %d\n", sop.cmode));
3027 DEBUG(printk("cryptocop_ioctl_create_session: setting CBC mode %d\n", ti_cipher.cipher_mode));
3028 switch (sop.des3_mode){
3029 case cryptocop_3des_eee:
3030 case cryptocop_3des_eed:
3031 case cryptocop_3des_ede:
3032 case cryptocop_3des_edd:
3033 case cryptocop_3des_dee:
3034 case cryptocop_3des_ded:
3035 case cryptocop_3des_dde:
3036 case cryptocop_3des_ddd:
3037 ti_cipher.tdes_mode = sop.des3_mode;
3040 DEBUG_API(printk("create session, bad 3DES mode %d\n", sop.des3_mode));
3043 ti_cipher.tid = CRYPTOCOP_IOCTL_CIPHER_TID;
3044 ti_cipher.next = tis;
3046 } /* if (sop.cipher != cryptocop_cipher_none) */
3047 if (sop.digest != cryptocop_digest_none){
3048 DEBUG(printk("setting digest transform\n"));
3049 switch (sop.digest){
3050 case cryptocop_digest_md5:
3051 ti_digest.alg = cryptocop_alg_md5;
3053 case cryptocop_digest_sha1:
3054 ti_digest.alg = cryptocop_alg_sha1;
3057 DEBUG_API(printk("create session, bad digest algorithm %d\n", sop.digest));
3060 ti_digest.tid = CRYPTOCOP_IOCTL_DIGEST_TID;
3061 ti_digest.next = tis;
3063 } /* if (sop.digest != cryptocop_digest_none) */
3064 if (sop.csum != cryptocop_csum_none){
3065 DEBUG(printk("setting csum transform\n"));
3067 case cryptocop_csum_le:
3068 case cryptocop_csum_be:
3069 ti_csum.csum_mode = sop.csum;
3072 DEBUG_API(printk("create session, bad checksum algorithm %d\n", sop.csum));
3075 ti_csum.alg = cryptocop_alg_csum;
3076 ti_csum.tid = CRYPTOCOP_IOCTL_CSUM_TID;
3079 } /* (sop.csum != cryptocop_csum_none) */
3080 dev = kmalloc(sizeof(struct cryptocop_private), GFP_KERNEL);
3082 DEBUG_API(printk("create session, alloc dev\n"));
3086 err = cryptocop_new_session(&sid, tis, GFP_KERNEL);
3087 DEBUG({ if (err) printk("create session, cryptocop_new_session %d\n", err);});
3093 sess_op->ses_id = sid;
3095 dev->next = filp->private_data;
3096 filp->private_data = dev;
3101 static int cryptocop_ioctl(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg)
3104 if (_IOC_TYPE(cmd) != ETRAXCRYPTOCOP_IOCTYPE) {
3105 DEBUG_API(printk("cryptocop_ioctl: wrong type\n"));
3108 if (_IOC_NR(cmd) > CRYPTOCOP_IO_MAXNR){
3111 /* Access check of the argument. Some commands, e.g. create session and process op,
3112 needs additional checks. Those are handled in the command handling functions. */
3113 if (_IOC_DIR(cmd) & _IOC_READ)
3114 err = !access_ok(VERIFY_WRITE, (void *)arg, _IOC_SIZE(cmd));
3115 else if (_IOC_DIR(cmd) & _IOC_WRITE)
3116 err = !access_ok(VERIFY_READ, (void *)arg, _IOC_SIZE(cmd));
3117 if (err) return -EFAULT;
3120 case CRYPTOCOP_IO_CREATE_SESSION:
3121 return cryptocop_ioctl_create_session(inode, filp, cmd, arg);
3122 case CRYPTOCOP_IO_CLOSE_SESSION:
3123 return cryptocop_ioctl_close_session(inode, filp, cmd, arg);
3124 case CRYPTOCOP_IO_PROCESS_OP:
3125 return cryptocop_ioctl_process(inode, filp, cmd, arg);
3127 DEBUG_API(printk("cryptocop_ioctl: unknown command\n"));
3135 static void print_dma_descriptors(struct cryptocop_int_operation *iop)
3137 struct cryptocop_dma_desc *cdesc_out = iop->cdesc_out;
3138 struct cryptocop_dma_desc *cdesc_in = iop->cdesc_in;
3141 printk("print_dma_descriptors start\n");
3144 printk("\tsid: 0x%lld\n", iop->sid);
3146 printk("\tcdesc_out: 0x%p\n", iop->cdesc_out);
3147 printk("\tcdesc_in: 0x%p\n", iop->cdesc_in);
3148 printk("\tddesc_out: 0x%p\n", iop->ddesc_out);
3149 printk("\tddesc_in: 0x%p\n", iop->ddesc_in);
3151 printk("\niop->ctx_out: 0x%p phys: 0x%p\n", &iop->ctx_out, (char*)virt_to_phys(&iop->ctx_out));
3152 printk("\tnext: 0x%p\n"
3153 "\tsaved_data: 0x%p\n"
3154 "\tsaved_data_buf: 0x%p\n",
3156 iop->ctx_out.saved_data,
3157 iop->ctx_out.saved_data_buf);
3159 printk("\niop->ctx_in: 0x%p phys: 0x%p\n", &iop->ctx_in, (char*)virt_to_phys(&iop->ctx_in));
3160 printk("\tnext: 0x%p\n"
3161 "\tsaved_data: 0x%p\n"
3162 "\tsaved_data_buf: 0x%p\n",
3164 iop->ctx_in.saved_data,
3165 iop->ctx_in.saved_data_buf);
3170 printk("cdesc_out %d, desc=0x%p\n", i, cdesc_out->dma_descr);
3171 printk("\n\tvirt_to_phys(desc): 0x%p\n", (char*)virt_to_phys(cdesc_out->dma_descr));
3172 td = cdesc_out->dma_descr;
3173 printk("\n\tbuf: 0x%p\n"
3192 cdesc_out = cdesc_out->next;
3198 printk("cdesc_in %d, desc=0x%p\n", i, cdesc_in->dma_descr);
3199 printk("\n\tvirt_to_phys(desc): 0x%p\n", (char*)virt_to_phys(cdesc_in->dma_descr));
3200 td = cdesc_in->dma_descr;
3201 printk("\n\tbuf: 0x%p\n"
3220 cdesc_in = cdesc_in->next;
3224 printk("print_dma_descriptors end\n");
3228 static void print_strcop_crypto_op(struct strcop_crypto_op *cop)
3230 printk("print_strcop_crypto_op, 0x%p\n", cop);
3233 printk("indata=0x%p\n"
3237 "cipher_explicit=%d\n"
3246 cop->cipher_explicit,
3249 cop->cipher_outdata,
3250 cop->cipher_outlen);
3252 printk("do_digest=%d\n"
3259 printk("do_csum=%d\n"
3267 static void print_cryptocop_operation(struct cryptocop_operation *cop)
3269 struct cryptocop_desc *d;
3270 struct cryptocop_tfrm_cfg *tc;
3271 struct cryptocop_desc_cfg *dc;
3274 printk("print_cryptocop_operation, cop=0x%p\n\n", cop);
3275 printk("sid: %lld\n", cop->sid);
3276 printk("operation_status=%d\n"
3279 "fast_callback=%d\n",
3280 cop->operation_status,
3283 cop->fast_callback);
3285 if (cop->use_dmalists){
3286 print_user_dma_lists(&cop->list_op);
3288 printk("cop->tfrm_op\n"
3297 cop->tfrm_op.tfrm_cfg,
3299 cop->tfrm_op.indata,
3300 cop->tfrm_op.incount,
3302 cop->tfrm_op.outdata,
3303 cop->tfrm_op.outcount,
3304 cop->tfrm_op.outlen);
3306 tc = cop->tfrm_op.tfrm_cfg;
3308 printk("tfrm_cfg, 0x%p\n"
3320 d = cop->tfrm_op.desc;
3322 printk("\n======================desc, 0x%p\n"
3332 printk("=========desc_cfg, 0x%p\n"
3346 printk("\n====iniov\n");
3347 for (i = 0; i < cop->tfrm_op.incount; i++){
3348 printk("indata[%d]\n"
3352 cop->tfrm_op.indata[i].iov_base,
3353 cop->tfrm_op.indata[i].iov_len);
3355 printk("\n====outiov\n");
3356 for (i = 0; i < cop->tfrm_op.outcount; i++){
3357 printk("outdata[%d]\n"
3361 cop->tfrm_op.outdata[i].iov_base,
3362 cop->tfrm_op.outdata[i].iov_len);
3365 printk("------------end print_cryptocop_operation\n");
3369 static void print_user_dma_lists(struct cryptocop_dma_list_operation *dma_op)
3374 printk("print_user_dma_lists, dma_op=0x%p\n", dma_op);
3376 printk("out_data_buf = 0x%p, phys_to_virt(out_data_buf) = 0x%p\n", dma_op->out_data_buf, phys_to_virt((unsigned long int)dma_op->out_data_buf));
3377 printk("in_data_buf = 0x%p, phys_to_virt(in_data_buf) = 0x%p\n", dma_op->in_data_buf, phys_to_virt((unsigned long int)dma_op->in_data_buf));
3379 printk("##############outlist\n");
3380 dd = phys_to_virt((unsigned long int)dma_op->outlist);
3382 while (dd != NULL) {
3383 printk("#%d phys_to_virt(desc) 0x%p\n", i, dd);
3384 printk("\n\tbuf: 0x%p\n"
3406 dd = phys_to_virt((unsigned long int)dd->next);
3410 printk("##############inlist\n");
3411 dd = phys_to_virt((unsigned long int)dma_op->inlist);
3413 while (dd != NULL) {
3414 printk("#%d phys_to_virt(desc) 0x%p\n", i, dd);
3415 printk("\n\tbuf: 0x%p\n"
3437 dd = phys_to_virt((unsigned long int)dd->next);
3443 static void print_lock_status(void)
3445 printk("**********************print_lock_status\n");
3446 printk("cryptocop_completed_jobs_lock %d\n", spin_is_locked(&cryptocop_completed_jobs_lock));
3447 printk("cryptocop_job_queue_lock %d\n", spin_is_locked(&cryptocop_job_queue_lock));
3448 printk("descr_pool_lock %d\n", spin_is_locked(&descr_pool_lock));
3449 printk("cryptocop_sessions_lock %d\n", spin_is_locked(cryptocop_sessions_lock));
3450 printk("running_job_lock %d\n", spin_is_locked(running_job_lock));
3451 printk("cryptocop_process_lock %d\n", spin_is_locked(cryptocop_process_lock));
3456 static const char cryptocop_name[] = "ETRAX FS stream co-processor";
3458 static int init_stream_coprocessor(void)
3462 static int initialized = 0;
3469 printk("ETRAX FS stream co-processor driver v0.01, (c) 2003 Axis Communications AB\n");
3471 err = register_chrdev(CRYPTOCOP_MAJOR, cryptocop_name, &cryptocop_fops);
3473 printk(KERN_ERR "stream co-processor: could not get major number.\n");
3477 err = init_cryptocop();
3479 (void)unregister_chrdev(CRYPTOCOP_MAJOR, cryptocop_name);
3482 err = cryptocop_job_queue_init();
3484 release_cryptocop();
3485 (void)unregister_chrdev(CRYPTOCOP_MAJOR, cryptocop_name);
3488 /* Init the descriptor pool. */
3489 for (i = 0; i < CRYPTOCOP_DESCRIPTOR_POOL_SIZE - 1; i++) {
3490 descr_pool[i].from_pool = 1;
3491 descr_pool[i].next = &descr_pool[i + 1];
3493 descr_pool[i].from_pool = 1;
3494 descr_pool[i].next = NULL;
3495 descr_pool_free_list = &descr_pool[0];
3496 descr_pool_no_free = CRYPTOCOP_DESCRIPTOR_POOL_SIZE;
3498 spin_lock_init(&cryptocop_completed_jobs_lock);
3499 spin_lock_init(&cryptocop_job_queue_lock);
3500 spin_lock_init(&descr_pool_lock);
3501 spin_lock_init(&cryptocop_sessions_lock);
3502 spin_lock_init(&running_job_lock);
3503 spin_lock_init(&cryptocop_process_lock);
3505 cryptocop_sessions = NULL;
3508 cryptocop_running_job = NULL;
3510 printk("stream co-processor: init done.\n");
3514 static void __exit exit_stream_coprocessor(void)
3516 release_cryptocop();
3517 cryptocop_job_queue_close();
3520 module_init(init_stream_coprocessor);
3521 module_exit(exit_stream_coprocessor);