/*
 * Stream co-processor driver for the ETRAX FS
 *
 *    Copyright (C) 2003-2007  Axis Communications AB
 */
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/string.h>

#include <linux/smp_lock.h>
#include <linux/spinlock.h>
#include <linux/stddef.h>

#include <asm/uaccess.h>

#include <asm/atomic.h>

#include <linux/list.h>
#include <linux/interrupt.h>

#include <asm/signal.h>

#include <hwregs/dma.h>
#include <hwregs/reg_map.h>
#include <hwregs/reg_rdwr.h>
#include <hwregs/intr_vect_defs.h>

#include <hwregs/strcop.h>
#include <hwregs/strcop_defs.h>
#include <cryptocop.h>
#ifdef CONFIG_ETRAXFS
#define IN_DMA 9
#define OUT_DMA 8
#define IN_DMA_INST regi_dma9
#define OUT_DMA_INST regi_dma8
#define DMA_IRQ DMA9_INTR_VECT
#else
#define IN_DMA 3
#define OUT_DMA 2
#define IN_DMA_INST regi_dma3
#define OUT_DMA_INST regi_dma2
#define DMA_IRQ DMA3_INTR_VECT
#endif
#define DESCR_ALLOC_PAD (31)
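
/*
 * The DMA descriptors must be 32-byte aligned.  dma_descr_buf in struct
 * cryptocop_dma_desc below is therefore over-allocated by DESCR_ALLOC_PAD
 * bytes, and alloc_cdesc() rounds up to the next 32-byte boundary inside it:
 *
 *	aligned = ((unsigned long)buf + DESCR_ALLOC_PAD) & ~0x1FUL;
 *
 * With DESCR_ALLOC_PAD == 31, every possible placement of dma_descr_buf
 * contains such a boundary.
 */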
struct cryptocop_dma_desc {
	char *free_buf; /* If non-null will be kfreed in free_cdesc(). */
	dma_descr_data *dma_descr;

	unsigned char dma_descr_buf[sizeof(dma_descr_data) + DESCR_ALLOC_PAD];

	unsigned int from_pool:1; /* If 1, 'allocated' from the descriptor pool. */
	struct cryptocop_dma_desc *next;
};
struct cryptocop_int_operation{
	void *alloc_ptr;
	cryptocop_session_id sid;

	dma_descr_context ctx_out;
	dma_descr_context ctx_in;

	/* DMA descriptors allocated by driver. */
	struct cryptocop_dma_desc *cdesc_out;
	struct cryptocop_dma_desc *cdesc_in;

	/* Strcop config to use. */
	cryptocop_3des_mode tdes_mode;
	cryptocop_csum_type csum_mode;

	/* DMA descrs provided by consumer. */
	dma_descr_data *ddesc_out;
	dma_descr_data *ddesc_in;
};
struct cryptocop_tfrm_ctx {
	cryptocop_tfrm_id tid;
	unsigned int blocklength;

	unsigned int start_ix;

	struct cryptocop_tfrm_cfg *tcfg;
	struct cryptocop_transform_ctx *tctx;

	unsigned char previous_src;
	unsigned char current_src;

	/* Values to use in metadata out. */
	unsigned char hash_conf;
	unsigned char hash_mode;
	unsigned char ciph_conf;
	unsigned char cbcmode;
	unsigned char decrypt;

	unsigned int requires_padding:1;
	unsigned int strict_block_length:1;
	unsigned int active:1;
	unsigned int done:1;

	size_t consumed;
	size_t produced;

	/* Pad (input) descriptors to put in the DMA out list when the transform
	 * output is put on the DMA in list. */
	struct cryptocop_dma_desc *pad_descs;

	struct cryptocop_tfrm_ctx *prev_src;
	struct cryptocop_tfrm_ctx *curr_src;

	unsigned char unit_no;
};
struct cryptocop_private{
	cryptocop_session_id sid;
	struct cryptocop_private *next;
};
struct cryptocop_transform_ctx{
	struct cryptocop_transform_init init;
	unsigned char dec_key[CRYPTOCOP_MAX_KEY_LENGTH];
	unsigned int dec_key_set:1;

	struct cryptocop_transform_ctx *next;
};
struct cryptocop_session{
	cryptocop_session_id sid;

	struct cryptocop_transform_ctx *tfrm_ctx;

	struct cryptocop_session *next;
};
/* Priority levels for jobs sent to the cryptocop.  Checksum operations from
   the kernel have the highest priority, since TCP/IP stack processing must
   not be starved. */
typedef enum {
	cryptocop_prio_kernel_csum = 0,
	cryptocop_prio_kernel = 1,
	cryptocop_prio_user = 2,
	cryptocop_prio_no_prios = 3
} cryptocop_queue_priority;
struct cryptocop_prio_queue{
	struct list_head jobs;
	cryptocop_queue_priority prio;
};

struct cryptocop_prio_job{
	struct list_head node;
	cryptocop_queue_priority prio;

	struct cryptocop_operation *oper;
	struct cryptocop_int_operation *iop;
};
struct ioctl_job_cb_ctx {
	unsigned int processed:1;
};
static struct cryptocop_session *cryptocop_sessions = NULL;
spinlock_t cryptocop_sessions_lock;

/* Next Session ID to assign. */
static cryptocop_session_id next_sid = 1;

/* Pad for checksum. */
static const char csum_zero_pad[1] = {0x00};

/* Trash buffer for mem2mem operations. */
#define MEM2MEM_DISCARD_BUF_LENGTH (512)
static unsigned char mem2mem_discard_buf[MEM2MEM_DISCARD_BUF_LENGTH];
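
/*
 * A mem2mem transform must consume every byte that the DMA out list pushes
 * before its own payload starts; append_input_descriptors() builds DMA in
 * descriptors that dump those leading start_ix bytes into this buffer,
 * MEM2MEM_DISCARD_BUF_LENGTH bytes at a time.
 */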
/* Descriptor pool. */
/* FIXME Tweak this value. */
#define CRYPTOCOP_DESCRIPTOR_POOL_SIZE (100)
static struct cryptocop_dma_desc descr_pool[CRYPTOCOP_DESCRIPTOR_POOL_SIZE];
static struct cryptocop_dma_desc *descr_pool_free_list;
static int descr_pool_no_free;
static spinlock_t descr_pool_lock;
/* Lock to stop the cryptocop from starting processing of a new operation. The
   holder of this lock MUST call cryptocop_start_job() after releasing it. */
spinlock_t cryptocop_process_lock;
static struct cryptocop_prio_queue cryptocop_job_queues[cryptocop_prio_no_prios];
static spinlock_t cryptocop_job_queue_lock;
static struct cryptocop_prio_job *cryptocop_running_job = NULL;
static spinlock_t running_job_lock;
/* The interrupt handler appends completed jobs to this list. The scheduled
 * tasklet removes them upon sending the response to the crypto consumer. */
static struct list_head cryptocop_completed_jobs;
static spinlock_t cryptocop_completed_jobs_lock;

DECLARE_WAIT_QUEUE_HEAD(cryptocop_ioc_process_wq);
/** Local functions. **/

static int cryptocop_open(struct inode *, struct file *);

static int cryptocop_release(struct inode *, struct file *);

static int cryptocop_ioctl(struct inode *inode, struct file *file,
			   unsigned int cmd, unsigned long arg);

static void cryptocop_start_job(void);

static int cryptocop_job_queue_insert(cryptocop_queue_priority prio, struct cryptocop_operation *operation);
static int cryptocop_job_setup(struct cryptocop_prio_job **pj, struct cryptocop_operation *operation);

static int cryptocop_job_queue_init(void);
static void cryptocop_job_queue_close(void);

static int create_md5_pad(int alloc_flag, unsigned long long hashed_length, char **pad, size_t *pad_length);

static int create_sha1_pad(int alloc_flag, unsigned long long hashed_length, char **pad, size_t *pad_length);

static int transform_ok(struct cryptocop_transform_init *tinit);

static struct cryptocop_session *get_session(cryptocop_session_id sid);

static struct cryptocop_transform_ctx *get_transform_ctx(struct cryptocop_session *sess, cryptocop_tfrm_id tid);

static void delete_internal_operation(struct cryptocop_int_operation *iop);

static void get_aes_decrypt_key(unsigned char *dec_key, const unsigned char *key, unsigned int keylength);

static int init_stream_coprocessor(void);

static void __exit exit_stream_coprocessor(void);
#define DEBUG_API(s) s
static void print_cryptocop_operation(struct cryptocop_operation *cop);
static void print_dma_descriptors(struct cryptocop_int_operation *iop);
static void print_strcop_crypto_op(struct strcop_crypto_op *cop);
static void print_lock_status(void);
static void print_user_dma_lists(struct cryptocop_dma_list_operation *dma_op);
#define assert(s) do { if (!(s)) panic(#s); } while (0)
/* Transform constants. */
#define DES_BLOCK_LENGTH (8)
#define AES_BLOCK_LENGTH (16)
#define MD5_BLOCK_LENGTH (64)
#define SHA1_BLOCK_LENGTH (64)
#define CSUM_BLOCK_LENGTH (2)

#define MD5_STATE_LENGTH (16)
#define SHA1_STATE_LENGTH (20)

/* The device number. */
#define CRYPTOCOP_MAJOR (254)
#define CRYPTOCOP_MINOR (0)
const struct file_operations cryptocop_fops = {
	.owner   = THIS_MODULE,
	.open    = cryptocop_open,
	.release = cryptocop_release,
	.ioctl   = cryptocop_ioctl
};
static void free_cdesc(struct cryptocop_dma_desc *cdesc)
{
	DEBUG(printk("free_cdesc: cdesc 0x%p, from_pool=%d\n", cdesc, cdesc->from_pool));
	kfree(cdesc->free_buf);

	if (cdesc->from_pool) {
		unsigned long int flags;

		spin_lock_irqsave(&descr_pool_lock, flags);
		cdesc->next = descr_pool_free_list;
		descr_pool_free_list = cdesc;
		++descr_pool_no_free;
		spin_unlock_irqrestore(&descr_pool_lock, flags);
	} else {
		kfree(cdesc);
	}
}
static struct cryptocop_dma_desc *alloc_cdesc(int alloc_flag)
{
	int use_pool = (alloc_flag & GFP_ATOMIC) ? 1 : 0;
	struct cryptocop_dma_desc *cdesc;

	if (use_pool) {
		unsigned long int flags;
		spin_lock_irqsave(&descr_pool_lock, flags);
		if (!descr_pool_free_list) {
			spin_unlock_irqrestore(&descr_pool_lock, flags);
			DEBUG_API(printk("alloc_cdesc: pool is empty\n"));
			return NULL;
		}
		cdesc = descr_pool_free_list;
		descr_pool_free_list = descr_pool_free_list->next;
		--descr_pool_no_free;
		spin_unlock_irqrestore(&descr_pool_lock, flags);
		cdesc->from_pool = 1;
	} else {
		cdesc = kmalloc(sizeof(struct cryptocop_dma_desc), alloc_flag);
		if (!cdesc) {
			DEBUG_API(printk("alloc_cdesc: kmalloc\n"));
			return NULL;
		}
		cdesc->from_pool = 0;
	}
	cdesc->dma_descr = (dma_descr_data*)(((unsigned long int)cdesc + offsetof(struct cryptocop_dma_desc, dma_descr_buf) + DESCR_ALLOC_PAD) & ~0x0000001F);

	cdesc->free_buf = NULL;
	cdesc->dma_descr->out_eop = 0;
	cdesc->dma_descr->in_eop = 0;
	cdesc->dma_descr->intr = 0;
	cdesc->dma_descr->eol = 0;
	cdesc->dma_descr->wait = 0;
	cdesc->dma_descr->buf = NULL;
	cdesc->dma_descr->after = NULL;

	DEBUG_API(printk("alloc_cdesc: return 0x%p, cdesc->dma_descr=0x%p, from_pool=%d\n", cdesc, cdesc->dma_descr, cdesc->from_pool));
	return cdesc;
}
static void setup_descr_chain(struct cryptocop_dma_desc *cd)
{
	DEBUG(printk("setup_descr_chain: entering\n"));
	while (cd) {
		if (cd->next) {
			cd->dma_descr->next = (dma_descr_data*)virt_to_phys(cd->next->dma_descr);
		} else {
			cd->dma_descr->next = NULL;
		}
		cd = cd->next;
	}
	DEBUG(printk("setup_descr_chain: exit\n"));
}
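
/*
 * Note the two parallel chains built above: cdesc->next links the wrapper
 * structs by virtual address for the driver's own bookkeeping, while
 * dma_descr->next must hold the physical address of the next dma_descr_data,
 * since that is what the DMA engine dereferences.
 */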
/* Create a pad descriptor for the transform.
 * Return -1 for error, 0 if pad created. */
static int create_pad_descriptor(struct cryptocop_tfrm_ctx *tc, struct cryptocop_dma_desc **pad_desc, int alloc_flag)
{
	struct cryptocop_dma_desc *cdesc = NULL;
	int error = 0;
	struct strcop_meta_out mo = {
		.ciphsel = src_none,
		.hashsel = src_none,
		.csumsel = src_none
	};
	char *pad = NULL;
	size_t plen = 0;

	DEBUG(printk("create_pad_descriptor: start.\n"));
	/* Setup pad descriptor. */

	DEBUG(printk("create_pad_descriptor: setting up padding.\n"));
	cdesc = alloc_cdesc(alloc_flag);
	if (!cdesc){
		DEBUG_API(printk("create_pad_descriptor: alloc pad desc\n"));
		goto error_cleanup;
	}
	switch (tc->unit_no) {
	case src_md5:
		error = create_md5_pad(alloc_flag, tc->consumed, &pad, &plen);
		if (error){
			DEBUG_API(printk("create_pad_descriptor: create_md5_pad_failed\n"));
			goto error_cleanup;
		}
		cdesc->free_buf = pad;
		mo.hashsel = src_dma;
		mo.hashconf = tc->hash_conf;
		mo.hashmode = tc->hash_mode;
		break;
	case src_sha1:
		error = create_sha1_pad(alloc_flag, tc->consumed, &pad, &plen);
		if (error){
			DEBUG_API(printk("create_pad_descriptor: create_sha1_pad_failed\n"));
			goto error_cleanup;
		}
		cdesc->free_buf = pad;
		mo.hashsel = src_dma;
		mo.hashconf = tc->hash_conf;
		mo.hashmode = tc->hash_mode;
		break;
	case src_csum:
		if (tc->consumed % tc->blocklength){
			pad = (char*)csum_zero_pad;
			plen = 1;
		} else {
			pad = (char*)cdesc; /* Use any pointer. */
			plen = 0;
		}
		mo.csumsel = src_dma;
		break;
	}
	cdesc->dma_descr->wait = 1;
	cdesc->dma_descr->out_eop = 1; /* Since this is a pad, output is pushed.  EOP is OK here, since the padded unit is the only one active. */
	cdesc->dma_descr->buf = (char*)virt_to_phys((char*)pad);
	cdesc->dma_descr->after = cdesc->dma_descr->buf + plen;

	cdesc->dma_descr->md = REG_TYPE_CONV(unsigned short int, struct strcop_meta_out, mo);
	*pad_desc = cdesc;

	return 0;

 error_cleanup:
	if (cdesc) free_cdesc(cdesc);
	return -1;
}
static int setup_key_dl_desc(struct cryptocop_tfrm_ctx *tc, struct cryptocop_dma_desc **kd, int alloc_flag)
{
	struct cryptocop_dma_desc *key_desc = alloc_cdesc(alloc_flag);
	struct strcop_meta_out mo = {0};

	DEBUG(printk("setup_key_dl_desc\n"));

	if (!key_desc) {
		DEBUG_API(printk("setup_key_dl_desc: failed descriptor allocation.\n"));
		return -ENOMEM;
	}

	if ((tc->tctx->init.alg == cryptocop_alg_aes) && (tc->tcfg->flags & CRYPTOCOP_DECRYPT)) {
		/* Precook the AES decrypt key. */
		if (!tc->tctx->dec_key_set){
			get_aes_decrypt_key(tc->tctx->dec_key, tc->tctx->init.key, tc->tctx->init.keylen);
			tc->tctx->dec_key_set = 1;
		}
		key_desc->dma_descr->buf = (char*)virt_to_phys(tc->tctx->dec_key);
		key_desc->dma_descr->after = key_desc->dma_descr->buf + tc->tctx->init.keylen/8;
	} else {
		key_desc->dma_descr->buf = (char*)virt_to_phys(tc->tctx->init.key);
		key_desc->dma_descr->after = key_desc->dma_descr->buf + tc->tctx->init.keylen/8;
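		/* init.keylen is given in bits (cf. transform_ok()), so
		 * keylen/8 bytes of key material are DMAed to the unit. */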
	}
	/* Setup metadata. */
	switch (tc->tctx->init.keylen) {
	/* (Key-size dependent metadata setup.) */
	}
	mo.ciphsel = mo.hashsel = mo.csumsel = src_none;
	key_desc->dma_descr->md = REG_TYPE_CONV(unsigned short int, struct strcop_meta_out, mo);

	key_desc->dma_descr->out_eop = 1;
	key_desc->dma_descr->wait = 1;
	key_desc->dma_descr->intr = 0;

	*kd = key_desc;
	return 0;
}
static int setup_cipher_iv_desc(struct cryptocop_tfrm_ctx *tc, struct cryptocop_dma_desc **id, int alloc_flag)
{
	struct cryptocop_dma_desc *iv_desc = alloc_cdesc(alloc_flag);
	struct strcop_meta_out mo = {0};

	DEBUG(printk("setup_cipher_iv_desc\n"));

	if (!iv_desc) {
		DEBUG_API(printk("setup_cipher_iv_desc: failed CBC IV descriptor allocation.\n"));
		return -ENOMEM;
	}
	iv_desc->dma_descr->buf = (char*)virt_to_phys(tc->tcfg->iv);
	iv_desc->dma_descr->after = iv_desc->dma_descr->buf + tc->blocklength;

	/* Setup metadata. */
	mo.hashsel = mo.csumsel = src_none;
	mo.ciphsel = src_dma;
	mo.ciphconf = tc->ciph_conf;
	mo.cbcmode = tc->cbcmode;

	iv_desc->dma_descr->md = REG_TYPE_CONV(unsigned short int, struct strcop_meta_out, mo);

	iv_desc->dma_descr->out_eop = 0;
	iv_desc->dma_descr->wait = 1;
	iv_desc->dma_descr->intr = 0;

	*id = iv_desc;
	return 0;
}
/* Map the output length of the transform to operation output, starting at the inject index. */
static int create_input_descriptors(struct cryptocop_operation *operation, struct cryptocop_tfrm_ctx *tc, struct cryptocop_dma_desc **id, int alloc_flag)
{
	int err = 0;
	struct cryptocop_dma_desc head = {0};
	struct cryptocop_dma_desc *outdesc = &head;
	size_t iov_offset = 0;
	size_t out_ix = 0;
	int outiov_ix = 0;
	struct strcop_meta_in mi = {0};

	size_t out_length = tc->produced;
	int rem_length;
	int dlength;

	assert(out_length != 0);
	if (((tc->produced + tc->tcfg->inject_ix) > operation->tfrm_op.outlen) || (tc->produced && (operation->tfrm_op.outlen == 0))) {
		DEBUG_API(printk("create_input_descriptors: operation outdata too small\n"));
		return -EINVAL;
	}
	/* Traverse the out iovec until the result inject index is reached. */
	while ((outiov_ix < operation->tfrm_op.outcount) && ((out_ix + operation->tfrm_op.outdata[outiov_ix].iov_len) <= tc->tcfg->inject_ix)){
		out_ix += operation->tfrm_op.outdata[outiov_ix].iov_len;
		++outiov_ix;
	}
	if (outiov_ix >= operation->tfrm_op.outcount){
		DEBUG_API(printk("create_input_descriptors: operation outdata too small\n"));
		return -EINVAL;
	}
	iov_offset = tc->tcfg->inject_ix - out_ix;
	mi.dmasel = tc->unit_no;

	/* Setup the output descriptors. */
	while ((out_length > 0) && (outiov_ix < operation->tfrm_op.outcount)) {
		outdesc->next = alloc_cdesc(alloc_flag);
		if (!outdesc->next) {
			DEBUG_API(printk("create_input_descriptors: alloc_cdesc\n"));
			err = -ENOMEM;
			goto error_cleanup;
		}
		outdesc = outdesc->next;
		rem_length = operation->tfrm_op.outdata[outiov_ix].iov_len - iov_offset;
		dlength = (out_length < rem_length) ? out_length : rem_length;

		DEBUG(printk("create_input_descriptors:\n"
			     "outiov_ix=%d, rem_length=%d, dlength=%d\n"
			     "iov_offset=%d, outdata[outiov_ix].iov_len=%d\n"
			     "outcount=%d, outiov_ix=%d\n",
			     outiov_ix, rem_length, dlength, iov_offset, operation->tfrm_op.outdata[outiov_ix].iov_len, operation->tfrm_op.outcount, outiov_ix));

		outdesc->dma_descr->buf = (char*)virt_to_phys(operation->tfrm_op.outdata[outiov_ix].iov_base + iov_offset);
		outdesc->dma_descr->after = outdesc->dma_descr->buf + dlength;
		outdesc->dma_descr->md = REG_TYPE_CONV(unsigned short int, struct strcop_meta_in, mi);

		out_length -= dlength;
		iov_offset += dlength;
		if (iov_offset >= operation->tfrm_op.outdata[outiov_ix].iov_len) {
			iov_offset = 0;
			++outiov_ix;
		}
	}
	if (out_length > 0){
		DEBUG_API(printk("create_input_descriptors: not enough room for output, %d remaining\n", out_length));
		err = -EINVAL;
		goto error_cleanup;
	}
	/* Set sync in last descriptor. */
	mi.sync = 1;
	outdesc->dma_descr->md = REG_TYPE_CONV(unsigned short int, struct strcop_meta_in, mi);

	*id = head.next;
	return 0;

 error_cleanup:
	while (head.next) {
		outdesc = head.next->next;
		free_cdesc(head.next);
		head.next = outdesc;
	}
	return err;
}
static int create_output_descriptors(struct cryptocop_operation *operation, int *iniov_ix, int *iniov_offset, size_t desc_len, struct cryptocop_dma_desc **current_out_cdesc, struct strcop_meta_out *meta_out, int alloc_flag)
{
	while (desc_len != 0) {
		struct cryptocop_dma_desc *cdesc;
		int rem_length = operation->tfrm_op.indata[*iniov_ix].iov_len - *iniov_offset;
		int dlength = (desc_len < rem_length) ? desc_len : rem_length;

		cdesc = alloc_cdesc(alloc_flag);
		if (!cdesc) {
			DEBUG_API(printk("create_output_descriptors: alloc_cdesc\n"));
			return -ENOMEM;
		}
		(*current_out_cdesc)->next = cdesc;
		(*current_out_cdesc) = cdesc;

		cdesc->free_buf = NULL;

		cdesc->dma_descr->buf = (char*)virt_to_phys(operation->tfrm_op.indata[*iniov_ix].iov_base + *iniov_offset);
		cdesc->dma_descr->after = cdesc->dma_descr->buf + dlength;

		desc_len -= dlength;
		*iniov_offset += dlength;
		assert(desc_len >= 0);
		if (*iniov_offset >= operation->tfrm_op.indata[*iniov_ix].iov_len) {
			*iniov_offset = 0;
			++(*iniov_ix);
			if (*iniov_ix > operation->tfrm_op.incount) {
				DEBUG_API(printk("create_output_descriptors: not enough indata in operation."));
				return -EINVAL;
			}
		}
		cdesc->dma_descr->md = REG_TYPE_CONV(unsigned short int, struct strcop_meta_out, (*meta_out));
	} /* while (desc_len != 0) */
	/* Last DMA descriptor gets a 'wait' bit to signal expected change in metadata. */
	(*current_out_cdesc)->dma_descr->wait = 1; /* This will set extraneous WAIT in some situations, e.g. when padding hashes and checksums. */

	return 0;
}
static int append_input_descriptors(struct cryptocop_operation *operation, struct cryptocop_dma_desc **current_in_cdesc, struct cryptocop_dma_desc **current_out_cdesc, struct cryptocop_tfrm_ctx *tc, int alloc_flag)
{
	DEBUG(printk("append_input_descriptors, tc=0x%p, unit_no=%d\n", tc, tc->unit_no));
	if (tc->tcfg) {
		int failed = 0;
		struct cryptocop_dma_desc *idescs = NULL;

		DEBUG(printk("append_input_descriptors: pushing output, consumed %d produced %d bytes.\n", tc->consumed, tc->produced));
		if (tc->pad_descs) {
			DEBUG(printk("append_input_descriptors: append pad descriptors to DMA out list.\n"));
			while (tc->pad_descs) {
				DEBUG(printk("append descriptor 0x%p\n", tc->pad_descs));
				(*current_out_cdesc)->next = tc->pad_descs;
				tc->pad_descs = tc->pad_descs->next;
				(*current_out_cdesc) = (*current_out_cdesc)->next;
			}
		}

		/* Setup and append output descriptors to DMA in list. */
		if (tc->unit_no == src_dma){
			/* mem2mem.  Setup DMA in descriptors to discard all input prior to the requested mem2mem data. */
			struct strcop_meta_in mi = {.sync = 0, .dmasel = src_dma};
			unsigned int start_ix = tc->start_ix;

			while (start_ix) {
				unsigned int desclen = start_ix < MEM2MEM_DISCARD_BUF_LENGTH ? start_ix : MEM2MEM_DISCARD_BUF_LENGTH;

				(*current_in_cdesc)->next = alloc_cdesc(alloc_flag);
				if (!(*current_in_cdesc)->next){
					DEBUG_API(printk("append_input_descriptors: alloc_cdesc mem2mem discard failed\n"));
					return -ENOMEM;
				}
				(*current_in_cdesc) = (*current_in_cdesc)->next;
				(*current_in_cdesc)->dma_descr->buf = (char*)virt_to_phys(mem2mem_discard_buf);
				(*current_in_cdesc)->dma_descr->after = (*current_in_cdesc)->dma_descr->buf + desclen;
				(*current_in_cdesc)->dma_descr->md = REG_TYPE_CONV(unsigned short int, struct strcop_meta_in, mi);
				start_ix -= desclen;
			}
			mi.sync = 1;
			(*current_in_cdesc)->dma_descr->md = REG_TYPE_CONV(unsigned short int, struct strcop_meta_in, mi);
		}

		failed = create_input_descriptors(operation, tc, &idescs, alloc_flag);
		if (failed){
			DEBUG_API(printk("append_input_descriptors: output descriptor setup failed\n"));
			return failed;
		}
		DEBUG(printk("append_input_descriptors: append output descriptors to DMA in list.\n"));
		while (idescs) {
			DEBUG(printk("append descriptor 0x%p\n", idescs));
			(*current_in_cdesc)->next = idescs;
			idescs = idescs->next;
			(*current_in_cdesc) = (*current_in_cdesc)->next;
		}
	}
	return 0;
}
static int cryptocop_setup_dma_list(struct cryptocop_operation *operation, struct cryptocop_int_operation **int_op, int alloc_flag)
{
	struct cryptocop_session *sess;
	struct cryptocop_transform_ctx *tctx;

	struct cryptocop_tfrm_ctx digest_ctx = {
		.previous_src = src_none,
		.current_src = src_none,
		.requires_padding = 1,
		.strict_block_length = 0,
	};
	struct cryptocop_tfrm_ctx cipher_ctx = {
		.previous_src = src_none,
		.current_src = src_none,
		.requires_padding = 0,
		.strict_block_length = 1,
	};
	struct cryptocop_tfrm_ctx csum_ctx = {
		.previous_src = src_none,
		.current_src = src_none,
		.requires_padding = 1,
		.strict_block_length = 0,
		.unit_no = src_csum};
	struct cryptocop_tfrm_cfg *tcfg = operation->tfrm_op.tfrm_cfg;

	unsigned int indata_ix = 0;

	/* iovec accounting. */
	int iniov_ix = 0;
	int iniov_offset = 0;

	/* Operation descriptor cfg traversal pointer. */
	struct cryptocop_desc *odsc;

	int failed = 0;
	/* List heads for allocated descriptors. */
	struct cryptocop_dma_desc out_cdesc_head = {0};
	struct cryptocop_dma_desc in_cdesc_head = {0};

	struct cryptocop_dma_desc *current_out_cdesc = &out_cdesc_head;
	struct cryptocop_dma_desc *current_in_cdesc = &in_cdesc_head;

	struct cryptocop_tfrm_ctx *output_tc = NULL;
	void *iop_alloc_ptr;

	assert(operation != NULL);
	assert(int_op != NULL);

	DEBUG(printk("cryptocop_setup_dma_list: start\n"));
	DEBUG(print_cryptocop_operation(operation));

	sess = get_session(operation->sid);
	if (!sess) {
		DEBUG_API(printk("cryptocop_setup_dma_list: no session found for operation.\n"));
		failed = -EINVAL;
		goto error_cleanup;
	}
	iop_alloc_ptr = kmalloc(DESCR_ALLOC_PAD + sizeof(struct cryptocop_int_operation), alloc_flag);
	if (!iop_alloc_ptr) {
		DEBUG_API(printk("cryptocop_setup_dma_list: kmalloc cryptocop_int_operation\n"));
		failed = -ENOMEM;
		goto error_cleanup;
	}
	(*int_op) = (struct cryptocop_int_operation*)(((unsigned long int)(iop_alloc_ptr + DESCR_ALLOC_PAD + offsetof(struct cryptocop_int_operation, ctx_out)) & ~0x0000001F) - offsetof(struct cryptocop_int_operation, ctx_out));
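	/* As with the DMA descriptors, ctx_out (and ctx_in, which follows it
	 * directly) must be 32-byte aligned, so the allocation is padded with
	 * DESCR_ALLOC_PAD bytes and the struct is placed so that the ctx_out
	 * member lands on such a boundary. */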
	DEBUG(memset((*int_op), 0xff, sizeof(struct cryptocop_int_operation)));
	(*int_op)->alloc_ptr = iop_alloc_ptr;
	DEBUG(printk("cryptocop_setup_dma_list: *int_op=0x%p, alloc_ptr=0x%p\n", *int_op, (*int_op)->alloc_ptr));

	(*int_op)->sid = operation->sid;
	(*int_op)->cdesc_out = NULL;
	(*int_op)->cdesc_in = NULL;
	(*int_op)->tdes_mode = cryptocop_3des_ede;
	(*int_op)->csum_mode = cryptocop_csum_le;
	(*int_op)->ddesc_out = NULL;
	(*int_op)->ddesc_in = NULL;

	/* Scan operation->tfrm_op.tfrm_cfg for bad configuration and set up the local contexts. */
	if (!tcfg) {
		DEBUG_API(printk("cryptocop_setup_dma_list: no configured transforms in operation.\n"));
		failed = -EINVAL;
		goto error_cleanup;
	}
	while (tcfg) {
		tctx = get_transform_ctx(sess, tcfg->tid);
		if (!tctx) {
			DEBUG_API(printk("cryptocop_setup_dma_list: no transform id %d in session.\n", tcfg->tid));
			failed = -EINVAL;
			goto error_cleanup;
		}
		if (tcfg->inject_ix > operation->tfrm_op.outlen){
			DEBUG_API(printk("cryptocop_setup_dma_list: transform id %d inject_ix (%d) > operation->tfrm_op.outlen(%d)", tcfg->tid, tcfg->inject_ix, operation->tfrm_op.outlen));
			failed = -EINVAL;
			goto error_cleanup;
		}
		switch (tctx->init.alg){
		case cryptocop_alg_mem2mem:
			if (cipher_ctx.tcfg != NULL){
				DEBUG_API(printk("cryptocop_setup_dma_list: multiple ciphers in operation.\n"));
				failed = -EINVAL;
				goto error_cleanup;
			}
			/* mem2mem is handled as a NULL cipher. */
			cipher_ctx.cbcmode = 0;
			cipher_ctx.decrypt = 0;
			cipher_ctx.blocklength = 1;
			cipher_ctx.ciph_conf = 0;
			cipher_ctx.unit_no = src_dma;
			cipher_ctx.tcfg = tcfg;
			cipher_ctx.tctx = tctx;
			break;
		case cryptocop_alg_des:
		case cryptocop_alg_3des:
		case cryptocop_alg_aes:
			if (cipher_ctx.tcfg != NULL){
				DEBUG_API(printk("cryptocop_setup_dma_list: multiple ciphers in operation.\n"));
				failed = -EINVAL;
				goto error_cleanup;
			}
			cipher_ctx.tcfg = tcfg;
			cipher_ctx.tctx = tctx;
			if (cipher_ctx.tcfg->flags & CRYPTOCOP_DECRYPT){
				cipher_ctx.decrypt = 1;
			}
			switch (tctx->init.cipher_mode) {
			case cryptocop_cipher_mode_ecb:
				cipher_ctx.cbcmode = 0;
				break;
			case cryptocop_cipher_mode_cbc:
				cipher_ctx.cbcmode = 1;
				break;
			default:
				DEBUG_API(printk("cryptocop_setup_dma_list: cipher_ctx, bad cipher mode==%d\n", tctx->init.cipher_mode));
				failed = -EINVAL;
				goto error_cleanup;
			}
			DEBUG(printk("cryptocop_setup_dma_list: cipher_ctx, set CBC mode==%d\n", cipher_ctx.cbcmode));
			switch (tctx->init.alg){
			case cryptocop_alg_des:
				cipher_ctx.ciph_conf = 0;
				cipher_ctx.unit_no = src_des;
				cipher_ctx.blocklength = DES_BLOCK_LENGTH;
				break;
			case cryptocop_alg_3des:
				cipher_ctx.ciph_conf = 1;
				cipher_ctx.unit_no = src_des;
				cipher_ctx.blocklength = DES_BLOCK_LENGTH;
				break;
			case cryptocop_alg_aes:
				cipher_ctx.ciph_conf = 2;
				cipher_ctx.unit_no = src_aes;
				cipher_ctx.blocklength = AES_BLOCK_LENGTH;
				break;
			default:
				panic("cryptocop_setup_dma_list: impossible algorithm %d\n", tctx->init.alg);
			}
			(*int_op)->tdes_mode = tctx->init.tdes_mode;
			break;
		case cryptocop_alg_md5:
		case cryptocop_alg_sha1:
			if (digest_ctx.tcfg != NULL){
				DEBUG_API(printk("cryptocop_setup_dma_list: multiple digests in operation.\n"));
				failed = -EINVAL;
				goto error_cleanup;
			}
			digest_ctx.tcfg = tcfg;
			digest_ctx.tctx = tctx;
			digest_ctx.hash_mode = 0; /* Don't use explicit IV in this API. */
			switch (tctx->init.alg){
			case cryptocop_alg_md5:
				digest_ctx.blocklength = MD5_BLOCK_LENGTH;
				digest_ctx.unit_no = src_md5;
				digest_ctx.hash_conf = 1; /* 1 => MD-5 */
				break;
			case cryptocop_alg_sha1:
				digest_ctx.blocklength = SHA1_BLOCK_LENGTH;
				digest_ctx.unit_no = src_sha1;
				digest_ctx.hash_conf = 0; /* 0 => SHA-1 */
				break;
			default:
				panic("cryptocop_setup_dma_list: impossible digest algorithm\n");
			}
			break;
		case cryptocop_alg_csum:
			if (csum_ctx.tcfg != NULL){
				DEBUG_API(printk("cryptocop_setup_dma_list: multiple checksums in operation.\n"));
				failed = -EINVAL;
				goto error_cleanup;
			}
			(*int_op)->csum_mode = tctx->init.csum_mode;
			csum_ctx.tcfg = tcfg;
			csum_ctx.tctx = tctx;
			break;
		default:
			DEBUG_API(printk("cryptocop_setup_dma_list: invalid algorithm %d specified in tfrm %d.\n", tctx->init.alg, tcfg->tid));
			failed = -EINVAL;
			goto error_cleanup;
		}
		tcfg = tcfg->next;
	}
	/* Download key if a cipher is used. */
	if (cipher_ctx.tcfg && (cipher_ctx.tctx->init.alg != cryptocop_alg_mem2mem)){
		struct cryptocop_dma_desc *key_desc = NULL;

		failed = setup_key_dl_desc(&cipher_ctx, &key_desc, alloc_flag);
		if (failed) {
			DEBUG_API(printk("cryptocop_setup_dma_list: setup key dl\n"));
			goto error_cleanup;
		}
		current_out_cdesc->next = key_desc;
		current_out_cdesc = key_desc;
		indata_ix += (unsigned int)(key_desc->dma_descr->after - key_desc->dma_descr->buf);
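
		/* indata_ix counts bytes put on the DMA out list ahead of the
		 * payload (key and IV downloads); a transform's start_ix below
		 * is taken from it so that this prefix can be skipped. */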
		/* Download explicit IV if a cipher is used and CBC mode and explicit IV selected. */
		if ((cipher_ctx.tctx->init.cipher_mode == cryptocop_cipher_mode_cbc) && (cipher_ctx.tcfg->flags & CRYPTOCOP_EXPLICIT_IV)) {
			struct cryptocop_dma_desc *iv_desc = NULL;

			DEBUG(printk("cryptocop_setup_dma_list: setup cipher CBC IV descriptor.\n"));

			failed = setup_cipher_iv_desc(&cipher_ctx, &iv_desc, alloc_flag);
			if (failed) {
				DEBUG_API(printk("cryptocop_setup_dma_list: CBC IV descriptor.\n"));
				goto error_cleanup;
			}
			current_out_cdesc->next = iv_desc;
			current_out_cdesc = iv_desc;
			indata_ix += (unsigned int)(iv_desc->dma_descr->after - iv_desc->dma_descr->buf);
		}
	}

	/* Process descriptors. */
	odsc = operation->tfrm_op.desc;
	while (odsc) {
		struct cryptocop_desc_cfg *dcfg = odsc->cfg;
		struct strcop_meta_out meta_out = {0};
		size_t desc_len = odsc->length;
		int active_count, eop_needed_count;

		DEBUG(printk("cryptocop_setup_dma_list: parsing an operation descriptor\n"));

		while (dcfg) {
			struct cryptocop_tfrm_ctx *tc = NULL;

			DEBUG(printk("cryptocop_setup_dma_list: parsing an operation descriptor configuration.\n"));
			/* Get the local context for the transform and mark it as the output unit if it produces output. */
			if (digest_ctx.tcfg && (digest_ctx.tcfg->tid == dcfg->tid)){
				tc = &digest_ctx;
			} else if (cipher_ctx.tcfg && (cipher_ctx.tcfg->tid == dcfg->tid)){
				tc = &cipher_ctx;
			} else if (csum_ctx.tcfg && (csum_ctx.tcfg->tid == dcfg->tid)){
				tc = &csum_ctx;
			} else {
				DEBUG_API(printk("cryptocop_setup_dma_list: invalid transform %d specified in descriptor.\n", dcfg->tid));
				failed = -EINVAL;
				goto error_cleanup;
			}
			if (tc->done) {
				DEBUG_API(printk("cryptocop_setup_dma_list: completed transform %d reused.\n", dcfg->tid));
				failed = -EINVAL;
				goto error_cleanup;
			}
			if (!tc->active) {
				tc->start_ix = indata_ix;
				tc->active = 1;
			}

			tc->previous_src = tc->current_src;
			tc->prev_src = tc->curr_src;
			/* Map source unit id to DMA source config. */
			switch (dcfg->src){
			case cryptocop_source_dma:
				tc->current_src = src_dma;
				break;
			case cryptocop_source_des:
				tc->current_src = src_des;
				break;
			case cryptocop_source_3des:
				tc->current_src = src_des;
				break;
			case cryptocop_source_aes:
				tc->current_src = src_aes;
				break;
			case cryptocop_source_md5:
			case cryptocop_source_sha1:
			case cryptocop_source_csum:
			case cryptocop_source_none:
			default:
				/* We do not allow using accumulating style units
				 * (SHA-1, MD5, checksum) as sources to other units. */
				DEBUG_API(printk("cryptocop_setup_dma_list: bad unit source configured %d.\n", dcfg->src));
				failed = -EINVAL;
				goto error_cleanup;
			}
			if (tc->current_src != src_dma) {
				/* Find the unit we are sourcing from. */
				if (digest_ctx.unit_no == tc->current_src){
					tc->curr_src = &digest_ctx;
				} else if (cipher_ctx.unit_no == tc->current_src){
					tc->curr_src = &cipher_ctx;
				} else if (csum_ctx.unit_no == tc->current_src){
					tc->curr_src = &csum_ctx;
				}
				if ((tc->curr_src == tc) && (tc->unit_no != src_dma)){
					DEBUG_API(printk("cryptocop_setup_dma_list: unit %d configured to source from itself.\n", tc->unit_no));
					failed = -EINVAL;
					goto error_cleanup;
				}
			} else {
				tc->curr_src = NULL;
			}

			/* Detect source switch. */
			DEBUG(printk("cryptocop_setup_dma_list: tc->active=%d tc->unit_no=%d tc->current_src=%d tc->previous_src=%d, tc->curr_src=0x%p, tc->prev_src=0x%p\n", tc->active, tc->unit_no, tc->current_src, tc->previous_src, tc->curr_src, tc->prev_src));
			if (tc->active && (tc->current_src != tc->previous_src)) {
				/* Only allow source switch when both the old source unit and the new one have
				 * no pending data to process (i.e. the consumed length must be a multiple of the
				 * transform blocklength). */
				/* Note: if the src == NULL we are actually sourcing from DMA out. */
				if (((tc->prev_src != NULL) && (tc->prev_src->consumed % tc->prev_src->blocklength)) ||
				    ((tc->curr_src != NULL) && (tc->curr_src->consumed % tc->curr_src->blocklength)))
				{
					DEBUG_API(printk("cryptocop_setup_dma_list: can only disconnect from or connect to a unit on a multiple of the blocklength, old: cons=%d, prod=%d, block=%d, new: cons=%d prod=%d, block=%d.\n", tc->prev_src ? tc->prev_src->consumed : INT_MIN, tc->prev_src ? tc->prev_src->produced : INT_MIN, tc->prev_src ? tc->prev_src->blocklength : INT_MIN, tc->curr_src ? tc->curr_src->consumed : INT_MIN, tc->curr_src ? tc->curr_src->produced : INT_MIN, tc->curr_src ? tc->curr_src->blocklength : INT_MIN));
					failed = -EINVAL;
					goto error_cleanup;
				}
			}
			/* Detect unit deactivation. */
			if (dcfg->last) {
				/* Length check of this is handled below. */
				tc->done = 1;
			}
			dcfg = dcfg->next;
		} /* while (dcfg) */
		DEBUG(printk("cryptocop_setup_dma_list: parsing operation descriptor configuration complete.\n"));

		if (cipher_ctx.active && (cipher_ctx.curr_src != NULL) && !cipher_ctx.curr_src->active){
			DEBUG_API(printk("cryptocop_setup_dma_list: cipher source from inactive unit %d\n", cipher_ctx.curr_src->unit_no));
			failed = -EINVAL;
			goto error_cleanup;
		}
		if (digest_ctx.active && (digest_ctx.curr_src != NULL) && !digest_ctx.curr_src->active){
			DEBUG_API(printk("cryptocop_setup_dma_list: digest source from inactive unit %d\n", digest_ctx.curr_src->unit_no));
			failed = -EINVAL;
			goto error_cleanup;
		}
		if (csum_ctx.active && (csum_ctx.curr_src != NULL) && !csum_ctx.curr_src->active){
			DEBUG_API(printk("cryptocop_setup_dma_list: csum source from inactive unit %d\n", csum_ctx.curr_src->unit_no));
			failed = -EINVAL;
			goto error_cleanup;
		}

		/* Update consumed and produced lengths.

		   The consumed length accounting here is actually cheating.  If a unit sources from
		   DMA (or from any other unit that processes data in blocks of one octet) it is
		   correct, but if it sources from a block processing unit, i.e. a cipher, it will be
		   temporarily incorrect at some times.  However, since the HW only allows changing
		   source to or from a block processing unit when that unit has processed an exact
		   multiple of its block length, the end result will be correct.
		   Beware that if the source change restriction changes, this code will need to be
		   (much) reworked. */
		DEBUG(printk("cryptocop_setup_dma_list: desc->length=%d, desc_len=%d.\n", odsc->length, desc_len));

		if (csum_ctx.active) {
			csum_ctx.consumed += desc_len;
			if (csum_ctx.done) {
				csum_ctx.produced = 2;
			}
			DEBUG(printk("cryptocop_setup_dma_list: csum_ctx producing: consumed=%d, produced=%d, blocklength=%d.\n", csum_ctx.consumed, csum_ctx.produced, csum_ctx.blocklength));
		}
		if (digest_ctx.active) {
			digest_ctx.consumed += desc_len;
			if (digest_ctx.done) {
				if (digest_ctx.unit_no == src_md5) {
					digest_ctx.produced = MD5_STATE_LENGTH;
				} else {
					digest_ctx.produced = SHA1_STATE_LENGTH;
				}
			}
			DEBUG(printk("cryptocop_setup_dma_list: digest_ctx producing: consumed=%d, produced=%d, blocklength=%d.\n", digest_ctx.consumed, digest_ctx.produced, digest_ctx.blocklength));
		}
		if (cipher_ctx.active) {
			/* Ciphers are allowed only to source from DMA out.  That is filtered above. */
			assert(cipher_ctx.current_src == src_dma);
			cipher_ctx.consumed += desc_len;
			cipher_ctx.produced = cipher_ctx.blocklength * (cipher_ctx.consumed / cipher_ctx.blocklength);
			if (cipher_ctx.cbcmode && !(cipher_ctx.tcfg->flags & CRYPTOCOP_EXPLICIT_IV) && cipher_ctx.produced){
				cipher_ctx.produced -= cipher_ctx.blocklength; /* Compensate for CBC IV. */
			}
			DEBUG(printk("cryptocop_setup_dma_list: cipher_ctx producing: consumed=%d, produced=%d, blocklength=%d.\n", cipher_ctx.consumed, cipher_ctx.produced, cipher_ctx.blocklength));
		}
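
		/* Worked example: AES-CBC (blocklength 16) without explicit IV
		 * and consumed == 40 so far: produced = 16*(40/16) = 32, minus
		 * one blocklength for the in-line IV leaves 16 bytes of
		 * ciphertext delivered; the trailing 8 bytes wait for more
		 * input. */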
		/* Setup the DMA out descriptors. */
		/* Configure the metadata. */
		active_count = 0;
		eop_needed_count = 0;
		if (cipher_ctx.active) {
			++active_count;
			if (cipher_ctx.unit_no == src_dma){
				/* mem2mem */
				meta_out.ciphsel = src_none;
			} else {
				meta_out.ciphsel = cipher_ctx.current_src;
			}
			meta_out.ciphconf = cipher_ctx.ciph_conf;
			meta_out.cbcmode = cipher_ctx.cbcmode;
			meta_out.decrypt = cipher_ctx.decrypt;
			DEBUG(printk("set ciphsel=%d ciphconf=%d cbcmode=%d decrypt=%d\n", meta_out.ciphsel, meta_out.ciphconf, meta_out.cbcmode, meta_out.decrypt));
			if (cipher_ctx.done) ++eop_needed_count;
		} else {
			meta_out.ciphsel = src_none;
		}

		if (digest_ctx.active) {
			++active_count;
			meta_out.hashsel = digest_ctx.current_src;
			meta_out.hashconf = digest_ctx.hash_conf;
			meta_out.hashmode = 0; /* Explicit mode is not used here. */
			DEBUG(printk("set hashsel=%d hashconf=%d hashmode=%d\n", meta_out.hashsel, meta_out.hashconf, meta_out.hashmode));
			if (digest_ctx.done) {
				assert(digest_ctx.pad_descs == NULL);
				failed = create_pad_descriptor(&digest_ctx, &digest_ctx.pad_descs, alloc_flag);
				if (failed) {
					DEBUG_API(printk("cryptocop_setup_dma_list: failed digest pad creation.\n"));
					goto error_cleanup;
				}
			}
		} else {
			meta_out.hashsel = src_none;
		}

		if (csum_ctx.active) {
			++active_count;
			meta_out.csumsel = csum_ctx.current_src;
			if (csum_ctx.done) {
				assert(csum_ctx.pad_descs == NULL);
				failed = create_pad_descriptor(&csum_ctx, &csum_ctx.pad_descs, alloc_flag);
				if (failed) {
					DEBUG_API(printk("cryptocop_setup_dma_list: failed csum pad creation.\n"));
					goto error_cleanup;
				}
			}
		} else {
			meta_out.csumsel = src_none;
		}
		DEBUG(printk("cryptocop_setup_dma_list: %d eop needed, %d active units\n", eop_needed_count, active_count));
		/* Setup DMA out descriptors for the indata. */
		failed = create_output_descriptors(operation, &iniov_ix, &iniov_offset, desc_len, &current_out_cdesc, &meta_out, alloc_flag);
		if (failed) {
			DEBUG_API(printk("cryptocop_setup_dma_list: create_output_descriptors %d\n", failed));
			goto error_cleanup;
		}
		/* Setup out EOP.  If there are active units that are not done here they cannot get an
		 * EOP, so we must set up a zero length descriptor on the DMA out list to signal EOP
		 * only to the done units.  If there is a pad descriptor, the padded unit gets its
		 * EOP from that descriptor instead.
		 */
		assert(active_count >= eop_needed_count);
		assert((eop_needed_count == 0) || (eop_needed_count == 1));
		if (eop_needed_count) {
			/* This means that the bulk operation (cipher/m2m) is terminated. */
			if (active_count > 1) {
				/* Use zero length EOP descriptor. */
				struct cryptocop_dma_desc *ed = alloc_cdesc(alloc_flag);
				struct strcop_meta_out ed_mo = {0};
				if (!ed) {
					DEBUG_API(printk("cryptocop_setup_dma_list: alloc EOP descriptor for cipher\n"));
					failed = -ENOMEM;
					goto error_cleanup;
				}
				assert(cipher_ctx.active && cipher_ctx.done);

				if (cipher_ctx.unit_no == src_dma){
					/* mem2mem */
					ed_mo.ciphsel = src_none;
				} else {
					ed_mo.ciphsel = cipher_ctx.current_src;
				}
				ed_mo.ciphconf = cipher_ctx.ciph_conf;
				ed_mo.cbcmode = cipher_ctx.cbcmode;
				ed_mo.decrypt = cipher_ctx.decrypt;

				ed->free_buf = NULL;
				ed->dma_descr->wait = 1;
				ed->dma_descr->out_eop = 1;

				ed->dma_descr->buf = (char*)virt_to_phys(&ed); /* Use any valid physical address for zero length descriptor. */
				ed->dma_descr->after = ed->dma_descr->buf;
				ed->dma_descr->md = REG_TYPE_CONV(unsigned short int, struct strcop_meta_out, ed_mo);
				current_out_cdesc->next = ed;
				current_out_cdesc = ed;
			} else {
				/* Set EOP in the current out descriptor since the only active module is
				 * the one needing the EOP. */
				current_out_cdesc->dma_descr->out_eop = 1;
			}
		}

		if (cipher_ctx.done && cipher_ctx.active) cipher_ctx.active = 0;
		if (digest_ctx.done && digest_ctx.active) digest_ctx.active = 0;
		if (csum_ctx.done && csum_ctx.active) csum_ctx.active = 0;
		indata_ix += odsc->length;
		odsc = odsc->next;
	} /* while (odsc) */ /* Process descriptors. */
	DEBUG(printk("cryptocop_setup_dma_list: done parsing operation descriptors\n"));
	if (cipher_ctx.tcfg && (cipher_ctx.active || !cipher_ctx.done)){
		DEBUG_API(printk("cryptocop_setup_dma_list: cipher operation not terminated.\n"));
		failed = -EINVAL;
		goto error_cleanup;
	}
	if (digest_ctx.tcfg && (digest_ctx.active || !digest_ctx.done)){
		DEBUG_API(printk("cryptocop_setup_dma_list: digest operation not terminated.\n"));
		failed = -EINVAL;
		goto error_cleanup;
	}
	if (csum_ctx.tcfg && (csum_ctx.active || !csum_ctx.done)){
		DEBUG_API(printk("cryptocop_setup_dma_list: csum operation not terminated.\n"));
		failed = -EINVAL;
		goto error_cleanup;
	}

	failed = append_input_descriptors(operation, &current_in_cdesc, &current_out_cdesc, &cipher_ctx, alloc_flag);
	if (failed) {
		DEBUG_API(printk("cryptocop_setup_dma_list: append_input_descriptors cipher_ctx %d\n", failed));
		goto error_cleanup;
	}
	failed = append_input_descriptors(operation, &current_in_cdesc, &current_out_cdesc, &digest_ctx, alloc_flag);
	if (failed) {
		DEBUG_API(printk("cryptocop_setup_dma_list: append_input_descriptors digest_ctx %d\n", failed));
		goto error_cleanup;
	}
	failed = append_input_descriptors(operation, &current_in_cdesc, &current_out_cdesc, &csum_ctx, alloc_flag);
	if (failed) {
		DEBUG_API(printk("cryptocop_setup_dma_list: append_input_descriptors csum_ctx %d\n", failed));
		goto error_cleanup;
	}

	DEBUG(printk("cryptocop_setup_dma_list: int_op=0x%p, *int_op=0x%p\n", int_op, *int_op));
	(*int_op)->cdesc_out = out_cdesc_head.next;
	(*int_op)->cdesc_in = in_cdesc_head.next;
	DEBUG(printk("cryptocop_setup_dma_list: out_cdesc_head=0x%p in_cdesc_head=0x%p\n", (*int_op)->cdesc_out, (*int_op)->cdesc_in));

	setup_descr_chain(out_cdesc_head.next);
	setup_descr_chain(in_cdesc_head.next);

	/* Last but not least: mark the last DMA in descriptor for INTR and EOL, and the
	 * last DMA out descriptor for EOL.
	 */
	current_in_cdesc->dma_descr->intr = 1;
	current_in_cdesc->dma_descr->eol = 1;
	current_out_cdesc->dma_descr->eol = 1;

	/* Setup DMA contexts. */
	(*int_op)->ctx_out.next = NULL;
	(*int_op)->ctx_out.eol = 1;
	(*int_op)->ctx_out.intr = 0;
	(*int_op)->ctx_out.store_mode = 0;
	(*int_op)->ctx_out.en = 0;
	(*int_op)->ctx_out.dis = 0;
	(*int_op)->ctx_out.md0 = 0;
	(*int_op)->ctx_out.md1 = 0;
	(*int_op)->ctx_out.md2 = 0;
	(*int_op)->ctx_out.md3 = 0;
	(*int_op)->ctx_out.md4 = 0;
	(*int_op)->ctx_out.saved_data = (dma_descr_data*)virt_to_phys((*int_op)->cdesc_out->dma_descr);
	(*int_op)->ctx_out.saved_data_buf = (*int_op)->cdesc_out->dma_descr->buf; /* Already a physical address. */

	(*int_op)->ctx_in.next = NULL;
	(*int_op)->ctx_in.eol = 1;
	(*int_op)->ctx_in.intr = 0;
	(*int_op)->ctx_in.store_mode = 0;
	(*int_op)->ctx_in.en = 0;
	(*int_op)->ctx_in.dis = 0;
	(*int_op)->ctx_in.md0 = 0;
	(*int_op)->ctx_in.md1 = 0;
	(*int_op)->ctx_in.md2 = 0;
	(*int_op)->ctx_in.md3 = 0;
	(*int_op)->ctx_in.md4 = 0;

	(*int_op)->ctx_in.saved_data = (dma_descr_data*)virt_to_phys((*int_op)->cdesc_in->dma_descr);
	(*int_op)->ctx_in.saved_data_buf = (*int_op)->cdesc_in->dma_descr->buf; /* Already a physical address. */

	DEBUG(printk("cryptocop_setup_dma_list: done\n"));
	return 0;

 error_cleanup:
	{
		/* Free all allocated resources. */
		struct cryptocop_dma_desc *tmp_cdesc;
		while (digest_ctx.pad_descs){
			tmp_cdesc = digest_ctx.pad_descs->next;
			free_cdesc(digest_ctx.pad_descs);
			digest_ctx.pad_descs = tmp_cdesc;
		}
		while (csum_ctx.pad_descs){
			tmp_cdesc = csum_ctx.pad_descs->next;
			free_cdesc(csum_ctx.pad_descs);
			csum_ctx.pad_descs = tmp_cdesc;
		}
		assert(cipher_ctx.pad_descs == NULL); /* The ciphers are never padded. */

		if (*int_op != NULL) delete_internal_operation(*int_op);
	}
	DEBUG_API(printk("cryptocop_setup_dma_list: done with error %d\n", failed));
	return failed;
}
static void delete_internal_operation(struct cryptocop_int_operation *iop)
{
	void *ptr = iop->alloc_ptr;
	struct cryptocop_dma_desc *cd = iop->cdesc_out;
	struct cryptocop_dma_desc *next;

	DEBUG(printk("delete_internal_operation: iop=0x%p, alloc_ptr=0x%p\n", iop, ptr));

	while (cd) {
		next = cd->next;
		free_cdesc(cd);
		cd = next;
	}
	cd = iop->cdesc_in;
	while (cd) {
		next = cd->next;
		free_cdesc(cd);
		cd = next;
	}
	kfree(ptr);
}
#define MD5_MIN_PAD_LENGTH (9)
#define MD5_PAD_LENGTH_FIELD_LENGTH (8)

static int create_md5_pad(int alloc_flag, unsigned long long hashed_length, char **pad, size_t *pad_length)
{
	size_t padlen = MD5_BLOCK_LENGTH - (hashed_length % MD5_BLOCK_LENGTH);
	unsigned char *p;
	int i;
	unsigned long long int bit_length = hashed_length << 3;

	if (padlen < MD5_MIN_PAD_LENGTH) padlen += MD5_BLOCK_LENGTH;

	p = kmalloc(padlen, alloc_flag);
	if (!p) return -ENOMEM;

	*p = 0x80;
	memset(p+1, 0, padlen - 1);

	DEBUG(printk("create_md5_pad: hashed_length=%lld bits == %lld bytes\n", bit_length, hashed_length));

	i = padlen - MD5_PAD_LENGTH_FIELD_LENGTH;
	while (bit_length != 0){
		p[i++] = bit_length % 0x100;
		bit_length /= 0x100;
	}

	*pad = (char*)p;
	*pad_length = padlen;

	return 0;
}
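
/*
 * Worked example: after hashing 10 bytes, padlen = 64 - 10 = 54 (>= 9, so no
 * extra block is needed).  The pad is 0x80, then 45 zero bytes, then the
 * 8-byte length field holding 10*8 = 80 = 0x50 bits, stored LSB first by the
 * loop above, i.e. in the little-endian order MD5 requires.
 */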
#define SHA1_MIN_PAD_LENGTH (9)
#define SHA1_PAD_LENGTH_FIELD_LENGTH (8)

static int create_sha1_pad(int alloc_flag, unsigned long long hashed_length, char **pad, size_t *pad_length)
{
	size_t padlen = SHA1_BLOCK_LENGTH - (hashed_length % SHA1_BLOCK_LENGTH);
	unsigned char *p;
	int i;
	unsigned long long int bit_length = hashed_length << 3;

	if (padlen < SHA1_MIN_PAD_LENGTH) padlen += SHA1_BLOCK_LENGTH;

	p = kmalloc(padlen, alloc_flag);
	if (!p) return -ENOMEM;

	*p = 0x80;
	memset(p+1, 0, padlen - 1);

	DEBUG(printk("create_sha1_pad: hashed_length=%lld bits == %lld bytes\n", bit_length, hashed_length));

	i = padlen - 1;
	while (bit_length != 0){
		p[i--] = bit_length % 0x100;
		bit_length /= 0x100;
	}

	*pad = (char*)p;
	*pad_length = padlen;

	return 0;
}
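
/*
 * SHA-1 padding differs from MD5 only in byte order: the loop above fills
 * the length field from the end of the pad backwards, which yields the
 * big-endian (most significant byte first) encoding SHA-1 requires.
 */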
static int transform_ok(struct cryptocop_transform_init *tinit)
{
	switch (tinit->alg){
	case cryptocop_alg_csum:
		switch (tinit->csum_mode){
		case cryptocop_csum_le:
		case cryptocop_csum_be:
			break;
		default:
			DEBUG_API(printk("transform_ok: Bad mode set for csum transform\n"));
			return -EINVAL;
		}
	case cryptocop_alg_mem2mem:
	case cryptocop_alg_md5:
	case cryptocop_alg_sha1:
		if (tinit->keylen != 0) {
			DEBUG_API(printk("transform_ok: non-zero keylength, %d, for a digest/csum algorithm\n", tinit->keylen));
			return -EINVAL; /* This check is a bit strict. */
		}
		break;
	case cryptocop_alg_des:
		if (tinit->keylen != 64) {
			DEBUG_API(printk("transform_ok: keylen %d invalid for DES\n", tinit->keylen));
			return -EINVAL;
		}
		break;
	case cryptocop_alg_3des:
		if (tinit->keylen != 192) {
			DEBUG_API(printk("transform_ok: keylen %d invalid for 3DES\n", tinit->keylen));
			return -EINVAL;
		}
		break;
	case cryptocop_alg_aes:
		if (tinit->keylen != 128 && tinit->keylen != 192 && tinit->keylen != 256) {
			DEBUG_API(printk("transform_ok: keylen %d invalid for AES\n", tinit->keylen));
			return -EINVAL;
		}
		break;
	case cryptocop_no_alg:
	default:
		DEBUG_API(printk("transform_ok: no such algorithm %d\n", tinit->alg));
		return -EINVAL;
	}

	switch (tinit->alg){
	case cryptocop_alg_des:
	case cryptocop_alg_3des:
	case cryptocop_alg_aes:
		if (tinit->cipher_mode != cryptocop_cipher_mode_ecb && tinit->cipher_mode != cryptocop_cipher_mode_cbc) return -EINVAL;
	default:
		break;
	}
	return 0;
}
int cryptocop_new_session(cryptocop_session_id *sid, struct cryptocop_transform_init *tinit, int alloc_flag)
{
	struct cryptocop_session *sess;
	struct cryptocop_transform_init *tfrm_in = tinit;
	struct cryptocop_transform_init *tmp_in;
	int no_tfrms = 0;
	int i;
	unsigned long int flags;

	init_stream_coprocessor(); /* For safety if we are called early. */

	while (tfrm_in){
		int err;
		++no_tfrms;
		if ((err = transform_ok(tfrm_in))) {
			DEBUG_API(printk("cryptocop_new_session, bad transform\n"));
			return err;
		}
		tfrm_in = tfrm_in->next;
	}
	if (0 == no_tfrms) {
		DEBUG_API(printk("cryptocop_new_session, no transforms specified\n"));
		return -EINVAL;
	}

	sess = kmalloc(sizeof(struct cryptocop_session), alloc_flag);
	if (!sess){
		DEBUG_API(printk("cryptocop_new_session, kmalloc cryptocop_session\n"));
		return -ENOMEM;
	}

	sess->tfrm_ctx = kmalloc(no_tfrms * sizeof(struct cryptocop_transform_ctx), alloc_flag);
	if (!sess->tfrm_ctx) {
		DEBUG_API(printk("cryptocop_new_session, kmalloc cryptocop_transform_ctx\n"));
		kfree(sess);
		return -ENOMEM;
	}

	tfrm_in = tinit;
	for (i = 0; i < no_tfrms; i++){
		tmp_in = tfrm_in->next;
		while (tmp_in){
			if (tmp_in->tid == tfrm_in->tid) {
				DEBUG_API(printk("cryptocop_new_session, duplicate transform ids\n"));
				kfree(sess->tfrm_ctx);
				kfree(sess);
				return -EINVAL;
			}
			tmp_in = tmp_in->next;
		}
		memcpy(&sess->tfrm_ctx[i].init, tfrm_in, sizeof(struct cryptocop_transform_init));
		sess->tfrm_ctx[i].dec_key_set = 0;
		sess->tfrm_ctx[i].next = &sess->tfrm_ctx[i] + 1;

		tfrm_in = tfrm_in->next;
	}
	sess->tfrm_ctx[i-1].next = NULL;

	spin_lock_irqsave(&cryptocop_sessions_lock, flags);
	sess->sid = next_sid;
	++next_sid;
	/* TODO: if we are really paranoid we should do a duplicate check to handle sid wraparound.
	 * OTOH 2^64 is a really large number of sessions. */
	if (next_sid == 0) next_sid = 1;

	/* Prepend to session list. */
	sess->next = cryptocop_sessions;
	cryptocop_sessions = sess;
	spin_unlock_irqrestore(&cryptocop_sessions_lock, flags);

	*sid = sess->sid;
	return 0;
}
int cryptocop_free_session(cryptocop_session_id sid)
{
	struct cryptocop_transform_ctx *tc;
	struct cryptocop_session *sess = NULL;
	struct cryptocop_session *psess = NULL;
	unsigned long int flags;
	int i;
	LIST_HEAD(remove_list);
	struct list_head *node, *tmp;
	struct cryptocop_prio_job *pj;

	DEBUG(printk("cryptocop_free_session: sid=%lld\n", sid));

	spin_lock_irqsave(&cryptocop_sessions_lock, flags);
	sess = cryptocop_sessions;
	while (sess && sess->sid != sid){
		psess = sess;
		sess = sess->next;
	}
	if (sess){
		if (psess){
			psess->next = sess->next;
		} else {
			cryptocop_sessions = sess->next;
		}
	}
	spin_unlock_irqrestore(&cryptocop_sessions_lock, flags);

	if (!sess) return -EINVAL;

	/* Remove queued jobs. */
	spin_lock_irqsave(&cryptocop_job_queue_lock, flags);

	for (i = 0; i < cryptocop_prio_no_prios; i++){
		if (!list_empty(&(cryptocop_job_queues[i].jobs))){
			list_for_each_safe(node, tmp, &(cryptocop_job_queues[i].jobs)) {
				pj = list_entry(node, struct cryptocop_prio_job, node);
				if (pj->oper->sid == sid) {
					list_move_tail(node, &remove_list);
				}
			}
		}
	}
	spin_unlock_irqrestore(&cryptocop_job_queue_lock, flags);

	list_for_each_safe(node, tmp, &remove_list) {
		list_del(node);
		pj = list_entry(node, struct cryptocop_prio_job, node);
		pj->oper->operation_status = -EAGAIN; /* EAGAIN is not ideal for a terminated job/session, but it's the best choice I know of. */
		DEBUG(printk("cryptocop_free_session: pj=0x%p, pj->oper=0x%p, pj->iop=0x%p\n", pj, pj->oper, pj->iop));
		pj->oper->cb(pj->oper, pj->oper->cb_data);
		delete_internal_operation(pj->iop);
		kfree(pj);
	}

	tc = sess->tfrm_ctx;
	/* Erase keying data. */
	while (tc){
		DEBUG(printk("cryptocop_free_session: memset keys, tfrm id=%d\n", tc->init.tid));
		memset(tc->init.key, 0xff, CRYPTOCOP_MAX_KEY_LENGTH);
		memset(tc->dec_key, 0xff, CRYPTOCOP_MAX_KEY_LENGTH);
		tc = tc->next;
	}
	kfree(sess->tfrm_ctx);
	kfree(sess);

	return 0;
}
static struct cryptocop_session *get_session(cryptocop_session_id sid)
{
	struct cryptocop_session *sess;
	unsigned long int flags;

	spin_lock_irqsave(&cryptocop_sessions_lock, flags);
	sess = cryptocop_sessions;
	while (sess && (sess->sid != sid)){
		sess = sess->next;
	}
	spin_unlock_irqrestore(&cryptocop_sessions_lock, flags);

	return sess;
}
static struct cryptocop_transform_ctx *get_transform_ctx(struct cryptocop_session *sess, cryptocop_tfrm_id tid)
{
	struct cryptocop_transform_ctx *tc = sess->tfrm_ctx;

	DEBUG(printk("get_transform_ctx, sess=0x%p, tid=%d\n", sess, tid));
	assert(sess != NULL);
	while (tc && tc->init.tid != tid){
		DEBUG(printk("tc=0x%p, tc->next=0x%p\n", tc, tc->next));
		tc = tc->next;
	}
	DEBUG(printk("get_transform_ctx, returning tc=0x%p\n", tc));
	return tc;
}
/* The AES s-transform matrix (s-box). */
static const u8 aes_sbox[256] = {
	99, 124, 119, 123, 242, 107, 111, 197, 48, 1, 103, 43, 254, 215, 171, 118,
	202, 130, 201, 125, 250, 89, 71, 240, 173, 212, 162, 175, 156, 164, 114, 192,
	183, 253, 147, 38, 54, 63, 247, 204, 52, 165, 229, 241, 113, 216, 49, 21,
	4, 199, 35, 195, 24, 150, 5, 154, 7, 18, 128, 226, 235, 39, 178, 117,
	9, 131, 44, 26, 27, 110, 90, 160, 82, 59, 214, 179, 41, 227, 47, 132,
	83, 209, 0, 237, 32, 252, 177, 91, 106, 203, 190, 57, 74, 76, 88, 207,
	208, 239, 170, 251, 67, 77, 51, 133, 69, 249, 2, 127, 80, 60, 159, 168,
	81, 163, 64, 143, 146, 157, 56, 245, 188, 182, 218, 33, 16, 255, 243, 210,
	205, 12, 19, 236, 95, 151, 68, 23, 196, 167, 126, 61, 100, 93, 25, 115,
	96, 129, 79, 220, 34, 42, 144, 136, 70, 238, 184, 20, 222, 94, 11, 219,
	224, 50, 58, 10, 73, 6, 36, 92, 194, 211, 172, 98, 145, 149, 228, 121,
	231, 200, 55, 109, 141, 213, 78, 169, 108, 86, 244, 234, 101, 122, 174, 8,
	186, 120, 37, 46, 28, 166, 180, 198, 232, 221, 116, 31, 75, 189, 139, 138,
	112, 62, 181, 102, 72, 3, 246, 14, 97, 53, 87, 185, 134, 193, 29, 158,
	225, 248, 152, 17, 105, 217, 142, 148, 155, 30, 135, 233, 206, 85, 40, 223,
	140, 161, 137, 13, 191, 230, 66, 104, 65, 153, 45, 15, 176, 84, 187, 22
};

/* AES has a 32 bit word round constant for each round in the
 * key schedule.  round_constant[i] is really Rcon[i+1] in FIPS-197.
 */
static u32 round_constant[11] = {
	0x01000000, 0x02000000, 0x04000000, 0x08000000,
	0x10000000, 0x20000000, 0x40000000, 0x80000000,
	0x1B000000, 0x36000000, 0x6C000000
};
/* Apply the s-box to each of the four octets in w. */
static u32 aes_ks_subword(const u32 w)
{
	u8 bytes[4];

	*(u32*)(&bytes[0]) = w;
	bytes[0] = aes_sbox[bytes[0]];
	bytes[1] = aes_sbox[bytes[1]];
	bytes[2] = aes_sbox[bytes[2]];
	bytes[3] = aes_sbox[bytes[3]];
	return *(u32*)(&bytes[0]);
}
/* The encrypt (forward) Rijndael key schedule algorithm pseudo code:
 * (Note that AES words are 32 bit long)
 *
 * KeyExpansion(byte key[4*Nk], word w[Nb*(Nr+1)], Nk){
 *   i = 0
 *   while (i < Nk) {
 *     w[i] = word(key[4*i, 4*i + 1, 4*i + 2, 4*i + 3])
 *     i = i + 1
 *   }
 *   i = Nk
 *
 *   while (i < (Nb * (Nr + 1))) {
 *     temp = w[i - 1]
 *     if ((i mod Nk) == 0) {
 *       temp = SubWord(RotWord(temp)) xor Rcon[i/Nk]
 *     }
 *     else if ((Nk > 6) && ((i mod Nk) == 4)) {
 *       temp = SubWord(temp)
 *     }
 *     w[i] = w[i - Nk] xor temp
 *     i = i + 1
 *   }
 * }
 *
 * RotWord(t) does an 8 bit cyclic shift left on a 32 bit word.
 * SubWord(t) applies the AES s-box individually to each octet
 * in a 32 bit word.
 *
 * For AES Nk can have the values 4, 6, and 8 (corresponding to
 * values for Nr of 10, 12, and 14).  Nb is always 4.
 *
 * To construct w[i], w[i - 1] and w[i - Nk] must be
 * available.  Consequently we must keep a state of the last Nk words
 * to be able to create the last round keys.
 */
static void get_aes_decrypt_key(unsigned char *dec_key, const unsigned char *key, unsigned int keylength)
{
	u32 temp;
	u32 w_ring[8]; /* nk is max 8, use elements 0..(nk - 1) as a ringbuffer */
	u8 w_last_ix;
	int i;
	u8 nr, nk;

	switch (keylength){
	case 128:
		nk = 4;
		nr = 10;
		break;
	case 192:
		nk = 6;
		nr = 12;
		break;
	case 256:
		nk = 8;
		nr = 14;
		break;
	default:
		panic("stream co-processor: bad aes key length in get_aes_decrypt_key\n");
	}

	/* Need to do host byte order correction here since key is byte oriented and the
	 * kx algorithm is word (u32) oriented. */
	for (i = 0; i < nk; i+=1) {
		w_ring[i] = be32_to_cpu(*(u32*)&key[4*i]);
	}

	w_last_ix = i - 1;
	while (i < (4 * (nr + 2))) {
		temp = w_ring[w_last_ix];
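		/* Every Nk:th word: RotWord (the 8 bit cyclic left shift
		 * below), SubWord, then XOR with the round constant, exactly
		 * as in the pseudo code above. */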
		if (!(i % nk)) {
			temp = (temp << 8) | (temp >> 24);
			temp = aes_ks_subword(temp);
			temp ^= round_constant[i/nk - 1];
		} else if ((nk > 6) && ((i % nk) == 4)) {
			temp = aes_ks_subword(temp);
		}
		w_last_ix = (w_last_ix + 1) % nk; /* This is the same as (i-Nk) mod Nk */
		temp ^= w_ring[w_last_ix];
		w_ring[w_last_ix] = temp;
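		/* Instead of the full w[] array from FIPS-197, only the last
		 * Nk words are kept: after the increment above,
		 * w_ring[w_last_ix] held w[i - Nk], which is XORed into temp
		 * and then overwritten with the new w[i]. */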
		/* We need the round keys for round Nr+1 and Nr+2 (round key
		 * Nr+2 is the round key beyond the last one used when
		 * encrypting).  Rounds are numbered starting from 0, Nr=10
		 * implies 11 rounds are used in encryption/decryption.
		 */
		if (i >= (4 * nr)) {
			/* Need to do host byte order correction here, the key
			 * is byte oriented. */
			*(u32*)dec_key = cpu_to_be32(temp);
			dec_key += 4;
		}
		++i;
	}
}
/**** Job/operation management. ****/

int cryptocop_job_queue_insert_csum(struct cryptocop_operation *operation)
{
	return cryptocop_job_queue_insert(cryptocop_prio_kernel_csum, operation);
}

int cryptocop_job_queue_insert_crypto(struct cryptocop_operation *operation)
{
	return cryptocop_job_queue_insert(cryptocop_prio_kernel, operation);
}

int cryptocop_job_queue_insert_user_job(struct cryptocop_operation *operation)
{
	return cryptocop_job_queue_insert(cryptocop_prio_user, operation);
}
static int cryptocop_job_queue_insert(cryptocop_queue_priority prio, struct cryptocop_operation *operation)
{
	int ret;
	struct cryptocop_prio_job *pj = NULL;
	unsigned long int flags;

	DEBUG(printk("cryptocop_job_queue_insert(%d, 0x%p)\n", prio, operation));

	if (!operation || !operation->cb){
		DEBUG_API(printk("cryptocop_job_queue_insert oper=0x%p, NULL operation or callback\n", operation));
		return -EINVAL;
	}

	if ((ret = cryptocop_job_setup(&pj, operation)) != 0){
		DEBUG_API(printk("cryptocop_job_queue_insert: job setup failed\n"));
		return ret;
	}

	spin_lock_irqsave(&cryptocop_job_queue_lock, flags);
	list_add_tail(&pj->node, &cryptocop_job_queues[prio].jobs);
	spin_unlock_irqrestore(&cryptocop_job_queue_lock, flags);

	/* Make sure a job is running. */
	cryptocop_start_job();
	return 0;
}
static void cryptocop_do_tasklet(unsigned long unused);
DECLARE_TASKLET (cryptocop_tasklet, cryptocop_do_tasklet, 0);
static void cryptocop_do_tasklet(unsigned long unused)
{
	struct list_head *node;
	struct cryptocop_prio_job *pj = NULL;
	unsigned long flags;

	DEBUG(printk("cryptocop_do_tasklet: entering\n"));

	do {
		spin_lock_irqsave(&cryptocop_completed_jobs_lock, flags);
		if (!list_empty(&cryptocop_completed_jobs)){
			node = cryptocop_completed_jobs.next;
			list_del(node);
			pj = list_entry(node, struct cryptocop_prio_job, node);
		} else {
			pj = NULL;
		}
		spin_unlock_irqrestore(&cryptocop_completed_jobs_lock, flags);
		if (pj) {
			assert(pj->oper != NULL);

			/* Notify consumer of operation completeness. */
			DEBUG(printk("cryptocop_do_tasklet: callback 0x%p, data 0x%p\n", pj->oper->cb, pj->oper->cb_data));

			pj->oper->operation_status = 0; /* Job is completed. */
			pj->oper->cb(pj->oper, pj->oper->cb_data);
			delete_internal_operation(pj->iop);
			kfree(pj);
		}
	} while (pj != NULL);

	DEBUG(printk("cryptocop_do_tasklet: exiting\n"));
}
static irqreturn_t
dma_done_interrupt(int irq, void *dev_id)
{
	struct cryptocop_prio_job *done_job;
	reg_dma_rw_ack_intr ack_intr = {
		.data = 1,
	};

	REG_WR(dma, IN_DMA_INST, rw_ack_intr, ack_intr);

	DEBUG(printk("cryptocop DMA done\n"));

	spin_lock(&running_job_lock);
	if (cryptocop_running_job == NULL){
		printk("stream co-processor got interrupt when not busy\n");
		spin_unlock(&running_job_lock);
		return IRQ_HANDLED;
	}
	done_job = cryptocop_running_job;
	cryptocop_running_job = NULL;
	spin_unlock(&running_job_lock);

	/* Start processing a job. */
	if (!spin_trylock(&cryptocop_process_lock)){
		DEBUG(printk("cryptocop irq handler, not starting a job\n"));
	} else {
		cryptocop_start_job();
		spin_unlock(&cryptocop_process_lock);
	}

	done_job->oper->operation_status = 0; /* Job is completed. */
	if (done_job->oper->fast_callback){
		/* This operation wants callback from interrupt. */
		done_job->oper->cb(done_job->oper, done_job->oper->cb_data);
		delete_internal_operation(done_job->iop);
		kfree(done_job);
	} else {
		spin_lock(&cryptocop_completed_jobs_lock);
		list_add_tail(&(done_job->node), &cryptocop_completed_jobs);
		spin_unlock(&cryptocop_completed_jobs_lock);
		tasklet_schedule(&cryptocop_tasklet);
	}

	DEBUG(printk("cryptocop leave irq handler\n"));
	return IRQ_HANDLED;
}
/* Setup interrupts and DMA channels. */
static int init_cryptocop(void)
{
	unsigned long flags;
	reg_dma_rw_cfg dma_cfg = {.en = 1};
	reg_dma_rw_intr_mask intr_mask_in = {.data = regk_dma_yes}; /* Only want descriptor interrupts from the DMA in channel. */
	reg_dma_rw_ack_intr ack_intr = {.data = 1, .in_eop = 1};
	reg_strcop_rw_cfg strcop_cfg = {
		.ipend = regk_strcop_little,
		.td1 = regk_strcop_e,
		.td2 = regk_strcop_d,
		.td3 = regk_strcop_e,
	};

	if (request_irq(DMA_IRQ, dma_done_interrupt, 0,
			"stream co-processor DMA", NULL))
		panic("request_irq stream co-processor irq dma9");

	(void)crisv32_request_dma(OUT_DMA, "strcop", DMA_PANIC_ON_ERROR,
				  0, dma_strp);
	(void)crisv32_request_dma(IN_DMA, "strcop", DMA_PANIC_ON_ERROR,
				  0, dma_strp);

	local_irq_save(flags);

	/* Reset and enable the cryptocop. */
	strcop_cfg.en = 0;
	REG_WR(strcop, regi_strcop, rw_cfg, strcop_cfg);
	strcop_cfg.en = 1;
	REG_WR(strcop, regi_strcop, rw_cfg, strcop_cfg);

	REG_WR(dma, IN_DMA_INST, rw_cfg, dma_cfg);	/* input DMA */
	REG_WR(dma, OUT_DMA_INST, rw_cfg, dma_cfg);	/* output DMA */

	/* Set up wordsize = 4 for DMAs. */
	DMA_WR_CMD(OUT_DMA_INST, regk_dma_set_w_size4);
	DMA_WR_CMD(IN_DMA_INST, regk_dma_set_w_size4);

	/* Enable interrupts. */
	REG_WR(dma, IN_DMA_INST, rw_intr_mask, intr_mask_in);

	/* Clear intr ack. */
	REG_WR(dma, IN_DMA_INST, rw_ack_intr, ack_intr);

	local_irq_restore(flags);

	return 0;
}
2000 /* Free used cryptocop hw resources (interrupt and DMA channels). */
2001 static void release_cryptocop(void)
2003 unsigned long flags;
2004 reg_dma_rw_cfg dma_cfg = {.en = 0};
2005 reg_dma_rw_intr_mask intr_mask_in = {0};
2006 reg_dma_rw_ack_intr ack_intr = { .data = 1, .in_eop = 1 };
2008 local_irq_save(flags);
2010 /* Clear intr ack. */
2011 REG_WR(dma, IN_DMA_INST, rw_ack_intr, ack_intr);
2014 REG_WR(dma, IN_DMA_INST, rw_cfg, dma_cfg); /* input DMA */
2015 REG_WR(dma, OUT_DMA_INST, rw_cfg, dma_cfg); /* output DMA */
2017 /* Disable interrupts. */
2018 REG_WR(dma, IN_DMA_INST, rw_intr_mask, intr_mask_in);
2020 local_irq_restore(flags);
2022 free_irq(DMA_IRQ, NULL);
2024 (void)crisv32_free_dma(OUT_DMA);
2025 (void)crisv32_free_dma(IN_DMA);
2029 /* Init job queue. */
2030 static int cryptocop_job_queue_init(void)
2034 INIT_LIST_HEAD(&cryptocop_completed_jobs);
2036 for (i = 0; i < cryptocop_prio_no_prios; i++){
2037 cryptocop_job_queues[i].prio = (cryptocop_queue_priority)i;
2038 INIT_LIST_HEAD(&cryptocop_job_queues[i].jobs);
2044 static void cryptocop_job_queue_close(void)
2046 struct list_head *node, *tmp;
2047 struct cryptocop_prio_job *pj = NULL;
2048 unsigned long int process_flags, flags;
2051 /* FIXME: This is as yet untested code. */
2053 /* Stop strcop from getting an operation to process while we are closing the
2054 module. */
2055 spin_lock_irqsave(&cryptocop_process_lock, process_flags);
2057 /* Empty the job queue. */
2058 for (i = 0; i < cryptocop_prio_no_prios; i++){
2059 if (!list_empty(&(cryptocop_job_queues[i].jobs))){
2060 list_for_each_safe(node, tmp, &(cryptocop_job_queues[i].jobs)) {
2061 pj = list_entry(node, struct cryptocop_prio_job, node);
2064 /* Call callback to notify consumer of job removal. */
2065 DEBUG(printk("cryptocop_job_queue_close: callback 0x%p, data 0x%p\n", pj->oper->cb, pj->oper->cb_data));
2066 pj->oper->operation_status = -EINTR; /* Job is terminated without completion. */
2067 pj->oper->cb(pj->oper, pj->oper->cb_data);
2069 delete_internal_operation(pj->iop);
2074 spin_unlock_irqrestore(&cryptocop_process_lock, process_flags);
2076 /* Remove the running job, if any. */
2077 spin_lock_irqsave(&running_job_lock, flags);
2078 if (cryptocop_running_job){
2079 reg_strcop_rw_cfg rw_cfg;
2080 reg_dma_rw_cfg dma_out_cfg, dma_in_cfg;
2083 dma_out_cfg = REG_RD(dma, OUT_DMA_INST, rw_cfg);
2084 dma_out_cfg.en = regk_dma_no;
2085 REG_WR(dma, OUT_DMA_INST, rw_cfg, dma_out_cfg);
2087 dma_in_cfg = REG_RD(dma, IN_DMA_INST, rw_cfg);
2088 dma_in_cfg.en = regk_dma_no;
2089 REG_WR(dma, IN_DMA_INST, rw_cfg, dma_in_cfg);
2091 /* Disable the cryptocop. */
2092 rw_cfg = REG_RD(strcop, regi_strcop, rw_cfg);
2093 rw_cfg.en = 0;
2094 REG_WR(strcop, regi_strcop, rw_cfg, rw_cfg);
2096 pj = cryptocop_running_job;
2097 cryptocop_running_job = NULL;
2099 /* Call callback to notify consumer of job removal. */
2100 DEBUG(printk("cryptocop_job_queue_close: callback 0x%p, data 0x%p\n", pj->oper->cb, pj->oper->cb_data));
2101 pj->oper->operation_status = -EINTR; /* Job is terminated without completion. */
2102 pj->oper->cb(pj->oper, pj->oper->cb_data);
2104 delete_internal_operation(pj->iop);
2107 spin_unlock_irqrestore(&running_job_lock, flags);
2109 /* Remove completed jobs, if any. */
2110 spin_lock_irqsave(&cryptocop_completed_jobs_lock, flags);
2112 list_for_each_safe(node, tmp, &cryptocop_completed_jobs) {
2113 pj = list_entry(node, struct cryptocop_prio_job, node);
2115 /* Call callback to notify consumer of job removal. */
2116 DEBUG(printk("cryptocop_job_queue_close: callback 0x%p, data 0x%p\n", pj->oper->cb, pj->oper->cb_data));
2117 pj->oper->operation_status = -EINTR; /* Job is terminated without completion. */
2118 pj->oper->cb(pj->oper, pj->oper->cb_data);
2120 delete_internal_operation(pj->iop);
2123 spin_unlock_irqrestore(&cryptocop_completed_jobs_lock, flags);
2127 static void cryptocop_start_job(void)
2130 struct cryptocop_prio_job *pj;
2131 unsigned long int flags;
2132 unsigned long int running_job_flags;
2133 reg_strcop_rw_cfg rw_cfg = {.en = 1, .ignore_sync = 0};
2135 DEBUG(printk("cryptocop_start_job: entering\n"));
2137 spin_lock_irqsave(&running_job_lock, running_job_flags);
2138 if (cryptocop_running_job != NULL){
2139 /* Already running. */
2140 DEBUG(printk("cryptocop_start_job: already running, exit\n"));
2141 spin_unlock_irqrestore(&running_job_lock, running_job_flags);
2144 spin_lock_irqsave(&cryptocop_job_queue_lock, flags);
2146 /* Check the queues in priority order. */
2147 for (i = cryptocop_prio_kernel_csum; (i < cryptocop_prio_no_prios) && list_empty(&cryptocop_job_queues[i].jobs); i++);
2148 if (i == cryptocop_prio_no_prios) {
2149 spin_unlock_irqrestore(&cryptocop_job_queue_lock, flags);
2150 spin_unlock_irqrestore(&running_job_lock, running_job_flags);
2151 DEBUG(printk("cryptocop_start_job: no jobs to run\n"));
2152 return; /* No jobs to run */
2154 DEBUG(printk("starting job for prio %d\n", i));
2156 /* TODO: Do not starve lower priority jobs. Let in a lower
2157 * prio job for every N-th processed higher prio job or some
2158 * other scheduling policy. This could reasonably be
2159 * tweakable since the optimal balance would depend on the
2160 * type of load on the system. */
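#if 0
	/* A minimal sketch of one such policy, not wired in: let a lower
	 * priority job through after every N jobs taken from a higher
	 * priority queue. The names starve_count and STARVE_LIMIT are
	 * hypothetical and not part of this driver. */
	static unsigned int starve_count[cryptocop_prio_no_prios];
	#define STARVE_LIMIT 8

	for (i = cryptocop_prio_kernel_csum; i < cryptocop_prio_no_prios; i++) {
		if (list_empty(&cryptocop_job_queues[i].jobs))
			continue;
		if (starve_count[i] >= STARVE_LIMIT) {
			/* Skip this queue once so a lower priority job can run. */
			starve_count[i] = 0;
			continue;
		}
		starve_count[i]++;
		break;
	}
	if (i == cryptocop_prio_no_prios) {
		/* Only skipped queues had jobs; fall back to strict priority. */
		for (i = cryptocop_prio_kernel_csum;
		     (i < cryptocop_prio_no_prios) &&
		     list_empty(&cryptocop_job_queues[i].jobs); i++);
	}
#endif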
2162 /* Pull the DMA lists from the job and start the DMA client. */
2163 pj = list_entry(cryptocop_job_queues[i].jobs.next, struct cryptocop_prio_job, node);
2164 list_del(&pj->node);
2165 spin_unlock_irqrestore(&cryptocop_job_queue_lock, flags);
2166 cryptocop_running_job = pj;
2168 /* Set config register (3DES and CSUM modes). */
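/* The 3DES mode names encode the per-stage operation: e.g. cryptocop_3des_ede
 * runs encrypt-decrypt-encrypt, with the three letters mapping to td1, td2
 * and td3 in order. */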
2169 switch (pj->iop->tdes_mode){
2170 case cryptocop_3des_eee:
2171 rw_cfg.td1 = regk_strcop_e;
2172 rw_cfg.td2 = regk_strcop_e;
2173 rw_cfg.td3 = regk_strcop_e;
2175 case cryptocop_3des_eed:
2176 rw_cfg.td1 = regk_strcop_e;
2177 rw_cfg.td2 = regk_strcop_e;
2178 rw_cfg.td3 = regk_strcop_d;
2180 case cryptocop_3des_ede:
2181 rw_cfg.td1 = regk_strcop_e;
2182 rw_cfg.td2 = regk_strcop_d;
2183 rw_cfg.td3 = regk_strcop_e;
2185 case cryptocop_3des_edd:
2186 rw_cfg.td1 = regk_strcop_e;
2187 rw_cfg.td2 = regk_strcop_d;
2188 rw_cfg.td3 = regk_strcop_d;
2190 case cryptocop_3des_dee:
2191 rw_cfg.td1 = regk_strcop_d;
2192 rw_cfg.td2 = regk_strcop_e;
2193 rw_cfg.td3 = regk_strcop_e;
2195 case cryptocop_3des_ded:
2196 rw_cfg.td1 = regk_strcop_d;
2197 rw_cfg.td2 = regk_strcop_e;
2198 rw_cfg.td3 = regk_strcop_d;
2200 case cryptocop_3des_dde:
2201 rw_cfg.td1 = regk_strcop_d;
2202 rw_cfg.td2 = regk_strcop_d;
2203 rw_cfg.td3 = regk_strcop_e;
2205 case cryptocop_3des_ddd:
2206 rw_cfg.td1 = regk_strcop_d;
2207 rw_cfg.td2 = regk_strcop_d;
2208 rw_cfg.td3 = regk_strcop_d;
2211 DEBUG(printk("cryptocop_setup_dma_list: bad 3DES mode\n"));
2213 switch (pj->iop->csum_mode){
2214 case cryptocop_csum_le:
2215 rw_cfg.ipend = regk_strcop_little;
2217 case cryptocop_csum_be:
2218 rw_cfg.ipend = regk_strcop_big;
2221 DEBUG(printk("cryptocop_setup_dma_list: bad checksum mode\n"));
2223 REG_WR(strcop, regi_strcop, rw_cfg, rw_cfg);
2225 DEBUG(printk("cryptocop_start_job: starting DMA, new cryptocop_running_job=0x%p\n"
2226 "ctx_in: 0x%p, phys: 0x%p\n"
2227 "ctx_out: 0x%p, phys: 0x%p\n",
2229 &pj->iop->ctx_in, (char*)virt_to_phys(&pj->iop->ctx_in),
2230 &pj->iop->ctx_out, (char*)virt_to_phys(&pj->iop->ctx_out)));
2232 /* Start input DMA. */
2233 flush_dma_context(&pj->iop->ctx_in);
2234 DMA_START_CONTEXT(IN_DMA_INST, virt_to_phys(&pj->iop->ctx_in));
2236 /* Start output DMA. */
2237 DMA_START_CONTEXT(OUT_DMA_INST, virt_to_phys(&pj->iop->ctx_out));
2239 spin_unlock_irqrestore(&running_job_lock, running_job_flags);
2240 DEBUG(printk("cryptocop_start_job: exiting\n"));
2244 static int cryptocop_job_setup(struct cryptocop_prio_job **pj, struct cryptocop_operation *operation)
2247 int alloc_flag = operation->in_interrupt ? GFP_ATOMIC : GFP_KERNEL;
2248 void *iop_alloc_ptr = NULL;
2250 *pj = kmalloc(sizeof (struct cryptocop_prio_job), alloc_flag);
2251 if (!*pj) return -ENOMEM;
2253 DEBUG(printk("cryptocop_job_setup: operation=0x%p\n", operation));
2255 (*pj)->oper = operation;
2256 DEBUG(printk("cryptocop_job_setup, cb=0x%p cb_data=0x%p\n", (*pj)->oper->cb, (*pj)->oper->cb_data));
2258 if (operation->use_dmalists) {
2259 DEBUG(print_user_dma_lists(&operation->list_op));
2260 if (!operation->list_op.inlist || !operation->list_op.outlist || !operation->list_op.out_data_buf || !operation->list_op.in_data_buf){
2261 DEBUG_API(printk("cryptocop_job_setup: bad indata (use_dmalists)\n"));
2265 iop_alloc_ptr = kmalloc(DESCR_ALLOC_PAD + sizeof(struct cryptocop_int_operation), alloc_flag);
2266 if (!iop_alloc_ptr) {
2267 DEBUG_API(printk("cryptocop_job_setup: kmalloc cryptocop_int_operation\n"));
2271 (*pj)->iop = (struct cryptocop_int_operation*)(((unsigned long int)(iop_alloc_ptr + DESCR_ALLOC_PAD + offsetof(struct cryptocop_int_operation, ctx_out)) & ~0x0000001F) - offsetof(struct cryptocop_int_operation, ctx_out));
2272 DEBUG(memset((*pj)->iop, 0xff, sizeof(struct cryptocop_int_operation)));
2273 (*pj)->iop->alloc_ptr = iop_alloc_ptr;
2274 (*pj)->iop->sid = operation->sid;
2275 (*pj)->iop->cdesc_out = NULL;
2276 (*pj)->iop->cdesc_in = NULL;
2277 (*pj)->iop->tdes_mode = operation->list_op.tdes_mode;
2278 (*pj)->iop->csum_mode = operation->list_op.csum_mode;
2279 (*pj)->iop->ddesc_out = operation->list_op.outlist;
2280 (*pj)->iop->ddesc_in = operation->list_op.inlist;
2282 /* Setup DMA contexts. */
2283 (*pj)->iop->ctx_out.next = NULL;
2284 (*pj)->iop->ctx_out.eol = 1;
2285 (*pj)->iop->ctx_out.saved_data = operation->list_op.outlist;
2286 (*pj)->iop->ctx_out.saved_data_buf = operation->list_op.out_data_buf;
2288 (*pj)->iop->ctx_in.next = NULL;
2289 (*pj)->iop->ctx_in.eol = 1;
2290 (*pj)->iop->ctx_in.saved_data = operation->list_op.inlist;
2291 (*pj)->iop->ctx_in.saved_data_buf = operation->list_op.in_data_buf;
2293 if ((err = cryptocop_setup_dma_list(operation, &(*pj)->iop, alloc_flag))) {
2294 DEBUG_API(printk("cryptocop_job_setup: cryptocop_setup_dma_list failed %d\n", err));
2299 DEBUG(print_dma_descriptors((*pj)->iop));
2301 DEBUG(printk("cryptocop_job_setup, DMA list setup successful\n"));
2306 static int cryptocop_open(struct inode *inode, struct file *filp)
2308 int p = iminor(inode);
2310 cycle_kernel_lock();
2311 if (p != CRYPTOCOP_MINOR) return -EINVAL;
2313 filp->private_data = NULL;
2318 static int cryptocop_release(struct inode *inode, struct file *filp)
2320 struct cryptocop_private *dev = filp->private_data;
2321 struct cryptocop_private *dev_next;
2324 dev_next = dev->next;
2325 if (dev->sid != CRYPTOCOP_SESSION_ID_NONE) {
2326 (void)cryptocop_free_session(dev->sid);
2336 static int cryptocop_ioctl_close_session(struct inode *inode, struct file *filp,
2337 unsigned int cmd, unsigned long arg)
2339 struct cryptocop_private *dev = filp->private_data;
2340 struct cryptocop_private *prev_dev = NULL;
2341 struct strcop_session_op *sess_op = (struct strcop_session_op *)arg;
2342 struct strcop_session_op sop;
2345 DEBUG(printk("cryptocop_ioctl_close_session\n"));
2347 if (!access_ok(VERIFY_READ, sess_op, sizeof(struct strcop_session_op)))
2349 err = copy_from_user(&sop, sess_op, sizeof(struct strcop_session_op));
2350 if (err) return -EFAULT;
2352 while (dev && (dev->sid != sop.ses_id)) {
2358 prev_dev->next = dev->next;
2360 filp->private_data = dev->next;
2362 err = cryptocop_free_session(dev->sid);
2363 if (err) return -EFAULT;
2365 DEBUG_API(printk("cryptocop_ioctl_close_session: session %lld not found\n", sop.ses_id));
2372 static void ioctl_process_job_callback(struct cryptocop_operation *op, void *cb_data)
2374 struct ioctl_job_cb_ctx *jc = (struct ioctl_job_cb_ctx *)cb_data;
2376 DEBUG(printk("ioctl_process_job_callback: op=0x%p, cb_data=0x%p\n", op, cb_data));
2379 wake_up(&cryptocop_ioc_process_wq);
2383 #define CRYPTOCOP_IOCTL_CIPHER_TID (1)
2384 #define CRYPTOCOP_IOCTL_DIGEST_TID (2)
2385 #define CRYPTOCOP_IOCTL_CSUM_TID (3)
2387 static size_t first_cfg_change_ix(struct strcop_crypto_op *crp_op)
2391 if (crp_op->do_cipher) ch_ix = crp_op->cipher_start;
2392 if (crp_op->do_digest && (crp_op->digest_start < ch_ix)) ch_ix = crp_op->digest_start;
2393 if (crp_op->do_csum && (crp_op->csum_start < ch_ix)) ch_ix = crp_op->csum_start;
2395 DEBUG(printk("first_cfg_change_ix: ix=%d\n", ch_ix));
2400 static size_t next_cfg_change_ix(struct strcop_crypto_op *crp_op, size_t ix)
2402 size_t ch_ix = INT_MAX;
2405 if (crp_op->do_cipher && ((crp_op->cipher_start + crp_op->cipher_len) > ix)){
2406 if (crp_op->cipher_start > ix) {
2407 ch_ix = crp_op->cipher_start;
2409 ch_ix = crp_op->cipher_start + crp_op->cipher_len;
2412 if (crp_op->do_digest && ((crp_op->digest_start + crp_op->digest_len) > ix)){
2413 if (crp_op->digest_start > ix) {
2414 tmp_ix = crp_op->digest_start;
2416 tmp_ix = crp_op->digest_start + crp_op->digest_len;
2418 if (tmp_ix < ch_ix) ch_ix = tmp_ix;
2420 if (crp_op->do_csum && ((crp_op->csum_start + crp_op->csum_len) > ix)){
2421 if (crp_op->csum_start > ix) {
2422 tmp_ix = crp_op->csum_start;
2424 tmp_ix = crp_op->csum_start + crp_op->csum_len;
2426 if (tmp_ix < ch_ix) ch_ix = tmp_ix;
2428 if (ch_ix == INT_MAX) ch_ix = ix;
2429 DEBUG(printk("next_cfg_change_ix prev ix=%d, next ix=%d\n", ix, ch_ix));
2434 /* Map map_length bytes from the pages starting at *pageix and *pageoffset to iovecs starting at *iovix.
2435 * Returns nonzero (-1) on success and 0 on failure. */
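/* E.g. with 4 KiB pages, mapping 6000 bytes starting at page offset 3000
 * produces three iovecs of 1096, 4096 and 808 bytes. */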
2436 static int map_pages_to_iovec(struct iovec *iov, int iovlen, int *iovix, struct page **pages, int nopages, int *pageix, int *pageoffset, int map_length )
2440 assert(iov != NULL);
2441 assert(iovix != NULL);
2442 assert(pages != NULL);
2443 assert(pageix != NULL);
2444 assert(pageoffset != NULL);
2446 DEBUG(printk("map_pages_to_iovec, map_length=%d, iovlen=%d, *iovix=%d, nopages=%d, *pageix=%d, *pageoffset=%d\n", map_length, iovlen, *iovix, nopages, *pageix, *pageoffset));
2448 while (map_length > 0){
2449 DEBUG(printk("map_pages_to_iovec, map_length=%d, iovlen=%d, *iovix=%d, nopages=%d, *pageix=%d, *pageoffset=%d\n", map_length, iovlen, *iovix, nopages, *pageix, *pageoffset));
2450 if (*iovix >= iovlen){
2451 DEBUG_API(printk("map_page_to_iovec: *iovix=%d >= iovlen=%d\n", *iovix, iovlen));
2454 if (*pageix >= nopages){
2455 DEBUG_API(printk("map_page_to_iovec: *pageix=%d >= nopages=%d\n", *pageix, nopages));
2458 iov[*iovix].iov_base = (unsigned char*)page_address(pages[*pageix]) + *pageoffset;
2459 tmplen = PAGE_SIZE - *pageoffset;
2460 if (tmplen < map_length){
2464 tmplen = map_length;
2465 (*pageoffset) += map_length;
2467 DEBUG(printk("mapping %d bytes from page %d (or %d) to iovec %d\n", tmplen, *pageix, *pageix-1, *iovix));
2468 iov[*iovix].iov_len = tmplen;
2469 map_length -= tmplen;
2472 DEBUG(printk("map_page_to_iovec, exit, *iovix=%d\n", *iovix));
2478 static int cryptocop_ioctl_process(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg)
2481 struct cryptocop_private *dev = filp->private_data;
2482 struct strcop_crypto_op *crp_oper = (struct strcop_crypto_op *)arg;
2483 struct strcop_crypto_op oper = {0};
2485 struct cryptocop_operation *cop = NULL;
2487 struct ioctl_job_cb_ctx *jc = NULL;
2489 struct page **inpages = NULL;
2490 struct page **outpages = NULL;
2494 struct cryptocop_desc descs[5]; /* At most 5 descriptors are needed; there are three transforms that
2495 * can be connected/disconnected at different places in the indata. */
2496 struct cryptocop_desc_cfg dcfgs[5*3];
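/* Three half-open transform ranges have at most six distinct endpoints, so
 * the data splits into at most five segments between the first and the last
 * configuration change; each segment can carry one cfg per transform, hence
 * 5 descriptors and 5*3 descriptor configs. */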
2499 struct cryptocop_tfrm_cfg ciph_tcfg = {0};
2500 struct cryptocop_tfrm_cfg digest_tcfg = {0};
2501 struct cryptocop_tfrm_cfg csum_tcfg = {0};
2503 unsigned char *digest_result = NULL;
2504 int digest_length = 0;
2506 unsigned char csum_result[CSUM_BLOCK_LENGTH];
2507 struct cryptocop_session *sess;
2517 int cipher_active, digest_active, csum_active;
2518 int end_digest, end_csum;
2519 int digest_done = 0;
2520 int cipher_done = 0;
2523 DEBUG(printk("cryptocop_ioctl_process\n"));
2525 if (!access_ok(VERIFY_WRITE, crp_oper, sizeof(struct strcop_crypto_op))){
2526 DEBUG_API(printk("cryptocop_ioctl_process: !access_ok crp_oper!\n"));
2529 if (copy_from_user(&oper, crp_oper, sizeof(struct strcop_crypto_op))) {
2530 DEBUG_API(printk("cryptocop_ioctl_process: copy_from_user\n"));
2533 DEBUG(print_strcop_crypto_op(&oper));
2535 while (dev && dev->sid != oper.ses_id) dev = dev->next;
2537 DEBUG_API(printk("cryptocop_ioctl_process: session %lld not found\n", oper.ses_id));
2541 /* Check buffers. */
2542 if (((oper.indata + oper.inlen) < oper.indata) || ((oper.cipher_outdata + oper.cipher_outlen) < oper.cipher_outdata)){
2543 DEBUG_API(printk("cryptocop_ioctl_process: user buffers wrapped around, bad user!\n"));
2547 if (!access_ok(VERIFY_WRITE, oper.cipher_outdata, oper.cipher_outlen)){
2548 DEBUG_API(printk("cryptocop_ioctl_process: !access_ok out data!\n"));
2551 if (!access_ok(VERIFY_READ, oper.indata, oper.inlen)){
2552 DEBUG_API(printk("cryptocop_ioctl_process: !access_ok in data!\n"));
2556 cop = kmalloc(sizeof(struct cryptocop_operation), GFP_KERNEL);
2558 DEBUG_API(printk("cryptocop_ioctl_process: kmalloc\n"));
2561 jc = kmalloc(sizeof(struct ioctl_job_cb_ctx), GFP_KERNEL);
2563 DEBUG_API(printk("cryptocop_ioctl_process: kmalloc\n"));
2570 cop->cb = ioctl_process_job_callback;
2571 cop->operation_status = 0;
2572 cop->use_dmalists = 0;
2573 cop->in_interrupt = 0;
2574 cop->fast_callback = 0;
2575 cop->tfrm_op.tfrm_cfg = NULL;
2576 cop->tfrm_op.desc = NULL;
2577 cop->tfrm_op.indata = NULL;
2578 cop->tfrm_op.incount = 0;
2579 cop->tfrm_op.inlen = 0;
2580 cop->tfrm_op.outdata = NULL;
2581 cop->tfrm_op.outcount = 0;
2582 cop->tfrm_op.outlen = 0;
2584 sess = get_session(oper.ses_id);
2586 DEBUG_API(printk("cryptocop_ioctl_process: bad session id.\n"));
2592 if (oper.do_cipher) {
2593 unsigned int cipher_outlen = 0;
2594 struct cryptocop_transform_ctx *tc = get_transform_ctx(sess, CRYPTOCOP_IOCTL_CIPHER_TID);
2596 DEBUG_API(printk("cryptocop_ioctl_process: no cipher transform in session.\n"));
2600 ciph_tcfg.tid = CRYPTOCOP_IOCTL_CIPHER_TID;
2601 ciph_tcfg.inject_ix = 0;
2602 ciph_tcfg.flags = 0;
2603 if ((oper.cipher_start < 0) || (oper.cipher_len <= 0) || (oper.cipher_start > oper.inlen) || ((oper.cipher_start + oper.cipher_len) > oper.inlen)){
2604 DEBUG_API(printk("cryptocop_ioctl_process: bad cipher length\n"));
2609 cblocklen = tc->init.alg == cryptocop_alg_aes ? AES_BLOCK_LENGTH : DES_BLOCK_LENGTH;
2610 if (oper.cipher_len % cblocklen) {
2613 DEBUG_API(printk("cryptocop_ioctl_process: cipher inlength not multiple of block length.\n"));
2616 cipher_outlen = oper.cipher_len;
2617 if (tc->init.cipher_mode == cryptocop_cipher_mode_cbc){
2618 if (oper.cipher_explicit) {
2619 ciph_tcfg.flags |= CRYPTOCOP_EXPLICIT_IV;
2620 memcpy(ciph_tcfg.iv, oper.cipher_iv, cblocklen);
2622 cipher_outlen = oper.cipher_len - cblocklen;
2625 if (oper.cipher_explicit){
2628 DEBUG_API(printk("cryptocop_ioctl_process: explicit_iv when not CBC mode\n"));
2632 if (oper.cipher_outlen != cipher_outlen) {
2635 DEBUG_API(printk("cryptocop_ioctl_process: cipher_outlen incorrect, should be %d not %d.\n", cipher_outlen, oper.cipher_outlen));
2640 ciph_tcfg.flags |= CRYPTOCOP_DECRYPT;
2642 ciph_tcfg.flags |= CRYPTOCOP_ENCRYPT;
2644 ciph_tcfg.next = cop->tfrm_op.tfrm_cfg;
2645 cop->tfrm_op.tfrm_cfg = &ciph_tcfg;
2647 if (oper.do_digest){
2648 struct cryptocop_transform_ctx *tc = get_transform_ctx(sess, CRYPTOCOP_IOCTL_DIGEST_TID);
2650 DEBUG_API(printk("cryptocop_ioctl_process: no digest transform in session.\n"));
2654 digest_length = tc->init.alg == cryptocop_alg_md5 ? 16 : 20;
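/* MD5 yields a 16 byte digest, SHA-1 a 20 byte digest. */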
2655 digest_result = kmalloc(digest_length, GFP_KERNEL);
2656 if (!digest_result) {
2657 DEBUG_API(printk("cryptocop_ioctl_process: kmalloc digest_result\n"));
2661 DEBUG(memset(digest_result, 0xff, digest_length));
2663 digest_tcfg.tid = CRYPTOCOP_IOCTL_DIGEST_TID;
2664 digest_tcfg.inject_ix = 0;
2665 ciph_tcfg.inject_ix += digest_length;
2666 if ((oper.digest_start < 0) || (oper.digest_len <= 0) || (oper.digest_start > oper.inlen) || ((oper.digest_start + oper.digest_len) > oper.inlen)){
2667 DEBUG_API(printk("cryptocop_ioctl_process: bad digest length\n"));
2672 digest_tcfg.next = cop->tfrm_op.tfrm_cfg;
2673 cop->tfrm_op.tfrm_cfg = &digest_tcfg;
2676 csum_tcfg.tid = CRYPTOCOP_IOCTL_CSUM_TID;
2677 csum_tcfg.inject_ix = digest_length;
2678 ciph_tcfg.inject_ix += 2;
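/* The checksum result is two bytes, injected after any digest output, so the
 * cipher's inject index moves past digest_length + 2 bytes in total. */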
2680 if ((oper.csum_start < 0) || (oper.csum_len <= 0) || (oper.csum_start > oper.inlen) || ((oper.csum_start + oper.csum_len) > oper.inlen)){
2681 DEBUG_API(printk("cryptocop_ioctl_process: bad csum length\n"));
2687 csum_tcfg.next = cop->tfrm_op.tfrm_cfg;
2688 cop->tfrm_op.tfrm_cfg = &csum_tcfg;
2691 prev_ix = first_cfg_change_ix(&oper);
2692 if (prev_ix > oper.inlen) {
2693 DEBUG_API(printk("cryptocop_ioctl_process: length mismatch\n"));
2694 nooutpages = noinpages = 0;
2698 DEBUG(printk("cryptocop_ioctl_process: inlen=%d, cipher_outlen=%d\n", oper.inlen, oper.cipher_outlen));
2700 /* Map user pages for in and out data of the operation. */
2701 noinpages = (((unsigned long int)(oper.indata + prev_ix) & ~PAGE_MASK) + oper.inlen - 1 - prev_ix + ~PAGE_MASK) >> PAGE_SHIFT;
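/* The page count sums the offset of (oper.indata + prev_ix) within its first
 * page and the number of bytes to process, rounded up to whole pages
 * (~PAGE_MASK equals PAGE_SIZE - 1). */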
2702 DEBUG(printk("cryptocop_ioctl_process: noinpages=%d\n", noinpages));
2703 inpages = kmalloc(noinpages * sizeof(struct page*), GFP_KERNEL);
2705 DEBUG_API(printk("cryptocop_ioctl_process: kmalloc inpages\n"));
2706 nooutpages = noinpages = 0;
2710 if (oper.do_cipher){
2711 nooutpages = (((unsigned long int)oper.cipher_outdata & ~PAGE_MASK) + oper.cipher_outlen - 1 + ~PAGE_MASK) >> PAGE_SHIFT;
2712 DEBUG(printk("cryptocop_ioctl_process: nooutpages=%d\n", nooutpages));
2713 outpages = kmalloc(nooutpages * sizeof(struct page*), GFP_KERNEL);
2715 DEBUG_API(printk("cryptocop_ioctl_process: kmalloc outpages\n"));
2716 nooutpages = noinpages = 0;
2722 /* Acquire the mm page semaphore. */
2723 down_read(&current->mm->mmap_sem);
2725 err = get_user_pages(current,
2727 (unsigned long int)(oper.indata + prev_ix),
2729 0, /* read access only for in data */
2735 up_read(&current->mm->mmap_sem);
2736 nooutpages = noinpages = 0;
2737 DEBUG_API(printk("cryptocop_ioctl_process: get_user_pages indata\n"));
2741 if (oper.do_cipher){
2742 err = get_user_pages(current,
2744 (unsigned long int)oper.cipher_outdata,
2746 1, /* write access for out data */
2750 up_read(&current->mm->mmap_sem);
2753 DEBUG_API(printk("cryptocop_ioctl_process: get_user_pages outdata\n"));
2758 up_read(&current->mm->mmap_sem);
2761 /* Add 6 to nooutpages to make room for possibly inserted buffers for storing digest and
2762 * csum output and splits when units are (dis-)connected. */
2763 cop->tfrm_op.indata = kmalloc((noinpages) * sizeof(struct iovec), GFP_KERNEL);
2764 cop->tfrm_op.outdata = kmalloc((6 + nooutpages) * sizeof(struct iovec), GFP_KERNEL);
2765 if (!cop->tfrm_op.indata || !cop->tfrm_op.outdata) {
2766 DEBUG_API(printk("cryptocop_ioctl_process: kmalloc iovecs\n"));
2771 cop->tfrm_op.inlen = oper.inlen - prev_ix;
2772 cop->tfrm_op.outlen = 0;
2773 if (oper.do_cipher) cop->tfrm_op.outlen += oper.cipher_outlen;
2774 if (oper.do_digest) cop->tfrm_op.outlen += digest_length;
2775 if (oper.do_csum) cop->tfrm_op.outlen += 2;
2777 /* Setup the in iovecs. */
2778 cop->tfrm_op.incount = noinpages;
2780 size_t tmplen = cop->tfrm_op.inlen;
2782 cop->tfrm_op.indata[0].iov_len = PAGE_SIZE - ((unsigned long int)(oper.indata + prev_ix) & ~PAGE_MASK);
2783 cop->tfrm_op.indata[0].iov_base = (unsigned char*)page_address(inpages[0]) + ((unsigned long int)(oper.indata + prev_ix) & ~PAGE_MASK);
2784 tmplen -= cop->tfrm_op.indata[0].iov_len;
2785 for (i = 1; i<noinpages; i++){
2786 cop->tfrm_op.indata[i].iov_len = tmplen < PAGE_SIZE ? tmplen : PAGE_SIZE;
2787 cop->tfrm_op.indata[i].iov_base = (unsigned char*)page_address(inpages[i]);
2788 tmplen -= PAGE_SIZE;
2791 cop->tfrm_op.indata[0].iov_len = oper.inlen - prev_ix;
2792 cop->tfrm_op.indata[0].iov_base = (unsigned char*)page_address(inpages[0]) + ((unsigned long int)(oper.indata + prev_ix) & ~PAGE_MASK);
2795 iovlen = nooutpages + 6;
2796 pageoffset = oper.do_cipher ? ((unsigned long int)oper.cipher_outdata & ~PAGE_MASK) : 0;
2798 next_ix = next_cfg_change_ix(&oper, prev_ix);
2799 if (prev_ix == next_ix){
2800 DEBUG_API(printk("cryptocop_ioctl_process: length configuration broken.\n"));
2801 err = -EINVAL; /* This should be impossible barring bugs. */
2804 while (prev_ix != next_ix){
2805 end_digest = end_csum = cipher_active = digest_active = csum_active = 0;
2806 descs[desc_ix].cfg = NULL;
2807 descs[desc_ix].length = next_ix - prev_ix;
2809 if (oper.do_cipher && (oper.cipher_start < next_ix) && (prev_ix < (oper.cipher_start + oper.cipher_len))) {
2810 dcfgs[dcfg_ix].tid = CRYPTOCOP_IOCTL_CIPHER_TID;
2811 dcfgs[dcfg_ix].src = cryptocop_source_dma;
2814 if (next_ix == (oper.cipher_start + oper.cipher_len)){
2816 dcfgs[dcfg_ix].last = 1;
2818 dcfgs[dcfg_ix].last = 0;
2820 dcfgs[dcfg_ix].next = descs[desc_ix].cfg;
2821 descs[desc_ix].cfg = &dcfgs[dcfg_ix];
2824 if (oper.do_digest && (oper.digest_start < next_ix) && (prev_ix < (oper.digest_start + oper.digest_len))) {
2826 dcfgs[dcfg_ix].tid = CRYPTOCOP_IOCTL_DIGEST_TID;
2827 dcfgs[dcfg_ix].src = cryptocop_source_dma;
2828 if (next_ix == (oper.digest_start + oper.digest_len)){
2829 assert(!digest_done);
2831 dcfgs[dcfg_ix].last = 1;
2833 dcfgs[dcfg_ix].last = 0;
2835 dcfgs[dcfg_ix].next = descs[desc_ix].cfg;
2836 descs[desc_ix].cfg = &dcfgs[dcfg_ix];
2839 if (oper.do_csum && (oper.csum_start < next_ix) && (prev_ix < (oper.csum_start + oper.csum_len))){
2841 dcfgs[dcfg_ix].tid = CRYPTOCOP_IOCTL_CSUM_TID;
2842 dcfgs[dcfg_ix].src = cryptocop_source_dma;
2843 if (next_ix == (oper.csum_start + oper.csum_len)){
2845 dcfgs[dcfg_ix].last = 1;
2847 dcfgs[dcfg_ix].last = 0;
2849 dcfgs[dcfg_ix].next = descs[desc_ix].cfg;
2850 descs[desc_ix].cfg = &dcfgs[dcfg_ix];
2853 if (!descs[desc_ix].cfg){
2854 DEBUG_API(printk("cryptocop_ioctl_process: data segment %d (%d to %d) had no active transforms\n", desc_ix, prev_ix, next_ix));
2858 descs[desc_ix].next = &descs[desc_ix + 1];
2861 next_ix = next_cfg_change_ix(&oper, prev_ix);
2863 if (desc_ix > 0) {
2864 descs[desc_ix-1].next = NULL;
2865 } else {
2866 descs[0].next = NULL;
2867 }
2868 if (oper.do_digest) {
2869 DEBUG(printk("cryptocop_ioctl_process: mapping %d byte digest output to iovec %d\n", digest_length, iovix));
2870 /* Add outdata iovec, length == <length of type of digest> */
2871 cop->tfrm_op.outdata[iovix].iov_base = digest_result;
2872 cop->tfrm_op.outdata[iovix].iov_len = digest_length;
2876 /* Add outdata iovec, length == 2, the length of the csum. */
2877 DEBUG(printk("cryptocop_ioctl_process: mapping 2 byte csum output to iovec %d\n", iovix));
2879 cop->tfrm_op.outdata[iovix].iov_base = csum_result;
2880 cop->tfrm_op.outdata[iovix].iov_len = 2;
2883 if (oper.do_cipher) {
2884 if (!map_pages_to_iovec(cop->tfrm_op.outdata, iovlen, &iovix, outpages, nooutpages, &pageix, &pageoffset, oper.cipher_outlen)){
2885 DEBUG_API(printk("cryptocop_ioctl_process: failed to map pages to iovec.\n"));
2886 err = -ENOSYS; /* This should be impossible barring bugs. */
2890 DEBUG(printk("cryptocop_ioctl_process: setting cop->tfrm_op.outcount %d\n", iovix));
2891 cop->tfrm_op.outcount = iovix;
2892 assert(iovix <= (nooutpages + 6));
2894 cop->sid = oper.ses_id;
2895 cop->tfrm_op.desc = &descs[0];
2897 DEBUG(printk("cryptocop_ioctl_process: inserting job, cb_data=0x%p\n", cop->cb_data));
2899 if ((err = cryptocop_job_queue_insert_user_job(cop)) != 0) {
2900 DEBUG_API(printk("cryptocop_ioctl_process: insert job %d\n", err));
2905 DEBUG(printk("cryptocop_ioctl_process: begin wait for result\n"));
2907 wait_event(cryptocop_ioc_process_wq, (jc->processed != 0));
2908 DEBUG(printk("cryptocop_ioctl_process: end wait for result\n"));
2909 if (!jc->processed){
2910 printk(KERN_WARNING "cryptocop_ioctl_process: job not processed at completion\n");
2915 /* Job process done. Cipher output should already be correct in job so no post processing of outdata. */
2916 DEBUG(printk("cryptocop_ioctl_process: operation_status = %d\n", cop->operation_status));
2917 if (cop->operation_status == 0){
2918 if (oper.do_digest){
2919 DEBUG(printk("cryptocop_ioctl_process: copy %d bytes digest to user\n", digest_length));
2920 err = copy_to_user((unsigned char*)crp_oper + offsetof(struct strcop_crypto_op, digest), digest_result, digest_length);
2922 DEBUG_API(printk("cryptocop_ioctl_process: copy_to_user, digest length %d, err %d\n", digest_length, err));
2928 DEBUG(printk("cryptocop_ioctl_process: copy 2 bytes checksum to user\n"));
2929 err = copy_to_user((unsigned char*)crp_oper + offsetof(struct strcop_crypto_op, csum), csum_result, 2);
2931 DEBUG_API(printk("cryptocop_ioctl_process: copy_to_user, csum, err %d\n", err));
2938 DEBUG(printk("cryptocop_ioctl_process: returning err = operation_status = %d\n", cop->operation_status));
2939 err = cop->operation_status;
2943 /* Release page caches. */
2944 for (i = 0; i < noinpages; i++){
2945 put_page(inpages[i]);
2947 for (i = 0; i < nooutpages; i++){
2949 /* Mark output pages dirty. */
2950 spdl_err = set_page_dirty_lock(outpages[i]);
2951 DEBUG(if (spdl_err < 0) printk("cryptocop_ioctl_process: set_page_dirty_lock returned %d\n", spdl_err));
2953 for (i = 0; i < nooutpages; i++){
2954 put_page(outpages[i]);
2957 kfree(digest_result);
2961 kfree(cop->tfrm_op.indata);
2962 kfree(cop->tfrm_op.outdata);
2967 DEBUG(print_lock_status());
2973 static int cryptocop_ioctl_create_session(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg)
2975 cryptocop_session_id sid;
2977 struct cryptocop_private *dev;
2978 struct strcop_session_op *sess_op = (struct strcop_session_op *)arg;
2979 struct strcop_session_op sop;
2980 struct cryptocop_transform_init *tis = NULL;
2981 struct cryptocop_transform_init ti_cipher = {0};
2982 struct cryptocop_transform_init ti_digest = {0};
2983 struct cryptocop_transform_init ti_csum = {0};
2985 if (!access_ok(VERIFY_WRITE, sess_op, sizeof(struct strcop_session_op)))
2987 err = copy_from_user(&sop, sess_op, sizeof(struct strcop_session_op));
2988 if (err) return -EFAULT;
2989 if (sop.cipher != cryptocop_cipher_none) {
2990 if (!access_ok(VERIFY_READ, sop.key, sop.keylen)) return -EFAULT;
2992 DEBUG(printk("cryptocop_ioctl_create_session, sess_op:\n"));
2994 DEBUG(printk("\tcipher:%d\n"
2995 "\tcipher_mode:%d\n"
3003 if (sop.cipher != cryptocop_cipher_none){
3004 /* Init the cipher. */
3005 switch (sop.cipher){
3006 case cryptocop_cipher_des:
3007 ti_cipher.alg = cryptocop_alg_des;
3009 case cryptocop_cipher_3des:
3010 ti_cipher.alg = cryptocop_alg_3des;
3012 case cryptocop_cipher_aes:
3013 ti_cipher.alg = cryptocop_alg_aes;
3016 DEBUG_API(printk("create session, bad cipher algorithm %d\n", sop.cipher));
3019 DEBUG(printk("setting cipher transform %d\n", ti_cipher.alg));
3020 if (copy_from_user(ti_cipher.key, sop.key, sop.keylen/8)) return -EFAULT;
3021 ti_cipher.keylen = sop.keylen;
3023 case cryptocop_cipher_mode_cbc:
3024 case cryptocop_cipher_mode_ecb:
3025 ti_cipher.cipher_mode = sop.cmode;
3028 DEBUG_API(printk("create session, bad cipher mode %d\n", sop.cmode));
3031 DEBUG(printk("cryptocop_ioctl_create_session: setting CBC mode %d\n", ti_cipher.cipher_mode));
3032 switch (sop.des3_mode){
3033 case cryptocop_3des_eee:
3034 case cryptocop_3des_eed:
3035 case cryptocop_3des_ede:
3036 case cryptocop_3des_edd:
3037 case cryptocop_3des_dee:
3038 case cryptocop_3des_ded:
3039 case cryptocop_3des_dde:
3040 case cryptocop_3des_ddd:
3041 ti_cipher.tdes_mode = sop.des3_mode;
3044 DEBUG_API(printk("create session, bad 3DES mode %d\n", sop.des3_mode));
3047 ti_cipher.tid = CRYPTOCOP_IOCTL_CIPHER_TID;
3048 ti_cipher.next = tis;
3050 } /* if (sop.cipher != cryptocop_cipher_none) */
3051 if (sop.digest != cryptocop_digest_none){
3052 DEBUG(printk("setting digest transform\n"));
3053 switch (sop.digest){
3054 case cryptocop_digest_md5:
3055 ti_digest.alg = cryptocop_alg_md5;
3057 case cryptocop_digest_sha1:
3058 ti_digest.alg = cryptocop_alg_sha1;
3061 DEBUG_API(printk("create session, bad digest algorithm %d\n", sop.digest));
3064 ti_digest.tid = CRYPTOCOP_IOCTL_DIGEST_TID;
3065 ti_digest.next = tis;
3067 } /* if (sop.digest != cryptocop_digest_none) */
3068 if (sop.csum != cryptocop_csum_none){
3069 DEBUG(printk("setting csum transform\n"));
3071 case cryptocop_csum_le:
3072 case cryptocop_csum_be:
3073 ti_csum.csum_mode = sop.csum;
3076 DEBUG_API(printk("create session, bad checksum algorithm %d\n", sop.csum));
3079 ti_csum.alg = cryptocop_alg_csum;
3080 ti_csum.tid = CRYPTOCOP_IOCTL_CSUM_TID;
3083 } /* (sop.csum != cryptocop_csum_none) */
3084 dev = kmalloc(sizeof(struct cryptocop_private), GFP_KERNEL);
3086 DEBUG_API(printk("create session, alloc dev\n"));
3090 err = cryptocop_new_session(&sid, tis, GFP_KERNEL);
3091 DEBUG({ if (err) printk("create session, cryptocop_new_session %d\n", err);});
3097 sess_op->ses_id = sid;
3099 dev->next = filp->private_data;
3100 filp->private_data = dev;
3105 static int cryptocop_ioctl(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg)
3108 if (_IOC_TYPE(cmd) != ETRAXCRYPTOCOP_IOCTYPE) {
3109 DEBUG_API(printk("cryptocop_ioctl: wrong type\n"));
3112 if (_IOC_NR(cmd) > CRYPTOCOP_IO_MAXNR){
3115 /* Access check of the argument. Some commands, e.g. create session and process op,
3116 needs additional checks. Those are handled in the command handling functions. */
3117 if (_IOC_DIR(cmd) & _IOC_READ)
3118 err = !access_ok(VERIFY_WRITE, (void *)arg, _IOC_SIZE(cmd));
3119 else if (_IOC_DIR(cmd) & _IOC_WRITE)
3120 err = !access_ok(VERIFY_READ, (void *)arg, _IOC_SIZE(cmd));
3121 if (err) return -EFAULT;
3124 case CRYPTOCOP_IO_CREATE_SESSION:
3125 return cryptocop_ioctl_create_session(inode, filp, cmd, arg);
3126 case CRYPTOCOP_IO_CLOSE_SESSION:
3127 return cryptocop_ioctl_close_session(inode, filp, cmd, arg);
3128 case CRYPTOCOP_IO_PROCESS_OP:
3129 return cryptocop_ioctl_process(inode, filp, cmd, arg);
3131 DEBUG_API(printk("cryptocop_ioctl: unknown command\n"));
3139 static void print_dma_descriptors(struct cryptocop_int_operation *iop)
3141 struct cryptocop_dma_desc *cdesc_out = iop->cdesc_out;
3142 struct cryptocop_dma_desc *cdesc_in = iop->cdesc_in;
3145 printk("print_dma_descriptors start\n");
3148 printk("\tsid: 0x%lld\n", iop->sid);
3150 printk("\tcdesc_out: 0x%p\n", iop->cdesc_out);
3151 printk("\tcdesc_in: 0x%p\n", iop->cdesc_in);
3152 printk("\tddesc_out: 0x%p\n", iop->ddesc_out);
3153 printk("\tddesc_in: 0x%p\n", iop->ddesc_in);
3155 printk("\niop->ctx_out: 0x%p phys: 0x%p\n", &iop->ctx_out, (char*)virt_to_phys(&iop->ctx_out));
3156 printk("\tnext: 0x%p\n"
3157 "\tsaved_data: 0x%p\n"
3158 "\tsaved_data_buf: 0x%p\n",
3160 iop->ctx_out.saved_data,
3161 iop->ctx_out.saved_data_buf);
3163 printk("\niop->ctx_in: 0x%p phys: 0x%p\n", &iop->ctx_in, (char*)virt_to_phys(&iop->ctx_in));
3164 printk("\tnext: 0x%p\n"
3165 "\tsaved_data: 0x%p\n"
3166 "\tsaved_data_buf: 0x%p\n",
3168 iop->ctx_in.saved_data,
3169 iop->ctx_in.saved_data_buf);
3174 printk("cdesc_out %d, desc=0x%p\n", i, cdesc_out->dma_descr);
3175 printk("\n\tvirt_to_phys(desc): 0x%p\n", (char*)virt_to_phys(cdesc_out->dma_descr));
3176 td = cdesc_out->dma_descr;
3177 printk("\n\tbuf: 0x%p\n"
3196 cdesc_out = cdesc_out->next;
3202 printk("cdesc_in %d, desc=0x%p\n", i, cdesc_in->dma_descr);
3203 printk("\n\tvirt_to_phys(desc): 0x%p\n", (char*)virt_to_phys(cdesc_in->dma_descr));
3204 td = cdesc_in->dma_descr;
3205 printk("\n\tbuf: 0x%p\n"
3224 cdesc_in = cdesc_in->next;
3228 printk("print_dma_descriptors end\n");
3232 static void print_strcop_crypto_op(struct strcop_crypto_op *cop)
3234 printk("print_strcop_crypto_op, 0x%p\n", cop);
3237 printk("indata=0x%p\n"
3241 "cipher_explicit=%d\n"
3250 cop->cipher_explicit,
3253 cop->cipher_outdata,
3254 cop->cipher_outlen);
3256 printk("do_digest=%d\n"
3263 printk("do_csum=%d\n"
3271 static void print_cryptocop_operation(struct cryptocop_operation *cop)
3273 struct cryptocop_desc *d;
3274 struct cryptocop_tfrm_cfg *tc;
3275 struct cryptocop_desc_cfg *dc;
3278 printk("print_cryptocop_operation, cop=0x%p\n\n", cop);
3279 printk("sid: %lld\n", cop->sid);
3280 printk("operation_status=%d\n"
3283 "fast_callback=%d\n",
3284 cop->operation_status,
3287 cop->fast_callback);
3289 if (cop->use_dmalists){
3290 print_user_dma_lists(&cop->list_op);
3292 printk("cop->tfrm_op\n"
3301 cop->tfrm_op.tfrm_cfg,
3303 cop->tfrm_op.indata,
3304 cop->tfrm_op.incount,
3306 cop->tfrm_op.outdata,
3307 cop->tfrm_op.outcount,
3308 cop->tfrm_op.outlen);
3310 tc = cop->tfrm_op.tfrm_cfg;
3312 printk("tfrm_cfg, 0x%p\n"
3324 d = cop->tfrm_op.desc;
3326 printk("\n======================desc, 0x%p\n"
3336 printk("=========desc_cfg, 0x%p\n"
3350 printk("\n====iniov\n");
3351 for (i = 0; i < cop->tfrm_op.incount; i++){
3352 printk("indata[%d]\n"
3356 cop->tfrm_op.indata[i].iov_base,
3357 cop->tfrm_op.indata[i].iov_len);
3359 printk("\n====outiov\n");
3360 for (i = 0; i < cop->tfrm_op.outcount; i++){
3361 printk("outdata[%d]\n"
3365 cop->tfrm_op.outdata[i].iov_base,
3366 cop->tfrm_op.outdata[i].iov_len);
3369 printk("------------end print_cryptocop_operation\n");
3373 static void print_user_dma_lists(struct cryptocop_dma_list_operation *dma_op)
3378 printk("print_user_dma_lists, dma_op=0x%p\n", dma_op);
3380 printk("out_data_buf = 0x%p, phys_to_virt(out_data_buf) = 0x%p\n", dma_op->out_data_buf, phys_to_virt((unsigned long int)dma_op->out_data_buf));
3381 printk("in_data_buf = 0x%p, phys_to_virt(in_data_buf) = 0x%p\n", dma_op->in_data_buf, phys_to_virt((unsigned long int)dma_op->in_data_buf));
3383 printk("##############outlist\n");
3384 dd = phys_to_virt((unsigned long int)dma_op->outlist);
3386 while (dd != NULL) {
3387 printk("#%d phys_to_virt(desc) 0x%p\n", i, dd);
3388 printk("\n\tbuf: 0x%p\n"
3410 dd = phys_to_virt((unsigned long int)dd->next);
3414 printk("##############inlist\n");
3415 dd = phys_to_virt((unsigned long int)dma_op->inlist);
3417 while (dd != NULL) {
3418 printk("#%d phys_to_virt(desc) 0x%p\n", i, dd);
3419 printk("\n\tbuf: 0x%p\n"
3441 dd = phys_to_virt((unsigned long int)dd->next);
3447 static void print_lock_status(void)
3449 printk("**********************print_lock_status\n");
3450 printk("cryptocop_completed_jobs_lock %d\n", spin_is_locked(&cryptocop_completed_jobs_lock));
3451 printk("cryptocop_job_queue_lock %d\n", spin_is_locked(&cryptocop_job_queue_lock));
3452 printk("descr_pool_lock %d\n", spin_is_locked(&descr_pool_lock));
3453 printk("cryptocop_sessions_lock %d\n", spin_is_locked(cryptocop_sessions_lock));
3454 printk("running_job_lock %d\n", spin_is_locked(running_job_lock));
3455 printk("cryptocop_process_lock %d\n", spin_is_locked(cryptocop_process_lock));
3460 static const char cryptocop_name[] = "ETRAX FS stream co-processor";
3462 static int init_stream_coprocessor(void)
3466 static int initialized = 0;
3473 printk("ETRAX FS stream co-processor driver v0.01, (c) 2003 Axis Communications AB\n");
3475 err = register_chrdev(CRYPTOCOP_MAJOR, cryptocop_name, &cryptocop_fops);
3477 printk(KERN_ERR "stream co-processor: could not get major number.\n");
3481 err = init_cryptocop();
3483 (void)unregister_chrdev(CRYPTOCOP_MAJOR, cryptocop_name);
3486 err = cryptocop_job_queue_init();
3488 release_cryptocop();
3489 (void)unregister_chrdev(CRYPTOCOP_MAJOR, cryptocop_name);
3492 /* Init the descriptor pool. */
3493 for (i = 0; i < CRYPTOCOP_DESCRIPTOR_POOL_SIZE - 1; i++) {
3494 descr_pool[i].from_pool = 1;
3495 descr_pool[i].next = &descr_pool[i + 1];
3497 descr_pool[i].from_pool = 1;
3498 descr_pool[i].next = NULL;
3499 descr_pool_free_list = &descr_pool[0];
3500 descr_pool_no_free = CRYPTOCOP_DESCRIPTOR_POOL_SIZE;
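/* The pool descriptors now form a NULL-terminated free list headed by
 * descr_pool_free_list, with descr_pool_no_free tracking how many entries
 * remain available. */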
3502 spin_lock_init(&cryptocop_completed_jobs_lock);
3503 spin_lock_init(&cryptocop_job_queue_lock);
3504 spin_lock_init(&descr_pool_lock);
3505 spin_lock_init(&cryptocop_sessions_lock);
3506 spin_lock_init(&running_job_lock);
3507 spin_lock_init(&cryptocop_process_lock);
3509 cryptocop_sessions = NULL;
3512 cryptocop_running_job = NULL;
3514 printk("stream co-processor: init done.\n");
3518 static void __exit exit_stream_coprocessor(void)
3520 release_cryptocop();
3521 cryptocop_job_queue_close();
3524 module_init(init_stream_coprocessor);
3525 module_exit(exit_stream_coprocessor);