/*
 * linux/drivers/s390/crypto/ap_bus.c
 *
 * Copyright (C) 2006 IBM Corporation
 * Author(s): Cornelia Huck <cornelia.huck@de.ibm.com>
 *	      Martin Schwidefsky <schwidefsky@de.ibm.com>
 *	      Ralph Wuerthner <rwuerthn@de.ibm.com>
 *
 * Adjunct processor bus.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/workqueue.h>
#include <linux/notifier.h>
#include <linux/kthread.h>
#include <linux/mutex.h>
#include <asm/s390_rdev.h>
#include <asm/reset.h>

#include "ap_bus.h"

/* Some prototypes. */
static void ap_scan_bus(struct work_struct *);
static void ap_poll_all(unsigned long);
static void ap_poll_timeout(unsigned long);
static int ap_poll_thread_start(void);
static void ap_poll_thread_stop(void);
static void ap_request_timeout(unsigned long);

MODULE_AUTHOR("IBM Corporation");
MODULE_DESCRIPTION("Adjunct Processor Bus driver, "
		   "Copyright 2006 IBM Corporation");
MODULE_LICENSE("GPL");

int ap_domain_index = -1;	/* Adjunct Processor Domain Index */
module_param_named(domain, ap_domain_index, int, 0000);
MODULE_PARM_DESC(domain, "domain index for ap devices");
EXPORT_SYMBOL(ap_domain_index);

static int ap_thread_flag = 1;
module_param_named(poll_thread, ap_thread_flag, int, 0000);
MODULE_PARM_DESC(poll_thread, "Turn on/off poll thread, default is 1 (on).");

static struct device *ap_root_device = NULL;
static DEFINE_SPINLOCK(ap_device_lock);
static LIST_HEAD(ap_device_list);

/*
 * Workqueue & timer for bus rescan.
 */
static struct workqueue_struct *ap_work_queue;
static struct timer_list ap_config_timer;
static int ap_config_time = AP_CONFIG_TIME;
static DECLARE_WORK(ap_config_work, ap_scan_bus);

/*
 * Tasklet & timer for AP request polling.
 */
static struct timer_list ap_poll_timer = TIMER_INITIALIZER(ap_poll_timeout, 0, 0);
static DECLARE_TASKLET(ap_tasklet, ap_poll_all, 0);
static atomic_t ap_poll_requests = ATOMIC_INIT(0);
static DECLARE_WAIT_QUEUE_HEAD(ap_poll_wait);
static struct task_struct *ap_poll_kthread = NULL;
static DEFINE_MUTEX(ap_poll_thread_mutex);

/**
 * Test if ap instructions are available.
 *
 * Returns 0 if the ap instructions are installed.
 */
static inline int ap_instructions_available(void)
{
	register unsigned long reg0 asm ("0") = AP_MKQID(0, 0);
	register unsigned long reg1 asm ("1") = -ENODEV;
	register unsigned long reg2 asm ("2") = 0UL;

	asm volatile(
		"   .long 0xb2af0000\n"		/* PQAP(TAPQ) */
		"0: la    %1,0\n"
		"1:\n"
		EX_TABLE(0b, 1b)
		: "+d" (reg0), "+d" (reg1), "+d" (reg2) : : "cc" );
	/* reg1 keeps -ENODEV if PQAP raised an operation exception. */
	return reg1;
}

/**
 * Test adjunct processor queue.
 * @qid: the ap queue number
 * @queue_depth: pointer to queue depth value
 * @device_type: pointer to device type value
 *
 * Returns ap queue status structure.
 */
static inline struct ap_queue_status
ap_test_queue(ap_qid_t qid, int *queue_depth, int *device_type)
{
	register unsigned long reg0 asm ("0") = qid;
	register struct ap_queue_status reg1 asm ("1");
	register unsigned long reg2 asm ("2") = 0UL;

	asm volatile(".long 0xb2af0000"		/* PQAP(TAPQ) */
		     : "+d" (reg0), "=d" (reg1), "+d" (reg2) : : "cc");
	*device_type = (int) (reg2 >> 24);
	*queue_depth = (int) (reg2 & 0xff);
	return reg1;
}

/**
 * Reset adjunct processor queue.
 * @qid: the ap queue number
 *
 * Returns ap queue status structure.
 */
static inline struct ap_queue_status ap_reset_queue(ap_qid_t qid)
{
	register unsigned long reg0 asm ("0") = qid | 0x01000000UL;
	register struct ap_queue_status reg1 asm ("1");
	register unsigned long reg2 asm ("2") = 0UL;

	asm volatile(
		".long 0xb2af0000"		/* PQAP(RAPQ) */
		: "+d" (reg0), "=d" (reg1), "+d" (reg2) : : "cc");
	return reg1;
}

/**
 * Send message to adjunct processor queue.
 * @qid: the ap queue number
 * @psmid: the program supplied message identifier
 * @msg: the message text
 * @length: the message length
 *
 * Returns ap queue status structure.
 *
 * Condition code 1 on NQAP can't happen because the L bit is 1.
 *
 * Condition code 2 on NQAP also means the send is incomplete,
 * because a segment boundary was reached. The NQAP is repeated.
 */
static inline struct ap_queue_status
__ap_send(ap_qid_t qid, unsigned long long psmid, void *msg, size_t length)
{
	typedef struct { char _[length]; } msgblock;
	register unsigned long reg0 asm ("0") = qid | 0x40000000UL;
	register struct ap_queue_status reg1 asm ("1");
	register unsigned long reg2 asm ("2") = (unsigned long) msg;
	register unsigned long reg3 asm ("3") = (unsigned long) length;
	register unsigned long reg4 asm ("4") = (unsigned int) (psmid >> 32);
	register unsigned long reg5 asm ("5") = (unsigned int) psmid;

	asm volatile (
		"0: .long 0xb2ad0042\n"		/* NQAP */
		"   brc   2,0b"
		: "+d" (reg0), "=d" (reg1), "+d" (reg2), "+d" (reg3)
		: "d" (reg4), "d" (reg5), "m" (*(msgblock *) msg)
		: "cc" );
	return reg1;
}

int ap_send(ap_qid_t qid, unsigned long long psmid, void *msg, size_t length)
{
	struct ap_queue_status status;

	status = __ap_send(qid, psmid, msg, length);
	switch (status.response_code) {
	case AP_RESPONSE_NORMAL:
		return 0;
	case AP_RESPONSE_Q_FULL:
	case AP_RESPONSE_RESET_IN_PROGRESS:
		return -EBUSY;
	default:	/* Device is gone. */
		return -ENODEV;
	}
}
EXPORT_SYMBOL(ap_send);

/**
 * Receive message from adjunct processor queue.
 * @qid: the ap queue number
 * @psmid: pointer to program supplied message identifier
 * @msg: the message text
 * @length: the message length
 *
 * Returns ap queue status structure.
 *
 * Condition code 1 on DQAP means the receive has taken place
 * but only partially. The response is incomplete, hence the
 * DQAP is repeated.
 *
 * Condition code 2 on DQAP also means the receive is incomplete,
 * this time because a segment boundary was reached. Again, the
 * DQAP is repeated.
 *
 * Note that gpr2 is used by the DQAP instruction to keep track of
 * any 'residual' length, in case the instruction gets interrupted.
 * Hence it gets zeroed before the instruction.
 */
static inline struct ap_queue_status
__ap_recv(ap_qid_t qid, unsigned long long *psmid, void *msg, size_t length)
{
	typedef struct { char _[length]; } msgblock;
	register unsigned long reg0 asm("0") = qid | 0x80000000UL;
	register struct ap_queue_status reg1 asm ("1");
	register unsigned long reg2 asm("2") = 0UL;
	register unsigned long reg4 asm("4") = (unsigned long) msg;
	register unsigned long reg5 asm("5") = (unsigned long) length;
	register unsigned long reg6 asm("6") = 0UL;
	register unsigned long reg7 asm("7") = 0UL;

	asm volatile(
		"0: .long 0xb2ae0064\n"		/* DQAP */
		"   brc   6,0b\n"
		: "+d" (reg0), "=d" (reg1), "+d" (reg2),
		  "+d" (reg4), "+d" (reg5), "+d" (reg6), "+d" (reg7),
		  "=m" (*(msgblock *) msg) : : "cc" );
	*psmid = (((unsigned long long) reg6) << 32) + reg7;
	return reg1;
}

int ap_recv(ap_qid_t qid, unsigned long long *psmid, void *msg, size_t length)
{
	struct ap_queue_status status;

	status = __ap_recv(qid, psmid, msg, length);
	switch (status.response_code) {
	case AP_RESPONSE_NORMAL:
		return 0;
	case AP_RESPONSE_NO_PENDING_REPLY:
		if (status.queue_empty)
			return -ENOENT;
		return -EBUSY;
	case AP_RESPONSE_RESET_IN_PROGRESS:
		return -EBUSY;
	default:
		return -ENODEV;
	}
}
EXPORT_SYMBOL(ap_recv);
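
/*
 * Usage sketch (illustrative only, not part of the driver): a synchronous
 * round trip over a raw AP queue with the exported helpers above. The qid,
 * psmid and buffers are assumptions; in-kernel users normally submit work
 * through ap_queue_message() further below and get the reply via the
 * driver's ->receive() callback.
 *
 *	rc = ap_send(qid, 0x0102030405060708ULL, msg, msglen);
 *	if (!rc)
 *		do {
 *			msleep(10);
 *			rc = ap_recv(qid, &psmid, reply, replylen);
 *		} while (rc == -ENOENT || rc == -EBUSY);
 *
 * -ENOENT means the queue is empty (no reply pending yet), -EBUSY means a
 * reply is pending but not ready or a reset is in progress, -ENODEV means
 * the device is gone; these mappings follow ap_recv() above.
 */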

/**
 * Check if an AP queue is available. The test is repeated for
 * AP_MAX_RESET times.
 * @qid: the ap queue number
 * @queue_depth: pointer to queue depth value
 * @device_type: pointer to device type value
 */
static int ap_query_queue(ap_qid_t qid, int *queue_depth, int *device_type)
{
	struct ap_queue_status status;
	int t_depth, t_device_type, rc, i;

	rc = -EBUSY;
	for (i = 0; i < AP_MAX_RESET; i++) {
		status = ap_test_queue(qid, &t_depth, &t_device_type);
		switch (status.response_code) {
		case AP_RESPONSE_NORMAL:
			*queue_depth = t_depth + 1;
			*device_type = t_device_type;
			rc = 0;
			break;
		case AP_RESPONSE_Q_NOT_AVAIL:
			rc = -ENODEV;
			break;
		case AP_RESPONSE_RESET_IN_PROGRESS:
			break;
		case AP_RESPONSE_DECONFIGURED:
			rc = -ENODEV;
			break;
		case AP_RESPONSE_CHECKSTOPPED:
			rc = -ENODEV;
			break;
		case AP_RESPONSE_BUSY:
			break;
		default:
			BUG();
		}
		if (rc != -EBUSY)
			break;
		if (i < AP_MAX_RESET - 1)
			udelay(5);
	}
	return rc;
}

/**
 * Reset an AP queue and wait for it to become available again.
 * @qid: the ap queue number
 */
static int ap_init_queue(ap_qid_t qid)
{
	struct ap_queue_status status;
	int rc, dummy, i;

	rc = -ENODEV;
	status = ap_reset_queue(qid);
	for (i = 0; i < AP_MAX_RESET; i++) {
		switch (status.response_code) {
		case AP_RESPONSE_NORMAL:
			if (status.queue_empty)
				rc = 0;
			break;
		case AP_RESPONSE_Q_NOT_AVAIL:
		case AP_RESPONSE_DECONFIGURED:
		case AP_RESPONSE_CHECKSTOPPED:
			i = AP_MAX_RESET;	/* return with -ENODEV */
			break;
		case AP_RESPONSE_RESET_IN_PROGRESS:
			rc = -EBUSY;
		case AP_RESPONSE_BUSY:
		default:
			break;
		}
		if (rc != -ENODEV && rc != -EBUSY)
			break;
		if (i < AP_MAX_RESET - 1) {
			udelay(5);
			status = ap_test_queue(qid, &dummy, &dummy);
		}
	}
	return rc;
}

/**
 * Arm request timeout if an AP device was idle and a new request is submitted.
 */
static void ap_increase_queue_count(struct ap_device *ap_dev)
{
	int timeout = ap_dev->drv->request_timeout;

	ap_dev->queue_count++;
	if (ap_dev->queue_count == 1) {
		mod_timer(&ap_dev->timeout, jiffies + timeout);
		ap_dev->reset = AP_RESET_ARMED;
	}
}

/**
 * AP device is still alive, re-schedule request timeout if there are still
 * pending requests.
 */
static void ap_decrease_queue_count(struct ap_device *ap_dev)
{
	int timeout = ap_dev->drv->request_timeout;

	ap_dev->queue_count--;
	if (ap_dev->queue_count > 0)
		mod_timer(&ap_dev->timeout, jiffies + timeout);
	else
		/*
		 * The timeout timer should be disabled now - since
		 * del_timer_sync() is very expensive, we just tell via the
		 * reset flag to ignore the pending timeout timer.
		 */
		ap_dev->reset = AP_RESET_IGNORE;
}
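
/*
 * Note on the per-device reset flag (all transitions are in this file):
 * ap_increase_queue_count() arms the request timeout and sets AP_RESET_ARMED,
 * ap_request_timeout() promotes it to AP_RESET_DO when the timer fires,
 * __ap_poll_all() performs the actual ap_reset() on AP_RESET_DO, and
 * ap_decrease_queue_count()/ap_reset() set AP_RESET_IGNORE so that a stale
 * timer expiry is simply ignored instead of paying for del_timer_sync().
 */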

/**
 * AP device related attributes.
 */
static ssize_t ap_hwtype_show(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	struct ap_device *ap_dev = to_ap_dev(dev);
	return snprintf(buf, PAGE_SIZE, "%d\n", ap_dev->device_type);
}
static DEVICE_ATTR(hwtype, 0444, ap_hwtype_show, NULL);

static ssize_t ap_depth_show(struct device *dev, struct device_attribute *attr,
			     char *buf)
{
	struct ap_device *ap_dev = to_ap_dev(dev);
	return snprintf(buf, PAGE_SIZE, "%d\n", ap_dev->queue_depth);
}
static DEVICE_ATTR(depth, 0444, ap_depth_show, NULL);

static ssize_t ap_request_count_show(struct device *dev,
				     struct device_attribute *attr,
				     char *buf)
{
	struct ap_device *ap_dev = to_ap_dev(dev);
	int rc;

	spin_lock_bh(&ap_dev->lock);
	rc = snprintf(buf, PAGE_SIZE, "%d\n", ap_dev->total_request_count);
	spin_unlock_bh(&ap_dev->lock);
	return rc;
}

static DEVICE_ATTR(request_count, 0444, ap_request_count_show, NULL);

static ssize_t ap_modalias_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "ap:t%02X", to_ap_dev(dev)->device_type);
}

static DEVICE_ATTR(modalias, 0444, ap_modalias_show, NULL);

static struct attribute *ap_dev_attrs[] = {
	&dev_attr_hwtype.attr,
	&dev_attr_depth.attr,
	&dev_attr_request_count.attr,
	&dev_attr_modalias.attr,
	NULL
};
static struct attribute_group ap_dev_attr_group = {
	.attrs = ap_dev_attrs
};
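
/*
 * The attributes above show up as per-device sysfs files. With the bus name
 * "ap" and the "card%02x" device names used in ap_scan_bus() below, the
 * expected (illustrative) paths are:
 *
 *	/sys/bus/ap/devices/card<xx>/hwtype
 *	/sys/bus/ap/devices/card<xx>/depth
 *	/sys/bus/ap/devices/card<xx>/request_count
 *	/sys/bus/ap/devices/card<xx>/modalias
 */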

/**
 * AP bus driver registration/unregistration.
 */
static int ap_bus_match(struct device *dev, struct device_driver *drv)
{
	struct ap_device *ap_dev = to_ap_dev(dev);
	struct ap_driver *ap_drv = to_ap_drv(drv);
	struct ap_device_id *id;

	/*
	 * Compare device type of the device with the list of
	 * supported types of the device_driver.
	 */
	for (id = ap_drv->ids; id->match_flags; id++) {
		if ((id->match_flags & AP_DEVICE_ID_MATCH_DEVICE_TYPE) &&
		    (id->dev_type != ap_dev->device_type))
			continue;
		return 1;
	}
	return 0;
}

/**
 * uevent function for AP devices. It sets up a single environment
 * variable DEV_TYPE which contains the hardware device type.
 */
static int ap_uevent(struct device *dev, char **envp, int num_envp,
		     char *buffer, int buffer_size)
{
	struct ap_device *ap_dev = to_ap_dev(dev);
	int retval = 0, length = 0, i = 0;

	if (!ap_dev)
		return -ENODEV;

	/* Set up DEV_TYPE environment variable. */
	retval = add_uevent_var(envp, num_envp, &i,
				buffer, buffer_size, &length,
				"DEV_TYPE=%04X", ap_dev->device_type);
	if (retval)
		return retval;

	/* Add MODALIAS= */
	retval = add_uevent_var(envp, num_envp, &i,
				buffer, buffer_size, &length,
				"MODALIAS=ap:t%02X", ap_dev->device_type);

	envp[i] = NULL;
	return retval;
}

static struct bus_type ap_bus_type = {
	.name = "ap",
	.match = &ap_bus_match,
	.uevent = &ap_uevent,
};

static int ap_device_probe(struct device *dev)
{
	struct ap_device *ap_dev = to_ap_dev(dev);
	struct ap_driver *ap_drv = to_ap_drv(dev->driver);
	int rc;

	ap_dev->drv = ap_drv;
	spin_lock_bh(&ap_device_lock);
	list_add(&ap_dev->list, &ap_device_list);
	spin_unlock_bh(&ap_device_lock);
	rc = ap_drv->probe ? ap_drv->probe(ap_dev) : -ENODEV;
	return rc;
}

/**
 * Flush all requests from the request/pending queue of an AP device.
 * @ap_dev: pointer to the AP device.
 */
static void __ap_flush_queue(struct ap_device *ap_dev)
{
	struct ap_message *ap_msg, *next;

	list_for_each_entry_safe(ap_msg, next, &ap_dev->pendingq, list) {
		list_del_init(&ap_msg->list);
		ap_dev->pendingq_count--;
		ap_dev->drv->receive(ap_dev, ap_msg, ERR_PTR(-ENODEV));
	}
	list_for_each_entry_safe(ap_msg, next, &ap_dev->requestq, list) {
		list_del_init(&ap_msg->list);
		ap_dev->requestq_count--;
		ap_dev->drv->receive(ap_dev, ap_msg, ERR_PTR(-ENODEV));
	}
}

void ap_flush_queue(struct ap_device *ap_dev)
{
	spin_lock_bh(&ap_dev->lock);
	__ap_flush_queue(ap_dev);
	spin_unlock_bh(&ap_dev->lock);
}
EXPORT_SYMBOL(ap_flush_queue);

static int ap_device_remove(struct device *dev)
{
	struct ap_device *ap_dev = to_ap_dev(dev);
	struct ap_driver *ap_drv = ap_dev->drv;

	ap_flush_queue(ap_dev);
	del_timer_sync(&ap_dev->timeout);
	if (ap_drv->remove)
		ap_drv->remove(ap_dev);
	spin_lock_bh(&ap_device_lock);
	list_del_init(&ap_dev->list);
	spin_unlock_bh(&ap_device_lock);
	spin_lock_bh(&ap_dev->lock);
	atomic_sub(ap_dev->queue_count, &ap_poll_requests);
	spin_unlock_bh(&ap_dev->lock);
	return 0;
}

int ap_driver_register(struct ap_driver *ap_drv, struct module *owner,
		       char *name)
{
	struct device_driver *drv = &ap_drv->driver;

	drv->bus = &ap_bus_type;
	drv->probe = ap_device_probe;
	drv->remove = ap_device_remove;
	drv->owner = owner;
	drv->name = name;
	return driver_register(drv);
}
EXPORT_SYMBOL(ap_driver_register);

void ap_driver_unregister(struct ap_driver *ap_drv)
{
	driver_unregister(&ap_drv->driver);
}
EXPORT_SYMBOL(ap_driver_unregister);
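
/*
 * Registration sketch for an AP device driver (illustrative; the field names
 * follow the ap_driver members referenced in this file: ->ids, ->probe,
 * ->remove, ->receive and ->request_timeout, everything else is assumed):
 *
 *	static struct ap_device_id my_ap_ids[] = {
 *		{ .dev_type = AP_DEVICE_TYPE_PCICA,
 *		  .match_flags = AP_DEVICE_ID_MATCH_DEVICE_TYPE },
 *		{ },
 *	};
 *
 *	static struct ap_driver my_ap_driver = {
 *		.ids = my_ap_ids,
 *		.probe = my_probe,
 *		.remove = my_remove,
 *		.receive = my_receive,
 *		.request_timeout = 60 * HZ,
 *	};
 *
 *	rc = ap_driver_register(&my_ap_driver, THIS_MODULE, "my_ap_drv");
 */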

/*
 * AP bus attributes.
 */
static ssize_t ap_domain_show(struct bus_type *bus, char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%d\n", ap_domain_index);
}

static BUS_ATTR(ap_domain, 0444, ap_domain_show, NULL);

static ssize_t ap_config_time_show(struct bus_type *bus, char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%d\n", ap_config_time);
}

static ssize_t ap_config_time_store(struct bus_type *bus,
				    const char *buf, size_t count)
{
	int time;

	if (sscanf(buf, "%d\n", &time) != 1 || time < 5 || time > 120)
		return -EINVAL;
	ap_config_time = time;
	if (!timer_pending(&ap_config_timer) ||
	    !mod_timer(&ap_config_timer, jiffies + ap_config_time * HZ)) {
		ap_config_timer.expires = jiffies + ap_config_time * HZ;
		add_timer(&ap_config_timer);
	}
	return count;
}

static BUS_ATTR(config_time, 0644, ap_config_time_show, ap_config_time_store);

static ssize_t ap_poll_thread_show(struct bus_type *bus, char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%d\n", ap_poll_kthread ? 1 : 0);
}

static ssize_t ap_poll_thread_store(struct bus_type *bus,
				    const char *buf, size_t count)
{
	int flag, rc;

	if (sscanf(buf, "%d\n", &flag) != 1)
		return -EINVAL;
	if (flag) {
		rc = ap_poll_thread_start();
		if (rc)
			return rc;
	}
	else
		ap_poll_thread_stop();
	return count;
}

static BUS_ATTR(poll_thread, 0644, ap_poll_thread_show, ap_poll_thread_store);

static struct bus_attribute *const ap_bus_attrs[] = {
	&bus_attr_ap_domain,
	&bus_attr_config_time,
	&bus_attr_poll_thread,
	NULL
};
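
/*
 * The bus attributes above allow runtime tuning from user space, e.g.
 * (paths and values illustrative):
 *
 *	cat /sys/bus/ap/ap_domain
 *	echo 30 > /sys/bus/ap/config_time	(rescan interval, 5..120 seconds)
 *	echo 0 > /sys/bus/ap/poll_thread	(stop the polling kthread)
 */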

/**
 * Pick one of the 16 ap domains.
 */
static int ap_select_domain(void)
{
	int queue_depth, device_type, count, max_count, best_domain;
	int rc, i, j;

	/*
	 * We want to use a single domain. Either the one specified with
	 * the "domain=" parameter or the domain with the maximum number
	 * of devices.
	 */
	if (ap_domain_index >= 0 && ap_domain_index < AP_DOMAINS)
		/* Domain has already been selected. */
		return 0;
	best_domain = -1;
	max_count = 0;
	for (i = 0; i < AP_DOMAINS; i++) {
		count = 0;
		for (j = 0; j < AP_DEVICES; j++) {
			ap_qid_t qid = AP_MKQID(j, i);
			rc = ap_query_queue(qid, &queue_depth, &device_type);
			if (rc)
				continue;
			count++;
		}
		if (count > max_count) {
			max_count = count;
			best_domain = i;
		}
	}
	if (best_domain >= 0) {
		ap_domain_index = best_domain;
		return 0;
	}
	return -ENODEV;
}

/**
 * Find the device type if query queue returned a device type of 0.
 * @ap_dev: pointer to the AP device.
 */
static int ap_probe_device_type(struct ap_device *ap_dev)
{
	static unsigned char msg[] = {
		0x00,0x06,0x00,0x00,0x00,0x00,0x00,0x00,
		0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
		0x00,0x00,0x00,0x58,0x00,0x00,0x00,0x00,
		0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
		0x01,0x00,0x43,0x43,0x41,0x2d,0x41,0x50,
		0x50,0x4c,0x20,0x20,0x20,0x01,0x01,0x01,
		0x00,0x00,0x00,0x00,0x50,0x4b,0x00,0x00,
		0x00,0x00,0x01,0x1c,0x00,0x00,0x00,0x00,
		0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
		0x00,0x00,0x05,0xb8,0x00,0x00,0x00,0x00,
		0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
		0x70,0x00,0x41,0x00,0x00,0x00,0x00,0x00,
		0x00,0x00,0x54,0x32,0x01,0x00,0xa0,0x00,
		0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
		0x00,0x00,0x00,0x00,0xb8,0x05,0x00,0x00,
		0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
		0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
		0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
		0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
		0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
		0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
		0x00,0x00,0x0a,0x00,0x00,0x00,0x00,0x00,
		0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
		0x00,0x00,0x00,0x00,0x00,0x00,0x08,0x00,
		0x49,0x43,0x53,0x46,0x20,0x20,0x20,0x20,
		0x50,0x4b,0x0a,0x00,0x50,0x4b,0x43,0x53,
		0x2d,0x31,0x2e,0x32,0x37,0x00,0x11,0x22,
		0x33,0x44,0x55,0x66,0x77,0x88,0x99,0x00,
		0x11,0x22,0x33,0x44,0x55,0x66,0x77,0x88,
		0x99,0x00,0x11,0x22,0x33,0x44,0x55,0x66,
		0x77,0x88,0x99,0x00,0x11,0x22,0x33,0x44,
		0x55,0x66,0x77,0x88,0x99,0x00,0x11,0x22,
		0x33,0x44,0x55,0x66,0x77,0x88,0x99,0x00,
		0x11,0x22,0x33,0x5d,0x00,0x5b,0x00,0x77,
		0x88,0x1e,0x00,0x00,0x57,0x00,0x00,0x00,
		0x00,0x04,0x00,0x00,0x4f,0x00,0x00,0x00,
		0x03,0x02,0x00,0x00,0x40,0x01,0x00,0x01,
		0xce,0x02,0x68,0x2d,0x5f,0xa9,0xde,0x0c,
		0xf6,0xd2,0x7b,0x58,0x4b,0xf9,0x28,0x68,
		0x3d,0xb4,0xf4,0xef,0x78,0xd5,0xbe,0x66,
		0x63,0x42,0xef,0xf8,0xfd,0xa4,0xf8,0xb0,
		0x8e,0x29,0xc2,0xc9,0x2e,0xd8,0x45,0xb8,
		0x53,0x8c,0x6f,0x4e,0x72,0x8f,0x6c,0x04,
		0x9c,0x88,0xfc,0x1e,0xc5,0x83,0x55,0x57,
		0xf7,0xdd,0xfd,0x4f,0x11,0x36,0x95,0x5d,
	};
	struct ap_queue_status status;
	unsigned long long psmid;
	char *reply;
	int rc, i;

	reply = (void *) get_zeroed_page(GFP_KERNEL);
	if (!reply) {
		rc = -ENOMEM;
		goto out;
	}

	status = __ap_send(ap_dev->qid, 0x0102030405060708ULL,
			   msg, sizeof(msg));
	if (status.response_code != AP_RESPONSE_NORMAL) {
		rc = -ENODEV;
		goto out_free;
	}

	/* Wait for the test message to complete. */
	for (i = 0; i < 6; i++) {
		mdelay(300);
		status = __ap_recv(ap_dev->qid, &psmid, reply, 4096);
		if (status.response_code == AP_RESPONSE_NORMAL &&
		    psmid == 0x0102030405060708ULL)
			break;
	}

	if (i < 6) {
		if (reply[0] == 0x00 && reply[1] == 0x86)
			ap_dev->device_type = AP_DEVICE_TYPE_PCICC;
		else
			ap_dev->device_type = AP_DEVICE_TYPE_PCICA;
		rc = 0;
	} else
		rc = -ENODEV;

out_free:
	free_page((unsigned long) reply);
out:
	return rc;
}

/**
 * Scan the ap bus for new devices.
 */
static int __ap_scan_bus(struct device *dev, void *data)
{
	return to_ap_dev(dev)->qid == (ap_qid_t)(unsigned long) data;
}

static void ap_device_release(struct device *dev)
{
	struct ap_device *ap_dev = to_ap_dev(dev);

	kfree(ap_dev);
}

static void ap_scan_bus(struct work_struct *unused)
{
	struct ap_device *ap_dev;
	struct device *dev;
	ap_qid_t qid;
	int queue_depth, device_type;
	int rc, i;

	if (ap_select_domain() != 0)
		return;
	for (i = 0; i < AP_DEVICES; i++) {
		qid = AP_MKQID(i, ap_domain_index);
		dev = bus_find_device(&ap_bus_type, NULL,
				      (void *)(unsigned long)qid,
				      __ap_scan_bus);
		rc = ap_query_queue(qid, &queue_depth, &device_type);
		if (dev) {
			if (rc == -EBUSY) {
				set_current_state(TASK_UNINTERRUPTIBLE);
				schedule_timeout(AP_RESET_TIMEOUT);
				rc = ap_query_queue(qid, &queue_depth,
						    &device_type);
			}
			ap_dev = to_ap_dev(dev);
			spin_lock_bh(&ap_dev->lock);
			if (rc || ap_dev->unregistered) {
				spin_unlock_bh(&ap_dev->lock);
				device_unregister(dev);
				put_device(dev);
				continue;
			}
			spin_unlock_bh(&ap_dev->lock);
			put_device(dev);
			continue;
		}
		if (rc)
			continue;
		rc = ap_init_queue(qid);
		if (rc)
			continue;
		ap_dev = kzalloc(sizeof(*ap_dev), GFP_KERNEL);
		if (!ap_dev)
			break;
		ap_dev->qid = qid;
		ap_dev->queue_depth = queue_depth;
		ap_dev->unregistered = 1;
		spin_lock_init(&ap_dev->lock);
		INIT_LIST_HEAD(&ap_dev->pendingq);
		INIT_LIST_HEAD(&ap_dev->requestq);
		INIT_LIST_HEAD(&ap_dev->list);
		setup_timer(&ap_dev->timeout, ap_request_timeout,
			    (unsigned long) ap_dev);
		if (device_type == 0)
			ap_probe_device_type(ap_dev);
		else
			ap_dev->device_type = device_type;

		ap_dev->device.bus = &ap_bus_type;
		ap_dev->device.parent = ap_root_device;
		snprintf(ap_dev->device.bus_id, BUS_ID_SIZE, "card%02x",
			 AP_QID_DEVICE(ap_dev->qid));
		ap_dev->device.release = ap_device_release;
		rc = device_register(&ap_dev->device);
		if (rc) {
			kfree(ap_dev);
			continue;
		}
		/* Add device attributes. */
		rc = sysfs_create_group(&ap_dev->device.kobj,
					&ap_dev_attr_group);
		if (!rc) {
			spin_lock_bh(&ap_dev->lock);
			ap_dev->unregistered = 0;
			spin_unlock_bh(&ap_dev->lock);
		}
		else
			device_unregister(&ap_dev->device);
	}
}

static void ap_config_timeout(unsigned long ptr)
{
	queue_work(ap_work_queue, &ap_config_work);
	ap_config_timer.expires = jiffies + ap_config_time * HZ;
	add_timer(&ap_config_timer);
}

/**
 * Set up the timer to run the poll tasklet
 */
static inline void ap_schedule_poll_timer(void)
{
	if (timer_pending(&ap_poll_timer))
		return;
	mod_timer(&ap_poll_timer, jiffies + AP_POLL_TIME);
}

/**
 * Receive pending reply messages from an AP device.
 * @ap_dev: pointer to the AP device
 * @flags: pointer to control flags, bit 2^0 is set if another poll is
 *	   required, bit 2^1 is set if the poll timer needs to get armed
 * Returns 0 if the device is still present, -ENODEV if not.
 */
static int ap_poll_read(struct ap_device *ap_dev, unsigned long *flags)
{
	struct ap_queue_status status;
	struct ap_message *ap_msg;

	if (ap_dev->queue_count <= 0)
		return 0;
	status = __ap_recv(ap_dev->qid, &ap_dev->reply->psmid,
			   ap_dev->reply->message, ap_dev->reply->length);
	switch (status.response_code) {
	case AP_RESPONSE_NORMAL:
		atomic_dec(&ap_poll_requests);
		ap_decrease_queue_count(ap_dev);
		list_for_each_entry(ap_msg, &ap_dev->pendingq, list) {
			if (ap_msg->psmid != ap_dev->reply->psmid)
				continue;
			list_del_init(&ap_msg->list);
			ap_dev->pendingq_count--;
			ap_dev->drv->receive(ap_dev, ap_msg, ap_dev->reply);
			break;
		}
		if (ap_dev->queue_count > 0)
			*flags |= 1;
		break;
	case AP_RESPONSE_NO_PENDING_REPLY:
		if (status.queue_empty) {
			/* The card shouldn't forget requests but who knows. */
			atomic_sub(ap_dev->queue_count, &ap_poll_requests);
			ap_dev->queue_count = 0;
			list_splice_init(&ap_dev->pendingq, &ap_dev->requestq);
			ap_dev->requestq_count += ap_dev->pendingq_count;
			ap_dev->pendingq_count = 0;
		} else
			*flags |= 2;
		break;
	default:
		return -ENODEV;
	}
	return 0;
}

/**
 * Send messages from the request queue to an AP device.
 * @ap_dev: pointer to the AP device
 * @flags: pointer to control flags, bit 2^0 is set if another poll is
 *	   required, bit 2^1 is set if the poll timer needs to get armed
 * Returns 0 if the device is still present, -ENODEV if not.
 */
static int ap_poll_write(struct ap_device *ap_dev, unsigned long *flags)
{
	struct ap_queue_status status;
	struct ap_message *ap_msg;

	if (ap_dev->requestq_count <= 0 ||
	    ap_dev->queue_count >= ap_dev->queue_depth)
		return 0;
	/* Start the next request on the queue. */
	ap_msg = list_entry(ap_dev->requestq.next, struct ap_message, list);
	status = __ap_send(ap_dev->qid, ap_msg->psmid,
			   ap_msg->message, ap_msg->length);
	switch (status.response_code) {
	case AP_RESPONSE_NORMAL:
		atomic_inc(&ap_poll_requests);
		ap_increase_queue_count(ap_dev);
		list_move_tail(&ap_msg->list, &ap_dev->pendingq);
		ap_dev->requestq_count--;
		ap_dev->pendingq_count++;
		if (ap_dev->queue_count < ap_dev->queue_depth &&
		    ap_dev->requestq_count > 0)
			*flags |= 1;
		*flags |= 2;
		break;
	case AP_RESPONSE_Q_FULL:
	case AP_RESPONSE_RESET_IN_PROGRESS:
		*flags |= 2;
		break;
	case AP_RESPONSE_MESSAGE_TOO_BIG:
		return -EINVAL;
	default:
		return -ENODEV;
	}
	return 0;
}

/**
 * Poll AP device for pending replies and send new messages. If either
 * ap_poll_read or ap_poll_write returns -ENODEV unregister the device.
 * @ap_dev: pointer to the bus device
 * @flags: pointer to control flags, bit 2^0 is set if another poll is
 *	   required, bit 2^1 is set if the poll timer needs to get armed
 * Returns 0 if the device is still present, -ENODEV if not.
 */
static inline int ap_poll_queue(struct ap_device *ap_dev, unsigned long *flags)
{
	int rc;

	rc = ap_poll_read(ap_dev, flags);
	if (rc)
		return rc;
	return ap_poll_write(ap_dev, flags);
}

/**
 * Queue a message to a device.
 * @ap_dev: pointer to the AP device
 * @ap_msg: the message to be queued
 */
static int __ap_queue_message(struct ap_device *ap_dev, struct ap_message *ap_msg)
{
	struct ap_queue_status status;

	if (list_empty(&ap_dev->requestq) &&
	    ap_dev->queue_count < ap_dev->queue_depth) {
		status = __ap_send(ap_dev->qid, ap_msg->psmid,
				   ap_msg->message, ap_msg->length);
		switch (status.response_code) {
		case AP_RESPONSE_NORMAL:
			list_add_tail(&ap_msg->list, &ap_dev->pendingq);
			atomic_inc(&ap_poll_requests);
			ap_dev->pendingq_count++;
			ap_increase_queue_count(ap_dev);
			ap_dev->total_request_count++;
			break;
		case AP_RESPONSE_Q_FULL:
		case AP_RESPONSE_RESET_IN_PROGRESS:
			list_add_tail(&ap_msg->list, &ap_dev->requestq);
			ap_dev->requestq_count++;
			ap_dev->total_request_count++;
			return -EBUSY;
		case AP_RESPONSE_MESSAGE_TOO_BIG:
			ap_dev->drv->receive(ap_dev, ap_msg, ERR_PTR(-EINVAL));
			return -EINVAL;
		default:	/* Device is gone. */
			ap_dev->drv->receive(ap_dev, ap_msg, ERR_PTR(-ENODEV));
			return -ENODEV;
		}
	} else {
		list_add_tail(&ap_msg->list, &ap_dev->requestq);
		ap_dev->requestq_count++;
		ap_dev->total_request_count++;
		return -EBUSY;
	}
	ap_schedule_poll_timer();
	return 0;
}

void ap_queue_message(struct ap_device *ap_dev, struct ap_message *ap_msg)
{
	unsigned long flags;
	int rc;

	spin_lock_bh(&ap_dev->lock);
	if (!ap_dev->unregistered) {
		/* Make room on the queue by polling for finished requests. */
		rc = ap_poll_queue(ap_dev, &flags);
		if (!rc)
			rc = __ap_queue_message(ap_dev, ap_msg);
		if (!rc)
			wake_up(&ap_poll_wait);
		if (rc == -ENODEV)
			ap_dev->unregistered = 1;
	} else {
		ap_dev->drv->receive(ap_dev, ap_msg, ERR_PTR(-ENODEV));
		rc = -ENODEV;
	}
	spin_unlock_bh(&ap_dev->lock);
	if (rc == -ENODEV)
		device_unregister(&ap_dev->device);
}
EXPORT_SYMBOL(ap_queue_message);
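
/*
 * Submission sketch (illustrative): a client driver fills in an ap_message
 * and queues it; the reply is delivered asynchronously through its
 * ->receive() callback, which also sees ERR_PTR(-EINVAL) or ERR_PTR(-ENODEV)
 * when the message is rejected or the device vanishes. The field names
 * follow the uses of struct ap_message in this file; the psmid value is an
 * arbitrary unique 64 bit identifier chosen by the caller.
 *
 *	ap_msg->psmid = my_unique_psmid;
 *	ap_msg->message = buffer;
 *	ap_msg->length = buflen;
 *	ap_queue_message(ap_dev, ap_msg);
 */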

/**
 * Cancel a crypto request. This is done by removing the request
 * from the device pendingq or requestq queue. Note that the
 * request stays on the AP queue. When it finishes the message
 * reply will be discarded because the psmid can't be found.
 * @ap_dev: AP device that has the message queued
 * @ap_msg: the message that is to be removed
 */
void ap_cancel_message(struct ap_device *ap_dev, struct ap_message *ap_msg)
{
	struct ap_message *tmp;

	spin_lock_bh(&ap_dev->lock);
	if (!list_empty(&ap_msg->list)) {
		list_for_each_entry(tmp, &ap_dev->pendingq, list)
			if (tmp->psmid == ap_msg->psmid) {
				ap_dev->pendingq_count--;
				goto found;
			}
		ap_dev->requestq_count--;
found:
		list_del_init(&ap_msg->list);
	}
	spin_unlock_bh(&ap_dev->lock);
}
EXPORT_SYMBOL(ap_cancel_message);

/**
 * AP receive polling for finished AP requests
 */
static void ap_poll_timeout(unsigned long unused)
{
	tasklet_schedule(&ap_tasklet);
}

/**
 * Reset a not responding AP device and move all requests from the
 * pending queue to the request queue.
 */
static void ap_reset(struct ap_device *ap_dev)
{
	int rc;

	ap_dev->reset = AP_RESET_IGNORE;
	atomic_sub(ap_dev->queue_count, &ap_poll_requests);
	ap_dev->queue_count = 0;
	list_splice_init(&ap_dev->pendingq, &ap_dev->requestq);
	ap_dev->requestq_count += ap_dev->pendingq_count;
	ap_dev->pendingq_count = 0;
	rc = ap_init_queue(ap_dev->qid);
	if (rc == -ENODEV)
		ap_dev->unregistered = 1;
}

/**
 * Poll all AP devices on the bus in a round robin fashion. Continue
 * polling until bit 2^0 of the control flags is not set. If bit 2^1
 * of the control flags has been set arm the poll timer.
 */
static int __ap_poll_all(struct ap_device *ap_dev, unsigned long *flags)
{
	spin_lock(&ap_dev->lock);
	if (!ap_dev->unregistered) {
		if (ap_poll_queue(ap_dev, flags))
			ap_dev->unregistered = 1;
		if (ap_dev->reset == AP_RESET_DO)
			ap_reset(ap_dev);
	}
	spin_unlock(&ap_dev->lock);
	return 0;
}

static void ap_poll_all(unsigned long dummy)
{
	unsigned long flags;
	struct ap_device *ap_dev;

	do {
		flags = 0;
		spin_lock(&ap_device_lock);
		list_for_each_entry(ap_dev, &ap_device_list, list) {
			__ap_poll_all(ap_dev, &flags);
		}
		spin_unlock(&ap_device_lock);
	} while (flags & 1);
	if (flags & 2)
		ap_schedule_poll_timer();
}
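
/*
 * Polling machinery overview (all pieces are in this file): finished
 * requests are reaped either by the ap_tasklet above, which ap_poll_timer
 * kicks via ap_poll_timeout(), or by the optional low priority kthread
 * below. Bit 2^0 of the control flags requests an immediate re-poll,
 * bit 2^1 makes ap_schedule_poll_timer() re-arm the timer.
 */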

/**
 * AP bus poll thread. The purpose of this thread is to poll for
 * finished requests in a loop if there is a "free" cpu - that is
 * a cpu that doesn't have anything better to do. The polling stops
 * as soon as there is another task or if all messages have been
 * delivered.
 */
static int ap_poll_thread(void *data)
{
	DECLARE_WAITQUEUE(wait, current);
	unsigned long flags;
	int requests;
	struct ap_device *ap_dev;

	set_user_nice(current, 19);
	while (1) {
		if (need_resched()) {
			schedule();
			continue;
		}
		add_wait_queue(&ap_poll_wait, &wait);
		set_current_state(TASK_INTERRUPTIBLE);
		if (kthread_should_stop())
			break;
		requests = atomic_read(&ap_poll_requests);
		if (requests <= 0)
			schedule();
		set_current_state(TASK_RUNNING);
		remove_wait_queue(&ap_poll_wait, &wait);

		flags = 0;
		spin_lock_bh(&ap_device_lock);
		list_for_each_entry(ap_dev, &ap_device_list, list) {
			__ap_poll_all(ap_dev, &flags);
		}
		spin_unlock_bh(&ap_device_lock);
	}
	set_current_state(TASK_RUNNING);
	remove_wait_queue(&ap_poll_wait, &wait);
	return 0;
}

static int ap_poll_thread_start(void)
{
	int rc;

	mutex_lock(&ap_poll_thread_mutex);
	if (!ap_poll_kthread) {
		ap_poll_kthread = kthread_run(ap_poll_thread, NULL, "appoll");
		rc = IS_ERR(ap_poll_kthread) ? PTR_ERR(ap_poll_kthread) : 0;
		if (rc)
			ap_poll_kthread = NULL;
	}
	else
		rc = 0;
	mutex_unlock(&ap_poll_thread_mutex);
	return rc;
}

static void ap_poll_thread_stop(void)
{
	mutex_lock(&ap_poll_thread_mutex);
	if (ap_poll_kthread) {
		kthread_stop(ap_poll_kthread);
		ap_poll_kthread = NULL;
	}
	mutex_unlock(&ap_poll_thread_mutex);
}

/**
 * Handling of request timeouts
 */
static void ap_request_timeout(unsigned long data)
{
	struct ap_device *ap_dev = (struct ap_device *) data;

	if (ap_dev->reset == AP_RESET_ARMED)
		ap_dev->reset = AP_RESET_DO;
}

static void ap_reset_domain(void)
{
	int i;

	for (i = 0; i < AP_DEVICES; i++)
		ap_reset_queue(AP_MKQID(i, ap_domain_index));
}

static void ap_reset_all(void)
{
	int i, j;

	for (i = 0; i < AP_DOMAINS; i++)
		for (j = 0; j < AP_DEVICES; j++)
			ap_reset_queue(AP_MKQID(j, i));
}

static struct reset_call ap_reset_call = {
	.fn = ap_reset_all,
};

/**
 * The module initialization code.
 */
int __init ap_module_init(void)
{
	int rc, i;

	if (ap_domain_index < -1 || ap_domain_index >= AP_DOMAINS) {
		printk(KERN_WARNING "Invalid param: domain = %d. "
		       "Not loading.\n", ap_domain_index);
		return -EINVAL;
	}
	if (ap_instructions_available() != 0) {
		printk(KERN_WARNING "AP instructions not installed.\n");
		return -ENODEV;
	}
	register_reset_call(&ap_reset_call);

	/* Create /sys/bus/ap. */
	rc = bus_register(&ap_bus_type);
	if (rc)
		goto out;
	for (i = 0; ap_bus_attrs[i]; i++) {
		rc = bus_create_file(&ap_bus_type, ap_bus_attrs[i]);
		if (rc)
			goto out_bus;
	}

	/* Create /sys/devices/ap. */
	ap_root_device = s390_root_dev_register("ap");
	rc = IS_ERR(ap_root_device) ? PTR_ERR(ap_root_device) : 0;
	if (rc)
		goto out_bus;

	ap_work_queue = create_singlethread_workqueue("kapwork");
	if (!ap_work_queue) {
		rc = -ENOMEM;
		goto out_root;
	}

	if (ap_select_domain() == 0)
		ap_scan_bus(NULL);

	/* Setup the ap bus rescan timer. */
	init_timer(&ap_config_timer);
	ap_config_timer.function = ap_config_timeout;
	ap_config_timer.data = 0;
	ap_config_timer.expires = jiffies + ap_config_time * HZ;
	add_timer(&ap_config_timer);

	/* Start the low priority AP bus poll thread. */
	if (ap_thread_flag) {
		rc = ap_poll_thread_start();
		if (rc)
			goto out_work;
	}

	return 0;

out_work:
	del_timer_sync(&ap_config_timer);
	del_timer_sync(&ap_poll_timer);
	destroy_workqueue(ap_work_queue);
out_root:
	s390_root_dev_unregister(ap_root_device);
out_bus:
	while (i--)
		bus_remove_file(&ap_bus_type, ap_bus_attrs[i]);
	bus_unregister(&ap_bus_type);
out:
	unregister_reset_call(&ap_reset_call);
	return rc;
}

static int __ap_match_all(struct device *dev, void *data)
{
	return 1;
}

/**
 * The module termination code
 */
void ap_module_exit(void)
{
	int i;
	struct device *dev;

	ap_reset_domain();
	ap_poll_thread_stop();
	del_timer_sync(&ap_config_timer);
	del_timer_sync(&ap_poll_timer);
	destroy_workqueue(ap_work_queue);
	tasklet_kill(&ap_tasklet);
	s390_root_dev_unregister(ap_root_device);
	while ((dev = bus_find_device(&ap_bus_type, NULL, NULL,
		    __ap_match_all))) {
		device_unregister(dev);
		put_device(dev);
	}
	for (i = 0; ap_bus_attrs[i]; i++)
		bus_remove_file(&ap_bus_type, ap_bus_attrs[i]);
	bus_unregister(&ap_bus_type);
	unregister_reset_call(&ap_reset_call);
}

#ifndef CONFIG_ZCRYPT_MONOLITHIC
module_init(ap_module_init);
module_exit(ap_module_exit);
#endif