/*
 *  drivers/s390/char/sclp.c
 *     core function to access sclp interface
 *
 *  S390 version
 *    Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation
 *    Author(s): Martin Peschke <mpeschke@de.ibm.com>
 *               Martin Schwidefsky <schwidefsky@de.ibm.com>
 */

#include <linux/module.h>
#include <linux/err.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/timer.h>
#include <linux/reboot.h>
#include <linux/jiffies.h>
#include <asm/types.h>
#include <asm/s390_ext.h>

#include "sclp.h"

#define SCLP_HEADER		"sclp: "

/* Structure for register_early_external_interrupt. */
static ext_int_info_t ext_int_info_hwc;

/* Lock to protect internal data consistency. */
static DEFINE_SPINLOCK(sclp_lock);

/* Mask of events that we can receive from the sclp interface. */
static sccb_mask_t sclp_receive_mask;

/* Mask of events that we can send to the sclp interface. */
static sccb_mask_t sclp_send_mask;

/* List of registered event listeners and senders. */
static struct list_head sclp_reg_list;

/* List of queued requests. */
static struct list_head sclp_req_queue;

/* Data for read and init requests. */
static struct sclp_req sclp_read_req;
static struct sclp_req sclp_init_req;
static char sclp_read_sccb[PAGE_SIZE] __attribute__((__aligned__(PAGE_SIZE)));
static char sclp_init_sccb[PAGE_SIZE] __attribute__((__aligned__(PAGE_SIZE)));

/* Timer for request retries. */
static struct timer_list sclp_request_timer;

/* Internal state: is the driver initialized? */
static volatile enum sclp_init_state_t {
	sclp_init_state_uninitialized,
	sclp_init_state_initializing,
	sclp_init_state_initialized
} sclp_init_state = sclp_init_state_uninitialized;

/* Internal state: is a request active at the sclp? */
static volatile enum sclp_running_state_t {
	sclp_running_state_idle,
	sclp_running_state_running
} sclp_running_state = sclp_running_state_idle;

/* Internal state: is a read request pending? */
static volatile enum sclp_reading_state_t {
	sclp_reading_state_idle,
	sclp_reading_state_reading
} sclp_reading_state = sclp_reading_state_idle;

/* Internal state: is the driver currently serving requests? */
static volatile enum sclp_activation_state_t {
	sclp_activation_state_active,
	sclp_activation_state_deactivating,
	sclp_activation_state_inactive,
	sclp_activation_state_activating
} sclp_activation_state = sclp_activation_state_active;

/* Internal state: is an init mask request pending? */
static volatile enum sclp_mask_state_t {
	sclp_mask_state_idle,
	sclp_mask_state_initializing
} sclp_mask_state = sclp_mask_state_idle;

/* Maximum retry counts */
#define SCLP_INIT_RETRY		3
#define SCLP_MASK_RETRY		3

/* Timeout intervals in seconds. */
#define SCLP_BUSY_INTERVAL	10
#define SCLP_RETRY_INTERVAL	15
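
/* SCLP_BUSY_INTERVAL is the delay before retrying a request that the SCLP
 * rejected as busy; SCLP_RETRY_INTERVAL bounds how long a started request may
 * run before the queue is kicked again (see __sclp_start_request()). */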

static void sclp_process_queue(void);
static int sclp_init_mask(int calculate);
static int sclp_init(void);

/* Perform service call. Return 0 on success, non-zero otherwise. */
static int
service_call(sclp_cmdw_t command, void *sccb)
{
	int cc;

	asm volatile(
		"	.insn	rre,0xb2200000,%1,%2\n"  /* servc %1,%2 */
		"	ipm	%0\n"
		"	srl	%0,28"
		: "=&d" (cc) : "d" (command), "a" (__pa(sccb))
		: "cc", "memory");
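	/* Condition code 3 is taken to mean that the SCLP facility is not
	 * operational and condition code 2 that it is busy; this reading is
	 * inferred from the errno mapping below. */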
	if (cc == 3)
		return -EIO;
	if (cc == 2)
		return -EBUSY;
	return 0;
}

/* Request timeout handler. Restart the request queue. If DATA is non-zero,
 * force restart of running request. */
static void
sclp_request_timeout(unsigned long data)
{
	unsigned long flags;

	if (data) {
		spin_lock_irqsave(&sclp_lock, flags);
		sclp_running_state = sclp_running_state_idle;
		spin_unlock_irqrestore(&sclp_lock, flags);
	}
	sclp_process_queue();
}

/* Set up request retry timer. Called while sclp_lock is locked. */
static inline void
__sclp_set_request_timer(unsigned long time, void (*function)(unsigned long),
			 unsigned long data)
{
	del_timer(&sclp_request_timer);
	sclp_request_timer.function = function;
	sclp_request_timer.data = data;
	sclp_request_timer.expires = jiffies + time;
	add_timer(&sclp_request_timer);
}

/* Try to start a request. Return zero if the request was successfully
 * started or if it will be started at a later time. Return non-zero otherwise.
 * Called while sclp_lock is locked. */
static int
__sclp_start_request(struct sclp_req *req)
{
	int rc;

	if (sclp_running_state != sclp_running_state_idle)
		return 0;
	del_timer(&sclp_request_timer);
	rc = service_call(req->command, req->sccb);
	req->start_count++;

	if (rc == 0) {
		/* Successfully started request */
		req->status = SCLP_REQ_RUNNING;
		sclp_running_state = sclp_running_state_running;
		__sclp_set_request_timer(SCLP_RETRY_INTERVAL * HZ,
					 sclp_request_timeout, 1);
		return 0;
	} else if (rc == -EBUSY) {
		/* Try again later */
		__sclp_set_request_timer(SCLP_BUSY_INTERVAL * HZ,
					 sclp_request_timeout, 0);
		return 0;
	}
	/* Request failed */
	req->status = SCLP_REQ_FAILED;
	return rc;
}

/* Try to start queued requests. */
static void
sclp_process_queue(void)
{
	struct sclp_req *req;
	int rc;
	unsigned long flags;

	spin_lock_irqsave(&sclp_lock, flags);
	if (sclp_running_state != sclp_running_state_idle) {
		spin_unlock_irqrestore(&sclp_lock, flags);
		return;
	}
	del_timer(&sclp_request_timer);
	while (!list_empty(&sclp_req_queue)) {
		req = list_entry(sclp_req_queue.next, struct sclp_req, list);
		rc = __sclp_start_request(req);
		if (rc == 0)
			break;
		/* Request failed. */
		list_del(&req->list);
		if (req->callback) {
			spin_unlock_irqrestore(&sclp_lock, flags);
			req->callback(req, req->callback_data);
			spin_lock_irqsave(&sclp_lock, flags);
		}
	}
	spin_unlock_irqrestore(&sclp_lock, flags);
}

/* Queue a new request. Return zero on success, non-zero otherwise. */
int
sclp_add_request(struct sclp_req *req)
{
	unsigned long flags;
	int rc;

	spin_lock_irqsave(&sclp_lock, flags);
	if ((sclp_init_state != sclp_init_state_initialized ||
	     sclp_activation_state != sclp_activation_state_active) &&
	    req != &sclp_init_req) {
		spin_unlock_irqrestore(&sclp_lock, flags);
		return -EIO;
	}
	req->status = SCLP_REQ_QUEUED;
	req->start_count = 0;
	list_add_tail(&req->list, &sclp_req_queue);
	rc = 0;
	/* Start if request is first in list */
	if (req->list.prev == &sclp_req_queue) {
		rc = __sclp_start_request(req);
		if (rc)
			list_del(&req->list);
	}
	spin_unlock_irqrestore(&sclp_lock, flags);
	return rc;
}

EXPORT_SYMBOL(sclp_add_request);

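/*
 * Minimal usage sketch for sclp_add_request() (hypothetical caller, not part
 * of this file). The request and its SCCB must stay valid until the callback
 * runs; the interrupt handler matches requests by the low 32 bits of the SCCB
 * address, so the SCCB is assumed to lie in storage addressable with 32 bits:
 *
 *	static void my_callback(struct sclp_req *req, void *data)
 *	{
 *		// req->status is SCLP_REQ_DONE or SCLP_REQ_FAILED here
 *	}
 *
 *	req->command = ...;		// an sclp_cmdw_t command word
 *	req->sccb = sccb;		// page-aligned SCCB buffer
 *	req->status = SCLP_REQ_FILLED;
 *	req->callback = my_callback;
 *	req->callback_data = NULL;
 *	rc = sclp_add_request(req);
 */
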
/* Dispatch events found in request buffer to registered listeners. Return 0
 * if all events were dispatched, non-zero otherwise. */
static int
sclp_dispatch_evbufs(struct sccb_header *sccb)
{
	unsigned long flags;
	struct evbuf_header *evbuf;
	struct list_head *l;
	struct sclp_register *reg;
	int offset;
	int rc;

	spin_lock_irqsave(&sclp_lock, flags);
	rc = 0;
	for (offset = sizeof(struct sccb_header); offset < sccb->length;
	     offset += evbuf->length) {
		/* Search for event handler */
		evbuf = (struct evbuf_header *) ((addr_t) sccb + offset);
		reg = NULL;
		list_for_each(l, &sclp_reg_list) {
			reg = list_entry(l, struct sclp_register, list);
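			/* Event type t corresponds to bit t of the sccb mask,
			 * counted from the most significant bit, hence the
			 * 1 << (32 - t) test (assuming 32-bit masks). */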
			if (reg->receive_mask & (1 << (32 - evbuf->type)))
				break;
			else
				reg = NULL;
		}
		if (reg && reg->receiver_fn) {
			spin_unlock_irqrestore(&sclp_lock, flags);
			reg->receiver_fn(evbuf);
			spin_lock_irqsave(&sclp_lock, flags);
		} else if (reg == NULL)
			rc = -ENOSYS;
	}
	spin_unlock_irqrestore(&sclp_lock, flags);
	return rc;
}

/* Read event data request callback. */
static void
sclp_read_cb(struct sclp_req *req, void *data)
{
	unsigned long flags;
	struct sccb_header *sccb;

	sccb = (struct sccb_header *) req->sccb;
	if (req->status == SCLP_REQ_DONE && (sccb->response_code == 0x20 ||
	    sccb->response_code == 0x220))
		sclp_dispatch_evbufs(sccb);
	spin_lock_irqsave(&sclp_lock, flags);
	sclp_reading_state = sclp_reading_state_idle;
	spin_unlock_irqrestore(&sclp_lock, flags);
}

/* Prepare read event data request. Called while sclp_lock is locked. */
static inline void
__sclp_make_read_req(void)
{
	struct sccb_header *sccb;

	sccb = (struct sccb_header *) sclp_read_sccb;
	clear_page(sccb);
	memset(&sclp_read_req, 0, sizeof(struct sclp_req));
	sclp_read_req.command = SCLP_CMDW_READDATA;
	sclp_read_req.status = SCLP_REQ_QUEUED;
	sclp_read_req.start_count = 0;
	sclp_read_req.callback = sclp_read_cb;
	sclp_read_req.sccb = sccb;
	sccb->length = PAGE_SIZE;
	sccb->function_code = 0;
	sccb->control_mask[2] = 0x80;
}

/* Search request list for request with matching sccb. Return request if found,
 * NULL otherwise. Called while sclp_lock is locked. */
static inline struct sclp_req *
__sclp_find_req(u32 sccb)
{
	struct list_head *l;
	struct sclp_req *req;

	list_for_each(l, &sclp_req_queue) {
		req = list_entry(l, struct sclp_req, list);
		if (sccb == (u32) (addr_t) req->sccb)
			return req;
	}
	return NULL;
}

/* Handler for external interruption. Perform request post-processing.
 * Prepare read event data request if necessary. Start processing of next
 * request on queue. */
static void
sclp_interrupt_handler(__u16 code)
{
	struct sclp_req *req;
	u32 finished_sccb;
	u32 evbuf_pending;

	spin_lock(&sclp_lock);
	finished_sccb = S390_lowcore.ext_params & 0xfffffff8;
	evbuf_pending = S390_lowcore.ext_params & 0x3;
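	/* The external interrupt parameter is assumed to carry the address of
	 * the finished SCCB in its upper bits and event-pending flags in its
	 * low bits, which is what the two masks above extract. */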
	if (finished_sccb) {
		req = __sclp_find_req(finished_sccb);
		if (req) {
			/* Request post-processing */
			list_del(&req->list);
			req->status = SCLP_REQ_DONE;
			if (req->callback) {
				spin_unlock(&sclp_lock);
				req->callback(req, req->callback_data);
				spin_lock(&sclp_lock);
			}
		}
		sclp_running_state = sclp_running_state_idle;
	}
	if (evbuf_pending && sclp_receive_mask != 0 &&
	    sclp_reading_state == sclp_reading_state_idle &&
	    sclp_activation_state == sclp_activation_state_active) {
		sclp_reading_state = sclp_reading_state_reading;
		__sclp_make_read_req();
		/* Add request to head of queue */
		list_add(&sclp_read_req.list, &sclp_req_queue);
	}
	spin_unlock(&sclp_lock);
	sclp_process_queue();
}

/* Convert interval in jiffies to TOD ticks. */
static inline u64
sclp_tod_from_jiffies(unsigned long jiffies)
{
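	/* Bit 51 of the TOD clock ticks once per microsecond, so one second
	 * is roughly 2^32 TOD units; the shift below is therefore only an
	 * approximation of the interval. */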
	return (u64) (jiffies / HZ) << 32;
}

/* Wait until a currently running request has finished. Note: while this function
 * is running, no timers are served on the calling CPU. */
void
sclp_sync_wait(void)
{
	unsigned long flags;
	unsigned long cr0, cr0_sync;
	u64 timeout;

	/* We'll be disabling timer interrupts, so we need a custom timeout
	 * mechanism */
	timeout = 0;
	if (timer_pending(&sclp_request_timer)) {
		/* Get timeout TOD value */
		timeout = get_clock() +
			  sclp_tod_from_jiffies(sclp_request_timer.expires -
						jiffies);
	}
	local_irq_save(flags);
	/* Prevent bottom half from executing once we force interrupts open */
	local_bh_disable();
	/* Enable service-signal interruption, disable timer interrupts */
	trace_hardirqs_on();
	__ctl_store(cr0, 0, 0);
	cr0_sync = cr0;
	cr0_sync |= 0x00000200;
	cr0_sync &= 0xFFFFF3AC;
	__ctl_load(cr0_sync, 0, 0);
	__raw_local_irq_stosm(0x01);
	/* Loop until driver state indicates finished request */
	while (sclp_running_state != sclp_running_state_idle) {
		/* Check for expired request timer */
		if (timer_pending(&sclp_request_timer) &&
		    get_clock() > timeout &&
		    del_timer(&sclp_request_timer))
			sclp_request_timer.function(sclp_request_timer.data);
		barrier();
		cpu_relax();
	}
	local_irq_disable();
	__ctl_load(cr0, 0, 0);
	_local_bh_enable();
	local_irq_restore(flags);
}

EXPORT_SYMBOL(sclp_sync_wait);

/* Dispatch changes in send and receive mask to registered listeners. */
static inline void
sclp_dispatch_state_change(void)
{
	struct list_head *l;
	struct sclp_register *reg;
	unsigned long flags;
	sccb_mask_t receive_mask;
	sccb_mask_t send_mask;

	do {
		spin_lock_irqsave(&sclp_lock, flags);
		reg = NULL;
		list_for_each(l, &sclp_reg_list) {
			reg = list_entry(l, struct sclp_register, list);
			receive_mask = reg->receive_mask & sclp_receive_mask;
			send_mask = reg->send_mask & sclp_send_mask;
			if (reg->sclp_receive_mask != receive_mask ||
			    reg->sclp_send_mask != send_mask) {
				reg->sclp_receive_mask = receive_mask;
				reg->sclp_send_mask = send_mask;
				break;
			} else
				reg = NULL;
		}
		spin_unlock_irqrestore(&sclp_lock, flags);
		if (reg && reg->state_change_fn)
			reg->state_change_fn(reg);
	} while (reg);
}

struct sclp_statechangebuf {
	struct evbuf_header	header;
	u8		validity_sclp_active_facility_mask : 1;
	u8		validity_sclp_receive_mask : 1;
	u8		validity_sclp_send_mask : 1;
	u8		validity_read_data_function_mask : 1;
	u16		_zeros : 12;
	u16		mask_length;
	u64		sclp_active_facility_mask;
	sccb_mask_t	sclp_receive_mask;
	sccb_mask_t	sclp_send_mask;
	u32		read_data_function_mask;
} __attribute__((packed));


/* State change event callback. Inform listeners of changes. */
static void
sclp_state_change_cb(struct evbuf_header *evbuf)
{
	unsigned long flags;
	struct sclp_statechangebuf *scbuf;

	scbuf = (struct sclp_statechangebuf *) evbuf;
	if (scbuf->mask_length != sizeof(sccb_mask_t))
		return;
	spin_lock_irqsave(&sclp_lock, flags);
	if (scbuf->validity_sclp_receive_mask)
		sclp_receive_mask = scbuf->sclp_receive_mask;
	if (scbuf->validity_sclp_send_mask)
		sclp_send_mask = scbuf->sclp_send_mask;
	spin_unlock_irqrestore(&sclp_lock, flags);
	sclp_dispatch_state_change();
}

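/* Built-in listener for state change events; sclp_init() puts it on
 * sclp_reg_list directly, so these events are always consumed. */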
static struct sclp_register sclp_state_change_event = {
	.receive_mask = EvTyp_StateChange_Mask,
	.receiver_fn = sclp_state_change_cb
};

/* Calculate receive and send mask of currently registered listeners.
 * Called while sclp_lock is locked. */
static inline void
__sclp_get_mask(sccb_mask_t *receive_mask, sccb_mask_t *send_mask)
{
	struct list_head *l;
	struct sclp_register *t;

	*receive_mask = 0;
	*send_mask = 0;
	list_for_each(l, &sclp_reg_list) {
		t = list_entry(l, struct sclp_register, list);
		*receive_mask |= t->receive_mask;
		*send_mask |= t->send_mask;
	}
}

/* Register event listener. Return 0 on success, non-zero otherwise. */
int
sclp_register(struct sclp_register *reg)
{
	unsigned long flags;
	sccb_mask_t receive_mask;
	sccb_mask_t send_mask;
	int rc;

	rc = sclp_init();
	if (rc)
		return rc;
	spin_lock_irqsave(&sclp_lock, flags);
	/* Check event mask for collisions */
	__sclp_get_mask(&receive_mask, &send_mask);
	if (reg->receive_mask & receive_mask || reg->send_mask & send_mask) {
		spin_unlock_irqrestore(&sclp_lock, flags);
		return -EBUSY;
	}
	/* Trigger initial state change callback */
	reg->sclp_receive_mask = 0;
	reg->sclp_send_mask = 0;
	list_add(&reg->list, &sclp_reg_list);
	spin_unlock_irqrestore(&sclp_lock, flags);
	rc = sclp_init_mask(1);
	if (rc) {
		spin_lock_irqsave(&sclp_lock, flags);
		list_del(&reg->list);
		spin_unlock_irqrestore(&sclp_lock, flags);
	}
	return rc;
}

EXPORT_SYMBOL(sclp_register);

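/*
 * Usage sketch for sclp_register() (hypothetical listener, not part of this
 * driver):
 *
 *	static void my_receiver(struct evbuf_header *evbuf)
 *	{
 *		// handle the event buffer
 *	}
 *
 *	static struct sclp_register my_listener = {
 *		.receive_mask = EvTyp_Msg_Mask,	// assumed EvTyp_*_Mask from sclp.h
 *		.receiver_fn = my_receiver,
 *	};
 *
 *	rc = sclp_register(&my_listener);	// -EBUSY on event mask collision
 */
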
/* Unregister event listener. */
void
sclp_unregister(struct sclp_register *reg)
{
	unsigned long flags;

	spin_lock_irqsave(&sclp_lock, flags);
	list_del(&reg->list);
	spin_unlock_irqrestore(&sclp_lock, flags);
	sclp_init_mask(1);
}

EXPORT_SYMBOL(sclp_unregister);

/* Remove event buffers which are marked processed. Return the number of
 * remaining event buffers. */
int
sclp_remove_processed(struct sccb_header *sccb)
{
	struct evbuf_header *evbuf;
	int unprocessed;
	u16 remaining;

	evbuf = (struct evbuf_header *) (sccb + 1);
	unprocessed = 0;
	remaining = sccb->length - sizeof(struct sccb_header);
	while (remaining > 0) {
		remaining -= evbuf->length;
		if (evbuf->flags & 0x80) {
			sccb->length -= evbuf->length;
			memcpy(evbuf, (void *) ((addr_t) evbuf + evbuf->length),
			       remaining);
		} else {
			unprocessed++;
			evbuf = (struct evbuf_header *)
					((addr_t) evbuf + evbuf->length);
		}
	}
	return unprocessed;
}

EXPORT_SYMBOL(sclp_remove_processed);

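/* SCCB layout for the write event mask command: receive_mask and send_mask
 * are programmed by the driver, the two trailing masks are filled in by the
 * SCLP in its response (interpretation based on sclp_init_mask() below). */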
struct init_sccb {
	struct sccb_header header;
	u16 _reserved;
	u16 mask_length;
	sccb_mask_t receive_mask;
	sccb_mask_t send_mask;
	sccb_mask_t sclp_send_mask;
	sccb_mask_t sclp_receive_mask;
} __attribute__((packed));

/* Prepare init mask request. Called while sclp_lock is locked. */
static inline void
__sclp_make_init_req(u32 receive_mask, u32 send_mask)
{
	struct init_sccb *sccb;

	sccb = (struct init_sccb *) sclp_init_sccb;
	clear_page(sccb);
	memset(&sclp_init_req, 0, sizeof(struct sclp_req));
	sclp_init_req.command = SCLP_CMDW_WRITEMASK;
	sclp_init_req.status = SCLP_REQ_FILLED;
	sclp_init_req.start_count = 0;
	sclp_init_req.callback = NULL;
	sclp_init_req.callback_data = NULL;
	sclp_init_req.sccb = sccb;
	sccb->header.length = sizeof(struct init_sccb);
	sccb->mask_length = sizeof(sccb_mask_t);
	sccb->receive_mask = receive_mask;
	sccb->send_mask = send_mask;
	sccb->sclp_receive_mask = 0;
	sccb->sclp_send_mask = 0;
}

/* Start init mask request. If calculate is non-zero, calculate the mask as
 * requested by registered listeners. Use zero mask otherwise. Return 0 on
 * success, non-zero otherwise. */
static int
sclp_init_mask(int calculate)
{
	unsigned long flags;
	struct init_sccb *sccb = (struct init_sccb *) sclp_init_sccb;
	sccb_mask_t receive_mask;
	sccb_mask_t send_mask;
	int retry;
	int rc;
	unsigned long wait;

	spin_lock_irqsave(&sclp_lock, flags);
	/* Check if interface is in appropriate state */
	if (sclp_mask_state != sclp_mask_state_idle) {
		spin_unlock_irqrestore(&sclp_lock, flags);
		return -EBUSY;
	}
	if (sclp_activation_state == sclp_activation_state_inactive) {
		spin_unlock_irqrestore(&sclp_lock, flags);
		return -EINVAL;
	}
	sclp_mask_state = sclp_mask_state_initializing;
	/* Determine mask */
	if (calculate)
		__sclp_get_mask(&receive_mask, &send_mask);
	else {
		receive_mask = 0;
		send_mask = 0;
	}
	rc = -EIO;
	for (retry = 0; retry <= SCLP_MASK_RETRY; retry++) {
		/* Prepare request */
		__sclp_make_init_req(receive_mask, send_mask);
		spin_unlock_irqrestore(&sclp_lock, flags);
		if (sclp_add_request(&sclp_init_req)) {
			/* Try again later */
			wait = jiffies + SCLP_BUSY_INTERVAL * HZ;
			while (time_before(jiffies, wait))
				sclp_sync_wait();
			spin_lock_irqsave(&sclp_lock, flags);
			continue;
		}
		while (sclp_init_req.status != SCLP_REQ_DONE &&
		       sclp_init_req.status != SCLP_REQ_FAILED)
			sclp_sync_wait();
		spin_lock_irqsave(&sclp_lock, flags);
		if (sclp_init_req.status == SCLP_REQ_DONE &&
		    sccb->header.response_code == 0x20) {
			/* Successful request */
			if (calculate) {
				sclp_receive_mask = sccb->sclp_receive_mask;
				sclp_send_mask = sccb->sclp_send_mask;
			} else {
				sclp_receive_mask = 0;
				sclp_send_mask = 0;
			}
			spin_unlock_irqrestore(&sclp_lock, flags);
			sclp_dispatch_state_change();
			spin_lock_irqsave(&sclp_lock, flags);
			rc = 0;
			break;
		}
	}
	sclp_mask_state = sclp_mask_state_idle;
	spin_unlock_irqrestore(&sclp_lock, flags);
	return rc;
}

/* Deactivate SCLP interface. On success, new requests will be rejected,
 * events will no longer be dispatched. Return 0 on success, non-zero
 * otherwise. */
int
sclp_deactivate(void)
{
	unsigned long flags;
	int rc;

	spin_lock_irqsave(&sclp_lock, flags);
	/* Deactivate can only be called when active */
	if (sclp_activation_state != sclp_activation_state_active) {
		spin_unlock_irqrestore(&sclp_lock, flags);
		return -EINVAL;
	}
	sclp_activation_state = sclp_activation_state_deactivating;
	spin_unlock_irqrestore(&sclp_lock, flags);
	rc = sclp_init_mask(0);
	spin_lock_irqsave(&sclp_lock, flags);
	if (rc == 0)
		sclp_activation_state = sclp_activation_state_inactive;
	else
		sclp_activation_state = sclp_activation_state_active;
	spin_unlock_irqrestore(&sclp_lock, flags);
	return rc;
}

EXPORT_SYMBOL(sclp_deactivate);

/* Reactivate SCLP interface after sclp_deactivate. On success, new
 * requests will be accepted, events will be dispatched again. Return 0 on
 * success, non-zero otherwise. */
int
sclp_reactivate(void)
{
	unsigned long flags;
	int rc;

	spin_lock_irqsave(&sclp_lock, flags);
	/* Reactivate can only be called when inactive */
	if (sclp_activation_state != sclp_activation_state_inactive) {
		spin_unlock_irqrestore(&sclp_lock, flags);
		return -EINVAL;
	}
	sclp_activation_state = sclp_activation_state_activating;
	spin_unlock_irqrestore(&sclp_lock, flags);
	rc = sclp_init_mask(1);
	spin_lock_irqsave(&sclp_lock, flags);
	if (rc == 0)
		sclp_activation_state = sclp_activation_state_active;
	else
		sclp_activation_state = sclp_activation_state_inactive;
	spin_unlock_irqrestore(&sclp_lock, flags);
	return rc;
}

EXPORT_SYMBOL(sclp_reactivate);

/* Handler for external interruption used during initialization. Modify
 * request state to done. */
static void
sclp_check_handler(__u16 code)
{
	u32 finished_sccb;

	finished_sccb = S390_lowcore.ext_params & 0xfffffff8;
	/* Is this the interrupt we are waiting for? */
	if (finished_sccb == 0)
		return;
	if (finished_sccb != (u32) (addr_t) sclp_init_sccb) {
		printk(KERN_WARNING SCLP_HEADER "unsolicited interrupt "
		       "for buffer at 0x%x\n", finished_sccb);
		return;
	}
	spin_lock(&sclp_lock);
	if (sclp_running_state == sclp_running_state_running) {
		sclp_init_req.status = SCLP_REQ_DONE;
		sclp_running_state = sclp_running_state_idle;
	}
	spin_unlock(&sclp_lock);
}

/* Initial init mask request timed out. Modify request state to failed. */
static void
sclp_check_timeout(unsigned long data)
{
	unsigned long flags;

	spin_lock_irqsave(&sclp_lock, flags);
	if (sclp_running_state == sclp_running_state_running) {
		sclp_init_req.status = SCLP_REQ_FAILED;
		sclp_running_state = sclp_running_state_idle;
	}
	spin_unlock_irqrestore(&sclp_lock, flags);
}

/* Perform a check of the SCLP interface. Return zero if the interface is
 * available and there are no pending requests from a previous instance.
 * Return non-zero otherwise. */
static int
sclp_check_interface(void)
{
	struct init_sccb *sccb;
	unsigned long flags;
	int retry;
	int rc;

	spin_lock_irqsave(&sclp_lock, flags);
	/* Prepare init mask command */
	rc = register_early_external_interrupt(0x2401, sclp_check_handler,
					       &ext_int_info_hwc);
	if (rc) {
		spin_unlock_irqrestore(&sclp_lock, flags);
		return rc;
	}
	for (retry = 0; retry <= SCLP_INIT_RETRY; retry++) {
		__sclp_make_init_req(0, 0);
		sccb = (struct init_sccb *) sclp_init_req.sccb;
		rc = service_call(sclp_init_req.command, sccb);
		if (rc == -EIO)
			break;
		sclp_init_req.status = SCLP_REQ_RUNNING;
		sclp_running_state = sclp_running_state_running;
		__sclp_set_request_timer(SCLP_RETRY_INTERVAL * HZ,
					 sclp_check_timeout, 0);
		spin_unlock_irqrestore(&sclp_lock, flags);
		/* Enable service-signal interruption - needs to happen
		 * with IRQs enabled. */
		ctl_set_bit(0, 9);
		/* Wait for signal from interrupt or timeout */
		sclp_sync_wait();
		/* Disable service-signal interruption - needs to happen
		 * with IRQs enabled. */
		ctl_clear_bit(0, 9);
		spin_lock_irqsave(&sclp_lock, flags);
		del_timer(&sclp_request_timer);
		if (sclp_init_req.status == SCLP_REQ_DONE &&
		    sccb->header.response_code == 0x20) {
			rc = 0;
			break;
		} else
			rc = -EBUSY;
	}
	unregister_early_external_interrupt(0x2401, sclp_check_handler,
					    &ext_int_info_hwc);
	spin_unlock_irqrestore(&sclp_lock, flags);
	return rc;
}

/* Reboot event handler. Reset send and receive mask to prevent pending SCLP
 * events from interfering with the rebooted system. */
static int
sclp_reboot_event(struct notifier_block *this, unsigned long event, void *ptr)
{
	sclp_deactivate();
	return NOTIFY_DONE;
}

static struct notifier_block sclp_reboot_notifier = {
	.notifier_call = sclp_reboot_event
};

/* Initialize SCLP driver. Return zero if driver is operational, non-zero
 * otherwise. */
static int
sclp_init(void)
{
	unsigned long flags;
	int rc;

	if (!MACHINE_HAS_SCLP)
		return -ENODEV;
	spin_lock_irqsave(&sclp_lock, flags);
	/* Check for previous or running initialization */
	if (sclp_init_state != sclp_init_state_uninitialized) {
		spin_unlock_irqrestore(&sclp_lock, flags);
		return 0;
	}
	sclp_init_state = sclp_init_state_initializing;
	/* Set up variables */
	INIT_LIST_HEAD(&sclp_req_queue);
	INIT_LIST_HEAD(&sclp_reg_list);
	list_add(&sclp_state_change_event.list, &sclp_reg_list);
	init_timer(&sclp_request_timer);
	/* Check interface */
	spin_unlock_irqrestore(&sclp_lock, flags);
	rc = sclp_check_interface();
	spin_lock_irqsave(&sclp_lock, flags);
	if (rc) {
		sclp_init_state = sclp_init_state_uninitialized;
		spin_unlock_irqrestore(&sclp_lock, flags);
		return rc;
	}
	/* Register reboot handler */
	rc = register_reboot_notifier(&sclp_reboot_notifier);
	if (rc) {
		sclp_init_state = sclp_init_state_uninitialized;
		spin_unlock_irqrestore(&sclp_lock, flags);
		return rc;
	}
	/* Register interrupt handler */
	rc = register_early_external_interrupt(0x2401, sclp_interrupt_handler,
					       &ext_int_info_hwc);
	if (rc) {
		unregister_reboot_notifier(&sclp_reboot_notifier);
		sclp_init_state = sclp_init_state_uninitialized;
		spin_unlock_irqrestore(&sclp_lock, flags);
		return rc;
	}
	sclp_init_state = sclp_init_state_initialized;
	spin_unlock_irqrestore(&sclp_lock, flags);
	/* Enable service-signal external interruption - needs to happen with
	 * IRQs enabled. */
	ctl_set_bit(0, 9);
	sclp_init_mask(1);
	return 0;
}