/*
 *  drivers/s390/cio/device_ops.c
 *
 *   $Revision: 1.56 $
 *
 *    Copyright (C) 2002 IBM Deutschland Entwicklung GmbH,
 *                       IBM Corporation
 *    Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
 *               Cornelia Huck (cohuck@de.ibm.com)
 */
#include <linux/config.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/device.h>
#include <linux/delay.h>

#include <asm/ccwdev.h>
#include <asm/idals.h>
#include <asm/qdio.h>

#include "cio.h"
#include "cio_debug.h"
#include "css.h"
#include "chsc.h"
#include "device.h"
#include "qdio.h"

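/*
 * Set options for a ccw device. CCWDEV_EARLY_NOTIFICATION and
 * CCWDEV_REPORT_ALL are mutually exclusive; the remaining flags are
 * stored in the device's private options.
 */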
int
ccw_device_set_options(struct ccw_device *cdev, unsigned long flags)
{
        /*
         * The flag usage is mutually exclusive ...
         */
        if ((flags & CCWDEV_EARLY_NOTIFICATION) &&
            (flags & CCWDEV_REPORT_ALL))
                return -EINVAL;
        cdev->private->options.fast = (flags & CCWDEV_EARLY_NOTIFICATION) != 0;
        cdev->private->options.repall = (flags & CCWDEV_REPORT_ALL) != 0;
        cdev->private->options.pgroup = (flags & CCWDEV_DO_PATHGROUP) != 0;
        cdev->private->options.force = (flags & CCWDEV_ALLOW_FORCE) != 0;
        return 0;
}

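/*
 * Terminate the current I/O on a ccw device by performing a clear
 * operation on its subchannel. The supplied intparm is remembered so it
 * can be presented to the driver's interrupt handler on completion.
 */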
int
ccw_device_clear(struct ccw_device *cdev, unsigned long intparm)
{
        struct subchannel *sch;
        int ret;

        if (!cdev)
                return -ENODEV;
        if (cdev->private->state == DEV_STATE_NOT_OPER)
                return -ENODEV;
        if (cdev->private->state != DEV_STATE_ONLINE &&
            cdev->private->state != DEV_STATE_WAIT4IO &&
            cdev->private->state != DEV_STATE_W4SENSE)
                return -EINVAL;
        sch = to_subchannel(cdev->dev.parent);
        if (!sch)
                return -ENODEV;
        ret = cio_clear(sch);
        if (ret == 0)
                cdev->private->intparm = intparm;
        return ret;
}

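/*
 * Start a channel program on a ccw device using the given storage key.
 * If path verification is currently running, the request is not started;
 * instead a fake interrupt response block will be delivered to the driver
 * once verification has finished.
 */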
int
ccw_device_start_key(struct ccw_device *cdev, struct ccw1 *cpa,
                     unsigned long intparm, __u8 lpm, __u8 key,
                     unsigned long flags)
{
        struct subchannel *sch;
        int ret;

        if (!cdev)
                return -ENODEV;
        sch = to_subchannel(cdev->dev.parent);
        if (!sch)
                return -ENODEV;
        if (cdev->private->state == DEV_STATE_NOT_OPER)
                return -ENODEV;
        if (cdev->private->state == DEV_STATE_VERIFY) {
                /* Remember to fake irb when finished. */
                if (!cdev->private->flags.fake_irb) {
                        cdev->private->flags.fake_irb = 1;
                        cdev->private->intparm = intparm;
                        return 0;
                } else
                        /* There's already a fake I/O around. */
                        return -EBUSY;
        }
        if (cdev->private->state != DEV_STATE_ONLINE ||
            ((sch->schib.scsw.stctl & SCSW_STCTL_PRIM_STATUS) &&
             !(sch->schib.scsw.stctl & SCSW_STCTL_SEC_STATUS)) ||
            cdev->private->flags.doverify)
                return -EBUSY;
        ret = cio_set_options (sch, flags);
        if (ret)
                return ret;
        ret = cio_start_key (sch, cpa, lpm, key);
        if (ret == 0)
                cdev->private->intparm = intparm;
        return ret;
}

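/*
 * Same as ccw_device_start_key(), but additionally arm a timeout before
 * starting the request; the timeout is cancelled again if the start fails.
 */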
int
ccw_device_start_timeout_key(struct ccw_device *cdev, struct ccw1 *cpa,
                             unsigned long intparm, __u8 lpm, __u8 key,
                             unsigned long flags, int expires)
{
        int ret;

        if (!cdev)
                return -ENODEV;
        ccw_device_set_timeout(cdev, expires);
        ret = ccw_device_start_key(cdev, cpa, intparm, lpm, key, flags);
        if (ret != 0)
                ccw_device_set_timeout(cdev, 0);
        return ret;
}

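/*
 * Start a channel program using the default storage key.
 */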
int
ccw_device_start(struct ccw_device *cdev, struct ccw1 *cpa,
                 unsigned long intparm, __u8 lpm, unsigned long flags)
{
        return ccw_device_start_key(cdev, cpa, intparm, lpm,
                                    PAGE_DEFAULT_KEY, flags);
}

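/*
 * Start a channel program with a timeout, using the default storage key.
 */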
int
ccw_device_start_timeout(struct ccw_device *cdev, struct ccw1 *cpa,
                         unsigned long intparm, __u8 lpm, unsigned long flags,
                         int expires)
{
        return ccw_device_start_timeout_key(cdev, cpa, intparm, lpm,
                                            PAGE_DEFAULT_KEY, flags,
                                            expires);
}

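/*
 * Halt the current I/O on a ccw device by performing a halt operation on
 * its subchannel. The supplied intparm is remembered for the driver's
 * interrupt handler.
 */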
int
ccw_device_halt(struct ccw_device *cdev, unsigned long intparm)
{
        struct subchannel *sch;
        int ret;

        if (!cdev)
                return -ENODEV;
        if (cdev->private->state == DEV_STATE_NOT_OPER)
                return -ENODEV;
        if (cdev->private->state != DEV_STATE_ONLINE &&
            cdev->private->state != DEV_STATE_WAIT4IO &&
            cdev->private->state != DEV_STATE_W4SENSE)
                return -EINVAL;
        sch = to_subchannel(cdev->dev.parent);
        if (!sch)
                return -ENODEV;
        ret = cio_halt(sch);
        if (ret == 0)
                cdev->private->intparm = intparm;
        return ret;
}

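/*
 * Resume a suspended channel program on a ccw device. Only valid while
 * the device is online and its subchannel is actually suspended.
 */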
int
ccw_device_resume(struct ccw_device *cdev)
{
        struct subchannel *sch;

        if (!cdev)
                return -ENODEV;
        sch = to_subchannel(cdev->dev.parent);
        if (!sch)
                return -ENODEV;
        if (cdev->private->state == DEV_STATE_NOT_OPER)
                return -ENODEV;
        if (cdev->private->state != DEV_STATE_ONLINE ||
            !(sch->schib.scsw.actl & SCSW_ACTL_SUSPENDED))
                return -EINVAL;
        return cio_resume(sch);
}

/*
 * Pass interrupt to device driver.
 */
int
ccw_device_call_handler(struct ccw_device *cdev)
{
        struct subchannel *sch;
        unsigned int stctl;
        int ending_status;

        sch = to_subchannel(cdev->dev.parent);

        /*
         * We call the device driver's interrupt handler if:
         *  - we received ending status
         *  - the driver requested to see all interrupts
         *  - we received an intermediate status
         *  - fast notification was requested (primary status)
         *  - the interrupt is unsolicited
         */
        stctl = cdev->private->irb.scsw.stctl;
        ending_status = (stctl & SCSW_STCTL_SEC_STATUS) ||
                (stctl == (SCSW_STCTL_ALERT_STATUS | SCSW_STCTL_STATUS_PEND)) ||
                (stctl == SCSW_STCTL_STATUS_PEND);
        if (!ending_status &&
            !cdev->private->options.repall &&
            !(stctl & SCSW_STCTL_INTER_STATUS) &&
            !(cdev->private->options.fast &&
              (stctl & SCSW_STCTL_PRIM_STATUS)))
                return 0;

        /*
         * Now we are ready to call the device driver interrupt handler.
         */
        if (cdev->handler)
                cdev->handler(cdev, cdev->private->intparm,
                              &cdev->private->irb);

        /*
         * Clear the old and now useless interrupt response block.
         */
        memset(&cdev->private->irb, 0, sizeof(struct irb));

        return 1;
}

/*
 * Search for CIW command in extended sense data.
 */
struct ciw *
ccw_device_get_ciw(struct ccw_device *cdev, __u32 ct)
{
        int ciw_cnt;

        if (cdev->private->flags.esid == 0)
                return NULL;
        for (ciw_cnt = 0; ciw_cnt < MAX_CIWS; ciw_cnt++)
                if (cdev->private->senseid.ciw[ciw_cnt].ct == ct)
                        return cdev->private->senseid.ciw + ciw_cnt;
        return NULL;
}

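/*
 * Return the mask of paths currently usable for the device, or 0 if the
 * subchannel cannot be determined.
 */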
__u8
ccw_device_get_path_mask(struct ccw_device *cdev)
{
        struct subchannel *sch;

        sch = to_subchannel(cdev->dev.parent);
        if (!sch)
                return 0;
        else
                return sch->vpm;
}

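/*
 * Temporary interrupt handler used by the internal I/O helpers below.
 * It encodes the outcome of the request in intparm (0 on success,
 * -EAGAIN for retryable conditions, -EIO otherwise) and wakes up the
 * waiting thread.
 */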
static void
ccw_device_wake_up(struct ccw_device *cdev, unsigned long ip, struct irb *irb)
{
        if (!ip)
                /* unsolicited interrupt */
                return;

        /* Abuse intparm for error reporting. */
        if (IS_ERR(irb))
                cdev->private->intparm = -EIO;
        else if ((irb->scsw.dstat !=
                  (DEV_STAT_CHN_END|DEV_STAT_DEV_END)) ||
                 (irb->scsw.cstat != 0)) {
                /*
                 * We didn't get channel end / device end. Check if path
                 * verification has been started; we can retry after it has
                 * finished. We also retry unit checks except for command reject
                 * or intervention required.
                 */
                if (cdev->private->flags.doverify ||
                    cdev->private->state == DEV_STATE_VERIFY)
                        cdev->private->intparm = -EAGAIN;
                else if ((irb->scsw.dstat & DEV_STAT_UNIT_CHECK) &&
                         !(irb->ecw[0] &
                           (SNS0_CMD_REJECT | SNS0_INTERVENTION_REQ)))
                        cdev->private->intparm = -EAGAIN;
                else
                        cdev->private->intparm = -EIO;
        } else
                cdev->private->intparm = 0;
        wake_up(&cdev->private->wait_q);
}

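/*
 * Start the given channel program and wait for it to complete, retrying
 * in 10 msec intervals while the subchannel is busy or no path is
 * available. Called with the subchannel lock held; the lock is dropped
 * while sleeping and waiting.
 */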
static inline int
__ccw_device_retry_loop(struct ccw_device *cdev, struct ccw1 *ccw, long magic, __u8 lpm)
{
        int ret;
        struct subchannel *sch;

        sch = to_subchannel(cdev->dev.parent);
        do {
                ret = cio_start (sch, ccw, lpm);
                if ((ret == -EBUSY) || (ret == -EACCES)) {
                        /* Try again later. */
                        spin_unlock_irq(&sch->lock);
                        msleep(10);
                        spin_lock_irq(&sch->lock);
                        continue;
                }
                if (ret != 0)
                        /* Non-retryable error. */
                        break;
                /* Wait for end of request. */
                cdev->private->intparm = magic;
                spin_unlock_irq(&sch->lock);
                wait_event(cdev->private->wait_q,
                           (cdev->private->intparm == -EIO) ||
                           (cdev->private->intparm == -EAGAIN) ||
                           (cdev->private->intparm == 0));
                spin_lock_irq(&sch->lock);
                /* Check at least for channel end / device end */
                if (cdev->private->intparm == -EIO) {
                        /* Non-retryable error. */
                        ret = -EIO;
                        break;
                }
                if (cdev->private->intparm == 0)
                        /* Success. */
                        break;
                /* Try again later. */
                spin_unlock_irq(&sch->lock);
                msleep(10);
                spin_lock_irq(&sch->lock);
        } while (1);

        return ret;
}

/**
 * read_dev_chars() - read device characteristics
 * @param cdev   target ccw device
 * @param buffer pointer to buffer for rdc data
 * @param length size of rdc data
 * @returns 0 for success, negative error value on failure
 *
 * Context:
 *   called for online device, lock not held
 **/
int
read_dev_chars (struct ccw_device *cdev, void **buffer, int length)
{
        void (*handler)(struct ccw_device *, unsigned long, struct irb *);
        struct subchannel *sch;
        int ret;
        struct ccw1 *rdc_ccw;

        if (!cdev)
                return -ENODEV;
        if (!buffer || !length)
                return -EINVAL;
        sch = to_subchannel(cdev->dev.parent);

        CIO_TRACE_EVENT (4, "rddevch");
        CIO_TRACE_EVENT (4, sch->dev.bus_id);

        rdc_ccw = kmalloc(sizeof(struct ccw1), GFP_KERNEL | GFP_DMA);
        if (!rdc_ccw)
                return -ENOMEM;
        memset(rdc_ccw, 0, sizeof(struct ccw1));
        rdc_ccw->cmd_code = CCW_CMD_RDC;
        rdc_ccw->count = length;
        rdc_ccw->flags = CCW_FLAG_SLI;
        ret = set_normalized_cda (rdc_ccw, (*buffer));
        if (ret != 0) {
                kfree(rdc_ccw);
                return ret;
        }

        spin_lock_irq(&sch->lock);
        /* Save interrupt handler. */
        handler = cdev->handler;
        /* Temporarily install own handler. */
        cdev->handler = ccw_device_wake_up;
        if (cdev->private->state != DEV_STATE_ONLINE)
                ret = -ENODEV;
        else if (((sch->schib.scsw.stctl & SCSW_STCTL_PRIM_STATUS) &&
                  !(sch->schib.scsw.stctl & SCSW_STCTL_SEC_STATUS)) ||
                 cdev->private->flags.doverify)
                ret = -EBUSY;
        else
                /* 0x00D9C4C3 == ebcdic "RDC" */
                ret = __ccw_device_retry_loop(cdev, rdc_ccw, 0x00D9C4C3, 0);

        /* Restore interrupt handler. */
        cdev->handler = handler;
        spin_unlock_irq(&sch->lock);

        clear_normalized_cda (rdc_ccw);
        kfree(rdc_ccw);

        return ret;
}

/*
 *  Read Configuration data using path mask
 */
int
read_conf_data_lpm (struct ccw_device *cdev, void **buffer, int *length, __u8 lpm)
{
        void (*handler)(struct ccw_device *, unsigned long, struct irb *);
        struct subchannel *sch;
        struct ciw *ciw;
        char *rcd_buf;
        int ret;
        struct ccw1 *rcd_ccw;

        if (!cdev)
                return -ENODEV;
        if (!buffer || !length)
                return -EINVAL;
        sch = to_subchannel(cdev->dev.parent);

        CIO_TRACE_EVENT (4, "rdconf");
        CIO_TRACE_EVENT (4, sch->dev.bus_id);

        /*
         * scan for RCD command in extended SenseID data
         */
        ciw = ccw_device_get_ciw(cdev, CIW_TYPE_RCD);
        if (!ciw || ciw->cmd == 0)
                return -EOPNOTSUPP;

        rcd_ccw = kmalloc(sizeof(struct ccw1), GFP_KERNEL | GFP_DMA);
        if (!rcd_ccw)
                return -ENOMEM;
        memset(rcd_ccw, 0, sizeof(struct ccw1));
        rcd_buf = kmalloc(ciw->count, GFP_KERNEL | GFP_DMA);
        if (!rcd_buf) {
                kfree(rcd_ccw);
                return -ENOMEM;
        }
        memset (rcd_buf, 0, ciw->count);
        rcd_ccw->cmd_code = ciw->cmd;
        rcd_ccw->cda = (__u32) __pa (rcd_buf);
        rcd_ccw->count = ciw->count;
        rcd_ccw->flags = CCW_FLAG_SLI;

        spin_lock_irq(&sch->lock);
        /* Save interrupt handler. */
        handler = cdev->handler;
        /* Temporarily install own handler. */
        cdev->handler = ccw_device_wake_up;
        if (cdev->private->state != DEV_STATE_ONLINE)
                ret = -ENODEV;
        else if (((sch->schib.scsw.stctl & SCSW_STCTL_PRIM_STATUS) &&
                  !(sch->schib.scsw.stctl & SCSW_STCTL_SEC_STATUS)) ||
                 cdev->private->flags.doverify)
                ret = -EBUSY;
        else
                /* 0x00D9C3C4 == ebcdic "RCD" */
                ret = __ccw_device_retry_loop(cdev, rcd_ccw, 0x00D9C3C4, lpm);

        /* Restore interrupt handler. */
        cdev->handler = handler;
        spin_unlock_irq(&sch->lock);

        /*
         * on success we update the user input parms
         */
        if (ret) {
                kfree (rcd_buf);
                *buffer = NULL;
                *length = 0;
        } else {
                *length = ciw->count;
                *buffer = rcd_buf;
        }
        kfree(rcd_ccw);

        return ret;
}

/*
 *  Read Configuration data
 */
int
read_conf_data (struct ccw_device *cdev, void **buffer, int *length)
{
        return read_conf_data_lpm (cdev, buffer, length, 0);
}

/*
 * Try to break the lock on a boxed device.
 */
int
ccw_device_stlck(struct ccw_device *cdev)
{
        void *buf, *buf2;
        unsigned long flags;
        struct subchannel *sch;
        int ret;

        if (!cdev)
                return -ENODEV;

        if (cdev->drv && !cdev->private->options.force)
                return -EINVAL;

        sch = to_subchannel(cdev->dev.parent);

        CIO_TRACE_EVENT(2, "stl lock");
        CIO_TRACE_EVENT(2, cdev->dev.bus_id);

        buf = kmalloc(32*sizeof(char), GFP_DMA|GFP_KERNEL);
        if (!buf)
                return -ENOMEM;
        buf2 = kmalloc(32*sizeof(char), GFP_DMA|GFP_KERNEL);
        if (!buf2) {
                kfree(buf);
                return -ENOMEM;
        }
        spin_lock_irqsave(&sch->lock, flags);
        ret = cio_enable_subchannel(sch, 3);
        if (ret)
                goto out_unlock;
        /*
         * Setup ccw. We chain an unconditional reserve and a release so we
         * only break the lock.
         */
        cdev->private->iccws[0].cmd_code = CCW_CMD_STLCK;
        cdev->private->iccws[0].cda = (__u32) __pa(buf);
        cdev->private->iccws[0].count = 32;
        cdev->private->iccws[0].flags = CCW_FLAG_CC;
        cdev->private->iccws[1].cmd_code = CCW_CMD_RELEASE;
        cdev->private->iccws[1].cda = (__u32) __pa(buf2);
        cdev->private->iccws[1].count = 32;
        cdev->private->iccws[1].flags = 0;
        ret = cio_start(sch, cdev->private->iccws, 0);
        if (ret) {
                cio_disable_subchannel(sch); //FIXME: return code?
                goto out_unlock;
        }
        cdev->private->irb.scsw.actl |= SCSW_ACTL_START_PEND;
        spin_unlock_irqrestore(&sch->lock, flags);
        wait_event(cdev->private->wait_q, cdev->private->irb.scsw.actl == 0);
        spin_lock_irqsave(&sch->lock, flags);
        cio_disable_subchannel(sch); //FIXME: return code?
        if ((cdev->private->irb.scsw.dstat !=
             (DEV_STAT_CHN_END|DEV_STAT_DEV_END)) ||
            (cdev->private->irb.scsw.cstat != 0))
                ret = -EIO;
        /* Clear irb. */
        memset(&cdev->private->irb, 0, sizeof(struct irb));
out_unlock:
        if (buf)
                kfree(buf);
        if (buf2)
                kfree(buf2);
        spin_unlock_irqrestore(&sch->lock, flags);
        return ret;
}

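/*
 * Return the channel path description for the given channel path of the
 * device, obtained via the channel subsystem call (chsc) layer.
 */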
void *
ccw_device_get_chp_desc(struct ccw_device *cdev, int chp_no)
{
        struct subchannel *sch;

        sch = to_subchannel(cdev->dev.parent);
        return chsc_get_chp_desc(sch, chp_no);
}

// FIXME: these have to go:

int
_ccw_device_get_subchannel_number(struct ccw_device *cdev)
{
        return cdev->private->irq;
}

int
_ccw_device_get_device_number(struct ccw_device *cdev)
{
        return cdev->private->devno;
}

MODULE_LICENSE("GPL");
EXPORT_SYMBOL(ccw_device_set_options);
EXPORT_SYMBOL(ccw_device_clear);
EXPORT_SYMBOL(ccw_device_halt);
EXPORT_SYMBOL(ccw_device_resume);
EXPORT_SYMBOL(ccw_device_start_timeout);
EXPORT_SYMBOL(ccw_device_start);
EXPORT_SYMBOL(ccw_device_start_timeout_key);
EXPORT_SYMBOL(ccw_device_start_key);
EXPORT_SYMBOL(ccw_device_get_ciw);
EXPORT_SYMBOL(ccw_device_get_path_mask);
EXPORT_SYMBOL(read_conf_data);
EXPORT_SYMBOL(read_dev_chars);
EXPORT_SYMBOL(_ccw_device_get_subchannel_number);
EXPORT_SYMBOL(_ccw_device_get_device_number);
EXPORT_SYMBOL_GPL(ccw_device_get_chp_desc);
EXPORT_SYMBOL_GPL(read_conf_data_lpm);