/*
 * drivers/s390/cio/device_ops.c
 *
 * Copyright (C) 2002 IBM Deutschland Entwicklung GmbH,
 *			 IBM Corporation
 *    Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
 *		 Cornelia Huck (cornelia.huck@de.ibm.com)
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/device.h>
#include <linux/delay.h>

#include <asm/ccwdev.h>
#include <asm/idals.h>

#include "cio.h"
#include "cio_debug.h"
#include "css.h"
#include "chsc.h"
#include "device.h"

int ccw_device_set_options_mask(struct ccw_device *cdev, unsigned long flags)
{
	/*
	 * The flag usage is mutually exclusive ...
	 */
	if ((flags & CCWDEV_EARLY_NOTIFICATION) &&
	    (flags & CCWDEV_REPORT_ALL))
		return -EINVAL;
	cdev->private->options.fast = (flags & CCWDEV_EARLY_NOTIFICATION) != 0;
	cdev->private->options.repall = (flags & CCWDEV_REPORT_ALL) != 0;
	cdev->private->options.pgroup = (flags & CCWDEV_DO_PATHGROUP) != 0;
	cdev->private->options.force = (flags & CCWDEV_ALLOW_FORCE) != 0;
	return 0;
}

int ccw_device_set_options(struct ccw_device *cdev, unsigned long flags)
{
	/*
	 * The flag usage is mutually exclusive ...
	 */
	if (((flags & CCWDEV_EARLY_NOTIFICATION) &&
	    (flags & CCWDEV_REPORT_ALL)) ||
	    ((flags & CCWDEV_EARLY_NOTIFICATION) &&
	     cdev->private->options.repall) ||
	    ((flags & CCWDEV_REPORT_ALL) &&
	     cdev->private->options.fast))
		return -EINVAL;
	cdev->private->options.fast |= (flags & CCWDEV_EARLY_NOTIFICATION) != 0;
	cdev->private->options.repall |= (flags & CCWDEV_REPORT_ALL) != 0;
	cdev->private->options.pgroup |= (flags & CCWDEV_DO_PATHGROUP) != 0;
	cdev->private->options.force |= (flags & CCWDEV_ALLOW_FORCE) != 0;
	return 0;
}

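/*
 * Example (editorial sketch, not part of the original file): a CCW
 * device driver would typically request its notification behaviour
 * once, e.g. from its set_online callback:
 *
 *	if (ccw_device_set_options(cdev, CCWDEV_DO_PATHGROUP |
 *					 CCWDEV_ALLOW_FORCE) != 0)
 *		return -EINVAL;
 *
 * Combining CCWDEV_EARLY_NOTIFICATION with CCWDEV_REPORT_ALL - whether
 * in one call or accumulated across calls - is rejected with -EINVAL
 * by the checks above.
 */
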
void ccw_device_clear_options(struct ccw_device *cdev, unsigned long flags)
{
	cdev->private->options.fast &= (flags & CCWDEV_EARLY_NOTIFICATION) == 0;
	cdev->private->options.repall &= (flags & CCWDEV_REPORT_ALL) == 0;
	cdev->private->options.pgroup &= (flags & CCWDEV_DO_PATHGROUP) == 0;
	cdev->private->options.force &= (flags & CCWDEV_ALLOW_FORCE) == 0;
}

int
ccw_device_clear(struct ccw_device *cdev, unsigned long intparm)
{
	struct subchannel *sch;
	int ret;

	if (!cdev)
		return -ENODEV;
	if (cdev->private->state == DEV_STATE_NOT_OPER)
		return -ENODEV;
	if (cdev->private->state != DEV_STATE_ONLINE &&
	    cdev->private->state != DEV_STATE_W4SENSE)
		return -EINVAL;
	sch = to_subchannel(cdev->dev.parent);
	if (!sch)
		return -ENODEV;
	ret = cio_clear(sch);
	if (ret == 0)
		cdev->private->intparm = intparm;
	return ret;
}

int
ccw_device_start_key(struct ccw_device *cdev, struct ccw1 *cpa,
		     unsigned long intparm, __u8 lpm, __u8 key,
		     unsigned long flags)
{
	struct subchannel *sch;
	int ret;

	if (!cdev)
		return -ENODEV;
	sch = to_subchannel(cdev->dev.parent);
	if (!sch)
		return -ENODEV;
	if (cdev->private->state == DEV_STATE_NOT_OPER)
		return -ENODEV;
	if (cdev->private->state == DEV_STATE_VERIFY ||
	    cdev->private->state == DEV_STATE_CLEAR_VERIFY) {
		/* Remember to fake irb when finished. */
		if (!cdev->private->flags.fake_irb) {
			cdev->private->flags.fake_irb = 1;
			cdev->private->intparm = intparm;
			return 0;
		} else
			/* There's already a fake I/O around. */
			return -EBUSY;
	}
	if (cdev->private->state != DEV_STATE_ONLINE ||
	    ((sch->schib.scsw.stctl & SCSW_STCTL_PRIM_STATUS) &&
	     !(sch->schib.scsw.stctl & SCSW_STCTL_SEC_STATUS)) ||
	    cdev->private->flags.doverify)
		return -EBUSY;
	ret = cio_set_options (sch, flags);
	if (ret)
		return ret;
	/* Adjust requested path mask to exclude varied off paths. */
	if (lpm) {
		lpm &= sch->opm;
		if (lpm == 0)
			return -EACCES;
	}
	ret = cio_start_key (sch, cpa, lpm, key);
	if (ret == 0)
		cdev->private->intparm = intparm;
	return ret;
}

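/*
 * Example (editorial sketch, not part of the original file): with the
 * ccwdev lock held, a driver can start a one-CCW channel program like
 * this; my_ccw, my_buf and my_req are hypothetical driver-owned
 * objects, with my_ccw and my_buf allocated from GFP_DMA memory:
 *
 *	my_ccw->cmd_code = CCW_CMD_SENSE_ID;
 *	my_ccw->cda = (__u32) __pa(my_buf);
 *	my_ccw->count = sizeof(*my_buf);
 *	my_ccw->flags = CCW_FLAG_SLI;
 *	ret = ccw_device_start(cdev, my_ccw, (unsigned long) my_req, 0, 0);
 *
 * The intparm argument (the my_req cookie above) is handed back
 * unmodified to the driver's interrupt handler for this I/O.
 */
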
int
ccw_device_start_timeout_key(struct ccw_device *cdev, struct ccw1 *cpa,
			     unsigned long intparm, __u8 lpm, __u8 key,
			     unsigned long flags, int expires)
{
	int ret;

	if (!cdev)
		return -ENODEV;
	ccw_device_set_timeout(cdev, expires);
	ret = ccw_device_start_key(cdev, cpa, intparm, lpm, key, flags);
	if (ret != 0)
		ccw_device_set_timeout(cdev, 0);
	return ret;
}

int
ccw_device_start(struct ccw_device *cdev, struct ccw1 *cpa,
		 unsigned long intparm, __u8 lpm, unsigned long flags)
{
	return ccw_device_start_key(cdev, cpa, intparm, lpm,
				    PAGE_DEFAULT_KEY, flags);
}

int
ccw_device_start_timeout(struct ccw_device *cdev, struct ccw1 *cpa,
			 unsigned long intparm, __u8 lpm, unsigned long flags,
			 int expires)
{
	return ccw_device_start_timeout_key(cdev, cpa, intparm, lpm,
					    PAGE_DEFAULT_KEY, flags,
					    expires);
}

int
ccw_device_halt(struct ccw_device *cdev, unsigned long intparm)
{
	struct subchannel *sch;
	int ret;

	if (!cdev)
		return -ENODEV;
	if (cdev->private->state == DEV_STATE_NOT_OPER)
		return -ENODEV;
	if (cdev->private->state != DEV_STATE_ONLINE &&
	    cdev->private->state != DEV_STATE_W4SENSE)
		return -EINVAL;
	sch = to_subchannel(cdev->dev.parent);
	if (!sch)
		return -ENODEV;
	ret = cio_halt(sch);
	if (ret == 0)
		cdev->private->intparm = intparm;
	return ret;
}

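/*
 * Note (editorial): ccw_device_halt() and ccw_device_clear() wrap the
 * halt and clear subchannel functions (cio_halt/cio_clear). Both are
 * only valid in the online and w4sense states; the clear function is
 * the heavier terminator since it also discards pending status at the
 * subchannel.
 */
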
int
ccw_device_resume(struct ccw_device *cdev)
{
	struct subchannel *sch;

	if (!cdev)
		return -ENODEV;
	sch = to_subchannel(cdev->dev.parent);
	if (!sch)
		return -ENODEV;
	if (cdev->private->state == DEV_STATE_NOT_OPER)
		return -ENODEV;
	if (cdev->private->state != DEV_STATE_ONLINE ||
	    !(sch->schib.scsw.actl & SCSW_ACTL_SUSPENDED))
		return -EINVAL;
	return cio_resume(sch);
}

/*
 * Pass interrupt to device driver.
 */
int
ccw_device_call_handler(struct ccw_device *cdev)
{
	struct subchannel *sch;
	unsigned int stctl;
	int ending_status;

	sch = to_subchannel(cdev->dev.parent);

	/*
	 * We call the device driver's interrupt handler if:
	 *  - we received ending status
	 *  - the action handler requested to see all interrupts
	 *  - we received an intermediate status
	 *  - fast notification was requested (primary status)
	 *  - unsolicited interrupts
	 */
	stctl = cdev->private->irb.scsw.stctl;
	ending_status = (stctl & SCSW_STCTL_SEC_STATUS) ||
		(stctl == (SCSW_STCTL_ALERT_STATUS | SCSW_STCTL_STATUS_PEND)) ||
		(stctl == SCSW_STCTL_STATUS_PEND);
	if (!ending_status &&
	    !cdev->private->options.repall &&
	    !(stctl & SCSW_STCTL_INTER_STATUS) &&
	    !(cdev->private->options.fast &&
	      (stctl & SCSW_STCTL_PRIM_STATUS)))
		return 0;

	/* Clear pending timers for device driver initiated I/O. */
	if (ending_status)
		ccw_device_set_timeout(cdev, 0);
	/*
	 * Now we are ready to call the device driver interrupt handler.
	 */
	if (cdev->handler)
		cdev->handler(cdev, cdev->private->intparm,
			      &cdev->private->irb);

	/*
	 * Clear the old and now useless interrupt response block.
	 */
	memset(&cdev->private->irb, 0, sizeof(struct irb));
	return 1;
}

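/*
 * Example (editorial sketch, not part of the original file): the
 * handler invoked above has the common ccw driver signature; a
 * hypothetical minimal implementation, where struct my_request and
 * my_complete() are driver-private stand-ins:
 *
 *	static void my_handler(struct ccw_device *cdev,
 *			       unsigned long intparm, struct irb *irb)
 *	{
 *		struct my_request *req = (void *) intparm;
 *
 *		if (IS_ERR(irb))
 *			return;	(channel error, e.g. PTR_ERR(irb) == -EIO)
 *		if (irb->scsw.dstat & (DEV_STAT_CHN_END | DEV_STAT_DEV_END))
 *			my_complete(req);
 *	}
 */
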
/*
 * Search for CIW command in extended sense data.
 */
struct ciw *
ccw_device_get_ciw(struct ccw_device *cdev, __u32 ct)
{
	int ciw_cnt;

	if (cdev->private->flags.esid == 0)
		return NULL;
	for (ciw_cnt = 0; ciw_cnt < MAX_CIWS; ciw_cnt++)
		if (cdev->private->senseid.ciw[ciw_cnt].ct == ct)
			return cdev->private->senseid.ciw + ciw_cnt;
	return NULL;
}

__u8
ccw_device_get_path_mask(struct ccw_device *cdev)
{
	struct subchannel *sch;

	sch = to_subchannel(cdev->dev.parent);
	if (!sch)
		return 0;
	else
		return sch->lpm;
}

static void
ccw_device_wake_up(struct ccw_device *cdev, unsigned long ip, struct irb *irb)
{
	if (!ip)
		/* unsolicited interrupt */
		return;

	/* Abuse intparm for error reporting. */
	if (IS_ERR(irb))
		cdev->private->intparm = -EIO;
	else if (irb->scsw.cc == 1)
		/* Retry for deferred condition code. */
		cdev->private->intparm = -EAGAIN;
	else if ((irb->scsw.dstat !=
		  (DEV_STAT_CHN_END|DEV_STAT_DEV_END)) ||
		 (irb->scsw.cstat != 0)) {
		/*
		 * We didn't get channel end / device end. Check if path
		 * verification has been started; we can retry after it has
		 * finished. We also retry unit checks except for command
		 * reject or intervention required. Also check for long busy
		 * conditions.
		 */
		if (cdev->private->flags.doverify ||
		    cdev->private->state == DEV_STATE_VERIFY)
			cdev->private->intparm = -EAGAIN;
		else if ((irb->scsw.dstat & DEV_STAT_UNIT_CHECK) &&
			 !(irb->ecw[0] &
			   (SNS0_CMD_REJECT | SNS0_INTERVENTION_REQ)))
			cdev->private->intparm = -EAGAIN;
		else if ((irb->scsw.dstat & DEV_STAT_ATTENTION) &&
			 (irb->scsw.dstat & DEV_STAT_DEV_END) &&
			 (irb->scsw.dstat & DEV_STAT_UNIT_EXCEP))
			cdev->private->intparm = -EAGAIN;
		else
			cdev->private->intparm = -EIO;
	} else
		cdev->private->intparm = 0;
	wake_up(&cdev->private->wait_q);
}

static int
__ccw_device_retry_loop(struct ccw_device *cdev, struct ccw1 *ccw, long magic, __u8 lpm)
{
	int ret;
	struct subchannel *sch;

	sch = to_subchannel(cdev->dev.parent);
	do {
		ccw_device_set_timeout(cdev, 60 * HZ);
		ret = cio_start (sch, ccw, lpm);
		if (ret != 0)
			ccw_device_set_timeout(cdev, 0);
		if (ret == -EBUSY) {
			/* Try again later. */
			spin_unlock_irq(sch->lock);
			msleep(10);
			spin_lock_irq(sch->lock);
			continue;
		}
		if (ret != 0)
			/* Non-retryable error. */
			break;
		/* Wait for end of request. */
		cdev->private->intparm = magic;
		spin_unlock_irq(sch->lock);
		wait_event(cdev->private->wait_q,
			   (cdev->private->intparm == -EIO) ||
			   (cdev->private->intparm == -EAGAIN) ||
			   (cdev->private->intparm == 0));
		spin_lock_irq(sch->lock);
		/* Check at least for channel end / device end */
		if (cdev->private->intparm == -EIO) {
			/* Non-retryable error. */
			ret = -EIO;
			break;
		}
		if (cdev->private->intparm == 0)
			/* Success. */
			break;
		/* Try again later. */
		spin_unlock_irq(sch->lock);
		msleep(10);
		spin_lock_irq(sch->lock);
	} while (1);

	return ret;
}

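/*
 * Note (editorial): the "magic" value (an EBCDIC tag such as "RDC" or
 * "RCD" at the call sites below) parks a recognizable cookie in
 * cdev->private->intparm while the request runs. ccw_device_wake_up()
 * later overwrites it with 0, -EAGAIN or -EIO, which is exactly what
 * the wait_event() above is waiting for.
 */
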
/**
 * read_dev_chars() - read device characteristics
 * @param cdev   target ccw device
 * @param buffer pointer to buffer for rdc data
 * @param length size of rdc data
 * @returns 0 for success, negative error value on failure
 *
 * Context:
 *   called for online device, lock not held
 **/
int
read_dev_chars (struct ccw_device *cdev, void **buffer, int length)
{
	void (*handler)(struct ccw_device *, unsigned long, struct irb *);
	struct subchannel *sch;
	int ret;
	struct ccw1 *rdc_ccw;

	if (!cdev)
		return -ENODEV;
	if (!buffer || !length)
		return -EINVAL;
	sch = to_subchannel(cdev->dev.parent);

	CIO_TRACE_EVENT (4, "rddevch");
	CIO_TRACE_EVENT (4, sch->dev.bus_id);

	rdc_ccw = kzalloc(sizeof(struct ccw1), GFP_KERNEL | GFP_DMA);
	if (!rdc_ccw)
		return -ENOMEM;
	rdc_ccw->cmd_code = CCW_CMD_RDC;
	rdc_ccw->count = length;
	rdc_ccw->flags = CCW_FLAG_SLI;
	ret = set_normalized_cda (rdc_ccw, (*buffer));
	if (ret != 0) {
		kfree(rdc_ccw);
		return ret;
	}

	spin_lock_irq(sch->lock);
	/* Save interrupt handler. */
	handler = cdev->handler;
	/* Temporarily install own handler. */
	cdev->handler = ccw_device_wake_up;
	if (cdev->private->state != DEV_STATE_ONLINE)
		ret = -ENODEV;
	else if (((sch->schib.scsw.stctl & SCSW_STCTL_PRIM_STATUS) &&
		  !(sch->schib.scsw.stctl & SCSW_STCTL_SEC_STATUS)) ||
		 cdev->private->flags.doverify)
		ret = -EBUSY;
	else
		/* 0x00D9C4C3 == ebcdic "RDC" */
		ret = __ccw_device_retry_loop(cdev, rdc_ccw, 0x00D9C4C3, 0);

	/* Restore interrupt handler. */
	cdev->handler = handler;
	spin_unlock_irq(sch->lock);

	clear_normalized_cda (rdc_ccw);
	kfree(rdc_ccw);
	return ret;
}

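/*
 * Example (editorial sketch, not part of the original file): the
 * caller supplies the RDC buffer; a hypothetical 64-byte read of the
 * device characteristics, where parse_rdc() is a driver-private
 * stand-in:
 *
 *	void *rdc = kzalloc(64, GFP_KERNEL | GFP_DMA);
 *
 *	if (rdc && read_dev_chars(cdev, &rdc, 64) == 0)
 *		parse_rdc(rdc, 64);
 *	kfree(rdc);
 */
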
/*
 *  Read Configuration data using path mask
 */
int
read_conf_data_lpm (struct ccw_device *cdev, void **buffer, int *length, __u8 lpm)
{
	void (*handler)(struct ccw_device *, unsigned long, struct irb *);
	struct subchannel *sch;
	struct ciw *ciw;
	char *rcd_buf;
	int ret;
	struct ccw1 *rcd_ccw;

	if (!cdev)
		return -ENODEV;
	if (!buffer || !length)
		return -EINVAL;
	sch = to_subchannel(cdev->dev.parent);

	CIO_TRACE_EVENT (4, "rdconf");
	CIO_TRACE_EVENT (4, sch->dev.bus_id);

	/*
	 * scan for RCD command in extended SenseID data
	 */
	ciw = ccw_device_get_ciw(cdev, CIW_TYPE_RCD);
	if (!ciw || ciw->cmd == 0)
		return -EOPNOTSUPP;

	/* Adjust requested path mask to exclude varied off paths. */
	if (lpm) {
		lpm &= sch->opm;
		if (lpm == 0)
			return -EACCES;
	}

	rcd_ccw = kzalloc(sizeof(struct ccw1), GFP_KERNEL | GFP_DMA);
	if (!rcd_ccw)
		return -ENOMEM;
	rcd_buf = kzalloc(ciw->count, GFP_KERNEL | GFP_DMA);
	if (!rcd_buf) {
		kfree(rcd_ccw);
		return -ENOMEM;
	}
	rcd_ccw->cmd_code = ciw->cmd;
	rcd_ccw->cda = (__u32) __pa (rcd_buf);
	rcd_ccw->count = ciw->count;
	rcd_ccw->flags = CCW_FLAG_SLI;

	spin_lock_irq(sch->lock);
	/* Save interrupt handler. */
	handler = cdev->handler;
	/* Temporarily install own handler. */
	cdev->handler = ccw_device_wake_up;
	if (cdev->private->state != DEV_STATE_ONLINE)
		ret = -ENODEV;
	else if (((sch->schib.scsw.stctl & SCSW_STCTL_PRIM_STATUS) &&
		  !(sch->schib.scsw.stctl & SCSW_STCTL_SEC_STATUS)) ||
		 cdev->private->flags.doverify)
		ret = -EBUSY;
	else
		/* 0x00D9C3C4 == ebcdic "RCD" */
		ret = __ccw_device_retry_loop(cdev, rcd_ccw, 0x00D9C3C4, lpm);

	/* Restore interrupt handler. */
	cdev->handler = handler;
	spin_unlock_irq(sch->lock);

	/*
	 * On success we update the user input parms.
	 */
	if (ret) {
		kfree (rcd_buf);
		*buffer = NULL;
		*length = 0;
	} else {
		*length = ciw->count;
		*buffer = rcd_buf;
	}
	kfree(rcd_ccw);
	return ret;
}

/*
 *  Read Configuration data
 */
int
read_conf_data (struct ccw_device *cdev, void **buffer, int *length)
{
	return read_conf_data_lpm (cdev, buffer, length, 0);
}

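/*
 * Example (editorial sketch, not part of the original file): unlike
 * read_dev_chars(), the buffer is allocated here on the caller's
 * behalf and must be freed by the caller on success; parse_rcd() is a
 * hypothetical stand-in:
 *
 *	void *rcd;
 *	int len;
 *
 *	if (read_conf_data(cdev, &rcd, &len) == 0) {
 *		parse_rcd(rcd, len);
 *		kfree(rcd);
 *	}
 */
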
/*
 * Try to break the lock on a boxed device.
 */
int
ccw_device_stlck(struct ccw_device *cdev)
{
	void *buf, *buf2;
	unsigned long flags;
	struct subchannel *sch;
	int ret;

	if (!cdev)
		return -ENODEV;

	if (cdev->drv && !cdev->private->options.force)
		return -EINVAL;

	sch = to_subchannel(cdev->dev.parent);

	CIO_TRACE_EVENT(2, "stl lock");
	CIO_TRACE_EVENT(2, cdev->dev.bus_id);

	buf = kmalloc(32*sizeof(char), GFP_DMA|GFP_KERNEL);
	if (!buf)
		return -ENOMEM;
	buf2 = kmalloc(32*sizeof(char), GFP_DMA|GFP_KERNEL);
	if (!buf2) {
		kfree(buf);
		return -ENOMEM;
	}
	spin_lock_irqsave(sch->lock, flags);
	ret = cio_enable_subchannel(sch, 3);
	if (ret)
		goto out_unlock;
	/*
	 * Setup ccw. We chain an unconditional reserve and a release so we
	 * only break the lock.
	 */
	cdev->private->iccws[0].cmd_code = CCW_CMD_STLCK;
	cdev->private->iccws[0].cda = (__u32) __pa(buf);
	cdev->private->iccws[0].count = 32;
	cdev->private->iccws[0].flags = CCW_FLAG_CC;
	cdev->private->iccws[1].cmd_code = CCW_CMD_RELEASE;
	cdev->private->iccws[1].cda = (__u32) __pa(buf2);
	cdev->private->iccws[1].count = 32;
	cdev->private->iccws[1].flags = 0;
	ret = cio_start(sch, cdev->private->iccws, 0);
	if (ret) {
		cio_disable_subchannel(sch); //FIXME: return code?
		goto out_unlock;
	}
	cdev->private->irb.scsw.actl |= SCSW_ACTL_START_PEND;
	spin_unlock_irqrestore(sch->lock, flags);
	wait_event(cdev->private->wait_q, cdev->private->irb.scsw.actl == 0);
	spin_lock_irqsave(sch->lock, flags);
	cio_disable_subchannel(sch); //FIXME: return code?
	if ((cdev->private->irb.scsw.dstat !=
	     (DEV_STAT_CHN_END|DEV_STAT_DEV_END)) ||
	    (cdev->private->irb.scsw.cstat != 0))
		ret = -EIO;
	/* Clear irb. */
	memset(&cdev->private->irb, 0, sizeof(struct irb));
out_unlock:
	kfree(buf);
	kfree(buf2);
	spin_unlock_irqrestore(sch->lock, flags);
	return ret;
}

void *
ccw_device_get_chp_desc(struct ccw_device *cdev, int chp_no)
{
	struct subchannel *sch;

	sch = to_subchannel(cdev->dev.parent);
	return chsc_get_chp_desc(sch, chp_no);
}

// FIXME: these have to go:

int
_ccw_device_get_subchannel_number(struct ccw_device *cdev)
{
	return cdev->private->schid.sch_no;
}

int
_ccw_device_get_device_number(struct ccw_device *cdev)
{
	return cdev->private->dev_id.devno;
}

MODULE_LICENSE("GPL");
EXPORT_SYMBOL(ccw_device_set_options_mask);
EXPORT_SYMBOL(ccw_device_set_options);
EXPORT_SYMBOL(ccw_device_clear_options);
EXPORT_SYMBOL(ccw_device_clear);
EXPORT_SYMBOL(ccw_device_halt);
EXPORT_SYMBOL(ccw_device_resume);
EXPORT_SYMBOL(ccw_device_start_timeout);
EXPORT_SYMBOL(ccw_device_start);
EXPORT_SYMBOL(ccw_device_start_timeout_key);
EXPORT_SYMBOL(ccw_device_start_key);
EXPORT_SYMBOL(ccw_device_get_ciw);
EXPORT_SYMBOL(ccw_device_get_path_mask);
EXPORT_SYMBOL(read_conf_data);
EXPORT_SYMBOL(read_dev_chars);
EXPORT_SYMBOL(_ccw_device_get_subchannel_number);
EXPORT_SYMBOL(_ccw_device_get_device_number);
EXPORT_SYMBOL_GPL(ccw_device_get_chp_desc);
EXPORT_SYMBOL_GPL(read_conf_data_lpm);