2 * File...........: linux/drivers/s390/block/dasd_eckd.c
3 * Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com>
4 * Horst Hummel <Horst.Hummel@de.ibm.com>
5 * Carsten Otte <Cotte@de.ibm.com>
6 * Martin Schwidefsky <schwidefsky@de.ibm.com>
7 * Bugreports.to..: <Linux390@de.ibm.com>
8 * (C) IBM Corporation, IBM Deutschland Entwicklung GmbH, 1999,2000
9 * EMC Symmetrix ioctl Copyright EMC Corporation, 2008
10 * Author.........: Nigel Hislop <hislop_nigel@emc.com>
14 #include <linux/stddef.h>
15 #include <linux/kernel.h>
16 #include <linux/slab.h>
17 #include <linux/hdreg.h> /* HDIO_GETGEO */
18 #include <linux/bio.h>
19 #include <linux/module.h>
20 #include <linux/init.h>
22 #include <asm/debug.h>
23 #include <asm/idals.h>
24 #include <asm/ebcdic.h>
26 #include <asm/todclk.h>
27 #include <asm/uaccess.h>
29 #include <asm/ccwdev.h>
32 #include "dasd_eckd.h"
36 #endif /* PRINTK_HEADER */
37 #define PRINTK_HEADER "dasd(eckd):"
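/*
 * Helper macros to pick the formula id and the factors f1..f8 out of the
 * read device characteristics data: f1..f5 come from whichever member of
 * the factors union matches the formula byte (0x01 or 0x02), f6..f8 are
 * taken directly from the characteristics.
 */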
39 #define ECKD_C0(i) (i->home_bytes)
40 #define ECKD_F(i) (i->formula)
41 #define ECKD_F1(i) (ECKD_F(i)==0x01?(i->factors.f_0x01.f1):\
42 (i->factors.f_0x02.f1))
43 #define ECKD_F2(i) (ECKD_F(i)==0x01?(i->factors.f_0x01.f2):\
44 (i->factors.f_0x02.f2))
45 #define ECKD_F3(i) (ECKD_F(i)==0x01?(i->factors.f_0x01.f3):\
46 (i->factors.f_0x02.f3))
47 #define ECKD_F4(i) (ECKD_F(i)==0x02?(i->factors.f_0x02.f4):0)
48 #define ECKD_F5(i) (ECKD_F(i)==0x02?(i->factors.f_0x02.f5):0)
49 #define ECKD_F6(i) (i->factor6)
50 #define ECKD_F7(i) (i->factor7)
51 #define ECKD_F8(i) (i->factor8)
53 MODULE_LICENSE("GPL");
55 static struct dasd_discipline dasd_eckd_discipline;
57 /* The ccw bus type uses this table to find devices that it sends to dasd_eckd_probe */
59 static struct ccw_device_id dasd_eckd_ids[] = {
60 { CCW_DEVICE_DEVTYPE (0x3990, 0, 0x3390, 0), .driver_info = 0x1},
61 { CCW_DEVICE_DEVTYPE (0x2105, 0, 0x3390, 0), .driver_info = 0x2},
62 { CCW_DEVICE_DEVTYPE (0x3880, 0, 0x3390, 0), .driver_info = 0x3},
63 { CCW_DEVICE_DEVTYPE (0x3990, 0, 0x3380, 0), .driver_info = 0x4},
64 { CCW_DEVICE_DEVTYPE (0x2105, 0, 0x3380, 0), .driver_info = 0x5},
65 { CCW_DEVICE_DEVTYPE (0x9343, 0, 0x9345, 0), .driver_info = 0x6},
66 { CCW_DEVICE_DEVTYPE (0x2107, 0, 0x3390, 0), .driver_info = 0x7},
67 { CCW_DEVICE_DEVTYPE (0x2107, 0, 0x3380, 0), .driver_info = 0x8},
68 { CCW_DEVICE_DEVTYPE (0x1750, 0, 0x3390, 0), .driver_info = 0x9},
69 { CCW_DEVICE_DEVTYPE (0x1750, 0, 0x3380, 0), .driver_info = 0xa},
70 { /* end of list */ },
73 MODULE_DEVICE_TABLE(ccw, dasd_eckd_ids);
75 static struct ccw_driver dasd_eckd_driver; /* see below */
77 /* initial attempt at a probe function. this can be simplified once
78 * the other detection code is gone */
80 dasd_eckd_probe (struct ccw_device *cdev)
84 /* set ECKD specific ccw-device options */
85 ret = ccw_device_set_options(cdev, CCWDEV_ALLOW_FORCE);
88 "dasd_eckd_probe: could not set ccw-device options "
89 "for %s\n", dev_name(&cdev->dev));
92 ret = dasd_generic_probe(cdev, &dasd_eckd_discipline);
97 dasd_eckd_set_online(struct ccw_device *cdev)
99 return dasd_generic_set_online(cdev, &dasd_eckd_discipline);
102 static struct ccw_driver dasd_eckd_driver = {
104 .owner = THIS_MODULE,
105 .ids = dasd_eckd_ids,
106 .probe = dasd_eckd_probe,
107 .remove = dasd_generic_remove,
108 .set_offline = dasd_generic_set_offline,
109 .set_online = dasd_eckd_set_online,
110 .notify = dasd_generic_notify,
113 static const int sizes_trk0[] = { 28, 148, 84 };
114 #define LABEL_SIZE 140
116 static inline unsigned int
117 round_up_multiple(unsigned int no, unsigned int mult)
120 return (rem ? no - rem + mult : no);
123 static inline unsigned int
124 ceil_quot(unsigned int d1, unsigned int d2)
126 return (d1 + (d2 - 1)) / d2;
130 recs_per_track(struct dasd_eckd_characteristics * rdc,
131 unsigned int kl, unsigned int dl)
135 switch (rdc->dev_type) {
138 return 1499 / (15 + 7 + ceil_quot(kl + 12, 32) +
139 ceil_quot(dl + 12, 32));
141 return 1499 / (15 + ceil_quot(dl + 12, 32));
143 dn = ceil_quot(dl + 6, 232) + 1;
145 kn = ceil_quot(kl + 6, 232) + 1;
146 return 1729 / (10 + 9 + ceil_quot(kl + 6 * kn, 34) +
147 9 + ceil_quot(dl + 6 * dn, 34));
149 return 1729 / (10 + 9 + ceil_quot(dl + 6 * dn, 34));
151 dn = ceil_quot(dl + 6, 232) + 1;
153 kn = ceil_quot(kl + 6, 232) + 1;
154 return 1420 / (18 + 7 + ceil_quot(kl + 6 * kn, 34) +
155 ceil_quot(dl + 6 * dn, 34));
157 return 1420 / (18 + 7 + ceil_quot(dl + 6 * dn, 34));
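/*
 * Worked example for the arithmetic above (illustration only): with
 * kl = 0 and dl = 4096, the 1729-based branch (3390 geometry) gives
 * dn = ceil_quot(4096 + 6, 232) + 1 = 19 and
 * 1729 / (10 + 9 + ceil_quot(4096 + 6 * 19, 34)) = 1729 / 143 = 12,
 * i.e. twelve 4KB records fit on one track.
 */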
163 check_XRC (struct ccw1 *de_ccw,
164 struct DE_eckd_data *data,
165 struct dasd_device *device)
167 struct dasd_eckd_private *private;
170 private = (struct dasd_eckd_private *) device->private;
171 if (!private->rdc_data.facilities.XRC_supported)
174 /* switch on System Time Stamp - needed for XRC Support */
175 data->ga_extended |= 0x08; /* switch on 'Time Stamp Valid' */
176 data->ga_extended |= 0x02; /* switch on 'Extended Parameter' */
178 rc = get_sync_clock(&data->ep_sys_time);
179 /* Ignore return code if sync clock is switched off. */
180 if (rc == -ENOSYS || rc == -EACCES)
183 de_ccw->count = sizeof(struct DE_eckd_data);
184 de_ccw->flags |= CCW_FLAG_SLI;
189 define_extent(struct ccw1 * ccw, struct DE_eckd_data * data, int trk,
190 int totrk, int cmd, struct dasd_device * device)
192 struct dasd_eckd_private *private;
193 struct ch_t geo, beg, end;
196 private = (struct dasd_eckd_private *) device->private;
198 ccw->cmd_code = DASD_ECKD_CCW_DEFINE_EXTENT;
201 ccw->cda = (__u32) __pa(data);
203 memset(data, 0, sizeof(struct DE_eckd_data));
205 case DASD_ECKD_CCW_READ_HOME_ADDRESS:
206 case DASD_ECKD_CCW_READ_RECORD_ZERO:
207 case DASD_ECKD_CCW_READ:
208 case DASD_ECKD_CCW_READ_MT:
209 case DASD_ECKD_CCW_READ_CKD:
210 case DASD_ECKD_CCW_READ_CKD_MT:
211 case DASD_ECKD_CCW_READ_KD:
212 case DASD_ECKD_CCW_READ_KD_MT:
213 case DASD_ECKD_CCW_READ_COUNT:
214 data->mask.perm = 0x1;
215 data->attributes.operation = private->attrib.operation;
217 case DASD_ECKD_CCW_WRITE:
218 case DASD_ECKD_CCW_WRITE_MT:
219 case DASD_ECKD_CCW_WRITE_KD:
220 case DASD_ECKD_CCW_WRITE_KD_MT:
221 data->mask.perm = 0x02;
222 data->attributes.operation = private->attrib.operation;
223 rc = check_XRC (ccw, data, device);
225 case DASD_ECKD_CCW_WRITE_CKD:
226 case DASD_ECKD_CCW_WRITE_CKD_MT:
227 data->attributes.operation = DASD_BYPASS_CACHE;
228 rc = check_XRC (ccw, data, device);
230 case DASD_ECKD_CCW_ERASE:
231 case DASD_ECKD_CCW_WRITE_HOME_ADDRESS:
232 case DASD_ECKD_CCW_WRITE_RECORD_ZERO:
233 data->mask.perm = 0x3;
234 data->mask.auth = 0x1;
235 data->attributes.operation = DASD_BYPASS_CACHE;
236 rc = check_XRC (ccw, data, device);
239 DEV_MESSAGE(KERN_ERR, device, "unknown opcode 0x%x", cmd);
243 data->attributes.mode = 0x3; /* ECKD */
245 if ((private->rdc_data.cu_type == 0x2105 ||
246 private->rdc_data.cu_type == 0x2107 ||
247 private->rdc_data.cu_type == 0x1750)
248 && !(private->uses_cdl && trk < 2))
249 data->ga_extended |= 0x40; /* Regular Data Format Mode */
251 geo.cyl = private->rdc_data.no_cyl;
252 geo.head = private->rdc_data.trk_per_cyl;
253 beg.cyl = trk / geo.head;
254 beg.head = trk % geo.head;
255 end.cyl = totrk / geo.head;
256 end.head = totrk % geo.head;
258 /* check for sequential prestage - enhance cylinder range */
259 if (data->attributes.operation == DASD_SEQ_PRESTAGE ||
260 data->attributes.operation == DASD_SEQ_ACCESS) {
262 if (end.cyl + private->attrib.nr_cyl < geo.cyl)
263 end.cyl += private->attrib.nr_cyl;
265 end.cyl = (geo.cyl - 1);
268 data->beg_ext.cyl = beg.cyl;
269 data->beg_ext.head = beg.head;
270 data->end_ext.cyl = end.cyl;
271 data->end_ext.head = end.head;
275 static int check_XRC_on_prefix(struct PFX_eckd_data *pfxdata,
276 struct dasd_device *device)
278 struct dasd_eckd_private *private;
281 private = (struct dasd_eckd_private *) device->private;
282 if (!private->rdc_data.facilities.XRC_supported)
285 /* switch on System Time Stamp - needed for XRC Support */
286 pfxdata->define_extend.ga_extended |= 0x08; /* 'Time Stamp Valid' */
287 pfxdata->define_extend.ga_extended |= 0x02; /* 'Extended Parameter' */
288 pfxdata->validity.time_stamp = 1; /* 'Time Stamp Valid' */
290 rc = get_sync_clock(&pfxdata->define_extend.ep_sys_time);
291 /* Ignore return code if sync clock is switched off. */
292 if (rc == -ENOSYS || rc == -EACCES)
297 static int prefix(struct ccw1 *ccw, struct PFX_eckd_data *pfxdata, int trk,
298 int totrk, int cmd, struct dasd_device *basedev,
299 struct dasd_device *startdev)
301 struct dasd_eckd_private *basepriv, *startpriv;
302 struct DE_eckd_data *data;
303 struct ch_t geo, beg, end;
306 basepriv = (struct dasd_eckd_private *) basedev->private;
307 startpriv = (struct dasd_eckd_private *) startdev->private;
308 data = &pfxdata->define_extend;
310 ccw->cmd_code = DASD_ECKD_CCW_PFX;
312 ccw->count = sizeof(*pfxdata);
313 ccw->cda = (__u32) __pa(pfxdata);
315 memset(pfxdata, 0, sizeof(*pfxdata));
318 pfxdata->base_address = basepriv->ned->unit_addr;
319 pfxdata->base_lss = basepriv->ned->ID;
320 pfxdata->validity.define_extend = 1;
322 /* private uid is kept up to date, conf_data may be outdated */
323 if (startpriv->uid.type != UA_BASE_DEVICE) {
324 pfxdata->validity.verify_base = 1;
325 if (startpriv->uid.type == UA_HYPER_PAV_ALIAS)
326 pfxdata->validity.hyper_pav = 1;
329 /* define extent data (mostly) */
331 case DASD_ECKD_CCW_READ_HOME_ADDRESS:
332 case DASD_ECKD_CCW_READ_RECORD_ZERO:
333 case DASD_ECKD_CCW_READ:
334 case DASD_ECKD_CCW_READ_MT:
335 case DASD_ECKD_CCW_READ_CKD:
336 case DASD_ECKD_CCW_READ_CKD_MT:
337 case DASD_ECKD_CCW_READ_KD:
338 case DASD_ECKD_CCW_READ_KD_MT:
339 case DASD_ECKD_CCW_READ_COUNT:
340 data->mask.perm = 0x1;
341 data->attributes.operation = basepriv->attrib.operation;
343 case DASD_ECKD_CCW_WRITE:
344 case DASD_ECKD_CCW_WRITE_MT:
345 case DASD_ECKD_CCW_WRITE_KD:
346 case DASD_ECKD_CCW_WRITE_KD_MT:
347 data->mask.perm = 0x02;
348 data->attributes.operation = basepriv->attrib.operation;
349 rc = check_XRC_on_prefix(pfxdata, basedev);
351 case DASD_ECKD_CCW_WRITE_CKD:
352 case DASD_ECKD_CCW_WRITE_CKD_MT:
353 data->attributes.operation = DASD_BYPASS_CACHE;
354 rc = check_XRC_on_prefix(pfxdata, basedev);
356 case DASD_ECKD_CCW_ERASE:
357 case DASD_ECKD_CCW_WRITE_HOME_ADDRESS:
358 case DASD_ECKD_CCW_WRITE_RECORD_ZERO:
359 data->mask.perm = 0x3;
360 data->mask.auth = 0x1;
361 data->attributes.operation = DASD_BYPASS_CACHE;
362 rc = check_XRC_on_prefix(pfxdata, basedev);
365 DEV_MESSAGE(KERN_ERR, basedev, "unknown opcode 0x%x", cmd);
369 data->attributes.mode = 0x3; /* ECKD */
371 if ((basepriv->rdc_data.cu_type == 0x2105 ||
372 basepriv->rdc_data.cu_type == 0x2107 ||
373 basepriv->rdc_data.cu_type == 0x1750)
374 && !(basepriv->uses_cdl && trk < 2))
375 data->ga_extended |= 0x40; /* Regular Data Format Mode */
377 geo.cyl = basepriv->rdc_data.no_cyl;
378 geo.head = basepriv->rdc_data.trk_per_cyl;
379 beg.cyl = trk / geo.head;
380 beg.head = trk % geo.head;
381 end.cyl = totrk / geo.head;
382 end.head = totrk % geo.head;
384 /* check for sequential prestage - enhance cylinder range */
385 if (data->attributes.operation == DASD_SEQ_PRESTAGE ||
386 data->attributes.operation == DASD_SEQ_ACCESS) {
388 if (end.cyl + basepriv->attrib.nr_cyl < geo.cyl)
389 end.cyl += basepriv->attrib.nr_cyl;
391 end.cyl = (geo.cyl - 1);
394 data->beg_ext.cyl = beg.cyl;
395 data->beg_ext.head = beg.head;
396 data->end_ext.cyl = end.cyl;
397 data->end_ext.head = end.head;
402 locate_record(struct ccw1 *ccw, struct LO_eckd_data *data, int trk,
403 int rec_on_trk, int no_rec, int cmd,
404 struct dasd_device * device, int reclen)
406 struct dasd_eckd_private *private;
410 private = (struct dasd_eckd_private *) device->private;
412 DBF_DEV_EVENT(DBF_INFO, device,
413 "Locate: trk %d, rec %d, no_rec %d, cmd %d, reclen %d",
414 trk, rec_on_trk, no_rec, cmd, reclen);
416 ccw->cmd_code = DASD_ECKD_CCW_LOCATE_RECORD;
419 ccw->cda = (__u32) __pa(data);
421 memset(data, 0, sizeof(struct LO_eckd_data));
424 switch (private->rdc_data.dev_type) {
426 dn = ceil_quot(reclen + 6, 232);
427 d = 9 + ceil_quot(reclen + 6 * (dn + 1), 34);
428 sector = (49 + (rec_on_trk - 1) * (10 + d)) / 8;
431 d = 7 + ceil_quot(reclen + 12, 32);
432 sector = (39 + (rec_on_trk - 1) * (8 + d)) / 7;
436 data->sector = sector;
437 data->count = no_rec;
439 case DASD_ECKD_CCW_WRITE_HOME_ADDRESS:
440 data->operation.orientation = 0x3;
441 data->operation.operation = 0x03;
443 case DASD_ECKD_CCW_READ_HOME_ADDRESS:
444 data->operation.orientation = 0x3;
445 data->operation.operation = 0x16;
447 case DASD_ECKD_CCW_WRITE_RECORD_ZERO:
448 data->operation.orientation = 0x1;
449 data->operation.operation = 0x03;
452 case DASD_ECKD_CCW_READ_RECORD_ZERO:
453 data->operation.orientation = 0x3;
454 data->operation.operation = 0x16;
457 case DASD_ECKD_CCW_WRITE:
458 case DASD_ECKD_CCW_WRITE_MT:
459 case DASD_ECKD_CCW_WRITE_KD:
460 case DASD_ECKD_CCW_WRITE_KD_MT:
461 data->auxiliary.last_bytes_used = 0x1;
462 data->length = reclen;
463 data->operation.operation = 0x01;
465 case DASD_ECKD_CCW_WRITE_CKD:
466 case DASD_ECKD_CCW_WRITE_CKD_MT:
467 data->auxiliary.last_bytes_used = 0x1;
468 data->length = reclen;
469 data->operation.operation = 0x03;
471 case DASD_ECKD_CCW_READ:
472 case DASD_ECKD_CCW_READ_MT:
473 case DASD_ECKD_CCW_READ_KD:
474 case DASD_ECKD_CCW_READ_KD_MT:
475 data->auxiliary.last_bytes_used = 0x1;
476 data->length = reclen;
477 data->operation.operation = 0x06;
479 case DASD_ECKD_CCW_READ_CKD:
480 case DASD_ECKD_CCW_READ_CKD_MT:
481 data->auxiliary.last_bytes_used = 0x1;
482 data->length = reclen;
483 data->operation.operation = 0x16;
485 case DASD_ECKD_CCW_READ_COUNT:
486 data->operation.operation = 0x06;
488 case DASD_ECKD_CCW_ERASE:
489 data->length = reclen;
490 data->auxiliary.last_bytes_used = 0x1;
491 data->operation.operation = 0x0b;
494 DEV_MESSAGE(KERN_ERR, device, "unknown opcode 0x%x", cmd);
496 data->seek_addr.cyl = data->search_arg.cyl =
497 trk / private->rdc_data.trk_per_cyl;
498 data->seek_addr.head = data->search_arg.head =
499 trk % private->rdc_data.trk_per_cyl;
500 data->search_arg.record = rec_on_trk;
504 * Returns 1 if the block is one of the special blocks that needs
505 * to get read/written with the KD variant of the command.
506 * That is DASD_ECKD_READ_KD_MT instead of DASD_ECKD_READ_MT and
507 * DASD_ECKD_WRITE_KD_MT instead of DASD_ECKD_WRITE_MT.
508 * Luckily the KD variants differ only by one bit (0x08) from the
509 * normal variant. So don't wonder about code like:
510 * if (dasd_eckd_cdl_special(blk_per_trk, recid))
511 * ccw->cmd_code |= 0x8;
514 dasd_eckd_cdl_special(int blk_per_trk, int recid)
518 if (recid < blk_per_trk)
520 if (recid < 2 * blk_per_trk)
526 * Returns the record size for the special blocks of the cdl format.
527 * Only returns something useful if dasd_eckd_cdl_special is true
531 dasd_eckd_cdl_reclen(int recid)
534 return sizes_trk0[recid];
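/*
 * The two helpers above are used together when a channel program is
 * built (see dasd_eckd_build_cp below), roughly as
 *	count = dasd_eckd_cdl_reclen(recid);
 *	rcmd = cmd;
 *	if (dasd_eckd_cdl_special(blk_per_trk, recid))
 *		rcmd |= 0x8;
 * turning e.g. DASD_ECKD_CCW_READ_MT into DASD_ECKD_CCW_READ_KD_MT.
 */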
539 * Generate device unique id that specifies the physical device.
541 static int dasd_eckd_generate_uid(struct dasd_device *device,
542 struct dasd_uid *uid)
544 struct dasd_eckd_private *private;
547 private = (struct dasd_eckd_private *) device->private;
550 if (!private->ned || !private->gneq)
553 memset(uid, 0, sizeof(struct dasd_uid));
554 memcpy(uid->vendor, private->ned->HDA_manufacturer,
555 sizeof(uid->vendor) - 1);
556 EBCASC(uid->vendor, sizeof(uid->vendor) - 1);
557 memcpy(uid->serial, private->ned->HDA_location,
558 sizeof(uid->serial) - 1);
559 EBCASC(uid->serial, sizeof(uid->serial) - 1);
560 uid->ssid = private->gneq->subsystemID;
561 uid->real_unit_addr = private->ned->unit_addr;
563 uid->type = private->sneq->sua_flags;
564 if (uid->type == UA_BASE_PAV_ALIAS)
565 uid->base_unit_addr = private->sneq->base_unit_addr;
567 uid->type = UA_BASE_DEVICE;
569 if (private->vdsneq) {
570 for (count = 0; count < 16; count++) {
571 sprintf(uid->vduit+2*count, "%02x",
572 private->vdsneq->uit[count]);
578 static struct dasd_ccw_req *dasd_eckd_build_rcd_lpm(struct dasd_device *device,
580 struct ciw *ciw, __u8 lpm)
582 struct dasd_ccw_req *cqr;
585 cqr = dasd_smalloc_request("ECKD", 1 /* RCD */, ciw->count, device);
588 DEV_MESSAGE(KERN_WARNING, device, "%s",
589 "Could not allocate RCD request");
594 ccw->cmd_code = ciw->cmd;
595 ccw->cda = (__u32)(addr_t)rcd_buffer;
596 ccw->count = ciw->count;
598 cqr->startdev = device;
599 cqr->memdev = device;
601 cqr->expires = 10*HZ;
603 clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
605 cqr->buildclk = get_clock();
606 cqr->status = DASD_CQR_FILLED;
610 static int dasd_eckd_read_conf_lpm(struct dasd_device *device,
612 int *rcd_buffer_size, __u8 lpm)
615 char *rcd_buf = NULL;
617 struct dasd_ccw_req *cqr;
620 * scan for RCD command in extended SenseID data
622 ciw = ccw_device_get_ciw(device->cdev, CIW_TYPE_RCD);
623 if (!ciw || ciw->cmd == 0) {
627 rcd_buf = kzalloc(ciw->count, GFP_KERNEL | GFP_DMA);
634 * buffer has to start with EBCDIC "V1.0" to show
635 * support for virtual device SNEQ
641 cqr = dasd_eckd_build_rcd_lpm(device, rcd_buf, ciw, lpm);
646 ret = dasd_sleep_on(cqr);
648 * on success we update the user input parms
650 dasd_sfree_request(cqr, cqr->memdev);
654 *rcd_buffer_size = ciw->count;
655 *rcd_buffer = rcd_buf;
660 *rcd_buffer_size = 0;
664 static int dasd_eckd_identify_conf_parts(struct dasd_eckd_private *private)
667 struct dasd_sneq *sneq;
671 private->sneq = NULL;
672 private->vdsneq = NULL;
673 private->gneq = NULL;
674 count = private->conf_len / sizeof(struct dasd_sneq);
675 sneq = (struct dasd_sneq *)private->conf_data;
676 for (i = 0; i < count; ++i) {
677 if (sneq->flags.identifier == 1 && sneq->format == 1)
678 private->sneq = sneq;
679 else if (sneq->flags.identifier == 1 && sneq->format == 4)
680 private->vdsneq = (struct vd_sneq *)sneq;
681 else if (sneq->flags.identifier == 2)
682 private->gneq = (struct dasd_gneq *)sneq;
683 else if (sneq->flags.identifier == 3 && sneq->res1 == 1)
684 private->ned = (struct dasd_ned *)sneq;
687 if (!private->ned || !private->gneq) {
689 private->sneq = NULL;
690 private->vdsneq = NULL;
691 private->gneq = NULL;
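/*
 * Scan the configuration data for the general NEQ (identifier 2) and
 * return the low three bits of its byte 18; dasd_eckd_read_conf uses
 * this value to decide whether a path is added to path_data->npm or
 * path_data->ppm.
 */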
698 static unsigned char dasd_eckd_path_access(void *conf_data, int conf_len)
700 struct dasd_gneq *gneq;
703 count = conf_len / sizeof(*gneq);
704 gneq = (struct dasd_gneq *)conf_data;
706 for (i = 0; i < count; ++i) {
707 if (gneq->flags.identifier == 2) {
714 return ((char *)gneq)[18] & 0x07;
719 static int dasd_eckd_read_conf(struct dasd_device *device)
722 int conf_len, conf_data_saved;
725 struct dasd_eckd_private *private;
726 struct dasd_eckd_path *path_data;
728 private = (struct dasd_eckd_private *) device->private;
729 path_data = (struct dasd_eckd_path *) &private->path_data;
730 path_data->opm = ccw_device_get_path_mask(device->cdev);
733 /* get configuration data per operational path */
734 for (lpm = 0x80; lpm; lpm>>= 1) {
735 if (lpm & path_data->opm){
736 rc = dasd_eckd_read_conf_lpm(device, &conf_data,
738 if (rc && rc != -EOPNOTSUPP) { /* -EOPNOTSUPP is ok */
739 MESSAGE(KERN_WARNING,
740 "Read configuration data returned "
744 if (conf_data == NULL) {
745 MESSAGE(KERN_WARNING, "%s", "No configuration "
747 continue; /* no error */
749 /* save first valid configuration data */
750 if (!conf_data_saved) {
751 kfree(private->conf_data);
752 private->conf_data = conf_data;
753 private->conf_len = conf_len;
754 if (dasd_eckd_identify_conf_parts(private)) {
755 private->conf_data = NULL;
756 private->conf_len = 0;
762 switch (dasd_eckd_path_access(conf_data, conf_len)) {
764 path_data->npm |= lpm;
767 path_data->ppm |= lpm;
770 if (conf_data != private->conf_data)
777 static int dasd_eckd_read_features(struct dasd_device *device)
779 struct dasd_psf_prssd_data *prssdp;
780 struct dasd_rssd_features *features;
781 struct dasd_ccw_req *cqr;
784 struct dasd_eckd_private *private;
786 private = (struct dasd_eckd_private *) device->private;
787 cqr = dasd_smalloc_request(dasd_eckd_discipline.name,
788 1 /* PSF */ + 1 /* RSSD */ ,
789 (sizeof(struct dasd_psf_prssd_data) +
790 sizeof(struct dasd_rssd_features)),
793 DEV_MESSAGE(KERN_WARNING, device, "%s",
794 "Could not allocate initialization request");
797 cqr->startdev = device;
798 cqr->memdev = device;
800 clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
802 cqr->expires = 10 * HZ;
804 /* Prepare for Read Subsystem Data */
805 prssdp = (struct dasd_psf_prssd_data *) cqr->data;
806 memset(prssdp, 0, sizeof(struct dasd_psf_prssd_data));
807 prssdp->order = PSF_ORDER_PRSSD;
808 prssdp->suborder = 0x41; /* Read Feature Codes */
809 /* all other bytes of prssdp must be zero */
812 ccw->cmd_code = DASD_ECKD_CCW_PSF;
813 ccw->count = sizeof(struct dasd_psf_prssd_data);
814 ccw->flags |= CCW_FLAG_CC;
815 ccw->cda = (__u32)(addr_t) prssdp;
817 /* Read Subsystem Data - feature codes */
818 features = (struct dasd_rssd_features *) (prssdp + 1);
819 memset(features, 0, sizeof(struct dasd_rssd_features));
822 ccw->cmd_code = DASD_ECKD_CCW_RSSD;
823 ccw->count = sizeof(struct dasd_rssd_features);
824 ccw->cda = (__u32)(addr_t) features;
826 cqr->buildclk = get_clock();
827 cqr->status = DASD_CQR_FILLED;
828 rc = dasd_sleep_on(cqr);
830 prssdp = (struct dasd_psf_prssd_data *) cqr->data;
831 features = (struct dasd_rssd_features *) (prssdp + 1);
832 memcpy(&private->features, features,
833 sizeof(struct dasd_rssd_features));
835 dasd_sfree_request(cqr, cqr->memdev);
841 * Build CP for Perform Subsystem Function - SSC.
843 static struct dasd_ccw_req *dasd_eckd_build_psf_ssc(struct dasd_device *device)
845 struct dasd_ccw_req *cqr;
846 struct dasd_psf_ssc_data *psf_ssc_data;
849 cqr = dasd_smalloc_request("ECKD", 1 /* PSF */ ,
850 sizeof(struct dasd_psf_ssc_data),
854 DEV_MESSAGE(KERN_WARNING, device, "%s",
855 "Could not allocate PSF-SSC request");
858 psf_ssc_data = (struct dasd_psf_ssc_data *)cqr->data;
859 psf_ssc_data->order = PSF_ORDER_SSC;
860 psf_ssc_data->suborder = 0x88;
861 psf_ssc_data->reserved[0] = 0x88;
864 ccw->cmd_code = DASD_ECKD_CCW_PSF;
865 ccw->cda = (__u32)(addr_t)psf_ssc_data;
868 cqr->startdev = device;
869 cqr->memdev = device;
871 cqr->expires = 10*HZ;
872 cqr->buildclk = get_clock();
873 cqr->status = DASD_CQR_FILLED;
878 * Perform Subsystem Function.
879 * It is necessary to trigger CIO for channel revalidation since this
880 * call might change behaviour of DASD devices.
883 dasd_eckd_psf_ssc(struct dasd_device *device)
885 struct dasd_ccw_req *cqr;
888 cqr = dasd_eckd_build_psf_ssc(device);
892 rc = dasd_sleep_on(cqr);
894 /* trigger CIO to reprobe devices */
895 css_schedule_reprobe();
896 dasd_sfree_request(cqr, cqr->memdev);
901 * Validate the storage server of the current device.
903 static int dasd_eckd_validate_server(struct dasd_device *device)
906 struct dasd_eckd_private *private;
908 /* Currently PAV is the only reason to 'validate' server on LPAR */
909 if (dasd_nopav || MACHINE_IS_VM)
912 rc = dasd_eckd_psf_ssc(device);
913 /* maybe the requested feature is not available on the server,
914 * therefore just report the error and go ahead */
915 private = (struct dasd_eckd_private *) device->private;
916 DEV_MESSAGE(KERN_INFO, device,
917 "PSF-SSC on storage subsystem %s.%s.%04x returned rc=%d",
918 private->uid.vendor, private->uid.serial,
919 private->uid.ssid, rc);
920 /* RE-Read Configuration Data */
921 return dasd_eckd_read_conf(device);
925 * Check device characteristics.
926 * If the device is accessible using ECKD discipline, the device is enabled.
929 dasd_eckd_check_characteristics(struct dasd_device *device)
931 struct dasd_eckd_private *private;
932 struct dasd_block *block;
936 private = (struct dasd_eckd_private *) device->private;
937 if (private == NULL) {
938 private = kzalloc(sizeof(struct dasd_eckd_private),
939 GFP_KERNEL | GFP_DMA);
940 if (private == NULL) {
941 DEV_MESSAGE(KERN_WARNING, device, "%s",
942 "memory allocation failed for private "
946 device->private = (void *) private;
948 /* Invalidate status of initial analysis. */
949 private->init_cqr_status = -1;
950 /* Set default cache operations. */
951 private->attrib.operation = DASD_NORMAL_CACHE;
952 private->attrib.nr_cyl = 0;
954 /* Read Configuration Data */
955 rc = dasd_eckd_read_conf(device);
959 /* Generate device unique id and register in devmap */
960 rc = dasd_eckd_generate_uid(device, &private->uid);
963 dasd_set_uid(device->cdev, &private->uid);
965 if (private->uid.type == UA_BASE_DEVICE) {
966 block = dasd_alloc_block();
968 DEV_MESSAGE(KERN_WARNING, device, "%s",
969 "could not allocate dasd block structure");
973 device->block = block;
974 block->base = device;
977 /* register lcu with alias handling, enable PAV if this is a new lcu */
978 is_known = dasd_alias_make_device_known_to_lcu(device);
985 rc = dasd_eckd_validate_server(device); /* will switch pav on */
990 /* Read Feature Codes */
991 rc = dasd_eckd_read_features(device);
995 /* Read Device Characteristics */
996 rdc_data = (void *) &(private->rdc_data);
997 memset(rdc_data, 0, sizeof(private->rdc_data));
998 rc = dasd_generic_read_dev_chars(device, "ECKD", &rdc_data, 64);
1000 DEV_MESSAGE(KERN_WARNING, device,
1001 "Read device characteristics returned "
1005 DEV_MESSAGE(KERN_INFO, device,
1006 "%04X/%02X(CU:%04X/%02X) Cyl:%d Head:%d Sec:%d",
1007 private->rdc_data.dev_type,
1008 private->rdc_data.dev_model,
1009 private->rdc_data.cu_type,
1010 private->rdc_data.cu_model.model,
1011 private->rdc_data.no_cyl,
1012 private->rdc_data.trk_per_cyl,
1013 private->rdc_data.sec_per_trk);
1017 dasd_alias_disconnect_device_from_lcu(device);
1019 dasd_free_block(device->block);
1020 device->block = NULL;
1022 kfree(private->conf_data);
1023 kfree(device->private);
1024 device->private = NULL;
1028 static void dasd_eckd_uncheck_device(struct dasd_device *device)
1030 struct dasd_eckd_private *private;
1032 private = (struct dasd_eckd_private *) device->private;
1033 dasd_alias_disconnect_device_from_lcu(device);
1034 private->ned = NULL;
1035 private->sneq = NULL;
1036 private->vdsneq = NULL;
1037 private->gneq = NULL;
1038 private->conf_len = 0;
1039 kfree(private->conf_data);
1040 private->conf_data = NULL;
1043 static struct dasd_ccw_req *
1044 dasd_eckd_analysis_ccw(struct dasd_device *device)
1046 struct dasd_eckd_private *private;
1047 struct eckd_count *count_data;
1048 struct LO_eckd_data *LO_data;
1049 struct dasd_ccw_req *cqr;
1051 int cplength, datasize;
1054 private = (struct dasd_eckd_private *) device->private;
1057 datasize = sizeof(struct DE_eckd_data) + 2*sizeof(struct LO_eckd_data);
1058 cqr = dasd_smalloc_request(dasd_eckd_discipline.name,
1059 cplength, datasize, device);
1063 /* Define extent for the first 3 tracks. */
1064 define_extent(ccw++, cqr->data, 0, 2,
1065 DASD_ECKD_CCW_READ_COUNT, device);
1066 LO_data = cqr->data + sizeof(struct DE_eckd_data);
1067 /* Locate record for the first 4 records on track 0. */
1068 ccw[-1].flags |= CCW_FLAG_CC;
1069 locate_record(ccw++, LO_data++, 0, 0, 4,
1070 DASD_ECKD_CCW_READ_COUNT, device, 0);
1072 count_data = private->count_area;
1073 for (i = 0; i < 4; i++) {
1074 ccw[-1].flags |= CCW_FLAG_CC;
1075 ccw->cmd_code = DASD_ECKD_CCW_READ_COUNT;
1078 ccw->cda = (__u32)(addr_t) count_data;
1083 /* Locate record for the first record on track 2. */
1084 ccw[-1].flags |= CCW_FLAG_CC;
1085 locate_record(ccw++, LO_data++, 2, 0, 1,
1086 DASD_ECKD_CCW_READ_COUNT, device, 0);
1087 /* Read count ccw. */
1088 ccw[-1].flags |= CCW_FLAG_CC;
1089 ccw->cmd_code = DASD_ECKD_CCW_READ_COUNT;
1092 ccw->cda = (__u32)(addr_t) count_data;
1095 cqr->startdev = device;
1096 cqr->memdev = device;
1098 cqr->buildclk = get_clock();
1099 cqr->status = DASD_CQR_FILLED;
1104 * This is the callback function for the init_analysis cqr. It saves
1105 * the status of the initial analysis ccw before it frees it and kicks
1106 * the device to continue the startup sequence. This will call
1107 * dasd_eckd_do_analysis again (if the device has not been marked
1108 * for deletion in the meantime).
1111 dasd_eckd_analysis_callback(struct dasd_ccw_req *init_cqr, void *data)
1113 struct dasd_eckd_private *private;
1114 struct dasd_device *device;
1116 device = init_cqr->startdev;
1117 private = (struct dasd_eckd_private *) device->private;
1118 private->init_cqr_status = init_cqr->status;
1119 dasd_sfree_request(init_cqr, device);
1120 dasd_kick_device(device);
1124 dasd_eckd_start_analysis(struct dasd_block *block)
1126 struct dasd_eckd_private *private;
1127 struct dasd_ccw_req *init_cqr;
1129 private = (struct dasd_eckd_private *) block->base->private;
1130 init_cqr = dasd_eckd_analysis_ccw(block->base);
1131 if (IS_ERR(init_cqr))
1132 return PTR_ERR(init_cqr);
1133 init_cqr->callback = dasd_eckd_analysis_callback;
1134 init_cqr->callback_data = NULL;
1135 init_cqr->expires = 5*HZ;
1136 dasd_add_request_head(init_cqr);
1141 dasd_eckd_end_analysis(struct dasd_block *block)
1143 struct dasd_device *device;
1144 struct dasd_eckd_private *private;
1145 struct eckd_count *count_area;
1146 unsigned int sb, blk_per_trk;
1149 device = block->base;
1150 private = (struct dasd_eckd_private *) device->private;
1151 status = private->init_cqr_status;
1152 private->init_cqr_status = -1;
1153 if (status != DASD_CQR_DONE) {
1154 DEV_MESSAGE(KERN_WARNING, device, "%s",
1155 "volume analysis returned unformatted disk");
1156 return -EMEDIUMTYPE;
1159 private->uses_cdl = 1;
1160 /* Calculate number of blocks/records per track. */
1161 blk_per_trk = recs_per_track(&private->rdc_data, 0, block->bp_block);
1162 /* Check Track 0 for Compatible Disk Layout */
1164 for (i = 0; i < 3; i++) {
1165 if (private->count_area[i].kl != 4 ||
1166 private->count_area[i].dl != dasd_eckd_cdl_reclen(i) - 4) {
1167 private->uses_cdl = 0;
1172 count_area = &private->count_area[4];
1174 if (private->uses_cdl == 0) {
1175 for (i = 0; i < 5; i++) {
1176 if ((private->count_area[i].kl != 0) ||
1177 (private->count_area[i].dl !=
1178 private->count_area[0].dl))
1182 count_area = &private->count_area[0];
1184 if (private->count_area[3].record == 1)
1185 DEV_MESSAGE(KERN_WARNING, device, "%s",
1186 "Trk 0: no records after VTOC!");
1188 if (count_area != NULL && count_area->kl == 0) {
1189 /* we found nothing violating our disk layout */
1190 if (dasd_check_blocksize(count_area->dl) == 0)
1191 block->bp_block = count_area->dl;
1193 if (block->bp_block == 0) {
1194 DEV_MESSAGE(KERN_WARNING, device, "%s",
1195 "Volume has incompatible disk layout");
1196 return -EMEDIUMTYPE;
1198 block->s2b_shift = 0; /* bits to shift 512 to get a block */
1199 for (sb = 512; sb < block->bp_block; sb = sb << 1)
1202 blk_per_trk = recs_per_track(&private->rdc_data, 0, block->bp_block);
1203 block->blocks = (private->rdc_data.no_cyl *
1204 private->rdc_data.trk_per_cyl *
1207 DEV_MESSAGE(KERN_INFO, device,
1208 "(%dkB blks): %dkB at %dkB/trk %s",
1209 (block->bp_block >> 10),
1210 ((private->rdc_data.no_cyl *
1211 private->rdc_data.trk_per_cyl *
1212 blk_per_trk * (block->bp_block >> 9)) >> 1),
1213 ((blk_per_trk * block->bp_block) >> 10),
1215 "compatible disk layout" : "linux disk layout");
1220 static int dasd_eckd_do_analysis(struct dasd_block *block)
1222 struct dasd_eckd_private *private;
1224 private = (struct dasd_eckd_private *) block->base->private;
1225 if (private->init_cqr_status < 0)
1226 return dasd_eckd_start_analysis(block);
1228 return dasd_eckd_end_analysis(block);
1231 static int dasd_eckd_ready_to_online(struct dasd_device *device)
1233 return dasd_alias_add_device(device);
1236 static int dasd_eckd_online_to_ready(struct dasd_device *device)
1238 return dasd_alias_remove_device(device);
1242 dasd_eckd_fill_geometry(struct dasd_block *block, struct hd_geometry *geo)
1244 struct dasd_eckd_private *private;
1246 private = (struct dasd_eckd_private *) block->base->private;
1247 if (dasd_check_blocksize(block->bp_block) == 0) {
1248 geo->sectors = recs_per_track(&private->rdc_data,
1249 0, block->bp_block);
1251 geo->cylinders = private->rdc_data.no_cyl;
1252 geo->heads = private->rdc_data.trk_per_cyl;
1256 static struct dasd_ccw_req *
1257 dasd_eckd_format_device(struct dasd_device * device,
1258 struct format_data_t * fdata)
1260 struct dasd_eckd_private *private;
1261 struct dasd_ccw_req *fcp;
1262 struct eckd_count *ect;
1266 int cplength, datasize;
1269 private = (struct dasd_eckd_private *) device->private;
1270 rpt = recs_per_track(&private->rdc_data, 0, fdata->blksize);
1271 cyl = fdata->start_unit / private->rdc_data.trk_per_cyl;
1272 head = fdata->start_unit % private->rdc_data.trk_per_cyl;
1274 /* Sanity checks. */
1275 if (fdata->start_unit >=
1276 (private->rdc_data.no_cyl * private->rdc_data.trk_per_cyl)) {
1277 DEV_MESSAGE(KERN_INFO, device, "Track no %d too big!",
1279 return ERR_PTR(-EINVAL);
1281 if (fdata->start_unit > fdata->stop_unit) {
1282 DEV_MESSAGE(KERN_INFO, device, "Track %d reached! ending.",
1284 return ERR_PTR(-EINVAL);
1286 if (dasd_check_blocksize(fdata->blksize) != 0) {
1287 DEV_MESSAGE(KERN_WARNING, device,
1288 "Invalid blocksize %d...terminating!",
1290 return ERR_PTR(-EINVAL);
1294 * fdata->intensity is a bit string that tells us what to do:
1295 * Bit 0: write record zero
1296 * Bit 1: write home address, currently not supported
1297 * Bit 2: invalidate tracks
1298 * Bit 3: use OS/390 compatible disk layout (cdl)
1299 * Only some bit combinations make sense.
1301 switch (fdata->intensity) {
1302 case 0x00: /* Normal format */
1303 case 0x08: /* Normal format, use cdl. */
1305 datasize = sizeof(struct DE_eckd_data) +
1306 sizeof(struct LO_eckd_data) +
1307 rpt * sizeof(struct eckd_count);
1309 case 0x01: /* Write record zero and format track. */
1310 case 0x09: /* Write record zero and format track, use cdl. */
1312 datasize = sizeof(struct DE_eckd_data) +
1313 sizeof(struct LO_eckd_data) +
1314 sizeof(struct eckd_count) +
1315 rpt * sizeof(struct eckd_count);
1317 case 0x04: /* Invalidate track. */
1318 case 0x0c: /* Invalidate track, use cdl. */
1320 datasize = sizeof(struct DE_eckd_data) +
1321 sizeof(struct LO_eckd_data) +
1322 sizeof(struct eckd_count);
1325 DEV_MESSAGE(KERN_WARNING, device, "Invalid flags 0x%x.",
1327 return ERR_PTR(-EINVAL);
1329 /* Allocate the format ccw request. */
1330 fcp = dasd_smalloc_request(dasd_eckd_discipline.name,
1331 cplength, datasize, device);
1338 switch (fdata->intensity & ~0x08) {
1339 case 0x00: /* Normal format. */
1340 define_extent(ccw++, (struct DE_eckd_data *) data,
1341 fdata->start_unit, fdata->start_unit,
1342 DASD_ECKD_CCW_WRITE_CKD, device);
1343 data += sizeof(struct DE_eckd_data);
1344 ccw[-1].flags |= CCW_FLAG_CC;
1345 locate_record(ccw++, (struct LO_eckd_data *) data,
1346 fdata->start_unit, 0, rpt,
1347 DASD_ECKD_CCW_WRITE_CKD, device,
1349 data += sizeof(struct LO_eckd_data);
1351 case 0x01: /* Write record zero + format track. */
1352 define_extent(ccw++, (struct DE_eckd_data *) data,
1353 fdata->start_unit, fdata->start_unit,
1354 DASD_ECKD_CCW_WRITE_RECORD_ZERO,
1356 data += sizeof(struct DE_eckd_data);
1357 ccw[-1].flags |= CCW_FLAG_CC;
1358 locate_record(ccw++, (struct LO_eckd_data *) data,
1359 fdata->start_unit, 0, rpt + 1,
1360 DASD_ECKD_CCW_WRITE_RECORD_ZERO, device,
1361 device->block->bp_block);
1362 data += sizeof(struct LO_eckd_data);
1364 case 0x04: /* Invalidate track. */
1365 define_extent(ccw++, (struct DE_eckd_data *) data,
1366 fdata->start_unit, fdata->start_unit,
1367 DASD_ECKD_CCW_WRITE_CKD, device);
1368 data += sizeof(struct DE_eckd_data);
1369 ccw[-1].flags |= CCW_FLAG_CC;
1370 locate_record(ccw++, (struct LO_eckd_data *) data,
1371 fdata->start_unit, 0, 1,
1372 DASD_ECKD_CCW_WRITE_CKD, device, 8);
1373 data += sizeof(struct LO_eckd_data);
1376 if (fdata->intensity & 0x01) { /* write record zero */
1377 ect = (struct eckd_count *) data;
1378 data += sizeof(struct eckd_count);
1384 ccw[-1].flags |= CCW_FLAG_CC;
1385 ccw->cmd_code = DASD_ECKD_CCW_WRITE_RECORD_ZERO;
1386 ccw->flags = CCW_FLAG_SLI;
1388 ccw->cda = (__u32)(addr_t) ect;
1391 if ((fdata->intensity & ~0x08) & 0x04) { /* erase track */
1392 ect = (struct eckd_count *) data;
1393 data += sizeof(struct eckd_count);
1399 ccw[-1].flags |= CCW_FLAG_CC;
1400 ccw->cmd_code = DASD_ECKD_CCW_WRITE_CKD;
1401 ccw->flags = CCW_FLAG_SLI;
1403 ccw->cda = (__u32)(addr_t) ect;
1404 } else { /* write remaining records */
1405 for (i = 0; i < rpt; i++) {
1406 ect = (struct eckd_count *) data;
1407 data += sizeof(struct eckd_count);
1410 ect->record = i + 1;
1412 ect->dl = fdata->blksize;
1413 /* Check for special tracks 0-1 when formatting CDL */
1414 if ((fdata->intensity & 0x08) &&
1415 fdata->start_unit == 0) {
1418 ect->dl = sizes_trk0[i] - 4;
1421 if ((fdata->intensity & 0x08) &&
1422 fdata->start_unit == 1) {
1424 ect->dl = LABEL_SIZE - 44;
1426 ccw[-1].flags |= CCW_FLAG_CC;
1427 ccw->cmd_code = DASD_ECKD_CCW_WRITE_CKD;
1428 ccw->flags = CCW_FLAG_SLI;
1430 ccw->cda = (__u32)(addr_t) ect;
1434 fcp->startdev = device;
1435 fcp->memdev = device;
1436 clear_bit(DASD_CQR_FLAGS_USE_ERP, &fcp->flags);
1437 fcp->retries = 5; /* set retry counter to enable default ERP */
1438 fcp->buildclk = get_clock();
1439 fcp->status = DASD_CQR_FILLED;
1443 static void dasd_eckd_handle_terminated_request(struct dasd_ccw_req *cqr)
1445 cqr->status = DASD_CQR_FILLED;
1446 if (cqr->block && (cqr->startdev != cqr->block->base)) {
1447 dasd_eckd_reset_ccw_to_base_io(cqr);
1448 cqr->startdev = cqr->block->base;
1452 static dasd_erp_fn_t
1453 dasd_eckd_erp_action(struct dasd_ccw_req * cqr)
1455 struct dasd_device *device = (struct dasd_device *) cqr->startdev;
1456 struct ccw_device *cdev = device->cdev;
1458 switch (cdev->id.cu_type) {
1463 return dasd_3990_erp_action;
1467 return dasd_default_erp_action;
1471 static dasd_erp_fn_t
1472 dasd_eckd_erp_postaction(struct dasd_ccw_req * cqr)
1474 return dasd_default_erp_postaction;
1478 static void dasd_eckd_handle_unsolicited_interrupt(struct dasd_device *device,
1483 /* first of all check for state change pending interrupt */
1484 mask = DEV_STAT_ATTENTION | DEV_STAT_DEV_END | DEV_STAT_UNIT_EXCEP;
1485 if ((irb->scsw.cmd.dstat & mask) == mask) {
1486 dasd_generic_handle_state_change(device);
1490 /* summary unit check */
1491 if ((irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK) &&
1492 (irb->ecw[7] == 0x0D)) {
1493 dasd_alias_handle_summary_unit_check(device, irb);
1498 /* service information message SIM */
1499 if (irb->esw.esw0.erw.cons && !(irb->ecw[27] & DASD_SENSE_BIT_0) &&
1500 ((irb->ecw[6] & DASD_SIM_SENSE) == DASD_SIM_SENSE)) {
1501 dasd_3990_erp_handle_sim(device, irb->ecw);
1502 dasd_schedule_device_bh(device);
1506 if ((irb->scsw.cmd.cc == 1) &&
1507 (irb->scsw.cmd.fctl & SCSW_FCTL_START_FUNC) &&
1508 (irb->scsw.cmd.actl & SCSW_ACTL_START_PEND) &&
1509 (irb->scsw.cmd.stctl & SCSW_STCTL_STATUS_PEND)) {
1510 /* fake irb, do nothing; they are handled elsewhere */
1511 dasd_schedule_device_bh(device);
1515 if (!(irb->esw.esw0.erw.cons)) {
1516 /* just report other unsolicited interrupts */
1517 DEV_MESSAGE(KERN_ERR, device, "%s",
1518 "unsolicited interrupt received");
1520 DEV_MESSAGE(KERN_ERR, device, "%s",
1521 "unsolicited interrupt received "
1522 "(sense available)");
1523 device->discipline->dump_sense(device, NULL, irb);
1526 dasd_schedule_device_bh(device);
1530 static struct dasd_ccw_req *dasd_eckd_build_cp(struct dasd_device *startdev,
1531 struct dasd_block *block,
1532 struct request *req)
1534 struct dasd_eckd_private *private;
1535 unsigned long *idaws;
1536 struct LO_eckd_data *LO_data;
1537 struct dasd_ccw_req *cqr;
1539 struct req_iterator iter;
1542 unsigned int blksize, blk_per_trk, off;
1543 int count, cidaw, cplength, datasize;
1544 sector_t recid, first_rec, last_rec;
1545 sector_t first_trk, last_trk;
1546 unsigned int first_offs, last_offs;
1547 unsigned char cmd, rcmd;
1549 struct dasd_device *basedev;
1551 basedev = block->base;
1552 private = (struct dasd_eckd_private *) basedev->private;
1553 if (rq_data_dir(req) == READ)
1554 cmd = DASD_ECKD_CCW_READ_MT;
1555 else if (rq_data_dir(req) == WRITE)
1556 cmd = DASD_ECKD_CCW_WRITE_MT;
1558 return ERR_PTR(-EINVAL);
1559 /* Calculate number of blocks/records per track. */
1560 blksize = block->bp_block;
1561 blk_per_trk = recs_per_track(&private->rdc_data, 0, blksize);
1562 /* Calculate record id of first and last block. */
1563 first_rec = first_trk = req->sector >> block->s2b_shift;
1564 first_offs = sector_div(first_trk, blk_per_trk);
1565 last_rec = last_trk =
1566 (req->sector + req->nr_sectors - 1) >> block->s2b_shift;
1567 last_offs = sector_div(last_trk, blk_per_trk);
1568 /* Check struct bio and count the number of blocks for the request. */
1571 rq_for_each_segment(bv, req, iter) {
1572 if (bv->bv_len & (blksize - 1))
1573 /* Eckd can only do full blocks. */
1574 return ERR_PTR(-EINVAL);
1575 count += bv->bv_len >> (block->s2b_shift + 9);
1576 #if defined(CONFIG_64BIT)
1577 if (idal_is_needed (page_address(bv->bv_page), bv->bv_len))
1578 cidaw += bv->bv_len >> (block->s2b_shift + 9);
1582 if (count != last_rec - first_rec + 1)
1583 return ERR_PTR(-EINVAL);
1585 /* use the prefix command if available */
1586 use_prefix = private->features.feature[8] & 0x01;
1588 /* 1x prefix + number of blocks */
1589 cplength = 2 + count;
1590 /* 1x prefix + cidaws*sizeof(long) */
1591 datasize = sizeof(struct PFX_eckd_data) +
1592 sizeof(struct LO_eckd_data) +
1593 cidaw * sizeof(unsigned long);
1595 /* 1x define extent + 1x locate record + number of blocks */
1596 cplength = 2 + count;
1597 /* 1x define extent + 1x locate record + cidaws*sizeof(long) */
1598 datasize = sizeof(struct DE_eckd_data) +
1599 sizeof(struct LO_eckd_data) +
1600 cidaw * sizeof(unsigned long);
1602 /* Find out the number of additional locate record ccws for cdl. */
1603 if (private->uses_cdl && first_rec < 2*blk_per_trk) {
1604 if (last_rec >= 2*blk_per_trk)
1605 count = 2*blk_per_trk - first_rec;
1607 datasize += count*sizeof(struct LO_eckd_data);
1609 /* Allocate the ccw request. */
1610 cqr = dasd_smalloc_request(dasd_eckd_discipline.name,
1611 cplength, datasize, startdev);
1615 /* First ccw is define extent or prefix. */
1617 if (prefix(ccw++, cqr->data, first_trk,
1618 last_trk, cmd, basedev, startdev) == -EAGAIN) {
1619 /* Clock not in sync and XRC is enabled. Try again later. */
1622 dasd_sfree_request(cqr, startdev);
1623 return ERR_PTR(-EAGAIN);
1625 idaws = (unsigned long *) (cqr->data +
1626 sizeof(struct PFX_eckd_data));
1628 if (define_extent(ccw++, cqr->data, first_trk,
1629 last_trk, cmd, startdev) == -EAGAIN) {
1630 /* Clock not in sync and XRC is enabled. Try again later. */
1633 dasd_sfree_request(cqr, startdev);
1634 return ERR_PTR(-EAGAIN);
1636 idaws = (unsigned long *) (cqr->data +
1637 sizeof(struct DE_eckd_data));
1639 /* Build locate_record + read/write ccws. */
1640 LO_data = (struct LO_eckd_data *) (idaws + cidaw);
1642 if (private->uses_cdl == 0 || recid > 2*blk_per_trk) {
1643 /* Only standard blocks so there is just one locate record. */
1644 ccw[-1].flags |= CCW_FLAG_CC;
1645 locate_record(ccw++, LO_data++, first_trk, first_offs + 1,
1646 last_rec - recid + 1, cmd, basedev, blksize);
1648 rq_for_each_segment(bv, req, iter) {
1649 dst = page_address(bv->bv_page) + bv->bv_offset;
1650 if (dasd_page_cache) {
1651 char *copy = kmem_cache_alloc(dasd_page_cache,
1652 GFP_DMA | __GFP_NOWARN);
1653 if (copy && rq_data_dir(req) == WRITE)
1654 memcpy(copy + bv->bv_offset, dst, bv->bv_len);
1656 dst = copy + bv->bv_offset;
1658 for (off = 0; off < bv->bv_len; off += blksize) {
1659 sector_t trkid = recid;
1660 unsigned int recoffs = sector_div(trkid, blk_per_trk);
1663 /* Locate record for cdl special block ? */
1664 if (private->uses_cdl && recid < 2*blk_per_trk) {
1665 if (dasd_eckd_cdl_special(blk_per_trk, recid)){
1667 count = dasd_eckd_cdl_reclen(recid);
1668 if (count < blksize &&
1669 rq_data_dir(req) == READ)
1670 memset(dst + count, 0xe5,
1673 ccw[-1].flags |= CCW_FLAG_CC;
1674 locate_record(ccw++, LO_data++,
1676 1, rcmd, basedev, count);
1678 /* Locate record for standard blocks ? */
1679 if (private->uses_cdl && recid == 2*blk_per_trk) {
1680 ccw[-1].flags |= CCW_FLAG_CC;
1681 locate_record(ccw++, LO_data++,
1683 last_rec - recid + 1,
1684 cmd, basedev, count);
1686 /* Read/write ccw. */
1687 ccw[-1].flags |= CCW_FLAG_CC;
1688 ccw->cmd_code = rcmd;
1690 if (idal_is_needed(dst, blksize)) {
1691 ccw->cda = (__u32)(addr_t) idaws;
1692 ccw->flags = CCW_FLAG_IDA;
1693 idaws = idal_create_words(idaws, dst, blksize);
1695 ccw->cda = (__u32)(addr_t) dst;
1703 if (blk_noretry_request(req) ||
1704 block->base->features & DASD_FEATURE_FAILFAST)
1705 set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
1706 cqr->startdev = startdev;
1707 cqr->memdev = startdev;
1709 cqr->expires = 5 * 60 * HZ; /* 5 minutes */
1710 cqr->lpm = private->path_data.ppm;
1712 cqr->buildclk = get_clock();
1713 cqr->status = DASD_CQR_FILLED;
1718 dasd_eckd_free_cp(struct dasd_ccw_req *cqr, struct request *req)
1720 struct dasd_eckd_private *private;
1722 struct req_iterator iter;
1725 unsigned int blksize, blk_per_trk, off;
1729 if (!dasd_page_cache)
1731 private = (struct dasd_eckd_private *) cqr->block->base->private;
1732 blksize = cqr->block->bp_block;
1733 blk_per_trk = recs_per_track(&private->rdc_data, 0, blksize);
1734 recid = req->sector >> cqr->block->s2b_shift;
1736 /* Skip over define extent & locate record. */
1738 if (private->uses_cdl == 0 || recid > 2*blk_per_trk)
1740 rq_for_each_segment(bv, req, iter) {
1741 dst = page_address(bv->bv_page) + bv->bv_offset;
1742 for (off = 0; off < bv->bv_len; off += blksize) {
1743 /* Skip locate record. */
1744 if (private->uses_cdl && recid <= 2*blk_per_trk)
1747 if (ccw->flags & CCW_FLAG_IDA)
1748 cda = *((char **)((addr_t) ccw->cda));
1750 cda = (char *)((addr_t) ccw->cda);
1752 if (rq_data_dir(req) == READ)
1753 memcpy(dst, cda, bv->bv_len);
1754 kmem_cache_free(dasd_page_cache,
1755 (void *)((addr_t)cda & PAGE_MASK));
1764 status = cqr->status == DASD_CQR_DONE;
1765 dasd_sfree_request(cqr, cqr->memdev);
1770 * Modify ccw chain in cqr so it can be started on a base device.
1772 * Note that this is not enough to restart the cqr!
1773 * Either reset cqr->startdev as well (summary unit check handling)
1774 * or restart via separate cqr (as in ERP handling).
1776 void dasd_eckd_reset_ccw_to_base_io(struct dasd_ccw_req *cqr)
1779 struct PFX_eckd_data *pfxdata;
1782 pfxdata = cqr->data;
1784 if (ccw->cmd_code == DASD_ECKD_CCW_PFX) {
1785 pfxdata->validity.verify_base = 0;
1786 pfxdata->validity.hyper_pav = 0;
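/*
 * Upper bound on the number of requests queued per start device returned
 * by dasd_alias_get_start_dev; dasd_eckd_build_alias_cp returns -EBUSY
 * once private->count reaches this limit.
 */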
1790 #define DASD_ECKD_CHANQ_MAX_SIZE 4
1792 static struct dasd_ccw_req *dasd_eckd_build_alias_cp(struct dasd_device *base,
1793 struct dasd_block *block,
1794 struct request *req)
1796 struct dasd_eckd_private *private;
1797 struct dasd_device *startdev;
1798 unsigned long flags;
1799 struct dasd_ccw_req *cqr;
1801 startdev = dasd_alias_get_start_dev(base);
1804 private = (struct dasd_eckd_private *) startdev->private;
1805 if (private->count >= DASD_ECKD_CHANQ_MAX_SIZE)
1806 return ERR_PTR(-EBUSY);
1808 spin_lock_irqsave(get_ccwdev_lock(startdev->cdev), flags);
1810 cqr = dasd_eckd_build_cp(startdev, block, req);
1813 spin_unlock_irqrestore(get_ccwdev_lock(startdev->cdev), flags);
1817 static int dasd_eckd_free_alias_cp(struct dasd_ccw_req *cqr,
1818 struct request *req)
1820 struct dasd_eckd_private *private;
1821 unsigned long flags;
1823 spin_lock_irqsave(get_ccwdev_lock(cqr->memdev->cdev), flags);
1824 private = (struct dasd_eckd_private *) cqr->memdev->private;
1826 spin_unlock_irqrestore(get_ccwdev_lock(cqr->memdev->cdev), flags);
1827 return dasd_eckd_free_cp(cqr, req);
1831 dasd_eckd_fill_info(struct dasd_device * device,
1832 struct dasd_information2_t * info)
1834 struct dasd_eckd_private *private;
1836 private = (struct dasd_eckd_private *) device->private;
1837 info->label_block = 2;
1838 info->FBA_layout = private->uses_cdl ? 0 : 1;
1839 info->format = private->uses_cdl ? DASD_FORMAT_CDL : DASD_FORMAT_LDL;
1840 info->characteristics_size = sizeof(struct dasd_eckd_characteristics);
1841 memcpy(info->characteristics, &private->rdc_data,
1842 sizeof(struct dasd_eckd_characteristics));
1843 info->confdata_size = min((unsigned long)private->conf_len,
1844 sizeof(info->configuration_data));
1845 memcpy(info->configuration_data, private->conf_data,
1846 info->confdata_size);
1851 * SECTION: ioctl functions for eckd devices.
1855 * Release device ioctl.
1856 * Builds a channel program to release a previously reserved
1857 * (see dasd_eckd_reserve) device.
1860 dasd_eckd_release(struct dasd_device *device)
1862 struct dasd_ccw_req *cqr;
1865 if (!capable(CAP_SYS_ADMIN))
1868 cqr = dasd_smalloc_request(dasd_eckd_discipline.name,
1871 DEV_MESSAGE(KERN_WARNING, device, "%s",
1872 "Could not allocate initialization request");
1873 return PTR_ERR(cqr);
1875 cqr->cpaddr->cmd_code = DASD_ECKD_CCW_RELEASE;
1876 cqr->cpaddr->flags |= CCW_FLAG_SLI;
1877 cqr->cpaddr->count = 32;
1878 cqr->cpaddr->cda = (__u32)(addr_t) cqr->data;
1879 cqr->startdev = device;
1880 cqr->memdev = device;
1881 clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
1882 set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
1883 cqr->retries = 2; /* set retry counter to enable basic ERP */
1884 cqr->expires = 2 * HZ;
1885 cqr->buildclk = get_clock();
1886 cqr->status = DASD_CQR_FILLED;
1888 rc = dasd_sleep_on_immediatly(cqr);
1890 dasd_sfree_request(cqr, cqr->memdev);
1895 * Reserve device ioctl.
1896 * Options are set to 'synchronous wait for interrupt' and
1897 * 'timeout the request'. This leads to a terminate IO if
1898 * the interrupt is outstanding for a certain time.
1901 dasd_eckd_reserve(struct dasd_device *device)
1903 struct dasd_ccw_req *cqr;
1906 if (!capable(CAP_SYS_ADMIN))
1909 cqr = dasd_smalloc_request(dasd_eckd_discipline.name,
1912 DEV_MESSAGE(KERN_WARNING, device, "%s",
1913 "Could not allocate initialization request");
1914 return PTR_ERR(cqr);
1916 cqr->cpaddr->cmd_code = DASD_ECKD_CCW_RESERVE;
1917 cqr->cpaddr->flags |= CCW_FLAG_SLI;
1918 cqr->cpaddr->count = 32;
1919 cqr->cpaddr->cda = (__u32)(addr_t) cqr->data;
1920 cqr->startdev = device;
1921 cqr->memdev = device;
1922 clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
1923 set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
1924 cqr->retries = 2; /* set retry counter to enable basic ERP */
1925 cqr->expires = 2 * HZ;
1926 cqr->buildclk = get_clock();
1927 cqr->status = DASD_CQR_FILLED;
1929 rc = dasd_sleep_on_immediatly(cqr);
1931 dasd_sfree_request(cqr, cqr->memdev);
1936 * Steal lock ioctl - unconditional reserve device.
1937 * Builds a channel program to break a device's reservation.
1938 * (unconditional reserve)
1941 dasd_eckd_steal_lock(struct dasd_device *device)
1943 struct dasd_ccw_req *cqr;
1946 if (!capable(CAP_SYS_ADMIN))
1949 cqr = dasd_smalloc_request(dasd_eckd_discipline.name,
1952 DEV_MESSAGE(KERN_WARNING, device, "%s",
1953 "Could not allocate initialization request");
1954 return PTR_ERR(cqr);
1956 cqr->cpaddr->cmd_code = DASD_ECKD_CCW_SLCK;
1957 cqr->cpaddr->flags |= CCW_FLAG_SLI;
1958 cqr->cpaddr->count = 32;
1959 cqr->cpaddr->cda = (__u32)(addr_t) cqr->data;
1960 cqr->startdev = device;
1961 cqr->memdev = device;
1962 clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
1963 set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
1964 cqr->retries = 2; /* set retry counter to enable basic ERP */
1965 cqr->expires = 2 * HZ;
1966 cqr->buildclk = get_clock();
1967 cqr->status = DASD_CQR_FILLED;
1969 rc = dasd_sleep_on_immediatly(cqr);
1971 dasd_sfree_request(cqr, cqr->memdev);
1976 * Read performance statistics
1979 dasd_eckd_performance(struct dasd_device *device, void __user *argp)
1981 struct dasd_psf_prssd_data *prssdp;
1982 struct dasd_rssd_perf_stats_t *stats;
1983 struct dasd_ccw_req *cqr;
1987 cqr = dasd_smalloc_request(dasd_eckd_discipline.name,
1988 1 /* PSF */ + 1 /* RSSD */ ,
1989 (sizeof(struct dasd_psf_prssd_data) +
1990 sizeof(struct dasd_rssd_perf_stats_t)),
1993 DEV_MESSAGE(KERN_WARNING, device, "%s",
1994 "Could not allocate initialization request");
1995 return PTR_ERR(cqr);
1997 cqr->startdev = device;
1998 cqr->memdev = device;
2000 cqr->expires = 10 * HZ;
2002 /* Prepare for Read Subsystem Data */
2003 prssdp = (struct dasd_psf_prssd_data *) cqr->data;
2004 memset(prssdp, 0, sizeof(struct dasd_psf_prssd_data));
2005 prssdp->order = PSF_ORDER_PRSSD;
2006 prssdp->suborder = 0x01; /* Performance Statistics */
2007 prssdp->varies[1] = 0x01; /* Perf Statistics for the Subsystem */
2010 ccw->cmd_code = DASD_ECKD_CCW_PSF;
2011 ccw->count = sizeof(struct dasd_psf_prssd_data);
2012 ccw->flags |= CCW_FLAG_CC;
2013 ccw->cda = (__u32)(addr_t) prssdp;
2015 /* Read Subsystem Data - Performance Statistics */
2016 stats = (struct dasd_rssd_perf_stats_t *) (prssdp + 1);
2017 memset(stats, 0, sizeof(struct dasd_rssd_perf_stats_t));
2020 ccw->cmd_code = DASD_ECKD_CCW_RSSD;
2021 ccw->count = sizeof(struct dasd_rssd_perf_stats_t);
2022 ccw->cda = (__u32)(addr_t) stats;
2024 cqr->buildclk = get_clock();
2025 cqr->status = DASD_CQR_FILLED;
2026 rc = dasd_sleep_on(cqr);
2028 prssdp = (struct dasd_psf_prssd_data *) cqr->data;
2029 stats = (struct dasd_rssd_perf_stats_t *) (prssdp + 1);
2030 if (copy_to_user(argp, stats,
2031 sizeof(struct dasd_rssd_perf_stats_t)))
2034 dasd_sfree_request(cqr, cqr->memdev);
2039 * Get attributes (cache operations)
2040 * Returns the cache attributes used in Define Extent (DE).
2043 dasd_eckd_get_attrib(struct dasd_device *device, void __user *argp)
2045 struct dasd_eckd_private *private =
2046 (struct dasd_eckd_private *)device->private;
2047 struct attrib_data_t attrib = private->attrib;
2050 if (!capable(CAP_SYS_ADMIN))
2056 if (copy_to_user(argp, (long *) &attrib,
2057 sizeof(struct attrib_data_t)))
2064 * Set attributes (cache operations)
2065 * Stores the attributes for cache operations to be used in Define Extent (DE).
2068 dasd_eckd_set_attrib(struct dasd_device *device, void __user *argp)
2070 struct dasd_eckd_private *private =
2071 (struct dasd_eckd_private *)device->private;
2072 struct attrib_data_t attrib;
2074 if (!capable(CAP_SYS_ADMIN))
2079 if (copy_from_user(&attrib, argp, sizeof(struct attrib_data_t)))
2081 private->attrib = attrib;
2083 DEV_MESSAGE(KERN_INFO, device,
2084 "cache operation mode set to %x (%i cylinder prestage)",
2085 private->attrib.operation, private->attrib.nr_cyl);
2090 * Issue syscall I/O to EMC Symmetrix array.
2091 * CCWs are PSF and RSSD
2093 static int dasd_symm_io(struct dasd_device *device, void __user *argp)
2095 struct dasd_symmio_parms usrparm;
2096 char *psf_data, *rssd_result;
2097 struct dasd_ccw_req *cqr;
2101 /* Copy parms from caller */
2103 if (copy_from_user(&usrparm, argp, sizeof(usrparm)))
2105 #ifndef CONFIG_64BIT
2106 /* Make sure pointers are sane even on 31 bit. */
2107 if ((usrparm.psf_data >> 32) != 0 || (usrparm.rssd_result >> 32) != 0) {
2112 /* alloc I/O data area */
2113 psf_data = kzalloc(usrparm.psf_data_len, GFP_KERNEL | GFP_DMA);
2114 rssd_result = kzalloc(usrparm.rssd_result_len, GFP_KERNEL | GFP_DMA);
2115 if (!psf_data || !rssd_result) {
2120 /* get syscall header from user space */
2122 if (copy_from_user(psf_data,
2123 (void __user *)(unsigned long) usrparm.psf_data,
2124 usrparm.psf_data_len))
2127 /* sanity check on syscall header */
2128 if (psf_data[0] != 0x17 || psf_data[1] != 0xce) {
2133 /* setup CCWs for PSF + RSSD */
2134 cqr = dasd_smalloc_request("ECKD", 2 , 0, device);
2136 DEV_MESSAGE(KERN_WARNING, device, "%s",
2137 "Could not allocate initialization request");
2142 cqr->startdev = device;
2143 cqr->memdev = device;
2145 cqr->expires = 10 * HZ;
2146 cqr->buildclk = get_clock();
2147 cqr->status = DASD_CQR_FILLED;
2149 /* Build the ccws */
2153 ccw->cmd_code = DASD_ECKD_CCW_PSF;
2154 ccw->count = usrparm.psf_data_len;
2155 ccw->flags |= CCW_FLAG_CC;
2156 ccw->cda = (__u32)(addr_t) psf_data;
2161 ccw->cmd_code = DASD_ECKD_CCW_RSSD;
2162 ccw->count = usrparm.rssd_result_len;
2163 ccw->flags = CCW_FLAG_SLI;
2164 ccw->cda = (__u32)(addr_t) rssd_result;
2166 rc = dasd_sleep_on(cqr);
2171 if (copy_to_user((void __user *)(unsigned long) usrparm.rssd_result,
2172 rssd_result, usrparm.rssd_result_len))
2177 dasd_sfree_request(cqr, cqr->memdev);
2182 DBF_DEV_EVENT(DBF_WARNING, device, "Symmetrix ioctl: rc=%d", rc);
2187 dasd_eckd_ioctl(struct dasd_block *block, unsigned int cmd, void __user *argp)
2189 struct dasd_device *device = block->base;
2193 return dasd_eckd_get_attrib(device, argp);
2195 return dasd_eckd_set_attrib(device, argp);
2197 return dasd_eckd_performance(device, argp);
2199 return dasd_eckd_release(device);
2201 return dasd_eckd_reserve(device);
2203 return dasd_eckd_steal_lock(device);
2205 return dasd_symm_io(device, argp);
2207 return -ENOIOCTLCMD;
2212 * Dump the range of CCWs into 'page' buffer
2213 * and return number of printed chars.
2216 dasd_eckd_dump_ccw_range(struct ccw1 *from, struct ccw1 *to, char *page)
2222 while (from <= to) {
2223 len += sprintf(page + len, KERN_ERR PRINTK_HEADER
2224 " CCW %p: %08X %08X DAT:",
2225 from, ((int *) from)[0], ((int *) from)[1]);
2227 /* get pointer to data (consider IDALs) */
2228 if (from->flags & CCW_FLAG_IDA)
2229 datap = (char *) *((addr_t *) (addr_t) from->cda);
2231 datap = (char *) ((addr_t) from->cda);
2233 /* dump data (max 32 bytes) */
2234 for (count = 0; count < from->count && count < 32; count++) {
2235 if (count % 8 == 0) len += sprintf(page + len, " ");
2236 if (count % 4 == 0) len += sprintf(page + len, " ");
2237 len += sprintf(page + len, "%02x", datap[count]);
2239 len += sprintf(page + len, "\n");
2246 * Print sense data and related channel program.
2247 * Parts are printed because printk buffer is only 1024 bytes.
2249 static void dasd_eckd_dump_sense(struct dasd_device *device,
2250 struct dasd_ccw_req *req, struct irb *irb)
2253 struct ccw1 *first, *last, *fail, *from, *to;
2256 page = (char *) get_zeroed_page(GFP_ATOMIC);
2258 DEV_MESSAGE(KERN_ERR, device, " %s",
2259 "No memory to dump sense data");
2262 /* dump the sense data */
2263 len = sprintf(page, KERN_ERR PRINTK_HEADER
2264 " I/O status report for device %s:\n",
2265 dev_name(&device->cdev->dev));
2266 len += sprintf(page + len, KERN_ERR PRINTK_HEADER
2267 " in req: %p CS: 0x%02X DS: 0x%02X\n", req,
2268 irb->scsw.cmd.cstat, irb->scsw.cmd.dstat);
2269 len += sprintf(page + len, KERN_ERR PRINTK_HEADER
2270 " device %s: Failing CCW: %p\n",
2271 dev_name(&device->cdev->dev),
2272 (void *) (addr_t) irb->scsw.cmd.cpa);
2273 if (irb->esw.esw0.erw.cons) {
2274 for (sl = 0; sl < 4; sl++) {
2275 len += sprintf(page + len, KERN_ERR PRINTK_HEADER
2276 " Sense(hex) %2d-%2d:",
2277 (8 * sl), ((8 * sl) + 7));
2279 for (sct = 0; sct < 8; sct++) {
2280 len += sprintf(page + len, " %02x",
2281 irb->ecw[8 * sl + sct]);
2283 len += sprintf(page + len, "\n");
2286 if (irb->ecw[27] & DASD_SENSE_BIT_0) {
2287 /* 24 Byte Sense Data */
2288 sprintf(page + len, KERN_ERR PRINTK_HEADER
2289 " 24 Byte: %x MSG %x, "
2290 "%s MSGb to SYSOP\n",
2291 irb->ecw[7] >> 4, irb->ecw[7] & 0x0f,
2292 irb->ecw[1] & 0x10 ? "" : "no");
2294 /* 32 Byte Sense Data */
2295 sprintf(page + len, KERN_ERR PRINTK_HEADER
2296 " 32 Byte: Format: %x "
2297 "Exception class %x\n",
2298 irb->ecw[6] & 0x0f, irb->ecw[22] >> 4);
2301 sprintf(page + len, KERN_ERR PRINTK_HEADER
2302 " SORRY - NO VALID SENSE AVAILABLE\n");
2307 /* req == NULL for unsolicited interrupts */
2308 /* dump the Channel Program (max 140 Bytes per line) */
2309 /* Count CCWs and print first CCWs (maximum 1024 / 140 = 7) */
2310 first = req->cpaddr;
2311 for (last = first; last->flags & (CCW_FLAG_CC | CCW_FLAG_DC); last++);
2312 to = min(first + 6, last);
2313 len = sprintf(page, KERN_ERR PRINTK_HEADER
2314 " Related CP in req: %p\n", req);
2315 dasd_eckd_dump_ccw_range(first, to, page + len);
2318 /* print failing CCW area (maximum 4) */
2319 /* scsw->cda is either valid or zero */
2322 fail = (struct ccw1 *)(addr_t)
2323 irb->scsw.cmd.cpa; /* failing CCW */
2324 if (from < fail - 2) {
2325 from = fail - 2; /* there is a gap - print header */
2326 len += sprintf(page, KERN_ERR PRINTK_HEADER "......\n");
2328 to = min(fail + 1, last);
2329 len += dasd_eckd_dump_ccw_range(from, to, page + len);
2331 /* print last CCWs (maximum 2) */
2332 from = max(from, ++to);
2333 if (from < last - 1) {
2334 from = last - 1; /* there is a gap - print header */
2335 len += sprintf(page + len, KERN_ERR PRINTK_HEADER "......\n");
2337 len += dasd_eckd_dump_ccw_range(from, last, page + len);
2341 free_page((unsigned long) page);
2345 * max_blocks is dependent on the amount of storage that is available
2346 * in the static io buffer for each device. Currently each device has
2347 * 8192 bytes (=2 pages). For 64 bit one dasd_mchunkt_t structure has
2348 * 24 bytes, the struct dasd_ccw_req has 136 bytes and each block can use
2349 * up to 16 bytes (8 for the ccw and 8 for the idal pointer). In
2350 * addition we have one define extent ccw + 16 bytes of data and one
2351 * locate record ccw + 16 bytes of data. That makes:
2352 * (8192 - 24 - 136 - 8 - 16 - 8 - 16) / 16 = 499 blocks at maximum.
2353 * We want to fit two into the available memory so that we can immediately
2354 * start the next request if one finishes off. That makes 249.5 blocks
2355 * for one request. Give a little safety and the result is 240.
2357 static struct dasd_discipline dasd_eckd_discipline = {
2358 .owner = THIS_MODULE,
2362 .check_device = dasd_eckd_check_characteristics,
2363 .uncheck_device = dasd_eckd_uncheck_device,
2364 .do_analysis = dasd_eckd_do_analysis,
2365 .ready_to_online = dasd_eckd_ready_to_online,
2366 .online_to_ready = dasd_eckd_online_to_ready,
2367 .fill_geometry = dasd_eckd_fill_geometry,
2368 .start_IO = dasd_start_IO,
2369 .term_IO = dasd_term_IO,
2370 .handle_terminated_request = dasd_eckd_handle_terminated_request,
2371 .format_device = dasd_eckd_format_device,
2372 .erp_action = dasd_eckd_erp_action,
2373 .erp_postaction = dasd_eckd_erp_postaction,
2374 .handle_unsolicited_interrupt = dasd_eckd_handle_unsolicited_interrupt,
2375 .build_cp = dasd_eckd_build_alias_cp,
2376 .free_cp = dasd_eckd_free_alias_cp,
2377 .dump_sense = dasd_eckd_dump_sense,
2378 .fill_info = dasd_eckd_fill_info,
2379 .ioctl = dasd_eckd_ioctl,
2383 dasd_eckd_init(void)
2385 ASCEBC(dasd_eckd_discipline.ebcname, 4);
2386 return ccw_driver_register(&dasd_eckd_driver);
2390 dasd_eckd_cleanup(void)
2392 ccw_driver_unregister(&dasd_eckd_driver);
2395 module_init(dasd_eckd_init);
2396 module_exit(dasd_eckd_cleanup);