/*
 * linux/drivers/ide/ide-taskfile.c	Version 0.38	March 05, 2003
 *
 *  Copyright (C) 2000-2002	Michael Cornwell <cornwell@acm.org>
 *  Copyright (C) 2000-2002	Andre Hedrick <andre@linux-ide.org>
 *  Copyright (C) 2001-2002	Klaus Smolin
 *					IBM Storage Technology Division
 *  Copyright (C) 2003-2004	Bartlomiej Zolnierkiewicz
 *
 *  The big, the bad and the ugly.
 *
 *  Problems to be fixed because of the BH interface or the lack thereof.
 *
 *  Fill me in stupid !!!
 *
 *  HOST:
 *	Generally refers to the Controller and Driver "pair".
 *  DATA HANDLER:
 *	In the context of Linux it generally refers to an interrupt handler.
 *	However, it correctly describes the 'HOST'.
 *  DATA BLOCK:
 *	The amount of data needed to be transferred as predefined in the
 *	setup of the device.
 *  STORAGE ATOMIC:
 *	The 'DATA BLOCK' associated with the 'DATA HANDLER'; it can be as
 *	small as a single sector or as large as the entire command block
 *	request.
 */

#include <linux/config.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/major.h>
#include <linux/errno.h>
#include <linux/genhd.h>
#include <linux/blkpg.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/hdreg.h>
#include <linux/ide.h>
#include <linux/bitops.h>

#include <asm/byteorder.h>
#include <asm/irq.h>
#include <asm/uaccess.h>
#include <asm/io.h>

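/*
 * Swap the byte order of each 16-bit word in a data buffer.  Used for
 * drives flagged with ->bswap, whose data words arrive byte-swapped
 * relative to what the host expects.
 */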
static void ata_bswap_data (void *buffer, int wcount)
{
	u16 *p = buffer;

	while (wcount--) {
		*p = *p << 8 | *p >> 8; p++;
		*p = *p << 8 | *p >> 8; p++;
	}
}

static void taskfile_input_data(ide_drive_t *drive, void *buffer, u32 wcount)
{
	HWIF(drive)->ata_input_data(drive, buffer, wcount);
	if (drive->bswap)
		ata_bswap_data(buffer, wcount);
}

static void taskfile_output_data(ide_drive_t *drive, void *buffer, u32 wcount)
{
	if (drive->bswap) {
		ata_bswap_data(buffer, wcount);
		HWIF(drive)->ata_output_data(drive, buffer, wcount);
		ata_bswap_data(buffer, wcount);
	} else {
		HWIF(drive)->ata_output_data(drive, buffer, wcount);
	}
}

int taskfile_lib_get_identify (ide_drive_t *drive, u8 *buf)
{
	ide_task_t args;
	memset(&args, 0, sizeof(ide_task_t));
	args.tfRegister[IDE_NSECTOR_OFFSET]	= 0x01;
	if (drive->media == ide_disk)
		args.tfRegister[IDE_COMMAND_OFFSET]	= WIN_IDENTIFY;
	else
		args.tfRegister[IDE_COMMAND_OFFSET]	= WIN_PIDENTIFY;
	args.command_type = IDE_DRIVE_TASK_IN;
	args.data_phase   = TASKFILE_IN;
	args.handler      = &task_in_intr;
	return ide_raw_taskfile(drive, &args, buf);
}

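/*
 * do_rw_taskfile() programs the taskfile registers and issues the command.
 * For LBA48 (drive->addressing == 1) the high-order bytes are written to
 * the HOB registers first, then the standard registers.  PIO commands are
 * dispatched through task->handler (with an optional prehandler for the
 * write data phase); recognised DMA commands are handed to the DMA engine.
 */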
ide_startstop_t do_rw_taskfile (ide_drive_t *drive, ide_task_t *task)
{
	ide_hwif_t *hwif	= HWIF(drive);
	task_struct_t *taskfile	= (task_struct_t *) task->tfRegister;
	hob_struct_t *hobfile	= (hob_struct_t *) task->hobRegister;
	u8 HIHI			= (drive->addressing == 1) ? 0xE0 : 0xEF;

	/* ALL Command Block Executions SHALL clear nIEN, unless otherwise specified */
	if (IDE_CONTROL_REG) {
		/* clear nIEN */
		hwif->OUTB(drive->ctl, IDE_CONTROL_REG);
	}
	SELECT_MASK(drive, 0);

	if (drive->addressing == 1) {
		hwif->OUTB(hobfile->feature, IDE_FEATURE_REG);
		hwif->OUTB(hobfile->sector_count, IDE_NSECTOR_REG);
		hwif->OUTB(hobfile->sector_number, IDE_SECTOR_REG);
		hwif->OUTB(hobfile->low_cylinder, IDE_LCYL_REG);
		hwif->OUTB(hobfile->high_cylinder, IDE_HCYL_REG);
	}

	hwif->OUTB(taskfile->feature, IDE_FEATURE_REG);
	hwif->OUTB(taskfile->sector_count, IDE_NSECTOR_REG);
	hwif->OUTB(taskfile->sector_number, IDE_SECTOR_REG);
	hwif->OUTB(taskfile->low_cylinder, IDE_LCYL_REG);
	hwif->OUTB(taskfile->high_cylinder, IDE_HCYL_REG);

	hwif->OUTB((taskfile->device_head & HIHI) | drive->select.all, IDE_SELECT_REG);

	if (task->handler != NULL) {
		if (task->prehandler != NULL) {
			hwif->OUTBSYNC(drive, taskfile->command, IDE_COMMAND_REG);
			ndelay(400);	/* FIXME */
			return task->prehandler(drive, task->rq);
		}
		ide_execute_command(drive, taskfile->command, task->handler, WAIT_WORSTCASE, NULL);
		return ide_started;
	}

	if (!drive->using_dma)
		return ide_stopped;

	switch (taskfile->command) {
		case WIN_WRITEDMA_ONCE:
		case WIN_WRITEDMA:
		case WIN_WRITEDMA_EXT:
		case WIN_READDMA_ONCE:
		case WIN_READDMA:
		case WIN_READDMA_EXT:
		case WIN_IDENTIFY_DMA:
			if (!hwif->dma_setup(drive)) {
				hwif->dma_exec_cmd(drive, taskfile->command);
				hwif->dma_start(drive);
				return ide_started;
			}
			break;
		default:
			if (task->handler == NULL)
				return ide_stopped;
	}

	return ide_stopped;
}

/*
 * set_multmode_intr() is invoked on completion of a WIN_SETMULT cmd.
 */
ide_startstop_t set_multmode_intr (ide_drive_t *drive)
{
	ide_hwif_t *hwif = HWIF(drive);
	u8 stat;

	if (OK_STAT(stat = hwif->INB(IDE_STATUS_REG),READY_STAT,BAD_STAT)) {
		drive->mult_count = drive->mult_req;
	} else {
		drive->mult_req = drive->mult_count = 0;
		drive->special.b.recalibrate = 1;
		(void) ide_dump_status(drive, "set_multmode", stat);
	}
	return ide_stopped;
}

/*
 * set_geometry_intr() is invoked on completion of a WIN_SPECIFY cmd.
 */
ide_startstop_t set_geometry_intr (ide_drive_t *drive)
{
	ide_hwif_t *hwif = HWIF(drive);
	int retries = 5;
	u8 stat;

	while (((stat = hwif->INB(IDE_STATUS_REG)) & BUSY_STAT) && retries--)
		udelay(10);

	if (OK_STAT(stat, READY_STAT, BAD_STAT))
		return ide_stopped;

	if (stat & (ERR_STAT|DRQ_STAT))
		return ide_error(drive, "set_geometry_intr", stat);

	if (HWGROUP(drive)->handler != NULL)
		BUG();
	ide_set_handler(drive, &set_geometry_intr, WAIT_WORSTCASE, NULL);
	return ide_started;
}

/*
 * recal_intr() is invoked on completion of a WIN_RESTORE (recalibrate) cmd.
 */
ide_startstop_t recal_intr (ide_drive_t *drive)
{
	ide_hwif_t *hwif = HWIF(drive);
	u8 stat;

	if (!OK_STAT(stat = hwif->INB(IDE_STATUS_REG), READY_STAT, BAD_STAT))
		return ide_error(drive, "recal_intr", stat);
	return ide_stopped;
}

/*
 * Handler for commands without a data phase
 */
ide_startstop_t task_no_data_intr (ide_drive_t *drive)
{
	ide_task_t *args	= HWGROUP(drive)->rq->special;
	ide_hwif_t *hwif	= HWIF(drive);
	u8 stat;

	local_irq_enable();
	if (!OK_STAT(stat = hwif->INB(IDE_STATUS_REG),READY_STAT,BAD_STAT)) {
		return ide_error(drive, "task_no_data_intr", stat);
		/* calls ide_end_drive_cmd */
	}
	if (args)
		ide_end_drive_cmd(drive, stat, hwif->INB(IDE_ERROR_REG));

	return ide_stopped;
}

EXPORT_SYMBOL(task_no_data_intr);

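/*
 * Poll the status register until BSY clears (or we give up after ~1 ms)
 * and return the last status value read.
 */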
static u8 wait_drive_not_busy(ide_drive_t *drive)
{
	ide_hwif_t *hwif = HWIF(drive);
	int retries = 100;
	u8 stat;

	/*
	 * Last sector was transferred, wait until drive is ready.
	 * This can take up to 10 usec, but we will wait max 1 ms
	 * (drive_cmd_intr() waits that long).
	 */
	while (((stat = hwif->INB(IDE_STATUS_REG)) & BUSY_STAT) && retries--)
		udelay(10);

	if (!retries)
		printk(KERN_ERR "%s: drive still BUSY!\n", drive->name);

	return stat;
}

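/*
 * Transfer a single sector by PIO.  The current position in the
 * scatter/gather list (hwif->cursg/cursg_ofs) is translated into a page
 * and offset, the page is mapped with kmap_atomic() (with interrupts
 * masked around the mapping on highmem configurations), and the data is
 * moved with taskfile_input_data()/taskfile_output_data().
 */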
static void ide_pio_sector(ide_drive_t *drive, unsigned int write)
{
	ide_hwif_t *hwif = drive->hwif;
	struct scatterlist *sg = hwif->sg_table;
	struct page *page;
#ifdef CONFIG_HIGHMEM
	unsigned long flags;
#endif
	unsigned int offset;
	u8 *buf;

	page = sg[hwif->cursg].page;
	offset = sg[hwif->cursg].offset + hwif->cursg_ofs * SECTOR_SIZE;

	/* get the current page and offset */
	page = nth_page(page, (offset >> PAGE_SHIFT));
	offset %= PAGE_SIZE;

#ifdef CONFIG_HIGHMEM
	local_irq_save(flags);
#endif
	buf = kmap_atomic(page, KM_BIO_SRC_IRQ) + offset;

	hwif->nleft--;
	hwif->cursg_ofs++;

	if ((hwif->cursg_ofs * SECTOR_SIZE) == sg[hwif->cursg].length) {
		hwif->cursg++;
		hwif->cursg_ofs = 0;
	}

	/* do the actual data transfer */
	if (write)
		taskfile_output_data(drive, buf, SECTOR_WORDS);
	else
		taskfile_input_data(drive, buf, SECTOR_WORDS);

	kunmap_atomic(buf, KM_BIO_SRC_IRQ);
#ifdef CONFIG_HIGHMEM
	local_irq_restore(flags);
#endif
}

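/*
 * Transfer up to mult_count sectors for Read/Write Multiple, bounded by
 * the number of sectors still outstanding for this request.
 */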
static void ide_pio_multi(ide_drive_t *drive, unsigned int write)
{
	unsigned int nsect;

	nsect = min_t(unsigned int, drive->hwif->nleft, drive->mult_count);
	while (nsect--)
		ide_pio_sector(drive, write);
}

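/*
 * Transfer one data block: a multi-sector chunk for the MULTI data
 * phases, a single sector otherwise.
 */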
static void ide_pio_datablock(ide_drive_t *drive, struct request *rq,
				     unsigned int write)
{
	if (rq->bio)	/* fs request */
		rq->errors = 0;

	switch (drive->hwif->data_phase) {
	case TASKFILE_MULTI_IN:
	case TASKFILE_MULTI_OUT:
		ide_pio_multi(drive, write);
		break;
	default:
		ide_pio_sector(drive, write);
		break;
	}
}

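/*
 * A PIO data phase failed.  For fs requests, work out how many sectors
 * completed successfully (the block currently in flight is not counted)
 * and end that part of the request before reporting the error.
 */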
static ide_startstop_t task_error(ide_drive_t *drive, struct request *rq,
				  const char *s, u8 stat)
{
	if (rq->bio) {
		ide_hwif_t *hwif = drive->hwif;
		int sectors = hwif->nsect - hwif->nleft;

		switch (hwif->data_phase) {
		case TASKFILE_IN:
			if (hwif->nleft)
				break;
			/* fall through */
		case TASKFILE_OUT:
			sectors--;
			break;
		case TASKFILE_MULTI_IN:
			if (hwif->nleft)
				break;
			/* fall through */
		case TASKFILE_MULTI_OUT:
			sectors -= drive->mult_count;
		default:
			break;
		}

		if (sectors > 0) {
			ide_driver_t *drv;

			drv = *(ide_driver_t **)rq->rq_disk->private_data;
			drv->end_request(drive, 1, sectors);
		}
	}
	return ide_error(drive, s, stat);
}

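/*
 * Complete a request after a successful data phase.  Taskfile requests
 * that asked for register output get their results copied back via
 * ide_end_drive_cmd(); everything else is finished with ide_end_request().
 */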
static void task_end_request(ide_drive_t *drive, struct request *rq, u8 stat)
{
	if (rq->flags & REQ_DRIVE_TASKFILE) {
		ide_task_t *task = rq->special;

		if (task->tf_out_flags.all) {
			u8 err = drive->hwif->INB(IDE_ERROR_REG);
			ide_end_drive_cmd(drive, stat, err);
			return;
		}
	}

	ide_end_request(drive, 1, rq->hard_nr_sectors);
}

/*
 * Handler for command with PIO data-in phase (Read/Read Multiple).
 */
ide_startstop_t task_in_intr (ide_drive_t *drive)
{
	ide_hwif_t *hwif = drive->hwif;
	struct request *rq = HWGROUP(drive)->rq;
	u8 stat = hwif->INB(IDE_STATUS_REG);

	/* new way for dealing with premature shared PCI interrupts */
	if (!OK_STAT(stat, DATA_READY, BAD_R_STAT)) {
		if (stat & (ERR_STAT | DRQ_STAT))
			return task_error(drive, rq, __FUNCTION__, stat);
		/* No data yet, so wait for another IRQ. */
		ide_set_handler(drive, &task_in_intr, WAIT_WORSTCASE, NULL);
		return ide_started;
	}

	ide_pio_datablock(drive, rq, 0);

	/* If it was the last datablock check status and finish transfer. */
	if (!hwif->nleft) {
		stat = wait_drive_not_busy(drive);
		if (!OK_STAT(stat, 0, BAD_R_STAT))
			return task_error(drive, rq, __FUNCTION__, stat);
		task_end_request(drive, rq, stat);
		return ide_stopped;
	}

	/* Still data left to transfer. */
	ide_set_handler(drive, &task_in_intr, WAIT_WORSTCASE, NULL);

	return ide_started;
}
EXPORT_SYMBOL(task_in_intr);

/*
 * Handler for command with PIO data-out phase (Write/Write Multiple).
 */
static ide_startstop_t task_out_intr (ide_drive_t *drive)
{
	ide_hwif_t *hwif = drive->hwif;
	struct request *rq = HWGROUP(drive)->rq;
	u8 stat = hwif->INB(IDE_STATUS_REG);

	if (!OK_STAT(stat, DRIVE_READY, drive->bad_wstat))
		return task_error(drive, rq, __FUNCTION__, stat);

	/* Deal with unexpected ATA data phase. */
	if (((stat & DRQ_STAT) == 0) ^ !hwif->nleft)
		return task_error(drive, rq, __FUNCTION__, stat);

	if (!hwif->nleft) {
		task_end_request(drive, rq, stat);
		return ide_stopped;
	}

	/* Still data left to transfer. */
	ide_pio_datablock(drive, rq, 1);
	ide_set_handler(drive, &task_out_intr, WAIT_WORSTCASE, NULL);

	return ide_started;
}

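/*
 * Prehandler for the PIO data-out phase: wait for the drive to raise DRQ
 * after the command has been issued, then push the first data block out
 * before the first data interrupt can be expected.
 */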
ide_startstop_t pre_task_out_intr (ide_drive_t *drive, struct request *rq)
{
	ide_startstop_t startstop;

	if (ide_wait_stat(&startstop, drive, DATA_READY,
			  drive->bad_wstat, WAIT_DRQ)) {
		printk(KERN_ERR "%s: no DRQ after issuing %sWRITE%s\n",
				drive->name,
				drive->hwif->data_phase ? "MULT" : "",
				drive->addressing ? "_EXT" : "");
		return startstop;
	}

	if (!drive->unmask)
		local_irq_disable();

	ide_set_handler(drive, &task_out_intr, WAIT_WORSTCASE, NULL);
	ide_pio_datablock(drive, rq, 1);

	return ide_started;
}
EXPORT_SYMBOL(pre_task_out_intr);

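/*
 * Build a REQ_DRIVE_TASKFILE request around @args and execute it
 * synchronously.  The transfer size is taken from @data_size, or, when
 * that is zero, from the (hob) sector count registers of the taskfile.
 */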
static int ide_diag_taskfile(ide_drive_t *drive, ide_task_t *args, unsigned long data_size, u8 *buf)
{
	struct request rq;

	memset(&rq, 0, sizeof(rq));
	rq.flags = REQ_DRIVE_TASKFILE;
	rq.buffer = buf;

	/*
	 * (ks) We currently transfer only whole sectors.
	 * This is sufficient for now.  But it would be great
	 * to find a way to transfer any size, to support
	 * special commands like READ LONG.
	 */
	if (args->command_type != IDE_DRIVE_TASK_NO_DATA) {
		if (data_size == 0)
			rq.nr_sectors = (args->hobRegister[IDE_NSECTOR_OFFSET] << 8) | args->tfRegister[IDE_NSECTOR_OFFSET];
		else
			rq.nr_sectors = data_size / SECTOR_SIZE;

		if (!rq.nr_sectors) {
			printk(KERN_ERR "%s: in/out command without data\n",
					drive->name);
			return -EFAULT;
		}

		rq.hard_nr_sectors = rq.nr_sectors;
		rq.hard_cur_sectors = rq.current_nr_sectors = rq.nr_sectors;

		if (args->command_type == IDE_DRIVE_TASK_RAW_WRITE)
			rq.flags |= REQ_RW;
	}

	rq.special = args;
	args->rq = &rq;
	return ide_do_drive_cmd(drive, &rq, ide_wait);
}

int ide_raw_taskfile (ide_drive_t *drive, ide_task_t *args, u8 *buf)
{
	return ide_diag_taskfile(drive, args, 0, buf);
}

EXPORT_SYMBOL(ide_raw_taskfile);

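/*
 * Handler for the HDIO_DRIVE_TASKFILE ioctl.  The user buffer starts with
 * an ide_task_request_t header, immediately followed by out_size bytes of
 * write data and then in_size bytes of space for read data.  The taskfile
 * is executed with 32-bit I/O temporarily disabled, and the resulting
 * register values and data are copied back in the same layout.
 */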
int ide_taskfile_ioctl (ide_drive_t *drive, unsigned int cmd, unsigned long arg)
{
	ide_task_request_t	*req_task;
	ide_task_t		args;
	u8 *outbuf		= NULL;
	u8 *inbuf		= NULL;
	task_ioreg_t *argsptr	= args.tfRegister;
	task_ioreg_t *hobsptr	= args.hobRegister;
	int err			= 0;
	int tasksize		= sizeof(struct ide_task_request_s);
	int taskin		= 0;
	int taskout		= 0;
	u8 io_32bit		= drive->io_32bit;
	char __user *buf = (char __user *)arg;

//	printk("IDE Taskfile ...\n");

	req_task = kzalloc(tasksize, GFP_KERNEL);
	if (req_task == NULL) return -ENOMEM;
	if (copy_from_user(req_task, buf, tasksize)) {
		kfree(req_task);
		return -EFAULT;
	}

	taskout = (int) req_task->out_size;
	taskin  = (int) req_task->in_size;

	if (taskout) {
		int outtotal = tasksize;
		outbuf = kzalloc(taskout, GFP_KERNEL);
		if (outbuf == NULL) {
			err = -ENOMEM;
			goto abort;
		}
		if (copy_from_user(outbuf, buf + outtotal, taskout)) {
			err = -EFAULT;
			goto abort;
		}
	}

	if (taskin) {
		int intotal = tasksize + taskout;
		inbuf = kzalloc(taskin, GFP_KERNEL);
		if (inbuf == NULL) {
			err = -ENOMEM;
			goto abort;
		}
		if (copy_from_user(inbuf, buf + intotal, taskin)) {
			err = -EFAULT;
			goto abort;
		}
	}

	memset(&args, 0, sizeof(ide_task_t));
	memcpy(argsptr, req_task->io_ports, HDIO_DRIVE_TASK_HDR_SIZE);
	memcpy(hobsptr, req_task->hob_ports, HDIO_DRIVE_HOB_HDR_SIZE);

	args.tf_in_flags  = req_task->in_flags;
	args.tf_out_flags = req_task->out_flags;
	args.data_phase   = req_task->data_phase;
	args.command_type = req_task->req_cmd;

	drive->io_32bit = 0;
	switch(req_task->data_phase) {
		case TASKFILE_OUT_DMAQ:
		case TASKFILE_OUT_DMA:
			err = ide_diag_taskfile(drive, &args, taskout, outbuf);
			break;
		case TASKFILE_IN_DMAQ:
		case TASKFILE_IN_DMA:
			err = ide_diag_taskfile(drive, &args, taskin, inbuf);
			break;
		case TASKFILE_MULTI_OUT:
			if (!drive->mult_count) {
				/* (hs): give up if multcount is not set */
				printk(KERN_ERR "%s: %s Multimode Write " \
					"multcount is not set\n",
					drive->name, __FUNCTION__);
				err = -EPERM;
				goto abort;
			}
			/* fall through */
		case TASKFILE_OUT:
			args.prehandler = &pre_task_out_intr;
			args.handler = &task_out_intr;
			err = ide_diag_taskfile(drive, &args, taskout, outbuf);
			break;
		case TASKFILE_MULTI_IN:
			if (!drive->mult_count) {
				/* (hs): give up if multcount is not set */
				printk(KERN_ERR "%s: %s Multimode Read failure " \
					"multcount is not set\n",
					drive->name, __FUNCTION__);
				err = -EPERM;
				goto abort;
			}
			/* fall through */
		case TASKFILE_IN:
			args.handler = &task_in_intr;
			err = ide_diag_taskfile(drive, &args, taskin, inbuf);
			break;
		case TASKFILE_NO_DATA:
			args.handler = &task_no_data_intr;
			err = ide_diag_taskfile(drive, &args, 0, NULL);
			break;
		default:
			err = -EFAULT;
			goto abort;
	}

	memcpy(req_task->io_ports, &(args.tfRegister), HDIO_DRIVE_TASK_HDR_SIZE);
	memcpy(req_task->hob_ports, &(args.hobRegister), HDIO_DRIVE_HOB_HDR_SIZE);
	req_task->in_flags  = args.tf_in_flags;
	req_task->out_flags = args.tf_out_flags;

	if (copy_to_user(buf, req_task, tasksize)) {
		err = -EFAULT;
		goto abort;
	}
	if (taskout) {
		int outtotal = tasksize;
		if (copy_to_user(buf + outtotal, outbuf, taskout)) {
			err = -EFAULT;
			goto abort;
		}
	}
	if (taskin) {
		int intotal = tasksize + taskout;
		if (copy_to_user(buf + intotal, inbuf, taskin)) {
			err = -EFAULT;
			goto abort;
		}
	}
abort:
	kfree(req_task);
	kfree(outbuf);
	kfree(inbuf);

//	printk("IDE Taskfile ioctl ended. rc = %i\n", err);

	drive->io_32bit = io_32bit;

	return err;
}

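/*
 * Issue a simple drive command and wait for completion.  The buffer
 * layout matches HDIO_DRIVE_CMD: buf[0..3] carry the command, sector
 * count, feature and number of data sectors, and any data transferred
 * from the drive follows at buf[4].
 */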
int ide_wait_cmd (ide_drive_t *drive, u8 cmd, u8 nsect, u8 feature, u8 sectors, u8 *buf)
{
	struct request rq;
	u8 buffer[4];

	if (!buf)
		buf = buffer;
	memset(buf, 0, 4 + SECTOR_WORDS * 4 * sectors);
	ide_init_drive_cmd(&rq);
	rq.buffer = buf;
	*buf++ = cmd;
	*buf++ = nsect;
	*buf++ = feature;
	*buf++ = sectors;
	return ide_do_drive_cmd(drive, &rq, ide_wait);
}

673  * FIXME : this needs to map into at taskfile. <andre@linux-ide.org>
674  */
675 int ide_cmd_ioctl (ide_drive_t *drive, unsigned int cmd, unsigned long arg)
676 {
677         int err = 0;
678         u8 args[4], *argbuf = args;
679         u8 xfer_rate = 0;
680         int argsize = 4;
681         ide_task_t tfargs;
682
683         if (NULL == (void *) arg) {
684                 struct request rq;
685                 ide_init_drive_cmd(&rq);
686                 return ide_do_drive_cmd(drive, &rq, ide_wait);
687         }
688
689         if (copy_from_user(args, (void __user *)arg, 4))
690                 return -EFAULT;
691
692         memset(&tfargs, 0, sizeof(ide_task_t));
693         tfargs.tfRegister[IDE_FEATURE_OFFSET] = args[2];
694         tfargs.tfRegister[IDE_NSECTOR_OFFSET] = args[3];
695         tfargs.tfRegister[IDE_SECTOR_OFFSET]  = args[1];
696         tfargs.tfRegister[IDE_LCYL_OFFSET]    = 0x00;
697         tfargs.tfRegister[IDE_HCYL_OFFSET]    = 0x00;
698         tfargs.tfRegister[IDE_SELECT_OFFSET]  = 0x00;
699         tfargs.tfRegister[IDE_COMMAND_OFFSET] = args[0];
700
701         if (args[3]) {
702                 argsize = 4 + (SECTOR_WORDS * 4 * args[3]);
703                 argbuf = kzalloc(argsize, GFP_KERNEL);
704                 if (argbuf == NULL)
705                         return -ENOMEM;
706         }
707         if (set_transfer(drive, &tfargs)) {
708                 xfer_rate = args[1];
709                 if (ide_ata66_check(drive, &tfargs))
710                         goto abort;
711         }
712
713         err = ide_wait_cmd(drive, args[0], args[1], args[2], args[3], argbuf);
714
715         if (!err && xfer_rate) {
716                 /* active-retuning-calls future */
717                 ide_set_xfer_rate(drive, xfer_rate);
718                 ide_driveid_update(drive);
719         }
720 abort:
721         if (copy_to_user((void __user *)arg, argbuf, argsize))
722                 err = -EFAULT;
723         if (argsize > 4)
724                 kfree(argbuf);
725         return err;
726 }
727
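/*
 * Issue a raw REQ_DRIVE_TASK request (register values supplied in @buf)
 * and wait for it to complete.  Used by the HDIO_DRIVE_TASK ioctl below.
 */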
static int ide_wait_cmd_task(ide_drive_t *drive, u8 *buf)
{
	struct request rq;

	ide_init_drive_cmd(&rq);
	rq.flags = REQ_DRIVE_TASK;
	rq.buffer = buf;
	return ide_do_drive_cmd(drive, &rq, ide_wait);
}

/*
 * FIXME: this needs to be mapped onto a taskfile. <andre@linux-ide.org>
 */
int ide_task_ioctl (ide_drive_t *drive, unsigned int cmd, unsigned long arg)
{
	void __user *p = (void __user *)arg;
	int err = 0;
	u8 args[7], *argbuf = args;
	int argsize = 7;

	if (copy_from_user(args, p, 7))
		return -EFAULT;
	err = ide_wait_cmd_task(drive, argbuf);
	if (copy_to_user(p, argbuf, argsize))
		err = -EFAULT;
	return err;
}

/*
 * NOTICE: These are additions from IBM to provide a discrete interface
 * for selective task register access operations.  Nice JOB Klaus!!!
 * Glad to be able to work and co-develop this with you and IBM.
 */
ide_startstop_t flagged_taskfile (ide_drive_t *drive, ide_task_t *task)
{
	ide_hwif_t *hwif	= HWIF(drive);
	task_struct_t *taskfile = (task_struct_t *) task->tfRegister;
	hob_struct_t *hobfile	= (hob_struct_t *) task->hobRegister;

	if (task->data_phase == TASKFILE_MULTI_IN ||
	    task->data_phase == TASKFILE_MULTI_OUT) {
		if (!drive->mult_count) {
			printk(KERN_ERR "%s: multimode not set!\n", drive->name);
			return ide_stopped;
		}
	}

	/*
	 * (ks) Check the taskfile in flags.
	 * If set, then execute as defined.
	 * If not set, use the default settings:
	 *	read all taskfile registers (except data)
	 *	read the hob registers (sector, nsector, lcyl, hcyl)
	 */
	if (task->tf_in_flags.all == 0) {
		task->tf_in_flags.all = IDE_TASKFILE_STD_IN_FLAGS;
		if (drive->addressing == 1)
			task->tf_in_flags.all |= (IDE_HOB_STD_IN_FLAGS  << 8);
	}

	/* ALL Command Block Executions SHALL clear nIEN, unless otherwise specified */
	if (IDE_CONTROL_REG)
		/* clear nIEN */
		hwif->OUTB(drive->ctl, IDE_CONTROL_REG);
	SELECT_MASK(drive, 0);

	if (task->tf_out_flags.b.data) {
		u16 data =  taskfile->data + (hobfile->data << 8);
		hwif->OUTW(data, IDE_DATA_REG);
	}

	/* (ks) send hob registers first */
	if (task->tf_out_flags.b.nsector_hob)
		hwif->OUTB(hobfile->sector_count, IDE_NSECTOR_REG);
	if (task->tf_out_flags.b.sector_hob)
		hwif->OUTB(hobfile->sector_number, IDE_SECTOR_REG);
	if (task->tf_out_flags.b.lcyl_hob)
		hwif->OUTB(hobfile->low_cylinder, IDE_LCYL_REG);
	if (task->tf_out_flags.b.hcyl_hob)
		hwif->OUTB(hobfile->high_cylinder, IDE_HCYL_REG);

	/* (ks) now send the standard registers */
	if (task->tf_out_flags.b.error_feature)
		hwif->OUTB(taskfile->feature, IDE_FEATURE_REG);
	/* refers to number of sectors to transfer */
	if (task->tf_out_flags.b.nsector)
		hwif->OUTB(taskfile->sector_count, IDE_NSECTOR_REG);
	/* refers to sector offset or start sector */
	if (task->tf_out_flags.b.sector)
		hwif->OUTB(taskfile->sector_number, IDE_SECTOR_REG);
	if (task->tf_out_flags.b.lcyl)
		hwif->OUTB(taskfile->low_cylinder, IDE_LCYL_REG);
	if (task->tf_out_flags.b.hcyl)
		hwif->OUTB(taskfile->high_cylinder, IDE_HCYL_REG);

	/*
	 * (ks) In the flagged taskfile approach we use all specified
	 * registers and the register values are not changed, except for
	 * the select bit (master/slave) in the drive_head register.  We
	 * must make sure that the desired drive is selected.
	 */
	hwif->OUTB(taskfile->device_head | drive->select.all, IDE_SELECT_REG);
	switch(task->data_phase) {

		case TASKFILE_OUT_DMAQ:
		case TASKFILE_OUT_DMA:
		case TASKFILE_IN_DMAQ:
		case TASKFILE_IN_DMA:
			hwif->dma_setup(drive);
			hwif->dma_exec_cmd(drive, taskfile->command);
			hwif->dma_start(drive);
			break;

		default:
			if (task->handler == NULL)
				return ide_stopped;

			/* Issue the command */
			if (task->prehandler) {
				hwif->OUTBSYNC(drive, taskfile->command, IDE_COMMAND_REG);
				ndelay(400);	/* FIXME */
				return task->prehandler(drive, task->rq);
			}
			ide_execute_command(drive, taskfile->command, task->handler, WAIT_WORSTCASE, NULL);
	}

	return ide_started;
}