ide: remove ->bus_state field from ide_hwif_t
drivers/ide/ide-taskfile.c
/*
 *  Copyright (C) 2000-2002        Michael Cornwell <cornwell@acm.org>
 *  Copyright (C) 2000-2002        Andre Hedrick <andre@linux-ide.org>
 *  Copyright (C) 2001-2002        Klaus Smolin
 *                                      IBM Storage Technology Division
 *  Copyright (C) 2003-2004, 2007  Bartlomiej Zolnierkiewicz
 *
 *  The big the bad and the ugly.
 */

#include <linux/types.h>
#include <linux/string.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/hdreg.h>
#include <linux/ide.h>
#include <linux/scatterlist.h>

#include <asm/uaccess.h>
#include <asm/io.h>

void ide_tf_dump(const char *s, struct ide_taskfile *tf)
{
#ifdef DEBUG
        printk("%s: tf: feat 0x%02x nsect 0x%02x lbal 0x%02x "
                "lbam 0x%02x lbah 0x%02x dev 0x%02x cmd 0x%02x\n",
                s, tf->feature, tf->nsect, tf->lbal,
                tf->lbam, tf->lbah, tf->device, tf->command);
        printk("%s: hob: nsect 0x%02x lbal 0x%02x "
                "lbam 0x%02x lbah 0x%02x\n",
                s, tf->hob_nsect, tf->hob_lbal,
                tf->hob_lbam, tf->hob_lbah);
#endif
}

int taskfile_lib_get_identify (ide_drive_t *drive, u8 *buf)
{
        ide_task_t args;

        memset(&args, 0, sizeof(ide_task_t));
        args.tf.nsect = 0x01;
        if (drive->media == ide_disk)
                args.tf.command = ATA_CMD_ID_ATA;
        else
                args.tf.command = ATA_CMD_ID_ATAPI;
        args.tf_flags   = IDE_TFLAG_TF | IDE_TFLAG_DEVICE;
        args.data_phase = TASKFILE_IN;
        return ide_raw_taskfile(drive, &args, buf, 1);
}

static ide_startstop_t task_no_data_intr(ide_drive_t *);
static ide_startstop_t set_geometry_intr(ide_drive_t *);
static ide_startstop_t recal_intr(ide_drive_t *);
static ide_startstop_t set_multmode_intr(ide_drive_t *);
static ide_startstop_t pre_task_out_intr(ide_drive_t *, struct request *);
static ide_startstop_t task_in_intr(ide_drive_t *);

ide_startstop_t do_rw_taskfile (ide_drive_t *drive, ide_task_t *task)
{
        ide_hwif_t *hwif        = HWIF(drive);
        struct ide_taskfile *tf = &task->tf;
        ide_handler_t *handler = NULL;
        const struct ide_tp_ops *tp_ops = hwif->tp_ops;
        const struct ide_dma_ops *dma_ops = hwif->dma_ops;

        if (task->data_phase == TASKFILE_MULTI_IN ||
            task->data_phase == TASKFILE_MULTI_OUT) {
                if (!drive->mult_count) {
                        printk(KERN_ERR "%s: multimode not set!\n",
                                        drive->name);
                        return ide_stopped;
                }
        }

        if (task->tf_flags & IDE_TFLAG_FLAGGED)
                task->tf_flags |= IDE_TFLAG_FLAGGED_SET_IN_FLAGS;

        if ((task->tf_flags & IDE_TFLAG_DMA_PIO_FALLBACK) == 0) {
                ide_tf_dump(drive->name, tf);
                tp_ops->set_irq(hwif, 1);
                SELECT_MASK(drive, 0);
                tp_ops->tf_load(drive, task);
        }

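        /*
         * Dispatch on the data phase: PIO writes push the first data block
         * from pre_task_out_intr(), PIO reads and non-data commands install
         * an interrupt handler, and anything else is handed to the DMA
         * engine.
         */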
        switch (task->data_phase) {
        case TASKFILE_MULTI_OUT:
        case TASKFILE_OUT:
                tp_ops->exec_command(hwif, tf->command);
                ndelay(400);    /* FIXME */
                return pre_task_out_intr(drive, task->rq);
        case TASKFILE_MULTI_IN:
        case TASKFILE_IN:
                handler = task_in_intr;
                /* fall-through */
        case TASKFILE_NO_DATA:
                if (handler == NULL)
                        handler = task_no_data_intr;
                if (task->tf_flags & IDE_TFLAG_CUSTOM_HANDLER) {
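                        /*
                         * Special commands issued internally (set geometry,
                         * recalibrate, set multmode) get dedicated
                         * completion handlers.
                         */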
                        switch (tf->command) {
                        case ATA_CMD_INIT_DEV_PARAMS:
                                handler = set_geometry_intr;
                                break;
                        case ATA_CMD_RESTORE:
                                handler = recal_intr;
                                break;
                        case ATA_CMD_SET_MULTI:
                                handler = set_multmode_intr;
                                break;
                        }
                }
                ide_execute_command(drive, tf->command, handler,
                                    WAIT_WORSTCASE, NULL);
                return ide_started;
        default:
                if (drive->using_dma == 0 || dma_ops->dma_setup(drive))
                        return ide_stopped;
                dma_ops->dma_exec_cmd(drive, tf->command);
                dma_ops->dma_start(drive);
                return ide_started;
        }
}
EXPORT_SYMBOL_GPL(do_rw_taskfile);

/*
 * set_multmode_intr() is invoked on completion of an ATA_CMD_SET_MULTI cmd.
 */
static ide_startstop_t set_multmode_intr(ide_drive_t *drive)
{
        ide_hwif_t *hwif = drive->hwif;
        u8 stat;

        local_irq_enable_in_hardirq();
        stat = hwif->tp_ops->read_status(hwif);

        if (OK_STAT(stat, ATA_DRDY, BAD_STAT))
                drive->mult_count = drive->mult_req;
        else {
                drive->mult_req = drive->mult_count = 0;
                drive->special.b.recalibrate = 1;
                (void) ide_dump_status(drive, "set_multmode", stat);
        }
        return ide_stopped;
}

/*
 * set_geometry_intr() is invoked on completion of an ATA_CMD_INIT_DEV_PARAMS cmd.
 */
static ide_startstop_t set_geometry_intr(ide_drive_t *drive)
{
        ide_hwif_t *hwif = drive->hwif;
        int retries = 5;
        u8 stat;

        local_irq_enable_in_hardirq();

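        /* Give the device a brief moment (up to ~50 us) to clear BUSY. */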
        while (1) {
                stat = hwif->tp_ops->read_status(hwif);
                if ((stat & ATA_BUSY) == 0 || retries-- == 0)
                        break;
                udelay(10);
        }

        if (OK_STAT(stat, ATA_DRDY, BAD_STAT))
                return ide_stopped;

        if (stat & (ATA_ERR | ATA_DRQ))
                return ide_error(drive, "set_geometry_intr", stat);

        ide_set_handler(drive, &set_geometry_intr, WAIT_WORSTCASE, NULL);
        return ide_started;
}

/*
 * recal_intr() is invoked on completion of an ATA_CMD_RESTORE (recalibrate) cmd.
 */
static ide_startstop_t recal_intr(ide_drive_t *drive)
{
        ide_hwif_t *hwif = drive->hwif;
        u8 stat;

        local_irq_enable_in_hardirq();
        stat = hwif->tp_ops->read_status(hwif);

        if (!OK_STAT(stat, ATA_DRDY, BAD_STAT))
                return ide_error(drive, "recal_intr", stat);
        return ide_stopped;
}

/*
 * Handler for commands without a data phase
 */
static ide_startstop_t task_no_data_intr(ide_drive_t *drive)
{
        ide_hwif_t *hwif = drive->hwif;
        ide_task_t *args = hwif->hwgroup->rq->special;
        u8 stat;

        local_irq_enable_in_hardirq();
        stat = hwif->tp_ops->read_status(hwif);

        if (!OK_STAT(stat, ATA_DRDY, BAD_STAT))
                return ide_error(drive, "task_no_data_intr", stat);
                /* calls ide_end_drive_cmd */

        if (args)
                ide_end_drive_cmd(drive, stat, ide_read_error(drive));

        return ide_stopped;
}

static u8 wait_drive_not_busy(ide_drive_t *drive)
{
        ide_hwif_t *hwif = drive->hwif;
        int retries;
        u8 stat;

        /*
         * Last sector was transferred, wait until device is ready.  This can
         * take up to 6 ms on some ATAPI devices, so we will wait max 10 ms.
         */
        for (retries = 0; retries < 1000; retries++) {
                stat = hwif->tp_ops->read_status(hwif);

                if (stat & ATA_BUSY)
                        udelay(10);
                else
                        break;
        }

        if (stat & ATA_BUSY)
                printk(KERN_ERR "%s: drive still BUSY!\n", drive->name);

        return stat;
}

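/*
 * Transfer a single sector over PIO, walking the scatter/gather list one
 * sector at a time.  On highmem configurations the page is mapped with
 * kmap_atomic(KM_BIO_SRC_IRQ); interrupts are blocked around the mapping
 * so that an interrupt cannot reuse the per-CPU kmap slot.
 */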
static void ide_pio_sector(ide_drive_t *drive, struct request *rq,
                           unsigned int write)
{
        ide_hwif_t *hwif = drive->hwif;
        struct scatterlist *sg = hwif->sg_table;
        struct scatterlist *cursg = hwif->cursg;
        struct page *page;
#ifdef CONFIG_HIGHMEM
        unsigned long flags;
#endif
        unsigned int offset;
        u8 *buf;

        cursg = hwif->cursg;
        if (!cursg) {
                cursg = sg;
                hwif->cursg = sg;
        }

        page = sg_page(cursg);
        offset = cursg->offset + hwif->cursg_ofs * SECTOR_SIZE;

        /* get the current page and offset */
        page = nth_page(page, (offset >> PAGE_SHIFT));
        offset %= PAGE_SIZE;

#ifdef CONFIG_HIGHMEM
        local_irq_save(flags);
#endif
        buf = kmap_atomic(page, KM_BIO_SRC_IRQ) + offset;

        hwif->nleft--;
        hwif->cursg_ofs++;

        if ((hwif->cursg_ofs * SECTOR_SIZE) == cursg->length) {
                hwif->cursg = sg_next(hwif->cursg);
                hwif->cursg_ofs = 0;
        }

        /* do the actual data transfer */
        if (write)
                hwif->tp_ops->output_data(drive, rq, buf, SECTOR_SIZE);
        else
                hwif->tp_ops->input_data(drive, rq, buf, SECTOR_SIZE);

        kunmap_atomic(buf, KM_BIO_SRC_IRQ);
#ifdef CONFIG_HIGHMEM
        local_irq_restore(flags);
#endif
}

static void ide_pio_multi(ide_drive_t *drive, struct request *rq,
                          unsigned int write)
{
        unsigned int nsect;

        nsect = min_t(unsigned int, drive->hwif->nleft, drive->mult_count);
        while (nsect--)
                ide_pio_sector(drive, rq, write);
}

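/*
 * Transfer one block of PIO data: a whole multi-sector block in
 * Read/Write Multiple mode, a single sector otherwise.  32-bit I/O is
 * temporarily disabled for taskfile requests flagged as 16-bit only.
 */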
static void ide_pio_datablock(ide_drive_t *drive, struct request *rq,
                                     unsigned int write)
{
        u8 saved_io_32bit = drive->io_32bit;

        if (rq->bio)    /* fs request */
                rq->errors = 0;

        if (rq->cmd_type == REQ_TYPE_ATA_TASKFILE) {
                ide_task_t *task = rq->special;

                if (task->tf_flags & IDE_TFLAG_IO_16BIT)
                        drive->io_32bit = 0;
        }

        touch_softlockup_watchdog();

        switch (drive->hwif->data_phase) {
        case TASKFILE_MULTI_IN:
        case TASKFILE_MULTI_OUT:
                ide_pio_multi(drive, rq, write);
                break;
        default:
                ide_pio_sector(drive, rq, write);
                break;
        }

        drive->io_32bit = saved_io_32bit;
}

static ide_startstop_t task_error(ide_drive_t *drive, struct request *rq,
                                  const char *s, u8 stat)
{
        if (rq->bio) {
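                /*
                 * For fs requests, complete the sectors that were
                 * transferred successfully before reporting the error.
                 */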
                ide_hwif_t *hwif = drive->hwif;
                int sectors = hwif->nsect - hwif->nleft;

                switch (hwif->data_phase) {
                case TASKFILE_IN:
                        if (hwif->nleft)
                                break;
                        /* fall through */
                case TASKFILE_OUT:
                        sectors--;
                        break;
                case TASKFILE_MULTI_IN:
                        if (hwif->nleft)
                                break;
                        /* fall through */
                case TASKFILE_MULTI_OUT:
                        sectors -= drive->mult_count;
                default:
                        break;
                }

                if (sectors > 0) {
                        ide_driver_t *drv;

                        drv = *(ide_driver_t **)rq->rq_disk->private_data;
                        drv->end_request(drive, 1, sectors);
                }
        }
        return ide_error(drive, s, stat);
}

void task_end_request(ide_drive_t *drive, struct request *rq, u8 stat)
{
        if (rq->cmd_type == REQ_TYPE_ATA_TASKFILE) {
                u8 err = ide_read_error(drive);

                ide_end_drive_cmd(drive, stat, err);
                return;
        }

        if (rq->rq_disk) {
                ide_driver_t *drv;

                drv = *(ide_driver_t **)rq->rq_disk->private_data;
                drv->end_request(drive, 1, rq->nr_sectors);
        } else
                ide_end_request(drive, 1, rq->nr_sectors);
}

/*
 * We got an interrupt on a task_in case, but no errors and no DRQ.
 *
 * It might be a spurious irq (shared irq), but it might be a
 * command that had no output.
 */
static ide_startstop_t task_in_unexpected(ide_drive_t *drive, struct request *rq, u8 stat)
{
        /* Command all done? */
        if (OK_STAT(stat, ATA_DRDY, ATA_BUSY)) {
                task_end_request(drive, rq, stat);
                return ide_stopped;
        }

        /* Assume it was a spurious irq */
        ide_set_handler(drive, &task_in_intr, WAIT_WORSTCASE, NULL);
        return ide_started;
}

/*
 * Handler for command with PIO data-in phase (Read/Read Multiple).
 */
static ide_startstop_t task_in_intr(ide_drive_t *drive)
{
        ide_hwif_t *hwif = drive->hwif;
        struct request *rq = hwif->hwgroup->rq;
        u8 stat = hwif->tp_ops->read_status(hwif);

        /* Error? */
        if (stat & ATA_ERR)
                return task_error(drive, rq, __func__, stat);

        /* Didn't want any data? Odd. */
        if ((stat & ATA_DRQ) == 0)
                return task_in_unexpected(drive, rq, stat);

        ide_pio_datablock(drive, rq, 0);

        /* Are we done? Check status and finish transfer. */
        if (!hwif->nleft) {
                stat = wait_drive_not_busy(drive);
                if (!OK_STAT(stat, 0, BAD_STAT))
                        return task_error(drive, rq, __func__, stat);
                task_end_request(drive, rq, stat);
                return ide_stopped;
        }

        /* Still data left to transfer. */
        ide_set_handler(drive, &task_in_intr, WAIT_WORSTCASE, NULL);

        return ide_started;
}

/*
 * Handler for command with PIO data-out phase (Write/Write Multiple).
 */
static ide_startstop_t task_out_intr (ide_drive_t *drive)
{
        ide_hwif_t *hwif = drive->hwif;
        struct request *rq = HWGROUP(drive)->rq;
        u8 stat = hwif->tp_ops->read_status(hwif);

        if (!OK_STAT(stat, DRIVE_READY, drive->bad_wstat))
                return task_error(drive, rq, __func__, stat);

        /* Deal with unexpected ATA data phase. */
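        /* DRQ should be set if and only if there is data left to send. */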
        if (((stat & ATA_DRQ) == 0) ^ !hwif->nleft)
                return task_error(drive, rq, __func__, stat);

        if (!hwif->nleft) {
                task_end_request(drive, rq, stat);
                return ide_stopped;
        }

        /* Still data left to transfer. */
        ide_pio_datablock(drive, rq, 1);
        ide_set_handler(drive, &task_out_intr, WAIT_WORSTCASE, NULL);

        return ide_started;
}

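/*
 * For PIO writes the host must wait for DRQ itself and push the first
 * data block; the device only interrupts for the blocks that follow.
 */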
static ide_startstop_t pre_task_out_intr(ide_drive_t *drive, struct request *rq)
{
        ide_startstop_t startstop;

        if (ide_wait_stat(&startstop, drive, ATA_DRQ,
                          drive->bad_wstat, WAIT_DRQ)) {
                printk(KERN_ERR "%s: no DRQ after issuing %sWRITE%s\n",
                                drive->name,
                                drive->hwif->data_phase ? "MULT" : "",
                                drive->addressing ? "_EXT" : "");
                return startstop;
        }

        if (!drive->unmask)
                local_irq_disable();

        ide_set_handler(drive, &task_out_intr, WAIT_WORSTCASE, NULL);
        ide_pio_datablock(drive, rq, 1);

        return ide_started;
}

int ide_raw_taskfile(ide_drive_t *drive, ide_task_t *task, u8 *buf, u16 nsect)
{
        struct request *rq;
        int error;

        rq = blk_get_request(drive->queue, READ, __GFP_WAIT);
        rq->cmd_type = REQ_TYPE_ATA_TASKFILE;
        rq->buffer = buf;

        /*
         * (ks) We currently transfer only whole sectors.  This is
         * sufficient for now, but it would be great to find a solution
         * for transferring any size, to support special commands like
         * READ LONG.
         */
        rq->hard_nr_sectors = rq->nr_sectors = nsect;
        rq->hard_cur_sectors = rq->current_nr_sectors = nsect;

        if (task->tf_flags & IDE_TFLAG_WRITE)
                rq->cmd_flags |= REQ_RW;

        rq->special = task;
        task->rq = rq;

        error = blk_execute_rq(drive->queue, NULL, rq, 0);
        blk_put_request(rq);

        return error;
}

EXPORT_SYMBOL(ide_raw_taskfile);

int ide_no_data_taskfile(ide_drive_t *drive, ide_task_t *task)
{
        task->data_phase = TASKFILE_NO_DATA;

        return ide_raw_taskfile(drive, task, NULL, 0);
}
EXPORT_SYMBOL_GPL(ide_no_data_taskfile);

#ifdef CONFIG_IDE_TASK_IOCTL
int ide_taskfile_ioctl (ide_drive_t *drive, unsigned int cmd, unsigned long arg)
{
        ide_task_request_t      *req_task;
        ide_task_t              args;
        u8 *outbuf              = NULL;
        u8 *inbuf               = NULL;
        u8 *data_buf            = NULL;
        int err                 = 0;
        int tasksize            = sizeof(struct ide_task_request_s);
        unsigned int taskin     = 0;
        unsigned int taskout    = 0;
        u16 nsect               = 0;
        char __user *buf = (char __user *)arg;

//      printk("IDE Taskfile ...\n");

        req_task = kzalloc(tasksize, GFP_KERNEL);
        if (req_task == NULL) return -ENOMEM;
        if (copy_from_user(req_task, buf, tasksize)) {
                kfree(req_task);
                return -EFAULT;
        }

        taskout = req_task->out_size;
        taskin  = req_task->in_size;

        if (taskin > 65536 || taskout > 65536) {
                err = -EINVAL;
                goto abort;
        }

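        /*
         * The user buffer is laid out as the ide_task_request_t header,
         * immediately followed by the outgoing data (if any) and then the
         * incoming data (if any).
         */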
        if (taskout) {
                int outtotal = tasksize;
                outbuf = kzalloc(taskout, GFP_KERNEL);
                if (outbuf == NULL) {
                        err = -ENOMEM;
                        goto abort;
                }
                if (copy_from_user(outbuf, buf + outtotal, taskout)) {
                        err = -EFAULT;
                        goto abort;
                }
        }

        if (taskin) {
                int intotal = tasksize + taskout;
                inbuf = kzalloc(taskin, GFP_KERNEL);
                if (inbuf == NULL) {
                        err = -ENOMEM;
                        goto abort;
                }
                if (copy_from_user(inbuf, buf + intotal, taskin)) {
                        err = -EFAULT;
                        goto abort;
                }
        }

        memset(&args, 0, sizeof(ide_task_t));

        memcpy(&args.tf_array[0], req_task->hob_ports, HDIO_DRIVE_HOB_HDR_SIZE - 2);
        memcpy(&args.tf_array[6], req_task->io_ports, HDIO_DRIVE_TASK_HDR_SIZE);

        args.data_phase = req_task->data_phase;

        args.tf_flags = IDE_TFLAG_IO_16BIT | IDE_TFLAG_DEVICE |
                        IDE_TFLAG_IN_TF;
        if (drive->addressing == 1)
                args.tf_flags |= (IDE_TFLAG_LBA48 | IDE_TFLAG_IN_HOB);

        if (req_task->out_flags.all) {
                args.tf_flags |= IDE_TFLAG_FLAGGED;

                if (req_task->out_flags.b.data)
                        args.tf_flags |= IDE_TFLAG_OUT_DATA;

                if (req_task->out_flags.b.nsector_hob)
                        args.tf_flags |= IDE_TFLAG_OUT_HOB_NSECT;
                if (req_task->out_flags.b.sector_hob)
                        args.tf_flags |= IDE_TFLAG_OUT_HOB_LBAL;
                if (req_task->out_flags.b.lcyl_hob)
                        args.tf_flags |= IDE_TFLAG_OUT_HOB_LBAM;
                if (req_task->out_flags.b.hcyl_hob)
                        args.tf_flags |= IDE_TFLAG_OUT_HOB_LBAH;

                if (req_task->out_flags.b.error_feature)
                        args.tf_flags |= IDE_TFLAG_OUT_FEATURE;
                if (req_task->out_flags.b.nsector)
                        args.tf_flags |= IDE_TFLAG_OUT_NSECT;
                if (req_task->out_flags.b.sector)
                        args.tf_flags |= IDE_TFLAG_OUT_LBAL;
                if (req_task->out_flags.b.lcyl)
                        args.tf_flags |= IDE_TFLAG_OUT_LBAM;
                if (req_task->out_flags.b.hcyl)
                        args.tf_flags |= IDE_TFLAG_OUT_LBAH;
        } else {
                args.tf_flags |= IDE_TFLAG_OUT_TF;
                if (args.tf_flags & IDE_TFLAG_LBA48)
                        args.tf_flags |= IDE_TFLAG_OUT_HOB;
        }

        if (req_task->in_flags.b.data)
                args.tf_flags |= IDE_TFLAG_IN_DATA;

        switch(req_task->data_phase) {
                case TASKFILE_MULTI_OUT:
                        if (!drive->mult_count) {
                                /* (hs): give up if multcount is not set */
                                printk(KERN_ERR "%s: %s Multimode Write " \
                                        "multcount is not set\n",
                                        drive->name, __func__);
                                err = -EPERM;
                                goto abort;
                        }
                        /* fall through */
                case TASKFILE_OUT:
                        /* fall through */
                case TASKFILE_OUT_DMAQ:
                case TASKFILE_OUT_DMA:
                        nsect = taskout / SECTOR_SIZE;
                        data_buf = outbuf;
                        break;
                case TASKFILE_MULTI_IN:
                        if (!drive->mult_count) {
                                /* (hs): give up if multcount is not set */
                                printk(KERN_ERR "%s: %s Multimode Read failure " \
                                        "multcount is not set\n",
                                        drive->name, __func__);
                                err = -EPERM;
                                goto abort;
                        }
                        /* fall through */
                case TASKFILE_IN:
                        /* fall through */
                case TASKFILE_IN_DMAQ:
                case TASKFILE_IN_DMA:
                        nsect = taskin / SECTOR_SIZE;
                        data_buf = inbuf;
                        break;
                case TASKFILE_NO_DATA:
                        break;
                default:
                        err = -EFAULT;
                        goto abort;
        }

        if (req_task->req_cmd == IDE_DRIVE_TASK_NO_DATA)
                nsect = 0;
        else if (!nsect) {
                nsect = (args.tf.hob_nsect << 8) | args.tf.nsect;

                if (!nsect) {
                        printk(KERN_ERR "%s: in/out command without data\n",
                                        drive->name);
                        err = -EFAULT;
                        goto abort;
                }
        }

        if (req_task->req_cmd == IDE_DRIVE_TASK_RAW_WRITE)
                args.tf_flags |= IDE_TFLAG_WRITE;

        err = ide_raw_taskfile(drive, &args, data_buf, nsect);

        memcpy(req_task->hob_ports, &args.tf_array[0], HDIO_DRIVE_HOB_HDR_SIZE - 2);
        memcpy(req_task->io_ports, &args.tf_array[6], HDIO_DRIVE_TASK_HDR_SIZE);

        if ((args.tf_flags & IDE_TFLAG_FLAGGED_SET_IN_FLAGS) &&
            req_task->in_flags.all == 0) {
                req_task->in_flags.all = IDE_TASKFILE_STD_IN_FLAGS;
                if (drive->addressing == 1)
                        req_task->in_flags.all |= (IDE_HOB_STD_IN_FLAGS << 8);
        }

        if (copy_to_user(buf, req_task, tasksize)) {
                err = -EFAULT;
                goto abort;
        }
        if (taskout) {
                int outtotal = tasksize;
                if (copy_to_user(buf + outtotal, outbuf, taskout)) {
                        err = -EFAULT;
                        goto abort;
                }
        }
        if (taskin) {
                int intotal = tasksize + taskout;
                if (copy_to_user(buf + intotal, inbuf, taskin)) {
                        err = -EFAULT;
                        goto abort;
                }
        }
abort:
        kfree(req_task);
        kfree(outbuf);
        kfree(inbuf);

//      printk("IDE Taskfile ioctl ended. rc = %i\n", err);

        return err;
}
#endif

int ide_cmd_ioctl (ide_drive_t *drive, unsigned int cmd, unsigned long arg)
{
        u8 *buf = NULL;
        int bufsize = 0, err = 0;
        u8 args[4], xfer_rate = 0;
        ide_task_t tfargs;
        struct ide_taskfile *tf = &tfargs.tf;
        u16 *id = drive->id;

        if (NULL == (void *) arg) {
                struct request *rq;

                rq = blk_get_request(drive->queue, READ, __GFP_WAIT);
                rq->cmd_type = REQ_TYPE_ATA_TASKFILE;
                err = blk_execute_rq(drive->queue, NULL, rq, 0);
                blk_put_request(rq);

                return err;
        }

        if (copy_from_user(args, (void __user *)arg, 4))
                return -EFAULT;

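        /*
         * HDIO_DRIVE_CMD layout: args[0] = command, args[1] = sector number
         * (or NSECT parameter), args[2] = feature, args[3] = sector count.
         * Any data read back follows the four bytes in the user buffer.
         */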
        memset(&tfargs, 0, sizeof(ide_task_t));
        tf->feature = args[2];
        if (args[0] == ATA_CMD_SMART) {
                tf->nsect = args[3];
                tf->lbal  = args[1];
                tf->lbam  = 0x4f;
                tf->lbah  = 0xc2;
                tfargs.tf_flags = IDE_TFLAG_OUT_TF | IDE_TFLAG_IN_NSECT;
        } else {
                tf->nsect = args[1];
                tfargs.tf_flags = IDE_TFLAG_OUT_FEATURE |
                                  IDE_TFLAG_OUT_NSECT | IDE_TFLAG_IN_NSECT;
        }
        tf->command = args[0];
        tfargs.data_phase = args[3] ? TASKFILE_IN : TASKFILE_NO_DATA;

        if (args[3]) {
                tfargs.tf_flags |= IDE_TFLAG_IO_16BIT;
                bufsize = SECTOR_SIZE * args[3];
                buf = kzalloc(bufsize, GFP_KERNEL);
                if (buf == NULL)
                        return -ENOMEM;
        }

        if (tf->command == ATA_CMD_SET_FEATURES &&
            tf->feature == SETFEATURES_XFER &&
            tf->nsect >= XFER_SW_DMA_0 &&
            (id[ATA_ID_UDMA_MODES] ||
             id[ATA_ID_MWDMA_MODES] ||
             id[ATA_ID_SWDMA_MODES])) {
                xfer_rate = args[1];
                if (tf->nsect > XFER_UDMA_2 && !eighty_ninty_three(drive)) {
                        printk(KERN_WARNING "%s: UDMA speeds >UDMA33 cannot "
                                            "be set\n", drive->name);
                        goto abort;
                }
        }

        err = ide_raw_taskfile(drive, &tfargs, buf, args[3]);

        args[0] = tf->status;
        args[1] = tf->error;
        args[2] = tf->nsect;

        if (!err && xfer_rate) {
                /* active-retuning-calls future */
                ide_set_xfer_rate(drive, xfer_rate);
                ide_driveid_update(drive);
        }
abort:
        if (copy_to_user((void __user *)arg, &args, 4))
                err = -EFAULT;
        if (buf) {
                if (copy_to_user((void __user *)(arg + 4), buf, bufsize))
                        err = -EFAULT;
                kfree(buf);
        }
        return err;
}

int ide_task_ioctl (ide_drive_t *drive, unsigned int cmd, unsigned long arg)
{
        void __user *p = (void __user *)arg;
        int err = 0;
        u8 args[7];
        ide_task_t task;

        if (copy_from_user(args, p, 7))
                return -EFAULT;

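        /*
         * HDIO_DRIVE_TASK layout: args[0] is the command; args[1..6] are
         * loaded into the feature, nsect, lbal, lbam, lbah and device
         * registers, and the same bytes are copied back on completion.
         */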
        memset(&task, 0, sizeof(task));
        memcpy(&task.tf_array[7], &args[1], 6);
        task.tf.command = args[0];
        task.tf_flags = IDE_TFLAG_TF | IDE_TFLAG_DEVICE;

        err = ide_no_data_taskfile(drive, &task);

        args[0] = task.tf.command;
        memcpy(&args[1], &task.tf_array[7], 6);

        if (copy_to_user(p, args, 7))
                err = -EFAULT;

        return err;
}