ide-taskfile.c revision 9a3c49be5c5f7388eefb712be9a383904140532e
1/*
2 * linux/drivers/ide/ide-taskfile.c	Version 0.38	March 05, 2003
3 *
4 *  Copyright (C) 2000-2002	Michael Cornwell <cornwell@acm.org>
5 *  Copyright (C) 2000-2002	Andre Hedrick <andre@linux-ide.org>
6 *  Copyright (C) 2001-2002	Klaus Smolin
7 *					IBM Storage Technology Division
8 *  Copyright (C) 2003-2004	Bartlomiej Zolnierkiewicz
9 *
10 *  The big the bad and the ugly.
11 */
12
13#include <linux/module.h>
14#include <linux/types.h>
15#include <linux/string.h>
16#include <linux/kernel.h>
17#include <linux/timer.h>
18#include <linux/mm.h>
19#include <linux/sched.h>
20#include <linux/interrupt.h>
21#include <linux/major.h>
22#include <linux/errno.h>
23#include <linux/genhd.h>
24#include <linux/blkpg.h>
25#include <linux/slab.h>
26#include <linux/pci.h>
27#include <linux/delay.h>
28#include <linux/hdreg.h>
29#include <linux/ide.h>
30#include <linux/bitops.h>
31#include <linux/scatterlist.h>
32
33#include <asm/byteorder.h>
34#include <asm/irq.h>
35#include <asm/uaccess.h>
36#include <asm/io.h>
37
38static void ata_bswap_data (void *buffer, int wcount)
39{
40	u16 *p = buffer;
41
42	while (wcount--) {
43		*p = *p << 8 | *p >> 8; p++;
44		*p = *p << 8 | *p >> 8; p++;
45	}
46}
47
48static void taskfile_input_data(ide_drive_t *drive, void *buffer, u32 wcount)
49{
50	HWIF(drive)->ata_input_data(drive, buffer, wcount);
51	if (drive->bswap)
52		ata_bswap_data(buffer, wcount);
53}
54
55static void taskfile_output_data(ide_drive_t *drive, void *buffer, u32 wcount)
56{
57	if (drive->bswap) {
58		ata_bswap_data(buffer, wcount);
59		HWIF(drive)->ata_output_data(drive, buffer, wcount);
60		ata_bswap_data(buffer, wcount);
61	} else {
62		HWIF(drive)->ata_output_data(drive, buffer, wcount);
63	}
64}
65
/*
 * ide_tf_load - program the drive's taskfile registers from @task.
 *
 * Clears nIEN via the control register, optionally clears the select
 * mask, writes the high-order (hob) bytes first for LBA48 commands,
 * then the standard register set, and finally the device/select
 * register.  The write order matters: hob bytes must be latched
 * before their low-order counterparts.
 */
void ide_tf_load(ide_drive_t *drive, ide_task_t *task)
{
	ide_hwif_t *hwif = drive->hwif;
	struct ide_taskfile *tf = &task->tf;
	/* mask applied to tf->device: LBA48 drops the low nibble (0xE0) */
	u8 HIHI = (task->tf_flags & IDE_TFLAG_LBA48) ? 0xE0 : 0xEF;

	if (IDE_CONTROL_REG)
		hwif->OUTB(drive->ctl, IDE_CONTROL_REG); /* clear nIEN */

	if ((task->tf_flags & IDE_TFLAG_NO_SELECT_MASK) == 0)
		SELECT_MASK(drive, 0);

	/* LBA48: the hob bytes go out first */
	if (task->tf_flags & IDE_TFLAG_LBA48) {
		hwif->OUTB(tf->hob_feature, IDE_FEATURE_REG);
		hwif->OUTB(tf->hob_nsect, IDE_NSECTOR_REG);
		hwif->OUTB(tf->hob_lbal, IDE_SECTOR_REG);
		hwif->OUTB(tf->hob_lbam, IDE_LCYL_REG);
		hwif->OUTB(tf->hob_lbah, IDE_HCYL_REG);
	}

	hwif->OUTB(tf->feature, IDE_FEATURE_REG);
	hwif->OUTB(tf->nsect, IDE_NSECTOR_REG);
	hwif->OUTB(tf->lbal, IDE_SECTOR_REG);
	hwif->OUTB(tf->lbam, IDE_LCYL_REG);
	hwif->OUTB(tf->lbah, IDE_HCYL_REG);

	hwif->OUTB((tf->device & HIHI) | drive->select.all, IDE_SELECT_REG);
}
94
95EXPORT_SYMBOL_GPL(ide_tf_load);
96
97int taskfile_lib_get_identify (ide_drive_t *drive, u8 *buf)
98{
99	ide_task_t args;
100
101	memset(&args, 0, sizeof(ide_task_t));
102	args.tf.nsect = 0x01;
103	if (drive->media == ide_disk)
104		args.tf.command = WIN_IDENTIFY;
105	else
106		args.tf.command = WIN_PIDENTIFY;
107	args.command_type = IDE_DRIVE_TASK_IN;
108	args.data_phase   = TASKFILE_IN;
109	args.handler	  = &task_in_intr;
110	return ide_raw_taskfile(drive, &args, buf);
111}
112
113ide_startstop_t do_rw_taskfile (ide_drive_t *drive, ide_task_t *task)
114{
115	ide_hwif_t *hwif	= HWIF(drive);
116	struct ide_taskfile *tf = &task->tf;
117
118	if (drive->addressing == 1)
119		task->tf_flags |= IDE_TFLAG_LBA48;
120
121	ide_tf_load(drive, task);
122
123	if (task->handler != NULL) {
124		if (task->prehandler != NULL) {
125			hwif->OUTBSYNC(drive, tf->command, IDE_COMMAND_REG);
126			ndelay(400);	/* FIXME */
127			return task->prehandler(drive, task->rq);
128		}
129		ide_execute_command(drive, tf->command, task->handler, WAIT_WORSTCASE, NULL);
130		return ide_started;
131	}
132
133	if (!drive->using_dma)
134		return ide_stopped;
135
136	switch (tf->command) {
137		case WIN_WRITEDMA_ONCE:
138		case WIN_WRITEDMA:
139		case WIN_WRITEDMA_EXT:
140		case WIN_READDMA_ONCE:
141		case WIN_READDMA:
142		case WIN_READDMA_EXT:
143		case WIN_IDENTIFY_DMA:
144			if (!hwif->dma_setup(drive)) {
145				hwif->dma_exec_cmd(drive, tf->command);
146				hwif->dma_start(drive);
147				return ide_started;
148			}
149			break;
150		default:
151			if (task->handler == NULL)
152				return ide_stopped;
153	}
154
155	return ide_stopped;
156}
157
158/*
159 * set_multmode_intr() is invoked on completion of a WIN_SETMULT cmd.
160 */
161ide_startstop_t set_multmode_intr (ide_drive_t *drive)
162{
163	ide_hwif_t *hwif = HWIF(drive);
164	u8 stat;
165
166	if (OK_STAT(stat = hwif->INB(IDE_STATUS_REG),READY_STAT,BAD_STAT)) {
167		drive->mult_count = drive->mult_req;
168	} else {
169		drive->mult_req = drive->mult_count = 0;
170		drive->special.b.recalibrate = 1;
171		(void) ide_dump_status(drive, "set_multmode", stat);
172	}
173	return ide_stopped;
174}
175
176/*
177 * set_geometry_intr() is invoked on completion of a WIN_SPECIFY cmd.
178 */
179ide_startstop_t set_geometry_intr (ide_drive_t *drive)
180{
181	ide_hwif_t *hwif = HWIF(drive);
182	int retries = 5;
183	u8 stat;
184
185	while (((stat = hwif->INB(IDE_STATUS_REG)) & BUSY_STAT) && retries--)
186		udelay(10);
187
188	if (OK_STAT(stat, READY_STAT, BAD_STAT))
189		return ide_stopped;
190
191	if (stat & (ERR_STAT|DRQ_STAT))
192		return ide_error(drive, "set_geometry_intr", stat);
193
194	BUG_ON(HWGROUP(drive)->handler != NULL);
195	ide_set_handler(drive, &set_geometry_intr, WAIT_WORSTCASE, NULL);
196	return ide_started;
197}
198
199/*
200 * recal_intr() is invoked on completion of a WIN_RESTORE (recalibrate) cmd.
201 */
202ide_startstop_t recal_intr (ide_drive_t *drive)
203{
204	ide_hwif_t *hwif = HWIF(drive);
205	u8 stat;
206
207	if (!OK_STAT(stat = hwif->INB(IDE_STATUS_REG), READY_STAT, BAD_STAT))
208		return ide_error(drive, "recal_intr", stat);
209	return ide_stopped;
210}
211
212/*
213 * Handler for commands without a data phase
214 */
215ide_startstop_t task_no_data_intr (ide_drive_t *drive)
216{
217	ide_task_t *args	= HWGROUP(drive)->rq->special;
218	ide_hwif_t *hwif	= HWIF(drive);
219	u8 stat;
220
221	local_irq_enable_in_hardirq();
222	if (!OK_STAT(stat = hwif->INB(IDE_STATUS_REG),READY_STAT,BAD_STAT)) {
223		return ide_error(drive, "task_no_data_intr", stat);
224		/* calls ide_end_drive_cmd */
225	}
226	if (args)
227		ide_end_drive_cmd(drive, stat, hwif->INB(IDE_ERROR_REG));
228
229	return ide_stopped;
230}
231
232static u8 wait_drive_not_busy(ide_drive_t *drive)
233{
234	ide_hwif_t *hwif = HWIF(drive);
235	int retries;
236	u8 stat;
237
238	/*
239	 * Last sector was transfered, wait until drive is ready.
240	 * This can take up to 10 usec, but we will wait max 1 ms
241	 * (drive_cmd_intr() waits that long).
242	 */
243	for (retries = 0; retries < 100; retries++) {
244		if ((stat = hwif->INB(IDE_STATUS_REG)) & BUSY_STAT)
245			udelay(10);
246		else
247			break;
248	}
249
250	if (stat & BUSY_STAT)
251		printk(KERN_ERR "%s: drive still BUSY!\n", drive->name);
252
253	return stat;
254}
255
/*
 * Transfer one sector between the drive and the current scatterlist
 * position, advancing hwif->cursg / cursg_ofs / nleft bookkeeping.
 * @write selects direction (non-zero: host -> drive).
 */
static void ide_pio_sector(ide_drive_t *drive, unsigned int write)
{
	ide_hwif_t *hwif = drive->hwif;
	struct scatterlist *sg = hwif->sg_table;
	struct scatterlist *cursg = hwif->cursg;
	struct page *page;
#ifdef CONFIG_HIGHMEM
	unsigned long flags;
#endif
	unsigned int offset;
	u8 *buf;

	cursg = hwif->cursg;	/* NOTE(review): redundant, already loaded in the initializer above */
	if (!cursg) {
		/* first sector of the request: start at the head of the table */
		cursg = sg;
		hwif->cursg = sg;
	}

	page = sg_page(cursg);
	/* byte offset of this sector within the sg entry */
	offset = cursg->offset + hwif->cursg_ofs * SECTOR_SIZE;

	/* get the current page and offset */
	page = nth_page(page, (offset >> PAGE_SHIFT));
	offset %= PAGE_SIZE;

#ifdef CONFIG_HIGHMEM
	/* keep IRQs off while holding the atomic kmap */
	local_irq_save(flags);
#endif
	buf = kmap_atomic(page, KM_BIO_SRC_IRQ) + offset;

	hwif->nleft--;
	hwif->cursg_ofs++;

	/* consumed this sg entry entirely: step to the next one */
	if ((hwif->cursg_ofs * SECTOR_SIZE) == cursg->length) {
		hwif->cursg = sg_next(hwif->cursg);
		hwif->cursg_ofs = 0;
	}

	/* do the actual data transfer */
	if (write)
		taskfile_output_data(drive, buf, SECTOR_WORDS);
	else
		taskfile_input_data(drive, buf, SECTOR_WORDS);

	kunmap_atomic(buf, KM_BIO_SRC_IRQ);
#ifdef CONFIG_HIGHMEM
	local_irq_restore(flags);
#endif
}
305
306static void ide_pio_multi(ide_drive_t *drive, unsigned int write)
307{
308	unsigned int nsect;
309
310	nsect = min_t(unsigned int, drive->hwif->nleft, drive->mult_count);
311	while (nsect--)
312		ide_pio_sector(drive, write);
313}
314
315static void ide_pio_datablock(ide_drive_t *drive, struct request *rq,
316				     unsigned int write)
317{
318	if (rq->bio)	/* fs request */
319		rq->errors = 0;
320
321	touch_softlockup_watchdog();
322
323	switch (drive->hwif->data_phase) {
324	case TASKFILE_MULTI_IN:
325	case TASKFILE_MULTI_OUT:
326		ide_pio_multi(drive, write);
327		break;
328	default:
329		ide_pio_sector(drive, write);
330		break;
331	}
332}
333
/*
 * A PIO transfer failed: work out how many sectors were actually
 * completed, end that part of the request successfully, then hand the
 * failure to ide_error().
 */
static ide_startstop_t task_error(ide_drive_t *drive, struct request *rq,
				  const char *s, u8 stat)
{
	if (rq->bio) {
		ide_hwif_t *hwif = drive->hwif;
		int sectors = hwif->nsect - hwif->nleft;

		/*
		 * The block in flight when the error hit cannot be trusted:
		 * a finished read keeps everything, otherwise discount one
		 * sector (single) or a whole multi block (MULTI phases).
		 */
		switch (hwif->data_phase) {
		case TASKFILE_IN:
			if (hwif->nleft)
				break;
			/* fall through */
		case TASKFILE_OUT:
			sectors--;
			break;
		case TASKFILE_MULTI_IN:
			if (hwif->nleft)
				break;
			/* fall through */
		case TASKFILE_MULTI_OUT:
			sectors -= drive->mult_count;
			/* fall through */
		default:
			break;
		}

		if (sectors > 0) {
			ide_driver_t *drv;

			drv = *(ide_driver_t **)rq->rq_disk->private_data;
			drv->end_request(drive, 1, sectors);
		}
	}
	return ide_error(drive, s, stat);
}
368
369static void task_end_request(ide_drive_t *drive, struct request *rq, u8 stat)
370{
371	HWIF(drive)->cursg = NULL;
372
373	if (rq->cmd_type == REQ_TYPE_ATA_TASKFILE) {
374		ide_task_t *task = rq->special;
375
376		if (task->tf_out_flags.all) {
377			u8 err = drive->hwif->INB(IDE_ERROR_REG);
378			ide_end_drive_cmd(drive, stat, err);
379			return;
380		}
381	}
382
383	if (rq->rq_disk) {
384		ide_driver_t *drv;
385
386		drv = *(ide_driver_t **)rq->rq_disk->private_data;;
387		drv->end_request(drive, 1, rq->hard_nr_sectors);
388	} else
389		ide_end_request(drive, 1, rq->hard_nr_sectors);
390}
391
392/*
393 * Handler for command with PIO data-in phase (Read/Read Multiple).
394 */
395ide_startstop_t task_in_intr (ide_drive_t *drive)
396{
397	ide_hwif_t *hwif = drive->hwif;
398	struct request *rq = HWGROUP(drive)->rq;
399	u8 stat = hwif->INB(IDE_STATUS_REG);
400
401	/* new way for dealing with premature shared PCI interrupts */
402	if (!OK_STAT(stat, DATA_READY, BAD_R_STAT)) {
403		if (stat & (ERR_STAT | DRQ_STAT))
404			return task_error(drive, rq, __FUNCTION__, stat);
405		/* No data yet, so wait for another IRQ. */
406		ide_set_handler(drive, &task_in_intr, WAIT_WORSTCASE, NULL);
407		return ide_started;
408	}
409
410	ide_pio_datablock(drive, rq, 0);
411
412	/* If it was the last datablock check status and finish transfer. */
413	if (!hwif->nleft) {
414		stat = wait_drive_not_busy(drive);
415		if (!OK_STAT(stat, 0, BAD_R_STAT))
416			return task_error(drive, rq, __FUNCTION__, stat);
417		task_end_request(drive, rq, stat);
418		return ide_stopped;
419	}
420
421	/* Still data left to transfer. */
422	ide_set_handler(drive, &task_in_intr, WAIT_WORSTCASE, NULL);
423
424	return ide_started;
425}
426EXPORT_SYMBOL(task_in_intr);
427
428/*
429 * Handler for command with PIO data-out phase (Write/Write Multiple).
430 */
431static ide_startstop_t task_out_intr (ide_drive_t *drive)
432{
433	ide_hwif_t *hwif = drive->hwif;
434	struct request *rq = HWGROUP(drive)->rq;
435	u8 stat = hwif->INB(IDE_STATUS_REG);
436
437	if (!OK_STAT(stat, DRIVE_READY, drive->bad_wstat))
438		return task_error(drive, rq, __FUNCTION__, stat);
439
440	/* Deal with unexpected ATA data phase. */
441	if (((stat & DRQ_STAT) == 0) ^ !hwif->nleft)
442		return task_error(drive, rq, __FUNCTION__, stat);
443
444	if (!hwif->nleft) {
445		task_end_request(drive, rq, stat);
446		return ide_stopped;
447	}
448
449	/* Still data left to transfer. */
450	ide_pio_datablock(drive, rq, 1);
451	ide_set_handler(drive, &task_out_intr, WAIT_WORSTCASE, NULL);
452
453	return ide_started;
454}
455
456ide_startstop_t pre_task_out_intr (ide_drive_t *drive, struct request *rq)
457{
458	ide_startstop_t startstop;
459
460	if (ide_wait_stat(&startstop, drive, DATA_READY,
461			  drive->bad_wstat, WAIT_DRQ)) {
462		printk(KERN_ERR "%s: no DRQ after issuing %sWRITE%s\n",
463				drive->name,
464				drive->hwif->data_phase ? "MULT" : "",
465				drive->addressing ? "_EXT" : "");
466		return startstop;
467	}
468
469	if (!drive->unmask)
470		local_irq_disable();
471
472	ide_set_handler(drive, &task_out_intr, WAIT_WORSTCASE, NULL);
473	ide_pio_datablock(drive, rq, 1);
474
475	return ide_started;
476}
477EXPORT_SYMBOL(pre_task_out_intr);
478
479static int ide_diag_taskfile(ide_drive_t *drive, ide_task_t *args, unsigned long data_size, u8 *buf)
480{
481	struct request rq;
482
483	memset(&rq, 0, sizeof(rq));
484	rq.ref_count = 1;
485	rq.cmd_type = REQ_TYPE_ATA_TASKFILE;
486	rq.buffer = buf;
487
488	/*
489	 * (ks) We transfer currently only whole sectors.
490	 * This is suffient for now.  But, it would be great,
491	 * if we would find a solution to transfer any size.
492	 * To support special commands like READ LONG.
493	 */
494	if (args->command_type != IDE_DRIVE_TASK_NO_DATA) {
495		if (data_size == 0)
496			rq.nr_sectors = (args->tf.hob_nsect << 8) | args->tf.nsect;
497		else
498			rq.nr_sectors = data_size / SECTOR_SIZE;
499
500		if (!rq.nr_sectors) {
501			printk(KERN_ERR "%s: in/out command without data\n",
502					drive->name);
503			return -EFAULT;
504		}
505
506		rq.hard_nr_sectors = rq.nr_sectors;
507		rq.hard_cur_sectors = rq.current_nr_sectors = rq.nr_sectors;
508
509		if (args->command_type == IDE_DRIVE_TASK_RAW_WRITE)
510			rq.cmd_flags |= REQ_RW;
511	}
512
513	rq.special = args;
514	args->rq = &rq;
515	return ide_do_drive_cmd(drive, &rq, ide_wait);
516}
517
/*
 * ide_raw_taskfile - execute @args synchronously, using @buf for any
 * data transfer.  Thin wrapper around ide_diag_taskfile() with
 * data_size == 0, so the transfer length comes from the taskfile's
 * sector count.
 */
int ide_raw_taskfile (ide_drive_t *drive, ide_task_t *args, u8 *buf)
{
	return ide_diag_taskfile(drive, args, 0, buf);
}
522
523EXPORT_SYMBOL(ide_raw_taskfile);
524
525int ide_no_data_taskfile(ide_drive_t *drive, ide_task_t *task)
526{
527	task->command_type = IDE_DRIVE_TASK_NO_DATA;
528	task->data_phase   = TASKFILE_NO_DATA;
529	task->handler      = task_no_data_intr;
530
531	return ide_raw_taskfile(drive, task, NULL);
532}
533EXPORT_SYMBOL_GPL(ide_no_data_taskfile);
534
535#ifdef CONFIG_IDE_TASK_IOCTL
/*
 * HDIO_DRIVE_TASKFILE ioctl: execute a user-supplied taskfile.
 *
 * User buffer layout at @arg: an ide_task_request_t header, followed
 * by out_size bytes of write data, followed by in_size bytes reserved
 * for read data.  The updated register state is copied back into the
 * header on completion.
 */
int ide_taskfile_ioctl (ide_drive_t *drive, unsigned int cmd, unsigned long arg)
{
	ide_task_request_t	*req_task;
	ide_task_t		args;
	u8 *outbuf		= NULL;
	u8 *inbuf		= NULL;
	int err			= 0;
	int tasksize		= sizeof(struct ide_task_request_s);
	unsigned int taskin	= 0;
	unsigned int taskout	= 0;
	u8 io_32bit		= drive->io_32bit;	/* saved; restored before return */
	char __user *buf = (char __user *)arg;

//	printk("IDE Taskfile ...\n");

	req_task = kzalloc(tasksize, GFP_KERNEL);
	if (req_task == NULL) return -ENOMEM;
	if (copy_from_user(req_task, buf, tasksize)) {
		kfree(req_task);
		return -EFAULT;
	}

	taskout = req_task->out_size;
	taskin  = req_task->in_size;

	/* cap the user-controlled sizes before allocating kernel buffers */
	if (taskin > 65536 || taskout > 65536) {
		err = -EINVAL;
		goto abort;
	}

	if (taskout) {
		int outtotal = tasksize;	/* write data follows the header */
		outbuf = kzalloc(taskout, GFP_KERNEL);
		if (outbuf == NULL) {
			err = -ENOMEM;
			goto abort;
		}
		if (copy_from_user(outbuf, buf + outtotal, taskout)) {
			err = -EFAULT;
			goto abort;
		}
	}

	if (taskin) {
		int intotal = tasksize + taskout;	/* read area follows write data */
		inbuf = kzalloc(taskin, GFP_KERNEL);
		if (inbuf == NULL) {
			err = -ENOMEM;
			goto abort;
		}
		if (copy_from_user(inbuf, buf + intotal, taskin)) {
			err = -EFAULT;
			goto abort;
		}
	}

	memset(&args, 0, sizeof(ide_task_t));

	/* import hob + standard taskfile registers from the request header */
	memcpy(&args.tf_array[0], req_task->hob_ports, HDIO_DRIVE_HOB_HDR_SIZE - 2);
	memcpy(&args.tf_array[6], req_task->io_ports, HDIO_DRIVE_TASK_HDR_SIZE);
	args.tf_in_flags  = req_task->in_flags;
	args.tf_out_flags = req_task->out_flags;
	args.data_phase   = req_task->data_phase;
	args.command_type = req_task->req_cmd;

	drive->io_32bit = 0;	/* disable 32-bit I/O for this request */
	switch(req_task->data_phase) {
		case TASKFILE_OUT_DMAQ:
		case TASKFILE_OUT_DMA:
			err = ide_diag_taskfile(drive, &args, taskout, outbuf);
			break;
		case TASKFILE_IN_DMAQ:
		case TASKFILE_IN_DMA:
			err = ide_diag_taskfile(drive, &args, taskin, inbuf);
			break;
		case TASKFILE_MULTI_OUT:
			if (!drive->mult_count) {
				/* (hs): give up if multcount is not set */
				printk(KERN_ERR "%s: %s Multimode Write " \
					"multcount is not set\n",
					drive->name, __FUNCTION__);
				err = -EPERM;
				goto abort;
			}
			/* fall through */
		case TASKFILE_OUT:
			args.prehandler = &pre_task_out_intr;
			args.handler = &task_out_intr;
			err = ide_diag_taskfile(drive, &args, taskout, outbuf);
			break;
		case TASKFILE_MULTI_IN:
			if (!drive->mult_count) {
				/* (hs): give up if multcount is not set */
				printk(KERN_ERR "%s: %s Multimode Read failure " \
					"multcount is not set\n",
					drive->name, __FUNCTION__);
				err = -EPERM;
				goto abort;
			}
			/* fall through */
		case TASKFILE_IN:
			args.handler = &task_in_intr;
			err = ide_diag_taskfile(drive, &args, taskin, inbuf);
			break;
		case TASKFILE_NO_DATA:
			args.handler = &task_no_data_intr;
			err = ide_diag_taskfile(drive, &args, 0, NULL);
			break;
		default:
			err = -EFAULT;
			goto abort;
	}

	/* export the (possibly updated) register state back to the header */
	memcpy(req_task->hob_ports, &args.tf_array[0], HDIO_DRIVE_HOB_HDR_SIZE - 2);
	memcpy(req_task->io_ports, &args.tf_array[6], HDIO_DRIVE_TASK_HDR_SIZE);
	req_task->in_flags  = args.tf_in_flags;
	req_task->out_flags = args.tf_out_flags;

	if (copy_to_user(buf, req_task, tasksize)) {
		err = -EFAULT;
		goto abort;
	}
	if (taskout) {
		int outtotal = tasksize;
		if (copy_to_user(buf + outtotal, outbuf, taskout)) {
			err = -EFAULT;
			goto abort;
		}
	}
	if (taskin) {
		int intotal = tasksize + taskout;
		if (copy_to_user(buf + intotal, inbuf, taskin)) {
			err = -EFAULT;
			goto abort;
		}
	}
abort:
	kfree(req_task);
	kfree(outbuf);
	kfree(inbuf);

//	printk("IDE Taskfile ioctl ended. rc = %i\n", err);

	drive->io_32bit = io_32bit;	/* restore the saved I/O width */

	return err;
}
683#endif
684
/*
 * ide_wait_cmd - issue a drive command and wait for completion.
 *
 * The first four bytes of @buf carry the cmd/nsect/feature/sectors
 * arguments; any returned data lands after them.
 */
int ide_wait_cmd (ide_drive_t *drive, u8 cmd, u8 nsect, u8 feature, u8 sectors, u8 *buf)
{
	struct request rq;
	u8 buffer[4];

	if (!buf)
		buf = buffer;
	/*
	 * NOTE(review): this memset covers 4 + SECTOR_WORDS * 4 * sectors
	 * bytes, but the fallback 'buffer' above is only 4 bytes.  Callers
	 * passing buf == NULL must therefore also pass sectors == 0 or
	 * this overruns the stack -- verify all callers.
	 */
	memset(buf, 0, 4 + SECTOR_WORDS * 4 * sectors);
	ide_init_drive_cmd(&rq);
	rq.buffer = buf;
	*buf++ = cmd;
	*buf++ = nsect;
	*buf++ = feature;
	*buf++ = sectors;
	return ide_do_drive_cmd(drive, &rq, ide_wait);
}
701
/*
 * HDIO_DRIVE_CMD ioctl: execute a simple 4-byte drive command from
 * userspace and copy the results (and any read data) back.
 */
int ide_cmd_ioctl (ide_drive_t *drive, unsigned int cmd, unsigned long arg)
{
	int err = 0;
	u8 args[4], *argbuf = args;
	u8 xfer_rate = 0;
	int argsize = 4;
	ide_task_t tfargs;
	struct ide_taskfile *tf = &tfargs.tf;

	/* a NULL argument means: issue an empty (null) drive command */
	if (NULL == (void *) arg) {
		struct request rq;
		ide_init_drive_cmd(&rq);
		return ide_do_drive_cmd(drive, &rq, ide_wait);
	}

	/* user layout: args[0]=command, args[1]=sector, args[2]=feature, args[3]=nsect */
	if (copy_from_user(args, (void __user *)arg, 4))
		return -EFAULT;

	memset(&tfargs, 0, sizeof(ide_task_t));
	tf->feature = args[2];
	tf->nsect   = args[3];
	tf->lbal    = args[1];
	tf->command = args[0];

	/* commands returning data need room for nsect sectors after the header */
	if (args[3]) {
		argsize = 4 + (SECTOR_WORDS * 4 * args[3]);
		argbuf = kzalloc(argsize, GFP_KERNEL);
		if (argbuf == NULL)
			return -ENOMEM;
	}
	/*
	 * For a transfer-setting command, remember the requested rate so
	 * the host side can be retuned after the drive accepts it.
	 */
	if (set_transfer(drive, &tfargs)) {
		xfer_rate = args[1];
		if (ide_ata66_check(drive, &tfargs))
			goto abort;
	}

	err = ide_wait_cmd(drive, args[0], args[1], args[2], args[3], argbuf);

	if (!err && xfer_rate) {
		/* active-retuning-calls future */
		ide_set_xfer_rate(drive, xfer_rate);
		ide_driveid_update(drive);
	}
abort:
	if (copy_to_user((void __user *)arg, argbuf, argsize))
		err = -EFAULT;
	if (argsize > 4)
		kfree(argbuf);
	return err;
}
752
753static int ide_wait_cmd_task(ide_drive_t *drive, u8 *buf)
754{
755	struct request rq;
756
757	ide_init_drive_cmd(&rq);
758	rq.cmd_type = REQ_TYPE_ATA_TASK;
759	rq.buffer = buf;
760	return ide_do_drive_cmd(drive, &rq, ide_wait);
761}
762
763int ide_task_ioctl (ide_drive_t *drive, unsigned int cmd, unsigned long arg)
764{
765	void __user *p = (void __user *)arg;
766	int err = 0;
767	u8 args[7], *argbuf = args;
768	int argsize = 7;
769
770	if (copy_from_user(args, p, 7))
771		return -EFAULT;
772	err = ide_wait_cmd_task(drive, argbuf);
773	if (copy_to_user(p, argbuf, argsize))
774		err = -EFAULT;
775	return err;
776}
777
778/*
779 * NOTICE: This is additions from IBM to provide a discrete interface,
780 * for selective taskregister access operations.  Nice JOB Klaus!!!
781 * Glad to be able to work and co-develop this with you and IBM.
782 */
ide_startstop_t flagged_taskfile (ide_drive_t *drive, ide_task_t *task)
{
	ide_hwif_t *hwif	= HWIF(drive);
	struct ide_taskfile *tf = &task->tf;

	/* multi-sector phases require a configured multiple-sector count */
	if (task->data_phase == TASKFILE_MULTI_IN ||
	    task->data_phase == TASKFILE_MULTI_OUT) {
		if (!drive->mult_count) {
			printk(KERN_ERR "%s: multimode not set!\n", drive->name);
			return ide_stopped;
		}
	}

	/*
	 * (ks) Check taskfile in flags.
	 * If set, then execute as it is defined.
	 * If not set, then define default settings.
	 * The default values are:
	 *	read all taskfile registers (except data)
	 *	read the hob registers (sector, nsector, lcyl, hcyl)
	 */
	if (task->tf_in_flags.all == 0) {
		task->tf_in_flags.all = IDE_TASKFILE_STD_IN_FLAGS;
		if (drive->addressing == 1)
			task->tf_in_flags.all |= (IDE_HOB_STD_IN_FLAGS  << 8);
        }

	/* ALL Command Block Executions SHALL clear nIEN, unless otherwise */
	if (IDE_CONTROL_REG)
		/* clear nIEN */
		hwif->OUTB(drive->ctl, IDE_CONTROL_REG);
	SELECT_MASK(drive, 0);

	/* only registers whose out flag is set get written below */
	if (task->tf_out_flags.b.data)
		hwif->OUTW((tf->hob_data << 8) | tf->data, IDE_DATA_REG);

	/* (ks) send hob registers first */
	if (task->tf_out_flags.b.nsector_hob)
		hwif->OUTB(tf->hob_nsect, IDE_NSECTOR_REG);
	if (task->tf_out_flags.b.sector_hob)
		hwif->OUTB(tf->hob_lbal, IDE_SECTOR_REG);
	if (task->tf_out_flags.b.lcyl_hob)
		hwif->OUTB(tf->hob_lbam, IDE_LCYL_REG);
	if (task->tf_out_flags.b.hcyl_hob)
		hwif->OUTB(tf->hob_lbah, IDE_HCYL_REG);

	/* (ks) Send now the standard registers */
	if (task->tf_out_flags.b.error_feature)
		hwif->OUTB(tf->feature, IDE_FEATURE_REG);
	/* refers to number of sectors to transfer */
	if (task->tf_out_flags.b.nsector)
		hwif->OUTB(tf->nsect, IDE_NSECTOR_REG);
	/* refers to sector offset or start sector */
	if (task->tf_out_flags.b.sector)
		hwif->OUTB(tf->lbal, IDE_SECTOR_REG);
	if (task->tf_out_flags.b.lcyl)
		hwif->OUTB(tf->lbam, IDE_LCYL_REG);
	if (task->tf_out_flags.b.hcyl)
		hwif->OUTB(tf->lbah, IDE_HCYL_REG);

        /*
	 * (ks) In the flagged taskfile approch, we will use all specified
	 * registers and the register value will not be changed, except the
	 * select bit (master/slave) in the drive_head register. We must make
	 * sure that the desired drive is selected.
	 */
	hwif->OUTB(tf->device | drive->select.all, IDE_SELECT_REG);
	switch(task->data_phase) {

   	        case TASKFILE_OUT_DMAQ:
		case TASKFILE_OUT_DMA:
		case TASKFILE_IN_DMAQ:
		case TASKFILE_IN_DMA:
			/* DMA phases: hand the command to the DMA engine */
			if (!drive->using_dma)
				break;

			if (!hwif->dma_setup(drive)) {
				hwif->dma_exec_cmd(drive, tf->command);
				hwif->dma_start(drive);
				return ide_started;
			}
			break;

	        default:
 			if (task->handler == NULL)
				return ide_stopped;

			/* Issue the command */
			if (task->prehandler) {
				hwif->OUTBSYNC(drive, tf->command, IDE_COMMAND_REG);
				ndelay(400);	/* FIXME */
				return task->prehandler(drive, task->rq);
			}
			ide_execute_command(drive, tf->command, task->handler, WAIT_WORSTCASE, NULL);
			return ide_started;
	}

	return ide_stopped;
}
882