ide-taskfile.c revision 089c5c7e0089c3461545be936bcd236cbf16b79a
/*
 *  Copyright (C) 2000-2002	   Michael Cornwell <cornwell@acm.org>
 *  Copyright (C) 2000-2002	   Andre Hedrick <andre@linux-ide.org>
 *  Copyright (C) 2001-2002	   Klaus Smolin
 *					IBM Storage Technology Division
 *  Copyright (C) 2003-2004, 2007  Bartlomiej Zolnierkiewicz
 *
 *  The big, the bad and the ugly.
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/major.h>
#include <linux/errno.h>
#include <linux/genhd.h>
#include <linux/blkpg.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/hdreg.h>
#include <linux/ide.h>
#include <linux/bitops.h>
#include <linux/scatterlist.h>

#include <asm/byteorder.h>
#include <asm/irq.h>
#include <asm/uaccess.h>
#include <asm/io.h>

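/*
 * Dump the taskfile registers to the kernel log; compiled out unless
 * DEBUG is defined.
 */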
void ide_tf_dump(const char *s, struct ide_taskfile *tf)
{
#ifdef DEBUG
	printk("%s: tf: feat 0x%02x nsect 0x%02x lbal 0x%02x "
		"lbam 0x%02x lbah 0x%02x dev 0x%02x cmd 0x%02x\n",
		s, tf->feature, tf->nsect, tf->lbal,
		tf->lbam, tf->lbah, tf->device, tf->command);
	printk("%s: hob: nsect 0x%02x lbal 0x%02x "
		"lbam 0x%02x lbah 0x%02x\n",
		s, tf->hob_nsect, tf->hob_lbal,
		tf->hob_lbam, tf->hob_lbah);
#endif
}

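/*
 * Write the taskfile registers out to the device's I/O ports.  The HOB
 * (high order byte) registers are written before the classical ones:
 * for LBA48 each register holds two bytes and the device keeps the
 * previously written value as the high order byte.  The HIHI mask
 * controls which bits of the device register the caller may set: 0xEF
 * passes the CHS/LBA28 head bits through but clears the drive-select
 * bit, 0xE0 also clears the head bits (unused in LBA48), and 0xFF
 * passes the flagged ioctl taskfile's device byte through unmasked;
 * drive->select.all is merged in afterwards.
 */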
void ide_tf_load(ide_drive_t *drive, ide_task_t *task)
{
	ide_hwif_t *hwif = drive->hwif;
	struct ide_io_ports *io_ports = &hwif->io_ports;
	struct ide_taskfile *tf = &task->tf;
	u8 HIHI = (task->tf_flags & IDE_TFLAG_LBA48) ? 0xE0 : 0xEF;

	if (task->tf_flags & IDE_TFLAG_FLAGGED)
		HIHI = 0xFF;

	ide_set_irq(drive, 1);

	if ((task->tf_flags & IDE_TFLAG_NO_SELECT_MASK) == 0)
		SELECT_MASK(drive, 0);

	if (task->tf_flags & IDE_TFLAG_OUT_DATA)
		hwif->OUTW((tf->hob_data << 8) | tf->data, io_ports->data_addr);

	if (task->tf_flags & IDE_TFLAG_OUT_HOB_FEATURE)
		hwif->OUTB(tf->hob_feature, io_ports->feature_addr);
	if (task->tf_flags & IDE_TFLAG_OUT_HOB_NSECT)
		hwif->OUTB(tf->hob_nsect, io_ports->nsect_addr);
	if (task->tf_flags & IDE_TFLAG_OUT_HOB_LBAL)
		hwif->OUTB(tf->hob_lbal, io_ports->lbal_addr);
	if (task->tf_flags & IDE_TFLAG_OUT_HOB_LBAM)
		hwif->OUTB(tf->hob_lbam, io_ports->lbam_addr);
	if (task->tf_flags & IDE_TFLAG_OUT_HOB_LBAH)
		hwif->OUTB(tf->hob_lbah, io_ports->lbah_addr);

	if (task->tf_flags & IDE_TFLAG_OUT_FEATURE)
		hwif->OUTB(tf->feature, io_ports->feature_addr);
	if (task->tf_flags & IDE_TFLAG_OUT_NSECT)
		hwif->OUTB(tf->nsect, io_ports->nsect_addr);
	if (task->tf_flags & IDE_TFLAG_OUT_LBAL)
		hwif->OUTB(tf->lbal, io_ports->lbal_addr);
	if (task->tf_flags & IDE_TFLAG_OUT_LBAM)
		hwif->OUTB(tf->lbam, io_ports->lbam_addr);
	if (task->tf_flags & IDE_TFLAG_OUT_LBAH)
		hwif->OUTB(tf->lbah, io_ports->lbah_addr);

	if (task->tf_flags & IDE_TFLAG_OUT_DEVICE)
		hwif->OUTB((tf->device & HIHI) | drive->select.all,
			   io_ports->device_addr);
}

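/*
 * Issue WIN_IDENTIFY (or WIN_PIDENTIFY for non-disk devices) and read
 * one sector of identify data into @buf.
 */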
int taskfile_lib_get_identify(ide_drive_t *drive, u8 *buf)
{
	ide_task_t args;

	memset(&args, 0, sizeof(ide_task_t));
	args.tf.nsect = 0x01;
	if (drive->media == ide_disk)
		args.tf.command = WIN_IDENTIFY;
	else
		args.tf.command = WIN_PIDENTIFY;
	args.tf_flags	= IDE_TFLAG_TF | IDE_TFLAG_DEVICE;
	args.data_phase	= TASKFILE_IN;
	return ide_raw_taskfile(drive, &args, buf, 1);
}

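/*
 * A task may be driven by DMA if it is a block layer (fs) request, a
 * flagged ioctl taskfile, or one of the recognized ATA DMA commands.
 */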
static inline int task_dma_ok(ide_task_t *task)
{
	if (blk_fs_request(task->rq) || (task->tf_flags & IDE_TFLAG_FLAGGED))
		return 1;

	switch (task->tf.command) {
	case WIN_WRITEDMA_ONCE:
	case WIN_WRITEDMA:
	case WIN_WRITEDMA_EXT:
	case WIN_READDMA_ONCE:
	case WIN_READDMA:
	case WIN_READDMA_EXT:
	case WIN_IDENTIFY_DMA:
		return 1;
	}

	return 0;
}

static ide_startstop_t task_no_data_intr(ide_drive_t *);
static ide_startstop_t set_geometry_intr(ide_drive_t *);
static ide_startstop_t recal_intr(ide_drive_t *);
static ide_startstop_t set_multmode_intr(ide_drive_t *);
static ide_startstop_t pre_task_out_intr(ide_drive_t *, struct request *);
static ide_startstop_t task_in_intr(ide_drive_t *);

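/*
 * Issue @task to the drive: load the taskfile registers (unless this is
 * a PIO fallback retry of a DMA command, whose registers are already
 * loaded), then start the command with a completion handler matching
 * its data phase: PIO-out, PIO-in, no-data, or DMA.
 */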
ide_startstop_t do_rw_taskfile(ide_drive_t *drive, ide_task_t *task)
{
	ide_hwif_t *hwif	= HWIF(drive);
	struct ide_taskfile *tf = &task->tf;
	ide_handler_t *handler = NULL;
	const struct ide_dma_ops *dma_ops = hwif->dma_ops;

	if (task->data_phase == TASKFILE_MULTI_IN ||
	    task->data_phase == TASKFILE_MULTI_OUT) {
		if (!drive->mult_count) {
			printk(KERN_ERR "%s: multimode not set!\n",
					drive->name);
			return ide_stopped;
		}
	}

	if (task->tf_flags & IDE_TFLAG_FLAGGED)
		task->tf_flags |= IDE_TFLAG_FLAGGED_SET_IN_FLAGS;

	if ((task->tf_flags & IDE_TFLAG_DMA_PIO_FALLBACK) == 0) {
		ide_tf_dump(drive->name, tf);
		ide_tf_load(drive, task);
	}

	switch (task->data_phase) {
	case TASKFILE_MULTI_OUT:
	case TASKFILE_OUT:
		hwif->OUTBSYNC(drive, tf->command, hwif->io_ports.command_addr);
		ndelay(400);	/* FIXME */
		return pre_task_out_intr(drive, task->rq);
	case TASKFILE_MULTI_IN:
	case TASKFILE_IN:
		handler = task_in_intr;
		/* fall-through */
	case TASKFILE_NO_DATA:
		if (handler == NULL)
			handler = task_no_data_intr;
		/* WIN_{SPECIFY,RESTORE,SETMULT} use custom handlers */
		if (task->tf_flags & IDE_TFLAG_CUSTOM_HANDLER) {
			switch (tf->command) {
			case WIN_SPECIFY: handler = set_geometry_intr;	break;
			case WIN_RESTORE: handler = recal_intr;		break;
			case WIN_SETMULT: handler = set_multmode_intr;	break;
			}
		}
		ide_execute_command(drive, tf->command, handler,
				    WAIT_WORSTCASE, NULL);
		return ide_started;
	default:
		if (task_dma_ok(task) == 0 || drive->using_dma == 0 ||
		    dma_ops->dma_setup(drive))
			return ide_stopped;
		dma_ops->dma_exec_cmd(drive, tf->command);
		dma_ops->dma_start(drive);
		return ide_started;
	}
}
EXPORT_SYMBOL_GPL(do_rw_taskfile);

/*
 * set_multmode_intr() is invoked on completion of a WIN_SETMULT cmd.
 */
static ide_startstop_t set_multmode_intr(ide_drive_t *drive)
{
	u8 stat = ide_read_status(drive);

	if (OK_STAT(stat, READY_STAT, BAD_STAT))
		drive->mult_count = drive->mult_req;
	else {
		drive->mult_req = drive->mult_count = 0;
		drive->special.b.recalibrate = 1;
		(void) ide_dump_status(drive, "set_multmode", stat);
	}
	return ide_stopped;
}

/*
 * set_geometry_intr() is invoked on completion of a WIN_SPECIFY cmd.
 */
static ide_startstop_t set_geometry_intr(ide_drive_t *drive)
{
	int retries = 5;
	u8 stat;

	while (((stat = ide_read_status(drive)) & BUSY_STAT) && retries--)
		udelay(10);

	if (OK_STAT(stat, READY_STAT, BAD_STAT))
		return ide_stopped;

	if (stat & (ERR_STAT|DRQ_STAT))
		return ide_error(drive, "set_geometry_intr", stat);

	BUG_ON(HWGROUP(drive)->handler != NULL);
	ide_set_handler(drive, &set_geometry_intr, WAIT_WORSTCASE, NULL);
	return ide_started;
}

/*
 * recal_intr() is invoked on completion of a WIN_RESTORE (recalibrate) cmd.
 */
static ide_startstop_t recal_intr(ide_drive_t *drive)
{
	u8 stat = ide_read_status(drive);

	if (!OK_STAT(stat, READY_STAT, BAD_STAT))
		return ide_error(drive, "recal_intr", stat);
	return ide_stopped;
}

/*
 * Handler for commands without a data phase
 */
static ide_startstop_t task_no_data_intr(ide_drive_t *drive)
{
	ide_task_t *args	= HWGROUP(drive)->rq->special;
	u8 stat;

	local_irq_enable_in_hardirq();
	stat = ide_read_status(drive);

	if (!OK_STAT(stat, READY_STAT, BAD_STAT))
		/* ide_error() calls ide_end_drive_cmd() */
		return ide_error(drive, "task_no_data_intr", stat);

	if (args)
		ide_end_drive_cmd(drive, stat, ide_read_error(drive));

	return ide_stopped;
}

static u8 wait_drive_not_busy(ide_drive_t *drive)
{
	int retries;
	u8 stat;

	/*
	 * Last sector was transferred, wait until drive is ready.
	 * This can take up to 10 usec, but we will wait max 1 ms.
	 */
	for (retries = 0; retries < 100; retries++) {
		stat = ide_read_status(drive);

		if (stat & BUSY_STAT)
			udelay(10);
		else
			break;
	}

	if (stat & BUSY_STAT)
		printk(KERN_ERR "%s: drive still BUSY!\n", drive->name);

	return stat;
}

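/*
 * PIO a single sector to or from the page holding the current
 * scatterlist position, advancing hwif->cursg/cursg_ofs as data is
 * consumed.  The page is mapped with kmap_atomic(); on highmem
 * configurations interrupts are disabled around the atomic mapping.
 */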
static void ide_pio_sector(ide_drive_t *drive, struct request *rq,
			   unsigned int write)
{
	ide_hwif_t *hwif = drive->hwif;
	struct scatterlist *sg = hwif->sg_table;
	struct scatterlist *cursg = hwif->cursg;
	struct page *page;
#ifdef CONFIG_HIGHMEM
	unsigned long flags;
#endif
	unsigned int offset;
	u8 *buf;

	if (!cursg) {
		cursg = sg;
		hwif->cursg = sg;
	}

	page = sg_page(cursg);
	offset = cursg->offset + hwif->cursg_ofs * SECTOR_SIZE;

	/* get the current page and offset */
	page = nth_page(page, (offset >> PAGE_SHIFT));
	offset %= PAGE_SIZE;

#ifdef CONFIG_HIGHMEM
	local_irq_save(flags);
#endif
	buf = kmap_atomic(page, KM_BIO_SRC_IRQ) + offset;

	hwif->nleft--;
	hwif->cursg_ofs++;

	if ((hwif->cursg_ofs * SECTOR_SIZE) == cursg->length) {
		hwif->cursg = sg_next(hwif->cursg);
		hwif->cursg_ofs = 0;
	}

	/* do the actual data transfer */
	if (write)
		hwif->output_data(drive, rq, buf, SECTOR_SIZE);
	else
		hwif->input_data(drive, rq, buf, SECTOR_SIZE);

	kunmap_atomic(buf, KM_BIO_SRC_IRQ);
#ifdef CONFIG_HIGHMEM
	local_irq_restore(flags);
#endif
}

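/*
 * PIO a block of up to drive->mult_count sectors, as negotiated by
 * WIN_SETMULT, for the READ/WRITE MULTIPLE data phases.
 */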
static void ide_pio_multi(ide_drive_t *drive, struct request *rq,
			  unsigned int write)
{
	unsigned int nsect;

	nsect = min_t(unsigned int, drive->hwif->nleft, drive->mult_count);
	while (nsect--)
		ide_pio_sector(drive, rq, write);
}

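/*
 * Transfer one data block for the current data phase.  Flagged ioctl
 * taskfiles may force 16-bit I/O for the duration of the transfer.
 */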
static void ide_pio_datablock(ide_drive_t *drive, struct request *rq,
			      unsigned int write)
{
	u8 saved_io_32bit = drive->io_32bit;

	if (rq->bio)	/* fs request */
		rq->errors = 0;

	if (rq->cmd_type == REQ_TYPE_ATA_TASKFILE) {
		ide_task_t *task = rq->special;

		if (task->tf_flags & IDE_TFLAG_IO_16BIT)
			drive->io_32bit = 0;
	}

	touch_softlockup_watchdog();

	switch (drive->hwif->data_phase) {
	case TASKFILE_MULTI_IN:
	case TASKFILE_MULTI_OUT:
		ide_pio_multi(drive, rq, write);
		break;
	default:
		ide_pio_sector(drive, rq, write);
		break;
	}

	drive->io_32bit = saved_io_32bit;
}

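/*
 * PIO error recovery: credit the sectors that made it to or from the
 * media before the fault (the count depends on the data phase) and
 * complete that part of the request, then hand the rest to ide_error().
 */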
static ide_startstop_t task_error(ide_drive_t *drive, struct request *rq,
				  const char *s, u8 stat)
{
	if (rq->bio) {
		ide_hwif_t *hwif = drive->hwif;
		int sectors = hwif->nsect - hwif->nleft;

		switch (hwif->data_phase) {
		case TASKFILE_IN:
			if (hwif->nleft)
				break;
			/* fall through */
		case TASKFILE_OUT:
			sectors--;
			break;
		case TASKFILE_MULTI_IN:
			if (hwif->nleft)
				break;
			/* fall through */
		case TASKFILE_MULTI_OUT:
			sectors -= drive->mult_count;
		default:
			break;
		}

		if (sectors > 0) {
			ide_driver_t *drv;

			drv = *(ide_driver_t **)rq->rq_disk->private_data;
			drv->end_request(drive, 1, sectors);
		}
	}
	return ide_error(drive, s, stat);
}

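/*
 * Complete a request: taskfile (ioctl) requests are finished via
 * ide_end_drive_cmd() so the register results reach the submitter;
 * everything else goes through the owning driver's end_request method,
 * or ide_end_request() if no disk is attached.
 */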
void task_end_request(ide_drive_t *drive, struct request *rq, u8 stat)
{
	if (rq->cmd_type == REQ_TYPE_ATA_TASKFILE) {
		u8 err = ide_read_error(drive);

		ide_end_drive_cmd(drive, stat, err);
		return;
	}

	if (rq->rq_disk) {
		ide_driver_t *drv;

		drv = *(ide_driver_t **)rq->rq_disk->private_data;
		drv->end_request(drive, 1, rq->nr_sectors);
	} else
		ide_end_request(drive, 1, rq->nr_sectors);
}

/*
 * We got an interrupt on a task_in case, but no errors and no DRQ.
 *
 * It might be a spurious irq (shared irq), but it might be a
 * command that had no output.
 */
static ide_startstop_t task_in_unexpected(ide_drive_t *drive, struct request *rq, u8 stat)
{
	/* Command all done? */
	if (OK_STAT(stat, READY_STAT, BUSY_STAT)) {
		task_end_request(drive, rq, stat);
		return ide_stopped;
	}

	/* Assume it was a spurious irq */
	ide_set_handler(drive, &task_in_intr, WAIT_WORSTCASE, NULL);
	return ide_started;
}

/*
 * Handler for command with PIO data-in phase (Read/Read Multiple).
 */
static ide_startstop_t task_in_intr(ide_drive_t *drive)
{
	ide_hwif_t *hwif = drive->hwif;
	struct request *rq = HWGROUP(drive)->rq;
	u8 stat = ide_read_status(drive);

	/* Error? */
	if (stat & ERR_STAT)
		return task_error(drive, rq, __func__, stat);

	/* Didn't want any data? Odd. */
	if (!(stat & DRQ_STAT))
		return task_in_unexpected(drive, rq, stat);

	ide_pio_datablock(drive, rq, 0);

	/* Are we done? Check status and finish transfer. */
	if (!hwif->nleft) {
		stat = wait_drive_not_busy(drive);
		if (!OK_STAT(stat, 0, BAD_STAT))
			return task_error(drive, rq, __func__, stat);
		task_end_request(drive, rq, stat);
		return ide_stopped;
	}

	/* Still data left to transfer. */
	ide_set_handler(drive, &task_in_intr, WAIT_WORSTCASE, NULL);

	return ide_started;
}

/*
 * Handler for command with PIO data-out phase (Write/Write Multiple).
 */
static ide_startstop_t task_out_intr(ide_drive_t *drive)
{
	ide_hwif_t *hwif = drive->hwif;
	struct request *rq = HWGROUP(drive)->rq;
	u8 stat = ide_read_status(drive);

	if (!OK_STAT(stat, DRIVE_READY, drive->bad_wstat))
		return task_error(drive, rq, __func__, stat);

	/* Deal with unexpected ATA data phase. */
	if (((stat & DRQ_STAT) == 0) ^ !hwif->nleft)
		return task_error(drive, rq, __func__, stat);

	if (!hwif->nleft) {
		task_end_request(drive, rq, stat);
		return ide_stopped;
	}

	/* Still data left to transfer. */
	ide_pio_datablock(drive, rq, 1);
	ide_set_handler(drive, &task_out_intr, WAIT_WORSTCASE, NULL);

	return ide_started;
}

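/*
 * Called once after a PIO-out command has been issued: wait for the
 * drive to raise DRQ, then install task_out_intr() and push the first
 * data block.
 */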
static ide_startstop_t pre_task_out_intr(ide_drive_t *drive, struct request *rq)
{
	ide_startstop_t startstop;

	if (ide_wait_stat(&startstop, drive, DRQ_STAT,
			  drive->bad_wstat, WAIT_DRQ)) {
		printk(KERN_ERR "%s: no DRQ after issuing %sWRITE%s\n",
				drive->name,
				drive->hwif->data_phase ? "MULT" : "",
				drive->addressing ? "_EXT" : "");
		return startstop;
	}

	if (!drive->unmask)
		local_irq_disable();

	ide_set_handler(drive, &task_out_intr, WAIT_WORSTCASE, NULL);
	ide_pio_datablock(drive, rq, 1);

	return ide_started;
}

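/*
 * Execute @task synchronously via a REQ_TYPE_ATA_TASKFILE request built
 * on the stack; @buf supplies or receives @nsect whole sectors of data.
 */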
int ide_raw_taskfile(ide_drive_t *drive, ide_task_t *task, u8 *buf, u16 nsect)
{
	struct request rq;

	memset(&rq, 0, sizeof(rq));
	rq.ref_count = 1;
	rq.cmd_type = REQ_TYPE_ATA_TASKFILE;
	rq.buffer = buf;

	/*
	 * (ks) We currently transfer only whole sectors.  This is
	 * sufficient for now, but it would be great to find a way to
	 * transfer arbitrary sizes, to support special commands such
	 * as READ LONG.
	 */
	rq.hard_nr_sectors = rq.nr_sectors = nsect;
	rq.hard_cur_sectors = rq.current_nr_sectors = nsect;

	if (task->tf_flags & IDE_TFLAG_WRITE)
		rq.cmd_flags |= REQ_RW;

	rq.special = task;
	task->rq = &rq;

	return ide_do_drive_cmd(drive, &rq, ide_wait);
}
EXPORT_SYMBOL(ide_raw_taskfile);

int ide_no_data_taskfile(ide_drive_t *drive, ide_task_t *task)
{
	task->data_phase = TASKFILE_NO_DATA;

	return ide_raw_taskfile(drive, task, NULL, 0);
}
EXPORT_SYMBOL_GPL(ide_no_data_taskfile);

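/*
 * A minimal sketch of a typical caller, assuming a drive that accepts
 * WIN_FLUSH_CACHE (cf. the cache flush path in ide-disk.c):
 *
 *	ide_task_t task;
 *
 *	memset(&task, 0, sizeof(task));
 *	task.tf.command = WIN_FLUSH_CACHE;
 *	task.tf_flags	= IDE_TFLAG_TF | IDE_TFLAG_DEVICE;
 *	err = ide_no_data_taskfile(drive, &task);
 */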
#ifdef CONFIG_IDE_TASK_IOCTL
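/*
 * HDIO_DRIVE_TASKFILE: the user buffer holds an ide_task_request_t
 * header followed by the out (write) data and then the in (read) data.
 * Build an ide_task_t from the requested registers and flags, execute
 * it, and copy the resulting registers and data back to user space.
 */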
int ide_taskfile_ioctl(ide_drive_t *drive, unsigned int cmd, unsigned long arg)
{
	ide_task_request_t	*req_task;
	ide_task_t		args;
	u8 *outbuf		= NULL;
	u8 *inbuf		= NULL;
	u8 *data_buf		= NULL;
	int err			= 0;
	int tasksize		= sizeof(struct ide_task_request_s);
	unsigned int taskin	= 0;
	unsigned int taskout	= 0;
	u16 nsect		= 0;
	char __user *buf = (char __user *)arg;

//	printk("IDE Taskfile ...\n");

	req_task = kzalloc(tasksize, GFP_KERNEL);
	if (req_task == NULL)
		return -ENOMEM;
	if (copy_from_user(req_task, buf, tasksize)) {
		kfree(req_task);
		return -EFAULT;
	}

	taskout = req_task->out_size;
	taskin  = req_task->in_size;

	if (taskin > 65536 || taskout > 65536) {
		err = -EINVAL;
		goto abort;
	}

	if (taskout) {
		int outtotal = tasksize;
		outbuf = kzalloc(taskout, GFP_KERNEL);
		if (outbuf == NULL) {
			err = -ENOMEM;
			goto abort;
		}
		if (copy_from_user(outbuf, buf + outtotal, taskout)) {
			err = -EFAULT;
			goto abort;
		}
	}

	if (taskin) {
		int intotal = tasksize + taskout;
		inbuf = kzalloc(taskin, GFP_KERNEL);
		if (inbuf == NULL) {
			err = -ENOMEM;
			goto abort;
		}
		if (copy_from_user(inbuf, buf + intotal, taskin)) {
			err = -EFAULT;
			goto abort;
		}
	}

	memset(&args, 0, sizeof(ide_task_t));

	memcpy(&args.tf_array[0], req_task->hob_ports, HDIO_DRIVE_HOB_HDR_SIZE - 2);
	memcpy(&args.tf_array[6], req_task->io_ports, HDIO_DRIVE_TASK_HDR_SIZE);

	args.data_phase = req_task->data_phase;

	args.tf_flags = IDE_TFLAG_IO_16BIT | IDE_TFLAG_DEVICE |
			IDE_TFLAG_IN_TF;
	if (drive->addressing == 1)
		args.tf_flags |= (IDE_TFLAG_LBA48 | IDE_TFLAG_IN_HOB);

	if (req_task->out_flags.all) {
		args.tf_flags |= IDE_TFLAG_FLAGGED;

		if (req_task->out_flags.b.data)
			args.tf_flags |= IDE_TFLAG_OUT_DATA;

		if (req_task->out_flags.b.nsector_hob)
			args.tf_flags |= IDE_TFLAG_OUT_HOB_NSECT;
		if (req_task->out_flags.b.sector_hob)
			args.tf_flags |= IDE_TFLAG_OUT_HOB_LBAL;
		if (req_task->out_flags.b.lcyl_hob)
			args.tf_flags |= IDE_TFLAG_OUT_HOB_LBAM;
		if (req_task->out_flags.b.hcyl_hob)
			args.tf_flags |= IDE_TFLAG_OUT_HOB_LBAH;

		if (req_task->out_flags.b.error_feature)
			args.tf_flags |= IDE_TFLAG_OUT_FEATURE;
		if (req_task->out_flags.b.nsector)
			args.tf_flags |= IDE_TFLAG_OUT_NSECT;
		if (req_task->out_flags.b.sector)
			args.tf_flags |= IDE_TFLAG_OUT_LBAL;
		if (req_task->out_flags.b.lcyl)
			args.tf_flags |= IDE_TFLAG_OUT_LBAM;
		if (req_task->out_flags.b.hcyl)
			args.tf_flags |= IDE_TFLAG_OUT_LBAH;
	} else {
		args.tf_flags |= IDE_TFLAG_OUT_TF;
		if (args.tf_flags & IDE_TFLAG_LBA48)
			args.tf_flags |= IDE_TFLAG_OUT_HOB;
	}

	if (req_task->in_flags.b.data)
		args.tf_flags |= IDE_TFLAG_IN_DATA;

	switch (req_task->data_phase) {
	case TASKFILE_MULTI_OUT:
		if (!drive->mult_count) {
			/* (hs): give up if multcount is not set */
			printk(KERN_ERR "%s: %s Multimode Write "
				"multcount is not set\n",
				drive->name, __func__);
			err = -EPERM;
			goto abort;
		}
		/* fall through */
	case TASKFILE_OUT:
	case TASKFILE_OUT_DMAQ:
	case TASKFILE_OUT_DMA:
		nsect = taskout / SECTOR_SIZE;
		data_buf = outbuf;
		break;
	case TASKFILE_MULTI_IN:
		if (!drive->mult_count) {
			/* (hs): give up if multcount is not set */
			printk(KERN_ERR "%s: %s Multimode Read failure "
				"multcount is not set\n",
				drive->name, __func__);
			err = -EPERM;
			goto abort;
		}
		/* fall through */
	case TASKFILE_IN:
	case TASKFILE_IN_DMAQ:
	case TASKFILE_IN_DMA:
		nsect = taskin / SECTOR_SIZE;
		data_buf = inbuf;
		break;
	case TASKFILE_NO_DATA:
		break;
	default:
		err = -EFAULT;
		goto abort;
	}

	if (req_task->req_cmd == IDE_DRIVE_TASK_NO_DATA)
		nsect = 0;
	else if (!nsect) {
		nsect = (args.tf.hob_nsect << 8) | args.tf.nsect;

		if (!nsect) {
			printk(KERN_ERR "%s: in/out command without data\n",
					drive->name);
			err = -EFAULT;
			goto abort;
		}
	}

	if (req_task->req_cmd == IDE_DRIVE_TASK_RAW_WRITE)
		args.tf_flags |= IDE_TFLAG_WRITE;

	err = ide_raw_taskfile(drive, &args, data_buf, nsect);

	memcpy(req_task->hob_ports, &args.tf_array[0], HDIO_DRIVE_HOB_HDR_SIZE - 2);
	memcpy(req_task->io_ports, &args.tf_array[6], HDIO_DRIVE_TASK_HDR_SIZE);

	if ((args.tf_flags & IDE_TFLAG_FLAGGED_SET_IN_FLAGS) &&
	    req_task->in_flags.all == 0) {
		req_task->in_flags.all = IDE_TASKFILE_STD_IN_FLAGS;
		if (drive->addressing == 1)
			req_task->in_flags.all |= (IDE_HOB_STD_IN_FLAGS << 8);
	}

	if (copy_to_user(buf, req_task, tasksize)) {
		err = -EFAULT;
		goto abort;
	}
	if (taskout) {
		int outtotal = tasksize;
		if (copy_to_user(buf + outtotal, outbuf, taskout)) {
			err = -EFAULT;
			goto abort;
		}
	}
	if (taskin) {
		int intotal = tasksize + taskout;
		if (copy_to_user(buf + intotal, inbuf, taskin)) {
			err = -EFAULT;
			goto abort;
		}
	}
abort:
	kfree(req_task);
	kfree(outbuf);
	kfree(inbuf);

//	printk("IDE Taskfile ioctl ended. rc = %i\n", err);

	return err;
}
#endif

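/*
 * HDIO_DRIVE_CMD: args[] in the user buffer is { command, sector
 * number/count, feature, sector count }; a NULL argument just queues an
 * empty taskfile request.  SET FEATURES transfer-mode changes above
 * UDMA33 are checked against the 80-wire cable detection first.
 */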
int ide_cmd_ioctl(ide_drive_t *drive, unsigned int cmd, unsigned long arg)
{
	u8 *buf = NULL;
	int bufsize = 0, err = 0;
	u8 args[4], xfer_rate = 0;
	ide_task_t tfargs;
	struct ide_taskfile *tf = &tfargs.tf;
	struct hd_driveid *id = drive->id;

	if (NULL == (void *) arg) {
		struct request rq;

		ide_init_drive_cmd(&rq);
		rq.cmd_type = REQ_TYPE_ATA_TASKFILE;

		return ide_do_drive_cmd(drive, &rq, ide_wait);
	}

	if (copy_from_user(args, (void __user *)arg, 4))
		return -EFAULT;

	memset(&tfargs, 0, sizeof(ide_task_t));
	tf->feature = args[2];
	if (args[0] == WIN_SMART) {
		tf->nsect = args[3];
		tf->lbal  = args[1];
		tf->lbam  = 0x4f;
		tf->lbah  = 0xc2;
		tfargs.tf_flags = IDE_TFLAG_OUT_TF | IDE_TFLAG_IN_NSECT;
	} else {
		tf->nsect = args[1];
		tfargs.tf_flags = IDE_TFLAG_OUT_FEATURE |
				  IDE_TFLAG_OUT_NSECT | IDE_TFLAG_IN_NSECT;
	}
	tf->command = args[0];
	tfargs.data_phase = args[3] ? TASKFILE_IN : TASKFILE_NO_DATA;

	if (args[3]) {
		tfargs.tf_flags |= IDE_TFLAG_IO_16BIT;
		bufsize = SECTOR_WORDS * 4 * args[3];
		buf = kzalloc(bufsize, GFP_KERNEL);
		if (buf == NULL)
			return -ENOMEM;
	}

	if (tf->command == WIN_SETFEATURES &&
	    tf->feature == SETFEATURES_XFER &&
	    tf->nsect >= XFER_SW_DMA_0 &&
	    (id->dma_ultra || id->dma_mword || id->dma_1word)) {
		xfer_rate = args[1];
		if (tf->nsect > XFER_UDMA_2 && !eighty_ninty_three(drive)) {
			printk(KERN_WARNING "%s: UDMA speeds >UDMA33 cannot "
					    "be set\n", drive->name);
			goto abort;
		}
	}

	err = ide_raw_taskfile(drive, &tfargs, buf, args[3]);

	args[0] = tf->status;
	args[1] = tf->error;
	args[2] = tf->nsect;

	if (!err && xfer_rate) {
		/* active-retuning-calls future */
		ide_set_xfer_rate(drive, xfer_rate);
		ide_driveid_update(drive);
	}
abort:
	if (copy_to_user((void __user *)arg, &args, 4))
		err = -EFAULT;
	if (buf) {
		if (copy_to_user((void __user *)(arg + 4), buf, bufsize))
			err = -EFAULT;
		kfree(buf);
	}
	return err;
}

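/*
 * HDIO_DRIVE_TASK: args[] holds the command byte followed by the six
 * taskfile registers (feature, nsect, lbal, lbam, lbah, device);
 * execute as a no-data command and return the resulting registers.
 */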
int ide_task_ioctl(ide_drive_t *drive, unsigned int cmd, unsigned long arg)
{
	void __user *p = (void __user *)arg;
	int err = 0;
	u8 args[7];
	ide_task_t task;

	if (copy_from_user(args, p, 7))
		return -EFAULT;

	memset(&task, 0, sizeof(task));
	memcpy(&task.tf_array[7], &args[1], 6);
	task.tf.command = args[0];
	task.tf_flags = IDE_TFLAG_TF | IDE_TFLAG_DEVICE;

	err = ide_no_data_taskfile(drive, &task);

	args[0] = task.tf.command;
	memcpy(&args[1], &task.tf_array[7], 6);

	if (copy_to_user(p, args, 7))
		err = -EFAULT;

	return err;
}