ide-taskfile.c revision b788ee9c6561fd9219a503216284d61036a0dc0b
1/*
2 *  Copyright (C) 2000-2002	   Michael Cornwell <cornwell@acm.org>
3 *  Copyright (C) 2000-2002	   Andre Hedrick <andre@linux-ide.org>
4 *  Copyright (C) 2001-2002	   Klaus Smolin
5 *					IBM Storage Technology Division
6 *  Copyright (C) 2003-2004, 2007  Bartlomiej Zolnierkiewicz
7 *
8 *  The big the bad and the ugly.
9 */
10
11#include <linux/types.h>
12#include <linux/string.h>
13#include <linux/kernel.h>
14#include <linux/sched.h>
15#include <linux/interrupt.h>
16#include <linux/errno.h>
17#include <linux/slab.h>
18#include <linux/delay.h>
19#include <linux/hdreg.h>
20#include <linux/ide.h>
21#include <linux/scatterlist.h>
22
23#include <asm/uaccess.h>
24#include <asm/io.h>
25
/*
 * ide_tf_dump - dump a taskfile's register contents for debugging
 * @s:	prefix string for the log lines (typically the drive name)
 * @tf:	taskfile to dump
 *
 * Prints the command-block registers plus the HOB (high-order byte)
 * registers used by LBA48 commands.  Compiles to an empty function
 * unless DEBUG is defined.
 */
void ide_tf_dump(const char *s, struct ide_taskfile *tf)
{
#ifdef DEBUG
	printk("%s: tf: feat 0x%02x nsect 0x%02x lbal 0x%02x "
		"lbam 0x%02x lbah 0x%02x dev 0x%02x cmd 0x%02x\n",
		s, tf->feature, tf->nsect, tf->lbal,
		tf->lbam, tf->lbah, tf->device, tf->command);
	printk("%s: hob: nsect 0x%02x lbal 0x%02x "
		"lbam 0x%02x lbah 0x%02x\n",
		s, tf->hob_nsect, tf->hob_lbal,
		tf->hob_lbam, tf->hob_lbah);
#endif
}
39
40int taskfile_lib_get_identify (ide_drive_t *drive, u8 *buf)
41{
42	struct ide_cmd cmd;
43
44	memset(&cmd, 0, sizeof(cmd));
45	cmd.tf.nsect = 0x01;
46	if (drive->media == ide_disk)
47		cmd.tf.command = ATA_CMD_ID_ATA;
48	else
49		cmd.tf.command = ATA_CMD_ID_ATAPI;
50	cmd.tf_flags = IDE_TFLAG_TF | IDE_TFLAG_DEVICE;
51	cmd.protocol = ATA_PROT_PIO;
52
53	return ide_raw_taskfile(drive, &cmd, buf, 1);
54}
55
56static ide_startstop_t task_no_data_intr(ide_drive_t *);
57static ide_startstop_t pre_task_out_intr(ide_drive_t *, struct ide_cmd *);
58static ide_startstop_t task_pio_intr(ide_drive_t *);
59
/*
 * do_rw_taskfile - issue a taskfile command to a drive
 * @drive:	target drive
 * @orig_cmd:	command to issue (copied into the hwif's private command)
 *
 * Loads the taskfile registers (unless this is a PIO fallback retry where
 * they were already loaded) and starts the command according to the
 * protocol in @orig_cmd, installing the matching irq handler.
 *
 * Returns ide_started if the command was issued, ide_stopped on error.
 */
ide_startstop_t do_rw_taskfile(ide_drive_t *drive, struct ide_cmd *orig_cmd)
{
	ide_hwif_t *hwif = drive->hwif;
	struct ide_cmd *cmd = &hwif->cmd;
	struct ide_taskfile *tf = &cmd->tf;
	ide_handler_t *handler = NULL;
	const struct ide_tp_ops *tp_ops = hwif->tp_ops;
	const struct ide_dma_ops *dma_ops = hwif->dma_ops;

	/* Multi-sector PIO is invalid until a multcount has been set up. */
	if (orig_cmd->protocol == ATA_PROT_PIO &&
	    (orig_cmd->tf_flags & IDE_TFLAG_MULTI_PIO) &&
	    drive->mult_count == 0) {
		printk(KERN_ERR "%s: multimode not set!\n", drive->name);
		return ide_stopped;
	}

	if (orig_cmd->ftf_flags & IDE_FTFLAG_FLAGGED)
		orig_cmd->ftf_flags |= IDE_FTFLAG_SET_IN_FLAGS;

	/* Work on the hwif's copy so @orig_cmd itself stays untouched. */
	memcpy(cmd, orig_cmd, sizeof(*cmd));

	/*
	 * On a DMA->PIO fallback the registers were already programmed by
	 * the failed DMA attempt, so don't reload them.
	 */
	if ((cmd->tf_flags & IDE_TFLAG_DMA_PIO_FALLBACK) == 0) {
		ide_tf_dump(drive->name, tf);
		tp_ops->set_irq(hwif, 1);
		SELECT_MASK(drive, 0);
		tp_ops->tf_load(drive, cmd);
	}

	switch (cmd->protocol) {
	case ATA_PROT_PIO:
		if (cmd->tf_flags & IDE_TFLAG_WRITE) {
			/* PIO writes push the first block synchronously. */
			tp_ops->exec_command(hwif, tf->command);
			ndelay(400);	/* FIXME */
			return pre_task_out_intr(drive, cmd);
		}
		handler = task_pio_intr;
		/* fall-through */
	case ATA_PROT_NODATA:
		if (handler == NULL)
			handler = task_no_data_intr;
		ide_execute_command(drive, tf->command, handler,
				    WAIT_WORSTCASE, NULL);
		return ide_started;
	case ATA_PROT_DMA:
		/* DMA requires it be enabled and the sg/engine set up. */
		if ((drive->dev_flags & IDE_DFLAG_USING_DMA) == 0 ||
		    ide_build_sglist(drive, cmd) == 0 ||
		    dma_ops->dma_setup(drive, cmd))
			return ide_stopped;
		dma_ops->dma_exec_cmd(drive, tf->command);
		dma_ops->dma_start(drive);
	default:
		return ide_started;
	}
}
EXPORT_SYMBOL_GPL(do_rw_taskfile);
115
116static ide_startstop_t task_no_data_intr(ide_drive_t *drive)
117{
118	ide_hwif_t *hwif = drive->hwif;
119	struct ide_cmd *cmd = &hwif->cmd;
120	struct ide_taskfile *tf = &cmd->tf;
121	int custom = (cmd->tf_flags & IDE_TFLAG_CUSTOM_HANDLER) ? 1 : 0;
122	int retries = (custom && tf->command == ATA_CMD_INIT_DEV_PARAMS) ? 5 : 1;
123	u8 stat;
124
125	local_irq_enable_in_hardirq();
126
127	while (1) {
128		stat = hwif->tp_ops->read_status(hwif);
129		if ((stat & ATA_BUSY) == 0 || retries-- == 0)
130			break;
131		udelay(10);
132	};
133
134	if (!OK_STAT(stat, ATA_DRDY, BAD_STAT)) {
135		if (custom && tf->command == ATA_CMD_SET_MULTI) {
136			drive->mult_req = drive->mult_count = 0;
137			drive->special.b.recalibrate = 1;
138			(void)ide_dump_status(drive, __func__, stat);
139			return ide_stopped;
140		} else if (custom && tf->command == ATA_CMD_INIT_DEV_PARAMS) {
141			if ((stat & (ATA_ERR | ATA_DRQ)) == 0) {
142				ide_set_handler(drive, &task_no_data_intr,
143						WAIT_WORSTCASE, NULL);
144				return ide_started;
145			}
146		}
147		return ide_error(drive, "task_no_data_intr", stat);
148	}
149
150	if (custom && tf->command == ATA_CMD_SET_MULTI)
151		drive->mult_count = drive->mult_req;
152
153	if (custom == 0 || tf->command == ATA_CMD_IDLEIMMEDIATE ||
154	    tf->command == ATA_CMD_CHK_POWER) {
155		struct request *rq = hwif->rq;
156
157		if (blk_pm_request(rq))
158			ide_complete_pm_rq(drive, rq);
159		else
160			ide_finish_cmd(drive, cmd, stat);
161	}
162
163	return ide_stopped;
164}
165
166static u8 wait_drive_not_busy(ide_drive_t *drive)
167{
168	ide_hwif_t *hwif = drive->hwif;
169	int retries;
170	u8 stat;
171
172	/*
173	 * Last sector was transfered, wait until device is ready.  This can
174	 * take up to 6 ms on some ATAPI devices, so we will wait max 10 ms.
175	 */
176	for (retries = 0; retries < 1000; retries++) {
177		stat = hwif->tp_ops->read_status(hwif);
178
179		if (stat & ATA_BUSY)
180			udelay(10);
181		else
182			break;
183	}
184
185	if (stat & ATA_BUSY)
186		printk(KERN_ERR "%s: drive still BUSY!\n", drive->name);
187
188	return stat;
189}
190
191static void ide_pio_sector(ide_drive_t *drive, struct ide_cmd *cmd,
192			   unsigned int write)
193{
194	ide_hwif_t *hwif = drive->hwif;
195	struct scatterlist *sg = hwif->sg_table;
196	struct scatterlist *cursg = cmd->cursg;
197	struct page *page;
198#ifdef CONFIG_HIGHMEM
199	unsigned long flags;
200#endif
201	unsigned int offset;
202	u8 *buf;
203
204	cursg = cmd->cursg;
205	if (!cursg) {
206		cursg = sg;
207		cmd->cursg = sg;
208	}
209
210	page = sg_page(cursg);
211	offset = cursg->offset + cmd->cursg_ofs * SECTOR_SIZE;
212
213	/* get the current page and offset */
214	page = nth_page(page, (offset >> PAGE_SHIFT));
215	offset %= PAGE_SIZE;
216
217#ifdef CONFIG_HIGHMEM
218	local_irq_save(flags);
219#endif
220	buf = kmap_atomic(page, KM_BIO_SRC_IRQ) + offset;
221
222	cmd->nleft--;
223	cmd->cursg_ofs++;
224
225	if ((cmd->cursg_ofs * SECTOR_SIZE) == cursg->length) {
226		cmd->cursg = sg_next(cmd->cursg);
227		cmd->cursg_ofs = 0;
228	}
229
230	/* do the actual data transfer */
231	if (write)
232		hwif->tp_ops->output_data(drive, cmd, buf, SECTOR_SIZE);
233	else
234		hwif->tp_ops->input_data(drive, cmd, buf, SECTOR_SIZE);
235
236	kunmap_atomic(buf, KM_BIO_SRC_IRQ);
237#ifdef CONFIG_HIGHMEM
238	local_irq_restore(flags);
239#endif
240}
241
242static void ide_pio_multi(ide_drive_t *drive, struct ide_cmd *cmd,
243			  unsigned int write)
244{
245	unsigned int nsect;
246
247	nsect = min_t(unsigned int, cmd->nleft, drive->mult_count);
248	while (nsect--)
249		ide_pio_sector(drive, cmd, write);
250}
251
252static void ide_pio_datablock(ide_drive_t *drive, struct ide_cmd *cmd,
253			      unsigned int write)
254{
255	u8 saved_io_32bit = drive->io_32bit;
256
257	if (cmd->tf_flags & IDE_TFLAG_FS)
258		cmd->rq->errors = 0;
259
260	if (cmd->tf_flags & IDE_TFLAG_IO_16BIT)
261		drive->io_32bit = 0;
262
263	touch_softlockup_watchdog();
264
265	if (cmd->tf_flags & IDE_TFLAG_MULTI_PIO)
266		ide_pio_multi(drive, cmd, write);
267	else
268		ide_pio_sector(drive, cmd, write);
269
270	drive->io_32bit = saved_io_32bit;
271}
272
273static void ide_error_cmd(ide_drive_t *drive, struct ide_cmd *cmd)
274{
275	if (cmd->tf_flags & IDE_TFLAG_FS) {
276		int sectors = cmd->nsect - cmd->nleft;
277
278		if (cmd->protocol == ATA_PROT_PIO &&
279		    ((cmd->tf_flags & IDE_TFLAG_WRITE) || cmd->nleft == 0)) {
280			if (cmd->tf_flags & IDE_TFLAG_MULTI_PIO)
281				sectors -= drive->mult_count;
282			else
283				sectors--;
284		}
285
286		if (sectors > 0)
287			ide_complete_rq(drive, 0, sectors << 9);
288	}
289}
290
291void ide_finish_cmd(ide_drive_t *drive, struct ide_cmd *cmd, u8 stat)
292{
293	struct request *rq = drive->hwif->rq;
294	u8 err = ide_read_error(drive);
295
296	ide_complete_cmd(drive, cmd, stat, err);
297	rq->errors = err;
298	ide_complete_rq(drive, err ? -EIO : 0, blk_rq_bytes(rq));
299}
300
/*
 * Handler for command with PIO data phase.
 *
 * Called once per drive interrupt: validates the status for the current
 * direction, transfers the next data block, and either re-arms itself,
 * completes the request, or escalates to error handling.
 */
static ide_startstop_t task_pio_intr(ide_drive_t *drive)
{
	ide_hwif_t *hwif = drive->hwif;
	struct ide_cmd *cmd = &drive->hwif->cmd;
	u8 stat = hwif->tp_ops->read_status(hwif);
	u8 write = !!(cmd->tf_flags & IDE_TFLAG_WRITE);

	if (write == 0) {
		/* Error? */
		if (stat & ATA_ERR)
			goto out_err;

		/* Didn't want any data? Odd. */
		if ((stat & ATA_DRQ) == 0) {
			/* Command all done? */
			if (OK_STAT(stat, ATA_DRDY, ATA_BUSY))
				goto out_end;

			/* Assume it was a spurious irq */
			goto out_wait;
		}
	} else {
		if (!OK_STAT(stat, DRIVE_READY, drive->bad_wstat))
			goto out_err;

		/*
		 * Deal with unexpected ATA data phase: DRQ and the
		 * remaining-sector count must agree — it is an error if
		 * exactly one of them says the transfer is over.
		 */
		if (((stat & ATA_DRQ) == 0) ^ (cmd->nleft == 0))
			goto out_err;
	}

	/* Write side: last block was acknowledged, nothing left to send. */
	if (write && cmd->nleft == 0)
		goto out_end;

	/* Still data left to transfer. */
	ide_pio_datablock(drive, cmd, write);

	/* Are we done? Check status and finish transfer. */
	if (write == 0 && cmd->nleft == 0) {
		stat = wait_drive_not_busy(drive);
		if (!OK_STAT(stat, 0, BAD_STAT))
			goto out_err;

		goto out_end;
	}
out_wait:
	/* Still data left to transfer. */
	ide_set_handler(drive, &task_pio_intr, WAIT_WORSTCASE, NULL);
	return ide_started;
out_end:
	if ((cmd->tf_flags & IDE_TFLAG_FS) == 0)
		ide_finish_cmd(drive, cmd, stat);
	else
		ide_complete_rq(drive, 0, cmd->rq->nr_sectors << 9);
	return ide_stopped;
out_err:
	ide_error_cmd(drive, cmd);
	return ide_error(drive, __func__, stat);
}
362
/*
 * pre_task_out_intr - start the data-out phase of a PIO write
 * @drive:	target drive
 * @cmd:	command being processed
 *
 * Waits for the drive to raise DRQ, installs task_pio_intr() as the irq
 * handler and pushes out the first data block.
 *
 * Returns ide_started on success, or the ide_wait_stat() verdict when
 * DRQ never shows up.
 */
static ide_startstop_t pre_task_out_intr(ide_drive_t *drive,
					 struct ide_cmd *cmd)
{
	ide_startstop_t startstop;

	if (ide_wait_stat(&startstop, drive, ATA_DRQ,
			  drive->bad_wstat, WAIT_DRQ)) {
		printk(KERN_ERR "%s: no DRQ after issuing %sWRITE%s\n",
			drive->name,
			(cmd->tf_flags & IDE_TFLAG_MULTI_PIO) ? "MULT" : "",
			(drive->dev_flags & IDE_DFLAG_LBA48) ? "_EXT" : "");
		return startstop;
	}

	/* Keep irqs masked during the transfer unless unmasking is allowed. */
	if ((drive->dev_flags & IDE_DFLAG_UNMASK) == 0)
		local_irq_disable();

	/* Install the handler before pushing data so no irq is missed. */
	ide_set_handler(drive, &task_pio_intr, WAIT_WORSTCASE, NULL);

	ide_pio_datablock(drive, cmd, 1);

	return ide_started;
}
386
/*
 * ide_raw_taskfile - synchronously execute a taskfile command
 * @drive:	target drive
 * @cmd:	command to execute
 * @buf:	data buffer (may be NULL when @nsect is 0)
 * @nsect:	number of whole sectors to transfer
 *
 * Wraps @cmd in a REQ_TYPE_ATA_TASKFILE block request, submits it to the
 * drive's queue and waits for completion.
 *
 * Returns the blk_execute_rq() result (0 on success).
 */
int ide_raw_taskfile(ide_drive_t *drive, struct ide_cmd *cmd, u8 *buf,
		     u16 nsect)
{
	struct request *rq;
	int error;

	/* __GFP_WAIT: may sleep until a free request is available. */
	rq = blk_get_request(drive->queue, READ, __GFP_WAIT);
	rq->cmd_type = REQ_TYPE_ATA_TASKFILE;
	rq->buffer = buf;

	/*
	 * (ks) We transfer currently only whole sectors.
	 * This is suffient for now.  But, it would be great,
	 * if we would find a solution to transfer any size.
	 * To support special commands like READ LONG.
	 */
	rq->hard_nr_sectors = rq->nr_sectors = nsect;
	rq->hard_cur_sectors = rq->current_nr_sectors = nsect;

	if (cmd->tf_flags & IDE_TFLAG_WRITE)
		rq->cmd_flags |= REQ_RW;

	rq->special = cmd;
	cmd->rq = rq;

	error = blk_execute_rq(drive->queue, NULL, rq, 0);
	blk_put_request(rq);

	return error;
}

EXPORT_SYMBOL(ide_raw_taskfile);
419
420int ide_no_data_taskfile(ide_drive_t *drive, struct ide_cmd *cmd)
421{
422	cmd->protocol = ATA_PROT_NODATA;
423
424	return ide_raw_taskfile(drive, cmd, NULL, 0);
425}
426EXPORT_SYMBOL_GPL(ide_no_data_taskfile);
427
428#ifdef CONFIG_IDE_TASK_IOCTL
/*
 * ide_taskfile_ioctl - handle the HDIO_DRIVE_TASKFILE ioctl
 * @drive:	target drive
 * @arg:	userspace pointer to an ide_task_request_t, optionally
 *		followed by the out (write) buffer and then the in (read)
 *		buffer
 *
 * Copies the request header and any data buffers in from userspace,
 * translates them into a struct ide_cmd, executes the command via
 * ide_raw_taskfile() and copies the resulting registers and data back.
 *
 * Returns 0 on success or a negative errno.
 */
int ide_taskfile_ioctl(ide_drive_t *drive, unsigned long arg)
{
	ide_task_request_t	*req_task;
	struct ide_cmd		cmd;
	u8 *outbuf		= NULL;
	u8 *inbuf		= NULL;
	u8 *data_buf		= NULL;
	int err			= 0;
	int tasksize		= sizeof(struct ide_task_request_s);
	unsigned int taskin	= 0;
	unsigned int taskout	= 0;
	u16 nsect		= 0;
	char __user *buf = (char __user *)arg;

	req_task = kzalloc(tasksize, GFP_KERNEL);
	if (req_task == NULL) return -ENOMEM;
	if (copy_from_user(req_task, buf, tasksize)) {
		kfree(req_task);
		return -EFAULT;
	}

	taskout = req_task->out_size;
	taskin  = req_task->in_size;

	/* Cap each direction's data at 64 KiB. */
	if (taskin > 65536 || taskout > 65536) {
		err = -EINVAL;
		goto abort;
	}

	/* The out buffer follows the request header in userspace. */
	if (taskout) {
		int outtotal = tasksize;
		outbuf = kzalloc(taskout, GFP_KERNEL);
		if (outbuf == NULL) {
			err = -ENOMEM;
			goto abort;
		}
		if (copy_from_user(outbuf, buf + outtotal, taskout)) {
			err = -EFAULT;
			goto abort;
		}
	}

	/* The in buffer follows the out buffer in userspace. */
	if (taskin) {
		int intotal = tasksize + taskout;
		inbuf = kzalloc(taskin, GFP_KERNEL);
		if (inbuf == NULL) {
			err = -ENOMEM;
			goto abort;
		}
		if (copy_from_user(inbuf, buf + intotal, taskin)) {
			err = -EFAULT;
			goto abort;
		}
	}

	memset(&cmd, 0, sizeof(cmd));

	/* hob_ports/io_ports map onto the HOB and classic register halves. */
	memcpy(&cmd.tf_array[0], req_task->hob_ports,
	       HDIO_DRIVE_HOB_HDR_SIZE - 2);
	memcpy(&cmd.tf_array[6], req_task->io_ports,
	       HDIO_DRIVE_TASK_HDR_SIZE);

	cmd.tf_flags   = IDE_TFLAG_IO_16BIT | IDE_TFLAG_DEVICE |
			 IDE_TFLAG_IN_TF;

	if (drive->dev_flags & IDE_DFLAG_LBA48)
		cmd.tf_flags |= (IDE_TFLAG_LBA48 | IDE_TFLAG_IN_HOB);

	/*
	 * Flagged request: the caller selects exactly which registers to
	 * write out; otherwise write the whole standard taskfile.
	 */
	if (req_task->out_flags.all) {
		cmd.ftf_flags |= IDE_FTFLAG_FLAGGED;

		if (req_task->out_flags.b.data)
			cmd.ftf_flags |= IDE_FTFLAG_OUT_DATA;

		if (req_task->out_flags.b.nsector_hob)
			cmd.tf_flags |= IDE_TFLAG_OUT_HOB_NSECT;
		if (req_task->out_flags.b.sector_hob)
			cmd.tf_flags |= IDE_TFLAG_OUT_HOB_LBAL;
		if (req_task->out_flags.b.lcyl_hob)
			cmd.tf_flags |= IDE_TFLAG_OUT_HOB_LBAM;
		if (req_task->out_flags.b.hcyl_hob)
			cmd.tf_flags |= IDE_TFLAG_OUT_HOB_LBAH;

		if (req_task->out_flags.b.error_feature)
			cmd.tf_flags |= IDE_TFLAG_OUT_FEATURE;
		if (req_task->out_flags.b.nsector)
			cmd.tf_flags |= IDE_TFLAG_OUT_NSECT;
		if (req_task->out_flags.b.sector)
			cmd.tf_flags |= IDE_TFLAG_OUT_LBAL;
		if (req_task->out_flags.b.lcyl)
			cmd.tf_flags |= IDE_TFLAG_OUT_LBAM;
		if (req_task->out_flags.b.hcyl)
			cmd.tf_flags |= IDE_TFLAG_OUT_LBAH;
	} else {
		cmd.tf_flags |= IDE_TFLAG_OUT_TF;
		if (cmd.tf_flags & IDE_TFLAG_LBA48)
			cmd.tf_flags |= IDE_TFLAG_OUT_HOB;
	}

	if (req_task->in_flags.b.data)
		cmd.ftf_flags |= IDE_FTFLAG_IN_DATA;

	if (req_task->req_cmd == IDE_DRIVE_TASK_RAW_WRITE) {
		/* fixup data phase if needed */
		if (req_task->data_phase == TASKFILE_IN_DMAQ ||
		    req_task->data_phase == TASKFILE_IN_DMA)
			cmd.tf_flags |= IDE_TFLAG_WRITE;
	}

	cmd.protocol = ATA_PROT_DMA;

	/* Translate the data phase into protocol, direction and buffer. */
	switch (req_task->data_phase) {
		case TASKFILE_MULTI_OUT:
			if (!drive->mult_count) {
				/* (hs): give up if multcount is not set */
				printk(KERN_ERR "%s: %s Multimode Write " \
					"multcount is not set\n",
					drive->name, __func__);
				err = -EPERM;
				goto abort;
			}
			cmd.tf_flags |= IDE_TFLAG_MULTI_PIO;
			/* fall through */
		case TASKFILE_OUT:
			cmd.protocol = ATA_PROT_PIO;
			/* fall through */
		case TASKFILE_OUT_DMAQ:
		case TASKFILE_OUT_DMA:
			cmd.tf_flags |= IDE_TFLAG_WRITE;
			nsect = taskout / SECTOR_SIZE;
			data_buf = outbuf;
			break;
		case TASKFILE_MULTI_IN:
			if (!drive->mult_count) {
				/* (hs): give up if multcount is not set */
				printk(KERN_ERR "%s: %s Multimode Read failure " \
					"multcount is not set\n",
					drive->name, __func__);
				err = -EPERM;
				goto abort;
			}
			cmd.tf_flags |= IDE_TFLAG_MULTI_PIO;
			/* fall through */
		case TASKFILE_IN:
			cmd.protocol = ATA_PROT_PIO;
			/* fall through */
		case TASKFILE_IN_DMAQ:
		case TASKFILE_IN_DMA:
			nsect = taskin / SECTOR_SIZE;
			data_buf = inbuf;
			break;
		case TASKFILE_NO_DATA:
			cmd.protocol = ATA_PROT_NODATA;
			break;
		default:
			err = -EFAULT;
			goto abort;
	}

	/*
	 * If the buffer size didn't determine nsect, fall back to the
	 * sector count programmed into the taskfile itself.
	 */
	if (req_task->req_cmd == IDE_DRIVE_TASK_NO_DATA)
		nsect = 0;
	else if (!nsect) {
		nsect = (cmd.tf.hob_nsect << 8) | cmd.tf.nsect;

		if (!nsect) {
			printk(KERN_ERR "%s: in/out command without data\n",
					drive->name);
			err = -EFAULT;
			goto abort;
		}
	}

	err = ide_raw_taskfile(drive, &cmd, data_buf, nsect);

	/* Copy the (possibly updated) registers back into the request. */
	memcpy(req_task->hob_ports, &cmd.tf_array[0],
	       HDIO_DRIVE_HOB_HDR_SIZE - 2);
	memcpy(req_task->io_ports, &cmd.tf_array[6],
	       HDIO_DRIVE_TASK_HDR_SIZE);

	if ((cmd.ftf_flags & IDE_FTFLAG_SET_IN_FLAGS) &&
	    req_task->in_flags.all == 0) {
		req_task->in_flags.all = IDE_TASKFILE_STD_IN_FLAGS;
		if (drive->dev_flags & IDE_DFLAG_LBA48)
			req_task->in_flags.all |= (IDE_HOB_STD_IN_FLAGS << 8);
	}

	if (copy_to_user(buf, req_task, tasksize)) {
		err = -EFAULT;
		goto abort;
	}
	if (taskout) {
		int outtotal = tasksize;
		if (copy_to_user(buf + outtotal, outbuf, taskout)) {
			err = -EFAULT;
			goto abort;
		}
	}
	if (taskin) {
		int intotal = tasksize + taskout;
		if (copy_to_user(buf + intotal, inbuf, taskin)) {
			err = -EFAULT;
			goto abort;
		}
	}
abort:
	kfree(req_task);
	kfree(outbuf);
	kfree(inbuf);

	return err;
}
644#endif
645