virthba.c revision a3acc83a4a2b8e1a6f8f3c5bdcfa3bb5f5f9e338
1/* virthba.c
2 *
 3 * Copyright (C) 2010 - 2013 UNISYS CORPORATION
4 * All rights reserved.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or (at
9 * your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful, but
12 * WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
14 * NON INFRINGEMENT.  See the GNU General Public License for more
15 * details.
16 */
17
18#define EXPORT_SYMTAB
19
 20/* If you want to turn on some debugging of write device data or read
 21 * device data, define these two undefs.  You will probably want to
 22 * customize the code that is here, since it was written assuming it was
 23 * reading and writing a specific data file, df.64M.txt, which is a
 24 * 64-megabyte file created by Art Nilson using a script I wrote called
 25 * cr_test_data.pl.  The data file consists of 256-byte lines of text
 26 * that start with an 8-digit sequence number, a colon, and then
 27 * letters after that. */
28
29#undef DBGINF
30
31#include <linux/kernel.h>
32#ifdef CONFIG_MODVERSIONS
33#include <config/modversions.h>
34#endif
35
36#include "uniklog.h"
37#include "diagnostics/appos_subsystems.h"
38#include "uisutils.h"
39#include "uisqueue.h"
40#include "uisthread.h"
41
42#include <linux/module.h>
43#include <linux/init.h>
44#include <linux/pci.h>
45#include <linux/spinlock.h>
46#include <linux/device.h>
47#include <linux/slab.h>
48#include <scsi/scsi.h>
49#include <scsi/scsi_host.h>
50#include <scsi/scsi_cmnd.h>
51#include <scsi/scsi_device.h>
52#include <asm/param.h>
53#include <linux/proc_fs.h>
54#include <linux/types.h>
55
56#include "virthba.h"
57#include "virtpci.h"
58#include "visorchipset.h"
59#include "version.h"
60#include "guestlinuxdebug.h"
61/* this is shorter than using __FILE__ (full path name) in
62 * debug/info/error messages
63 */
64#define CURRENT_FILE_PC VIRT_HBA_PC_virthba_c
65#define __MYFILE__ "virthba.c"
66
67/* NOTE:  L1_CACHE_BYTES >=128 */
68#define DEVICE_ATTRIBUTE struct device_attribute
69
70/*****************************************************/
71/* Forward declarations                              */
72/*****************************************************/
73static int virthba_probe(struct virtpci_dev *dev,
74			 const struct pci_device_id *id);
75static void virthba_remove(struct virtpci_dev *dev);
76static int virthba_abort_handler(struct scsi_cmnd *scsicmd);
77static int virthba_bus_reset_handler(struct scsi_cmnd *scsicmd);
78static int virthba_device_reset_handler(struct scsi_cmnd *scsicmd);
79static int virthba_host_reset_handler(struct scsi_cmnd *scsicmd);
80static const char *virthba_get_info(struct Scsi_Host *shp);
81static int virthba_ioctl(struct scsi_device *dev, int cmd, void __user *arg);
82static int virthba_queue_command_lck(struct scsi_cmnd *scsicmd,
83				     void (*virthba_cmnd_done)(struct scsi_cmnd *));
84
85#ifdef DEF_SCSI_QCMD
86DEF_SCSI_QCMD(virthba_queue_command)
87#else
88#define virthba_queue_command virthba_queue_command_lck
89#endif
90
91
92static int virthba_slave_alloc(struct scsi_device *scsidev);
93static int virthba_slave_configure(struct scsi_device *scsidev);
94static void virthba_slave_destroy(struct scsi_device *scsidev);
95static int process_incoming_rsps(void *);
96static int virthba_serverup(struct virtpci_dev *virtpcidev);
97static int virthba_serverdown(struct virtpci_dev *virtpcidev, u32 state);
98static void doDiskAddRemove(struct work_struct *work);
99static void virthba_serverdown_complete(struct work_struct *work);
100
101static ssize_t info_proc_read(struct file *file, char __user *buf,
102			      size_t len, loff_t *offset);
103static ssize_t rqwu_proc_write(struct file *file, const char __user *buffer,
104			       size_t count, loff_t *ppos);
105static ssize_t enable_ints_read(struct file *file, char __user *buffer,
106				size_t count, loff_t *ppos);
107static ssize_t enable_ints_write(struct file *file, const char __user *buffer,
108				 size_t count, loff_t *ppos);
109
110/*****************************************************/
111/* Globals                                           */
112/*****************************************************/
113
114static int rsltq_wait_usecs = 4000;	/* Default 4ms */
115static unsigned int MaxBuffLen;
116
117/* Module options */
118static char *virthba_options = "NONE";
119
120static const struct pci_device_id virthba_id_table[] = {
121	{PCI_DEVICE(PCI_VENDOR_ID_UNISYS, PCI_DEVICE_ID_VIRTHBA)},
122	{0},
123};
124
125/* export virthba_id_table */
126MODULE_DEVICE_TABLE(pci, virthba_id_table);
127
128static struct workqueue_struct *virthba_serverdown_workqueue;
129
130static struct virtpci_driver virthba_driver = {
131	.name = "uisvirthba",
132	.version = VERSION,
133	.vertag = NULL,
134	.build_date = __DATE__,
135	.build_time = __TIME__,
136	.id_table = virthba_id_table,
137	.probe = virthba_probe,
138	.remove = virthba_remove,
139	.resume = virthba_serverup,
140	.suspend = virthba_serverdown
141};
142
 143/* The Send and Receive Buffers of the IO Queue may both be full */
144#define MAX_PENDING_REQUESTS (MIN_NUMSIGNALS*2)
145#define INTERRUPT_VECTOR_MASK 0x3F
146
147struct scsipending {
148	char cmdtype;		/* Type of pointer that is being stored */
149	void *sent;		/* The Data being tracked */
150	/* struct scsi_cmnd *type for virthba_queue_command */
151	/* struct uiscmdrsp *type for management commands */
152};
153
154#define VIRTHBA_ERROR_COUNT 30
155#define IOS_ERROR_THRESHOLD 1000
156struct virtdisk_info {
157	U32 valid;
158	U32 channel, id, lun;	/* Disk Path */
159	atomic_t ios_threshold;
160	atomic_t error_count;
161	struct virtdisk_info *next;
162};
163/* Each Scsi_Host has a host_data area that contains this struct. */
164struct virthba_info {
165	struct Scsi_Host *scsihost;
166	struct virtpci_dev *virtpcidev;
167	struct list_head dev_info_list;
168	struct chaninfo chinfo;
169	struct InterruptInfo intr;	/* use recvInterrupt info to receive
170					   interrupts when IOs complete */
171	int interrupt_vector;
 172	struct scsipending pending[MAX_PENDING_REQUESTS];
 173	/* Tracks the requests that have been forwarded to the IOVM
 174	 * and haven't returned yet */
175	unsigned int nextinsert;	/* Start search for next pending
176					   free slot here */
177	spinlock_t privlock;
178	bool serverdown;
179	bool serverchangingstate;
180	unsigned long long acquire_failed_cnt;
181	unsigned long long interrupts_rcvd;
182	unsigned long long interrupts_notme;
183	unsigned long long interrupts_disabled;
184	struct work_struct serverdown_completion;
185	U64 __iomem *flags_addr;
186	atomic_t interrupt_rcvd;
187	wait_queue_head_t rsp_queue;
188	struct virtdisk_info head;
189};
190
191/* Work Data for DARWorkQ */
192struct diskaddremove {
193	U8 add;			/* 0-remove, 1-add */
194	struct Scsi_Host *shost; /* Scsi Host for this virthba instance */
195	U32 channel, id, lun;	/* Disk Path */
196	struct diskaddremove *next;
197};
198
199#define virtpci_dev_to_virthba_virthba_get_info(d) \
200	container_of(d, struct virthba_info, virtpcidev)
201
202static DEVICE_ATTRIBUTE *virthba_shost_attrs[];
203static struct scsi_host_template virthba_driver_template = {
204	.name = "Unisys Virtual HBA",
205	.proc_name = "uisvirthba",
206	.info = virthba_get_info,
207	.ioctl = virthba_ioctl,
208	.queuecommand = virthba_queue_command,
209	.eh_abort_handler = virthba_abort_handler,
210	.eh_device_reset_handler = virthba_device_reset_handler,
211	.eh_bus_reset_handler = virthba_bus_reset_handler,
212	.eh_host_reset_handler = virthba_host_reset_handler,
213	.shost_attrs = virthba_shost_attrs,
214
215#define VIRTHBA_MAX_CMNDS 128
216	.can_queue = VIRTHBA_MAX_CMNDS,
217	.sg_tablesize = 64,	/* largest number of address/length pairs */
218	.this_id = -1,
219	.slave_alloc = virthba_slave_alloc,
220	.slave_configure = virthba_slave_configure,
221	.slave_destroy = virthba_slave_destroy,
222	.use_clustering = ENABLE_CLUSTERING,
223};
224
225struct virthba_devices_open {
226	struct virthba_info *virthbainfo;
227};
228
229static const struct file_operations proc_info_fops = {
230	.read = info_proc_read,
231};
232
233static const struct file_operations proc_rqwu_fops = {
234	.write = rqwu_proc_write,
235};
236
237static const struct file_operations proc_enable_ints_fops = {
238	.read = enable_ints_read,
239	.write = enable_ints_write,
240};
241
242
243#define VIRTHBASOPENMAX 1
244/* array of open devices maintained by open() and close(); */
245static struct virthba_devices_open VirtHbasOpen[VIRTHBASOPENMAX];
246static struct proc_dir_entry *virthba_proc_dir;
247static struct proc_dir_entry *info_proc_entry;
248static struct proc_dir_entry *rqwaitus_proc_entry;
249static struct proc_dir_entry *enable_ints_proc_entry;
250#define INFO_PROC_ENTRY_FN "info"
251#define ENABLE_INTS_ENTRY_FN "enable_ints"
252#define RQWU_PROC_ENTRY_FN "rqwait_usecs"
253#define DIR_PROC_ENTRY "virthba"
254
255/*****************************************************/
 256/* Local Functions                                   */
257/*****************************************************/
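/* Reserve a slot in the pending-command array: starting at nextinsert,
 * scan circularly for a free entry; on success store the command type and
 * pointer there and return the slot index, or return -1 if the array is
 * full.
 */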
258static int
259add_scsipending_entry(struct virthba_info *vhbainfo, char cmdtype, void *new)
260{
261	unsigned long flags;
262	int insert_location;
263
264	spin_lock_irqsave(&vhbainfo->privlock, flags);
265	insert_location = vhbainfo->nextinsert;
266	while (vhbainfo->pending[insert_location].sent != NULL) {
267		insert_location = (insert_location + 1) % MAX_PENDING_REQUESTS;
268		if (insert_location == (int) vhbainfo->nextinsert) {
269			LOGERR("Queue should be full. insert_location<<%d>>  Unable to find open slot for pending commands.\n",
270			     insert_location);
271			spin_unlock_irqrestore(&vhbainfo->privlock, flags);
272			return -1;
273		}
274	}
275
276	vhbainfo->pending[insert_location].cmdtype = cmdtype;
277	vhbainfo->pending[insert_location].sent = new;
278	vhbainfo->nextinsert = (insert_location + 1) % MAX_PENDING_REQUESTS;
279	spin_unlock_irqrestore(&vhbainfo->privlock, flags);
280
281	return insert_location;
282}
283
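/* Same as add_scsipending_entry(), but if the array is full keep retrying
 * roughly every 10 ms until a slot frees up; always returns a slot index.
 */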
284static unsigned int
285add_scsipending_entry_with_wait(struct virthba_info *vhbainfo, char cmdtype,
286				void *new)
287{
288	int insert_location = add_scsipending_entry(vhbainfo, cmdtype, new);
289
290	while (insert_location == -1) {
291		LOGERR("Failed to find empty queue slot.  Waiting to try again\n");
292		set_current_state(TASK_INTERRUPTIBLE);
293		schedule_timeout(msecs_to_jiffies(10));
294		insert_location = add_scsipending_entry(vhbainfo, cmdtype, new);
295	}
296
297	return (unsigned int) insert_location;
298}
299
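/* Clear a pending-command slot and return the pointer that was stored
 * there; if the index is out of range, nothing is cleared and NULL is
 * returned.
 */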
300static void *
301del_scsipending_entry(struct virthba_info *vhbainfo, uintptr_t del)
302{
303	unsigned long flags;
304	void *sent = NULL;
305
306	if (del >= MAX_PENDING_REQUESTS) {
307		LOGERR("Invalid queue position <<%lu>> given to delete. MAX_PENDING_REQUESTS <<%d>>\n",
308		     (unsigned long) del, MAX_PENDING_REQUESTS);
309	} else {
310		spin_lock_irqsave(&vhbainfo->privlock, flags);
311
312		if (vhbainfo->pending[del].sent == NULL)
313			LOGERR("Deleting already cleared queue entry at <<%lu>>.\n",
314			     (unsigned long) del);
315
316		sent = vhbainfo->pending[del].sent;
317
318		vhbainfo->pending[del].cmdtype = 0;
319		vhbainfo->pending[del].sent = NULL;
320		spin_unlock_irqrestore(&vhbainfo->privlock, flags);
321	}
322
323	return sent;
324}
325
326/* DARWorkQ (Disk Add/Remove) */
327static struct work_struct DARWorkQ;
328static struct diskaddremove *DARWorkQHead;
329static spinlock_t DARWorkQLock;
330static unsigned short DARWorkQSched;
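/* Push a diskaddremove request onto the DARWorkQ list and schedule the
 * work item if it is not already scheduled.  The caller must have an
 * 'unsigned long flags' variable in scope for the irqsave locking.
 */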
331#define QUEUE_DISKADDREMOVE(dar) { \
332	spin_lock_irqsave(&DARWorkQLock, flags); \
333	if (!DARWorkQHead) { \
334		DARWorkQHead = dar; \
335		dar->next = NULL; \
336	} \
337	else { \
338		dar->next = DARWorkQHead; \
339		DARWorkQHead = dar; \
340	} \
341	if (!DARWorkQSched) { \
342		schedule_work(&DARWorkQ); \
343		DARWorkQSched = 1; \
344	} \
345	spin_unlock_irqrestore(&DARWorkQLock, flags); \
346}
347
348static inline void
349SendDiskAddRemove(struct diskaddremove *dar)
350{
351	struct scsi_device *sdev;
352	int error;
353
354	sdev = scsi_device_lookup(dar->shost, dar->channel, dar->id, dar->lun);
355	if (sdev) {
356		if (!(dar->add))
357			scsi_remove_device(sdev);
358	} else if (dar->add) {
359		error =
360		    scsi_add_device(dar->shost, dar->channel, dar->id,
361				    dar->lun);
362		if (error)
363			LOGERR("Failed scsi_add_device: host_no=%d[chan=%d:id=%d:lun=%d]\n",
364			     dar->shost->host_no, dar->channel, dar->id,
365			     dar->lun);
366	} else
367		LOGERR("Failed scsi_device_lookup:[chan=%d:id=%d:lun=%d]\n",
368		       dar->channel, dar->id, dar->lun);
369	kfree(dar);
370}
371
372/*****************************************************/
373/* DARWorkQ Handler Thread                           */
374/*****************************************************/
375static void
376doDiskAddRemove(struct work_struct *work)
377{
378	struct diskaddremove *dar;
379	struct diskaddremove *tmphead;
380	int i = 0;
381	unsigned long flags;
382
383	spin_lock_irqsave(&DARWorkQLock, flags);
384	tmphead = DARWorkQHead;
385	DARWorkQHead = NULL;
386	DARWorkQSched = 0;
387	spin_unlock_irqrestore(&DARWorkQLock, flags);
388	while (tmphead) {
389		dar = tmphead;
390		tmphead = dar->next;
391		SendDiskAddRemove(dar);
392		i++;
393	}
394}
395
396/*****************************************************/
397/* Routine to add entry to DARWorkQ                  */
398/*****************************************************/
399static void
400process_disk_notify(struct Scsi_Host *shost, struct uiscmdrsp *cmdrsp)
401{
402	struct diskaddremove *dar;
403	unsigned long flags;
404
405	dar = kzalloc(sizeof(struct diskaddremove), GFP_ATOMIC);
406	if (dar) {
407		dar->add = cmdrsp->disknotify.add;
408		dar->shost = shost;
409		dar->channel = cmdrsp->disknotify.channel;
410		dar->id = cmdrsp->disknotify.id;
411		dar->lun = cmdrsp->disknotify.lun;
412		QUEUE_DISKADDREMOVE(dar);
413	} else {
414		LOGERR("kmalloc failed for dar. host_no=%d[chan=%d:id=%d:lun=%d]\n",
415		     shost->host_no, cmdrsp->disknotify.channel,
416		     cmdrsp->disknotify.id, cmdrsp->disknotify.lun);
417	}
418}
419
420/*****************************************************/
421/* Probe Remove Functions                            */
422/*****************************************************/
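/* Interrupt handler: if the IOVM allows the driver to disable interrupts,
 * mask further interrupts; then, if the response queue is empty, report
 * IRQ_NONE, otherwise count the interrupt and wake the response thread.
 */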
423static irqreturn_t
424virthba_ISR(int irq, void *dev_id)
425{
426	struct virthba_info *virthbainfo = (struct virthba_info *) dev_id;
427	CHANNEL_HEADER __iomem *pChannelHeader;
428	SIGNAL_QUEUE_HEADER __iomem *pqhdr;
429	U64 mask;
430	unsigned long long rc1;
431
432	if (virthbainfo == NULL)
433		return IRQ_NONE;
434	virthbainfo->interrupts_rcvd++;
435	pChannelHeader = virthbainfo->chinfo.queueinfo->chan;
436	if (((readq(&pChannelHeader->Features)
437	      & ULTRA_IO_IOVM_IS_OK_WITH_DRIVER_DISABLING_INTS) != 0)
438	    && ((readq(&pChannelHeader->Features) &
439		 ULTRA_IO_DRIVER_DISABLES_INTS) !=
440		0)) {
441		virthbainfo->interrupts_disabled++;
442		mask = ~ULTRA_CHANNEL_ENABLE_INTS;
443		rc1 = uisqueue_InterlockedAnd(virthbainfo->flags_addr, mask);
444	}
445	if (visor_signalqueue_empty(pChannelHeader, IOCHAN_FROM_IOPART)) {
446		virthbainfo->interrupts_notme++;
447		return IRQ_NONE;
448	}
449	pqhdr = (SIGNAL_QUEUE_HEADER __iomem *)
450		((char __iomem *) pChannelHeader +
451		 readq(&pChannelHeader->oChannelSpace)) + IOCHAN_FROM_IOPART;
452	writeq(readq(&pqhdr->NumInterruptsReceived) + 1,
453	       &pqhdr->NumInterruptsReceived);
454	atomic_set(&virthbainfo->interrupt_rcvd, 1);
455	wake_up_interruptible(&virthbainfo->rsp_queue);
456	return IRQ_HANDLED;
457}
458
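/* Probe: allocate and register a Scsi_Host for the new virtual PCI device,
 * start the thread that processes IOVM responses, request an IRQ so the
 * channel can run interrupt-driven instead of polled, and scan the host.
 */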
459static int
460virthba_probe(struct virtpci_dev *virtpcidev, const struct pci_device_id *id)
461{
462	int error;
463	struct Scsi_Host *scsihost;
464	struct virthba_info *virthbainfo;
465	int rsp;
466	int i;
467	irq_handler_t handler = virthba_ISR;
468	CHANNEL_HEADER __iomem *pChannelHeader;
469	SIGNAL_QUEUE_HEADER __iomem *pqhdr;
470	U64 mask;
471
472	LOGVER("entering virthba_probe...\n");
473	LOGVER("virtpcidev busNo<<%d>>devNo<<%d>>", virtpcidev->busNo,
474	       virtpcidev->deviceNo);
475
476	LOGINF("entering virthba_probe...\n");
477	LOGINF("virtpcidev busNo<<%d>>devNo<<%d>>", virtpcidev->busNo,
478	       virtpcidev->deviceNo);
479	POSTCODE_LINUX_2(VHBA_PROBE_ENTRY_PC, POSTCODE_SEVERITY_INFO);
480	/* call scsi_host_alloc to register a scsi host adapter
481	 * instance - this virthba that has just been created is an
482	 * instance of a scsi host adapter. This scsi_host_alloc
483	 * function allocates a new Scsi_Host struct & performs basic
 484	 * initialization.  The host is not published to the scsi
485	 * midlayer until scsi_add_host is called.
486	 */
487	DBGINF("calling scsi_host_alloc.\n");
488
 489	/* arg 2 passed in is the length of extra space we want allocated
 490	 * with the scsi_host struct for our own use; scsi_host_alloc
 491	 * assigns host_no
 492	 */
493	scsihost = scsi_host_alloc(&virthba_driver_template,
494				   sizeof(struct virthba_info));
495	if (scsihost == NULL)
496		return -ENODEV;
497
498	DBGINF("scsihost: 0x%p, scsihost->this_id: %d, host_no: %d.\n",
499	       scsihost, scsihost->this_id, scsihost->host_no);
500
501	scsihost->this_id = UIS_MAGIC_VHBA;
502	/* linux treats max-channel differently than max-id & max-lun.
503	 * In the latter cases, those two values result in 0 to max-1
504	 * (inclusive) being scanned. But in the case of channels, the
505	 * scan is 0 to max (inclusive); so we will subtract one from
506	 * the max-channel value.
507	 */
508	LOGINF("virtpcidev->scsi.max.max_channel=%u, max_id=%u, max_lun=%u, cmd_per_lun=%u, max_io_size=%u\n",
509	     (unsigned) virtpcidev->scsi.max.max_channel - 1,
510	     (unsigned) virtpcidev->scsi.max.max_id,
511	     (unsigned) virtpcidev->scsi.max.max_lun,
512	     (unsigned) virtpcidev->scsi.max.cmd_per_lun,
513	     (unsigned) virtpcidev->scsi.max.max_io_size);
514	scsihost->max_channel = (unsigned) virtpcidev->scsi.max.max_channel;
515	scsihost->max_id = (unsigned) virtpcidev->scsi.max.max_id;
516	scsihost->max_lun = (unsigned) virtpcidev->scsi.max.max_lun;
517	scsihost->cmd_per_lun = (unsigned) virtpcidev->scsi.max.cmd_per_lun;
518	scsihost->max_sectors =
519	    (unsigned short) (virtpcidev->scsi.max.max_io_size >> 9);
520	scsihost->sg_tablesize =
521	    (unsigned short) (virtpcidev->scsi.max.max_io_size / PAGE_SIZE);
522	if (scsihost->sg_tablesize > MAX_PHYS_INFO)
523		scsihost->sg_tablesize = MAX_PHYS_INFO;
524	LOGINF("scsihost->max_channel=%u, max_id=%u, max_lun=%u, cmd_per_lun=%u, max_sectors=%hu, sg_tablesize=%hu\n",
525	     scsihost->max_channel, scsihost->max_id, scsihost->max_lun,
526	     scsihost->cmd_per_lun, scsihost->max_sectors,
527	     scsihost->sg_tablesize);
528	LOGINF("scsihost->can_queue=%u, scsihost->cmd_per_lun=%u, max_sectors=%hu, sg_tablesize=%hu\n",
529	     scsihost->can_queue, scsihost->cmd_per_lun, scsihost->max_sectors,
530	     scsihost->sg_tablesize);
531
532	DBGINF("calling scsi_add_host\n");
533
534	/* this creates "host%d" in sysfs.  If 2nd argument is NULL,
535	 * then this generic /sys/devices/platform/host?  device is
536	 * created and /sys/scsi_host/host? ->
537	 * /sys/devices/platform/host?  If 2nd argument is not NULL,
538	 * then this generic /sys/devices/<path>/host? is created and
539	 * host? points to that device instead.
540	 */
541	error = scsi_add_host(scsihost, &virtpcidev->generic_dev);
542	if (error) {
543		LOGERR("scsi_add_host ****FAILED 0x%x  TBD - RECOVER\n", error);
544		POSTCODE_LINUX_2(VHBA_PROBE_FAILURE_PC, POSTCODE_SEVERITY_ERR);
 545		/* drop the reference on scsihost that was taken by
 546		 * scsi_host_alloc so the scsi_host gets deleted
 547		 */
548		scsi_host_put(scsihost);
549		return -ENODEV;
550	}
551
552	virthbainfo = (struct virthba_info *) scsihost->hostdata;
553	memset(virthbainfo, 0, sizeof(struct virthba_info));
554	for (i = 0; i < VIRTHBASOPENMAX; i++) {
555		if (VirtHbasOpen[i].virthbainfo == NULL) {
556			VirtHbasOpen[i].virthbainfo = virthbainfo;
557			break;
558		}
559	}
560	virthbainfo->interrupt_vector = -1;
561	virthbainfo->chinfo.queueinfo = &virtpcidev->queueinfo;
562	virthbainfo->virtpcidev = virtpcidev;
563	spin_lock_init(&virthbainfo->chinfo.insertlock);
564
565	DBGINF("generic_dev: 0x%p, queueinfo: 0x%p.\n",
566	       &virtpcidev->generic_dev, &virtpcidev->queueinfo);
567
568	init_waitqueue_head(&virthbainfo->rsp_queue);
569	spin_lock_init(&virthbainfo->privlock);
570	memset(&virthbainfo->pending, 0, sizeof(virthbainfo->pending));
571	virthbainfo->serverdown = false;
572	virthbainfo->serverchangingstate = false;
573
574	virthbainfo->intr = virtpcidev->intr;
 575	/* save scsihost within virthba_info */
576	virthbainfo->scsihost = scsihost;
577
 578	/* save scsihost within virtpci_dev */
579	virtpcidev->scsi.scsihost = scsihost;
580
581	/* Setup workqueue for serverdown messages */
582	INIT_WORK(&virthbainfo->serverdown_completion,
583		  virthba_serverdown_complete);
584
585	writeq(readq(&virthbainfo->chinfo.queueinfo->chan->Features) |
586	       ULTRA_IO_CHANNEL_IS_POLLING,
587	       &virthbainfo->chinfo.queueinfo->chan->Features);
588	/* start thread that will receive scsicmnd responses */
589	DBGINF("starting rsp thread -- queueinfo: 0x%p, threadinfo: 0x%p.\n",
590	       virthbainfo->chinfo.queueinfo, &virthbainfo->chinfo.threadinfo);
591
592	pChannelHeader = virthbainfo->chinfo.queueinfo->chan;
593	pqhdr = (SIGNAL_QUEUE_HEADER __iomem *)
594		((char __iomem *)pChannelHeader +
595		 readq(&pChannelHeader->oChannelSpace)) + IOCHAN_FROM_IOPART;
596	virthbainfo->flags_addr = &pqhdr->FeatureFlags;
597
598	if (!uisthread_start(&virthbainfo->chinfo.threadinfo,
599			     process_incoming_rsps,
600			     virthbainfo, "vhba_incoming")) {
601		LOGERR("uisthread_start rsp ****FAILED\n");
 602		/* drop the reference on scsihost that was taken by
 603		 * scsi_host_alloc so the scsi_host gets deleted
 604		 */
605		POSTCODE_LINUX_2(VHBA_PROBE_FAILURE_PC, POSTCODE_SEVERITY_ERR);
606		scsi_host_put(scsihost);
607		return -ENODEV;
608	}
609	LOGINF("sendInterruptHandle=0x%16llX",
610	       virthbainfo->intr.sendInterruptHandle);
611	LOGINF("recvInterruptHandle=0x%16llX",
612	       virthbainfo->intr.recvInterruptHandle);
613	LOGINF("recvInterruptVector=0x%8X",
614	       virthbainfo->intr.recvInterruptVector);
615	LOGINF("recvInterruptShared=0x%2X",
616	       virthbainfo->intr.recvInterruptShared);
617	LOGINF("scsihost.hostt->name=%s", scsihost->hostt->name);
618	virthbainfo->interrupt_vector =
619	    virthbainfo->intr.recvInterruptHandle & INTERRUPT_VECTOR_MASK;
620	rsp = request_irq(virthbainfo->interrupt_vector, handler, IRQF_SHARED,
621			  scsihost->hostt->name, virthbainfo);
622	if (rsp != 0) {
623		LOGERR("request_irq(%d) uislib_virthba_ISR request failed with rsp=%d\n",
624		       virthbainfo->interrupt_vector, rsp);
625		virthbainfo->interrupt_vector = -1;
626		POSTCODE_LINUX_2(VHBA_PROBE_FAILURE_PC, POSTCODE_SEVERITY_ERR);
627	} else {
628		U64 __iomem *Features_addr =
629		    &virthbainfo->chinfo.queueinfo->chan->Features;
 630		LOGINF("request_irq(%d) uislib_virthba_ISR request succeeded\n",
631		       virthbainfo->interrupt_vector);
632		mask = ~(ULTRA_IO_CHANNEL_IS_POLLING |
633			 ULTRA_IO_DRIVER_DISABLES_INTS);
634		uisqueue_InterlockedAnd(Features_addr, mask);
635		mask = ULTRA_IO_DRIVER_ENABLES_INTS;
636		uisqueue_InterlockedOr(Features_addr, mask);
637		rsltq_wait_usecs = 4000000;
638	}
639
640	DBGINF("calling scsi_scan_host.\n");
641	scsi_scan_host(scsihost);
642	DBGINF("return from scsi_scan_host.\n");
643
644	LOGINF("virthba added scsihost:0x%p\n", scsihost);
645	POSTCODE_LINUX_2(VHBA_PROBE_EXIT_PC, POSTCODE_SEVERITY_INFO);
646	return 0;
647}
648
649static void
650virthba_remove(struct virtpci_dev *virtpcidev)
651{
652	struct virthba_info *virthbainfo;
653	struct Scsi_Host *scsihost =
654	    (struct Scsi_Host *) virtpcidev->scsi.scsihost;
655
656	LOGINF("virtpcidev busNo<<%d>>devNo<<%d>>", virtpcidev->busNo,
657	       virtpcidev->deviceNo);
658	virthbainfo = (struct virthba_info *) scsihost->hostdata;
659	if (virthbainfo->interrupt_vector != -1)
660		free_irq(virthbainfo->interrupt_vector, virthbainfo);
661	LOGINF("Removing virtpcidev: 0x%p, virthbainfo: 0x%p\n", virtpcidev,
662	       virthbainfo);
663
664	DBGINF("removing scsihost: 0x%p, scsihost->this_id: %d\n", scsihost,
665	       scsihost->this_id);
666	scsi_remove_host(scsihost);
667
668	DBGINF("stopping thread.\n");
669	uisthread_stop(&virthbainfo->chinfo.threadinfo);
670
671	DBGINF("calling scsi_host_put\n");
672
 673	/* drop the reference on scsihost that was taken by
 674	 * scsi_host_alloc so the scsi_host gets deleted
 675	 */
676	scsi_host_put(scsihost);
677	LOGINF("virthba removed scsi_host.\n");
678}
679
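/* Send a vdisk management request (e.g. acquire or release of a LUN) to
 * the IOVM and block until the response thread posts the result.
 */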
680static int
681forward_vdiskmgmt_command(VDISK_MGMT_TYPES vdiskcmdtype,
682			  struct Scsi_Host *scsihost,
683			  struct uisscsi_dest *vdest)
684{
685	struct uiscmdrsp *cmdrsp;
686	struct virthba_info *virthbainfo =
687	    (struct virthba_info *) scsihost->hostdata;
688	int notifyresult = 0xffff;
689	wait_queue_head_t notifyevent;
690
691	LOGINF("vDiskMgmt:%d %d:%d:%d\n", vdiskcmdtype,
692	       vdest->channel, vdest->id, vdest->lun);
693
694	if (virthbainfo->serverdown || virthbainfo->serverchangingstate) {
695		DBGINF("Server is down/changing state. Returning Failure.\n");
696		return FAILED;
697	}
698
699	cmdrsp = kzalloc(SIZEOF_CMDRSP, GFP_ATOMIC);
700	if (cmdrsp == NULL) {
701		LOGERR("kmalloc of cmdrsp failed.\n");
702		return FAILED;	/* reject */
703	}
704
705	init_waitqueue_head(&notifyevent);
706
707	/* issue VDISK_MGMT_CMD
708	 * set type to command - as opposed to task mgmt
709	 */
710	cmdrsp->cmdtype = CMD_VDISKMGMT_TYPE;
711	/* specify the event that has to be triggered when this cmd is
712	 * complete
713	 */
714	cmdrsp->vdiskmgmt.notify = (void *) &notifyevent;
715	cmdrsp->vdiskmgmt.notifyresult = (void *) &notifyresult;
716
717	/* save destination */
718	cmdrsp->vdiskmgmt.vdisktype = vdiskcmdtype;
719	cmdrsp->vdiskmgmt.vdest.channel = vdest->channel;
720	cmdrsp->vdiskmgmt.vdest.id = vdest->id;
721	cmdrsp->vdiskmgmt.vdest.lun = vdest->lun;
722	cmdrsp->vdiskmgmt.scsicmd =
723	    (void *) (uintptr_t)
724		add_scsipending_entry_with_wait(virthbainfo, CMD_VDISKMGMT_TYPE,
725						(void *) cmdrsp);
726
727	uisqueue_put_cmdrsp_with_lock_client(virthbainfo->chinfo.queueinfo,
728					     cmdrsp, IOCHAN_TO_IOPART,
729					     &virthbainfo->chinfo.insertlock,
730					     DONT_ISSUE_INTERRUPT, (U64) NULL,
731					     OK_TO_WAIT, "vhba");
732	LOGINF("VdiskMgmt waiting on event notifyevent=0x%p\n",
 733	       cmdrsp->vdiskmgmt.notify);
734	wait_event(notifyevent, notifyresult != 0xffff);
735	LOGINF("VdiskMgmt complete; result:%d\n", cmdrsp->vdiskmgmt.result);
736	kfree(cmdrsp);
737	return SUCCESS;
738}
739
740/*****************************************************/
741/* Scsi Host support functions                       */
742/*****************************************************/
743
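/* Build a task-management cmdrsp for the given device, queue it to the
 * IOVM, and block until the response thread fills in the notify result.
 */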
744static int
745forward_taskmgmt_command(TASK_MGMT_TYPES tasktype, struct scsi_device *scsidev)
746{
747	struct uiscmdrsp *cmdrsp;
748	struct virthba_info *virthbainfo =
749	    (struct virthba_info *) scsidev->host->hostdata;
750	int notifyresult = 0xffff;
751	wait_queue_head_t notifyevent;
752
753	LOGINF("TaskMgmt:%d %d:%d:%d\n", tasktype,
754	       scsidev->channel, scsidev->id, scsidev->lun);
755
756	if (virthbainfo->serverdown || virthbainfo->serverchangingstate) {
757		DBGINF("Server is down/changing state. Returning Failure.\n");
758		return FAILED;
759	}
760
761	cmdrsp = kzalloc(SIZEOF_CMDRSP, GFP_ATOMIC);
762	if (cmdrsp == NULL) {
763		LOGERR("kmalloc of cmdrsp failed.\n");
764		return FAILED;	/* reject */
765	}
766
767	init_waitqueue_head(&notifyevent);
768
769	/* issue TASK_MGMT_ABORT_TASK */
770	/* set type to command - as opposed to task mgmt */
771	cmdrsp->cmdtype = CMD_SCSITASKMGMT_TYPE;
772	/* specify the event that has to be triggered when this */
773	/* cmd is complete */
774	cmdrsp->scsitaskmgmt.notify = (void *) &notifyevent;
775	cmdrsp->scsitaskmgmt.notifyresult = (void *) &notifyresult;
776
777	/* save destination */
778	cmdrsp->scsitaskmgmt.tasktype = tasktype;
779	cmdrsp->scsitaskmgmt.vdest.channel = scsidev->channel;
780	cmdrsp->scsitaskmgmt.vdest.id = scsidev->id;
781	cmdrsp->scsitaskmgmt.vdest.lun = scsidev->lun;
782	cmdrsp->scsitaskmgmt.scsicmd =
783	    (void *) (uintptr_t)
784		add_scsipending_entry_with_wait(virthbainfo,
785						CMD_SCSITASKMGMT_TYPE,
786						(void *) cmdrsp);
787
788	uisqueue_put_cmdrsp_with_lock_client(virthbainfo->chinfo.queueinfo,
789					     cmdrsp, IOCHAN_TO_IOPART,
790					     &virthbainfo->chinfo.insertlock,
791					     DONT_ISSUE_INTERRUPT, (U64) NULL,
792					     OK_TO_WAIT, "vhba");
793	LOGINF("TaskMgmt waiting on event notifyevent=0x%p\n",
794	       cmdrsp->scsitaskmgmt.notify);
795	wait_event(notifyevent, notifyresult != 0xffff);
796	LOGINF("TaskMgmt complete; result:%d\n", cmdrsp->scsitaskmgmt.result);
797	kfree(cmdrsp);
798	return SUCCESS;
799}
800
 801/* The abort handler returns SUCCESS if it succeeds in making the LLDD
802 * and all related hardware forget about the scmd.
803 */
804static int
805virthba_abort_handler(struct scsi_cmnd *scsicmd)
806{
807	/* issue TASK_MGMT_ABORT_TASK */
808	struct scsi_device *scsidev;
809	struct virtdisk_info *vdisk;
810
811	scsidev = scsicmd->device;
812	for (vdisk = &((struct virthba_info *) scsidev->host->hostdata)->head;
813	     vdisk->next; vdisk = vdisk->next) {
814		if ((scsidev->channel == vdisk->channel)
815		    && (scsidev->id == vdisk->id)
816		    && (scsidev->lun == vdisk->lun)) {
817			if (atomic_read(&vdisk->error_count) <
818			    VIRTHBA_ERROR_COUNT) {
819				atomic_inc(&vdisk->error_count);
820				POSTCODE_LINUX_2(VHBA_COMMAND_HANDLER_PC,
821						 POSTCODE_SEVERITY_INFO);
822			} else
823				atomic_set(&vdisk->ios_threshold,
824					   IOS_ERROR_THRESHOLD);
825		}
826	}
827	return forward_taskmgmt_command(TASK_MGMT_ABORT_TASK, scsicmd->device);
828}
829
830static int
831virthba_bus_reset_handler(struct scsi_cmnd *scsicmd)
832{
833	/* issue TASK_MGMT_TARGET_RESET for each target on the bus */
834	struct scsi_device *scsidev;
835	struct virtdisk_info *vdisk;
836
837	scsidev = scsicmd->device;
838	for (vdisk = &((struct virthba_info *) scsidev->host->hostdata)->head;
839	     vdisk->next; vdisk = vdisk->next) {
840		if ((scsidev->channel == vdisk->channel)
841		    && (scsidev->id == vdisk->id)
842		    && (scsidev->lun == vdisk->lun)) {
843			if (atomic_read(&vdisk->error_count) <
844			    VIRTHBA_ERROR_COUNT) {
845				atomic_inc(&vdisk->error_count);
846				POSTCODE_LINUX_2(VHBA_COMMAND_HANDLER_PC,
847						 POSTCODE_SEVERITY_INFO);
848			} else
849				atomic_set(&vdisk->ios_threshold,
850					   IOS_ERROR_THRESHOLD);
851		}
852	}
853	return forward_taskmgmt_command(TASK_MGMT_BUS_RESET, scsicmd->device);
854}
855
856static int
857virthba_device_reset_handler(struct scsi_cmnd *scsicmd)
858{
859	/* issue TASK_MGMT_LUN_RESET */
860	struct scsi_device *scsidev;
861	struct virtdisk_info *vdisk;
862
863	scsidev = scsicmd->device;
864	for (vdisk = &((struct virthba_info *) scsidev->host->hostdata)->head;
865	     vdisk->next; vdisk = vdisk->next) {
866		if ((scsidev->channel == vdisk->channel)
867		    && (scsidev->id == vdisk->id)
868		    && (scsidev->lun == vdisk->lun)) {
869			if (atomic_read(&vdisk->error_count) <
870			    VIRTHBA_ERROR_COUNT) {
871				atomic_inc(&vdisk->error_count);
872				POSTCODE_LINUX_2(VHBA_COMMAND_HANDLER_PC,
873						 POSTCODE_SEVERITY_INFO);
874			} else
875				atomic_set(&vdisk->ios_threshold,
876					   IOS_ERROR_THRESHOLD);
877		}
878	}
879	return forward_taskmgmt_command(TASK_MGMT_LUN_RESET, scsicmd->device);
880}
881
882static int
883virthba_host_reset_handler(struct scsi_cmnd *scsicmd)
884{
885	/* issue TASK_MGMT_TARGET_RESET for each target on each bus for host */
886	LOGERR("virthba_host_reset_handler Not yet implemented\n");
887	return SUCCESS;
888}
889
890static char virthba_get_info_str[256];
891
892static const char *
893virthba_get_info(struct Scsi_Host *shp)
894{
895	/* Return version string */
896	sprintf(virthba_get_info_str, "virthba, version %s\n", VIRTHBA_VERSION);
897	return virthba_get_info_str;
898}
899
900static int
901virthba_ioctl(struct scsi_device *dev, int cmd, void __user *arg)
902{
903	DBGINF("In virthba_ioctl: ioctl: cmd=0x%x\n", cmd);
904	return -EINVAL;
905}
906
907/* This returns SCSI_MLQUEUE_DEVICE_BUSY if the signal queue to IOpart
908 * is full.
909 */
910static int
911virthba_queue_command_lck(struct scsi_cmnd *scsicmd,
912			  void (*virthba_cmnd_done)(struct scsi_cmnd *))
913{
914	struct scsi_device *scsidev = scsicmd->device;
915	int insert_location;
916	unsigned char op;
917	unsigned char *cdb = scsicmd->cmnd;
918	struct Scsi_Host *scsihost = scsidev->host;
919	struct uiscmdrsp *cmdrsp;
920	unsigned int i;
921	struct virthba_info *virthbainfo =
922	    (struct virthba_info *) scsihost->hostdata;
923	struct scatterlist *sg = NULL;
924	struct scatterlist *sgl = NULL;
925	int sg_failed = 0;
926
927	if (virthbainfo->serverdown || virthbainfo->serverchangingstate) {
928		DBGINF("Server is down/changing state. Returning SCSI_MLQUEUE_DEVICE_BUSY.\n");
929		return SCSI_MLQUEUE_DEVICE_BUSY;
930	}
931
932	cmdrsp = kzalloc(SIZEOF_CMDRSP, GFP_ATOMIC);
933	if (cmdrsp == NULL) {
934		LOGERR("kmalloc of cmdrsp failed.\n");
935		return 1;	/* reject the command */
936	}
937
938	/* now saving everything we need from scsi_cmd into cmdrsp
939	 * before we queue cmdrsp set type to command - as opposed to
940	 * task mgmt
941	 */
942	cmdrsp->cmdtype = CMD_SCSI_TYPE;
943	/* save the pending insertion location.  Deletion from pending
944	 * will return the scsicmd pointer for completion
945	 */
946	insert_location =
947	    add_scsipending_entry(virthbainfo, CMD_SCSI_TYPE, (void *) scsicmd);
948	if (insert_location != -1) {
949		cmdrsp->scsi.scsicmd = (void *) (uintptr_t) insert_location;
950	} else {
951		LOGERR("Queue is full. Returning busy.\n");
952		kfree(cmdrsp);
953		return SCSI_MLQUEUE_DEVICE_BUSY;
954	}
 955	/* save the done function that we must call when the cmd is complete */
956	scsicmd->scsi_done = virthba_cmnd_done;
957	/* save destination */
958	cmdrsp->scsi.vdest.channel = scsidev->channel;
959	cmdrsp->scsi.vdest.id = scsidev->id;
960	cmdrsp->scsi.vdest.lun = scsidev->lun;
961	/* save datadir */
962	cmdrsp->scsi.data_dir = scsicmd->sc_data_direction;
963	memcpy(cmdrsp->scsi.cmnd, cdb, MAX_CMND_SIZE);
964
965	cmdrsp->scsi.bufflen = scsi_bufflen(scsicmd);
966
967	/* keep track of the max buffer length so far. */
968	if (cmdrsp->scsi.bufflen > MaxBuffLen)
969		MaxBuffLen = cmdrsp->scsi.bufflen;
970
971	if (scsi_sg_count(scsicmd) > MAX_PHYS_INFO) {
972		LOGERR("scsicmd use_sg:%d greater than MAX:%d\n",
973		       scsi_sg_count(scsicmd), MAX_PHYS_INFO);
974		del_scsipending_entry(virthbainfo, (uintptr_t) insert_location);
975		kfree(cmdrsp);
976		return 1;	/* reject the command */
977	}
978
979	/* This is what we USED to do when we assumed we were running */
980	/* uissd & virthba on the same Linux system. */
981	/* cmdrsp->scsi.buffer = scsicmd->request_buffer; */
982	/* The following code does NOT make that assumption. */
983	/* convert buffer to phys information */
984	if (scsi_sg_count(scsicmd) == 0) {
985		if (scsi_bufflen(scsicmd) > 0) {
986			LOGERR("**** FAILED No scatter list for bufflen > 0\n");
987			BUG_ON(scsi_sg_count(scsicmd) == 0);
988		}
989		DBGINF("No sg; buffer:0x%p bufflen:%d\n",
990		       scsi_sglist(scsicmd), scsi_bufflen(scsicmd));
991	} else {
992		/* buffer is scatterlist - copy it out */
993		sgl = scsi_sglist(scsicmd);
994
995		for_each_sg(sgl, sg, scsi_sg_count(scsicmd), i) {
996
997			cmdrsp->scsi.gpi_list[i].address = sg_phys(sg);
998			cmdrsp->scsi.gpi_list[i].length = sg->length;
999			if ((i != 0) && (sg->offset != 0))
1000				LOGINF("Offset on a sg_entry other than zero =<<%d>>.\n",
1001				     sg->offset);
1002		}
1003
1004		if (sg_failed) {
1005			LOGERR("Start sg_list dump (entries %d, bufflen %d)...\n",
1006			     scsi_sg_count(scsicmd), cmdrsp->scsi.bufflen);
1007			for_each_sg(sgl, sg, scsi_sg_count(scsicmd), i) {
1008				LOGERR("   Entry(%d): page->[0x%p], phys->[0x%Lx], off(%d), len(%d)\n",
1009				     i, sg_page(sg),
1010				     (unsigned long long) sg_phys(sg),
1011				     sg->offset, sg->length);
1012			}
1013			LOGERR("Done sg_list dump.\n");
1014			/* BUG(); ***** For now, let it fail in uissd
1015			 * if it is a problem, as it might just
1016			 * work
1017			 */
1018		}
1019
1020		cmdrsp->scsi.guest_phys_entries = scsi_sg_count(scsicmd);
1021	}
1022
1023	op = cdb[0];
1024	i = uisqueue_put_cmdrsp_with_lock_client(virthbainfo->chinfo.queueinfo,
1025						 cmdrsp, IOCHAN_TO_IOPART,
1026						 &virthbainfo->chinfo.
1027						 insertlock,
1028						 DONT_ISSUE_INTERRUPT,
1029						 (U64) NULL, DONT_WAIT, "vhba");
1030	if (i == 0) {
1031		/* queue must be full - and we said don't wait - return busy */
1032		LOGERR("uisqueue_put_cmdrsp_with_lock ****FAILED\n");
1033		kfree(cmdrsp);
1034		del_scsipending_entry(virthbainfo, (uintptr_t) insert_location);
1035		return SCSI_MLQUEUE_DEVICE_BUSY;
1036	}
1037
1038	/* we're done with cmdrsp space - data from it has been copied
1039	 * into channel - free it now.
1040	 */
1041	kfree(cmdrsp);
1042	return 0;		/* non-zero implies host/device is busy */
1043}
1044
1045static int
1046virthba_slave_alloc(struct scsi_device *scsidev)
1047{
 1048	/* this is called by the midlayer before scanning for new devices -
 1049	 * the LLD can alloc any struct & do init if needed.
1050	 */
1051	struct virtdisk_info *vdisk;
1052	struct virtdisk_info *tmpvdisk;
1053	struct virthba_info *virthbainfo;
1054	struct Scsi_Host *scsihost = (struct Scsi_Host *) scsidev->host;
1055
1056	virthbainfo = (struct virthba_info *) scsihost->hostdata;
1057	if (!virthbainfo) {
1058		LOGERR("Could not find virthba_info for scsihost\n");
1059		return 0;	/* even though we errored, treat as success */
1060	}
1061	for (vdisk = &virthbainfo->head; vdisk->next; vdisk = vdisk->next) {
1062		if (vdisk->next->valid &&
1063		    (vdisk->next->channel == scsidev->channel) &&
1064		    (vdisk->next->id == scsidev->id) &&
1065		    (vdisk->next->lun == scsidev->lun))
1066			return 0;
1067	}
1068	tmpvdisk = kzalloc(sizeof(struct virtdisk_info), GFP_ATOMIC);
1069	if (!tmpvdisk) {	/* error allocating */
1070		LOGERR("Could not allocate memory for disk\n");
1071		return 0;
1072	}
1073
1074	tmpvdisk->channel = scsidev->channel;
1075	tmpvdisk->id = scsidev->id;
1076	tmpvdisk->lun = scsidev->lun;
1077	tmpvdisk->valid = 1;
1078	vdisk->next = tmpvdisk;
1079	return 0;		/* success */
1080}
1081
1082static int
1083virthba_slave_configure(struct scsi_device *scsidev)
1084{
1085	return 0;		/* success */
1086}
1087
1088static void
1089virthba_slave_destroy(struct scsi_device *scsidev)
1090{
1091	/* midlevel calls this after device has been quiesced and
1092	 * before it is to be deleted.
1093	 */
1094	struct virtdisk_info *vdisk, *delvdisk;
1095	struct virthba_info *virthbainfo;
1096	struct Scsi_Host *scsihost = (struct Scsi_Host *) scsidev->host;
1097
1098	virthbainfo = (struct virthba_info *) scsihost->hostdata;
1099	if (!virthbainfo)
1100		LOGERR("Could not find virthba_info for scsihost\n");
1101	for (vdisk = &virthbainfo->head; vdisk->next; vdisk = vdisk->next) {
1102		if (vdisk->next->valid &&
1103		    (vdisk->next->channel == scsidev->channel) &&
1104		    (vdisk->next->id == scsidev->id) &&
1105		    (vdisk->next->lun == scsidev->lun)) {
1106			delvdisk = vdisk->next;
1107			vdisk->next = vdisk->next->next;
1108			kfree(delvdisk);
1109			return;
1110		}
1111	}
1112	return;
1113}
1114
1115/*****************************************************/
1116/* Scsi Cmnd support thread                          */
1117/*****************************************************/
1118
1119static void
1120do_scsi_linuxstat(struct uiscmdrsp *cmdrsp, struct scsi_cmnd *scsicmd)
1121{
1122	struct virtdisk_info *vdisk;
1123	struct scsi_device *scsidev;
1124	struct sense_data *sd;
1125
1126	scsidev = scsicmd->device;
1127	memcpy(scsicmd->sense_buffer, cmdrsp->scsi.sensebuf, MAX_SENSE_SIZE);
1128	sd = (struct sense_data *) scsicmd->sense_buffer;
1129
1130	/* Do not log errors for disk-not-present inquiries */
1131	if ((cmdrsp->scsi.cmnd[0] == INQUIRY) &&
1132	    (host_byte(cmdrsp->scsi.linuxstat) == DID_NO_CONNECT) &&
1133	    (cmdrsp->scsi.addlstat == ADDL_SEL_TIMEOUT))
1134		return;
1135
1136	/* Okay see what our error_count is here.... */
1137	for (vdisk = &((struct virthba_info *) scsidev->host->hostdata)->head;
1138	     vdisk->next; vdisk = vdisk->next) {
1139		if ((scsidev->channel != vdisk->channel)
1140		    || (scsidev->id != vdisk->id)
1141		    || (scsidev->lun != vdisk->lun))
1142			continue;
1143
1144		if (atomic_read(&vdisk->error_count) < VIRTHBA_ERROR_COUNT) {
1145			atomic_inc(&vdisk->error_count);
1146			LOGERR("SCSICMD ****FAILED scsicmd:0x%p op:0x%x <%d:%d:%d:%d> 0x%x-0x%x-0x%x-0x%x-0x%x.\n",
1147			       scsicmd, cmdrsp->scsi.cmnd[0],
1148			       scsidev->host->host_no, scsidev->id,
1149			       scsidev->channel, scsidev->lun,
1150			       cmdrsp->scsi.linuxstat, sd->Valid, sd->SenseKey,
1151			       sd->AdditionalSenseCode,
1152			       sd->AdditionalSenseCodeQualifier);
1153			if (atomic_read(&vdisk->error_count) ==
1154			    VIRTHBA_ERROR_COUNT) {
 1155				LOGERR("Throttling SCSICMD errors disk <%d:%d:%d:%d>\n",
1156				     scsidev->host->host_no, scsidev->id,
1157				     scsidev->channel, scsidev->lun);
1158			}
1159			atomic_set(&vdisk->ios_threshold, IOS_ERROR_THRESHOLD);
1160		}
1161	}
1162}
1163
1164static void
1165do_scsi_nolinuxstat(struct uiscmdrsp *cmdrsp, struct scsi_cmnd *scsicmd)
1166{
1167	struct scsi_device *scsidev;
1168	unsigned char buf[36];
1169	struct scatterlist *sg;
1170	unsigned int i;
1171	char *thispage;
1172	char *thispage_orig;
1173	int bufind = 0;
1174	struct virtdisk_info *vdisk;
1175
1176	scsidev = scsicmd->device;
1177	if ((cmdrsp->scsi.cmnd[0] == INQUIRY)
1178	    && (cmdrsp->scsi.bufflen >= MIN_INQUIRY_RESULT_LEN)) {
1179		if (cmdrsp->scsi.no_disk_result == 0)
1180			return;
1181
1182		/* Linux scsi code is weird; it wants
1183		 * a device at Lun 0 to issue report
1184		 * luns, but we don't want a disk
1185		 * there so we'll present a processor
1186		 * there. */
1187		SET_NO_DISK_INQUIRY_RESULT(buf, cmdrsp->scsi.bufflen,
1188					   scsidev->lun,
1189					   DEV_DISK_CAPABLE_NOT_PRESENT,
1190					   DEV_NOT_CAPABLE);
1191
1192		if (scsi_sg_count(scsicmd) == 0) {
1193			if (scsi_bufflen(scsicmd) > 0) {
1194				LOGERR("**** FAILED No scatter list for bufflen > 0\n");
1195				BUG_ON(scsi_sg_count(scsicmd) ==
1196				       0);
1197			}
1198			memcpy(scsi_sglist(scsicmd), buf,
1199			       cmdrsp->scsi.bufflen);
1200			return;
1201		}
1202
1203		sg = scsi_sglist(scsicmd);
1204		for (i = 0; i < scsi_sg_count(scsicmd); i++) {
1205			DBGVER("copying OUT OF buf into 0x%p %d\n",
1206			     sg_page(sg + i), sg[i].length);
1207			thispage_orig = kmap_atomic(sg_page(sg + i));
1208			thispage = (void *) ((unsigned long)thispage_orig |
1209					     sg[i].offset);
1210			memcpy(thispage, buf + bufind, sg[i].length);
1211			kunmap_atomic(thispage_orig);
1212			bufind += sg[i].length;
1213		}
1214	} else {
1215
1216		vdisk = &((struct virthba_info *)scsidev->host->hostdata)->head;
1217		for ( ; vdisk->next; vdisk = vdisk->next) {
1218			if ((scsidev->channel != vdisk->channel)
1219			    || (scsidev->id != vdisk->id)
1220			    || (scsidev->lun != vdisk->lun))
1221				continue;
1222
1223			if (atomic_read(&vdisk->ios_threshold) > 0) {
1224				atomic_dec(&vdisk->ios_threshold);
1225				if (atomic_read(&vdisk->ios_threshold) == 0) {
1226					LOGERR("Resetting error count for disk\n");
1227					atomic_set(&vdisk->error_count, 0);
1228				}
1229			}
1230		}
1231	}
1232}
1233
1234static void
1235complete_scsi_command(struct uiscmdrsp *cmdrsp, struct scsi_cmnd *scsicmd)
1236{
1237	DBGINF("cmdrsp: 0x%p, scsistat:0x%x.\n", cmdrsp, cmdrsp->scsi.scsistat);
1238
1239	/* take what we need out of cmdrsp and complete the scsicmd */
1240	scsicmd->result = cmdrsp->scsi.linuxstat;
1241	if (cmdrsp->scsi.linuxstat)
1242		do_scsi_linuxstat(cmdrsp, scsicmd);
1243	else
1244		do_scsi_nolinuxstat(cmdrsp, scsicmd);
1245
1246	if (scsicmd->scsi_done) {
1247		DBGVER("Scsi_DONE\n");
1248		scsicmd->scsi_done(scsicmd);
1249	}
1250}
1251
1252static inline void
1253complete_vdiskmgmt_command(struct uiscmdrsp *cmdrsp)
1254{
 1255	/* copy the result of the vdiskmgmt and */
1256	/* wake up the error handler that is waiting for this */
1257	*(int *) cmdrsp->vdiskmgmt.notifyresult = cmdrsp->vdiskmgmt.result;
1258	wake_up_all((wait_queue_head_t *) cmdrsp->vdiskmgmt.notify);
1259	LOGINF("set notify result to %d\n", cmdrsp->vdiskmgmt.result);
1260}
1261
1262static inline void
1263complete_taskmgmt_command(struct uiscmdrsp *cmdrsp)
1264{
1265	/* copy the result of the taskmgmt and */
1266	/* wake up the error handler that is waiting for this */
1267	*(int *) cmdrsp->scsitaskmgmt.notifyresult =
1268	    cmdrsp->scsitaskmgmt.result;
1269	wake_up_all((wait_queue_head_t *) cmdrsp->scsitaskmgmt.notify);
1270	LOGINF("set notify result to %d\n", cmdrsp->scsitaskmgmt.result);
1271}
1272
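/* Pull completed cmdrsps off the IOCHAN_FROM_IOPART queue until it is
 * empty, matching each one back to its pending slot and completing the
 * original SCSI, task-management, vdisk-management, or disk-notify request.
 */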
1273static void
1274drain_queue(struct virthba_info *virthbainfo, struct chaninfo *dc,
1275		struct uiscmdrsp *cmdrsp)
1276{
1277	unsigned long flags;
1278	int qrslt = 0;
1279	struct scsi_cmnd *scsicmd;
1280	struct Scsi_Host *shost = virthbainfo->scsihost;
1281
1282	while (1) {
1283		spin_lock_irqsave(&virthbainfo->chinfo.insertlock, flags);
1284		if (!ULTRA_CHANNEL_CLIENT_ACQUIRE_OS(dc->queueinfo->chan,
1285						     "vhba", NULL)) {
1286			spin_unlock_irqrestore(&virthbainfo->chinfo.insertlock,
1287					       flags);
1288			virthbainfo->acquire_failed_cnt++;
1289			break;
1290		}
1291		qrslt = uisqueue_get_cmdrsp(dc->queueinfo, cmdrsp,
1292					    IOCHAN_FROM_IOPART);
1293		ULTRA_CHANNEL_CLIENT_RELEASE_OS(dc->queueinfo->chan,
1294						"vhba", NULL);
1295		spin_unlock_irqrestore(&virthbainfo->chinfo.insertlock, flags);
1296		if (qrslt == 0)
1297			break;
1298		if (cmdrsp->cmdtype == CMD_SCSI_TYPE) {
1299			/* scsicmd location is returned by the
1300			 * deletion
1301			 */
1302			scsicmd = del_scsipending_entry(virthbainfo,
1303					(uintptr_t) cmdrsp->scsi.scsicmd);
1304			if (!scsicmd)
1305				break;
1306			/* complete the orig cmd */
1307			complete_scsi_command(cmdrsp, scsicmd);
1308		} else if (cmdrsp->cmdtype == CMD_SCSITASKMGMT_TYPE) {
1309			if (!del_scsipending_entry(virthbainfo,
1310				   (uintptr_t) cmdrsp->scsitaskmgmt.scsicmd))
1311				break;
1312			complete_taskmgmt_command(cmdrsp);
1313		} else if (cmdrsp->cmdtype == CMD_NOTIFYGUEST_TYPE) {
1314			/* The vHba pointer has no meaning in
1315			 * a Client/Guest Partition. Let's be
1316			 * safe and set it to NULL now.  Do
1317			 * not use it here! */
1318			cmdrsp->disknotify.vHba = NULL;
1319			process_disk_notify(shost, cmdrsp);
1320		} else if (cmdrsp->cmdtype == CMD_VDISKMGMT_TYPE) {
1321			if (!del_scsipending_entry(virthbainfo,
1322				   (uintptr_t) cmdrsp->vdiskmgmt.scsicmd))
1323				break;
1324			complete_vdiskmgmt_command(cmdrsp);
1325		} else
1326			LOGERR("Invalid cmdtype %d\n", cmdrsp->cmdtype);
1327		/* cmdrsp is now available for reuse */
1328	}
1329}
1330
1331
1332/* main function for the thread that waits for scsi commands to arrive
1333 * in a specified queue
1334 */
1335static int
1336process_incoming_rsps(void *v)
1337{
1338	struct virthba_info *virthbainfo = v;
1339	struct chaninfo *dc = &virthbainfo->chinfo;
1340	struct uiscmdrsp *cmdrsp = NULL;
1341	const int SZ = sizeof(struct uiscmdrsp);
1342	U64 mask;
1343	unsigned long long rc1;
1344
1345	UIS_DAEMONIZE("vhba_incoming");
1346	/* alloc once and reuse */
1347	cmdrsp = kmalloc(SZ, GFP_ATOMIC);
1348	if (cmdrsp == NULL) {
1349		LOGERR("process_incoming_rsps ****FAILED to malloc - thread exiting\n");
1350		complete_and_exit(&dc->threadinfo.has_stopped, 0);
1351		return 0;
1352	}
1353	mask = ULTRA_CHANNEL_ENABLE_INTS;
1354	while (1) {
1355		wait_event_interruptible_timeout(virthbainfo->rsp_queue,
1356			 (atomic_read(&virthbainfo->interrupt_rcvd) == 1),
1357					 usecs_to_jiffies(rsltq_wait_usecs));
1358		atomic_set(&virthbainfo->interrupt_rcvd, 0);
1359		/* drain queue */
1360		drain_queue(virthbainfo, dc, cmdrsp);
1361		rc1 = uisqueue_InterlockedOr(virthbainfo->flags_addr, mask);
1362		if (dc->threadinfo.should_stop)
1363			break;
1364	}
1365
1366	kfree(cmdrsp);
1367
1368	DBGINF("exiting processing incoming rsps.\n");
1369	complete_and_exit(&dc->threadinfo.has_stopped, 0);
1370}
1371
1372/*****************************************************/
 1373/* proc filesystem functions                         */
1374/*****************************************************/
1375
1376static ssize_t
1377info_proc_read(struct file *file, char __user *buf, size_t len, loff_t *offset)
1378{
1379	int length = 0;
1380	U64 phys_flags_addr;
1381	int i;
1382	struct virthba_info *virthbainfo;
1383	char *vbuf;
1384	loff_t pos = *offset;
1385
1386	if (pos < 0)
1387		return -EINVAL;
1388
1389	if (pos > 0 || !len)
1390		return 0;
1391
1392	vbuf = kzalloc(len, GFP_KERNEL);
1393	if (!vbuf)
1394		return -ENOMEM;
1395
1396	for (i = 0; i < VIRTHBASOPENMAX; i++) {
1397		if (VirtHbasOpen[i].virthbainfo == NULL)
1398			continue;
1399
1400		virthbainfo = VirtHbasOpen[i].virthbainfo;
1401		length += sprintf(vbuf + length, "CHANSOCK is not defined.\n");
1402
1403		length += sprintf(vbuf + length, "MaxBuffLen:%d\n", MaxBuffLen);
1404
1405		length += sprintf(vbuf + length, "\nvirthba result queue poll wait:%d usecs.\n",
1406				  rsltq_wait_usecs);
1407
1408		length += sprintf(vbuf + length,
1409				  "\nModule build: Date:%s Time:%s\n",
1410				  __DATE__, __TIME__);
1411		length += sprintf(vbuf + length, "\ninterrupts_rcvd = %llu, interrupts_disabled = %llu\n",
1412				  virthbainfo->interrupts_rcvd,
1413				  virthbainfo->interrupts_disabled);
1414		length += sprintf(vbuf + length, "\ninterrupts_notme = %llu,\n",
1415				  virthbainfo->interrupts_notme);
1416		phys_flags_addr = virt_to_phys((__force  void *)
1417					       virthbainfo->flags_addr);
1418		length += sprintf(vbuf + length, "flags_addr = %p, phys_flags_addr=0x%016llx, FeatureFlags=%llu\n",
1419			  virthbainfo->flags_addr, phys_flags_addr,
1420				  (__le64)readq(virthbainfo->flags_addr));
1421		length += sprintf(vbuf + length, "acquire_failed_cnt:%llu\n",
1422				  virthbainfo->acquire_failed_cnt);
1423		length += sprintf(vbuf + length, "\n");
1424	}
1425	if (copy_to_user(buf, vbuf, length)) {
1426		kfree(vbuf);
1427		return -EFAULT;
1428	}
1429
1430	kfree(vbuf);
1431	*offset += length;
1432	return length;
1433}
1434
1435static ssize_t
1436enable_ints_read(struct file *file, char __user *buffer,
1437		  size_t count, loff_t *ppos)
1438{
1439	return 0;
1440}
1441
1442static ssize_t
1443enable_ints_write(struct file *file, const char __user *buffer,
1444		  size_t count, loff_t *ppos)
1445{
1446	char buf[4];
1447	int i, new_value;
1448	struct virthba_info *virthbainfo;
1449	U64 __iomem *Features_addr;
1450	U64 mask;
1451
1452	if (count >= ARRAY_SIZE(buf))
1453		return -EINVAL;
1454
1455	buf[count] = '\0';
1456	if (copy_from_user(buf, buffer, count)) {
1457		LOGERR("copy_from_user failed. buf<<%.*s>> count<<%lu>>\n",
1458		       (int) count, buf, count);
1459		return -EFAULT;
1460	}
1461
1462	i = sscanf(buf, "%d", &new_value);
1463
1464	if (i < 1) {
1465		LOGERR("Failed to scan value for enable_ints, buf<<%.*s>>",
1466		       (int) count, buf);
1467		return -EFAULT;
1468	}
1469
1470	/* set all counts to new_value usually 0 */
1471	for (i = 0; i < VIRTHBASOPENMAX; i++) {
1472		if (VirtHbasOpen[i].virthbainfo != NULL) {
1473			virthbainfo = VirtHbasOpen[i].virthbainfo;
1474			Features_addr =
1475				&virthbainfo->chinfo.queueinfo->chan->Features;
1476			if (new_value == 1) {
1477				mask = ~(ULTRA_IO_CHANNEL_IS_POLLING |
1478					 ULTRA_IO_DRIVER_DISABLES_INTS);
1479				uisqueue_InterlockedAnd(Features_addr, mask);
1480				mask = ULTRA_IO_DRIVER_ENABLES_INTS;
1481				uisqueue_InterlockedOr(Features_addr, mask);
1482				rsltq_wait_usecs = 4000000;
1483			} else {
1484				mask = ~(ULTRA_IO_DRIVER_ENABLES_INTS |
1485					 ULTRA_IO_DRIVER_DISABLES_INTS);
1486				uisqueue_InterlockedAnd(Features_addr, mask);
1487				mask = ULTRA_IO_CHANNEL_IS_POLLING;
1488				uisqueue_InterlockedOr(Features_addr, mask);
1489				rsltq_wait_usecs = 4000;
1490			}
1491		}
1492	}
1493	return count;
1494}
1495
1496static ssize_t
1497rqwu_proc_write(struct file *file, const char __user *buffer,
1498		size_t count, loff_t *ppos)
1499{
1500	char buf[16];
1501	int i, usecs;
1502
1503	if (count >= ARRAY_SIZE(buf))
1504		return -EINVAL;
1505
1506	if (copy_from_user(buf, buffer, count)) {
1507		LOGERR("copy_from_user failed. buf<<%.*s>> count<<%lu>>\n",
1508		       (int) count, buf, count);
1509		return -EFAULT;
 1510	}
 1511	buf[count] = '\0';

1512	i = sscanf(buf, "%d", &usecs);
1513
1514	if (i < 1) {
1515		LOGERR("Failed to scan value for rqwait_usecs buf<<%.*s>>",
1516		       (int) count, buf);
1517		return -EFAULT;
1518	}
1519
1520	/* set global wait time */
1521	rsltq_wait_usecs = usecs;
1522	return count;
1523}
1524
1525/* As per VirtpciFunc returns 1 for success and 0 for failure */
1526static int
1527virthba_serverup(struct virtpci_dev *virtpcidev)
1528{
1529	struct virthba_info *virthbainfo =
1530	    (struct virthba_info *) ((struct Scsi_Host *) virtpcidev->scsi.
1531				     scsihost)->hostdata;
1532
1533	DBGINF("virtpcidev busNo<<%d>>devNo<<%d>>", virtpcidev->busNo,
1534	       virtpcidev->deviceNo);
1535
1536	if (!virthbainfo->serverdown) {
 1537		DBGINF("Server up message received while server is already up.\n");
1538		return 1;
1539	}
1540	if (virthbainfo->serverchangingstate) {
1541		LOGERR("Server already processing change state message\n");
1542		return 0;
1543	}
1544
1545	virthbainfo->serverchangingstate = true;
1546	/* Must transition channel to ATTACHED state BEFORE we
1547	 * can start using the device again
1548	 */
1549	ULTRA_CHANNEL_CLIENT_TRANSITION(virthbainfo->chinfo.queueinfo->chan,
1550					dev_name(&virtpcidev->generic_dev),
1551					CHANNELCLI_ATTACHED, NULL);
1552
1553	/* Start Processing the IOVM Response Queue Again */
1554	if (!uisthread_start(&virthbainfo->chinfo.threadinfo,
1555			     process_incoming_rsps,
1556			     virthbainfo, "vhba_incoming")) {
1557		LOGERR("uisthread_start rsp ****FAILED\n");
1558		return 0;
1559	}
1560	virthbainfo->serverdown = false;
1561	virthbainfo->serverchangingstate = false;
1562
1563	return 1;
1564}
1565
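/* Runs on the serverdown workqueue: stop the response thread, fail every
 * still-pending request with DID_RESET (or the matching mgmt failure code),
 * mark the server down, and acknowledge the pause to visorchipset.
 */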
1566static void
1567virthba_serverdown_complete(struct work_struct *work)
1568{
1569	struct virthba_info *virthbainfo;
1570	struct virtpci_dev *virtpcidev;
1571	int i;
1572	struct scsipending *pendingdel = NULL;
1573	struct scsi_cmnd *scsicmd = NULL;
1574	struct uiscmdrsp *cmdrsp;
1575	unsigned long flags;
1576
1577	virthbainfo = container_of(work, struct virthba_info,
1578				   serverdown_completion);
1579
1580	/* Stop Using the IOVM Response Queue (queue should be drained
1581	 * by the end)
1582	 */
1583	uisthread_stop(&virthbainfo->chinfo.threadinfo);
1584
1585	/* Fail Commands that weren't completed */
1586	spin_lock_irqsave(&virthbainfo->privlock, flags);
1587	for (i = 0; i < MAX_PENDING_REQUESTS; i++) {
1588		pendingdel = &(virthbainfo->pending[i]);
1589		switch (pendingdel->cmdtype) {
1590		case CMD_SCSI_TYPE:
1591			scsicmd = (struct scsi_cmnd *) pendingdel->sent;
1592			scsicmd->result = (DID_RESET << 16);
1593			if (scsicmd->scsi_done)
1594				scsicmd->scsi_done(scsicmd);
1595			break;
1596		case CMD_SCSITASKMGMT_TYPE:
1597			cmdrsp = (struct uiscmdrsp *) pendingdel->sent;
1598			DBGINF("cmdrsp=0x%x, notify=0x%x\n", cmdrsp,
1599			       cmdrsp->scsitaskmgmt.notify);
1600			*(int *) cmdrsp->scsitaskmgmt.notifyresult =
1601			    TASK_MGMT_FAILED;
1602			wake_up_all((wait_queue_head_t *)
1603				    cmdrsp->scsitaskmgmt.notify);
1604			break;
1605		case CMD_VDISKMGMT_TYPE:
1606			cmdrsp = (struct uiscmdrsp *) pendingdel->sent;
1607			*(int *) cmdrsp->vdiskmgmt.notifyresult =
1608			    VDISK_MGMT_FAILED;
1609			wake_up_all((wait_queue_head_t *)
1610				    cmdrsp->vdiskmgmt.notify);
1611			break;
1612		default:
1613			if (pendingdel->sent != NULL)
1614				LOGERR("Unknown command type: 0x%x.  Only freeing list structure.\n",
1615				     pendingdel->cmdtype);
1616		}
1617		pendingdel->cmdtype = 0;
1618		pendingdel->sent = NULL;
1619	}
1620	spin_unlock_irqrestore(&virthbainfo->privlock, flags);
1621
1622	virtpcidev = virthbainfo->virtpcidev;
1623
1624	DBGINF("virtpcidev busNo<<%d>>devNo<<%d>>", virtpcidev->busNo,
1625	       virtpcidev->deviceNo);
1626	virthbainfo->serverdown = true;
1627	virthbainfo->serverchangingstate = false;
1628	/* Return the ServerDown response to Command */
1629	visorchipset_device_pause_response(virtpcidev->busNo,
1630					   virtpcidev->deviceNo, 0);
1631}
1632
1633/* As per VirtpciFunc returns 1 for success and 0 for failure */
1634static int
1635virthba_serverdown(struct virtpci_dev *virtpcidev, u32 state)
1636{
1637	struct virthba_info *virthbainfo =
1638	    (struct virthba_info *) ((struct Scsi_Host *) virtpcidev->scsi.
1639				     scsihost)->hostdata;
1640
1641	DBGINF("virthba_serverdown");
1642	DBGINF("virtpcidev busNo<<%d>>devNo<<%d>>", virtpcidev->busNo,
1643	       virtpcidev->deviceNo);
1644
1645	if (!virthbainfo->serverdown && !virthbainfo->serverchangingstate) {
1646		virthbainfo->serverchangingstate = true;
1647		queue_work(virthba_serverdown_workqueue,
1648			   &virthbainfo->serverdown_completion);
1649	} else if (virthbainfo->serverchangingstate) {
1650		LOGERR("Server already processing change state message\n");
1651		return 0;
1652	} else
1653		LOGERR("Server already down, but another server down message received.");
1654
1655	return 1;
1656}
1657
1658/*****************************************************/
1659/* Module Init & Exit functions                      */
1660/*****************************************************/
1661
1662static int __init
1663virthba_parse_line(char *str)
1664{
1665	DBGINF("In virthba_parse_line %s\n", str);
1666	return 1;
1667}
1668
1669static void __init
1670virthba_parse_options(char *line)
1671{
1672	char *next = line;
1673
1674	POSTCODE_LINUX_2(VHBA_CREATE_ENTRY_PC, POSTCODE_SEVERITY_INFO);
1675	if (line == NULL || !*line)
1676		return;
1677	while ((line = next) != NULL) {
1678		next = strchr(line, ' ');
1679		if (next != NULL)
1680			*next++ = 0;
1681		if (!virthba_parse_line(line))
1682			DBGINF("Unknown option '%s'\n", line);
1683	}
1684
1685	POSTCODE_LINUX_2(VHBA_CREATE_EXIT_PC, POSTCODE_SEVERITY_INFO);
1686}
1687
1688static int __init
1689virthba_mod_init(void)
1690{
1691	int error;
1692	int i;
1693
1694	LOGINF("Entering virthba_mod_init...\n");
1695
1696	POSTCODE_LINUX_2(VHBA_CREATE_ENTRY_PC, POSTCODE_SEVERITY_INFO);
1697	virthba_parse_options(virthba_options);
1698
1699	error = virtpci_register_driver(&virthba_driver);
1700	if (error < 0) {
1701		LOGERR("register ****FAILED 0x%x\n", error);
1702		POSTCODE_LINUX_3(VHBA_CREATE_FAILURE_PC, error,
1703				 POSTCODE_SEVERITY_ERR);
1704	} else {
1705		/* create the proc directories */
1706		virthba_proc_dir = proc_mkdir(DIR_PROC_ENTRY, NULL);
1707		info_proc_entry = proc_create(INFO_PROC_ENTRY_FN, 0,
1708					      virthba_proc_dir,
1709					      &proc_info_fops);
1710		rqwaitus_proc_entry = proc_create(RQWU_PROC_ENTRY_FN, 0,
1711						  virthba_proc_dir,
1712						  &proc_rqwu_fops);
1713		enable_ints_proc_entry = proc_create(ENABLE_INTS_ENTRY_FN, 0,
1714						     virthba_proc_dir,
1715						     &proc_enable_ints_fops);
1716
1717		/* Initialize DARWorkQ */
1718		INIT_WORK(&DARWorkQ, doDiskAddRemove);
1719		spin_lock_init(&DARWorkQLock);
1720
1721		/* clear out array */
1722		for (i = 0; i < VIRTHBASOPENMAX; i++)
1723			VirtHbasOpen[i].virthbainfo = NULL;
1724		/* Initialize the serverdown workqueue */
1725		virthba_serverdown_workqueue =
1726		    create_singlethread_workqueue("virthba_serverdown");
1727		if (virthba_serverdown_workqueue == NULL) {
1728			LOGERR("**** FAILED virthba_serverdown_workqueue creation\n");
1729			POSTCODE_LINUX_2(VHBA_CREATE_FAILURE_PC,
1730					 POSTCODE_SEVERITY_ERR);
1731			error = -1;
1732		}
1733	}
1734
1735	POSTCODE_LINUX_2(VHBA_CREATE_EXIT_PC, POSTCODE_SEVERITY_INFO);
1736	LOGINF("Leaving virthba_mod_init\n");
1737	return error;
1738}
1739
1740static ssize_t
1741virthba_acquire_lun(struct device *cdev, struct device_attribute *attr,
1742		    const char *buf, size_t count)
1743{
1744	struct uisscsi_dest vdest;
1745	struct Scsi_Host *shost = class_to_shost(cdev);
1746	int i;
1747
1748	i = sscanf(buf, "%d-%d-%d", &vdest.channel, &vdest.id, &vdest.lun);
1749	if (i != 3)
1750		return i;
1751
1752	return forward_vdiskmgmt_command(VDISK_MGMT_ACQUIRE, shost, &vdest);
1753}
1754
1755static ssize_t
1756virthba_release_lun(struct device *cdev, struct device_attribute *attr,
1757		    const char *buf, size_t count)
1758{
1759	struct uisscsi_dest vdest;
1760	struct Scsi_Host *shost = class_to_shost(cdev);
1761	int i;
1762
1763	i = sscanf(buf, "%d-%d-%d", &vdest.channel, &vdest.id, &vdest.lun);
1764	if (i != 3)
1765		return i;
1766
1767	return forward_vdiskmgmt_command(VDISK_MGMT_RELEASE, shost, &vdest);
1768}
1769
1770#define CLASS_DEVICE_ATTR(_name, _mode, _show, _store)      \
1771	struct device_attribute class_device_attr_##_name =   \
1772		__ATTR(_name, _mode, _show, _store)
1773
1774static CLASS_DEVICE_ATTR(acquire_lun, S_IWUSR, NULL, virthba_acquire_lun);
1775static CLASS_DEVICE_ATTR(release_lun, S_IWUSR, NULL, virthba_release_lun);
1776
1777static DEVICE_ATTRIBUTE *virthba_shost_attrs[] = {
1778	&class_device_attr_acquire_lun,
1779	&class_device_attr_release_lun,
1780	NULL
1781};
1782
1783static void __exit
1784virthba_mod_exit(void)
1785{
1786	LOGINF("entering virthba_mod_exit...\n");
1787
1788	virtpci_unregister_driver(&virthba_driver);
1789	/* unregister is going to call virthba_remove */
1790	/* destroy serverdown completion workqueue */
1791	if (virthba_serverdown_workqueue) {
1792		destroy_workqueue(virthba_serverdown_workqueue);
1793		virthba_serverdown_workqueue = NULL;
1794	}
1795
1796	if (info_proc_entry)
1797		remove_proc_entry(INFO_PROC_ENTRY_FN, virthba_proc_dir);
1798
1799	if (rqwaitus_proc_entry)
1800		remove_proc_entry(RQWU_PROC_ENTRY_FN, NULL);
1801
1802	if (enable_ints_proc_entry)
1803		remove_proc_entry(ENABLE_INTS_ENTRY_FN, NULL);
1804
1805	if (virthba_proc_dir)
1806		remove_proc_entry(DIR_PROC_ENTRY, NULL);
1807
1808	LOGINF("Leaving virthba_mod_exit\n");
1809
1810}
1811
1812/* specify function to be run at module insertion time */
1813module_init(virthba_mod_init);
1814
1815/* specify function to be run when module is removed */
1816module_exit(virthba_mod_exit);
1817
1818MODULE_LICENSE("GPL");
1819MODULE_AUTHOR("Usha Srinivasan");
1820MODULE_ALIAS("uisvirthba");
1821	/* this is extracted during depmod and kept in modules.dep */
1822/* module parameter */
1823module_param(virthba_options, charp, S_IRUGO);
1824