commsup.c revision 8418852d11f0bbaeebeedd4243560d8fdc85410d
1/*
2 *	Adaptec AAC series RAID controller driver
3 *	(c) Copyright 2001 Red Hat Inc.	<alan@redhat.com>
4 *
5 * based on the old aacraid driver that is..
6 * Adaptec aacraid device driver for Linux.
7 *
8 * Copyright (c) 2000 Adaptec, Inc. (aacraid@adaptec.com)
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License as published by
12 * the Free Software Foundation; either version 2, or (at your option)
13 * any later version.
14 *
15 * This program is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
18 * GNU General Public License for more details.
19 *
20 * You should have received a copy of the GNU General Public License
21 * along with this program; see the file COPYING.  If not, write to
22 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
23 *
24 * Module Name:
25 *  commsup.c
26 *
27 * Abstract: Contains all routines that are required for FSA host/adapter
28 *    communication.
29 *
30 */
31
32#include <linux/kernel.h>
33#include <linux/init.h>
34#include <linux/types.h>
35#include <linux/sched.h>
36#include <linux/pci.h>
37#include <linux/spinlock.h>
38#include <linux/slab.h>
39#include <linux/completion.h>
40#include <linux/blkdev.h>
41#include <linux/delay.h>
42#include <linux/kthread.h>
43#include <linux/interrupt.h>
44#include <scsi/scsi.h>
45#include <scsi/scsi_host.h>
46#include <scsi/scsi_device.h>
47#include <scsi/scsi_cmnd.h>
48#include <asm/semaphore.h>
49
50#include "aacraid.h"
51
52/**
53 *	fib_map_alloc		-	allocate the fib objects
54 *	@dev: Adapter to allocate for
55 *
56 *	Allocate and map the shared PCI space for the FIB blocks used to
57 *	talk to the Adaptec firmware.
58 */
59
60static int fib_map_alloc(struct aac_dev *dev)
61{
62	dprintk((KERN_INFO
63	  "allocate hardware fibs pci_alloc_consistent(%p, %d * (%d + %d), %p)\n",
64	  dev->pdev, dev->max_fib_size, dev->scsi_host_ptr->can_queue,
65	  AAC_NUM_MGT_FIB, &dev->hw_fib_pa));
66	if((dev->hw_fib_va = pci_alloc_consistent(dev->pdev, dev->max_fib_size
67	  * (dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB),
68	  &dev->hw_fib_pa))==NULL)
69		return -ENOMEM;
70	return 0;
71}
72
73/**
74 *	aac_fib_map_free		-	free the fib objects
75 *	@dev: Adapter to free
76 *
77 *	Free the PCI mappings and the memory allocated for FIB blocks
78 *	on this adapter.
79 */
80
81void aac_fib_map_free(struct aac_dev *dev)
82{
83	pci_free_consistent(dev->pdev, dev->max_fib_size * (dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB), dev->hw_fib_va, dev->hw_fib_pa);
84}
85
86/**
87 *	aac_fib_setup	-	setup the fibs
88 *	@dev: Adapter to set up
89 *
90 *	Allocate the PCI space for the fibs, map it and then initialise the
91 *	fib area, the unmapped fib data and also the free list
92 */
93
94int aac_fib_setup(struct aac_dev * dev)
95{
96	struct fib *fibptr;
97	struct hw_fib *hw_fib_va;
98	dma_addr_t hw_fib_pa;
99	int i;
100
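	/*
	 *	If the contiguous allocation fails, halve the total number of
	 *	FIBs (I/O plus management) and retry with a smaller region, as
	 *	long as can_queue stays above the minimum.
	 */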
101	while (((i = fib_map_alloc(dev)) == -ENOMEM)
102	 && (dev->scsi_host_ptr->can_queue > (64 - AAC_NUM_MGT_FIB))) {
103		dev->init->MaxIoCommands = cpu_to_le32((dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB) >> 1);
104		dev->scsi_host_ptr->can_queue = le32_to_cpu(dev->init->MaxIoCommands) - AAC_NUM_MGT_FIB;
105	}
106	if (i<0)
107		return -ENOMEM;
108
109	hw_fib_va = dev->hw_fib_va;
110	hw_fib_pa = dev->hw_fib_pa;
111	memset(hw_fib_va, 0, dev->max_fib_size * (dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB));
112	/*
113	 *	Initialise the fibs
114	 */
115	for (i = 0, fibptr = &dev->fibs[i]; i < (dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB); i++, fibptr++)
116	{
117		fibptr->dev = dev;
118		fibptr->hw_fib = hw_fib_va;
119		fibptr->data = (void *) fibptr->hw_fib->data;
120		fibptr->next = fibptr+1;	/* Forward chain the fibs */
121		init_MUTEX_LOCKED(&fibptr->event_wait);
122		spin_lock_init(&fibptr->event_lock);
123		hw_fib_va->header.XferState = cpu_to_le32(0xffffffff);
124		hw_fib_va->header.SenderSize = cpu_to_le16(dev->max_fib_size);
125		fibptr->hw_fib_pa = hw_fib_pa;
126		hw_fib_va = (struct hw_fib *)((unsigned char *)hw_fib_va + dev->max_fib_size);
127		hw_fib_pa = hw_fib_pa + dev->max_fib_size;
128	}
129	/*
130	 *	Add the fib chain to the free list
131	 */
132	dev->fibs[dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB - 1].next = NULL;
133	/*
134	 *	Enable this to debug out of queue space
135	 */
136	dev->free_fib = &dev->fibs[0];
137	return 0;
138}
139
140/**
141 *	aac_fib_alloc	-	allocate a fib
142 *	@dev: Adapter to allocate the fib for
143 *
144 *	Allocate a fib from the adapter fib pool. If the pool is empty we
145 *	return NULL.
146 */
147
148struct fib *aac_fib_alloc(struct aac_dev *dev)
149{
150	struct fib * fibptr;
151	unsigned long flags;
152	spin_lock_irqsave(&dev->fib_lock, flags);
153	fibptr = dev->free_fib;
154	if(!fibptr){
155		spin_unlock_irqrestore(&dev->fib_lock, flags);
156		return fibptr;
157	}
158	dev->free_fib = fibptr->next;
159	spin_unlock_irqrestore(&dev->fib_lock, flags);
160	/*
161	 *	Set the proper node type code and node byte size
162	 */
163	fibptr->type = FSAFS_NTC_FIB_CONTEXT;
164	fibptr->size = sizeof(struct fib);
165	/*
166	 *	Null out fields that depend on being zero at the start of
167	 *	each I/O
168	 */
169	fibptr->hw_fib->header.XferState = 0;
170	fibptr->callback = NULL;
171	fibptr->callback_data = NULL;
172
173	return fibptr;
174}
175
176/**
177 *	aac_fib_free	-	free a fib
178 *	@fibptr: fib to free up
179 *
180 *	Frees up a fib and places it on the appropriate queue
181 *	(either free or timed out)
182 */
183
184void aac_fib_free(struct fib *fibptr)
185{
186	unsigned long flags;
187
188	spin_lock_irqsave(&fibptr->dev->fib_lock, flags);
189	if (fibptr->flags & FIB_CONTEXT_FLAG_TIMED_OUT) {
190		aac_config.fib_timeouts++;
191		fibptr->next = fibptr->dev->timeout_fib;
192		fibptr->dev->timeout_fib = fibptr;
193	} else {
194		if (fibptr->hw_fib->header.XferState != 0) {
195			printk(KERN_WARNING "aac_fib_free, XferState != 0, fibptr = 0x%p, XferState = 0x%x\n",
196				 (void*)fibptr,
197				 le32_to_cpu(fibptr->hw_fib->header.XferState));
198		}
199		fibptr->next = fibptr->dev->free_fib;
200		fibptr->dev->free_fib = fibptr;
201	}
202	spin_unlock_irqrestore(&fibptr->dev->fib_lock, flags);
203}
204
205/**
206 *	aac_fib_init	-	initialise a fib
207 *	@fibptr: The fib to initialize
208 *
209 *	Set up the generic fib fields ready for use
210 */
211
212void aac_fib_init(struct fib *fibptr)
213{
214	struct hw_fib *hw_fib = fibptr->hw_fib;
215
216	hw_fib->header.StructType = FIB_MAGIC;
217	hw_fib->header.Size = cpu_to_le16(fibptr->dev->max_fib_size);
218	hw_fib->header.XferState = cpu_to_le32(HostOwned | FibInitialized | FibEmpty | FastResponseCapable);
219	hw_fib->header.SenderFibAddress = 0; /* Filled in later if needed */
220	hw_fib->header.ReceiverFibAddress = cpu_to_le32(fibptr->hw_fib_pa);
221	hw_fib->header.SenderSize = cpu_to_le16(fibptr->dev->max_fib_size);
222}
223
224/**
225 *	fib_dealloc		-	deallocate a fib
226 *	@fibptr: fib to deallocate
227 *
228 *	Will deallocate and return to the free pool the FIB pointed to by the
229 *	caller.
230 */
231
232static void fib_dealloc(struct fib * fibptr)
233{
234	struct hw_fib *hw_fib = fibptr->hw_fib;
235	BUG_ON(hw_fib->header.StructType != FIB_MAGIC);
236	hw_fib->header.XferState = 0;
237}
238
239/*
240 *	Communication primitives define and support the queuing method we use to
241 *	support host to adapter communication. All queue accesses happen through
242 *	these routines, which are the only routines with knowledge of how these
243 *	queues are implemented.
244 */
245
246/**
247 *	aac_get_entry		-	get a queue entry
248 *	@dev: Adapter
249 *	@qid: Queue Number
250 *	@entry: Entry return
251 *	@index: Index return
252 *	@nonotify: notification control
253 *
254 *	With a priority the routine returns a queue entry if the queue has free entries. If the queue
255 *	is full (no free entries) then no entry is returned and the function returns 0, otherwise 1 is
256 *	returned.
257 */
258
259static int aac_get_entry (struct aac_dev * dev, u32 qid, struct aac_entry **entry, u32 * index, unsigned long *nonotify)
260{
261	struct aac_queue * q;
262	unsigned long idx;
263
264	/*
265	 *	All of the queues wrap when they reach the end, so we check
266	 *	to see if they have reached the end and if they have we just
267	 *	set the index back to zero. This is a wrap. You could or off
268	 *	the high bits in all updates but this is a bit faster I think.
269	 */
270
271	q = &dev->queues->queue[qid];
272
273	idx = *index = le32_to_cpu(*(q->headers.producer));
274	/* Interrupt Moderation, only interrupt for first two entries */
275	if (idx != le32_to_cpu(*(q->headers.consumer))) {
276		if (--idx == 0) {
277			if (qid == AdapNormCmdQueue)
278				idx = ADAP_NORM_CMD_ENTRIES;
279			else
280				idx = ADAP_NORM_RESP_ENTRIES;
281		}
282		if (idx != le32_to_cpu(*(q->headers.consumer)))
283			*nonotify = 1;
284	}
285
286	if (qid == AdapNormCmdQueue) {
287	        if (*index >= ADAP_NORM_CMD_ENTRIES)
288			*index = 0; /* Wrap to front of the Producer Queue. */
289	} else {
290		if (*index >= ADAP_NORM_RESP_ENTRIES)
291			*index = 0; /* Wrap to front of the Producer Queue. */
292	}
293
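	/*
	 *	One slot is always left unused so a full queue (producer one
	 *	behind the consumer) can be told apart from an empty one
	 *	(producer equal to the consumer).
	 */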
294        if ((*index + 1) == le32_to_cpu(*(q->headers.consumer))) { /* Queue is full */
295		printk(KERN_WARNING "Queue %d full, %u outstanding.\n",
296				qid, q->numpending);
297		return 0;
298	} else {
299	        *entry = q->base + *index;
300		return 1;
301	}
302}
303
304/**
305 *	aac_queue_get		-	get the next free QE
306 *	@dev: Adapter
307 *	@index: Returned index
308 *	@qid: Queue number to use
309 *	@hw_fib: Fib to associate with the queue entry
310 *	@wait: Wait if queue full
311 *	@fibptr: Driver fib object to go with fib
312 *	@nonotify: Don't notify the adapter
313 *
314 *	Gets the next free QE off the requested priority adapter command
315 *	queue and associates the Fib with the QE. The QE represented by
316 *	index is ready to insert on the queue when this routine returns
317 *	success.
318 */
319
320int aac_queue_get(struct aac_dev * dev, u32 * index, u32 qid, struct hw_fib * hw_fib, int wait, struct fib * fibptr, unsigned long *nonotify)
321{
322	struct aac_entry * entry = NULL;
323	int map = 0;
324
325	if (qid == AdapNormCmdQueue) {
326		/*  if no entries wait for some if caller wants to */
327        	while (!aac_get_entry(dev, qid, &entry, index, nonotify))
328        	{
329			printk(KERN_ERR "GetEntries failed\n");
330		}
331	        /*
332	         *	Setup queue entry with a command, status and fib mapped
333	         */
334	        entry->size = cpu_to_le32(le16_to_cpu(hw_fib->header.Size));
335	        map = 1;
336	} else {
337	        while(!aac_get_entry(dev, qid, &entry, index, nonotify))
338	        {
339			/* if no entries wait for some if caller wants to */
340		}
341        	/*
342        	 *	Setup queue entry with command, status and fib mapped
343        	 */
344        	entry->size = cpu_to_le32(le16_to_cpu(hw_fib->header.Size));
345        	entry->addr = hw_fib->header.SenderFibAddress;
346     			/* Restore the adapter's pointer to the FIB */
347		hw_fib->header.ReceiverFibAddress = hw_fib->header.SenderFibAddress;	/* Let the adapter know where to find its data */
348        	map = 0;
349	}
350	/*
351	 *	If MapFib is true then we need to map the Fib and put pointers
352	 *	in the queue entry.
353	 */
354	if (map)
355		entry->addr = cpu_to_le32(fibptr->hw_fib_pa);
356	return 0;
357}
358
359/*
360 *	Define the highest level of host to adapter communication routines.
361 *	These routines will support host to adapter FS communication. These
362 *	routines have no knowledge of the communication method used. This level
363 *	sends and receives FIBs. This level has no knowledge of how these FIBs
364 *	get passed back and forth.
365 */
366
367/**
368 *	aac_fib_send	-	send a fib to the adapter
369 *	@command: Command to send
370 *	@fibptr: The fib
371 *	@size: Size of fib data area
372 *	@priority: Priority of Fib
373 *	@wait: Async/sync select
374 *	@reply: True if a reply is wanted
375 *	@callback: Called with reply
376 *	@callback_data: Passed to callback
377 *
378 *	Sends the requested FIB to the adapter and optionally will wait for a
379 *	response FIB. If the caller does not wish to wait for a response then
380 *	an event to wait on must be supplied. This event will be set when a
381 *	response FIB is received from the adapter.
382 */
383
384int aac_fib_send(u16 command, struct fib *fibptr, unsigned long size,
385		int priority, int wait, int reply, fib_callback callback,
386		void *callback_data)
387{
388	struct aac_dev * dev = fibptr->dev;
389	struct hw_fib * hw_fib = fibptr->hw_fib;
390	unsigned long flags = 0;
391	unsigned long qflags;
392
393	if (!(hw_fib->header.XferState & cpu_to_le32(HostOwned)))
394		return -EBUSY;
395	/*
396	 *	There are 5 cases with the wait and response requested flags.
397	 *	The only invalid cases are if the caller requests to wait and
398	 *	does not request a response and if the caller does not want a
399	 *	response and the Fib is not allocated from pool. If a response
400	 *	is not requested the Fib will just be deallocated by the DPC
401	 *	routine when the response comes back from the adapter. No
402	 *	further processing will be done besides deleting the Fib. We
403	 *	will have a debug mode where the adapter can notify the host
404	 *	it had a problem and the host can log that fact.
405	 */
406	if (wait && !reply) {
407		return -EINVAL;
408	} else if (!wait && reply) {
409		hw_fib->header.XferState |= cpu_to_le32(Async | ResponseExpected);
410		FIB_COUNTER_INCREMENT(aac_config.AsyncSent);
411	} else if (!wait && !reply) {
412		hw_fib->header.XferState |= cpu_to_le32(NoResponseExpected);
413		FIB_COUNTER_INCREMENT(aac_config.NoResponseSent);
414	} else if (wait && reply) {
415		hw_fib->header.XferState |= cpu_to_le32(ResponseExpected);
416		FIB_COUNTER_INCREMENT(aac_config.NormalSent);
417	}
418	/*
419	 *	Map the fib into 32bits by using the fib number
420	 */
421
422	hw_fib->header.SenderFibAddress = cpu_to_le32(((u32)(fibptr - dev->fibs)) << 2);
423	hw_fib->header.SenderData = (u32)(fibptr - dev->fibs);
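	/*
	 *	SenderData carries the index of this fib within dev->fibs so the
	 *	response path can locate the matching struct fib; the shifted
	 *	SenderFibAddress leaves the low bits free for status flags in the
	 *	adapter's reply.
	 */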
424	/*
425	 *	Set FIB state to indicate where it came from and if we want a
426	 *	response from the adapter. Also load the command from the
427	 *	caller.
428	 *
429	 *	Map the hw fib pointer as a 32bit value
430	 */
431	hw_fib->header.Command = cpu_to_le16(command);
432	hw_fib->header.XferState |= cpu_to_le32(SentFromHost);
433	fibptr->hw_fib->header.Flags = 0;	/* 0 the flags field - internal only*/
434	/*
435	 *	Set the size of the Fib we want to send to the adapter
436	 */
437	hw_fib->header.Size = cpu_to_le16(sizeof(struct aac_fibhdr) + size);
438	if (le16_to_cpu(hw_fib->header.Size) > le16_to_cpu(hw_fib->header.SenderSize)) {
439		return -EMSGSIZE;
440	}
441	/*
442	 *	Get a queue entry, connect the FIB to it and send a notify to
443	 *	the adapter that a command is ready.
444	 */
445	hw_fib->header.XferState |= cpu_to_le32(NormalPriority);
446
447	/*
448	 *	Fill in the Callback and CallbackContext if we are not
449	 *	going to wait.
450	 */
451	if (!wait) {
452		fibptr->callback = callback;
453		fibptr->callback_data = callback_data;
454	}
455
456	fibptr->done = 0;
457	fibptr->flags = 0;
458
459	FIB_COUNTER_INCREMENT(aac_config.FibsSent);
460
461	dprintk((KERN_DEBUG "Fib contents:.\n"));
462	dprintk((KERN_DEBUG "  Command =               %d.\n", le32_to_cpu(hw_fib->header.Command)));
463	dprintk((KERN_DEBUG "  SubCommand =            %d.\n", le32_to_cpu(((struct aac_query_mount *)fib_data(fibptr))->command)));
464	dprintk((KERN_DEBUG "  XferState  =            %x.\n", le32_to_cpu(hw_fib->header.XferState)));
465	dprintk((KERN_DEBUG "  hw_fib va being sent=%p\n",fibptr->hw_fib));
466	dprintk((KERN_DEBUG "  hw_fib pa being sent=%lx\n",(ulong)fibptr->hw_fib_pa));
467	dprintk((KERN_DEBUG "  fib being sent=%p\n",fibptr));
468
469	if (!dev->queues)
470		return -EBUSY;
471
472	if(wait)
473		spin_lock_irqsave(&fibptr->event_lock, flags);
474	aac_adapter_deliver(fibptr);
475
476	/*
477	 *	If the caller wanted us to wait for response wait now.
478	 */
479
480	if (wait) {
481		spin_unlock_irqrestore(&fibptr->event_lock, flags);
482		/* Only set for first known interruptible command */
483		if (wait < 0) {
484			/*
485			 * *VERY* Dangerous to time out a command, the
486			 * assumption is made that we have no hope of
487			 * functioning because an interrupt routing or other
488			 * hardware failure has occurred.
489			 */
490			unsigned long count = 36000000L; /* 3 minutes */
491			while (down_trylock(&fibptr->event_wait)) {
492				int blink;
493				if (--count == 0) {
494					struct aac_queue * q = &dev->queues->queue[AdapNormCmdQueue];
495					spin_lock_irqsave(q->lock, qflags);
496					q->numpending--;
497					spin_unlock_irqrestore(q->lock, qflags);
498					if (wait == -1) {
499	        				printk(KERN_ERR "aacraid: aac_fib_send: first asynchronous command timed out.\n"
500						  "Usually a result of a PCI interrupt routing problem;\n"
501						  "update mother board BIOS or consider utilizing one of\n"
502						  "the SAFE mode kernel options (acpi, apic etc)\n");
503					}
504					return -ETIMEDOUT;
505				}
506				if ((blink = aac_adapter_check_health(dev)) > 0) {
507					if (wait == -1) {
508	        				printk(KERN_ERR "aacraid: aac_fib_send: adapter blinkLED 0x%x.\n"
509						  "Usually a result of a serious unrecoverable hardware problem\n",
510						  blink);
511					}
512					return -EFAULT;
513				}
514				udelay(5);
515			}
516		} else if (down_interruptible(&fibptr->event_wait)) {
517			spin_lock_irqsave(&fibptr->event_lock, flags);
518			if (fibptr->done == 0) {
519				fibptr->done = 2; /* Tell interrupt we aborted */
520				spin_unlock_irqrestore(&fibptr->event_lock, flags);
521				return -EINTR;
522			}
523			spin_unlock_irqrestore(&fibptr->event_lock, flags);
524		}
525		BUG_ON(fibptr->done == 0);
526
527		if((fibptr->flags & FIB_CONTEXT_FLAG_TIMED_OUT)){
528			return -ETIMEDOUT;
529		} else {
530			return 0;
531		}
532	}
533	/*
534	 *	If the user does not want a response then return success, otherwise
535	 *	return pending
536	 */
537	if (reply)
538		return -EINPROGRESS;
539	else
540		return 0;
541}
542
543/**
544 *	aac_consumer_get	-	get the top of the queue
545 *	@dev: Adapter
546 *	@q: Queue
547 *	@entry: Return entry
548 *
549 *	Will return a pointer to the entry on the top of the requested queue
550 * 	that we are a consumer of, i.e. the address of the queue entry. It does
551 *	not change the state of the queue.
552 */
553
554int aac_consumer_get(struct aac_dev * dev, struct aac_queue * q, struct aac_entry **entry)
555{
556	u32 index;
557	int status;
558	if (le32_to_cpu(*q->headers.producer) == le32_to_cpu(*q->headers.consumer)) {
559		status = 0;
560	} else {
561		/*
562		 *	The consumer index must be wrapped if we have reached
563		 *	the end of the queue, else we just use the entry
564		 *	pointed to by the header index
565		 */
566		if (le32_to_cpu(*q->headers.consumer) >= q->entries)
567			index = 0;
568		else
569		        index = le32_to_cpu(*q->headers.consumer);
570		*entry = q->base + index;
571		status = 1;
572	}
573	return(status);
574}
575
576/**
577 *	aac_consumer_free	-	free consumer entry
578 *	@dev: Adapter
579 *	@q: Queue
580 *	@qid: Queue ident
581 *
582 *	Frees up the current top of the queue we are a consumer of. If the
583 *	queue was full notify the producer that the queue is no longer full.
584 */
585
586void aac_consumer_free(struct aac_dev * dev, struct aac_queue *q, u32 qid)
587{
588	int wasfull = 0;
589	u32 notify;
590
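	/* Producer one slot behind the consumer means the queue was full. */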
591	if ((le32_to_cpu(*q->headers.producer)+1) == le32_to_cpu(*q->headers.consumer))
592		wasfull = 1;
593
594	if (le32_to_cpu(*q->headers.consumer) >= q->entries)
595		*q->headers.consumer = cpu_to_le32(1);
596	else
597		*q->headers.consumer = cpu_to_le32(le32_to_cpu(*q->headers.consumer)+1);
598
599	if (wasfull) {
600		switch (qid) {
601
602		case HostNormCmdQueue:
603			notify = HostNormCmdNotFull;
604			break;
605		case HostNormRespQueue:
606			notify = HostNormRespNotFull;
607			break;
608		default:
609			BUG();
610			return;
611		}
612		aac_adapter_notify(dev, notify);
613	}
614}
615
616/**
617 *	aac_fib_adapter_complete	-	complete adapter issued fib
618 *	@fibptr: fib to complete
619 *	@size: size of fib
620 *
621 *	Will do all necessary work to complete a FIB that was sent from
622 *	the adapter.
623 */
624
625int aac_fib_adapter_complete(struct fib *fibptr, unsigned short size)
626{
627	struct hw_fib * hw_fib = fibptr->hw_fib;
628	struct aac_dev * dev = fibptr->dev;
629	struct aac_queue * q;
630	unsigned long nointr = 0;
631	unsigned long qflags;
632
633	if (hw_fib->header.XferState == 0) {
634		if (dev->comm_interface == AAC_COMM_MESSAGE)
635			kfree (hw_fib);
636        	return 0;
637	}
638	/*
639	 *	If we plan to do anything check the structure type first.
640	 */
641	if ( hw_fib->header.StructType != FIB_MAGIC ) {
642		if (dev->comm_interface == AAC_COMM_MESSAGE)
643			kfree (hw_fib);
644        	return -EINVAL;
645	}
646	/*
647	 *	This block handles the case where the adapter had sent us a
648	 *	command and we have finished processing the command. We
649	 *	call completeFib when we are done processing the command
650	 *	and want to send a response back to the adapter. This will
651	 *	send the completed cdb to the adapter.
652	 */
653	if (hw_fib->header.XferState & cpu_to_le32(SentFromAdapter)) {
654		if (dev->comm_interface == AAC_COMM_MESSAGE) {
655			kfree (hw_fib);
656		} else {
657	       		u32 index;
658		        hw_fib->header.XferState |= cpu_to_le32(HostProcessed);
659			if (size) {
660				size += sizeof(struct aac_fibhdr);
661				if (size > le16_to_cpu(hw_fib->header.SenderSize))
662					return -EMSGSIZE;
663				hw_fib->header.Size = cpu_to_le16(size);
664			}
665			q = &dev->queues->queue[AdapNormRespQueue];
666			spin_lock_irqsave(q->lock, qflags);
667			aac_queue_get(dev, &index, AdapNormRespQueue, hw_fib, 1, NULL, &nointr);
668			*(q->headers.producer) = cpu_to_le32(index + 1);
669			spin_unlock_irqrestore(q->lock, qflags);
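			/* Skip the adapter notify if interrupt moderation flagged this entry. */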
670			if (!(nointr & (int)aac_config.irq_mod))
671				aac_adapter_notify(dev, AdapNormRespQueue);
672		}
673	}
674	else
675	{
676        	printk(KERN_WARNING "aac_fib_adapter_complete: Unknown xferstate detected.\n");
677        	BUG();
678	}
679	return 0;
680}
681
682/**
683 *	aac_fib_complete	-	fib completion handler
684 *	@fibptr: FIB to complete
685 *
686 *	Will do all necessary work to complete a FIB.
687 */
688
689int aac_fib_complete(struct fib *fibptr)
690{
691	struct hw_fib * hw_fib = fibptr->hw_fib;
692
693	/*
694	 *	Check for a fib which has already been completed
695	 */
696
697	if (hw_fib->header.XferState == 0)
698        	return 0;
699	/*
700	 *	If we plan to do anything check the structure type first.
701	 */
702
703	if (hw_fib->header.StructType != FIB_MAGIC)
704	        return -EINVAL;
705	/*
706	 *	This block completes a cdb which originated on the host and we
707	 *	just need to deallocate the cdb or reinit it. At this point the
708	 *	command is complete that we had sent to the adapter and this
709	 *	cdb could be reused.
710	 */
711	if((hw_fib->header.XferState & cpu_to_le32(SentFromHost)) &&
712		(hw_fib->header.XferState & cpu_to_le32(AdapterProcessed)))
713	{
714		fib_dealloc(fibptr);
715	}
716	else if(hw_fib->header.XferState & cpu_to_le32(SentFromHost))
717	{
718		/*
719		 *	This handles the case when the host has aborted the I/O
720		 *	to the adapter because the adapter is not responding
721		 */
722		fib_dealloc(fibptr);
723	} else if(hw_fib->header.XferState & cpu_to_le32(HostOwned)) {
724		fib_dealloc(fibptr);
725	} else {
726		BUG();
727	}
728	return 0;
729}
730
731/**
732 *	aac_printf	-	handle printf from firmware
733 *	@dev: Adapter
734 *	@val: Message info
735 *
736 *	Print a message passed to us by the controller firmware on the
737 *	Adaptec board
738 */
739
740void aac_printf(struct aac_dev *dev, u32 val)
741{
742	char *cp = dev->printfbuf;
743	if (dev->printf_enabled)
744	{
745		int length = val & 0xffff;
746		int level = (val >> 16) & 0xffff;
747
748		/*
749		 *	The size of the printfbuf is set in port.c
750		 *	There is no variable or define for it
751		 */
752		if (length > 255)
753			length = 255;
754		if (cp[length] != 0)
755			cp[length] = 0;
756		if (level == LOG_AAC_HIGH_ERROR)
757			printk(KERN_WARNING "%s:%s", dev->name, cp);
758		else
759			printk(KERN_INFO "%s:%s", dev->name, cp);
760	}
761	memset(cp, 0,  256);
762}
763
764
765/**
766 *	aac_handle_aif		-	Handle a message from the firmware
767 *	@dev: Which adapter this fib is from
768 *	@fibptr: Pointer to fibptr from adapter
769 *
770 *	This routine handles a driver notify fib from the adapter and
771 *	dispatches it to the appropriate routine for handling.
772 */
773
774#define AIF_SNIFF_TIMEOUT	(30*HZ)
775static void aac_handle_aif(struct aac_dev * dev, struct fib * fibptr)
776{
777	struct hw_fib * hw_fib = fibptr->hw_fib;
778	struct aac_aifcmd * aifcmd = (struct aac_aifcmd *)hw_fib->data;
779	int busy;
780	u32 container;
781	struct scsi_device *device;
782	enum {
783		NOTHING,
784		DELETE,
785		ADD,
786		CHANGE
787	} device_config_needed;
788
789	/* Sniff for container changes */
790
791	if (!dev || !dev->fsa_dev)
792		return;
793	container = (u32)-1;
794
795	/*
796	 *	We have set this up to try and minimize the number of
797	 * re-configures that take place. As a result of this when
798	 * certain AIF's come in we will set a flag waiting for another
799	 * type of AIF before setting the re-config flag.
800	 */
801	switch (le32_to_cpu(aifcmd->command)) {
802	case AifCmdDriverNotify:
803		switch (le32_to_cpu(((u32 *)aifcmd->data)[0])) {
804		/*
805		 *	Morph or Expand complete
806		 */
807		case AifDenMorphComplete:
808		case AifDenVolumeExtendComplete:
809			container = le32_to_cpu(((u32 *)aifcmd->data)[1]);
810			if (container >= dev->maximum_num_containers)
811				break;
812
813			/*
814			 *	Find the scsi_device associated with the SCSI
815			 * address. Make sure we have the right array, and if
816			 * so set the flag to initiate a new re-config once we
817			 * see an AifEnConfigChange AIF come through.
818			 */
819
820			if ((dev != NULL) && (dev->scsi_host_ptr != NULL)) {
821				device = scsi_device_lookup(dev->scsi_host_ptr,
822					CONTAINER_TO_CHANNEL(container),
823					CONTAINER_TO_ID(container),
824					CONTAINER_TO_LUN(container));
825				if (device) {
826					dev->fsa_dev[container].config_needed = CHANGE;
827					dev->fsa_dev[container].config_waiting_on = AifEnConfigChange;
828					dev->fsa_dev[container].config_waiting_stamp = jiffies;
829					scsi_device_put(device);
830				}
831			}
832		}
833
834		/*
835		 *	If we are waiting on something and this happens to be
836		 * that thing then set the re-configure flag.
837		 */
838		if (container != (u32)-1) {
839			if (container >= dev->maximum_num_containers)
840				break;
841			if ((dev->fsa_dev[container].config_waiting_on ==
842			    le32_to_cpu(*(u32 *)aifcmd->data)) &&
843			 time_before(jiffies, dev->fsa_dev[container].config_waiting_stamp + AIF_SNIFF_TIMEOUT))
844				dev->fsa_dev[container].config_waiting_on = 0;
845		} else for (container = 0;
846		    container < dev->maximum_num_containers; ++container) {
847			if ((dev->fsa_dev[container].config_waiting_on ==
848			    le32_to_cpu(*(u32 *)aifcmd->data)) &&
849			 time_before(jiffies, dev->fsa_dev[container].config_waiting_stamp + AIF_SNIFF_TIMEOUT))
850				dev->fsa_dev[container].config_waiting_on = 0;
851		}
852		break;
853
854	case AifCmdEventNotify:
855		switch (le32_to_cpu(((u32 *)aifcmd->data)[0])) {
856		/*
857		 *	Add an Array.
858		 */
859		case AifEnAddContainer:
860			container = le32_to_cpu(((u32 *)aifcmd->data)[1]);
861			if (container >= dev->maximum_num_containers)
862				break;
863			dev->fsa_dev[container].config_needed = ADD;
864			dev->fsa_dev[container].config_waiting_on =
865				AifEnConfigChange;
866			dev->fsa_dev[container].config_waiting_stamp = jiffies;
867			break;
868
869		/*
870		 *	Delete an Array.
871		 */
872		case AifEnDeleteContainer:
873			container = le32_to_cpu(((u32 *)aifcmd->data)[1]);
874			if (container >= dev->maximum_num_containers)
875				break;
876			dev->fsa_dev[container].config_needed = DELETE;
877			dev->fsa_dev[container].config_waiting_on =
878				AifEnConfigChange;
879			dev->fsa_dev[container].config_waiting_stamp = jiffies;
880			break;
881
882		/*
883		 *	Container change detected. If we currently are not
884		 * waiting on something else, setup to wait on a Config Change.
885		 */
886		case AifEnContainerChange:
887			container = le32_to_cpu(((u32 *)aifcmd->data)[1]);
888			if (container >= dev->maximum_num_containers)
889				break;
890			if (dev->fsa_dev[container].config_waiting_on &&
891			 time_before(jiffies, dev->fsa_dev[container].config_waiting_stamp + AIF_SNIFF_TIMEOUT))
892				break;
893			dev->fsa_dev[container].config_needed = CHANGE;
894			dev->fsa_dev[container].config_waiting_on =
895				AifEnConfigChange;
896			dev->fsa_dev[container].config_waiting_stamp = jiffies;
897			break;
898
899		case AifEnConfigChange:
900			break;
901
902		}
903
904		/*
905		 *	If we are waiting on something and this happens to be
906		 * that thing then set the re-configure flag.
907		 */
908		if (container != (u32)-1) {
909			if (container >= dev->maximum_num_containers)
910				break;
911			if ((dev->fsa_dev[container].config_waiting_on ==
912			    le32_to_cpu(*(u32 *)aifcmd->data)) &&
913			 time_before(jiffies, dev->fsa_dev[container].config_waiting_stamp + AIF_SNIFF_TIMEOUT))
914				dev->fsa_dev[container].config_waiting_on = 0;
915		} else for (container = 0;
916		    container < dev->maximum_num_containers; ++container) {
917			if ((dev->fsa_dev[container].config_waiting_on ==
918			    le32_to_cpu(*(u32 *)aifcmd->data)) &&
919			 time_before(jiffies, dev->fsa_dev[container].config_waiting_stamp + AIF_SNIFF_TIMEOUT))
920				dev->fsa_dev[container].config_waiting_on = 0;
921		}
922		break;
923
924	case AifCmdJobProgress:
925		/*
926		 *	These are job progress AIF's. When a Clear is being
927		 * done on a container it is initially created then hidden from
928		 * the OS. When the clear completes we don't get a config
929		 * change so we monitor the job status complete on a clear then
930		 * wait for a container change.
931		 */
932
933		if ((((u32 *)aifcmd->data)[1] == cpu_to_le32(AifJobCtrZero))
934		 && ((((u32 *)aifcmd->data)[6] == ((u32 *)aifcmd->data)[5])
935		  || (((u32 *)aifcmd->data)[4] == cpu_to_le32(AifJobStsSuccess)))) {
936			for (container = 0;
937			    container < dev->maximum_num_containers;
938			    ++container) {
939				/*
940				 * Stomp on all config sequencing for all
941				 * containers?
942				 */
943				dev->fsa_dev[container].config_waiting_on =
944					AifEnContainerChange;
945				dev->fsa_dev[container].config_needed = ADD;
946				dev->fsa_dev[container].config_waiting_stamp =
947					jiffies;
948			}
949		}
950		if ((((u32 *)aifcmd->data)[1] == cpu_to_le32(AifJobCtrZero))
951		 && (((u32 *)aifcmd->data)[6] == 0)
952		 && (((u32 *)aifcmd->data)[4] == cpu_to_le32(AifJobStsRunning))) {
953			for (container = 0;
954			    container < dev->maximum_num_containers;
955			    ++container) {
956				/*
957				 * Stomp on all config sequencing for all
958				 * containers?
959				 */
960				dev->fsa_dev[container].config_waiting_on =
961					AifEnContainerChange;
962				dev->fsa_dev[container].config_needed = DELETE;
963				dev->fsa_dev[container].config_waiting_stamp =
964					jiffies;
965			}
966		}
967		break;
968	}
969
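	/*
	 *	Pick the first container with a pending configuration change
	 *	that is no longer gated on another AIF and has not yet timed out.
	 */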
970	device_config_needed = NOTHING;
971	for (container = 0; container < dev->maximum_num_containers;
972	    ++container) {
973		if ((dev->fsa_dev[container].config_waiting_on == 0) &&
974			(dev->fsa_dev[container].config_needed != NOTHING) &&
975			time_before(jiffies, dev->fsa_dev[container].config_waiting_stamp + AIF_SNIFF_TIMEOUT)) {
976			device_config_needed =
977				dev->fsa_dev[container].config_needed;
978			dev->fsa_dev[container].config_needed = NOTHING;
979			break;
980		}
981	}
982	if (device_config_needed == NOTHING)
983		return;
984
985	/*
986	 *	If we decided that a re-configuration needs to be done,
987	 * schedule it here on the way out the door, please close the door
988	 * behind you.
989	 */
990
991	busy = 0;
992
993
994	/*
995	 *	Find the scsi_device associated with the SCSI address,
996	 * and mark it as changed, invalidating the cache. This deals
997	 * with changes to existing device IDs.
998	 */
999
1000	if (!dev || !dev->scsi_host_ptr)
1001		return;
1002	/*
1003	 * force reload of disk info via aac_probe_container
1004	 */
1005	if ((device_config_needed == CHANGE)
1006	 && (dev->fsa_dev[container].valid == 1))
1007		dev->fsa_dev[container].valid = 2;
1008	if ((device_config_needed == CHANGE) ||
1009			(device_config_needed == ADD))
1010		aac_probe_container(dev, container);
1011	device = scsi_device_lookup(dev->scsi_host_ptr,
1012		CONTAINER_TO_CHANNEL(container),
1013		CONTAINER_TO_ID(container),
1014		CONTAINER_TO_LUN(container));
1015	if (device) {
1016		switch (device_config_needed) {
1017		case DELETE:
1018		case CHANGE:
1019			scsi_rescan_device(&device->sdev_gendev);
1020
1021		default:
1022			break;
1023		}
1024		scsi_device_put(device);
1025	}
1026	if (device_config_needed == ADD) {
1027		scsi_add_device(dev->scsi_host_ptr,
1028		  CONTAINER_TO_CHANNEL(container),
1029		  CONTAINER_TO_ID(container),
1030		  CONTAINER_TO_LUN(container));
1031	}
1032
1033}
1034
1035static int _aac_reset_adapter(struct aac_dev *aac)
1036{
1037	int index, quirks;
1038	int retval;
1039	struct Scsi_Host *host;
1040	struct scsi_device *dev;
1041	struct scsi_cmnd *command;
1042	struct scsi_cmnd *command_list;
1043
1044	/*
1045	 * Assumptions:
1046	 *	- host is locked.
1047	 *	- in_reset is asserted, so no new i/o is getting to the
1048	 *	  card.
1049	 *	- The card is dead.
1050	 */
1051	host = aac->scsi_host_ptr;
1052	scsi_block_requests(host);
1053	aac_adapter_disable_int(aac);
1054	spin_unlock_irq(host->host_lock);
1055	kthread_stop(aac->thread);
1056
1057	/*
1058	 *	A positive health value means the adapter is in a known
1059	 * DEAD PANIC state and could be reset to `try again'.
1060	 */
1061	retval = aac_adapter_restart(aac, aac_adapter_check_health(aac));
1062
1063	if (retval)
1064		goto out;
1065
1066	/*
1067	 *	Loop through the fibs, close the synchronous FIBS
1068	 */
1069	for (index = 0; index < (aac->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB); index++) {
1070		struct fib *fib = &aac->fibs[index];
1071		if (!(fib->hw_fib->header.XferState & cpu_to_le32(NoResponseExpected | Async)) &&
1072		  (fib->hw_fib->header.XferState & cpu_to_le32(ResponseExpected))) {
1073			unsigned long flagv;
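			/* Wake any caller still blocked in aac_fib_send() waiting on this fib. */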
1074			spin_lock_irqsave(&fib->event_lock, flagv);
1075			up(&fib->event_wait);
1076			spin_unlock_irqrestore(&fib->event_lock, flagv);
1077			schedule();
1078		}
1079	}
1080	index = aac->cardtype;
1081
1082	/*
1083	 * Re-initialize the adapter, first free resources, then carefully
1084	 * apply the initialization sequence to come back again. Only risk
1085	 * is a change in Firmware dropping cache, it is assumed the caller
1086	 * will ensure that i/o is quiesced and the card is flushed in that
1087	 * case.
1088	 */
1089	aac_fib_map_free(aac);
1090	aac->hw_fib_va = NULL;
1091	aac->hw_fib_pa = 0;
1092	pci_free_consistent(aac->pdev, aac->comm_size, aac->comm_addr, aac->comm_phys);
1093	aac->comm_addr = NULL;
1094	aac->comm_phys = 0;
1095	kfree(aac->queues);
1096	aac->queues = NULL;
1097	free_irq(aac->pdev->irq, aac);
1098	kfree(aac->fsa_dev);
1099	aac->fsa_dev = NULL;
1100	if (aac_get_driver_ident(index)->quirks & AAC_QUIRK_31BIT) {
1101		if (((retval = pci_set_dma_mask(aac->pdev, DMA_32BIT_MASK))) ||
1102		  ((retval = pci_set_consistent_dma_mask(aac->pdev, DMA_32BIT_MASK))))
1103			goto out;
1104	} else {
1105		if (((retval = pci_set_dma_mask(aac->pdev, 0x7FFFFFFFULL))) ||
1106		  ((retval = pci_set_consistent_dma_mask(aac->pdev, 0x7FFFFFFFULL))))
1107			goto out;
1108	}
1109	if ((retval = (*(aac_get_driver_ident(index)->init))(aac)))
1110		goto out;
1111	if (aac_get_driver_ident(index)->quirks & AAC_QUIRK_31BIT)
1112		if ((retval = pci_set_dma_mask(aac->pdev, DMA_32BIT_MASK)))
1113			goto out;
1114	aac->thread = kthread_run(aac_command_thread, aac, aac->name);
1115	if (IS_ERR(aac->thread)) {
1116		retval = PTR_ERR(aac->thread);
1117		goto out;
1118	}
1119	(void)aac_get_adapter_info(aac);
1120	quirks = aac_get_driver_ident(index)->quirks;
1121	if ((quirks & AAC_QUIRK_34SG) && (host->sg_tablesize > 34)) {
1122 		host->sg_tablesize = 34;
1123 		host->max_sectors = (host->sg_tablesize * 8) + 112;
1124 	}
1125 	if ((quirks & AAC_QUIRK_17SG) && (host->sg_tablesize > 17)) {
1126 		host->sg_tablesize = 17;
1127 		host->max_sectors = (host->sg_tablesize * 8) + 112;
1128 	}
1129	aac_get_config_status(aac, 1);
1130	aac_get_containers(aac);
1131	/*
1132	 * This is where the assumption that the Adapter is quiesced
1133	 * is important.
1134	 */
1135	command_list = NULL;
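	/*
	 * Chain every command still owned by the firmware through SCp.buffer,
	 * then complete each one below with TASK SET FULL so the midlayer will
	 * reissue it.
	 */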
1136	__shost_for_each_device(dev, host) {
1137		unsigned long flags;
1138		spin_lock_irqsave(&dev->list_lock, flags);
1139		list_for_each_entry(command, &dev->cmd_list, list)
1140			if (command->SCp.phase == AAC_OWNER_FIRMWARE) {
1141				command->SCp.buffer = (struct scatterlist *)command_list;
1142				command_list = command;
1143			}
1144		spin_unlock_irqrestore(&dev->list_lock, flags);
1145	}
1146	while ((command = command_list)) {
1147		command_list = (struct scsi_cmnd *)command->SCp.buffer;
1148		command->SCp.buffer = NULL;
1149		command->result = DID_OK << 16
1150		  | COMMAND_COMPLETE << 8
1151		  | SAM_STAT_TASK_SET_FULL;
1152		command->SCp.phase = AAC_OWNER_ERROR_HANDLER;
1153		command->scsi_done(command);
1154	}
1155	retval = 0;
1156
1157out:
1158	aac->in_reset = 0;
1159	scsi_unblock_requests(host);
1160	spin_lock_irq(host->host_lock);
1161	return retval;
1162}
1163
1164int aac_check_health(struct aac_dev * aac)
1165{
1166	int BlinkLED;
1167	unsigned long time_now, flagv = 0;
1168	struct list_head * entry;
1169	struct Scsi_Host * host;
1170
1171	/* Extending the scope of fib_lock slightly to protect aac->in_reset */
1172	if (spin_trylock_irqsave(&aac->fib_lock, flagv) == 0)
1173		return 0;
1174
1175	if (aac->in_reset || !(BlinkLED = aac_adapter_check_health(aac))) {
1176		spin_unlock_irqrestore(&aac->fib_lock, flagv);
1177		return 0; /* OK */
1178	}
1179
1180	aac->in_reset = 1;
1181
1182	/* Fake up an AIF:
1183	 *	aac_aifcmd.command = AifCmdEventNotify = 1
1184	 *	aac_aifcmd.seqnum = 0xFFFFFFFF
1185	 *	aac_aifcmd.data[0] = AifEnExpEvent = 23
1186	 *	aac_aifcmd.data[1] = AifExeFirmwarePanic = 3
1187	 *	aac.aifcmd.data[2] = AifHighPriority = 3
1188	 *	aac.aifcmd.data[3] = BlinkLED
1189	 */
1190
1191	time_now = jiffies/HZ;
1192	entry = aac->fib_list.next;
1193
1194	/*
1195	 * For each Context that is on the
1196	 * fibctxList, make a copy of the
1197	 * fib, and then set the event to wake up the
1198	 * thread that is waiting for it.
1199	 */
1200	while (entry != &aac->fib_list) {
1201		/*
1202		 * Extract the fibctx
1203		 */
1204		struct aac_fib_context *fibctx = list_entry(entry, struct aac_fib_context, next);
1205		struct hw_fib * hw_fib;
1206		struct fib * fib;
1207		/*
1208		 * Check if the queue is getting
1209		 * backlogged
1210		 */
1211		if (fibctx->count > 20) {
1212			/*
1213			 * It's *not* jiffies folks,
1214			 * but jiffies / HZ, so do not
1215			 * panic ...
1216			 */
1217			u32 time_last = fibctx->jiffies;
1218			/*
1219			 * Has it been > 2 minutes
1220			 * since the last read off
1221			 * the queue?
1222			 */
1223			if ((time_now - time_last) > aif_timeout) {
1224				entry = entry->next;
1225				aac_close_fib_context(aac, fibctx);
1226				continue;
1227			}
1228		}
1229		/*
1230		 * Warning: no sleep allowed while
1231		 * holding spinlock
1232		 */
1233		hw_fib = kmalloc(sizeof(struct hw_fib), GFP_ATOMIC);
1234		fib = kmalloc(sizeof(struct fib), GFP_ATOMIC);
1235		if (fib && hw_fib) {
1236			struct aac_aifcmd * aif;
1237
1238			memset(hw_fib, 0, sizeof(struct hw_fib));
1239			memset(fib, 0, sizeof(struct fib));
1240			fib->hw_fib = hw_fib;
1241			fib->dev = aac;
1242			aac_fib_init(fib);
1243			fib->type = FSAFS_NTC_FIB_CONTEXT;
1244			fib->size = sizeof (struct fib);
1245			fib->data = hw_fib->data;
1246			aif = (struct aac_aifcmd *)hw_fib->data;
1247			aif->command = cpu_to_le32(AifCmdEventNotify);
1248		 	aif->seqnum = cpu_to_le32(0xFFFFFFFF);
1249		 	aif->data[0] = cpu_to_le32(AifEnExpEvent);
1250			aif->data[1] = cpu_to_le32(AifExeFirmwarePanic);
1251		 	aif->data[2] = cpu_to_le32(AifHighPriority);
1252			aif->data[3] = cpu_to_le32(BlinkLED);
1253
1254			/*
1255			 * Put the FIB onto the
1256			 * fibctx's fibs
1257			 */
1258			list_add_tail(&fib->fiblink, &fibctx->fib_list);
1259			fibctx->count++;
1260			/*
1261			 * Set the event to wake up the
1262			 * thread that is waiting.
1263			 */
1264			up(&fibctx->wait_sem);
1265		} else {
1266			printk(KERN_WARNING "aifd: didn't allocate NewFib.\n");
1267			kfree(fib);
1268			kfree(hw_fib);
1269		}
1270		entry = entry->next;
1271	}
1272
1273	spin_unlock_irqrestore(&aac->fib_lock, flagv);
1274
1275	if (BlinkLED < 0) {
1276		printk(KERN_ERR "%s: Host adapter dead %d\n", aac->name, BlinkLED);
1277		goto out;
1278	}
1279
1280	printk(KERN_ERR "%s: Host adapter BLINK LED 0x%x\n", aac->name, BlinkLED);
1281
1282	host = aac->scsi_host_ptr;
1283	spin_lock_irqsave(host->host_lock, flagv);
1284	BlinkLED = _aac_reset_adapter(aac);
1285	spin_unlock_irqrestore(host->host_lock, flagv);
1286	return BlinkLED;
1287
1288out:
1289	aac->in_reset = 0;
1290	return BlinkLED;
1291}
1292
1293
1294/**
1295 *	aac_command_thread	-	command processing thread
1296 *	@dev: Adapter to monitor
1297 *
1298 *	Waits on the commandready event in its queue. When the event gets set
1299 *	it will pull FIBs off its queue. It will continue to pull FIBs off
1300 *	until the queue is empty. When the queue is empty it will wait for
1301 *	more FIBs.
1302 */
1303
1304int aac_command_thread(void *data)
1305{
1306	struct aac_dev *dev = data;
1307	struct hw_fib *hw_fib, *hw_newfib;
1308	struct fib *fib, *newfib;
1309	struct aac_fib_context *fibctx;
1310	unsigned long flags;
1311	DECLARE_WAITQUEUE(wait, current);
1312
1313	/*
1314	 *	We can only have one thread per adapter for AIF's.
1315	 */
1316	if (dev->aif_thread)
1317		return -EINVAL;
1318
1319	/*
1320	 *	Let the DPC know it has a place to send the AIF's to.
1321	 */
1322	dev->aif_thread = 1;
1323	add_wait_queue(&dev->queues->queue[HostNormCmdQueue].cmdready, &wait);
1324	set_current_state(TASK_INTERRUPTIBLE);
1325	dprintk ((KERN_INFO "aac_command_thread start\n"));
1326	while(1)
1327	{
1328		spin_lock_irqsave(dev->queues->queue[HostNormCmdQueue].lock, flags);
1329		while(!list_empty(&(dev->queues->queue[HostNormCmdQueue].cmdq))) {
1330			struct list_head *entry;
1331			struct aac_aifcmd * aifcmd;
1332
1333			set_current_state(TASK_RUNNING);
1334
1335			entry = dev->queues->queue[HostNormCmdQueue].cmdq.next;
1336			list_del(entry);
1337
1338			spin_unlock_irqrestore(dev->queues->queue[HostNormCmdQueue].lock, flags);
1339			fib = list_entry(entry, struct fib, fiblink);
1340			/*
1341			 *	We will process the FIB here or pass it to a
1342			 *	worker thread that is TBD. We really can't
1343			 *	do anything at this point since we don't have
1344			 *	anything defined for this thread to do.
1345			 */
1346			hw_fib = fib->hw_fib;
1347			memset(fib, 0, sizeof(struct fib));
1348			fib->type = FSAFS_NTC_FIB_CONTEXT;
1349			fib->size = sizeof( struct fib );
1350			fib->hw_fib = hw_fib;
1351			fib->data = hw_fib->data;
1352			fib->dev = dev;
1353			/*
1354			 *	We only handle AifRequest fibs from the adapter.
1355			 */
1356			aifcmd = (struct aac_aifcmd *) hw_fib->data;
1357			if (aifcmd->command == cpu_to_le32(AifCmdDriverNotify)) {
1358				/* Handle Driver Notify Events */
1359				aac_handle_aif(dev, fib);
1360				*(__le32 *)hw_fib->data = cpu_to_le32(ST_OK);
1361				aac_fib_adapter_complete(fib, (u16)sizeof(u32));
1362			} else {
1363				struct list_head *entry;
1364				/* The u32 here is important and intended. We are using
1365				   32bit wrapping time to fit the adapter field */
1366
1367				u32 time_now, time_last;
1368				unsigned long flagv;
1369				unsigned num;
1370				struct hw_fib ** hw_fib_pool, ** hw_fib_p;
1371				struct fib ** fib_pool, ** fib_p;
1372
1373				/* Sniff events */
1374				if ((aifcmd->command ==
1375				     cpu_to_le32(AifCmdEventNotify)) ||
1376				    (aifcmd->command ==
1377				     cpu_to_le32(AifCmdJobProgress))) {
1378					aac_handle_aif(dev, fib);
1379				}
1380
1381				time_now = jiffies/HZ;
1382
1383				/*
1384				 * Warning: no sleep allowed while
1385				 * holding spinlock. We take the estimate
1386				 * and pre-allocate a set of fibs outside the
1387				 * lock.
1388				 */
1389				num = le32_to_cpu(dev->init->AdapterFibsSize)
1390				    / sizeof(struct hw_fib); /* some extra */
1391				spin_lock_irqsave(&dev->fib_lock, flagv);
1392				entry = dev->fib_list.next;
1393				while (entry != &dev->fib_list) {
1394					entry = entry->next;
1395					++num;
1396				}
1397				spin_unlock_irqrestore(&dev->fib_lock, flagv);
1398				hw_fib_pool = NULL;
1399				fib_pool = NULL;
1400				if (num
1401				 && ((hw_fib_pool = kmalloc(sizeof(struct hw_fib *) * num, GFP_KERNEL)))
1402				 && ((fib_pool = kmalloc(sizeof(struct fib *) * num, GFP_KERNEL)))) {
1403					hw_fib_p = hw_fib_pool;
1404					fib_p = fib_pool;
1405					while (hw_fib_p < &hw_fib_pool[num]) {
1406						if (!(*(hw_fib_p++) = kmalloc(sizeof(struct hw_fib), GFP_KERNEL))) {
1407							--hw_fib_p;
1408							break;
1409						}
1410						if (!(*(fib_p++) = kmalloc(sizeof(struct fib), GFP_KERNEL))) {
1411							kfree(*(--hw_fib_p));
1412							break;
1413						}
1414					}
1415					if ((num = hw_fib_p - hw_fib_pool) == 0) {
1416						kfree(fib_pool);
1417						fib_pool = NULL;
1418						kfree(hw_fib_pool);
1419						hw_fib_pool = NULL;
1420					}
1421				} else {
1422					kfree(hw_fib_pool);
1423					hw_fib_pool = NULL;
1424				}
1425				spin_lock_irqsave(&dev->fib_lock, flagv);
1426				entry = dev->fib_list.next;
1427				/*
1428				 * For each Context that is on the
1429				 * fibctxList, make a copy of the
1430				 * fib, and then set the event to wake up the
1431				 * thread that is waiting for it.
1432				 */
1433				hw_fib_p = hw_fib_pool;
1434				fib_p = fib_pool;
1435				while (entry != &dev->fib_list) {
1436					/*
1437					 * Extract the fibctx
1438					 */
1439					fibctx = list_entry(entry, struct aac_fib_context, next);
1440					/*
1441					 * Check if the queue is getting
1442					 * backlogged
1443					 */
1444					if (fibctx->count > 20)
1445					{
1446						/*
1447						 * It's *not* jiffies folks,
1448						 * but jiffies / HZ so do not
1449						 * panic ...
1450						 */
1451						time_last = fibctx->jiffies;
1452						/*
1453						 * Has it been > 2 minutes
1454						 * since the last read off
1455						 * the queue?
1456						 */
1457						if ((time_now - time_last) > aif_timeout) {
1458							entry = entry->next;
1459							aac_close_fib_context(dev, fibctx);
1460							continue;
1461						}
1462					}
1463					/*
1464					 * Warning: no sleep allowed while
1465					 * holding spinlock
1466					 */
1467					if (hw_fib_p < &hw_fib_pool[num]) {
1468						hw_newfib = *hw_fib_p;
1469						*(hw_fib_p++) = NULL;
1470						newfib = *fib_p;
1471						*(fib_p++) = NULL;
1472						/*
1473						 * Make the copy of the FIB
1474						 */
1475						memcpy(hw_newfib, hw_fib, sizeof(struct hw_fib));
1476						memcpy(newfib, fib, sizeof(struct fib));
1477						newfib->hw_fib = hw_newfib;
1478						/*
1479						 * Put the FIB onto the
1480						 * fibctx's fibs
1481						 */
1482						list_add_tail(&newfib->fiblink, &fibctx->fib_list);
1483						fibctx->count++;
1484						/*
1485						 * Set the event to wake up the
1486						 * thread that is waiting.
1487						 */
1488						up(&fibctx->wait_sem);
1489					} else {
1490						printk(KERN_WARNING "aifd: didn't allocate NewFib.\n");
1491					}
1492					entry = entry->next;
1493				}
1494				/*
1495				 *	Set the status of this FIB
1496				 */
1497				*(__le32 *)hw_fib->data = cpu_to_le32(ST_OK);
1498				aac_fib_adapter_complete(fib, sizeof(u32));
1499				spin_unlock_irqrestore(&dev->fib_lock, flagv);
1500				/* Free up the remaining resources */
1501				hw_fib_p = hw_fib_pool;
1502				fib_p = fib_pool;
1503				while (hw_fib_p < &hw_fib_pool[num]) {
1504					kfree(*hw_fib_p);
1505					kfree(*fib_p);
1506					++fib_p;
1507					++hw_fib_p;
1508				}
1509				kfree(hw_fib_pool);
1510				kfree(fib_pool);
1511			}
1512			kfree(fib);
1513			spin_lock_irqsave(dev->queues->queue[HostNormCmdQueue].lock, flags);
1514		}
1515		/*
1516		 *	There are no more AIF's
1517		 */
1518		spin_unlock_irqrestore(dev->queues->queue[HostNormCmdQueue].lock, flags);
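		/* Sleep until more AIFs arrive on the cmdready queue or we are asked to stop. */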
1519		schedule();
1520
1521		if (kthread_should_stop())
1522			break;
1523		set_current_state(TASK_INTERRUPTIBLE);
1524	}
1525	if (dev->queues)
1526		remove_wait_queue(&dev->queues->queue[HostNormCmdQueue].cmdready, &wait);
1527	dev->aif_thread = 0;
1528	return 0;
1529}
1530