commsup.c revision 33524b70e8f3dd55a4ba78ad81742c7814e7b0ed
1/*
2 *	Adaptec AAC series RAID controller driver
3 *	(c) Copyright 2001 Red Hat Inc.	<alan@redhat.com>
4 *
5 * based on the old aacraid driver that is..
6 * Adaptec aacraid device driver for Linux.
7 *
8 * Copyright (c) 2000 Adaptec, Inc. (aacraid@adaptec.com)
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License as published by
12 * the Free Software Foundation; either version 2, or (at your option)
13 * any later version.
14 *
15 * This program is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
18 * GNU General Public License for more details.
19 *
20 * You should have received a copy of the GNU General Public License
21 * along with this program; see the file COPYING.  If not, write to
22 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
23 *
24 * Module Name:
25 *  commsup.c
26 *
27 * Abstract: Contains all routines that are required for FSA host/adapter
28 *    communication.
29 *
30 */
31
32#include <linux/kernel.h>
33#include <linux/init.h>
34#include <linux/types.h>
35#include <linux/sched.h>
36#include <linux/pci.h>
37#include <linux/spinlock.h>
38#include <linux/slab.h>
39#include <linux/completion.h>
40#include <linux/blkdev.h>
41#include <linux/delay.h>
42#include <linux/kthread.h>
43#include <linux/interrupt.h>
44#include <scsi/scsi.h>
45#include <scsi/scsi_host.h>
46#include <scsi/scsi_device.h>
47#include <scsi/scsi_cmnd.h>
48#include <asm/semaphore.h>
49
50#include "aacraid.h"
51
52/**
53 *	fib_map_alloc		-	allocate the fib objects
54 *	@dev: Adapter to allocate for
55 *
56 *	Allocate and map the shared PCI space for the FIB blocks used to
57 *	talk to the Adaptec firmware.
58 */
59
60static int fib_map_alloc(struct aac_dev *dev)
61{
62	dprintk((KERN_INFO
63	  "allocate hardware fibs pci_alloc_consistent(%p, %d * (%d + %d), %p)\n",
64	  dev->pdev, dev->max_fib_size, dev->scsi_host_ptr->can_queue,
65	  AAC_NUM_MGT_FIB, &dev->hw_fib_pa));
66	if((dev->hw_fib_va = pci_alloc_consistent(dev->pdev, dev->max_fib_size
67	  * (dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB),
68	  &dev->hw_fib_pa))==NULL)
69		return -ENOMEM;
70	return 0;
71}
72
73/**
74 *	aac_fib_map_free		-	free the fib objects
75 *	@dev: Adapter to free
76 *
77 *	Free the PCI mappings and the memory allocated for FIB blocks
78 *	on this adapter.
79 */
80
81void aac_fib_map_free(struct aac_dev *dev)
82{
83	pci_free_consistent(dev->pdev, dev->max_fib_size * (dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB), dev->hw_fib_va, dev->hw_fib_pa);
84}
85
86/**
87 *	aac_fib_setup	-	setup the fibs
88 *	@dev: Adapter to set up
89 *
90 *	Allocate the PCI space for the fibs, map it and then initialise the
91 *	fib area, the unmapped fib data and also the free list
92 */
93
94int aac_fib_setup(struct aac_dev * dev)
95{
96	struct fib *fibptr;
97	struct hw_fib *hw_fib_va;
98	dma_addr_t hw_fib_pa;
99	int i;
100
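	/*
	 *	Allocate the fib pool; on failure halve the total fib count
	 *	(adjusting the host's can_queue to match) and retry until the
	 *	allocation succeeds or the total has been reduced to 64 fibs
	 *	or fewer.
	 */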
101	while (((i = fib_map_alloc(dev)) == -ENOMEM)
102	 && (dev->scsi_host_ptr->can_queue > (64 - AAC_NUM_MGT_FIB))) {
103		dev->init->MaxIoCommands = cpu_to_le32((dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB) >> 1);
104		dev->scsi_host_ptr->can_queue = le32_to_cpu(dev->init->MaxIoCommands) - AAC_NUM_MGT_FIB;
105	}
106	if (i<0)
107		return -ENOMEM;
108
109	hw_fib_va = dev->hw_fib_va;
110	hw_fib_pa = dev->hw_fib_pa;
111	memset(hw_fib_va, 0, dev->max_fib_size * (dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB));
112	/*
113	 *	Initialise the fibs
114	 */
115	for (i = 0, fibptr = &dev->fibs[i]; i < (dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB); i++, fibptr++)
116	{
117		fibptr->dev = dev;
118		fibptr->hw_fib = hw_fib_va;
119		fibptr->data = (void *) fibptr->hw_fib->data;
120		fibptr->next = fibptr+1;	/* Forward chain the fibs */
121		init_MUTEX_LOCKED(&fibptr->event_wait);
122		spin_lock_init(&fibptr->event_lock);
123		hw_fib_va->header.XferState = cpu_to_le32(0xffffffff);
124		hw_fib_va->header.SenderSize = cpu_to_le16(dev->max_fib_size);
125		fibptr->hw_fib_pa = hw_fib_pa;
126		hw_fib_va = (struct hw_fib *)((unsigned char *)hw_fib_va + dev->max_fib_size);
127		hw_fib_pa = hw_fib_pa + dev->max_fib_size;
128	}
129	/*
130	 *	Add the fib chain to the free list
131	 */
132	dev->fibs[dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB - 1].next = NULL;
133	/*
134	 *	Enable this to debug out of queue space
135	 */
136	dev->free_fib = &dev->fibs[0];
137	return 0;
138}
139
140/**
141 *	aac_fib_alloc	-	allocate a fib
142 *	@dev: Adapter to allocate the fib for
143 *
144 *	Allocate a fib from the adapter fib pool. If the pool is empty we
145 *	return NULL.
146 */
147
148struct fib *aac_fib_alloc(struct aac_dev *dev)
149{
150	struct fib * fibptr;
151	unsigned long flags;
152	spin_lock_irqsave(&dev->fib_lock, flags);
153	fibptr = dev->free_fib;
154	if(!fibptr){
155		spin_unlock_irqrestore(&dev->fib_lock, flags);
156		return fibptr;
157	}
158	dev->free_fib = fibptr->next;
159	spin_unlock_irqrestore(&dev->fib_lock, flags);
160	/*
161	 *	Set the proper node type code and node byte size
162	 */
163	fibptr->type = FSAFS_NTC_FIB_CONTEXT;
164	fibptr->size = sizeof(struct fib);
165	/*
166	 *	Null out fields that depend on being zero at the start of
167	 *	each I/O
168	 */
169	fibptr->hw_fib->header.XferState = 0;
170	fibptr->callback = NULL;
171	fibptr->callback_data = NULL;
172
173	return fibptr;
174}
175
176/**
177 *	aac_fib_free	-	free a fib
178 *	@fibptr: fib to free up
179 *
180 *	Frees up a fib and places it on the appropriate queue
181 *	(either free or timed out)
182 */
183
184void aac_fib_free(struct fib *fibptr)
185{
186	unsigned long flags;
187
188	spin_lock_irqsave(&fibptr->dev->fib_lock, flags);
189	if (fibptr->flags & FIB_CONTEXT_FLAG_TIMED_OUT) {
190		aac_config.fib_timeouts++;
191		fibptr->next = fibptr->dev->timeout_fib;
192		fibptr->dev->timeout_fib = fibptr;
193	} else {
194		if (fibptr->hw_fib->header.XferState != 0) {
195			printk(KERN_WARNING "aac_fib_free, XferState != 0, fibptr = 0x%p, XferState = 0x%x\n",
196				 (void*)fibptr,
197				 le32_to_cpu(fibptr->hw_fib->header.XferState));
198		}
199		fibptr->next = fibptr->dev->free_fib;
200		fibptr->dev->free_fib = fibptr;
201	}
202	spin_unlock_irqrestore(&fibptr->dev->fib_lock, flags);
203}
204
205/**
206 *	aac_fib_init	-	initialise a fib
207 *	@fibptr: The fib to initialize
208 *
209 *	Set up the generic fib fields ready for use
210 */
211
212void aac_fib_init(struct fib *fibptr)
213{
214	struct hw_fib *hw_fib = fibptr->hw_fib;
215
216	hw_fib->header.StructType = FIB_MAGIC;
217	hw_fib->header.Size = cpu_to_le16(fibptr->dev->max_fib_size);
218	hw_fib->header.XferState = cpu_to_le32(HostOwned | FibInitialized | FibEmpty | FastResponseCapable);
219	hw_fib->header.SenderFibAddress = 0; /* Filled in later if needed */
220	hw_fib->header.ReceiverFibAddress = cpu_to_le32(fibptr->hw_fib_pa);
221	hw_fib->header.SenderSize = cpu_to_le16(fibptr->dev->max_fib_size);
222}
223
224/**
225 *	fib_dealloc		-	deallocate a fib
226 *	@fibptr: fib to deallocate
227 *
228 *	Will deallocate and return to the free pool the FIB pointed to by the
229 *	caller.
230 */
231
232static void fib_dealloc(struct fib * fibptr)
233{
234	struct hw_fib *hw_fib = fibptr->hw_fib;
235	BUG_ON(hw_fib->header.StructType != FIB_MAGIC);
236	hw_fib->header.XferState = 0;
237}
238
239/*
240 *	Communication primitives define and support the queuing method we use to
241 *	support host to adapter communication. All queue accesses happen through
242 *	these routines, which are the only routines with knowledge of how these
243 *	queues are implemented.
244 */
245
246/**
247 *	aac_get_entry		-	get a queue entry
248 *	@dev: Adapter
249 *	@qid: Queue Number
250 *	@entry: Entry return
251 *	@index: Index return
252 *	@nonotify: notification control
253 *
254 *	With a priority the routine returns a queue entry if the queue has free entries. If the queue
255 *	is full (no free entries) then no entry is returned and the function returns 0, otherwise 1 is
256 *	returned.
257 */
258
259static int aac_get_entry (struct aac_dev * dev, u32 qid, struct aac_entry **entry, u32 * index, unsigned long *nonotify)
260{
261	struct aac_queue * q;
262	unsigned long idx;
263
264	/*
265	 *	All of the queues wrap when they reach the end, so we check
266	 *	to see if they have reached the end and if they have we just
267	 *	set the index back to zero. This is a wrap. You could or off
268	 *	the high bits in all updates but this is a bit faster I think.
269	 */
270
271	q = &dev->queues->queue[qid];
272
273	idx = *index = le32_to_cpu(*(q->headers.producer));
274	/* Interrupt Moderation, only interrupt for first two entries */
275	if (idx != le32_to_cpu(*(q->headers.consumer))) {
276		if (--idx == 0) {
277			if (qid == AdapNormCmdQueue)
278				idx = ADAP_NORM_CMD_ENTRIES;
279			else
280				idx = ADAP_NORM_RESP_ENTRIES;
281		}
282		if (idx != le32_to_cpu(*(q->headers.consumer)))
283			*nonotify = 1;
284	}
285
286	if (qid == AdapNormCmdQueue) {
287	        if (*index >= ADAP_NORM_CMD_ENTRIES)
288			*index = 0; /* Wrap to front of the Producer Queue. */
289	} else {
290		if (*index >= ADAP_NORM_RESP_ENTRIES)
291			*index = 0; /* Wrap to front of the Producer Queue. */
292	}
293
294        if ((*index + 1) == le32_to_cpu(*(q->headers.consumer))) { /* Queue is full */
295		printk(KERN_WARNING "Queue %d full, %u outstanding.\n",
296				qid, q->numpending);
297		return 0;
298	} else {
299	        *entry = q->base + *index;
300		return 1;
301	}
302}
303
304/**
305 *	aac_queue_get		-	get the next free QE
306 *	@dev: Adapter
307 *	@index: Returned index
308 *	@priority: Priority of fib
309 *	@fib: Fib to associate with the queue entry
310 *	@wait: Wait if queue full
311 *	@fibptr: Driver fib object to go with fib
312 *	@nonotify: Don't notify the adapter
313 *
314 *	Gets the next free QE off the requested priority adapter command
315 *	queue and associates the Fib with the QE. The QE represented by
316 *	index is ready to insert on the queue when this routine returns
317 *	success.
318 */
319
320static int aac_queue_get(struct aac_dev * dev, u32 * index, u32 qid, struct hw_fib * hw_fib, int wait, struct fib * fibptr, unsigned long *nonotify)
321{
322	struct aac_entry * entry = NULL;
323	int map = 0;
324
325	if (qid == AdapNormCmdQueue) {
326		/*  if no entries wait for some if caller wants to */
327        	while (!aac_get_entry(dev, qid, &entry, index, nonotify))
328        	{
329			printk(KERN_ERR "GetEntries failed\n");
330		}
331	        /*
332	         *	Setup queue entry with a command, status and fib mapped
333	         */
334	        entry->size = cpu_to_le32(le16_to_cpu(hw_fib->header.Size));
335	        map = 1;
336	} else {
337	        while(!aac_get_entry(dev, qid, &entry, index, nonotify))
338	        {
339			/* if no entries wait for some if caller wants to */
340		}
341        	/*
342        	 *	Setup queue entry with command, status and fib mapped
343        	 */
344        	entry->size = cpu_to_le32(le16_to_cpu(hw_fib->header.Size));
345        	entry->addr = hw_fib->header.SenderFibAddress;
346     			/* Restore the adapter's pointer to the FIB */
347		hw_fib->header.ReceiverFibAddress = hw_fib->header.SenderFibAddress;	/* Let the adapter know where to find its data */
348        	map = 0;
349	}
350	/*
351	 *	If MapFib is true then we need to map the Fib and put pointers
352	 *	in the queue entry.
353	 */
354	if (map)
355		entry->addr = cpu_to_le32(fibptr->hw_fib_pa);
356	return 0;
357}
358
359/*
360 *	Define the highest level of host to adapter communication routines.
361 *	These routines will support host to adapter FS communication. These
362 *	routines have no knowledge of the communication method used. This level
363 *	sends and receives FIBs. This level has no knowledge of how these FIBs
364 *	get passed back and forth.
365 */
366
367/**
368 *	aac_fib_send	-	send a fib to the adapter
369 *	@command: Command to send
370 *	@fibptr: The fib
371 *	@size: Size of fib data area
372 *	@priority: Priority of Fib
373 *	@wait: Async/sync select
374 *	@reply: True if a reply is wanted
375 *	@callback: Called with reply
376 *	@callback_data: Passed to callback
377 *
378 *	Sends the requested FIB to the adapter and optionally will wait for a
379 *	response FIB. If the caller does not wish to wait for a response then
380 *	an event to wait on must be supplied. This event will be set when a
381 *	response FIB is received from the adapter.
382 */
383
384int aac_fib_send(u16 command, struct fib *fibptr, unsigned long size,
385		int priority, int wait, int reply, fib_callback callback,
386		void *callback_data)
387{
388	struct aac_dev * dev = fibptr->dev;
389	struct hw_fib * hw_fib = fibptr->hw_fib;
390	struct aac_queue * q;
391	unsigned long flags = 0;
392	unsigned long qflags;
393
394	if (!(hw_fib->header.XferState & cpu_to_le32(HostOwned)))
395		return -EBUSY;
396	/*
397	 *	There are 5 cases with the wait and response requested flags.
398	 *	The only invalid cases are if the caller requests to wait and
399	 *	does not request a response and if the caller does not want a
400	 *	response and the Fib is not allocated from pool. If a response
401	 *	is not requested the Fib will just be deallocated by the DPC
402	 *	routine when the response comes back from the adapter. No
403	 *	further processing will be done besides deleting the Fib. We
404	 *	will have a debug mode where the adapter can notify the host
405	 *	it had a problem and the host can log that fact.
406	 */
407	if (wait && !reply) {
408		return -EINVAL;
409	} else if (!wait && reply) {
410		hw_fib->header.XferState |= cpu_to_le32(Async | ResponseExpected);
411		FIB_COUNTER_INCREMENT(aac_config.AsyncSent);
412	} else if (!wait && !reply) {
413		hw_fib->header.XferState |= cpu_to_le32(NoResponseExpected);
414		FIB_COUNTER_INCREMENT(aac_config.NoResponseSent);
415	} else if (wait && reply) {
416		hw_fib->header.XferState |= cpu_to_le32(ResponseExpected);
417		FIB_COUNTER_INCREMENT(aac_config.NormalSent);
418	}
419	/*
420	 *	Map the fib into 32bits by using the fib number
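	 *	(the index, carried in SenderFibAddress shifted left by two and
	 *	in SenderData, lets the completion path recover the driver fib
	 *	from the value the adapter hands back)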
421	 */
422
423	hw_fib->header.SenderFibAddress = cpu_to_le32(((u32)(fibptr - dev->fibs)) << 2);
424	hw_fib->header.SenderData = (u32)(fibptr - dev->fibs);
425	/*
426	 *	Set FIB state to indicate where it came from and if we want a
427	 *	response from the adapter. Also load the command from the
428	 *	caller.
429	 *
430	 *	Map the hw fib pointer as a 32bit value
431	 */
432	hw_fib->header.Command = cpu_to_le16(command);
433	hw_fib->header.XferState |= cpu_to_le32(SentFromHost);
434	fibptr->hw_fib->header.Flags = 0;	/* 0 the flags field - internal only*/
435	/*
436	 *	Set the size of the Fib we want to send to the adapter
437	 */
438	hw_fib->header.Size = cpu_to_le16(sizeof(struct aac_fibhdr) + size);
439	if (le16_to_cpu(hw_fib->header.Size) > le16_to_cpu(hw_fib->header.SenderSize)) {
440		return -EMSGSIZE;
441	}
442	/*
443	 *	Get a queue entry, connect the FIB to it, and then notify
444	 *	the adapter that a command is ready.
445	 */
446	hw_fib->header.XferState |= cpu_to_le32(NormalPriority);
447
448	/*
449	 *	Fill in the Callback and CallbackContext if we are not
450	 *	going to wait.
451	 */
452	if (!wait) {
453		fibptr->callback = callback;
454		fibptr->callback_data = callback_data;
455	}
456
457	fibptr->done = 0;
458	fibptr->flags = 0;
459
460	FIB_COUNTER_INCREMENT(aac_config.FibsSent);
461
462	dprintk((KERN_DEBUG "Fib contents:.\n"));
463	dprintk((KERN_DEBUG "  Command =               %d.\n", le32_to_cpu(hw_fib->header.Command)));
464	dprintk((KERN_DEBUG "  SubCommand =            %d.\n", le32_to_cpu(((struct aac_query_mount *)fib_data(fibptr))->command)));
465	dprintk((KERN_DEBUG "  XferState  =            %x.\n", le32_to_cpu(hw_fib->header.XferState)));
466	dprintk((KERN_DEBUG "  hw_fib va being sent=%p\n",fibptr->hw_fib));
467	dprintk((KERN_DEBUG "  hw_fib pa being sent=%lx\n",(ulong)fibptr->hw_fib_pa));
468	dprintk((KERN_DEBUG "  fib being sent=%p\n",fibptr));
469
470	if (!dev->queues)
471		return -EBUSY;
472	q = &dev->queues->queue[AdapNormCmdQueue];
473
474	if(wait)
475		spin_lock_irqsave(&fibptr->event_lock, flags);
476	spin_lock_irqsave(q->lock, qflags);
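	/*
	 *	Two delivery paths: with the new comm interface the hardware
	 *	fib is handed directly to the adapter (spinning for up to
	 *	roughly 50 seconds if it is not accepted); otherwise the fib
	 *	is placed on the AdapNormCmdQueue and the adapter notified,
	 *	unless interrupt moderation suppressed the notify.
	 */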
477	if (dev->new_comm_interface) {
478		unsigned long count = 10000000L; /* 50 seconds */
479		q->numpending++;
480		spin_unlock_irqrestore(q->lock, qflags);
481		while (aac_adapter_send(fibptr) != 0) {
482			if (--count == 0) {
483				if (wait)
484					spin_unlock_irqrestore(&fibptr->event_lock, flags);
485				spin_lock_irqsave(q->lock, qflags);
486				q->numpending--;
487				spin_unlock_irqrestore(q->lock, qflags);
488				return -ETIMEDOUT;
489			}
490			udelay(5);
491		}
492	} else {
493		u32 index;
494		unsigned long nointr = 0;
495		aac_queue_get( dev, &index, AdapNormCmdQueue, hw_fib, 1, fibptr, &nointr);
496
497		q->numpending++;
498		*(q->headers.producer) = cpu_to_le32(index + 1);
499		spin_unlock_irqrestore(q->lock, qflags);
500		dprintk((KERN_DEBUG "aac_fib_send: inserting a queue entry at index %d.\n",index));
501		if (!(nointr & aac_config.irq_mod))
502			aac_adapter_notify(dev, AdapNormCmdQueue);
503	}
504
505	/*
506	 *	If the caller wanted us to wait for response wait now.
507	 */
508
509	if (wait) {
510		spin_unlock_irqrestore(&fibptr->event_lock, flags);
511		/* Only set for first known interruptible command */
512		if (wait < 0) {
513			/*
514			 * *VERY* Dangerous to time out a command, the
515			 * assumption is made that we have no hope of
516			 * functioning because an interrupt routing or other
517			 * hardware failure has occurred.
518			 */
519			unsigned long count = 36000000L; /* 3 minutes */
520			while (down_trylock(&fibptr->event_wait)) {
521				int blink;
522				if (--count == 0) {
523					spin_lock_irqsave(q->lock, qflags);
524					q->numpending--;
525					spin_unlock_irqrestore(q->lock, qflags);
526					if (wait == -1) {
527	        				printk(KERN_ERR "aacraid: aac_fib_send: first asynchronous command timed out.\n"
528						  "Usually a result of a PCI interrupt routing problem;\n"
529						  "update mother board BIOS or consider utilizing one of\n"
530						  "the SAFE mode kernel options (acpi, apic etc)\n");
531					}
532					return -ETIMEDOUT;
533				}
534				if ((blink = aac_adapter_check_health(dev)) > 0) {
535					if (wait == -1) {
536	        				printk(KERN_ERR "aacraid: aac_fib_send: adapter blinkLED 0x%x.\n"
537						  "Usually a result of a serious unrecoverable hardware problem\n",
538						  blink);
539					}
540					return -EFAULT;
541				}
542				udelay(5);
543			}
544		} else if (down_interruptible(&fibptr->event_wait)) {
545			spin_lock_irqsave(&fibptr->event_lock, flags);
546			if (fibptr->done == 0) {
547				fibptr->done = 2; /* Tell interrupt we aborted */
548				spin_unlock_irqrestore(&fibptr->event_lock, flags);
549				return -EINTR;
550			}
551			spin_unlock_irqrestore(&fibptr->event_lock, flags);
552		}
553		BUG_ON(fibptr->done == 0);
554
555		if((fibptr->flags & FIB_CONTEXT_FLAG_TIMED_OUT)){
556			return -ETIMEDOUT;
557		} else {
558			return 0;
559		}
560	}
561	/*
562	 *	If the user does not want a response then return success, otherwise
563	 *	return pending
564	 */
565	if (reply)
566		return -EINPROGRESS;
567	else
568		return 0;
569}
570
571/**
572 *	aac_consumer_get	-	get the top of the queue
573 *	@dev: Adapter
574 *	@q: Queue
575 *	@entry: Return entry
576 *
577 *	Will return a pointer to the entry on the top of the requested queue that
578 * 	we are a consumer of. It does
579 *	not change the state of the queue.
580 */
581
582int aac_consumer_get(struct aac_dev * dev, struct aac_queue * q, struct aac_entry **entry)
583{
584	u32 index;
585	int status;
586	if (le32_to_cpu(*q->headers.producer) == le32_to_cpu(*q->headers.consumer)) {
587		status = 0;
588	} else {
589		/*
590		 *	The consumer index must be wrapped if we have reached
591		 *	the end of the queue, else we just use the entry
592		 *	pointed to by the header index
593		 */
594		if (le32_to_cpu(*q->headers.consumer) >= q->entries)
595			index = 0;
596		else
597		        index = le32_to_cpu(*q->headers.consumer);
598		*entry = q->base + index;
599		status = 1;
600	}
601	return(status);
602}
603
604/**
605 *	aac_consumer_free	-	free consumer entry
606 *	@dev: Adapter
607 *	@q: Queue
608 *	@qid: Queue ident
609 *
610 *	Frees up the current top of the queue we are a consumer of. If the
611 *	queue was full notify the producer that the queue is no longer full.
612 */
613
614void aac_consumer_free(struct aac_dev * dev, struct aac_queue *q, u32 qid)
615{
616	int wasfull = 0;
617	u32 notify;
618
619	if ((le32_to_cpu(*q->headers.producer)+1) == le32_to_cpu(*q->headers.consumer))
620		wasfull = 1;
621
622	if (le32_to_cpu(*q->headers.consumer) >= q->entries)
623		*q->headers.consumer = cpu_to_le32(1);
624	else
625		*q->headers.consumer = cpu_to_le32(le32_to_cpu(*q->headers.consumer)+1);
626
627	if (wasfull) {
628		switch (qid) {
629
630		case HostNormCmdQueue:
631			notify = HostNormCmdNotFull;
632			break;
633		case HostNormRespQueue:
634			notify = HostNormRespNotFull;
635			break;
636		default:
637			BUG();
638			return;
639		}
640		aac_adapter_notify(dev, notify);
641	}
642}
643
644/**
645 *	aac_fib_adapter_complete	-	complete adapter issued fib
646 *	@fibptr: fib to complete
647 *	@size: size of fib
648 *
649 *	Will do all necessary work to complete a FIB that was sent from
650 *	the adapter.
651 */
652
653int aac_fib_adapter_complete(struct fib *fibptr, unsigned short size)
654{
655	struct hw_fib * hw_fib = fibptr->hw_fib;
656	struct aac_dev * dev = fibptr->dev;
657	struct aac_queue * q;
658	unsigned long nointr = 0;
659	unsigned long qflags;
660
661	if (hw_fib->header.XferState == 0) {
662		if (dev->new_comm_interface)
663			kfree (hw_fib);
664        	return 0;
665	}
666	/*
667	 *	If we plan to do anything check the structure type first.
668	 */
669	if ( hw_fib->header.StructType != FIB_MAGIC ) {
670		if (dev->new_comm_interface)
671			kfree (hw_fib);
672        	return -EINVAL;
673	}
674	/*
675	 *	This block handles the case where the adapter had sent us a
676	 *	command and we have finished processing the command. We
677	 *	call completeFib when we are done processing the command
678	 *	and want to send a response back to the adapter. This will
679	 *	send the completed cdb to the adapter.
680	 */
681	if (hw_fib->header.XferState & cpu_to_le32(SentFromAdapter)) {
682		if (dev->new_comm_interface) {
683			kfree (hw_fib);
684		} else {
685	       		u32 index;
686		        hw_fib->header.XferState |= cpu_to_le32(HostProcessed);
687			if (size) {
688				size += sizeof(struct aac_fibhdr);
689				if (size > le16_to_cpu(hw_fib->header.SenderSize))
690					return -EMSGSIZE;
691				hw_fib->header.Size = cpu_to_le16(size);
692			}
693			q = &dev->queues->queue[AdapNormRespQueue];
694			spin_lock_irqsave(q->lock, qflags);
695			aac_queue_get(dev, &index, AdapNormRespQueue, hw_fib, 1, NULL, &nointr);
696			*(q->headers.producer) = cpu_to_le32(index + 1);
697			spin_unlock_irqrestore(q->lock, qflags);
698			if (!(nointr & (int)aac_config.irq_mod))
699				aac_adapter_notify(dev, AdapNormRespQueue);
700		}
701	}
702	else
703	{
704        	printk(KERN_WARNING "aac_fib_adapter_complete: Unknown xferstate detected.\n");
705        	BUG();
706	}
707	return 0;
708}
709
710/**
711 *	aac_fib_complete	-	fib completion handler
712 *	@fibptr: FIB to complete
713 *
714 *	Will do all necessary work to complete a FIB.
715 */
716
717int aac_fib_complete(struct fib *fibptr)
718{
719	struct hw_fib * hw_fib = fibptr->hw_fib;
720
721	/*
722	 *	Check for a fib which has already been completed
723	 */
724
725	if (hw_fib->header.XferState == 0)
726        	return 0;
727	/*
728	 *	If we plan to do anything check the structure type first.
729	 */
730
731	if (hw_fib->header.StructType != FIB_MAGIC)
732	        return -EINVAL;
733	/*
734	 *	This block completes a cdb which originated on the host and we
735	 *	just need to deallocate the cdb or reinit it. At this point the
736	 *	command is complete that we had sent to the adapter and this
737	 *	cdb could be reused.
738	 */
739	if((hw_fib->header.XferState & cpu_to_le32(SentFromHost)) &&
740		(hw_fib->header.XferState & cpu_to_le32(AdapterProcessed)))
741	{
742		fib_dealloc(fibptr);
743	}
744	else if(hw_fib->header.XferState & cpu_to_le32(SentFromHost))
745	{
746		/*
747		 *	This handles the case when the host has aborted the I/O
748		 *	to the adapter because the adapter is not responding
749		 */
750		fib_dealloc(fibptr);
751	} else if(hw_fib->header.XferState & cpu_to_le32(HostOwned)) {
752		fib_dealloc(fibptr);
753	} else {
754		BUG();
755	}
756	return 0;
757}
758
759/**
760 *	aac_printf	-	handle printf from firmware
761 *	@dev: Adapter
762 *	@val: Message info
763 *
764 *	Print a message passed to us by the controller firmware on the
765 *	Adaptec board
766 */
767
768void aac_printf(struct aac_dev *dev, u32 val)
769{
770	char *cp = dev->printfbuf;
771	if (dev->printf_enabled)
772	{
773		int length = val & 0xffff;
774		int level = (val >> 16) & 0xffff;
775
776		/*
777		 *	The size of the printfbuf is set in port.c
778		 *	There is no variable or define for it
779		 */
780		if (length > 255)
781			length = 255;
782		if (cp[length] != 0)
783			cp[length] = 0;
784		if (level == LOG_AAC_HIGH_ERROR)
785			printk(KERN_WARNING "%s:%s", dev->name, cp);
786		else
787			printk(KERN_INFO "%s:%s", dev->name, cp);
788	}
789	memset(cp, 0,  256);
790}
791
792
793/**
794 *	aac_handle_aif		-	Handle a message from the firmware
795 *	@dev: Which adapter this fib is from
796 *	@fibptr: Pointer to fibptr from adapter
797 *
798 *	This routine handles a driver notify fib from the adapter and
799 *	dispatches it to the appropriate routine for handling.
800 */
801
802#define AIF_SNIFF_TIMEOUT	(30*HZ)
803static void aac_handle_aif(struct aac_dev * dev, struct fib * fibptr)
804{
805	struct hw_fib * hw_fib = fibptr->hw_fib;
806	struct aac_aifcmd * aifcmd = (struct aac_aifcmd *)hw_fib->data;
807	int busy;
808	u32 container;
809	struct scsi_device *device;
810	enum {
811		NOTHING,
812		DELETE,
813		ADD,
814		CHANGE
815	} device_config_needed;
816
817	/* Sniff for container changes */
818
819	if (!dev || !dev->fsa_dev)
820		return;
821	container = (u32)-1;
822
823	/*
824	 *	We have set this up to try and minimize the number of
825	 * re-configures that take place. As a result of this when
826	 * certain AIF's come in we will set a flag waiting for another
827	 * type of AIF before setting the re-config flag.
828	 */
829	switch (le32_to_cpu(aifcmd->command)) {
830	case AifCmdDriverNotify:
831		switch (le32_to_cpu(((u32 *)aifcmd->data)[0])) {
832		/*
833		 *	Morph or Expand complete
834		 */
835		case AifDenMorphComplete:
836		case AifDenVolumeExtendComplete:
837			container = le32_to_cpu(((u32 *)aifcmd->data)[1]);
838			if (container >= dev->maximum_num_containers)
839				break;
840
841			/*
842			 *	Find the scsi_device associated with the SCSI
843			 * address. Make sure we have the right array, and if
844			 * so set the flag to initiate a new re-config once we
845			 * see an AifEnConfigChange AIF come through.
846			 */
847
848			if ((dev != NULL) && (dev->scsi_host_ptr != NULL)) {
849				device = scsi_device_lookup(dev->scsi_host_ptr,
850					CONTAINER_TO_CHANNEL(container),
851					CONTAINER_TO_ID(container),
852					CONTAINER_TO_LUN(container));
853				if (device) {
854					dev->fsa_dev[container].config_needed = CHANGE;
855					dev->fsa_dev[container].config_waiting_on = AifEnConfigChange;
856					dev->fsa_dev[container].config_waiting_stamp = jiffies;
857					scsi_device_put(device);
858				}
859			}
860		}
861
862		/*
863		 *	If we are waiting on something and this happens to be
864		 * that thing then set the re-configure flag.
865		 */
866		if (container != (u32)-1) {
867			if (container >= dev->maximum_num_containers)
868				break;
869			if ((dev->fsa_dev[container].config_waiting_on ==
870			    le32_to_cpu(*(u32 *)aifcmd->data)) &&
871			 time_before(jiffies, dev->fsa_dev[container].config_waiting_stamp + AIF_SNIFF_TIMEOUT))
872				dev->fsa_dev[container].config_waiting_on = 0;
873		} else for (container = 0;
874		    container < dev->maximum_num_containers; ++container) {
875			if ((dev->fsa_dev[container].config_waiting_on ==
876			    le32_to_cpu(*(u32 *)aifcmd->data)) &&
877			 time_before(jiffies, dev->fsa_dev[container].config_waiting_stamp + AIF_SNIFF_TIMEOUT))
878				dev->fsa_dev[container].config_waiting_on = 0;
879		}
880		break;
881
882	case AifCmdEventNotify:
883		switch (le32_to_cpu(((u32 *)aifcmd->data)[0])) {
884		/*
885		 *	Add an Array.
886		 */
887		case AifEnAddContainer:
888			container = le32_to_cpu(((u32 *)aifcmd->data)[1]);
889			if (container >= dev->maximum_num_containers)
890				break;
891			dev->fsa_dev[container].config_needed = ADD;
892			dev->fsa_dev[container].config_waiting_on =
893				AifEnConfigChange;
894			dev->fsa_dev[container].config_waiting_stamp = jiffies;
895			break;
896
897		/*
898		 *	Delete an Array.
899		 */
900		case AifEnDeleteContainer:
901			container = le32_to_cpu(((u32 *)aifcmd->data)[1]);
902			if (container >= dev->maximum_num_containers)
903				break;
904			dev->fsa_dev[container].config_needed = DELETE;
905			dev->fsa_dev[container].config_waiting_on =
906				AifEnConfigChange;
907			dev->fsa_dev[container].config_waiting_stamp = jiffies;
908			break;
909
910		/*
911		 *	Container change detected. If we currently are not
912		 * waiting on something else, setup to wait on a Config Change.
913		 */
914		case AifEnContainerChange:
915			container = le32_to_cpu(((u32 *)aifcmd->data)[1]);
916			if (container >= dev->maximum_num_containers)
917				break;
918			if (dev->fsa_dev[container].config_waiting_on &&
919			 time_before(jiffies, dev->fsa_dev[container].config_waiting_stamp + AIF_SNIFF_TIMEOUT))
920				break;
921			dev->fsa_dev[container].config_needed = CHANGE;
922			dev->fsa_dev[container].config_waiting_on =
923				AifEnConfigChange;
924			dev->fsa_dev[container].config_waiting_stamp = jiffies;
925			break;
926
927		case AifEnConfigChange:
928			break;
929
930		}
931
932		/*
933		 *	If we are waiting on something and this happens to be
934		 * that thing then set the re-configure flag.
935		 */
936		if (container != (u32)-1) {
937			if (container >= dev->maximum_num_containers)
938				break;
939			if ((dev->fsa_dev[container].config_waiting_on ==
940			    le32_to_cpu(*(u32 *)aifcmd->data)) &&
941			 time_before(jiffies, dev->fsa_dev[container].config_waiting_stamp + AIF_SNIFF_TIMEOUT))
942				dev->fsa_dev[container].config_waiting_on = 0;
943		} else for (container = 0;
944		    container < dev->maximum_num_containers; ++container) {
945			if ((dev->fsa_dev[container].config_waiting_on ==
946			    le32_to_cpu(*(u32 *)aifcmd->data)) &&
947			 time_before(jiffies, dev->fsa_dev[container].config_waiting_stamp + AIF_SNIFF_TIMEOUT))
948				dev->fsa_dev[container].config_waiting_on = 0;
949		}
950		break;
951
952	case AifCmdJobProgress:
953		/*
954		 *	These are job progress AIF's. When a Clear is being
955		 * done on a container it is initially created then hidden from
956		 * the OS. When the clear completes we don't get a config
957		 * change so we monitor the job status complete on a clear then
958		 * wait for a container change.
959		 */
960
961		if ((((u32 *)aifcmd->data)[1] == cpu_to_le32(AifJobCtrZero))
962		 && ((((u32 *)aifcmd->data)[6] == ((u32 *)aifcmd->data)[5])
963		  || (((u32 *)aifcmd->data)[4] == cpu_to_le32(AifJobStsSuccess)))) {
964			for (container = 0;
965			    container < dev->maximum_num_containers;
966			    ++container) {
967				/*
968				 * Stomp on all config sequencing for all
969				 * containers?
970				 */
971				dev->fsa_dev[container].config_waiting_on =
972					AifEnContainerChange;
973				dev->fsa_dev[container].config_needed = ADD;
974				dev->fsa_dev[container].config_waiting_stamp =
975					jiffies;
976			}
977		}
978		if ((((u32 *)aifcmd->data)[1] == cpu_to_le32(AifJobCtrZero))
979		 && (((u32 *)aifcmd->data)[6] == 0)
980		 && (((u32 *)aifcmd->data)[4] == cpu_to_le32(AifJobStsRunning))) {
981			for (container = 0;
982			    container < dev->maximum_num_containers;
983			    ++container) {
984				/*
985				 * Stomp on all config sequencing for all
986				 * containers?
987				 */
988				dev->fsa_dev[container].config_waiting_on =
989					AifEnContainerChange;
990				dev->fsa_dev[container].config_needed = DELETE;
991				dev->fsa_dev[container].config_waiting_stamp =
992					jiffies;
993			}
994		}
995		break;
996	}
997
998	device_config_needed = NOTHING;
999	for (container = 0; container < dev->maximum_num_containers;
1000	    ++container) {
1001		if ((dev->fsa_dev[container].config_waiting_on == 0) &&
1002			(dev->fsa_dev[container].config_needed != NOTHING) &&
1003			time_before(jiffies, dev->fsa_dev[container].config_waiting_stamp + AIF_SNIFF_TIMEOUT)) {
1004			device_config_needed =
1005				dev->fsa_dev[container].config_needed;
1006			dev->fsa_dev[container].config_needed = NOTHING;
1007			break;
1008		}
1009	}
1010	if (device_config_needed == NOTHING)
1011		return;
1012
1013	/*
1014	 *	If we decided that a re-configuration needs to be done,
1015	 * schedule it here on the way out the door, please close the door
1016	 * behind you.
1017	 */
1018
1019	busy = 0;
1020
1021
1022	/*
1023	 *	Find the scsi_device associated with the SCSI address,
1024	 * and mark it as changed, invalidating the cache. This deals
1025	 * with changes to existing device IDs.
1026	 */
1027
1028	if (!dev || !dev->scsi_host_ptr)
1029		return;
1030	/*
1031	 * force reload of disk info via aac_probe_container
1032	 */
1033	if ((device_config_needed == CHANGE)
1034	 && (dev->fsa_dev[container].valid == 1))
1035		dev->fsa_dev[container].valid = 2;
1036	if ((device_config_needed == CHANGE) ||
1037			(device_config_needed == ADD))
1038		aac_probe_container(dev, container);
1039	device = scsi_device_lookup(dev->scsi_host_ptr,
1040		CONTAINER_TO_CHANNEL(container),
1041		CONTAINER_TO_ID(container),
1042		CONTAINER_TO_LUN(container));
1043	if (device) {
1044		switch (device_config_needed) {
1045		case DELETE:
1046		case CHANGE:
1047			scsi_rescan_device(&device->sdev_gendev);
1048
1049		default:
1050			break;
1051		}
1052		scsi_device_put(device);
1053	}
1054	if (device_config_needed == ADD) {
1055		scsi_add_device(dev->scsi_host_ptr,
1056		  CONTAINER_TO_CHANNEL(container),
1057		  CONTAINER_TO_ID(container),
1058		  CONTAINER_TO_LUN(container));
1059	}
1060
1061}
1062
1063static int _aac_reset_adapter(struct aac_dev *aac)
1064{
1065	int index, quirks;
1066	u32 ret;
1067	int retval;
1068	struct Scsi_Host *host;
1069	struct scsi_device *dev;
1070	struct scsi_cmnd *command;
1071	struct scsi_cmnd *command_list;
1072
1073	/*
1074	 * Assumptions:
1075	 *	- host is locked.
1076	 *	- in_reset is asserted, so no new i/o is getting to the
1077	 *	  card.
1078	 *	- The card is dead.
1079	 */
1080	host = aac->scsi_host_ptr;
1081	scsi_block_requests(host);
1082	aac_adapter_disable_int(aac);
1083	spin_unlock_irq(host->host_lock);
1084	kthread_stop(aac->thread);
1085
1086	/*
1087	 *	A positive health value means the adapter is in a known DEAD
1088	 * PANIC state and could be reset to `try again'.
1089	 */
1090	retval = aac_adapter_check_health(aac);
1091	if (retval == 0)
1092		retval = aac_adapter_sync_cmd(aac, IOP_RESET_ALWAYS,
1093		  0, 0, 0, 0, 0, 0, &ret, NULL, NULL, NULL, NULL);
1094	if (retval)
1095		retval = aac_adapter_sync_cmd(aac, IOP_RESET,
1096		  0, 0, 0, 0, 0, 0, &ret, NULL, NULL, NULL, NULL);
1097
1098	if (retval)
1099		goto out;
1100	if (ret != 0x00000001) {
1101		retval = -ENODEV;
1102		goto out;
1103	}
1104
1105	index = aac->cardtype;
1106
1107	/*
1108	 * Re-initialize the adapter, first free resources, then carefully
1109	 * apply the initialization sequence to come back again. Only risk
1110	 * apply the initialization sequence to come back again. The only risk
1111	 * is a change in Firmware dropping cache; it is assumed the caller
1112	 * will ensure that i/o is quiesced and the card is flushed in that
1113	 */
1114	aac_fib_map_free(aac);
1115	aac->hw_fib_va = NULL;
1116	aac->hw_fib_pa = 0;
1117	pci_free_consistent(aac->pdev, aac->comm_size, aac->comm_addr, aac->comm_phys);
1118	aac->comm_addr = NULL;
1119	aac->comm_phys = 0;
1120	kfree(aac->queues);
1121	aac->queues = NULL;
1122	free_irq(aac->pdev->irq, aac);
1123	kfree(aac->fsa_dev);
1124	aac->fsa_dev = NULL;
1125	if (aac_get_driver_ident(index)->quirks & AAC_QUIRK_31BIT) {
1126		if (((retval = pci_set_dma_mask(aac->pdev, DMA_32BIT_MASK))) ||
1127		  ((retval = pci_set_consistent_dma_mask(aac->pdev, DMA_32BIT_MASK))))
1128			goto out;
1129	} else {
1130		if (((retval = pci_set_dma_mask(aac->pdev, 0x7FFFFFFFULL))) ||
1131		  ((retval = pci_set_consistent_dma_mask(aac->pdev, 0x7FFFFFFFULL))))
1132			goto out;
1133	}
1134	if ((retval = (*(aac_get_driver_ident(index)->init))(aac)))
1135		goto out;
1136	if (aac_get_driver_ident(index)->quirks & AAC_QUIRK_31BIT)
1137		if ((retval = pci_set_dma_mask(aac->pdev, DMA_32BIT_MASK)))
1138			goto out;
1139	aac->thread = kthread_run(aac_command_thread, aac, aac->name);
1140	if (IS_ERR(aac->thread)) {
1141		retval = PTR_ERR(aac->thread);
1142		goto out;
1143	}
1144	(void)aac_get_adapter_info(aac);
1145	quirks = aac_get_driver_ident(index)->quirks;
1146	if ((quirks & AAC_QUIRK_34SG) && (host->sg_tablesize > 34)) {
1147 		host->sg_tablesize = 34;
1148 		host->max_sectors = (host->sg_tablesize * 8) + 112;
1149 	}
1150 	if ((quirks & AAC_QUIRK_17SG) && (host->sg_tablesize > 17)) {
1151 		host->sg_tablesize = 17;
1152 		host->max_sectors = (host->sg_tablesize * 8) + 112;
1153 	}
1154	aac_get_config_status(aac, 1);
1155	aac_get_containers(aac);
1156	/*
1157	 * This is where the assumption that the Adapter is quiesced
1158	 * is important.
1159	 */
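	/*
	 *	Collect every command still owned by the firmware into a
	 *	temporary singly-linked list threaded through SCp.buffer, then
	 *	complete each one below with TASK SET FULL so the midlayer
	 *	will retry it.
	 */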
1160	command_list = NULL;
1161	__shost_for_each_device(dev, host) {
1162		unsigned long flags;
1163		spin_lock_irqsave(&dev->list_lock, flags);
1164		list_for_each_entry(command, &dev->cmd_list, list)
1165			if (command->SCp.phase == AAC_OWNER_FIRMWARE) {
1166				command->SCp.buffer = (struct scatterlist *)command_list;
1167				command_list = command;
1168			}
1169		spin_unlock_irqrestore(&dev->list_lock, flags);
1170	}
1171	while ((command = command_list)) {
1172		command_list = (struct scsi_cmnd *)command->SCp.buffer;
1173		command->SCp.buffer = NULL;
1174		command->result = DID_OK << 16
1175		  | COMMAND_COMPLETE << 8
1176		  | SAM_STAT_TASK_SET_FULL;
1177		command->SCp.phase = AAC_OWNER_ERROR_HANDLER;
1178		command->scsi_done(command);
1179	}
1180	retval = 0;
1181
1182out:
1183	aac->in_reset = 0;
1184	scsi_unblock_requests(host);
1185	spin_lock_irq(host->host_lock);
1186	return retval;
1187}
1188
1189int aac_check_health(struct aac_dev * aac)
1190{
1191	int BlinkLED;
1192	unsigned long time_now, flagv = 0;
1193	struct list_head * entry;
1194	struct Scsi_Host * host;
1195
1196	/* Extending the scope of fib_lock slightly to protect aac->in_reset */
1197	if (spin_trylock_irqsave(&aac->fib_lock, flagv) == 0)
1198		return 0;
1199
1200	if (aac->in_reset || !(BlinkLED = aac_adapter_check_health(aac))) {
1201		spin_unlock_irqrestore(&aac->fib_lock, flagv);
1202		return 0; /* OK */
1203	}
1204
1205	aac->in_reset = 1;
1206
1207	/* Fake up an AIF:
1208	 *	aac_aifcmd.command = AifCmdEventNotify = 1
1209	 *	aac_aifcmd.seqnum = 0xFFFFFFFF
1210	 *	aac_aifcmd.data[0] = AifEnExpEvent = 23
1211	 *	aac_aifcmd.data[1] = AifExeFirmwarePanic = 3
1212	 *	aac.aifcmd.data[2] = AifHighPriority = 3
1213	 *	aac.aifcmd.data[3] = BlinkLED
1214	 */
1215
1216	time_now = jiffies/HZ;
1217	entry = aac->fib_list.next;
1218
1219	/*
1220	 * For each Context that is on the
1221	 * fibctxList, make a copy of the
1222	 * fib, and then set the event to wake up the
1223	 * thread that is waiting for it.
1224	 */
1225	while (entry != &aac->fib_list) {
1226		/*
1227		 * Extract the fibctx
1228		 */
1229		struct aac_fib_context *fibctx = list_entry(entry, struct aac_fib_context, next);
1230		struct hw_fib * hw_fib;
1231		struct fib * fib;
1232		/*
1233		 * Check if the queue is getting
1234		 * backlogged
1235		 */
1236		if (fibctx->count > 20) {
1237			/*
1238			 * It's *not* jiffies folks,
1239			 * but jiffies / HZ, so do not
1240			 * panic ...
1241			 */
1242			u32 time_last = fibctx->jiffies;
1243			/*
1244			 * Has it been > 2 minutes
1245			 * since the last read off
1246			 * the queue?
1247			 */
1248			if ((time_now - time_last) > aif_timeout) {
1249				entry = entry->next;
1250				aac_close_fib_context(aac, fibctx);
1251				continue;
1252			}
1253		}
1254		/*
1255		 * Warning: no sleep allowed while
1256		 * holding spinlock
1257		 */
1258		hw_fib = kmalloc(sizeof(struct hw_fib), GFP_ATOMIC);
1259		fib = kmalloc(sizeof(struct fib), GFP_ATOMIC);
1260		if (fib && hw_fib) {
1261			struct aac_aifcmd * aif;
1262
1263			memset(hw_fib, 0, sizeof(struct hw_fib));
1264			memset(fib, 0, sizeof(struct fib));
1265			fib->hw_fib = hw_fib;
1266			fib->dev = aac;
1267			aac_fib_init(fib);
1268			fib->type = FSAFS_NTC_FIB_CONTEXT;
1269			fib->size = sizeof (struct fib);
1270			fib->data = hw_fib->data;
1271			aif = (struct aac_aifcmd *)hw_fib->data;
1272			aif->command = cpu_to_le32(AifCmdEventNotify);
1273		 	aif->seqnum = cpu_to_le32(0xFFFFFFFF);
1274		 	aif->data[0] = cpu_to_le32(AifEnExpEvent);
1275			aif->data[1] = cpu_to_le32(AifExeFirmwarePanic);
1276		 	aif->data[2] = cpu_to_le32(AifHighPriority);
1277			aif->data[3] = cpu_to_le32(BlinkLED);
1278
1279			/*
1280			 * Put the FIB onto the
1281			 * fibctx's fibs
1282			 */
1283			list_add_tail(&fib->fiblink, &fibctx->fib_list);
1284			fibctx->count++;
1285			/*
1286			 * Set the event to wake up the
1287			 * thread that will waiting.
1288			 */
1289			up(&fibctx->wait_sem);
1290		} else {
1291			printk(KERN_WARNING "aifd: didn't allocate NewFib.\n");
1292			kfree(fib);
1293			kfree(hw_fib);
1294		}
1295		entry = entry->next;
1296	}
1297
1298	spin_unlock_irqrestore(&aac->fib_lock, flagv);
1299
1300	if (BlinkLED < 0) {
1301		printk(KERN_ERR "%s: Host adapter dead %d\n", aac->name, BlinkLED);
1302		goto out;
1303	}
1304
1305	printk(KERN_ERR "%s: Host adapter BLINK LED 0x%x\n", aac->name, BlinkLED);
1306
1307	host = aac->scsi_host_ptr;
1308	spin_lock_irqsave(host->host_lock, flagv);
1309	BlinkLED = _aac_reset_adapter(aac);
1310	spin_unlock_irqrestore(host->host_lock, flagv);
1311	return BlinkLED;
1312
1313out:
1314	aac->in_reset = 0;
1315	return BlinkLED;
1316}
1317
1318
1319/**
1320 *	aac_command_thread	-	command processing thread
1321 *	@dev: Adapter to monitor
1322 *
1323 *	Waits on the commandready event in its queue. When the event gets set
1324 *	it will pull FIBs off its queue. It will continue to pull FIBs off
1325 *	until the queue is empty. When the queue is empty it will wait for
1326 *	more FIBs.
1327 */
1328
1329int aac_command_thread(void *data)
1330{
1331	struct aac_dev *dev = data;
1332	struct hw_fib *hw_fib, *hw_newfib;
1333	struct fib *fib, *newfib;
1334	struct aac_fib_context *fibctx;
1335	unsigned long flags;
1336	DECLARE_WAITQUEUE(wait, current);
1337
1338	/*
1339	 *	We can only have one thread per adapter for AIF's.
1340	 */
1341	if (dev->aif_thread)
1342		return -EINVAL;
1343
1344	/*
1345	 *	Let the DPC know it has a place to send the AIF's to.
1346	 */
1347	dev->aif_thread = 1;
1348	add_wait_queue(&dev->queues->queue[HostNormCmdQueue].cmdready, &wait);
1349	set_current_state(TASK_INTERRUPTIBLE);
1350	dprintk ((KERN_INFO "aac_command_thread start\n"));
1351	while(1)
1352	{
1353		spin_lock_irqsave(dev->queues->queue[HostNormCmdQueue].lock, flags);
1354		while(!list_empty(&(dev->queues->queue[HostNormCmdQueue].cmdq))) {
1355			struct list_head *entry;
1356			struct aac_aifcmd * aifcmd;
1357
1358			set_current_state(TASK_RUNNING);
1359
1360			entry = dev->queues->queue[HostNormCmdQueue].cmdq.next;
1361			list_del(entry);
1362
1363			spin_unlock_irqrestore(dev->queues->queue[HostNormCmdQueue].lock, flags);
1364			fib = list_entry(entry, struct fib, fiblink);
1365			/*
1366			 *	We will process the FIB here or pass it to a
1367			 *	worker thread that is TBD. We really can't
1368			 *	do anything at this point since we don't have
1369			 *	anything defined for this thread to do.
1370			 */
1371			hw_fib = fib->hw_fib;
1372			memset(fib, 0, sizeof(struct fib));
1373			fib->type = FSAFS_NTC_FIB_CONTEXT;
1374			fib->size = sizeof( struct fib );
1375			fib->hw_fib = hw_fib;
1376			fib->data = hw_fib->data;
1377			fib->dev = dev;
1378			/*
1379			 *	We only handle AifRequest fibs from the adapter.
1380			 */
1381			aifcmd = (struct aac_aifcmd *) hw_fib->data;
1382			if (aifcmd->command == cpu_to_le32(AifCmdDriverNotify)) {
1383				/* Handle Driver Notify Events */
1384				aac_handle_aif(dev, fib);
1385				*(__le32 *)hw_fib->data = cpu_to_le32(ST_OK);
1386				aac_fib_adapter_complete(fib, (u16)sizeof(u32));
1387			} else {
1388				struct list_head *entry;
1389				/* The u32 here is important and intended. We are using
1390				   32bit wrapping time to fit the adapter field */
1391
1392				u32 time_now, time_last;
1393				unsigned long flagv;
1394				unsigned num;
1395				struct hw_fib ** hw_fib_pool, ** hw_fib_p;
1396				struct fib ** fib_pool, ** fib_p;
1397
1398				/* Sniff events */
1399				if ((aifcmd->command ==
1400				     cpu_to_le32(AifCmdEventNotify)) ||
1401				    (aifcmd->command ==
1402				     cpu_to_le32(AifCmdJobProgress))) {
1403					aac_handle_aif(dev, fib);
1404				}
1405
1406				time_now = jiffies/HZ;
1407
1408				/*
1409				 * Warning: no sleep allowed while
1410				 * holding spinlock. We take the estimate
1411				 * and pre-allocate a set of fibs outside the
1412				 * lock.
1413				 */
1414				num = le32_to_cpu(dev->init->AdapterFibsSize)
1415				    / sizeof(struct hw_fib); /* some extra */
1416				spin_lock_irqsave(&dev->fib_lock, flagv);
1417				entry = dev->fib_list.next;
1418				while (entry != &dev->fib_list) {
1419					entry = entry->next;
1420					++num;
1421				}
1422				spin_unlock_irqrestore(&dev->fib_lock, flagv);
1423				hw_fib_pool = NULL;
1424				fib_pool = NULL;
1425				if (num
1426				 && ((hw_fib_pool = kmalloc(sizeof(struct hw_fib *) * num, GFP_KERNEL)))
1427				 && ((fib_pool = kmalloc(sizeof(struct fib *) * num, GFP_KERNEL)))) {
1428					hw_fib_p = hw_fib_pool;
1429					fib_p = fib_pool;
1430					while (hw_fib_p < &hw_fib_pool[num]) {
1431						if (!(*(hw_fib_p++) = kmalloc(sizeof(struct hw_fib), GFP_KERNEL))) {
1432							--hw_fib_p;
1433							break;
1434						}
1435						if (!(*(fib_p++) = kmalloc(sizeof(struct fib), GFP_KERNEL))) {
1436							kfree(*(--hw_fib_p));
1437							break;
1438						}
1439					}
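					/*
					 * Trim num down to the number of
					 * fib/hw_fib pairs actually allocated.
					 */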
1440					if ((num = hw_fib_p - hw_fib_pool) == 0) {
1441						kfree(fib_pool);
1442						fib_pool = NULL;
1443						kfree(hw_fib_pool);
1444						hw_fib_pool = NULL;
1445					}
1446				} else {
1447					kfree(hw_fib_pool);
1448					hw_fib_pool = NULL;
1449				}
1450				spin_lock_irqsave(&dev->fib_lock, flagv);
1451				entry = dev->fib_list.next;
1452				/*
1453				 * For each Context that is on the
1454				 * fibctxList, make a copy of the
1455				 * fib, and then set the event to wake up the
1456				 * thread that is waiting for it.
1457				 */
1458				hw_fib_p = hw_fib_pool;
1459				fib_p = fib_pool;
1460				while (entry != &dev->fib_list) {
1461					/*
1462					 * Extract the fibctx
1463					 */
1464					fibctx = list_entry(entry, struct aac_fib_context, next);
1465					/*
1466					 * Check if the queue is getting
1467					 * backlogged
1468					 */
1469					if (fibctx->count > 20)
1470					{
1471						/*
1472						 * It's *not* jiffies folks,
1473						 * but jiffies / HZ so do not
1474						 * panic ...
1475						 */
1476						time_last = fibctx->jiffies;
1477						/*
1478						 * Has it been > 2 minutes
1479						 * since the last read off
1480						 * the queue?
1481						 */
1482						if ((time_now - time_last) > aif_timeout) {
1483							entry = entry->next;
1484							aac_close_fib_context(dev, fibctx);
1485							continue;
1486						}
1487					}
1488					/*
1489					 * Warning: no sleep allowed while
1490					 * holding spinlock
1491					 */
1492					if (hw_fib_p < &hw_fib_pool[num]) {
1493						hw_newfib = *hw_fib_p;
1494						*(hw_fib_p++) = NULL;
1495						newfib = *fib_p;
1496						*(fib_p++) = NULL;
1497						/*
1498						 * Make the copy of the FIB
1499						 */
1500						memcpy(hw_newfib, hw_fib, sizeof(struct hw_fib));
1501						memcpy(newfib, fib, sizeof(struct fib));
1502						newfib->hw_fib = hw_newfib;
1503						/*
1504						 * Put the FIB onto the
1505						 * fibctx's fibs
1506						 */
1507						list_add_tail(&newfib->fiblink, &fibctx->fib_list);
1508						fibctx->count++;
1509						/*
1510						 * Set the event to wake up the
1511						 * thread that is waiting.
1512						 */
1513						up(&fibctx->wait_sem);
1514					} else {
1515						printk(KERN_WARNING "aifd: didn't allocate NewFib.\n");
1516					}
1517					entry = entry->next;
1518				}
1519				/*
1520				 *	Set the status of this FIB
1521				 */
1522				*(__le32 *)hw_fib->data = cpu_to_le32(ST_OK);
1523				aac_fib_adapter_complete(fib, sizeof(u32));
1524				spin_unlock_irqrestore(&dev->fib_lock, flagv);
1525				/* Free up the remaining resources */
1526				hw_fib_p = hw_fib_pool;
1527				fib_p = fib_pool;
1528				while (hw_fib_p < &hw_fib_pool[num]) {
1529					kfree(*hw_fib_p);
1530					kfree(*fib_p);
1531					++fib_p;
1532					++hw_fib_p;
1533				}
1534				kfree(hw_fib_pool);
1535				kfree(fib_pool);
1536			}
1537			kfree(fib);
1538			spin_lock_irqsave(dev->queues->queue[HostNormCmdQueue].lock, flags);
1539		}
1540		/*
1541		 *	There are no more AIF's
1542		 */
1543		spin_unlock_irqrestore(dev->queues->queue[HostNormCmdQueue].lock, flags);
1544		schedule();
1545
1546		if (kthread_should_stop())
1547			break;
1548		set_current_state(TASK_INTERRUPTIBLE);
1549	}
1550	if (dev->queues)
1551		remove_wait_queue(&dev->queues->queue[HostNormCmdQueue].cmdready, &wait);
1552	dev->aif_thread = 0;
1553	return 0;
1554}
1555