arcmsr_hba.c revision deff2627cda995c926788fd9192337ec3febe7b5
/*
*******************************************************************************
**        O.S   : Linux
**   FILE NAME  : arcmsr_hba.c
**        BY    : Erich Chen
**   Description: SCSI RAID Device Driver for
**                ARECA RAID Host adapter
*******************************************************************************
** Copyright (C) 2002 - 2005, Areca Technology Corporation All rights reserved
**
**     Web site: www.areca.com.tw
**       E-mail: erich@areca.com.tw
**
** This program is free software; you can redistribute it and/or modify
** it under the terms of the GNU General Public License version 2 as
** published by the Free Software Foundation.
** This program is distributed in the hope that it will be useful,
** but WITHOUT ANY WARRANTY; without even the implied warranty of
** MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
** GNU General Public License for more details.
*******************************************************************************
** Redistribution and use in source and binary forms, with or without
** modification, are permitted provided that the following conditions
** are met:
** 1. Redistributions of source code must retain the above copyright
**    notice, this list of conditions and the following disclaimer.
** 2. Redistributions in binary form must reproduce the above copyright
**    notice, this list of conditions and the following disclaimer in the
**    documentation and/or other materials provided with the distribution.
** 3. The name of the author may not be used to endorse or promote products
**    derived from this software without specific prior written permission.
**
** THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
** IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
** OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
** IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
** INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
** NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
** DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
** THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
** (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
** THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*******************************************************************************
** For history of changes, see Documentation/scsi/ChangeLog.arcmsr
**     Firmware Specification, see Documentation/scsi/arcmsr_spec.txt
*******************************************************************************
*/
#include <linux/module.h>
#include <linux/reboot.h>
#include <linux/spinlock.h>
#include <linux/pci_ids.h>
#include <linux/interrupt.h>
#include <linux/moduleparam.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/timer.h>
#include <linux/pci.h>
#include <asm/dma.h>
#include <asm/io.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_transport.h>
#include <scsi/scsicam.h>
#include "arcmsr.h"

MODULE_AUTHOR("Erich Chen <erich@areca.com.tw>");
MODULE_DESCRIPTION("ARECA (ARC11xx/12xx) SATA RAID HOST Adapter");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(ARCMSR_DRIVER_VERSION);

static int arcmsr_iop_message_xfer(struct AdapterControlBlock *acb, struct scsi_cmnd *cmd);
static int arcmsr_abort(struct scsi_cmnd *);
static int arcmsr_bus_reset(struct scsi_cmnd *);
static int arcmsr_bios_param(struct scsi_device *sdev,
				struct block_device *bdev, sector_t capacity, int *info);
static int arcmsr_queue_command(struct scsi_cmnd *cmd,
				void (*done) (struct scsi_cmnd *));
static int arcmsr_probe(struct pci_dev *pdev,
				const struct pci_device_id *id);
static void arcmsr_remove(struct pci_dev *pdev);
static void arcmsr_shutdown(struct pci_dev *pdev);
static void arcmsr_iop_init(struct AdapterControlBlock *acb);
static void arcmsr_free_ccb_pool(struct AdapterControlBlock *acb);
static void arcmsr_stop_adapter_bgrb(struct AdapterControlBlock *acb);
static void arcmsr_flush_adapter_cache(struct AdapterControlBlock *acb);
static uint8_t arcmsr_wait_msgint_ready(struct AdapterControlBlock *acb);
static const char *arcmsr_info(struct Scsi_Host *);
static irqreturn_t arcmsr_interrupt(struct AdapterControlBlock *acb);

static int arcmsr_adjust_disk_queue_depth(struct scsi_device *sdev, int queue_depth)
{
	if (queue_depth > ARCMSR_MAX_CMD_PERLUN)
		queue_depth = ARCMSR_MAX_CMD_PERLUN;
	scsi_adjust_queue_depth(sdev, MSG_ORDERED_TAG, queue_depth);
	return queue_depth;
}

static struct scsi_host_template arcmsr_scsi_host_template = {
	.module			= THIS_MODULE,
	.name			= "ARCMSR ARECA SATA RAID HOST Adapter" ARCMSR_DRIVER_VERSION,
	.info			= arcmsr_info,
	.queuecommand		= arcmsr_queue_command,
	.eh_abort_handler	= arcmsr_abort,
	.eh_bus_reset_handler	= arcmsr_bus_reset,
	.bios_param		= arcmsr_bios_param,
	.change_queue_depth	= arcmsr_adjust_disk_queue_depth,
	.can_queue		= ARCMSR_MAX_OUTSTANDING_CMD,
	.this_id		= ARCMSR_SCSI_INITIATOR_ID,
	.sg_tablesize		= ARCMSR_MAX_SG_ENTRIES,
	.max_sectors		= ARCMSR_MAX_XFER_SECTORS,
	.cmd_per_lun		= ARCMSR_MAX_CMD_PERLUN,
	.use_clustering		= ENABLE_CLUSTERING,
	.shost_attrs		= arcmsr_host_attrs,
};

static struct pci_device_id arcmsr_device_id_table[] = {
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1110)},
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1120)},
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1130)},
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1160)},
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1170)},
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1210)},
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1220)},
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1230)},
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1260)},
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1270)},
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1280)},
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1380)},
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1381)},
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1680)},
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1681)},
	{0, 0}, /* Terminating entry */
};
MODULE_DEVICE_TABLE(pci, arcmsr_device_id_table);
static struct pci_driver arcmsr_pci_driver = {
	.name			= "arcmsr",
	.id_table		= arcmsr_device_id_table,
	.probe			= arcmsr_probe,
	.remove			= arcmsr_remove,
	.shutdown		= arcmsr_shutdown
};

static irqreturn_t arcmsr_do_interrupt(int irq, void *dev_id)
{
	irqreturn_t handle_state;
	struct AdapterControlBlock *acb;
	unsigned long flags;

	acb = (struct AdapterControlBlock *)dev_id;

	spin_lock_irqsave(acb->host->host_lock, flags);
	handle_state = arcmsr_interrupt(acb);
	spin_unlock_irqrestore(acb->host->host_lock, flags);
	return handle_state;
}

static int arcmsr_bios_param(struct scsi_device *sdev,
		struct block_device *bdev, sector_t capacity, int *geom)
{
	int ret, heads, sectors, cylinders, total_capacity;
	unsigned char *buffer; /* return copy of block device's partition table */

	buffer = scsi_bios_ptable(bdev);
	if (buffer) {
		ret = scsi_partsize(buffer, capacity, &geom[2], &geom[0], &geom[1]);
		kfree(buffer);
		if (ret != -1)
			return ret;
	}
	total_capacity = capacity;
	heads = 64;
	sectors = 32;
	cylinders = total_capacity / (heads * sectors);
	if (cylinders > 1024) {
		heads = 255;
		sectors = 63;
		cylinders = total_capacity / (heads * sectors);
	}
	geom[0] = heads;
	geom[1] = sectors;
	geom[2] = cylinders;
	return 0;
}
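
/*
** Illustrative sketch, not part of the driver: the fallback path in
** arcmsr_bios_param() above first assumes 64 heads x 32 sectors and only
** switches to 255 x 63 once that would yield more than 1024 cylinders.
** The hypothetical helper below just replays that arithmetic for one
** sample capacity (a 4194304-sector, i.e. 2 GiB, disk ends up as
** 255 heads / 63 sectors / 261 cylinders).
*/
#if 0	/* example only, never compiled */
static void arcmsr_example_fallback_geometry(unsigned long capacity)
{
	int heads = 64, sectors = 32;
	int cylinders = capacity / (heads * sectors);

	if (cylinders > 1024) {
		heads = 255;
		sectors = 63;
		cylinders = capacity / (heads * sectors);
	}
	printk(KERN_DEBUG "example geometry for %lu sectors: %d/%d/%d\n",
		capacity, heads, sectors, cylinders);
}
#endif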

static int arcmsr_alloc_ccb_pool(struct AdapterControlBlock *acb)
{
	struct pci_dev *pdev = acb->pdev;
	struct MessageUnit __iomem *reg = acb->pmu;
	u32 ccb_phyaddr_hi32;
	void *dma_coherent;
	dma_addr_t dma_coherent_handle, dma_addr;
	struct CommandControlBlock *ccb_tmp;
	int i, j;

	dma_coherent = dma_alloc_coherent(&pdev->dev,
			ARCMSR_MAX_FREECCB_NUM *
			sizeof (struct CommandControlBlock) + 0x20,
			&dma_coherent_handle, GFP_KERNEL);
	if (!dma_coherent)
		return -ENOMEM;

	acb->dma_coherent = dma_coherent;
	acb->dma_coherent_handle = dma_coherent_handle;

	if (((unsigned long)dma_coherent & 0x1F)) {
		dma_coherent = dma_coherent +
			(0x20 - ((unsigned long)dma_coherent & 0x1F));
		dma_coherent_handle = dma_coherent_handle +
			(0x20 - ((unsigned long)dma_coherent_handle & 0x1F));
	}

	dma_addr = dma_coherent_handle;
	ccb_tmp = (struct CommandControlBlock *)dma_coherent;
	for (i = 0; i < ARCMSR_MAX_FREECCB_NUM; i++) {
		ccb_tmp->cdb_shifted_phyaddr = dma_addr >> 5;
		ccb_tmp->acb = acb;
		acb->pccb_pool[i] = ccb_tmp;
		list_add_tail(&ccb_tmp->list, &acb->ccb_free_list);
		dma_addr = dma_addr + sizeof (struct CommandControlBlock);
		ccb_tmp++;
	}

	acb->vir2phy_offset = (unsigned long)ccb_tmp -
			      (unsigned long)dma_addr;
	for (i = 0; i < ARCMSR_MAX_TARGETID; i++)
		for (j = 0; j < ARCMSR_MAX_TARGETLUN; j++)
			acb->devstate[i][j] = ARECA_RAID_GOOD;

	/*
	** here we need to tell iop 331 our ccb_tmp.HighPart
	** if ccb_tmp.HighPart is not zero
	*/
	ccb_phyaddr_hi32 = (uint32_t) ((dma_coherent_handle >> 16) >> 16);
	if (ccb_phyaddr_hi32 != 0) {
		writel(ARCMSR_SIGNATURE_SET_CONFIG, &reg->message_rwbuffer[0]);
		writel(ccb_phyaddr_hi32, &reg->message_rwbuffer[1]);
		writel(ARCMSR_INBOUND_MESG0_SET_CONFIG, &reg->inbound_msgaddr0);
		if (arcmsr_wait_msgint_ready(acb))
			printk(KERN_NOTICE "arcmsr%d: "
			       "'set ccb high part physical address' timeout\n",
				acb->host->host_no);
	}

	writel(readl(&reg->outbound_intmask) |
			ARCMSR_MU_OUTBOUND_ALL_INTMASKENABLE,
	       &reg->outbound_intmask);
	return 0;
}
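
/*
** Illustrative sketch, not part of the driver: the CCB pool above is
** padded and re-aligned to a 32-byte boundary, so the low five bits of
** every CCB bus address are zero.  That is why cdb_shifted_phyaddr keeps
** the address shifted right by five bits, and why the completion paths
** can rebuild the driver's virtual CCB pointer from a single 32-bit reply
** word by shifting it back and adding vir2phy_offset (the cached
** virtual-minus-bus offset of the pool).  The helper name below is
** hypothetical.
*/
#if 0	/* example only, never compiled */
static struct CommandControlBlock *
arcmsr_example_reply_to_ccb(struct AdapterControlBlock *acb, uint32_t flag_ccb)
{
	/* same expression the interrupt and polling code use */
	return (struct CommandControlBlock *)
		(acb->vir2phy_offset + (flag_ccb << 5));
}
#endif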

static int arcmsr_probe(struct pci_dev *pdev,
	const struct pci_device_id *id)
{
	struct Scsi_Host *host;
	struct AdapterControlBlock *acb;
	uint8_t bus, dev_fun;
	int error;

	error = pci_enable_device(pdev);
	if (error)
		goto out;
	pci_set_master(pdev);

	host = scsi_host_alloc(&arcmsr_scsi_host_template,
			sizeof(struct AdapterControlBlock));
	if (!host) {
		error = -ENOMEM;
		goto out_disable_device;
	}
	acb = (struct AdapterControlBlock *)host->hostdata;
	memset(acb, 0, sizeof (struct AdapterControlBlock));

	error = pci_set_dma_mask(pdev, DMA_64BIT_MASK);
	if (error) {
		error = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
		if (error) {
			printk(KERN_WARNING
			       "scsi%d: No suitable DMA mask available\n",
			       host->host_no);
			goto out_host_put;
		}
	}
	bus = pdev->bus->number;
	dev_fun = pdev->devfn;
	acb->host = host;
	acb->pdev = pdev;
	host->max_sectors = ARCMSR_MAX_XFER_SECTORS;
	host->max_lun = ARCMSR_MAX_TARGETLUN;
	host->max_id = ARCMSR_MAX_TARGETID;	/* 16:8 */
	host->max_cmd_len = 16;	/* this is an issue of 64bit LBA, over 2T bytes */
	host->sg_tablesize = ARCMSR_MAX_SG_ENTRIES;
	host->can_queue = ARCMSR_MAX_FREECCB_NUM; /* max simultaneous cmds */
	host->cmd_per_lun = ARCMSR_MAX_CMD_PERLUN;
	host->this_id = ARCMSR_SCSI_INITIATOR_ID;
	host->unique_id = (bus << 8) | dev_fun;
	host->irq = pdev->irq;
	error = pci_request_regions(pdev, "arcmsr");
	if (error)
		goto out_host_put;

	acb->pmu = ioremap(pci_resource_start(pdev, 0),
			   pci_resource_len(pdev, 0));
	if (!acb->pmu) {
		printk(KERN_NOTICE "arcmsr%d: memory"
			" mapping region failed\n", acb->host->host_no);
		error = -ENOMEM;
		goto out_release_regions;
	}
	acb->acb_flags |= (ACB_F_MESSAGE_WQBUFFER_CLEARED |
			   ACB_F_MESSAGE_RQBUFFER_CLEARED |
			   ACB_F_MESSAGE_WQBUFFER_READED);
	acb->acb_flags &= ~ACB_F_SCSISTOPADAPTER;
	INIT_LIST_HEAD(&acb->ccb_free_list);

	error = arcmsr_alloc_ccb_pool(acb);
	if (error)
		goto out_iounmap;

	error = request_irq(pdev->irq, arcmsr_do_interrupt,
			IRQF_DISABLED | IRQF_SHARED, "arcmsr", acb);
	if (error)
		goto out_free_ccb_pool;

	arcmsr_iop_init(acb);
	pci_set_drvdata(pdev, host);

	error = scsi_add_host(host, &pdev->dev);
	if (error)
		goto out_free_irq;

	error = arcmsr_alloc_sysfs_attr(acb);
	if (error)
		goto out_free_sysfs;

	scsi_scan_host(host);
	return 0;
 out_free_sysfs:
 out_free_irq:
	free_irq(pdev->irq, acb);
 out_free_ccb_pool:
	arcmsr_free_ccb_pool(acb);
 out_iounmap:
	iounmap(acb->pmu);
 out_release_regions:
	pci_release_regions(pdev);
 out_host_put:
	scsi_host_put(host);
 out_disable_device:
	pci_disable_device(pdev);
 out:
	return error;
}

static void arcmsr_abort_allcmd(struct AdapterControlBlock *acb)
{
	struct MessageUnit __iomem *reg = acb->pmu;

	writel(ARCMSR_INBOUND_MESG0_ABORT_CMD, &reg->inbound_msgaddr0);
	if (arcmsr_wait_msgint_ready(acb))
		printk(KERN_NOTICE
			"arcmsr%d: wait 'abort all outstanding command' timeout \n"
			, acb->host->host_no);
}

static void arcmsr_pci_unmap_dma(struct CommandControlBlock *ccb)
{
	struct scsi_cmnd *pcmd = ccb->pcmd;

	scsi_dma_unmap(pcmd);
}

static void arcmsr_ccb_complete(struct CommandControlBlock *ccb, int stand_flag)
{
	struct AdapterControlBlock *acb = ccb->acb;
	struct scsi_cmnd *pcmd = ccb->pcmd;

	arcmsr_pci_unmap_dma(ccb);
	if (stand_flag == 1)
		atomic_dec(&acb->ccboutstandingcount);
	ccb->startdone = ARCMSR_CCB_DONE;
	ccb->ccb_flags = 0;
	list_add_tail(&ccb->list, &acb->ccb_free_list);
	pcmd->scsi_done(pcmd);
}

static void arcmsr_remove(struct pci_dev *pdev)
{
	struct Scsi_Host *host = pci_get_drvdata(pdev);
	struct AdapterControlBlock *acb =
		(struct AdapterControlBlock *) host->hostdata;
	struct MessageUnit __iomem *reg = acb->pmu;
	int poll_count = 0;

	arcmsr_free_sysfs_attr(acb);
	scsi_remove_host(host);
	arcmsr_stop_adapter_bgrb(acb);
	arcmsr_flush_adapter_cache(acb);
	writel(readl(&reg->outbound_intmask) |
		ARCMSR_MU_OUTBOUND_ALL_INTMASKENABLE,
		&reg->outbound_intmask);
	acb->acb_flags |= ACB_F_SCSISTOPADAPTER;
	acb->acb_flags &= ~ACB_F_IOP_INITED;

	for (poll_count = 0; poll_count < 256; poll_count++) {
		if (!atomic_read(&acb->ccboutstandingcount))
			break;
		arcmsr_interrupt(acb);
		msleep(25);
	}

	if (atomic_read(&acb->ccboutstandingcount)) {
		int i;

		arcmsr_abort_allcmd(acb);
		for (i = 0; i < ARCMSR_MAX_OUTSTANDING_CMD; i++)
			readl(&reg->outbound_queueport);
		for (i = 0; i < ARCMSR_MAX_FREECCB_NUM; i++) {
			struct CommandControlBlock *ccb = acb->pccb_pool[i];
			if (ccb->startdone == ARCMSR_CCB_START) {
				ccb->startdone = ARCMSR_CCB_ABORTED;
				ccb->pcmd->result = DID_ABORT << 16;
				arcmsr_ccb_complete(ccb, 1);
			}
		}
	}

	free_irq(pdev->irq, acb);
	iounmap(acb->pmu);
	arcmsr_free_ccb_pool(acb);
	pci_release_regions(pdev);

	scsi_host_put(host);

	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
}

static void arcmsr_shutdown(struct pci_dev *pdev)
{
	struct Scsi_Host *host = pci_get_drvdata(pdev);
	struct AdapterControlBlock *acb =
		(struct AdapterControlBlock *)host->hostdata;

	arcmsr_stop_adapter_bgrb(acb);
	arcmsr_flush_adapter_cache(acb);
}

static int arcmsr_module_init(void)
{
	int error = 0;

	error = pci_register_driver(&arcmsr_pci_driver);
	return error;
}

static void arcmsr_module_exit(void)
{
	pci_unregister_driver(&arcmsr_pci_driver);
}
module_init(arcmsr_module_init);
module_exit(arcmsr_module_exit);

static u32 arcmsr_disable_outbound_ints(struct AdapterControlBlock *acb)
{
	struct MessageUnit __iomem *reg = acb->pmu;
	u32 orig_mask = readl(&reg->outbound_intmask);

	writel(orig_mask | ARCMSR_MU_OUTBOUND_ALL_INTMASKENABLE,
			&reg->outbound_intmask);
	return orig_mask;
}

static void arcmsr_enable_outbound_ints(struct AdapterControlBlock *acb,
		u32 orig_mask)
{
	struct MessageUnit __iomem *reg = acb->pmu;
	u32 mask;

	mask = orig_mask & ~(ARCMSR_MU_OUTBOUND_POSTQUEUE_INTMASKENABLE |
			     ARCMSR_MU_OUTBOUND_DOORBELL_INTMASKENABLE);
	writel(mask, &reg->outbound_intmask);
}

static void arcmsr_flush_adapter_cache(struct AdapterControlBlock *acb)
{
	struct MessageUnit __iomem *reg = acb->pmu;

	writel(ARCMSR_INBOUND_MESG0_FLUSH_CACHE, &reg->inbound_msgaddr0);
	if (arcmsr_wait_msgint_ready(acb))
		printk(KERN_NOTICE
			"arcmsr%d: wait 'flush adapter cache' timeout \n"
			, acb->host->host_no);
}

static void arcmsr_report_sense_info(struct CommandControlBlock *ccb)
{
	struct scsi_cmnd *pcmd = ccb->pcmd;
	struct SENSE_DATA *sensebuffer = (struct SENSE_DATA *)pcmd->sense_buffer;

	pcmd->result = DID_OK << 16;
	if (sensebuffer) {
		int sense_data_length =
			sizeof (struct SENSE_DATA) < sizeof (pcmd->sense_buffer)
			? sizeof (struct SENSE_DATA) : sizeof (pcmd->sense_buffer);
		memset(sensebuffer, 0, sizeof (pcmd->sense_buffer));
		memcpy(sensebuffer, ccb->arcmsr_cdb.SenseData, sense_data_length);
		sensebuffer->ErrorCode = SCSI_SENSE_CURRENT_ERRORS;
		sensebuffer->Valid = 1;
	}
}

static uint8_t arcmsr_wait_msgint_ready(struct AdapterControlBlock *acb)
{
	struct MessageUnit __iomem *reg = acb->pmu;
	uint32_t Index;
	uint8_t Retries = 0x00;

	do {
		for (Index = 0; Index < 100; Index++) {
			if (readl(&reg->outbound_intstatus)
				& ARCMSR_MU_OUTBOUND_MESSAGE0_INT) {
				writel(ARCMSR_MU_OUTBOUND_MESSAGE0_INT
					, &reg->outbound_intstatus);
				return 0x00;
			}
			msleep_interruptible(10);
		}/* max 1 second */
	} while (Retries++ < 20);/* max 20 seconds */
	return 0xff;
}

static void arcmsr_build_ccb(struct AdapterControlBlock *acb,
	struct CommandControlBlock *ccb, struct scsi_cmnd *pcmd)
{
	struct ARCMSR_CDB *arcmsr_cdb = (struct ARCMSR_CDB *)&ccb->arcmsr_cdb;
	int8_t *psge = (int8_t *)&arcmsr_cdb->u;
	uint32_t address_lo, address_hi;
	int arccdbsize = 0x30;
	int nseg;

	ccb->pcmd = pcmd;
	memset(arcmsr_cdb, 0, sizeof (struct ARCMSR_CDB));
	arcmsr_cdb->Bus = 0;
	arcmsr_cdb->TargetID = pcmd->device->id;
	arcmsr_cdb->LUN = pcmd->device->lun;
	arcmsr_cdb->Function = 1;
	arcmsr_cdb->CdbLength = (uint8_t)pcmd->cmd_len;
	arcmsr_cdb->Context = (unsigned long)arcmsr_cdb;
	memcpy(arcmsr_cdb->Cdb, pcmd->cmnd, pcmd->cmd_len);

	nseg = scsi_dma_map(pcmd);
	BUG_ON(nseg < 0);

	if (nseg) {
		int length, i, cdb_sgcount = 0;
		struct scatterlist *sg;

		/* map stor port SG list to our iop SG List. */
		scsi_for_each_sg(pcmd, sg, nseg, i) {
			/* Get the physical address of the current data pointer */
			length = cpu_to_le32(sg_dma_len(sg));
			address_lo = cpu_to_le32(dma_addr_lo32(sg_dma_address(sg)));
			address_hi = cpu_to_le32(dma_addr_hi32(sg_dma_address(sg)));
			if (address_hi == 0) {
				struct SG32ENTRY *pdma_sg = (struct SG32ENTRY *)psge;

				pdma_sg->address = address_lo;
				pdma_sg->length = length;
				psge += sizeof (struct SG32ENTRY);
				arccdbsize += sizeof (struct SG32ENTRY);
			} else {
				struct SG64ENTRY *pdma_sg = (struct SG64ENTRY *)psge;

				pdma_sg->addresshigh = address_hi;
				pdma_sg->address = address_lo;
				pdma_sg->length = length|IS_SG64_ADDR;
				psge += sizeof (struct SG64ENTRY);
				arccdbsize += sizeof (struct SG64ENTRY);
			}
			cdb_sgcount++;
		}
		arcmsr_cdb->sgcount = (uint8_t)cdb_sgcount;
		arcmsr_cdb->DataLength = scsi_bufflen(pcmd);
		if (arccdbsize > 256)
			arcmsr_cdb->Flags |= ARCMSR_CDB_FLAG_SGL_BSIZE;
	}
	if (pcmd->sc_data_direction == DMA_TO_DEVICE) {
		arcmsr_cdb->Flags |= ARCMSR_CDB_FLAG_WRITE;
		ccb->ccb_flags |= CCB_FLAG_WRITE;
	}
}
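
/*
** Illustrative sketch, not part of the driver: arcmsr_build_ccb() above
** splits each DMA segment address into 32-bit halves with
** dma_addr_lo32()/dma_addr_hi32() and emits a compact SG32ENTRY whenever
** the high half is zero, otherwise a SG64ENTRY whose length word is
** tagged with IS_SG64_ADDR.  The hypothetical helper below shows only
** that size decision, using the same "two 16-bit shifts" idiom the
** driver uses elsewhere to avoid a 64-bit shift warning on 32-bit builds.
*/
#if 0	/* example only, never compiled */
static int arcmsr_example_sg_entry_size(dma_addr_t dma_addr)
{
	uint32_t address_hi = (uint32_t)((dma_addr >> 16) >> 16);

	return address_hi ? sizeof(struct SG64ENTRY) : sizeof(struct SG32ENTRY);
}
#endif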

static void arcmsr_post_ccb(struct AdapterControlBlock *acb, struct CommandControlBlock *ccb)
{
	struct MessageUnit __iomem *reg = acb->pmu;
	uint32_t cdb_shifted_phyaddr = ccb->cdb_shifted_phyaddr;
	struct ARCMSR_CDB *arcmsr_cdb = (struct ARCMSR_CDB *)&ccb->arcmsr_cdb;

	atomic_inc(&acb->ccboutstandingcount);
	ccb->startdone = ARCMSR_CCB_START;
	if (arcmsr_cdb->Flags & ARCMSR_CDB_FLAG_SGL_BSIZE)
		writel(cdb_shifted_phyaddr | ARCMSR_CCBPOST_FLAG_SGL_BSIZE,
			&reg->inbound_queueport);
	else
		writel(cdb_shifted_phyaddr, &reg->inbound_queueport);
}

void arcmsr_post_Qbuffer(struct AdapterControlBlock *acb)
{
	struct MessageUnit __iomem *reg = acb->pmu;
	struct QBUFFER __iomem *pwbuffer = (struct QBUFFER __iomem *) &reg->message_wbuffer;
	uint8_t __iomem *iop_data = (uint8_t __iomem *) pwbuffer->data;
	int32_t allxfer_len = 0;

	if (acb->acb_flags & ACB_F_MESSAGE_WQBUFFER_READED) {
		acb->acb_flags &= (~ACB_F_MESSAGE_WQBUFFER_READED);
		while ((acb->wqbuf_firstindex != acb->wqbuf_lastindex)
			&& (allxfer_len < 124)) {
			writeb(acb->wqbuffer[acb->wqbuf_firstindex], iop_data);
			acb->wqbuf_firstindex++;
			acb->wqbuf_firstindex %= ARCMSR_MAX_QBUFFER;
			iop_data++;
			allxfer_len++;
		}
		writel(allxfer_len, &pwbuffer->data_len);
		writel(ARCMSR_INBOUND_DRIVER_DATA_WRITE_OK
			, &reg->inbound_doorbell);
	}
}
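
/*
** Illustrative sketch, not part of the driver: wqbuffer/rqbuffer are
** power-of-two circular buffers, which is why both the interrupt path and
** the ioctl path compute the writable space as
** (firstindex - lastindex - 1) & (ARCMSR_MAX_QBUFFER - 1); the usual
** one-reserved-slot convention makes "first == last" mean empty.
** Hypothetical stand-alone form:
*/
#if 0	/* example only, never compiled */
static int32_t arcmsr_example_qbuffer_free(int32_t firstindex, int32_t lastindex)
{
	return (firstindex - lastindex - 1) & (ARCMSR_MAX_QBUFFER - 1);
}
#endif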

static void arcmsr_stop_adapter_bgrb(struct AdapterControlBlock *acb)
{
	struct MessageUnit __iomem *reg = acb->pmu;

	acb->acb_flags &= ~ACB_F_MSG_START_BGRB;
	writel(ARCMSR_INBOUND_MESG0_STOP_BGRB, &reg->inbound_msgaddr0);
	if (arcmsr_wait_msgint_ready(acb))
		printk(KERN_NOTICE
			"arcmsr%d: wait 'stop adapter background rebuild' timeout \n"
			, acb->host->host_no);
}

static void arcmsr_free_ccb_pool(struct AdapterControlBlock *acb)
{
	dma_free_coherent(&acb->pdev->dev,
		ARCMSR_MAX_FREECCB_NUM * sizeof (struct CommandControlBlock) + 0x20,
		acb->dma_coherent,
		acb->dma_coherent_handle);
}

static irqreturn_t arcmsr_interrupt(struct AdapterControlBlock *acb)
{
	struct MessageUnit __iomem *reg = acb->pmu;
	struct CommandControlBlock *ccb;
	uint32_t flag_ccb, outbound_intstatus, outbound_doorbell;

	outbound_intstatus = readl(&reg->outbound_intstatus)
		& acb->outbound_int_enable;
	writel(outbound_intstatus, &reg->outbound_intstatus);
	if (outbound_intstatus & ARCMSR_MU_OUTBOUND_DOORBELL_INT) {
		outbound_doorbell = readl(&reg->outbound_doorbell);
		writel(outbound_doorbell, &reg->outbound_doorbell);
		if (outbound_doorbell & ARCMSR_OUTBOUND_IOP331_DATA_WRITE_OK) {
			struct QBUFFER __iomem *prbuffer =
				(struct QBUFFER __iomem *) &reg->message_rbuffer;
			uint8_t __iomem *iop_data = (uint8_t __iomem *)prbuffer->data;
			int32_t my_empty_len, iop_len, rqbuf_firstindex, rqbuf_lastindex;

			rqbuf_lastindex = acb->rqbuf_lastindex;
			rqbuf_firstindex = acb->rqbuf_firstindex;
			iop_len = readl(&prbuffer->data_len);
			my_empty_len = (rqbuf_firstindex - rqbuf_lastindex - 1)
					&(ARCMSR_MAX_QBUFFER - 1);
			if (my_empty_len >= iop_len) {
				while (iop_len > 0) {
					acb->rqbuffer[acb->rqbuf_lastindex] = readb(iop_data);
					acb->rqbuf_lastindex++;
					acb->rqbuf_lastindex %= ARCMSR_MAX_QBUFFER;
					iop_data++;
					iop_len--;
				}
				writel(ARCMSR_INBOUND_DRIVER_DATA_READ_OK,
					&reg->inbound_doorbell);
			} else
				acb->acb_flags |= ACB_F_IOPDATA_OVERFLOW;
		}
		if (outbound_doorbell & ARCMSR_OUTBOUND_IOP331_DATA_READ_OK) {
			acb->acb_flags |= ACB_F_MESSAGE_WQBUFFER_READED;
			if (acb->wqbuf_firstindex != acb->wqbuf_lastindex) {
				struct QBUFFER __iomem *pwbuffer =
						(struct QBUFFER __iomem *) &reg->message_wbuffer;
				uint8_t __iomem *iop_data = (uint8_t __iomem *) pwbuffer->data;
				int32_t allxfer_len = 0;

				acb->acb_flags &= (~ACB_F_MESSAGE_WQBUFFER_READED);
				while ((acb->wqbuf_firstindex != acb->wqbuf_lastindex)
					&& (allxfer_len < 124)) {
					writeb(acb->wqbuffer[acb->wqbuf_firstindex], iop_data);
					acb->wqbuf_firstindex++;
					acb->wqbuf_firstindex %= ARCMSR_MAX_QBUFFER;
					iop_data++;
					allxfer_len++;
				}
				writel(allxfer_len, &pwbuffer->data_len);
				writel(ARCMSR_INBOUND_DRIVER_DATA_WRITE_OK,
					&reg->inbound_doorbell);
			}
			if (acb->wqbuf_firstindex == acb->wqbuf_lastindex)
				acb->acb_flags |= ACB_F_MESSAGE_WQBUFFER_CLEARED;
		}
	}
	if (outbound_intstatus & ARCMSR_MU_OUTBOUND_POSTQUEUE_INT) {
		int id, lun;
		/*
		****************************************************************
		**               areca cdb command done
		****************************************************************
		*/
		while (1) {
			if ((flag_ccb = readl(&reg->outbound_queueport)) == 0xFFFFFFFF)
				break;/* chip FIFO has no ccb left for completion */
			/* check if command done with no error */
			ccb = (struct CommandControlBlock *)(acb->vir2phy_offset +
				(flag_ccb << 5));
			if ((ccb->acb != acb) || (ccb->startdone != ARCMSR_CCB_START)) {
				if (ccb->startdone == ARCMSR_CCB_ABORTED) {
					struct scsi_cmnd *abortcmd = ccb->pcmd;
					if (abortcmd) {
						abortcmd->result |= DID_ABORT << 16;
						arcmsr_ccb_complete(ccb, 1);
						printk(KERN_NOTICE
							"arcmsr%d: ccb='0x%p' isr got aborted command \n"
							, acb->host->host_no, ccb);
					}
					continue;
				}
				printk(KERN_NOTICE
					"arcmsr%d: isr get an illegal ccb command done acb='0x%p'"
					" ccb='0x%p' ccbacb='0x%p' startdone = 0x%x"
					" ccboutstandingcount=%d \n"
					, acb->host->host_no
					, acb
					, ccb
					, ccb->acb
					, ccb->startdone
					, atomic_read(&acb->ccboutstandingcount));
				continue;
			}
			id = ccb->pcmd->device->id;
			lun = ccb->pcmd->device->lun;
			if (!(flag_ccb & ARCMSR_CCBREPLY_FLAG_ERROR)) {
				if (acb->devstate[id][lun] == ARECA_RAID_GONE)
					acb->devstate[id][lun] = ARECA_RAID_GOOD;
				ccb->pcmd->result = DID_OK << 16;
				arcmsr_ccb_complete(ccb, 1);
			} else {
				switch (ccb->arcmsr_cdb.DeviceStatus) {
				case ARCMSR_DEV_SELECT_TIMEOUT: {
						acb->devstate[id][lun] = ARECA_RAID_GONE;
						ccb->pcmd->result = DID_TIME_OUT << 16;
						arcmsr_ccb_complete(ccb, 1);
					}
					break;
				case ARCMSR_DEV_ABORTED:
				case ARCMSR_DEV_INIT_FAIL: {
						acb->devstate[id][lun] = ARECA_RAID_GONE;
						ccb->pcmd->result = DID_BAD_TARGET << 16;
						arcmsr_ccb_complete(ccb, 1);
					}
					break;
				case ARCMSR_DEV_CHECK_CONDITION: {
						acb->devstate[id][lun] = ARECA_RAID_GOOD;
						arcmsr_report_sense_info(ccb);
						arcmsr_ccb_complete(ccb, 1);
					}
					break;
				default:
					printk(KERN_NOTICE
						"arcmsr%d: scsi id=%d lun=%d"
						" isr get command error done,"
						" but got unknown DeviceStatus = 0x%x \n"
						, acb->host->host_no
						, id
						, lun
						, ccb->arcmsr_cdb.DeviceStatus);
					acb->devstate[id][lun] = ARECA_RAID_GONE;
					ccb->pcmd->result = DID_NO_CONNECT << 16;
					arcmsr_ccb_complete(ccb, 1);
					break;
				}
			}
		}/* drain reply FIFO */
	}
	if (!(outbound_intstatus & ARCMSR_MU_OUTBOUND_HANDLE_INT))
		return IRQ_NONE;
	return IRQ_HANDLED;
}

static void arcmsr_iop_parking(struct AdapterControlBlock *acb)
{
	if (acb) {
		/* stop adapter background rebuild */
		if (acb->acb_flags & ACB_F_MSG_START_BGRB) {
			acb->acb_flags &= ~ACB_F_MSG_START_BGRB;
			arcmsr_stop_adapter_bgrb(acb);
			arcmsr_flush_adapter_cache(acb);
		}
	}
}

static int arcmsr_iop_message_xfer(struct AdapterControlBlock *acb, struct scsi_cmnd *cmd)
{
	struct MessageUnit __iomem *reg = acb->pmu;
	struct CMD_MESSAGE_FIELD *pcmdmessagefld;
	int retvalue = 0, transfer_len = 0;
	char *buffer;
	struct scatterlist *sg;
	uint32_t controlcode = (uint32_t) cmd->cmnd[5] << 24 |
				(uint32_t) cmd->cmnd[6] << 16 |
				(uint32_t) cmd->cmnd[7] << 8 |
				(uint32_t) cmd->cmnd[8];
				/* 4 bytes: Areca io control code */

	sg = scsi_sglist(cmd);
	buffer = kmap_atomic(sg->page, KM_IRQ0) + sg->offset;
	if (scsi_sg_count(cmd) > 1) {
		retvalue = ARCMSR_MESSAGE_FAIL;
		goto message_out;
	}
	transfer_len += sg->length;

	if (transfer_len > sizeof(struct CMD_MESSAGE_FIELD)) {
		retvalue = ARCMSR_MESSAGE_FAIL;
		goto message_out;
	}
	pcmdmessagefld = (struct CMD_MESSAGE_FIELD *) buffer;
	switch (controlcode) {
	case ARCMSR_MESSAGE_READ_RQBUFFER: {
			unsigned long *ver_addr;
			dma_addr_t buf_handle;
			uint8_t *pQbuffer, *ptmpQbuffer;
			int32_t allxfer_len = 0;

			ver_addr = pci_alloc_consistent(acb->pdev, 1032, &buf_handle);
			if (!ver_addr) {
				retvalue = ARCMSR_MESSAGE_FAIL;
				goto message_out;
			}
			ptmpQbuffer = (uint8_t *) ver_addr;
			while ((acb->rqbuf_firstindex != acb->rqbuf_lastindex)
				&& (allxfer_len < 1031)) {
				pQbuffer = &acb->rqbuffer[acb->rqbuf_firstindex];
				memcpy(ptmpQbuffer, pQbuffer, 1);
				acb->rqbuf_firstindex++;
				acb->rqbuf_firstindex %= ARCMSR_MAX_QBUFFER;
				ptmpQbuffer++;
				allxfer_len++;
			}
			if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
				struct QBUFFER __iomem *prbuffer = (struct QBUFFER __iomem *)
							&reg->message_rbuffer;
				uint8_t __iomem *iop_data = (uint8_t __iomem *)prbuffer->data;
				int32_t iop_len;

				acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW;
				iop_len = readl(&prbuffer->data_len);
				while (iop_len > 0) {
					acb->rqbuffer[acb->rqbuf_lastindex] = readb(iop_data);
					acb->rqbuf_lastindex++;
					acb->rqbuf_lastindex %= ARCMSR_MAX_QBUFFER;
					iop_data++;
					iop_len--;
				}
				writel(ARCMSR_INBOUND_DRIVER_DATA_READ_OK,
						&reg->inbound_doorbell);
			}
			memcpy(pcmdmessagefld->messagedatabuffer,
				(uint8_t *)ver_addr, allxfer_len);
			pcmdmessagefld->cmdmessage.Length = allxfer_len;
			pcmdmessagefld->cmdmessage.ReturnCode = ARCMSR_MESSAGE_RETURNCODE_OK;
			pci_free_consistent(acb->pdev, 1032, ver_addr, buf_handle);
		}
		break;
	case ARCMSR_MESSAGE_WRITE_WQBUFFER: {
			unsigned long *ver_addr;
			dma_addr_t buf_handle;
			int32_t my_empty_len, user_len, wqbuf_firstindex, wqbuf_lastindex;
			uint8_t *pQbuffer, *ptmpuserbuffer;

			ver_addr = pci_alloc_consistent(acb->pdev, 1032, &buf_handle);
			if (!ver_addr) {
				retvalue = ARCMSR_MESSAGE_FAIL;
				goto message_out;
			}
			ptmpuserbuffer = (uint8_t *)ver_addr;
			user_len = pcmdmessagefld->cmdmessage.Length;
			memcpy(ptmpuserbuffer, pcmdmessagefld->messagedatabuffer, user_len);
			wqbuf_lastindex = acb->wqbuf_lastindex;
			wqbuf_firstindex = acb->wqbuf_firstindex;
			if (wqbuf_lastindex != wqbuf_firstindex) {
				struct SENSE_DATA *sensebuffer =
					(struct SENSE_DATA *)cmd->sense_buffer;
				arcmsr_post_Qbuffer(acb);
				/* has error report sensedata */
				sensebuffer->ErrorCode = 0x70;
				sensebuffer->SenseKey = ILLEGAL_REQUEST;
				sensebuffer->AdditionalSenseLength = 0x0A;
				sensebuffer->AdditionalSenseCode = 0x20;
				sensebuffer->Valid = 1;
				retvalue = ARCMSR_MESSAGE_FAIL;
			} else {
				my_empty_len = (wqbuf_firstindex - wqbuf_lastindex - 1)
						&(ARCMSR_MAX_QBUFFER - 1);
				if (my_empty_len >= user_len) {
					while (user_len > 0) {
						pQbuffer =
						&acb->wqbuffer[acb->wqbuf_lastindex];
						memcpy(pQbuffer, ptmpuserbuffer, 1);
						acb->wqbuf_lastindex++;
						acb->wqbuf_lastindex %= ARCMSR_MAX_QBUFFER;
						ptmpuserbuffer++;
						user_len--;
					}
					if (acb->acb_flags & ACB_F_MESSAGE_WQBUFFER_CLEARED) {
						acb->acb_flags &=
							~ACB_F_MESSAGE_WQBUFFER_CLEARED;
						arcmsr_post_Qbuffer(acb);
					}
				} else {
					/* has error report sensedata */
					struct SENSE_DATA *sensebuffer =
						(struct SENSE_DATA *)cmd->sense_buffer;
					sensebuffer->ErrorCode = 0x70;
					sensebuffer->SenseKey = ILLEGAL_REQUEST;
					sensebuffer->AdditionalSenseLength = 0x0A;
					sensebuffer->AdditionalSenseCode = 0x20;
					sensebuffer->Valid = 1;
					retvalue = ARCMSR_MESSAGE_FAIL;
				}
			}
			pci_free_consistent(acb->pdev, 1032, ver_addr, buf_handle);
		}
		break;
	case ARCMSR_MESSAGE_CLEAR_RQBUFFER: {
			uint8_t *pQbuffer = acb->rqbuffer;

			if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
				acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW;
				writel(ARCMSR_INBOUND_DRIVER_DATA_READ_OK,
					&reg->inbound_doorbell);
			}
			acb->acb_flags |= ACB_F_MESSAGE_RQBUFFER_CLEARED;
			acb->rqbuf_firstindex = 0;
			acb->rqbuf_lastindex = 0;
			memset(pQbuffer, 0, ARCMSR_MAX_QBUFFER);
			pcmdmessagefld->cmdmessage.ReturnCode =
				ARCMSR_MESSAGE_RETURNCODE_OK;
		}
		break;
	case ARCMSR_MESSAGE_CLEAR_WQBUFFER: {
			uint8_t *pQbuffer = acb->wqbuffer;

			if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
				acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW;
				writel(ARCMSR_INBOUND_DRIVER_DATA_READ_OK
						, &reg->inbound_doorbell);
			}
			acb->acb_flags |=
				(ACB_F_MESSAGE_WQBUFFER_CLEARED |
					ACB_F_MESSAGE_WQBUFFER_READED);
			acb->wqbuf_firstindex = 0;
			acb->wqbuf_lastindex = 0;
			memset(pQbuffer, 0, ARCMSR_MAX_QBUFFER);
			pcmdmessagefld->cmdmessage.ReturnCode =
				ARCMSR_MESSAGE_RETURNCODE_OK;
		}
		break;
	case ARCMSR_MESSAGE_CLEAR_ALLQBUFFER: {
			uint8_t *pQbuffer;

			if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
				acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW;
				writel(ARCMSR_INBOUND_DRIVER_DATA_READ_OK
						, &reg->inbound_doorbell);
			}
			acb->acb_flags |=
				(ACB_F_MESSAGE_WQBUFFER_CLEARED
				| ACB_F_MESSAGE_RQBUFFER_CLEARED
				| ACB_F_MESSAGE_WQBUFFER_READED);
			acb->rqbuf_firstindex = 0;
			acb->rqbuf_lastindex = 0;
			acb->wqbuf_firstindex = 0;
			acb->wqbuf_lastindex = 0;
			pQbuffer = acb->rqbuffer;
			memset(pQbuffer, 0, sizeof (struct QBUFFER));
			pQbuffer = acb->wqbuffer;
			memset(pQbuffer, 0, sizeof (struct QBUFFER));
			pcmdmessagefld->cmdmessage.ReturnCode = ARCMSR_MESSAGE_RETURNCODE_OK;
		}
		break;
	case ARCMSR_MESSAGE_RETURN_CODE_3F: {
			pcmdmessagefld->cmdmessage.ReturnCode = ARCMSR_MESSAGE_RETURNCODE_3F;
		}
		break;
	case ARCMSR_MESSAGE_SAY_HELLO: {
			int8_t *hello_string = "Hello! I am ARCMSR";

			memcpy(pcmdmessagefld->messagedatabuffer, hello_string
				, (int16_t)strlen(hello_string));
			pcmdmessagefld->cmdmessage.ReturnCode = ARCMSR_MESSAGE_RETURNCODE_OK;
		}
		break;
	case ARCMSR_MESSAGE_SAY_GOODBYE:
		arcmsr_iop_parking(acb);
		break;
	case ARCMSR_MESSAGE_FLUSH_ADAPTER_CACHE:
		arcmsr_flush_adapter_cache(acb);
		break;
	default:
		retvalue = ARCMSR_MESSAGE_FAIL;
	}
 message_out:
	sg = scsi_sglist(cmd);
	kunmap_atomic(buffer - sg->offset, KM_IRQ0);

	return retvalue;
}
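
/*
** Illustrative sketch, not part of the driver: the pass-through above is
** reached through READ_BUFFER/WRITE_BUFFER commands sent to the virtual
** device, and it rebuilds the 32-bit Areca control code from CDB bytes
** 5..8, most significant byte first.  In isolation (hypothetical name):
*/
#if 0	/* example only, never compiled */
static uint32_t arcmsr_example_controlcode(const unsigned char *cdb)
{
	return ((uint32_t)cdb[5] << 24) |
	       ((uint32_t)cdb[6] << 16) |
	       ((uint32_t)cdb[7] << 8) |
		(uint32_t)cdb[8];
}
#endif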

static struct CommandControlBlock *arcmsr_get_freeccb(struct AdapterControlBlock *acb)
{
	struct list_head *head = &acb->ccb_free_list;
	struct CommandControlBlock *ccb = NULL;

	if (!list_empty(head)) {
		ccb = list_entry(head->next, struct CommandControlBlock, list);
		list_del(head->next);
	}
	return ccb;
}

static void arcmsr_handle_virtual_command(struct AdapterControlBlock *acb,
		struct scsi_cmnd *cmd)
{
	switch (cmd->cmnd[0]) {
	case INQUIRY: {
		unsigned char inqdata[36];
		char *buffer;
		struct scatterlist *sg;

		if (cmd->device->lun) {
			cmd->result = (DID_TIME_OUT << 16);
			cmd->scsi_done(cmd);
			return;
		}
		inqdata[0] = TYPE_PROCESSOR;
		/* Periph Qualifier & Periph Dev Type */
		inqdata[1] = 0;
		/* rem media bit & Dev Type Modifier */
		inqdata[2] = 0;
		/* ISO, ECMA, & ANSI versions */
		inqdata[4] = 31;
		/* length of additional data */
		strncpy(&inqdata[8], "Areca   ", 8);
		/* Vendor Identification */
		strncpy(&inqdata[16], "RAID controller ", 16);
		/* Product Identification */
		strncpy(&inqdata[32], "R001", 4); /* Product Revision */

		sg = scsi_sglist(cmd);
		buffer = kmap_atomic(sg->page, KM_IRQ0) + sg->offset;

		memcpy(buffer, inqdata, sizeof(inqdata));
		sg = scsi_sglist(cmd);
		kunmap_atomic(buffer - sg->offset, KM_IRQ0);

		cmd->scsi_done(cmd);
	}
	break;
	case WRITE_BUFFER:
	case READ_BUFFER: {
		if (arcmsr_iop_message_xfer(acb, cmd))
			cmd->result = (DID_ERROR << 16);
		cmd->scsi_done(cmd);
	}
	break;
	default:
		cmd->scsi_done(cmd);
	}
}

static int arcmsr_queue_command(struct scsi_cmnd *cmd,
	void (*done)(struct scsi_cmnd *))
{
	struct Scsi_Host *host = cmd->device->host;
	struct AdapterControlBlock *acb =
		(struct AdapterControlBlock *) host->hostdata;
	struct CommandControlBlock *ccb;
	int target = cmd->device->id;
	int lun = cmd->device->lun;

	cmd->scsi_done = done;
	cmd->host_scribble = NULL;
	cmd->result = 0;
	if (acb->acb_flags & ACB_F_BUS_RESET) {
		printk(KERN_NOTICE "arcmsr%d: bus reset"
			" and return busy \n"
			, acb->host->host_no);
		return SCSI_MLQUEUE_HOST_BUSY;
	}
	if (target == 16) {
		/* virtual device for iop message transfer */
		arcmsr_handle_virtual_command(acb, cmd);
		return 0;
	}
	if (acb->devstate[target][lun] == ARECA_RAID_GONE) {
		uint8_t block_cmd;

		block_cmd = cmd->cmnd[0] & 0x0f;
		if (block_cmd == 0x08 || block_cmd == 0x0a) {
			printk(KERN_NOTICE
				"arcmsr%d: block 'read/write'"
				" command with gone raid volume"
				" Cmd=%2x, TargetId=%d, Lun=%d \n"
				, acb->host->host_no
				, cmd->cmnd[0]
				, target, lun);
			cmd->result = (DID_NO_CONNECT << 16);
			cmd->scsi_done(cmd);
			return 0;
		}
	}
	if (atomic_read(&acb->ccboutstandingcount) >=
			ARCMSR_MAX_OUTSTANDING_CMD)
		return SCSI_MLQUEUE_HOST_BUSY;

	ccb = arcmsr_get_freeccb(acb);
	if (!ccb)
		return SCSI_MLQUEUE_HOST_BUSY;
	arcmsr_build_ccb(acb, ccb, cmd);
	arcmsr_post_ccb(acb, ccb);
	return 0;
}

static void arcmsr_get_firmware_spec(struct AdapterControlBlock *acb)
{
	struct MessageUnit __iomem *reg = acb->pmu;
	char *acb_firm_model = acb->firm_model;
	char *acb_firm_version = acb->firm_version;
	char __iomem *iop_firm_model = (char __iomem *) &reg->message_rwbuffer[15];
	char __iomem *iop_firm_version = (char __iomem *) &reg->message_rwbuffer[17];
	int count;

	writel(ARCMSR_INBOUND_MESG0_GET_CONFIG, &reg->inbound_msgaddr0);
	if (arcmsr_wait_msgint_ready(acb))
		printk(KERN_NOTICE
			"arcmsr%d: wait "
			"'get adapter firmware miscellaneous data' timeout \n"
			, acb->host->host_no);
	count = 8;
	while (count) {
		*acb_firm_model = readb(iop_firm_model);
		acb_firm_model++;
		iop_firm_model++;
		count--;
	}
	count = 16;
	while (count) {
		*acb_firm_version = readb(iop_firm_version);
		acb_firm_version++;
		iop_firm_version++;
		count--;
	}
	printk(KERN_INFO
		"ARECA RAID ADAPTER%d: FIRMWARE VERSION %s \n"
		, acb->host->host_no
		, acb->firm_version);
	acb->firm_request_len = readl(&reg->message_rwbuffer[1]);
	acb->firm_numbers_queue = readl(&reg->message_rwbuffer[2]);
	acb->firm_sdram_size = readl(&reg->message_rwbuffer[3]);
	acb->firm_hd_channels = readl(&reg->message_rwbuffer[4]);
}
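
/*
** Illustrative sketch, not part of the driver: after the GET_CONFIG
** message completes, the firmware answers through message_rwbuffer[]
** exactly as read back above: word 1 is the request length, word 2 the
** queue count, word 3 the SDRAM size, word 4 the number of drive
** channels, while the model string starts at word 15 and the version
** string at word 17.  The strings live in MMIO space, hence the
** byte-wise readb() loops; a hypothetical stand-alone copy helper:
*/
#if 0	/* example only, never compiled */
static void arcmsr_example_copy_iop_string(char *dst, char __iomem *src, int len)
{
	while (len--)
		*dst++ = readb(src++);
}
#endif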

static void arcmsr_polling_ccbdone(struct AdapterControlBlock *acb,
	struct CommandControlBlock *poll_ccb)
{
	struct MessageUnit __iomem *reg = acb->pmu;
	struct CommandControlBlock *ccb;
	uint32_t flag_ccb, outbound_intstatus, poll_ccb_done = 0, poll_count = 0;
	int id, lun;

 polling_ccb_retry:
	poll_count++;
	outbound_intstatus = readl(&reg->outbound_intstatus)
					& acb->outbound_int_enable;
	writel(outbound_intstatus, &reg->outbound_intstatus);/* clear interrupt */
	while (1) {
		if ((flag_ccb = readl(&reg->outbound_queueport)) == 0xFFFFFFFF) {
			if (poll_ccb_done)
				break;
			else {
				msleep(25);
				if (poll_count > 100)
					break;
				goto polling_ccb_retry;
			}
		}
		ccb = (struct CommandControlBlock *)
			(acb->vir2phy_offset + (flag_ccb << 5));
		if ((ccb->acb != acb) ||
			(ccb->startdone != ARCMSR_CCB_START)) {
			if ((ccb->startdone == ARCMSR_CCB_ABORTED) ||
				(ccb == poll_ccb)) {
				printk(KERN_NOTICE
					"arcmsr%d: scsi id=%d lun=%d ccb='0x%p'"
					" poll command abort successfully \n"
					, acb->host->host_no
					, ccb->pcmd->device->id
					, ccb->pcmd->device->lun
					, ccb);
				ccb->pcmd->result = DID_ABORT << 16;
				arcmsr_ccb_complete(ccb, 1);
				poll_ccb_done = 1;
				continue;
			}
			printk(KERN_NOTICE
				"arcmsr%d: polling get an illegal ccb"
				" command done ccb='0x%p'"
				" ccboutstandingcount=%d \n"
				, acb->host->host_no
				, ccb
				, atomic_read(&acb->ccboutstandingcount));
			continue;
		}
		id = ccb->pcmd->device->id;
		lun = ccb->pcmd->device->lun;
		if (!(flag_ccb & ARCMSR_CCBREPLY_FLAG_ERROR)) {
			if (acb->devstate[id][lun] == ARECA_RAID_GONE)
				acb->devstate[id][lun] = ARECA_RAID_GOOD;
			ccb->pcmd->result = DID_OK << 16;
			arcmsr_ccb_complete(ccb, 1);
		} else {
			switch (ccb->arcmsr_cdb.DeviceStatus) {
			case ARCMSR_DEV_SELECT_TIMEOUT: {
					acb->devstate[id][lun] = ARECA_RAID_GONE;
					ccb->pcmd->result = DID_TIME_OUT << 16;
					arcmsr_ccb_complete(ccb, 1);
				}
				break;
			case ARCMSR_DEV_ABORTED:
			case ARCMSR_DEV_INIT_FAIL: {
					acb->devstate[id][lun] = ARECA_RAID_GONE;
					ccb->pcmd->result = DID_BAD_TARGET << 16;
					arcmsr_ccb_complete(ccb, 1);
				}
				break;
			case ARCMSR_DEV_CHECK_CONDITION: {
					acb->devstate[id][lun] = ARECA_RAID_GOOD;
					arcmsr_report_sense_info(ccb);
					arcmsr_ccb_complete(ccb, 1);
				}
				break;
			default:
				printk(KERN_NOTICE
					"arcmsr%d: scsi id=%d lun=%d"
					" polling and getting command error done,"
					" but got unknown DeviceStatus = 0x%x \n"
					, acb->host->host_no
					, id
					, lun
					, ccb->arcmsr_cdb.DeviceStatus);
				acb->devstate[id][lun] = ARECA_RAID_GONE;
				ccb->pcmd->result = DID_BAD_TARGET << 16;
				arcmsr_ccb_complete(ccb, 1);
				break;
			}
		}
	}
}

static void arcmsr_iop_init(struct AdapterControlBlock *acb)
{
	struct MessageUnit __iomem *reg = acb->pmu;
	uint32_t intmask_org, mask, outbound_doorbell, firmware_state = 0;

	do {
		firmware_state = readl(&reg->outbound_msgaddr1);
	} while (!(firmware_state & ARCMSR_OUTBOUND_MESG1_FIRMWARE_OK));
	intmask_org = readl(&reg->outbound_intmask)
			| ARCMSR_MU_OUTBOUND_MESSAGE0_INTMASKENABLE;
	arcmsr_get_firmware_spec(acb);

	acb->acb_flags |= ACB_F_MSG_START_BGRB;
	writel(ARCMSR_INBOUND_MESG0_START_BGRB, &reg->inbound_msgaddr0);
	if (arcmsr_wait_msgint_ready(acb)) {
		printk(KERN_NOTICE "arcmsr%d: "
			"wait 'start adapter background rebuild' timeout\n",
			acb->host->host_no);
	}

	outbound_doorbell = readl(&reg->outbound_doorbell);
	writel(outbound_doorbell, &reg->outbound_doorbell);
	writel(ARCMSR_INBOUND_DRIVER_DATA_READ_OK, &reg->inbound_doorbell);
	mask = ~(ARCMSR_MU_OUTBOUND_POSTQUEUE_INTMASKENABLE
			| ARCMSR_MU_OUTBOUND_DOORBELL_INTMASKENABLE);
	writel(intmask_org & mask, &reg->outbound_intmask);
	acb->outbound_int_enable = ~(intmask_org & mask) & 0x000000ff;
	acb->acb_flags |= ACB_F_IOP_INITED;
}
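
/*
** Illustrative sketch, not part of the driver: the mask arithmetic at the
** end of arcmsr_iop_init() leaves only the post-queue and doorbell
** interrupts unmasked, and outbound_int_enable caches (as a positive bit
** mask) which outbound status bits arcmsr_interrupt() is allowed to
** handle.  The same computation in isolation, with a hypothetical name:
*/
#if 0	/* example only, never compiled */
static uint32_t arcmsr_example_outbound_int_enable(uint32_t intmask_org)
{
	uint32_t mask = ~(ARCMSR_MU_OUTBOUND_POSTQUEUE_INTMASKENABLE
			| ARCMSR_MU_OUTBOUND_DOORBELL_INTMASKENABLE);

	return ~(intmask_org & mask) & 0x000000ff;
}
#endif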

static void arcmsr_iop_reset(struct AdapterControlBlock *acb)
{
	struct MessageUnit __iomem *reg = acb->pmu;
	struct CommandControlBlock *ccb;
	uint32_t intmask_org;
	int i = 0;

	if (atomic_read(&acb->ccboutstandingcount) != 0) {
		/* talk to iop 331 outstanding command aborted */
		arcmsr_abort_allcmd(acb);
		/* wait for 3 sec for all command aborted */
		msleep_interruptible(3000);
		/* disable all outbound interrupt */
		intmask_org = arcmsr_disable_outbound_ints(acb);
		/* clear all outbound posted Q */
		for (i = 0; i < ARCMSR_MAX_OUTSTANDING_CMD; i++)
			readl(&reg->outbound_queueport);
		for (i = 0; i < ARCMSR_MAX_FREECCB_NUM; i++) {
			ccb = acb->pccb_pool[i];
			if ((ccb->startdone == ARCMSR_CCB_START) ||
				(ccb->startdone == ARCMSR_CCB_ABORTED)) {
				ccb->startdone = ARCMSR_CCB_ABORTED;
				ccb->pcmd->result = DID_ABORT << 16;
				arcmsr_ccb_complete(ccb, 1);
			}
		}
		/* enable all outbound interrupt */
		arcmsr_enable_outbound_ints(acb, intmask_org);
	}
	atomic_set(&acb->ccboutstandingcount, 0);
}

static int arcmsr_bus_reset(struct scsi_cmnd *cmd)
{
	struct AdapterControlBlock *acb =
		(struct AdapterControlBlock *)cmd->device->host->hostdata;
	int i;

	acb->num_resets++;
	acb->acb_flags |= ACB_F_BUS_RESET;
	for (i = 0; i < 400; i++) {
		if (!atomic_read(&acb->ccboutstandingcount))
			break;
		arcmsr_interrupt(acb);
		msleep(25);
	}
	arcmsr_iop_reset(acb);
	acb->acb_flags &= ~ACB_F_BUS_RESET;
	return SUCCESS;
}

static void arcmsr_abort_one_cmd(struct AdapterControlBlock *acb,
		struct CommandControlBlock *ccb)
{
	u32 intmask;

	ccb->startdone = ARCMSR_CCB_ABORTED;

	/*
	** Wait for 3 sec for all command done.
	*/
	msleep_interruptible(3000);

	intmask = arcmsr_disable_outbound_ints(acb);
	arcmsr_polling_ccbdone(acb, ccb);
	arcmsr_enable_outbound_ints(acb, intmask);
}

static int arcmsr_abort(struct scsi_cmnd *cmd)
{
	struct AdapterControlBlock *acb =
		(struct AdapterControlBlock *)cmd->device->host->hostdata;
	int i = 0;

	printk(KERN_NOTICE
		"arcmsr%d: abort device command of scsi id=%d lun=%d \n",
		acb->host->host_no, cmd->device->id, cmd->device->lun);
	acb->num_aborts++;

	/*
	************************************************
	** the whole interrupt service routine is locked out here,
	** so handle the abort as soon as possible and exit
	************************************************
	*/
	if (!atomic_read(&acb->ccboutstandingcount))
		return SUCCESS;

	for (i = 0; i < ARCMSR_MAX_FREECCB_NUM; i++) {
		struct CommandControlBlock *ccb = acb->pccb_pool[i];
		if (ccb->startdone == ARCMSR_CCB_START && ccb->pcmd == cmd) {
			arcmsr_abort_one_cmd(acb, ccb);
			break;
		}
	}

	return SUCCESS;
}

static const char *arcmsr_info(struct Scsi_Host *host)
{
	struct AdapterControlBlock *acb =
		(struct AdapterControlBlock *) host->hostdata;
	static char buf[256];
	char *type;
	int raid6 = 1;

	switch (acb->pdev->device) {
	case PCI_DEVICE_ID_ARECA_1110:
	case PCI_DEVICE_ID_ARECA_1210:
		raid6 = 0;
		/*FALLTHRU*/
	case PCI_DEVICE_ID_ARECA_1120:
	case PCI_DEVICE_ID_ARECA_1130:
	case PCI_DEVICE_ID_ARECA_1160:
	case PCI_DEVICE_ID_ARECA_1170:
	case PCI_DEVICE_ID_ARECA_1220:
	case PCI_DEVICE_ID_ARECA_1230:
	case PCI_DEVICE_ID_ARECA_1260:
	case PCI_DEVICE_ID_ARECA_1270:
	case PCI_DEVICE_ID_ARECA_1280:
		type = "SATA";
		break;
	case PCI_DEVICE_ID_ARECA_1380:
	case PCI_DEVICE_ID_ARECA_1381:
	case PCI_DEVICE_ID_ARECA_1680:
	case PCI_DEVICE_ID_ARECA_1681:
		type = "SAS";
		break;
	default:
		type = "X-TYPE";
		break;
	}
	sprintf(buf, "Areca %s Host Adapter RAID Controller%s\n        %s",
			type, raid6 ? "( RAID6 capable)" : "",
			ARCMSR_DRIVER_VERSION);
	return buf;
}