ipr.c revision 3d1d0da67520aa5dbcea617d52546ae046e946a4
/*
 * ipr.c -- driver for IBM Power Linux RAID adapters
 *
 * Written By: Brian King <brking@us.ibm.com>, IBM Corporation
 *
 * Copyright (C) 2003, 2004 IBM Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 *
 */

/*
 * Notes:
 *
 * This driver is used to control the following SCSI adapters:
 *
 * IBM iSeries: 5702, 5703, 2780, 5709, 570A, 570B
 *
 * IBM pSeries: PCI-X Dual Channel Ultra 320 SCSI RAID Adapter
 *              PCI-X Dual Channel Ultra 320 SCSI Adapter
 *              PCI-X Dual Channel Ultra 320 SCSI RAID Enablement Card
 *              Embedded SCSI adapter on p615 and p655 systems
 *
 * Supported Hardware Features:
 *	- Ultra 320 SCSI controller
 *	- PCI-X host interface
 *	- Embedded PowerPC RISC Processor and Hardware XOR DMA Engine
 *	- Non-Volatile Write Cache
 *	- Supports attachment of non-RAID disks, tape, and optical devices
 *	- RAID Levels 0, 5, 10
 *	- Hot spare
 *	- Background Parity Checking
 *	- Background Data Scrubbing
 *	- Ability to increase the capacity of an existing RAID 5 disk array
 *		by adding disks
 *
 * Driver Features:
 *	- Tagged command queuing
 *	- Adapter microcode download
 *	- PCI hot plug
 *	- SCSI device hot plug
 *
 */

#include <linux/config.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/pci.h>
#include <linux/wait.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/blkdev.h>
#include <linux/firmware.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/processor.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_request.h>
#include "ipr.h"

/*
 *   Global Data
 */
static struct list_head ipr_ioa_head = LIST_HEAD_INIT(ipr_ioa_head);
static unsigned int ipr_log_level = IPR_DEFAULT_LOG_LEVEL;
static unsigned int ipr_max_speed = 1;
static int ipr_testmode = 0;
static unsigned int ipr_fastfail = 0;
static unsigned int ipr_transop_timeout = IPR_OPERATIONAL_TIMEOUT;
static unsigned int ipr_enable_cache = 1;
static unsigned int ipr_debug = 0;
static DEFINE_SPINLOCK(ipr_driver_lock);

/* This table describes the differences between DMA controller chips */
static const struct ipr_chip_cfg_t ipr_chip_cfg[] = {
	{ /* Gemstone and Citrine */
		.mailbox = 0x0042C,
		.cache_line_size = 0x20,
		{
			.set_interrupt_mask_reg = 0x0022C,
			.clr_interrupt_mask_reg = 0x00230,
			.sense_interrupt_mask_reg = 0x0022C,
			.clr_interrupt_reg = 0x00228,
			.sense_interrupt_reg = 0x00224,
			.ioarrin_reg = 0x00404,
			.sense_uproc_interrupt_reg = 0x00214,
			.set_uproc_interrupt_reg = 0x00214,
			.clr_uproc_interrupt_reg = 0x00218
		}
	},
	{ /* Snipe and Scamp */
		.mailbox = 0x0052C,
		.cache_line_size = 0x20,
		{
			.set_interrupt_mask_reg = 0x00288,
			.clr_interrupt_mask_reg = 0x0028C,
			.sense_interrupt_mask_reg = 0x00288,
			.clr_interrupt_reg = 0x00284,
			.sense_interrupt_reg = 0x00280,
			.ioarrin_reg = 0x00504,
			.sense_uproc_interrupt_reg = 0x00290,
			.set_uproc_interrupt_reg = 0x00290,
			.clr_uproc_interrupt_reg = 0x00294
		}
	},
};

static const struct ipr_chip_t ipr_chip[] = {
	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE, &ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE, &ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE, &ipr_chip_cfg[1] },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP, &ipr_chip_cfg[1] }
};

static int ipr_max_bus_speeds [] = {
	IPR_80MBs_SCSI_RATE, IPR_U160_SCSI_RATE, IPR_U320_SCSI_RATE
};

MODULE_AUTHOR("Brian King <brking@us.ibm.com>");
MODULE_DESCRIPTION("IBM Power RAID SCSI Adapter Driver");
module_param_named(max_speed, ipr_max_speed, uint, 0);
MODULE_PARM_DESC(max_speed, "Maximum bus speed (0-2). Default: 1=U160. Speeds: 0=80 MB/s, 1=U160, 2=U320");
module_param_named(log_level, ipr_log_level, uint, 0);
MODULE_PARM_DESC(log_level, "Set to 0 - 4 for increasing verbosity of device driver");
module_param_named(testmode, ipr_testmode, int, 0);
MODULE_PARM_DESC(testmode, "DANGEROUS!!! Allows unsupported configurations");
module_param_named(fastfail, ipr_fastfail, int, 0);
MODULE_PARM_DESC(fastfail, "Reduce timeouts and retries");
module_param_named(transop_timeout, ipr_transop_timeout, int, 0);
MODULE_PARM_DESC(transop_timeout, "Time in seconds to wait for adapter to come operational (default: 300)");
module_param_named(enable_cache, ipr_enable_cache, int, 0);
MODULE_PARM_DESC(enable_cache, "Enable adapter's non-volatile write cache (default: 1)");
module_param_named(debug, ipr_debug, int, 0);
MODULE_PARM_DESC(debug, "Enable device driver debugging logging. Set to 1 to enable. (default: 0)");
MODULE_LICENSE("GPL");
MODULE_VERSION(IPR_DRIVER_VERSION);
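
/*
 * A quick illustration of the parameters above (the values here are
 * hypothetical, not recommendations). Loading the driver with
 *
 *   modprobe ipr max_speed=2 log_level=2 fastfail=1
 *
 * would permit U320 negotiation, log one level more verbosely than the
 * default, and shorten command timeouts and retries for faster failover.
 */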

static const char *ipr_gpdd_dev_end_states[] = {
	"Command complete",
	"Terminated by host",
	"Terminated by device reset",
	"Terminated by bus reset",
	"Unknown",
	"Command not started"
};

static const char *ipr_gpdd_dev_bus_phases[] = {
	"Bus free",
	"Arbitration",
	"Selection",
	"Message out",
	"Command",
	"Message in",
	"Data out",
	"Data in",
	"Status",
	"Reselection",
	"Unknown"
};

/*  A constant array of IOASCs/URCs/Error Messages */
static const
struct ipr_error_table_t ipr_error_table[] = {
	{0x00000000, 1, 1,
	"8155: An unknown error was received"},
	{0x00330000, 0, 0,
	"Soft underlength error"},
	{0x005A0000, 0, 0,
	"Command to be cancelled not found"},
	{0x00808000, 0, 0,
	"Qualified success"},
	{0x01080000, 1, 1,
	"FFFE: Soft device bus error recovered by the IOA"},
	{0x01170600, 0, 1,
	"FFF9: Device sector reassign successful"},
	{0x01170900, 0, 1,
	"FFF7: Media error recovered by device rewrite procedures"},
	{0x01180200, 0, 1,
	"7001: IOA sector reassignment successful"},
	{0x01180500, 0, 1,
	"FFF9: Soft media error. Sector reassignment recommended"},
	{0x01180600, 0, 1,
	"FFF7: Media error recovered by IOA rewrite procedures"},
	{0x01418000, 0, 1,
	"FF3D: Soft PCI bus error recovered by the IOA"},
	{0x01440000, 1, 1,
	"FFF6: Device hardware error recovered by the IOA"},
	{0x01448100, 0, 1,
	"FFF6: Device hardware error recovered by the device"},
	{0x01448200, 1, 1,
	"FF3D: Soft IOA error recovered by the IOA"},
	{0x01448300, 0, 1,
	"FFFA: Undefined device response recovered by the IOA"},
	{0x014A0000, 1, 1,
	"FFF6: Device bus error, message or command phase"},
	{0x015D0000, 0, 1,
	"FFF6: Failure prediction threshold exceeded"},
	{0x015D9200, 0, 1,
	"8009: Impending cache battery pack failure"},
	{0x02040400, 0, 0,
	"34FF: Disk device format in progress"},
	{0x023F0000, 0, 0,
	"Synchronization required"},
	{0x024E0000, 0, 0,
	"Not ready, IOA shutdown"},
	{0x025A0000, 0, 0,
	"Not ready, IOA has been shutdown"},
	{0x02670100, 0, 1,
	"3020: Storage subsystem configuration error"},
	{0x03110B00, 0, 0,
	"FFF5: Medium error, data unreadable, recommend reassign"},
	{0x03110C00, 0, 0,
	"7000: Medium error, data unreadable, do not reassign"},
	{0x03310000, 0, 1,
	"FFF3: Disk media format bad"},
	{0x04050000, 0, 1,
	"3002: Addressed device failed to respond to selection"},
	{0x04080000, 1, 1,
	"3100: Device bus error"},
	{0x04080100, 0, 1,
	"3109: IOA timed out a device command"},
	{0x04088000, 0, 0,
	"3120: SCSI bus is not operational"},
	{0x04118000, 0, 1,
	"9000: IOA reserved area data check"},
	{0x04118100, 0, 1,
	"9001: IOA reserved area invalid data pattern"},
	{0x04118200, 0, 1,
	"9002: IOA reserved area LRC error"},
	{0x04320000, 0, 1,
	"102E: Out of alternate sectors for disk storage"},
	{0x04330000, 1, 1,
	"FFF4: Data transfer underlength error"},
	{0x04338000, 1, 1,
	"FFF4: Data transfer overlength error"},
	{0x043E0100, 0, 1,
	"3400: Logical unit failure"},
	{0x04408500, 0, 1,
	"FFF4: Device microcode is corrupt"},
	{0x04418000, 1, 1,
	"8150: PCI bus error"},
	{0x04430000, 1, 0,
	"Unsupported device bus message received"},
	{0x04440000, 1, 1,
	"FFF4: Disk device problem"},
	{0x04448200, 1, 1,
	"8150: Permanent IOA failure"},
	{0x04448300, 0, 1,
	"3010: Disk device returned wrong response to IOA"},
	{0x04448400, 0, 1,
	"8151: IOA microcode error"},
	{0x04448500, 0, 0,
	"Device bus status error"},
	{0x04448600, 0, 1,
	"8157: IOA error requiring IOA reset to recover"},
	{0x04490000, 0, 0,
	"Message reject received from the device"},
	{0x04449200, 0, 1,
	"8008: A permanent cache battery pack failure occurred"},
	{0x0444A000, 0, 1,
	"9090: Disk unit has been modified after the last known status"},
	{0x0444A200, 0, 1,
	"9081: IOA detected device error"},
	{0x0444A300, 0, 1,
	"9082: IOA detected device error"},
	{0x044A0000, 1, 1,
	"3110: Device bus error, message or command phase"},
	{0x04670400, 0, 1,
	"9091: Incorrect hardware configuration change has been detected"},
	{0x04678000, 0, 1,
	"9073: Invalid multi-adapter configuration"},
	{0x046E0000, 0, 1,
	"FFF4: Command to logical unit failed"},
	{0x05240000, 1, 0,
	"Illegal request, invalid request type or request packet"},
	{0x05250000, 0, 0,
	"Illegal request, invalid resource handle"},
	{0x05258000, 0, 0,
	"Illegal request, commands not allowed to this device"},
	{0x05258100, 0, 0,
	"Illegal request, command not allowed to a secondary adapter"},
	{0x05260000, 0, 0,
	"Illegal request, invalid field in parameter list"},
	{0x05260100, 0, 0,
	"Illegal request, parameter not supported"},
	{0x05260200, 0, 0,
	"Illegal request, parameter value invalid"},
	{0x052C0000, 0, 0,
	"Illegal request, command sequence error"},
	{0x052C8000, 1, 0,
	"Illegal request, dual adapter support not enabled"},
	{0x06040500, 0, 1,
	"9031: Array protection temporarily suspended, protection resuming"},
	{0x06040600, 0, 1,
	"9040: Array protection temporarily suspended, protection resuming"},
	{0x06290000, 0, 1,
	"FFFB: SCSI bus was reset"},
	{0x06290500, 0, 0,
	"FFFE: SCSI bus transition to single ended"},
	{0x06290600, 0, 0,
	"FFFE: SCSI bus transition to LVD"},
	{0x06298000, 0, 1,
	"FFFB: SCSI bus was reset by another initiator"},
	{0x063F0300, 0, 1,
	"3029: A device replacement has occurred"},
	{0x064C8000, 0, 1,
	"9051: IOA cache data exists for a missing or failed device"},
	{0x064C8100, 0, 1,
	"9055: Auxiliary cache IOA contains cache data needed by the primary IOA"},
	{0x06670100, 0, 1,
	"9025: Disk unit is not supported at its physical location"},
	{0x06670600, 0, 1,
	"3020: IOA detected a SCSI bus configuration error"},
	{0x06678000, 0, 1,
	"3150: SCSI bus configuration error"},
	{0x06678100, 0, 1,
	"9074: Asymmetric advanced function disk configuration"},
	{0x06690200, 0, 1,
	"9041: Array protection temporarily suspended"},
	{0x06698200, 0, 1,
	"9042: Corrupt array parity detected on specified device"},
	{0x066B0200, 0, 1,
	"9030: Array no longer protected due to missing or failed disk unit"},
	{0x066B8000, 0, 1,
	"9071: Link operational transition"},
	{0x066B8100, 0, 1,
	"9072: Link not operational transition"},
	{0x066B8200, 0, 1,
	"9032: Array exposed but still protected"},
	{0x07270000, 0, 0,
	"Failure due to other device"},
	{0x07278000, 0, 1,
	"9008: IOA does not support functions expected by devices"},
	{0x07278100, 0, 1,
	"9010: Cache data associated with attached devices cannot be found"},
	{0x07278200, 0, 1,
	"9011: Cache data belongs to devices other than those attached"},
	{0x07278400, 0, 1,
	"9020: Array missing 2 or more devices with only 1 device present"},
	{0x07278500, 0, 1,
	"9021: Array missing 2 or more devices with 2 or more devices present"},
	{0x07278600, 0, 1,
	"9022: Exposed array is missing a required device"},
	{0x07278700, 0, 1,
	"9023: Array member(s) not at required physical locations"},
	{0x07278800, 0, 1,
	"9024: Array not functional due to present hardware configuration"},
	{0x07278900, 0, 1,
	"9026: Array not functional due to present hardware configuration"},
	{0x07278A00, 0, 1,
	"9027: Array is missing a device and parity is out of sync"},
	{0x07278B00, 0, 1,
	"9028: Maximum number of arrays already exist"},
	{0x07278C00, 0, 1,
	"9050: Required cache data cannot be located for a disk unit"},
	{0x07278D00, 0, 1,
	"9052: Cache data exists for a device that has been modified"},
	{0x07278F00, 0, 1,
	"9054: IOA resources not available due to previous problems"},
	{0x07279100, 0, 1,
	"9092: Disk unit requires initialization before use"},
	{0x07279200, 0, 1,
	"9029: Incorrect hardware configuration change has been detected"},
	{0x07279600, 0, 1,
	"9060: One or more disk pairs are missing from an array"},
	{0x07279700, 0, 1,
	"9061: One or more disks are missing from an array"},
	{0x07279800, 0, 1,
	"9062: One or more disks are missing from an array"},
	{0x07279900, 0, 1,
	"9063: Maximum number of functional arrays has been exceeded"},
	{0x0B260000, 0, 0,
	"Aborted command, invalid descriptor"},
	{0x0B5A0000, 0, 0,
	"Command terminated by host"}
};

static const struct ipr_ses_table_entry ipr_ses_table[] = {
	{ "2104-DL1        ", "XXXXXXXXXXXXXXXX", 80 },
	{ "2104-TL1        ", "XXXXXXXXXXXXXXXX", 80 },
	{ "HSBP07M P U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Hidive 7 slot */
	{ "HSBP05M P U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Hidive 5 slot */
	{ "HSBP05M S U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Bowtie */
	{ "HSBP06E ASU2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* MartinFenning */
	{ "2104-DU3        ", "XXXXXXXXXXXXXXXX", 160 },
	{ "2104-TU3        ", "XXXXXXXXXXXXXXXX", 160 },
	{ "HSBP04C RSU2SCSI", "XXXXXXX*XXXXXXXX", 160 },
	{ "HSBP06E RSU2SCSI", "XXXXXXX*XXXXXXXX", 160 },
	{ "St  V1S2        ", "XXXXXXXXXXXXXXXX", 160 },
	{ "HSBPD4M  PU3SCSI", "XXXXXXX*XXXXXXXX", 160 },
	{ "VSBPD1H   U3SCSI", "XXXXXXX*XXXXXXXX", 160 }
};

/*
 *  Function Prototypes
 */
static int ipr_reset_alert(struct ipr_cmnd *);
static void ipr_process_ccn(struct ipr_cmnd *);
static void ipr_process_error(struct ipr_cmnd *);
static void ipr_reset_ioa_job(struct ipr_cmnd *);
static void ipr_initiate_ioa_reset(struct ipr_ioa_cfg *,
				   enum ipr_shutdown_type);

#ifdef CONFIG_SCSI_IPR_TRACE
/**
 * ipr_trc_hook - Add a trace entry to the driver trace
 * @ipr_cmd:	ipr command struct
 * @type:		trace type
 * @add_data:	additional data
 *
 * Return value:
 * 	none
 **/
static void ipr_trc_hook(struct ipr_cmnd *ipr_cmd,
			 u8 type, u32 add_data)
{
	struct ipr_trace_entry *trace_entry;
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	trace_entry = &ioa_cfg->trace[ioa_cfg->trace_index++];
	trace_entry->time = jiffies;
	trace_entry->op_code = ipr_cmd->ioarcb.cmd_pkt.cdb[0];
	trace_entry->type = type;
	trace_entry->cmd_index = ipr_cmd->cmd_index;
	trace_entry->res_handle = ipr_cmd->ioarcb.res_handle;
	trace_entry->u.add_data = add_data;
}
#else
#define ipr_trc_hook(ipr_cmd, type, add_data) do { } while(0)
#endif

/**
 * ipr_reinit_ipr_cmnd - Re-initialize an IPR Cmnd block for reuse
 * @ipr_cmd:	ipr command struct
 *
 * Return value:
 * 	none
 **/
static void ipr_reinit_ipr_cmnd(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
	struct ipr_ioasa *ioasa = &ipr_cmd->ioasa;

	memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt));
	ioarcb->write_data_transfer_length = 0;
	ioarcb->read_data_transfer_length = 0;
	ioarcb->write_ioadl_len = 0;
	ioarcb->read_ioadl_len = 0;
	ioasa->ioasc = 0;
	ioasa->residual_data_len = 0;

	ipr_cmd->scsi_cmd = NULL;
	ipr_cmd->sense_buffer[0] = 0;
	ipr_cmd->dma_use_sg = 0;
}

/**
 * ipr_init_ipr_cmnd - Initialize an IPR Cmnd block
 * @ipr_cmd:	ipr command struct
 *
 * Return value:
 * 	none
 **/
static void ipr_init_ipr_cmnd(struct ipr_cmnd *ipr_cmd)
{
	ipr_reinit_ipr_cmnd(ipr_cmd);
	ipr_cmd->u.scratch = 0;
	ipr_cmd->sibling = NULL;
	init_timer(&ipr_cmd->timer);
}

/**
 * ipr_get_free_ipr_cmnd - Get a free IPR Cmnd block
 * @ioa_cfg:	ioa config struct
 *
 * Return value:
 * 	pointer to ipr command struct
 **/
static
struct ipr_cmnd *ipr_get_free_ipr_cmnd(struct ipr_ioa_cfg *ioa_cfg)
{
	struct ipr_cmnd *ipr_cmd;

	ipr_cmd = list_entry(ioa_cfg->free_q.next, struct ipr_cmnd, queue);
	list_del(&ipr_cmd->queue);
	ipr_init_ipr_cmnd(ipr_cmd);

	return ipr_cmd;
}

/**
 * ipr_unmap_sglist - Unmap scatterlist if mapped
 * @ioa_cfg:	ioa config struct
 * @ipr_cmd:	ipr command struct
 *
 * Return value:
 * 	nothing
 **/
static void ipr_unmap_sglist(struct ipr_ioa_cfg *ioa_cfg,
			     struct ipr_cmnd *ipr_cmd)
{
	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;

	if (ipr_cmd->dma_use_sg) {
		if (scsi_cmd->use_sg > 0) {
			pci_unmap_sg(ioa_cfg->pdev, scsi_cmd->request_buffer,
				     scsi_cmd->use_sg,
				     scsi_cmd->sc_data_direction);
		} else {
			pci_unmap_single(ioa_cfg->pdev, ipr_cmd->dma_handle,
					 scsi_cmd->request_bufflen,
					 scsi_cmd->sc_data_direction);
		}
	}
}

/**
 * ipr_mask_and_clear_interrupts - Mask all and clear specified interrupts
 * @ioa_cfg:	ioa config struct
 * @clr_ints:     interrupts to clear
 *
 * This function masks all interrupts on the adapter, then clears the
 * interrupts specified in the mask
 *
 * Return value:
 * 	none
 **/
static void ipr_mask_and_clear_interrupts(struct ipr_ioa_cfg *ioa_cfg,
					  u32 clr_ints)
{
	volatile u32 int_reg;

	/* Stop new interrupts */
	ioa_cfg->allow_interrupts = 0;

	/* Set interrupt mask to stop all new interrupts */
	writel(~0, ioa_cfg->regs.set_interrupt_mask_reg);

	/* Clear any pending interrupts */
	writel(clr_ints, ioa_cfg->regs.clr_interrupt_reg);
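	/* Read back to ensure the posted MMIO writes above reach the adapter
	   before we return */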
	int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
}

/**
 * ipr_save_pcix_cmd_reg - Save PCI-X command register
 * @ioa_cfg:	ioa config struct
 *
 * Return value:
 * 	0 on success / -EIO on failure
 **/
static int ipr_save_pcix_cmd_reg(struct ipr_ioa_cfg *ioa_cfg)
{
	int pcix_cmd_reg = pci_find_capability(ioa_cfg->pdev, PCI_CAP_ID_PCIX);

	if (pcix_cmd_reg == 0) {
		dev_err(&ioa_cfg->pdev->dev, "Failed to save PCI-X command register\n");
		return -EIO;
	}

	if (pci_read_config_word(ioa_cfg->pdev, pcix_cmd_reg + PCI_X_CMD,
				 &ioa_cfg->saved_pcix_cmd_reg) != PCIBIOS_SUCCESSFUL) {
		dev_err(&ioa_cfg->pdev->dev, "Failed to save PCI-X command register\n");
		return -EIO;
	}

	ioa_cfg->saved_pcix_cmd_reg |= PCI_X_CMD_DPERR_E | PCI_X_CMD_ERO;
	return 0;
}

/**
 * ipr_set_pcix_cmd_reg - Setup PCI-X command register
 * @ioa_cfg:	ioa config struct
 *
 * Return value:
 * 	0 on success / -EIO on failure
 **/
static int ipr_set_pcix_cmd_reg(struct ipr_ioa_cfg *ioa_cfg)
{
	int pcix_cmd_reg = pci_find_capability(ioa_cfg->pdev, PCI_CAP_ID_PCIX);

	if (pcix_cmd_reg) {
		if (pci_write_config_word(ioa_cfg->pdev, pcix_cmd_reg + PCI_X_CMD,
					  ioa_cfg->saved_pcix_cmd_reg) != PCIBIOS_SUCCESSFUL) {
			dev_err(&ioa_cfg->pdev->dev, "Failed to setup PCI-X command register\n");
			return -EIO;
		}
	} else {
		dev_err(&ioa_cfg->pdev->dev,
			"Failed to setup PCI-X command register\n");
		return -EIO;
	}

	return 0;
}

/**
 * ipr_scsi_eh_done - mid-layer done function for aborted ops
 * @ipr_cmd:	ipr command struct
 *
 * This function is invoked by the interrupt handler for
 * ops generated by the SCSI mid-layer which are being aborted.
 *
 * Return value:
 * 	none
 **/
static void ipr_scsi_eh_done(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;

	scsi_cmd->result |= (DID_ERROR << 16);

	ipr_unmap_sglist(ioa_cfg, ipr_cmd);
	scsi_cmd->scsi_done(scsi_cmd);
	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
}

/**
 * ipr_fail_all_ops - Fails all outstanding ops.
 * @ioa_cfg:	ioa config struct
 *
 * This function fails all outstanding ops.
 *
 * Return value:
 * 	none
 **/
static void ipr_fail_all_ops(struct ipr_ioa_cfg *ioa_cfg)
{
	struct ipr_cmnd *ipr_cmd, *temp;

	ENTER;
	list_for_each_entry_safe(ipr_cmd, temp, &ioa_cfg->pending_q, queue) {
		list_del(&ipr_cmd->queue);

		ipr_cmd->ioasa.ioasc = cpu_to_be32(IPR_IOASC_IOA_WAS_RESET);
		ipr_cmd->ioasa.ilid = cpu_to_be32(IPR_DRIVER_ILID);

		if (ipr_cmd->scsi_cmd)
			ipr_cmd->done = ipr_scsi_eh_done;

		ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH, IPR_IOASC_IOA_WAS_RESET);
		del_timer(&ipr_cmd->timer);
		ipr_cmd->done(ipr_cmd);
	}

	LEAVE;
}

/**
 * ipr_do_req -  Send driver initiated requests.
 * @ipr_cmd:		ipr command struct
 * @done:			done function
 * @timeout_func:	timeout function
 * @timeout:		timeout value
 *
 * This function sends the specified command to the adapter with the
 * timeout given. The done function is invoked on command completion.
 *
 * Return value:
 * 	none
 **/
static void ipr_do_req(struct ipr_cmnd *ipr_cmd,
		       void (*done) (struct ipr_cmnd *),
		       void (*timeout_func) (struct ipr_cmnd *), u32 timeout)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);

	ipr_cmd->done = done;

	ipr_cmd->timer.data = (unsigned long) ipr_cmd;
	ipr_cmd->timer.expires = jiffies + timeout;
	ipr_cmd->timer.function = (void (*)(unsigned long))timeout_func;

	add_timer(&ipr_cmd->timer);

	ipr_trc_hook(ipr_cmd, IPR_TRACE_START, 0);

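	/*
	 * Ensure all memory writes building the IOARCB are globally visible
	 * before the MMIO write below hands the command to the adapter.
	 */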
	mb();
	writel(be32_to_cpu(ipr_cmd->ioarcb.ioarcb_host_pci_addr),
	       ioa_cfg->regs.ioarrin_reg);
}

/**
 * ipr_internal_cmd_done - Op done function for an internally generated op.
 * @ipr_cmd:	ipr command struct
 *
 * This function is the op done function for an internally generated,
 * blocking op. It simply wakes the sleeping thread.
 *
 * Return value:
 * 	none
 **/
static void ipr_internal_cmd_done(struct ipr_cmnd *ipr_cmd)
{
	if (ipr_cmd->sibling)
		ipr_cmd->sibling = NULL;
	else
		complete(&ipr_cmd->completion);
}

/**
 * ipr_send_blocking_cmd - Send command and sleep on its completion.
 * @ipr_cmd:	ipr command struct
 * @timeout_func:	function to invoke if command times out
 * @timeout:	timeout
 *
 * Return value:
 * 	none
 **/
static void ipr_send_blocking_cmd(struct ipr_cmnd *ipr_cmd,
				  void (*timeout_func) (struct ipr_cmnd *ipr_cmd),
				  u32 timeout)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	init_completion(&ipr_cmd->completion);
	ipr_do_req(ipr_cmd, ipr_internal_cmd_done, timeout_func, timeout);

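	/*
	 * Drop the host lock while we sleep: we cannot sleep holding a
	 * spinlock, and the completion is signaled from interrupt context.
	 */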
	spin_unlock_irq(ioa_cfg->host->host_lock);
	wait_for_completion(&ipr_cmd->completion);
	spin_lock_irq(ioa_cfg->host->host_lock);
}

/**
 * ipr_send_hcam - Send an HCAM to the adapter.
 * @ioa_cfg:	ioa config struct
 * @type:		HCAM type
 * @hostrcb:	hostrcb struct
 *
 * This function will send a Host Controlled Async command to the adapter.
 * If HCAMs are currently not allowed to be issued to the adapter, it will
 * place the hostrcb on the free queue.
 *
 * Return value:
 * 	none
 **/
static void ipr_send_hcam(struct ipr_ioa_cfg *ioa_cfg, u8 type,
			  struct ipr_hostrcb *hostrcb)
{
	struct ipr_cmnd *ipr_cmd;
	struct ipr_ioarcb *ioarcb;

	if (ioa_cfg->allow_cmds) {
		ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
		list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);
		list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_pending_q);

		ipr_cmd->u.hostrcb = hostrcb;
		ioarcb = &ipr_cmd->ioarcb;

		ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
		ioarcb->cmd_pkt.request_type = IPR_RQTYPE_HCAM;
		ioarcb->cmd_pkt.cdb[0] = IPR_HOST_CONTROLLED_ASYNC;
		ioarcb->cmd_pkt.cdb[1] = type;
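		/* Bytes 7-8 of the CDB carry the HCAM buffer length, MSB first */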
		ioarcb->cmd_pkt.cdb[7] = (sizeof(hostrcb->hcam) >> 8) & 0xff;
		ioarcb->cmd_pkt.cdb[8] = sizeof(hostrcb->hcam) & 0xff;

		ioarcb->read_data_transfer_length = cpu_to_be32(sizeof(hostrcb->hcam));
		ioarcb->read_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
		ipr_cmd->ioadl[0].flags_and_data_len =
			cpu_to_be32(IPR_IOADL_FLAGS_READ_LAST | sizeof(hostrcb->hcam));
		ipr_cmd->ioadl[0].address = cpu_to_be32(hostrcb->hostrcb_dma);

		if (type == IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE)
			ipr_cmd->done = ipr_process_ccn;
		else
			ipr_cmd->done = ipr_process_error;

		ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_IOA_RES_ADDR);

		mb();
		writel(be32_to_cpu(ipr_cmd->ioarcb.ioarcb_host_pci_addr),
		       ioa_cfg->regs.ioarrin_reg);
	} else {
		list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
	}
}

/**
 * ipr_init_res_entry - Initialize a resource entry struct.
 * @res:	resource entry struct
 *
 * Return value:
 * 	none
 **/
static void ipr_init_res_entry(struct ipr_resource_entry *res)
{
	res->needs_sync_complete = 1;
	res->in_erp = 0;
	res->add_to_ml = 0;
	res->del_from_ml = 0;
	res->resetting_device = 0;
	res->sdev = NULL;
}

/**
 * ipr_handle_config_change - Handle a config change from the adapter
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb
 *
 * Return value:
 * 	none
 **/
static void ipr_handle_config_change(struct ipr_ioa_cfg *ioa_cfg,
			      struct ipr_hostrcb *hostrcb)
{
	struct ipr_resource_entry *res = NULL;
	struct ipr_config_table_entry *cfgte;
	u32 is_ndn = 1;

	cfgte = &hostrcb->hcam.u.ccn.cfgte;

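	/* Scan the used resource list for this resource address; a match
	   means the notification refers to an already-known device */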
	list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
		if (!memcmp(&res->cfgte.res_addr, &cfgte->res_addr,
			    sizeof(cfgte->res_addr))) {
			is_ndn = 0;
			break;
		}
	}

	if (is_ndn) {
		if (list_empty(&ioa_cfg->free_res_q)) {
			ipr_send_hcam(ioa_cfg,
				      IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE,
				      hostrcb);
			return;
		}

		res = list_entry(ioa_cfg->free_res_q.next,
				 struct ipr_resource_entry, queue);

		list_del(&res->queue);
		ipr_init_res_entry(res);
		list_add_tail(&res->queue, &ioa_cfg->used_res_q);
	}

	memcpy(&res->cfgte, cfgte, sizeof(struct ipr_config_table_entry));

	if (hostrcb->hcam.notify_type == IPR_HOST_RCB_NOTIF_TYPE_REM_ENTRY) {
		if (res->sdev) {
			res->sdev->hostdata = NULL;
			res->del_from_ml = 1;
			if (ioa_cfg->allow_ml_add_del)
				schedule_work(&ioa_cfg->work_q);
		} else
			list_move_tail(&res->queue, &ioa_cfg->free_res_q);
	} else if (!res->sdev) {
		res->add_to_ml = 1;
		if (ioa_cfg->allow_ml_add_del)
			schedule_work(&ioa_cfg->work_q);
	}

	ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
}

/**
 * ipr_process_ccn - Op done function for a CCN.
 * @ipr_cmd:	ipr command struct
 *
 * This function is the op done function for a configuration
 * change notification host controlled async from the adapter.
 *
 * Return value:
 * 	none
 **/
static void ipr_process_ccn(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb;
	u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);

	list_del(&hostrcb->queue);
	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);

	if (ioasc) {
		if (ioasc != IPR_IOASC_IOA_WAS_RESET)
			dev_err(&ioa_cfg->pdev->dev,
				"Host RCB failed with IOASC: 0x%08X\n", ioasc);

		ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
	} else {
		ipr_handle_config_change(ioa_cfg, hostrcb);
	}
}

/**
 * ipr_log_vpd - Log the passed VPD to the error log.
 * @vpd:		vendor/product id/sn struct
 *
 * Return value:
 * 	none
 **/
static void ipr_log_vpd(struct ipr_vpd *vpd)
{
	char buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN
		    + IPR_SERIAL_NUM_LEN];

	memcpy(buffer, vpd->vpids.vendor_id, IPR_VENDOR_ID_LEN);
	memcpy(buffer + IPR_VENDOR_ID_LEN, vpd->vpids.product_id,
	       IPR_PROD_ID_LEN);
	buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN] = '\0';
	ipr_err("Vendor/Product ID: %s\n", buffer);

	memcpy(buffer, vpd->sn, IPR_SERIAL_NUM_LEN);
	buffer[IPR_SERIAL_NUM_LEN] = '\0';
	ipr_err("    Serial Number: %s\n", buffer);
}

/**
 * ipr_log_cache_error - Log a cache error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * Return value:
 * 	none
 **/
static void ipr_log_cache_error(struct ipr_ioa_cfg *ioa_cfg,
				struct ipr_hostrcb *hostrcb)
{
	struct ipr_hostrcb_type_02_error *error =
		&hostrcb->hcam.u.error.u.type_02_error;

	ipr_err("-----Current Configuration-----\n");
	ipr_err("Cache Directory Card Information:\n");
	ipr_log_vpd(&error->ioa_vpd);
	ipr_err("Adapter Card Information:\n");
	ipr_log_vpd(&error->cfc_vpd);

	ipr_err("-----Expected Configuration-----\n");
	ipr_err("Cache Directory Card Information:\n");
	ipr_log_vpd(&error->ioa_last_attached_to_cfc_vpd);
	ipr_err("Adapter Card Information:\n");
	ipr_log_vpd(&error->cfc_last_attached_to_ioa_vpd);

	ipr_err("Additional IOA Data: %08X %08X %08X\n",
		     be32_to_cpu(error->ioa_data[0]),
		     be32_to_cpu(error->ioa_data[1]),
		     be32_to_cpu(error->ioa_data[2]));
}

/**
 * ipr_log_config_error - Log a configuration error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * Return value:
 * 	none
 **/
static void ipr_log_config_error(struct ipr_ioa_cfg *ioa_cfg,
				 struct ipr_hostrcb *hostrcb)
{
	int errors_logged, i;
	struct ipr_hostrcb_device_data_entry *dev_entry;
	struct ipr_hostrcb_type_03_error *error;

	error = &hostrcb->hcam.u.error.u.type_03_error;
	errors_logged = be32_to_cpu(error->errors_logged);

	ipr_err("Device Errors Detected/Logged: %d/%d\n",
		be32_to_cpu(error->errors_detected), errors_logged);

	dev_entry = error->dev;

	for (i = 0; i < errors_logged; i++, dev_entry++) {
		ipr_err_separator;

		ipr_phys_res_err(ioa_cfg, dev_entry->dev_res_addr, "Device %d", i + 1);
		ipr_log_vpd(&dev_entry->vpd);

		ipr_err("-----New Device Information-----\n");
		ipr_log_vpd(&dev_entry->new_vpd);

		ipr_err("Cache Directory Card Information:\n");
		ipr_log_vpd(&dev_entry->ioa_last_with_dev_vpd);

		ipr_err("Adapter Card Information:\n");
		ipr_log_vpd(&dev_entry->cfc_last_with_dev_vpd);

		ipr_err("Additional IOA Data: %08X %08X %08X %08X %08X\n",
			be32_to_cpu(dev_entry->ioa_data[0]),
			be32_to_cpu(dev_entry->ioa_data[1]),
			be32_to_cpu(dev_entry->ioa_data[2]),
			be32_to_cpu(dev_entry->ioa_data[3]),
			be32_to_cpu(dev_entry->ioa_data[4]));
	}
}

/**
 * ipr_log_array_error - Log an array configuration error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * Return value:
 * 	none
 **/
static void ipr_log_array_error(struct ipr_ioa_cfg *ioa_cfg,
				struct ipr_hostrcb *hostrcb)
{
	int i;
	struct ipr_hostrcb_type_04_error *error;
	struct ipr_hostrcb_array_data_entry *array_entry;
	const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };

	error = &hostrcb->hcam.u.error.u.type_04_error;

	ipr_err_separator;

	ipr_err("RAID %s Array Configuration: %d:%d:%d:%d\n",
		error->protection_level,
		ioa_cfg->host->host_no,
		error->last_func_vset_res_addr.bus,
		error->last_func_vset_res_addr.target,
		error->last_func_vset_res_addr.lun);

	ipr_err_separator;

	array_entry = error->array_member;

	for (i = 0; i < 18; i++) {
		if (!memcmp(array_entry->vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
			continue;

		if (be32_to_cpu(error->exposed_mode_adn) == i)
			ipr_err("Exposed Array Member %d:\n", i);
		else
			ipr_err("Array Member %d:\n", i);

		ipr_log_vpd(&array_entry->vpd);

		ipr_phys_res_err(ioa_cfg, array_entry->dev_res_addr, "Current Location");
		ipr_phys_res_err(ioa_cfg, array_entry->expected_dev_res_addr,
				 "Expected Location");

		ipr_err_separator;

		if (i == 9)
			array_entry = error->array_member2;
		else
			array_entry++;
	}
}

/**
 * ipr_log_hex_data - Log additional hex IOA error data.
 * @data:		IOA error data
 * @len:		data length
 *
 * Return value:
 * 	none
 **/
static void ipr_log_hex_data(u32 *data, int len)
{
	int i;

	if (len == 0)
		return;

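	/* len is in bytes; i indexes 32-bit words, four words per printed
	   line, so the leading offset i*4 is a byte offset */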
	for (i = 0; i < len / 4; i += 4) {
		ipr_err("%08X: %08X %08X %08X %08X\n", i*4,
			be32_to_cpu(data[i]),
			be32_to_cpu(data[i+1]),
			be32_to_cpu(data[i+2]),
			be32_to_cpu(data[i+3]));
	}
}

/**
 * ipr_log_dual_ioa_error - Log a dual adapter error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * Return value:
 * 	none
 **/
static void ipr_log_dual_ioa_error(struct ipr_ioa_cfg *ioa_cfg,
				   struct ipr_hostrcb *hostrcb)
{
	struct ipr_hostrcb_type_07_error *error;

	error = &hostrcb->hcam.u.error.u.type_07_error;
	error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';

	ipr_err("%s\n", error->failure_reason);
	ipr_err("Remote Adapter VPD:\n");
	ipr_log_vpd(&error->vpd);
	ipr_log_hex_data(error->data,
			 be32_to_cpu(hostrcb->hcam.length) -
			 (offsetof(struct ipr_hostrcb_error, u) +
			  offsetof(struct ipr_hostrcb_type_07_error, data)));
}

/**
 * ipr_log_generic_error - Log an adapter error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * Return value:
 * 	none
 **/
static void ipr_log_generic_error(struct ipr_ioa_cfg *ioa_cfg,
				  struct ipr_hostrcb *hostrcb)
{
	ipr_log_hex_data(hostrcb->hcam.u.raw.data,
			 be32_to_cpu(hostrcb->hcam.length));
}

/**
 * ipr_get_error - Find the specified IOASC in the ipr_error_table.
 * @ioasc:	IOASC
 *
 * This function will return the index into the ipr_error_table
 * for the specified IOASC. If the IOASC is not in the table,
 * 0 will be returned, which points to the entry used for unknown errors.
 *
 * Return value:
 * 	index into the ipr_error_table
 **/
static u32 ipr_get_error(u32 ioasc)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(ipr_error_table); i++)
		if (ipr_error_table[i].ioasc == ioasc)
			return i;

	return 0;
}

/**
 * ipr_handle_log_data - Log an adapter error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * This function logs an adapter error to the system.
 *
 * Return value:
 * 	none
 **/
static void ipr_handle_log_data(struct ipr_ioa_cfg *ioa_cfg,
				struct ipr_hostrcb *hostrcb)
{
	u32 ioasc;
	int error_index;

	if (hostrcb->hcam.notify_type != IPR_HOST_RCB_NOTIF_TYPE_ERROR_LOG_ENTRY)
		return;

	if (hostrcb->hcam.notifications_lost == IPR_HOST_RCB_NOTIFICATIONS_LOST)
		dev_err(&ioa_cfg->pdev->dev, "Error notifications lost\n");

	ioasc = be32_to_cpu(hostrcb->hcam.u.error.failing_dev_ioasc);

	if (ioasc == IPR_IOASC_BUS_WAS_RESET ||
	    ioasc == IPR_IOASC_BUS_WAS_RESET_BY_OTHER) {
		/* Tell the midlayer we had a bus reset so it will handle the UA properly */
		scsi_report_bus_reset(ioa_cfg->host,
				      hostrcb->hcam.u.error.failing_dev_res_addr.bus);
	}

	error_index = ipr_get_error(ioasc);

	if (!ipr_error_table[error_index].log_hcam)
		return;

	if (ipr_is_device(&hostrcb->hcam.u.error.failing_dev_res_addr)) {
		ipr_res_err(ioa_cfg, hostrcb->hcam.u.error.failing_dev_res_addr,
			    "%s\n", ipr_error_table[error_index].error);
	} else {
		dev_err(&ioa_cfg->pdev->dev, "%s\n",
			ipr_error_table[error_index].error);
	}

	/* Set indication we have logged an error */
	ioa_cfg->errors_logged++;

	if (ioa_cfg->log_level < IPR_DEFAULT_LOG_LEVEL)
		return;
	if (be32_to_cpu(hostrcb->hcam.length) > sizeof(hostrcb->hcam.u.raw))
		hostrcb->hcam.length = cpu_to_be32(sizeof(hostrcb->hcam.u.raw));

	switch (hostrcb->hcam.overlay_id) {
	case IPR_HOST_RCB_OVERLAY_ID_2:
		ipr_log_cache_error(ioa_cfg, hostrcb);
		break;
	case IPR_HOST_RCB_OVERLAY_ID_3:
		ipr_log_config_error(ioa_cfg, hostrcb);
		break;
	case IPR_HOST_RCB_OVERLAY_ID_4:
	case IPR_HOST_RCB_OVERLAY_ID_6:
		ipr_log_array_error(ioa_cfg, hostrcb);
		break;
	case IPR_HOST_RCB_OVERLAY_ID_7:
		ipr_log_dual_ioa_error(ioa_cfg, hostrcb);
		break;
	case IPR_HOST_RCB_OVERLAY_ID_1:
	case IPR_HOST_RCB_OVERLAY_ID_DEFAULT:
	default:
		ipr_log_generic_error(ioa_cfg, hostrcb);
		break;
	}
}

/**
 * ipr_process_error - Op done function for an adapter error log.
 * @ipr_cmd:	ipr command struct
 *
 * This function is the op done function for an error log host
 * controlled async from the adapter. It will log the error and
 * send the HCAM back to the adapter.
 *
 * Return value:
 * 	none
 **/
static void ipr_process_error(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb;
	u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);

	list_del(&hostrcb->queue);
	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);

	if (!ioasc) {
		ipr_handle_log_data(ioa_cfg, hostrcb);
	} else if (ioasc != IPR_IOASC_IOA_WAS_RESET) {
		dev_err(&ioa_cfg->pdev->dev,
			"Host RCB failed with IOASC: 0x%08X\n", ioasc);
	}

	ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_LOG_DATA, hostrcb);
}

/**
 * ipr_timeout -  An internally generated op has timed out.
 * @ipr_cmd:	ipr command struct
 *
 * This function blocks host requests and initiates an
 * adapter reset.
 *
 * Return value:
 * 	none
 **/
static void ipr_timeout(struct ipr_cmnd *ipr_cmd)
{
	unsigned long lock_flags = 0;
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	ENTER;
	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);

	ioa_cfg->errors_logged++;
	dev_err(&ioa_cfg->pdev->dev,
		"Adapter being reset due to command timeout.\n");

	if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
		ioa_cfg->sdt_state = GET_DUMP;

	if (!ioa_cfg->in_reset_reload || ioa_cfg->reset_cmd == ipr_cmd)
		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);

	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	LEAVE;
}

/**
 * ipr_oper_timeout -  Adapter timed out transitioning to operational
 * @ipr_cmd:	ipr command struct
 *
 * This function blocks host requests and initiates an
 * adapter reset.
 *
 * Return value:
 * 	none
 **/
static void ipr_oper_timeout(struct ipr_cmnd *ipr_cmd)
{
	unsigned long lock_flags = 0;
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	ENTER;
	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);

	ioa_cfg->errors_logged++;
	dev_err(&ioa_cfg->pdev->dev,
		"Adapter timed out transitioning to operational.\n");

	if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
		ioa_cfg->sdt_state = GET_DUMP;

	if (!ioa_cfg->in_reset_reload || ioa_cfg->reset_cmd == ipr_cmd) {
		if (ipr_fastfail)
			ioa_cfg->reset_retries += IPR_NUM_RESET_RELOAD_RETRIES;
		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
	}

	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
	LEAVE;
}

/**
 * ipr_reset_reload - Reset/Reload the IOA
 * @ioa_cfg:		ioa config struct
 * @shutdown_type:	shutdown type
 *
 * This function resets the adapter and re-initializes it.
 * This function assumes that all new host commands have been stopped.
 * Return value:
 * 	SUCCESS / FAILED
 **/
static int ipr_reset_reload(struct ipr_ioa_cfg *ioa_cfg,
			    enum ipr_shutdown_type shutdown_type)
{
	if (!ioa_cfg->in_reset_reload)
		ipr_initiate_ioa_reset(ioa_cfg, shutdown_type);

	spin_unlock_irq(ioa_cfg->host->host_lock);
	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
	spin_lock_irq(ioa_cfg->host->host_lock);

	/* If we got hit with a host reset while we were already resetting
	 the adapter for some reason, and that reset failed, the adapter
	 is now dead and cannot be recovered here. */
	if (ioa_cfg->ioa_is_dead) {
		ipr_trace;
		return FAILED;
	}

	return SUCCESS;
}

/**
 * ipr_find_ses_entry - Find matching SES in SES table
 * @res:	resource entry struct of SES
 *
 * Return value:
 * 	pointer to SES table entry / NULL on failure
 **/
static const struct ipr_ses_table_entry *
ipr_find_ses_entry(struct ipr_resource_entry *res)
{
	int i, j, matches;
	const struct ipr_ses_table_entry *ste = ipr_ses_table;

	for (i = 0; i < ARRAY_SIZE(ipr_ses_table); i++, ste++) {
		for (j = 0, matches = 0; j < IPR_PROD_ID_LEN; j++) {
			if (ste->compare_product_id_byte[j] == 'X') {
				if (res->cfgte.std_inq_data.vpids.product_id[j] == ste->product_id[j])
					matches++;
				else
					break;
			} else
				matches++;
		}

		if (matches == IPR_PROD_ID_LEN)
			return ste;
	}

	return NULL;
}

/**
 * ipr_get_max_scsi_speed - Determine max SCSI speed for a given bus
 * @ioa_cfg:	ioa config struct
 * @bus:		SCSI bus
 * @bus_width:	bus width
 *
 * Return value:
 *	SCSI bus speed in units of 100KHz, 1600 is 160 MHz
 *	For a 2-byte wide SCSI bus, the maximum transfer speed is
 *	twice the maximum transfer rate (e.g. for a wide enabled bus,
 *	max 160MHz = max 320MB/sec).
 **/
static u32 ipr_get_max_scsi_speed(struct ipr_ioa_cfg *ioa_cfg, u8 bus, u8 bus_width)
{
	struct ipr_resource_entry *res;
	const struct ipr_ses_table_entry *ste;
	u32 max_xfer_rate = IPR_MAX_SCSI_RATE(bus_width);

	/* Loop through each config table entry in the config table buffer */
	list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
		if (!(IPR_IS_SES_DEVICE(res->cfgte.std_inq_data)))
			continue;

		if (bus != res->cfgte.res_addr.bus)
			continue;

		if (!(ste = ipr_find_ses_entry(res)))
			continue;

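		/* max_bus_speed_limit is in MB/s; scale by 10 to get the bus
		   clock in 100KHz units and divide by the bus width in bytes */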
		max_xfer_rate = (ste->max_bus_speed_limit * 10) / (bus_width / 8);
	}

	return max_xfer_rate;
}

/**
 * ipr_wait_iodbg_ack - Wait for an IODEBUG ACK from the IOA
 * @ioa_cfg:		ioa config struct
 * @max_delay:		max delay in micro-seconds to wait
 *
 * Waits for an IODEBUG ACK from the IOA, doing busy looping.
 *
 * Return value:
 * 	0 on success / other on failure
 **/
static int ipr_wait_iodbg_ack(struct ipr_ioa_cfg *ioa_cfg, int max_delay)
{
	volatile u32 pcii_reg;
	int delay = 1;

	/* Read interrupt reg until IOA signals IO Debug Acknowledge */
	while (delay < max_delay) {
		pcii_reg = readl(ioa_cfg->regs.sense_interrupt_reg);

		if (pcii_reg & IPR_PCII_IO_DEBUG_ACKNOWLEDGE)
			return 0;

		/* udelay cannot be used if delay is more than a few milliseconds */
		if ((delay / 1000) > MAX_UDELAY_MS)
			mdelay(delay / 1000);
		else
			udelay(delay);

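		/* Exponential backoff: double the poll interval each pass */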
		delay += delay;
	}
	return -EIO;
}

/**
 * ipr_get_ldump_data_section - Dump IOA memory
 * @ioa_cfg:			ioa config struct
 * @start_addr:			adapter address to dump
 * @dest:				destination kernel buffer
 * @length_in_words:	length to dump in 4 byte words
 *
 * Return value:
 * 	0 on success / -EIO on failure
 **/
static int ipr_get_ldump_data_section(struct ipr_ioa_cfg *ioa_cfg,
				      u32 start_addr,
				      __be32 *dest, u32 length_in_words)
{
	volatile u32 temp_pcii_reg;
	int i, delay = 0;

	/* Write IOA interrupt reg starting LDUMP state  */
	writel((IPR_UPROCI_RESET_ALERT | IPR_UPROCI_IO_DEBUG_ALERT),
	       ioa_cfg->regs.set_uproc_interrupt_reg);

	/* Wait for IO debug acknowledge */
	if (ipr_wait_iodbg_ack(ioa_cfg,
			       IPR_LDUMP_MAX_LONG_ACK_DELAY_IN_USEC)) {
		dev_err(&ioa_cfg->pdev->dev,
			"IOA dump long data transfer timeout\n");
		return -EIO;
	}

	/* Signal LDUMP interlocked - clear IO debug ack */
	writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
	       ioa_cfg->regs.clr_interrupt_reg);

	/* Write Mailbox with starting address */
	writel(start_addr, ioa_cfg->ioa_mailbox);

	/* Signal address valid - clear IOA Reset alert */
	writel(IPR_UPROCI_RESET_ALERT,
	       ioa_cfg->regs.clr_uproc_interrupt_reg);

	for (i = 0; i < length_in_words; i++) {
		/* Wait for IO debug acknowledge */
		if (ipr_wait_iodbg_ack(ioa_cfg,
				       IPR_LDUMP_MAX_SHORT_ACK_DELAY_IN_USEC)) {
			dev_err(&ioa_cfg->pdev->dev,
				"IOA dump short data transfer timeout\n");
			return -EIO;
		}

		/* Read data from mailbox and increment destination pointer */
		*dest = cpu_to_be32(readl(ioa_cfg->ioa_mailbox));
		dest++;

		/* For all but the last word of data, signal data received */
		if (i < (length_in_words - 1)) {
			/* Signal dump data received - Clear IO debug Ack */
			writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
			       ioa_cfg->regs.clr_interrupt_reg);
		}
	}

	/* Signal end of block transfer. Set reset alert then clear IO debug ack */
	writel(IPR_UPROCI_RESET_ALERT,
	       ioa_cfg->regs.set_uproc_interrupt_reg);

	writel(IPR_UPROCI_IO_DEBUG_ALERT,
	       ioa_cfg->regs.clr_uproc_interrupt_reg);

	/* Signal dump data received - Clear IO debug Ack */
	writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
	       ioa_cfg->regs.clr_interrupt_reg);

	/* Wait for IOA to signal LDUMP exit - IOA reset alert will be cleared */
	while (delay < IPR_LDUMP_MAX_SHORT_ACK_DELAY_IN_USEC) {
		temp_pcii_reg =
		    readl(ioa_cfg->regs.sense_uproc_interrupt_reg);

		if (!(temp_pcii_reg & IPR_UPROCI_RESET_ALERT))
			return 0;

		udelay(10);
		delay += 10;
	}

	return 0;
}

#ifdef CONFIG_SCSI_IPR_DUMP
/**
 * ipr_sdt_copy - Copy Smart Dump Table to kernel buffer
 * @ioa_cfg:		ioa config struct
 * @pci_address:	adapter address
 * @length:			length of data to copy
 *
 * Copy data from PCI adapter to kernel buffer.
 * Note: length MUST be a 4 byte multiple
 * Return value:
 * 	0 on success / other on failure
 **/
static int ipr_sdt_copy(struct ipr_ioa_cfg *ioa_cfg,
			unsigned long pci_address, u32 length)
{
	int bytes_copied = 0;
	int cur_len, rc, rem_len, rem_page_len;
	__be32 *page;
	unsigned long lock_flags = 0;
	struct ipr_ioa_dump *ioa_dump = &ioa_cfg->dump->ioa_dump;

	while (bytes_copied < length &&
	       (ioa_dump->hdr.len + bytes_copied) < IPR_MAX_IOA_DUMP_SIZE) {
		if (ioa_dump->page_offset >= PAGE_SIZE ||
		    ioa_dump->page_offset == 0) {
			page = (__be32 *)__get_free_page(GFP_ATOMIC);

			if (!page) {
				ipr_trace;
				return bytes_copied;
			}

			ioa_dump->page_offset = 0;
			ioa_dump->ioa_data[ioa_dump->next_page_index] = page;
			ioa_dump->next_page_index++;
		} else
			page = ioa_dump->ioa_data[ioa_dump->next_page_index - 1];

		rem_len = length - bytes_copied;
		rem_page_len = PAGE_SIZE - ioa_dump->page_offset;
		cur_len = min(rem_len, rem_page_len);

		spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
		if (ioa_cfg->sdt_state == ABORT_DUMP) {
			rc = -EIO;
		} else {
			rc = ipr_get_ldump_data_section(ioa_cfg,
							pci_address + bytes_copied,
							&page[ioa_dump->page_offset / 4],
							(cur_len / sizeof(u32)));
		}
		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);

		if (!rc) {
			ioa_dump->page_offset += cur_len;
			bytes_copied += cur_len;
		} else {
			ipr_trace;
			break;
		}
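		/* Let other tasks run between sections of this potentially
		   long copy */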
1602		schedule();
1603	}
1604
1605	return bytes_copied;
1606}
1607
1608/**
1609 * ipr_init_dump_entry_hdr - Initialize a dump entry header.
1610 * @hdr:	dump entry header struct
1611 *
1612 * Return value:
1613 * 	nothing
1614 **/
1615static void ipr_init_dump_entry_hdr(struct ipr_dump_entry_header *hdr)
1616{
1617	hdr->eye_catcher = IPR_DUMP_EYE_CATCHER;
1618	hdr->num_elems = 1;
1619	hdr->offset = sizeof(*hdr);
1620	hdr->status = IPR_DUMP_STATUS_SUCCESS;
1621}
1622
1623/**
1624 * ipr_dump_ioa_type_data - Fill in the adapter type in the dump.
1625 * @ioa_cfg:	ioa config struct
1626 * @driver_dump:	driver dump struct
1627 *
1628 * Return value:
1629 * 	nothing
1630 **/
1631static void ipr_dump_ioa_type_data(struct ipr_ioa_cfg *ioa_cfg,
1632				   struct ipr_driver_dump *driver_dump)
1633{
1634	struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
1635
1636	ipr_init_dump_entry_hdr(&driver_dump->ioa_type_entry.hdr);
1637	driver_dump->ioa_type_entry.hdr.len =
1638		sizeof(struct ipr_dump_ioa_type_entry) -
1639		sizeof(struct ipr_dump_entry_header);
1640	driver_dump->ioa_type_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
1641	driver_dump->ioa_type_entry.hdr.id = IPR_DUMP_DRIVER_TYPE_ID;
1642	driver_dump->ioa_type_entry.type = ioa_cfg->type;
1643	driver_dump->ioa_type_entry.fw_version = (ucode_vpd->major_release << 24) |
1644		(ucode_vpd->card_type << 16) | (ucode_vpd->minor_release[0] << 8) |
1645		ucode_vpd->minor_release[1];
1646	driver_dump->hdr.num_entries++;
1647}
1648
1649/**
1650 * ipr_dump_version_data - Fill in the driver version in the dump.
1651 * @ioa_cfg:	ioa config struct
1652 * @driver_dump:	driver dump struct
1653 *
1654 * Return value:
1655 * 	nothing
1656 **/
1657static void ipr_dump_version_data(struct ipr_ioa_cfg *ioa_cfg,
1658				  struct ipr_driver_dump *driver_dump)
1659{
1660	ipr_init_dump_entry_hdr(&driver_dump->version_entry.hdr);
1661	driver_dump->version_entry.hdr.len =
1662		sizeof(struct ipr_dump_version_entry) -
1663		sizeof(struct ipr_dump_entry_header);
1664	driver_dump->version_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_ASCII;
1665	driver_dump->version_entry.hdr.id = IPR_DUMP_DRIVER_VERSION_ID;
1666	strcpy(driver_dump->version_entry.version, IPR_DRIVER_VERSION);
1667	driver_dump->hdr.num_entries++;
1668}
1669
1670/**
1671 * ipr_dump_trace_data - Fill in the IOA trace in the dump.
1672 * @ioa_cfg:	ioa config struct
1673 * @driver_dump:	driver dump struct
1674 *
1675 * Return value:
1676 * 	nothing
1677 **/
1678static void ipr_dump_trace_data(struct ipr_ioa_cfg *ioa_cfg,
1679				   struct ipr_driver_dump *driver_dump)
1680{
1681	ipr_init_dump_entry_hdr(&driver_dump->trace_entry.hdr);
1682	driver_dump->trace_entry.hdr.len =
1683		sizeof(struct ipr_dump_trace_entry) -
1684		sizeof(struct ipr_dump_entry_header);
1685	driver_dump->trace_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
1686	driver_dump->trace_entry.hdr.id = IPR_DUMP_TRACE_ID;
1687	memcpy(driver_dump->trace_entry.trace, ioa_cfg->trace, IPR_TRACE_SIZE);
1688	driver_dump->hdr.num_entries++;
1689}
1690
1691/**
1692 * ipr_dump_location_data - Fill in the IOA location in the dump.
1693 * @ioa_cfg:	ioa config struct
1694 * @driver_dump:	driver dump struct
1695 *
1696 * Return value:
1697 * 	nothing
1698 **/
1699static void ipr_dump_location_data(struct ipr_ioa_cfg *ioa_cfg,
1700				   struct ipr_driver_dump *driver_dump)
1701{
1702	ipr_init_dump_entry_hdr(&driver_dump->location_entry.hdr);
1703	driver_dump->location_entry.hdr.len =
1704		sizeof(struct ipr_dump_location_entry) -
1705		sizeof(struct ipr_dump_entry_header);
1706	driver_dump->location_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_ASCII;
1707	driver_dump->location_entry.hdr.id = IPR_DUMP_LOCATION_ID;
1708	strcpy(driver_dump->location_entry.location, ioa_cfg->pdev->dev.bus_id);
1709	driver_dump->hdr.num_entries++;
1710}
1711
1712/**
1713 * ipr_get_ioa_dump - Perform a dump of the driver and adapter.
1714 * @ioa_cfg:	ioa config struct
1715 * @dump:		dump struct
1716 *
1717 * Return value:
1718 * 	nothing
1719 **/
1720static void ipr_get_ioa_dump(struct ipr_ioa_cfg *ioa_cfg, struct ipr_dump *dump)
1721{
1722	unsigned long start_addr, sdt_word;
1723	unsigned long lock_flags = 0;
1724	struct ipr_driver_dump *driver_dump = &dump->driver_dump;
1725	struct ipr_ioa_dump *ioa_dump = &dump->ioa_dump;
1726	u32 num_entries, start_off, end_off;
1727	u32 bytes_to_copy, bytes_copied, rc;
1728	struct ipr_sdt *sdt;
1729	int i;
1730
1731	ENTER;
1732
1733	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
1734
1735	if (ioa_cfg->sdt_state != GET_DUMP) {
1736		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
1737		return;
1738	}
1739
1740	start_addr = readl(ioa_cfg->ioa_mailbox);
1741
1742	if (!ipr_sdt_is_fmt2(start_addr)) {
1743		dev_err(&ioa_cfg->pdev->dev,
1744			"Invalid dump table format: %lx\n", start_addr);
1745		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
1746		return;
1747	}
1748
1749	dev_err(&ioa_cfg->pdev->dev, "Dump of IOA initiated\n");
1750
1751	driver_dump->hdr.eye_catcher = IPR_DUMP_EYE_CATCHER;
1752
1753	/* Initialize the overall dump header */
1754	driver_dump->hdr.len = sizeof(struct ipr_driver_dump);
1755	driver_dump->hdr.num_entries = 1;
1756	driver_dump->hdr.first_entry_offset = sizeof(struct ipr_dump_header);
1757	driver_dump->hdr.status = IPR_DUMP_STATUS_SUCCESS;
1758	driver_dump->hdr.os = IPR_DUMP_OS_LINUX;
1759	driver_dump->hdr.driver_name = IPR_DUMP_DRIVER_NAME;
1760
1761	ipr_dump_version_data(ioa_cfg, driver_dump);
1762	ipr_dump_location_data(ioa_cfg, driver_dump);
1763	ipr_dump_ioa_type_data(ioa_cfg, driver_dump);
1764	ipr_dump_trace_data(ioa_cfg, driver_dump);
1765
1766	/* Update dump_header */
1767	driver_dump->hdr.len += sizeof(struct ipr_dump_entry_header);
1768
1769	/* IOA Dump entry */
1770	ipr_init_dump_entry_hdr(&ioa_dump->hdr);
1771	ioa_dump->format = IPR_SDT_FMT2;
1772	ioa_dump->hdr.len = 0;
1773	ioa_dump->hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
1774	ioa_dump->hdr.id = IPR_DUMP_IOA_DUMP_ID;
1775
	/* First entries in sdt are actually a list of dump addresses and
	 * lengths to gather the real dump data.  sdt represents the pointer
	 * to the ioa generated dump table.  Dump data will be extracted based
	 * on entries in this table */
1780	sdt = &ioa_dump->sdt;
1781
1782	rc = ipr_get_ldump_data_section(ioa_cfg, start_addr, (__be32 *)sdt,
1783					sizeof(struct ipr_sdt) / sizeof(__be32));
1784
1785	/* Smart Dump table is ready to use and the first entry is valid */
1786	if (rc || (be32_to_cpu(sdt->hdr.state) != IPR_FMT2_SDT_READY_TO_USE)) {
1787		dev_err(&ioa_cfg->pdev->dev,
1788			"Dump of IOA failed. Dump table not valid: %d, %X.\n",
1789			rc, be32_to_cpu(sdt->hdr.state));
1790		driver_dump->hdr.status = IPR_DUMP_STATUS_FAILED;
1791		ioa_cfg->sdt_state = DUMP_OBTAINED;
1792		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
1793		return;
1794	}
1795
1796	num_entries = be32_to_cpu(sdt->hdr.num_entries_used);
1797
1798	if (num_entries > IPR_NUM_SDT_ENTRIES)
1799		num_entries = IPR_NUM_SDT_ENTRIES;
1800
1801	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
1802
1803	for (i = 0; i < num_entries; i++) {
1804		if (ioa_dump->hdr.len > IPR_MAX_IOA_DUMP_SIZE) {
1805			driver_dump->hdr.status = IPR_DUMP_STATUS_QUAL_SUCCESS;
1806			break;
1807		}
1808
1809		if (sdt->entry[i].flags & IPR_SDT_VALID_ENTRY) {
1810			sdt_word = be32_to_cpu(sdt->entry[i].bar_str_offset);
1811			start_off = sdt_word & IPR_FMT2_MBX_ADDR_MASK;
1812			end_off = be32_to_cpu(sdt->entry[i].end_offset);
1813
1814			if (ipr_sdt_is_fmt2(sdt_word) && sdt_word) {
1815				bytes_to_copy = end_off - start_off;
1816				if (bytes_to_copy > IPR_MAX_IOA_DUMP_SIZE) {
1817					sdt->entry[i].flags &= ~IPR_SDT_VALID_ENTRY;
1818					continue;
1819				}
1820
1821				/* Copy data from adapter to driver buffers */
1822				bytes_copied = ipr_sdt_copy(ioa_cfg, sdt_word,
1823							    bytes_to_copy);
1824
1825				ioa_dump->hdr.len += bytes_copied;
1826
1827				if (bytes_copied != bytes_to_copy) {
1828					driver_dump->hdr.status = IPR_DUMP_STATUS_QUAL_SUCCESS;
1829					break;
1830				}
1831			}
1832		}
1833	}
1834
1835	dev_err(&ioa_cfg->pdev->dev, "Dump of IOA completed.\n");
1836
1837	/* Update dump_header */
1838	driver_dump->hdr.len += ioa_dump->hdr.len;
1839	wmb();
1840	ioa_cfg->sdt_state = DUMP_OBTAINED;
1841	LEAVE;
1842}
1843
1844#else
#define ipr_get_ioa_dump(ioa_cfg, dump) do { } while (0)
1846#endif
1847
1848/**
1849 * ipr_release_dump - Free adapter dump memory
1850 * @kref:	kref struct
1851 *
1852 * Return value:
1853 *	nothing
1854 **/
1855static void ipr_release_dump(struct kref *kref)
1856{
	struct ipr_dump *dump = container_of(kref, struct ipr_dump, kref);
1858	struct ipr_ioa_cfg *ioa_cfg = dump->ioa_cfg;
1859	unsigned long lock_flags = 0;
1860	int i;
1861
1862	ENTER;
1863	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
1864	ioa_cfg->dump = NULL;
1865	ioa_cfg->sdt_state = INACTIVE;
1866	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
1867
1868	for (i = 0; i < dump->ioa_dump.next_page_index; i++)
1869		free_page((unsigned long) dump->ioa_dump.ioa_data[i]);
1870
1871	kfree(dump);
1872	LEAVE;
1873}
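
/*
 * The adapter dump buffer is reference counted with the kernel kref
 * pattern; the release callback above runs when the last reference is
 * dropped. A minimal sketch of the lifecycle this driver assumes
 * (illustrative only, not driver code):
 *
 *	kref_init(&dump->kref);                   count = 1, ioa_cfg->dump
 *	kref_get(&dump->kref);                    count = 2, e.g. dump reader
 *	kref_put(&dump->kref, ipr_release_dump);  count = 1
 *	kref_put(&dump->kref, ipr_release_dump);  count = 0, release runs
 */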
1874
1875/**
1876 * ipr_worker_thread - Worker thread
1877 * @data:		ioa config struct
1878 *
1879 * Called at task level from a work thread. This function takes care
 * of adding and removing devices from the mid-layer as configuration
1881 * changes are detected by the adapter.
1882 *
1883 * Return value:
1884 * 	nothing
1885 **/
1886static void ipr_worker_thread(void *data)
1887{
1888	unsigned long lock_flags;
1889	struct ipr_resource_entry *res;
1890	struct scsi_device *sdev;
1891	struct ipr_dump *dump;
1892	struct ipr_ioa_cfg *ioa_cfg = data;
1893	u8 bus, target, lun;
1894	int did_work;
1895
1896	ENTER;
1897	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
1898
1899	if (ioa_cfg->sdt_state == GET_DUMP) {
1900		dump = ioa_cfg->dump;
1901		if (!dump) {
1902			spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
1903			return;
1904		}
1905		kref_get(&dump->kref);
1906		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
1907		ipr_get_ioa_dump(ioa_cfg, dump);
1908		kref_put(&dump->kref, ipr_release_dump);
1909
1910		spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
1911		if (ioa_cfg->sdt_state == DUMP_OBTAINED)
1912			ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
1913		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
1914		return;
1915	}
1916
1917restart:
1918	do {
1919		did_work = 0;
1920		if (!ioa_cfg->allow_cmds || !ioa_cfg->allow_ml_add_del) {
1921			spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
1922			return;
1923		}
1924
1925		list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
1926			if (res->del_from_ml && res->sdev) {
1927				did_work = 1;
1928				sdev = res->sdev;
1929				if (!scsi_device_get(sdev)) {
1930					res->sdev = NULL;
1931					list_move_tail(&res->queue, &ioa_cfg->free_res_q);
1932					spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
1933					scsi_remove_device(sdev);
1934					scsi_device_put(sdev);
1935					spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
1936				}
1937				break;
1938			}
1939		}
	} while (did_work);
1941
1942	list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
1943		if (res->add_to_ml) {
1944			bus = res->cfgte.res_addr.bus;
1945			target = res->cfgte.res_addr.target;
1946			lun = res->cfgte.res_addr.lun;
1947			spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
1948			scsi_add_device(ioa_cfg->host, bus, target, lun);
1949			spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
1950			goto restart;
1951		}
1952	}
1953
1954	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
1955	kobject_uevent(&ioa_cfg->host->shost_classdev.kobj, KOBJ_CHANGE, NULL);
1956	LEAVE;
1957}
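
/*
 * Note the locking pattern in ipr_worker_thread above: host_lock cannot
 * be held across scsi_add_device() or scsi_remove_device(), so the loops
 * drop the lock around each mid-layer call, retake it, and rescan the
 * resource list from the start since it may have changed while the lock
 * was dropped.
 */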
1958
1959#ifdef CONFIG_SCSI_IPR_TRACE
1960/**
1961 * ipr_read_trace - Dump the adapter trace
1962 * @kobj:		kobject struct
1963 * @buf:		buffer
1964 * @off:		offset
1965 * @count:		buffer size
1966 *
1967 * Return value:
1968 *	number of bytes printed to buffer
1969 **/
1970static ssize_t ipr_read_trace(struct kobject *kobj, char *buf,
1971			      loff_t off, size_t count)
1972{
	struct class_device *cdev = container_of(kobj, struct class_device, kobj);
1974	struct Scsi_Host *shost = class_to_shost(cdev);
1975	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
1976	unsigned long lock_flags = 0;
1977	int size = IPR_TRACE_SIZE;
1978	char *src = (char *)ioa_cfg->trace;
1979
1980	if (off > size)
1981		return 0;
1982	if (off + count > size) {
1983		size -= off;
1984		count = size;
1985	}
1986
1987	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
1988	memcpy(buf, &src[off], count);
1989	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
1990	return count;
1991}
1992
1993static struct bin_attribute ipr_trace_attr = {
1994	.attr =	{
1995		.name = "trace",
1996		.mode = S_IRUGO,
1997	},
1998	.size = 0,
1999	.read = ipr_read_trace,
2000};
2001#endif
2002
2003static const struct {
2004	enum ipr_cache_state state;
2005	char *name;
2006} cache_state [] = {
2007	{ CACHE_NONE, "none" },
2008	{ CACHE_DISABLED, "disabled" },
2009	{ CACHE_ENABLED, "enabled" }
2010};
2011
2012/**
2013 * ipr_show_write_caching - Show the write caching attribute
2014 * @class_dev:	class device struct
2015 * @buf:		buffer
2016 *
2017 * Return value:
2018 *	number of bytes printed to buffer
2019 **/
2020static ssize_t ipr_show_write_caching(struct class_device *class_dev, char *buf)
2021{
2022	struct Scsi_Host *shost = class_to_shost(class_dev);
2023	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2024	unsigned long lock_flags = 0;
2025	int i, len = 0;
2026
2027	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2028	for (i = 0; i < ARRAY_SIZE(cache_state); i++) {
2029		if (cache_state[i].state == ioa_cfg->cache_state) {
2030			len = snprintf(buf, PAGE_SIZE, "%s\n", cache_state[i].name);
2031			break;
2032		}
2033	}
2034	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2035	return len;
2036}
2037
2038
2039/**
2040 * ipr_store_write_caching - Enable/disable adapter write cache
2041 * @class_dev:	class_device struct
2042 * @buf:		buffer
2043 * @count:		buffer size
2044 *
2045 * This function will enable/disable adapter write cache.
2046 *
2047 * Return value:
2048 * 	count on success / other on failure
2049 **/
2050static ssize_t ipr_store_write_caching(struct class_device *class_dev,
2051					const char *buf, size_t count)
2052{
2053	struct Scsi_Host *shost = class_to_shost(class_dev);
2054	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2055	unsigned long lock_flags = 0;
2056	enum ipr_cache_state new_state = CACHE_INVALID;
2057	int i;
2058
2059	if (!capable(CAP_SYS_ADMIN))
2060		return -EACCES;
2061	if (ioa_cfg->cache_state == CACHE_NONE)
2062		return -EINVAL;
2063
2064	for (i = 0; i < ARRAY_SIZE(cache_state); i++) {
2065		if (!strncmp(cache_state[i].name, buf, strlen(cache_state[i].name))) {
2066			new_state = cache_state[i].state;
2067			break;
2068		}
2069	}
2070
2071	if (new_state != CACHE_DISABLED && new_state != CACHE_ENABLED)
2072		return -EINVAL;
2073
2074	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2075	if (ioa_cfg->cache_state == new_state) {
2076		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2077		return count;
2078	}
2079
2080	ioa_cfg->cache_state = new_state;
2081	dev_info(&ioa_cfg->pdev->dev, "%s adapter write cache.\n",
2082		 new_state == CACHE_ENABLED ? "Enabling" : "Disabling");
2083	if (!ioa_cfg->in_reset_reload)
2084		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
2085	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2086	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
2087
2088	return count;
2089}
2090
2091static struct class_device_attribute ipr_ioa_cache_attr = {
2092	.attr = {
2093		.name =		"write_cache",
2094		.mode =		S_IRUGO | S_IWUSR,
2095	},
2096	.show = ipr_show_write_caching,
2097	.store = ipr_store_write_caching
2098};
2099
2100/**
2101 * ipr_show_fw_version - Show the firmware version
2102 * @class_dev:	class device struct
2103 * @buf:		buffer
2104 *
2105 * Return value:
2106 *	number of bytes printed to buffer
2107 **/
2108static ssize_t ipr_show_fw_version(struct class_device *class_dev, char *buf)
2109{
2110	struct Scsi_Host *shost = class_to_shost(class_dev);
2111	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2112	struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
2113	unsigned long lock_flags = 0;
2114	int len;
2115
2116	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2117	len = snprintf(buf, PAGE_SIZE, "%02X%02X%02X%02X\n",
2118		       ucode_vpd->major_release, ucode_vpd->card_type,
2119		       ucode_vpd->minor_release[0],
2120		       ucode_vpd->minor_release[1]);
2121	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2122	return len;
2123}
2124
2125static struct class_device_attribute ipr_fw_version_attr = {
2126	.attr = {
2127		.name =		"fw_version",
2128		.mode =		S_IRUGO,
2129	},
2130	.show = ipr_show_fw_version,
2131};
2132
2133/**
2134 * ipr_show_log_level - Show the adapter's error logging level
2135 * @class_dev:	class device struct
2136 * @buf:		buffer
2137 *
2138 * Return value:
2139 * 	number of bytes printed to buffer
2140 **/
2141static ssize_t ipr_show_log_level(struct class_device *class_dev, char *buf)
2142{
2143	struct Scsi_Host *shost = class_to_shost(class_dev);
2144	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2145	unsigned long lock_flags = 0;
2146	int len;
2147
2148	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2149	len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->log_level);
2150	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2151	return len;
2152}
2153
2154/**
2155 * ipr_store_log_level - Change the adapter's error logging level
2156 * @class_dev:	class device struct
 * @buf:		buffer
 * @count:		buffer size
 *
 * Return value:
 * 	number of bytes consumed from buffer
2161 **/
2162static ssize_t ipr_store_log_level(struct class_device *class_dev,
2163				   const char *buf, size_t count)
2164{
2165	struct Scsi_Host *shost = class_to_shost(class_dev);
2166	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2167	unsigned long lock_flags = 0;
2168
2169	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2170	ioa_cfg->log_level = simple_strtoul(buf, NULL, 10);
2171	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2172	return strlen(buf);
2173}
2174
2175static struct class_device_attribute ipr_log_level_attr = {
2176	.attr = {
2177		.name =		"log_level",
2178		.mode =		S_IRUGO | S_IWUSR,
2179	},
2180	.show = ipr_show_log_level,
2181	.store = ipr_store_log_level
2182};
2183
2184/**
2185 * ipr_store_diagnostics - IOA Diagnostics interface
2186 * @class_dev:	class_device struct
2187 * @buf:		buffer
2188 * @count:		buffer size
2189 *
2190 * This function will reset the adapter and wait a reasonable
2191 * amount of time for any errors that the adapter might log.
2192 *
2193 * Return value:
2194 * 	count on success / other on failure
2195 **/
2196static ssize_t ipr_store_diagnostics(struct class_device *class_dev,
2197				     const char *buf, size_t count)
2198{
2199	struct Scsi_Host *shost = class_to_shost(class_dev);
2200	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2201	unsigned long lock_flags = 0;
2202	int rc = count;
2203
2204	if (!capable(CAP_SYS_ADMIN))
2205		return -EACCES;
2206
2207	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
2208	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2209	ioa_cfg->errors_logged = 0;
2210	ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
2211
2212	if (ioa_cfg->in_reset_reload) {
2213		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2214		wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
2215
2216		/* Wait for a second for any errors to be logged */
2217		msleep(1000);
2218	} else {
2219		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2220		return -EIO;
2221	}
2222
2223	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2224	if (ioa_cfg->in_reset_reload || ioa_cfg->errors_logged)
2225		rc = -EIO;
2226	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2227
2228	return rc;
2229}
2230
2231static struct class_device_attribute ipr_diagnostics_attr = {
2232	.attr = {
2233		.name =		"run_diagnostics",
2234		.mode =		S_IWUSR,
2235	},
2236	.store = ipr_store_diagnostics
2237};
2238
2239/**
2240 * ipr_show_adapter_state - Show the adapter's state
2241 * @class_dev:	class device struct
2242 * @buf:		buffer
2243 *
2244 * Return value:
2245 * 	number of bytes printed to buffer
2246 **/
2247static ssize_t ipr_show_adapter_state(struct class_device *class_dev, char *buf)
2248{
2249	struct Scsi_Host *shost = class_to_shost(class_dev);
2250	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2251	unsigned long lock_flags = 0;
2252	int len;
2253
2254	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2255	if (ioa_cfg->ioa_is_dead)
2256		len = snprintf(buf, PAGE_SIZE, "offline\n");
2257	else
2258		len = snprintf(buf, PAGE_SIZE, "online\n");
2259	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2260	return len;
2261}
2262
2263/**
2264 * ipr_store_adapter_state - Change adapter state
2265 * @class_dev:	class_device struct
2266 * @buf:		buffer
2267 * @count:		buffer size
2268 *
 * This function brings an offline adapter back online when "online"
 * is written to the attribute.
2270 *
2271 * Return value:
2272 * 	count on success / other on failure
2273 **/
2274static ssize_t ipr_store_adapter_state(struct class_device *class_dev,
2275				       const char *buf, size_t count)
2276{
2277	struct Scsi_Host *shost = class_to_shost(class_dev);
2278	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2279	unsigned long lock_flags;
2280	int result = count;
2281
2282	if (!capable(CAP_SYS_ADMIN))
2283		return -EACCES;
2284
2285	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2286	if (ioa_cfg->ioa_is_dead && !strncmp(buf, "online", 6)) {
2287		ioa_cfg->ioa_is_dead = 0;
2288		ioa_cfg->reset_retries = 0;
2289		ioa_cfg->in_ioa_bringdown = 0;
2290		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
2291	}
2292	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2293	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
2294
2295	return result;
2296}
2297
2298static struct class_device_attribute ipr_ioa_state_attr = {
2299	.attr = {
2300		.name =		"state",
2301		.mode =		S_IRUGO | S_IWUSR,
2302	},
2303	.show = ipr_show_adapter_state,
2304	.store = ipr_store_adapter_state
2305};
2306
2307/**
2308 * ipr_store_reset_adapter - Reset the adapter
2309 * @class_dev:	class_device struct
2310 * @buf:		buffer
2311 * @count:		buffer size
2312 *
2313 * This function will reset the adapter.
2314 *
2315 * Return value:
2316 * 	count on success / other on failure
2317 **/
2318static ssize_t ipr_store_reset_adapter(struct class_device *class_dev,
2319				       const char *buf, size_t count)
2320{
2321	struct Scsi_Host *shost = class_to_shost(class_dev);
2322	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2323	unsigned long lock_flags;
2324	int result = count;
2325
2326	if (!capable(CAP_SYS_ADMIN))
2327		return -EACCES;
2328
2329	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2330	if (!ioa_cfg->in_reset_reload)
2331		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
2332	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2333	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
2334
2335	return result;
2336}
2337
2338static struct class_device_attribute ipr_ioa_reset_attr = {
2339	.attr = {
2340		.name =		"reset_host",
2341		.mode =		S_IWUSR,
2342	},
2343	.store = ipr_store_reset_adapter
2344};
2345
2346/**
2347 * ipr_alloc_ucode_buffer - Allocates a microcode download buffer
2348 * @buf_len:		buffer length
2349 *
2350 * Allocates a DMA'able buffer in chunks and assembles a scatter/gather
2351 * list to use for microcode download
2352 *
2353 * Return value:
2354 * 	pointer to sglist / NULL on failure
2355 **/
2356static struct ipr_sglist *ipr_alloc_ucode_buffer(int buf_len)
2357{
2358	int sg_size, order, bsize_elem, num_elem, i, j;
2359	struct ipr_sglist *sglist;
2360	struct scatterlist *scatterlist;
2361	struct page *page;
2362
2363	/* Get the minimum size per scatter/gather element */
2364	sg_size = buf_len / (IPR_MAX_SGLIST - 1);
2365
2366	/* Get the actual size per element */
2367	order = get_order(sg_size);
2368
2369	/* Determine the actual number of bytes per element */
2370	bsize_elem = PAGE_SIZE * (1 << order);
2371
2372	/* Determine the actual number of sg entries needed */
2373	if (buf_len % bsize_elem)
2374		num_elem = (buf_len / bsize_elem) + 1;
2375	else
2376		num_elem = buf_len / bsize_elem;
2377
2378	/* Allocate a scatter/gather list for the DMA */
2379	sglist = kzalloc(sizeof(struct ipr_sglist) +
2380			 (sizeof(struct scatterlist) * (num_elem - 1)),
2381			 GFP_KERNEL);
2382
2383	if (sglist == NULL) {
2384		ipr_trace;
2385		return NULL;
2386	}
2387
2388	scatterlist = sglist->scatterlist;
2389
2390	sglist->order = order;
2391	sglist->num_sg = num_elem;
2392
2393	/* Allocate a bunch of sg elements */
2394	for (i = 0; i < num_elem; i++) {
2395		page = alloc_pages(GFP_KERNEL, order);
2396		if (!page) {
2397			ipr_trace;
2398
2399			/* Free up what we already allocated */
2400			for (j = i - 1; j >= 0; j--)
2401				__free_pages(scatterlist[j].page, order);
2402			kfree(sglist);
2403			return NULL;
2404		}
2405
2406		scatterlist[i].page = page;
2407	}
2408
2409	return sglist;
2410}
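
/*
 * Worked example of the sizing arithmetic in ipr_alloc_ucode_buffer,
 * assuming 4K pages and IPR_MAX_SGLIST == 64 (see ipr.h for the actual
 * value):
 *
 *	buf_len    = 614400 (a 600 KB microcode image)
 *	sg_size    = 614400 / 63 = 9752
 *	order      = get_order(9752) = 2 (smallest 4096 << order >= 9752)
 *	bsize_elem = 4096 * (1 << 2) = 16384
 *	num_elem   = 614400 / 16384 + 1 = 38 (since 614400 % 16384 != 0)
 */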
2411
2412/**
2413 * ipr_free_ucode_buffer - Frees a microcode download buffer
 * @sglist:		scatter/gather list pointer
2415 *
2416 * Free a DMA'able ucode download buffer previously allocated with
2417 * ipr_alloc_ucode_buffer
2418 *
2419 * Return value:
2420 * 	nothing
2421 **/
2422static void ipr_free_ucode_buffer(struct ipr_sglist *sglist)
2423{
2424	int i;
2425
2426	for (i = 0; i < sglist->num_sg; i++)
2427		__free_pages(sglist->scatterlist[i].page, sglist->order);
2428
2429	kfree(sglist);
2430}
2431
2432/**
 * ipr_copy_ucode_buffer - Copy a microcode image to a DMA buffer
2434 * @sglist:		scatter/gather list pointer
2435 * @buffer:		buffer pointer
2436 * @len:		buffer length
2437 *
 * Copy a microcode image from the firmware buffer into the DMA buffer
 * previously allocated by ipr_alloc_ucode_buffer
2440 *
2441 * Return value:
2442 * 	0 on success / other on failure
2443 **/
2444static int ipr_copy_ucode_buffer(struct ipr_sglist *sglist,
2445				 u8 *buffer, u32 len)
2446{
2447	int bsize_elem, i, result = 0;
2448	struct scatterlist *scatterlist;
2449	void *kaddr;
2450
2451	/* Determine the actual number of bytes per element */
2452	bsize_elem = PAGE_SIZE * (1 << sglist->order);
2453
2454	scatterlist = sglist->scatterlist;
2455
2456	for (i = 0; i < (len / bsize_elem); i++, buffer += bsize_elem) {
2457		kaddr = kmap(scatterlist[i].page);
2458		memcpy(kaddr, buffer, bsize_elem);
2459		kunmap(scatterlist[i].page);
2460
2461		scatterlist[i].length = bsize_elem;
2462
2463		if (result != 0) {
2464			ipr_trace;
2465			return result;
2466		}
2467	}
2468
2469	if (len % bsize_elem) {
2470		kaddr = kmap(scatterlist[i].page);
2471		memcpy(kaddr, buffer, len % bsize_elem);
2472		kunmap(scatterlist[i].page);
2473
2474		scatterlist[i].length = len % bsize_elem;
2475	}
2476
2477	sglist->buffer_len = len;
2478	return result;
2479}
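
/*
 * A minimal sketch of how the microcode buffer helpers fit together;
 * this is the sequence ipr_store_update_fw follows further below
 * (illustrative only):
 *
 *	sglist = ipr_alloc_ucode_buffer(dnld_size);
 *	if (!sglist)
 *		return -ENOMEM;
 *	rc = ipr_copy_ucode_buffer(sglist, src, dnld_size);
 *	if (!rc)
 *		rc = ipr_update_ioa_ucode(ioa_cfg, sglist);
 *	ipr_free_ucode_buffer(sglist);
 */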
2480
2481/**
2482 * ipr_build_ucode_ioadl - Build a microcode download IOADL
2483 * @ipr_cmd:	ipr command struct
2484 * @sglist:		scatter/gather list
2485 *
2486 * Builds a microcode download IOA data list (IOADL).
2487 *
2488 **/
2489static void ipr_build_ucode_ioadl(struct ipr_cmnd *ipr_cmd,
2490				  struct ipr_sglist *sglist)
2491{
2492	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
2493	struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;
2494	struct scatterlist *scatterlist = sglist->scatterlist;
2495	int i;
2496
2497	ipr_cmd->dma_use_sg = sglist->num_dma_sg;
2498	ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
2499	ioarcb->write_data_transfer_length = cpu_to_be32(sglist->buffer_len);
2500	ioarcb->write_ioadl_len =
2501		cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
2502
2503	for (i = 0; i < ipr_cmd->dma_use_sg; i++) {
2504		ioadl[i].flags_and_data_len =
2505			cpu_to_be32(IPR_IOADL_FLAGS_WRITE | sg_dma_len(&scatterlist[i]));
2506		ioadl[i].address =
2507			cpu_to_be32(sg_dma_address(&scatterlist[i]));
2508	}
2509
2510	ioadl[i-1].flags_and_data_len |=
2511		cpu_to_be32(IPR_IOADL_FLAGS_LAST);
2512}
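
/*
 * Each IOADL descriptor built above packs the transfer flags and byte
 * count into one big-endian word followed by the 32-bit DMA address,
 * and the final descriptor is tagged IPR_IOADL_FLAGS_LAST. Illustrative
 * values for a two-element write list:
 *
 *	ioadl[0] = { WRITE | 16384,        DMA address of chunk 0 }
 *	ioadl[1] = { WRITE | LAST | 8192,  DMA address of chunk 1 }
 */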
2513
2514/**
2515 * ipr_update_ioa_ucode - Update IOA's microcode
2516 * @ioa_cfg:	ioa config struct
2517 * @sglist:		scatter/gather list
2518 *
2519 * Initiate an adapter reset to update the IOA's microcode
2520 *
2521 * Return value:
2522 * 	0 on success / -EIO on failure
2523 **/
2524static int ipr_update_ioa_ucode(struct ipr_ioa_cfg *ioa_cfg,
2525				struct ipr_sglist *sglist)
2526{
2527	unsigned long lock_flags;
2528
2529	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2530
2531	if (ioa_cfg->ucode_sglist) {
2532		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2533		dev_err(&ioa_cfg->pdev->dev,
2534			"Microcode download already in progress\n");
2535		return -EIO;
2536	}
2537
2538	sglist->num_dma_sg = pci_map_sg(ioa_cfg->pdev, sglist->scatterlist,
2539					sglist->num_sg, DMA_TO_DEVICE);
2540
2541	if (!sglist->num_dma_sg) {
2542		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2543		dev_err(&ioa_cfg->pdev->dev,
2544			"Failed to map microcode download buffer!\n");
2545		return -EIO;
2546	}
2547
2548	ioa_cfg->ucode_sglist = sglist;
2549	ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
2550	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2551	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
2552
2553	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2554	ioa_cfg->ucode_sglist = NULL;
2555	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2556	return 0;
2557}
2558
2559/**
2560 * ipr_store_update_fw - Update the firmware on the adapter
2561 * @class_dev:	class_device struct
2562 * @buf:		buffer
2563 * @count:		buffer size
2564 *
2565 * This function will update the firmware on the adapter.
2566 *
2567 * Return value:
2568 * 	count on success / other on failure
2569 **/
2570static ssize_t ipr_store_update_fw(struct class_device *class_dev,
2571				       const char *buf, size_t count)
2572{
2573	struct Scsi_Host *shost = class_to_shost(class_dev);
2574	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2575	struct ipr_ucode_image_header *image_hdr;
2576	const struct firmware *fw_entry;
2577	struct ipr_sglist *sglist;
2578	char fname[100];
	u8 *src;
2580	int len, result, dnld_size;
2581
2582	if (!capable(CAP_SYS_ADMIN))
2583		return -EACCES;
2584
2585	len = snprintf(fname, 99, "%s", buf);
2586	fname[len-1] = '\0';
2587
	if (request_firmware(&fw_entry, fname, &ioa_cfg->pdev->dev)) {
2589		dev_err(&ioa_cfg->pdev->dev, "Firmware file %s not found\n", fname);
2590		return -EIO;
2591	}
2592
2593	image_hdr = (struct ipr_ucode_image_header *)fw_entry->data;
2594
2595	if (be32_to_cpu(image_hdr->header_length) > fw_entry->size ||
2596	    (ioa_cfg->vpd_cbs->page3_data.card_type &&
2597	     ioa_cfg->vpd_cbs->page3_data.card_type != image_hdr->card_type)) {
2598		dev_err(&ioa_cfg->pdev->dev, "Invalid microcode buffer\n");
2599		release_firmware(fw_entry);
2600		return -EINVAL;
2601	}
2602
2603	src = (u8 *)image_hdr + be32_to_cpu(image_hdr->header_length);
2604	dnld_size = fw_entry->size - be32_to_cpu(image_hdr->header_length);
2605	sglist = ipr_alloc_ucode_buffer(dnld_size);
2606
2607	if (!sglist) {
2608		dev_err(&ioa_cfg->pdev->dev, "Microcode buffer allocation failed\n");
2609		release_firmware(fw_entry);
2610		return -ENOMEM;
2611	}
2612
2613	result = ipr_copy_ucode_buffer(sglist, src, dnld_size);
2614
2615	if (result) {
2616		dev_err(&ioa_cfg->pdev->dev,
2617			"Microcode buffer copy to DMA buffer failed\n");
2618		goto out;
2619	}
2620
2621	result = ipr_update_ioa_ucode(ioa_cfg, sglist);
2622
2623	if (!result)
2624		result = count;
2625out:
2626	ipr_free_ucode_buffer(sglist);
2627	release_firmware(fw_entry);
2628	return result;
2629}
2630
2631static struct class_device_attribute ipr_update_fw_attr = {
2632	.attr = {
2633		.name =		"update_fw",
2634		.mode =		S_IWUSR,
2635	},
2636	.store = ipr_store_update_fw
2637};
2638
2639static struct class_device_attribute *ipr_ioa_attrs[] = {
2640	&ipr_fw_version_attr,
2641	&ipr_log_level_attr,
2642	&ipr_diagnostics_attr,
2643	&ipr_ioa_state_attr,
2644	&ipr_ioa_reset_attr,
2645	&ipr_update_fw_attr,
2646	&ipr_ioa_cache_attr,
2647	NULL,
2648};
2649
2650#ifdef CONFIG_SCSI_IPR_DUMP
2651/**
2652 * ipr_read_dump - Dump the adapter
2653 * @kobj:		kobject struct
2654 * @buf:		buffer
2655 * @off:		offset
2656 * @count:		buffer size
2657 *
2658 * Return value:
2659 *	number of bytes printed to buffer
2660 **/
2661static ssize_t ipr_read_dump(struct kobject *kobj, char *buf,
2662			      loff_t off, size_t count)
2663{
	struct class_device *cdev = container_of(kobj, struct class_device, kobj);
2665	struct Scsi_Host *shost = class_to_shost(cdev);
2666	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2667	struct ipr_dump *dump;
2668	unsigned long lock_flags = 0;
	u8 *src;
2670	int len;
2671	size_t rc = count;
2672
2673	if (!capable(CAP_SYS_ADMIN))
2674		return -EACCES;
2675
2676	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2677	dump = ioa_cfg->dump;
2678
2679	if (ioa_cfg->sdt_state != DUMP_OBTAINED || !dump) {
2680		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2681		return 0;
2682	}
2683	kref_get(&dump->kref);
2684	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2685
2686	if (off > dump->driver_dump.hdr.len) {
2687		kref_put(&dump->kref, ipr_release_dump);
2688		return 0;
2689	}
2690
2691	if (off + count > dump->driver_dump.hdr.len) {
2692		count = dump->driver_dump.hdr.len - off;
2693		rc = count;
2694	}
2695
2696	if (count && off < sizeof(dump->driver_dump)) {
2697		if (off + count > sizeof(dump->driver_dump))
2698			len = sizeof(dump->driver_dump) - off;
2699		else
2700			len = count;
2701		src = (u8 *)&dump->driver_dump + off;
2702		memcpy(buf, src, len);
2703		buf += len;
2704		off += len;
2705		count -= len;
2706	}
2707
2708	off -= sizeof(dump->driver_dump);
2709
2710	if (count && off < offsetof(struct ipr_ioa_dump, ioa_data)) {
2711		if (off + count > offsetof(struct ipr_ioa_dump, ioa_data))
2712			len = offsetof(struct ipr_ioa_dump, ioa_data) - off;
2713		else
2714			len = count;
2715		src = (u8 *)&dump->ioa_dump + off;
2716		memcpy(buf, src, len);
2717		buf += len;
2718		off += len;
2719		count -= len;
2720	}
2721
2722	off -= offsetof(struct ipr_ioa_dump, ioa_data);
2723
2724	while (count) {
2725		if ((off & PAGE_MASK) != ((off + count) & PAGE_MASK))
2726			len = PAGE_ALIGN(off) - off;
2727		else
2728			len = count;
2729		src = (u8 *)dump->ioa_dump.ioa_data[(off & PAGE_MASK) >> PAGE_SHIFT];
2730		src += off & ~PAGE_MASK;
2731		memcpy(buf, src, len);
2732		buf += len;
2733		off += len;
2734		count -= len;
2735	}
2736
2737	kref_put(&dump->kref, ipr_release_dump);
2738	return rc;
2739}
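
/*
 * ipr_read_dump above presents the dump as one flat file assembled from
 * three regions. A sketch of the offset mapping, assuming 4K pages:
 *
 *	[0, sizeof(driver_dump))                      driver dump header/entries
 *	[.., offsetof(struct ipr_ioa_dump, ioa_data)) ioa dump header
 *	remainder                                     ioa_data[] pages, where
 *		page = (off & PAGE_MASK) >> PAGE_SHIFT
 *		byte = off & ~PAGE_MASK
 */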
2740
2741/**
2742 * ipr_alloc_dump - Prepare for adapter dump
2743 * @ioa_cfg:	ioa config struct
2744 *
2745 * Return value:
2746 *	0 on success / other on failure
2747 **/
2748static int ipr_alloc_dump(struct ipr_ioa_cfg *ioa_cfg)
2749{
2750	struct ipr_dump *dump;
2751	unsigned long lock_flags = 0;
2752
2753	ENTER;
2754	dump = kzalloc(sizeof(struct ipr_dump), GFP_KERNEL);
2755
2756	if (!dump) {
2757		ipr_err("Dump memory allocation failed\n");
2758		return -ENOMEM;
2759	}
2760
2761	kref_init(&dump->kref);
2762	dump->ioa_cfg = ioa_cfg;
2763
2764	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2765
2766	if (INACTIVE != ioa_cfg->sdt_state) {
2767		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2768		kfree(dump);
2769		return 0;
2770	}
2771
2772	ioa_cfg->dump = dump;
2773	ioa_cfg->sdt_state = WAIT_FOR_DUMP;
2774	if (ioa_cfg->ioa_is_dead && !ioa_cfg->dump_taken) {
2775		ioa_cfg->dump_taken = 1;
2776		schedule_work(&ioa_cfg->work_q);
2777	}
2778	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2779
2780	LEAVE;
2781	return 0;
2782}
2783
2784/**
2785 * ipr_free_dump - Free adapter dump memory
2786 * @ioa_cfg:	ioa config struct
2787 *
2788 * Return value:
2789 *	0 on success / other on failure
2790 **/
2791static int ipr_free_dump(struct ipr_ioa_cfg *ioa_cfg)
2792{
2793	struct ipr_dump *dump;
2794	unsigned long lock_flags = 0;
2795
2796	ENTER;
2797
2798	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2799	dump = ioa_cfg->dump;
2800	if (!dump) {
2801		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2802		return 0;
2803	}
2804
2805	ioa_cfg->dump = NULL;
2806	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2807
2808	kref_put(&dump->kref, ipr_release_dump);
2809
2810	LEAVE;
2811	return 0;
2812}
2813
2814/**
2815 * ipr_write_dump - Setup dump state of adapter
2816 * @kobj:		kobject struct
2817 * @buf:		buffer
2818 * @off:		offset
2819 * @count:		buffer size
2820 *
2821 * Return value:
 *	count on success / other on failure
2823 **/
2824static ssize_t ipr_write_dump(struct kobject *kobj, char *buf,
2825			      loff_t off, size_t count)
2826{
	struct class_device *cdev = container_of(kobj, struct class_device, kobj);
2828	struct Scsi_Host *shost = class_to_shost(cdev);
2829	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2830	int rc;
2831
2832	if (!capable(CAP_SYS_ADMIN))
2833		return -EACCES;
2834
2835	if (buf[0] == '1')
2836		rc = ipr_alloc_dump(ioa_cfg);
2837	else if (buf[0] == '0')
2838		rc = ipr_free_dump(ioa_cfg);
2839	else
2840		return -EINVAL;
2841
2842	if (rc)
2843		return rc;
2844	else
2845		return count;
2846}
2847
2848static struct bin_attribute ipr_dump_attr = {
2849	.attr =	{
2850		.name = "dump",
2851		.mode = S_IRUSR | S_IWUSR,
2852	},
2853	.size = 0,
2854	.read = ipr_read_dump,
2855	.write = ipr_write_dump
2856};
2857#else
static int ipr_free_dump(struct ipr_ioa_cfg *ioa_cfg) { return 0; }
2859#endif
2860
2861/**
2862 * ipr_change_queue_depth - Change the device's queue depth
2863 * @sdev:	scsi device struct
2864 * @qdepth:	depth to set
2865 *
2866 * Return value:
2867 * 	actual depth set
2868 **/
2869static int ipr_change_queue_depth(struct scsi_device *sdev, int qdepth)
2870{
2871	scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), qdepth);
2872	return sdev->queue_depth;
2873}
2874
2875/**
2876 * ipr_change_queue_type - Change the device's queue type
 * @sdev:		scsi device struct
2878 * @tag_type:	type of tags to use
2879 *
2880 * Return value:
2881 * 	actual queue type set
2882 **/
2883static int ipr_change_queue_type(struct scsi_device *sdev, int tag_type)
2884{
2885	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
2886	struct ipr_resource_entry *res;
2887	unsigned long lock_flags = 0;
2888
2889	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2890	res = (struct ipr_resource_entry *)sdev->hostdata;
2891
2892	if (res) {
2893		if (ipr_is_gscsi(res) && sdev->tagged_supported) {
2894			/*
2895			 * We don't bother quiescing the device here since the
2896			 * adapter firmware does it for us.
2897			 */
2898			scsi_set_tag_type(sdev, tag_type);
2899
2900			if (tag_type)
2901				scsi_activate_tcq(sdev, sdev->queue_depth);
2902			else
2903				scsi_deactivate_tcq(sdev, sdev->queue_depth);
2904		} else
2905			tag_type = 0;
2906	} else
2907		tag_type = 0;
2908
2909	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2910	return tag_type;
2911}
2912
2913/**
2914 * ipr_show_adapter_handle - Show the adapter's resource handle for this device
2915 * @dev:	device struct
 * @attr:	device attribute structure
 * @buf:	buffer
2917 *
2918 * Return value:
2919 * 	number of bytes printed to buffer
2920 **/
2921static ssize_t ipr_show_adapter_handle(struct device *dev, struct device_attribute *attr, char *buf)
2922{
2923	struct scsi_device *sdev = to_scsi_device(dev);
2924	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
2925	struct ipr_resource_entry *res;
2926	unsigned long lock_flags = 0;
2927	ssize_t len = -ENXIO;
2928
2929	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2930	res = (struct ipr_resource_entry *)sdev->hostdata;
2931	if (res)
2932		len = snprintf(buf, PAGE_SIZE, "%08X\n", res->cfgte.res_handle);
2933	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2934	return len;
2935}
2936
2937static struct device_attribute ipr_adapter_handle_attr = {
2938	.attr = {
2939		.name = 	"adapter_handle",
2940		.mode =		S_IRUSR,
2941	},
2942	.show = ipr_show_adapter_handle
2943};
2944
2945static struct device_attribute *ipr_dev_attrs[] = {
2946	&ipr_adapter_handle_attr,
2947	NULL,
2948};
2949
2950/**
2951 * ipr_biosparam - Return the HSC mapping
2952 * @sdev:			scsi device struct
2953 * @block_device:	block device pointer
2954 * @capacity:		capacity of the device
2955 * @parm:			Array containing returned HSC values.
2956 *
2957 * This function generates the HSC parms that fdisk uses.
2958 * We want to make sure we return something that places partitions
2959 * on 4k boundaries for best performance with the IOA.
2960 *
2961 * Return value:
2962 * 	0 on success
2963 **/
2964static int ipr_biosparam(struct scsi_device *sdev,
2965			 struct block_device *block_device,
2966			 sector_t capacity, int *parm)
2967{
2968	int heads, sectors;
2969	sector_t cylinders;
2970
2971	heads = 128;
2972	sectors = 32;
2973
2974	cylinders = capacity;
2975	sector_div(cylinders, (128 * 32));
2976
2977	/* return result */
2978	parm[0] = heads;
2979	parm[1] = sectors;
2980	parm[2] = cylinders;
2981
2982	return 0;
2983}
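
/*
 * With the fixed geometry above, one track is 32 * 512 = 16 KB and one
 * cylinder is 128 * 32 * 512 = 2 MB, so partitions that fdisk places on
 * track or cylinder boundaries start on 4 KB multiples, as intended.
 */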
2984
2985/**
2986 * ipr_slave_destroy - Unconfigure a SCSI device
2987 * @sdev:	scsi device struct
2988 *
2989 * Return value:
2990 * 	nothing
2991 **/
2992static void ipr_slave_destroy(struct scsi_device *sdev)
2993{
2994	struct ipr_resource_entry *res;
2995	struct ipr_ioa_cfg *ioa_cfg;
2996	unsigned long lock_flags = 0;
2997
2998	ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
2999
3000	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3001	res = (struct ipr_resource_entry *) sdev->hostdata;
3002	if (res) {
3003		sdev->hostdata = NULL;
3004		res->sdev = NULL;
3005	}
3006	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3007}
3008
3009/**
3010 * ipr_slave_configure - Configure a SCSI device
3011 * @sdev:	scsi device struct
3012 *
3013 * This function configures the specified scsi device.
3014 *
3015 * Return value:
3016 * 	0 on success
3017 **/
3018static int ipr_slave_configure(struct scsi_device *sdev)
3019{
3020	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
3021	struct ipr_resource_entry *res;
3022	unsigned long lock_flags = 0;
3023
3024	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3025	res = sdev->hostdata;
3026	if (res) {
3027		if (ipr_is_af_dasd_device(res))
3028			sdev->type = TYPE_RAID;
3029		if (ipr_is_af_dasd_device(res) || ipr_is_ioa_resource(res)) {
3030			sdev->scsi_level = 4;
3031			sdev->no_uld_attach = 1;
3032		}
3033		if (ipr_is_vset_device(res)) {
3034			sdev->timeout = IPR_VSET_RW_TIMEOUT;
3035			blk_queue_max_sectors(sdev->request_queue, IPR_VSET_MAX_SECTORS);
3036		}
3037		if (IPR_IS_DASD_DEVICE(res->cfgte.std_inq_data))
3038			sdev->allow_restart = 1;
3039		scsi_adjust_queue_depth(sdev, 0, sdev->host->cmd_per_lun);
3040	}
3041	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3042	return 0;
3043}
3044
3045/**
3046 * ipr_slave_alloc - Prepare for commands to a device.
3047 * @sdev:	scsi device struct
3048 *
3049 * This function saves a pointer to the resource entry
3050 * in the scsi device struct if the device exists. We
3051 * can then use this pointer in ipr_queuecommand when
3052 * handling new commands.
3053 *
3054 * Return value:
3055 * 	0 on success / -ENXIO if device does not exist
3056 **/
3057static int ipr_slave_alloc(struct scsi_device *sdev)
3058{
3059	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
3060	struct ipr_resource_entry *res;
3061	unsigned long lock_flags;
3062	int rc = -ENXIO;
3063
3064	sdev->hostdata = NULL;
3065
3066	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3067
3068	list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
3069		if ((res->cfgte.res_addr.bus == sdev->channel) &&
3070		    (res->cfgte.res_addr.target == sdev->id) &&
3071		    (res->cfgte.res_addr.lun == sdev->lun)) {
3072			res->sdev = sdev;
3073			res->add_to_ml = 0;
3074			res->in_erp = 0;
3075			sdev->hostdata = res;
3076			res->needs_sync_complete = 1;
3077			rc = 0;
3078			break;
3079		}
3080	}
3081
3082	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3083
3084	return rc;
3085}
3086
3087/**
 * __ipr_eh_host_reset - Reset the host adapter
3089 * @scsi_cmd:	scsi command struct
3090 *
3091 * Return value:
3092 * 	SUCCESS / FAILED
3093 **/
3094static int __ipr_eh_host_reset(struct scsi_cmnd * scsi_cmd)
3095{
3096	struct ipr_ioa_cfg *ioa_cfg;
3097	int rc;
3098
3099	ENTER;
3100	ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata;
3101
3102	dev_err(&ioa_cfg->pdev->dev,
3103		"Adapter being reset as a result of error recovery.\n");
3104
3105	if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
3106		ioa_cfg->sdt_state = GET_DUMP;
3107
3108	rc = ipr_reset_reload(ioa_cfg, IPR_SHUTDOWN_ABBREV);
3109
3110	LEAVE;
3111	return rc;
3112}
3113
3114static int ipr_eh_host_reset(struct scsi_cmnd * cmd)
3115{
3116	int rc;
3117
3118	spin_lock_irq(cmd->device->host->host_lock);
3119	rc = __ipr_eh_host_reset(cmd);
3120	spin_unlock_irq(cmd->device->host->host_lock);
3121
3122	return rc;
3123}
3124
3125/**
 * __ipr_eh_dev_reset - Reset the device
3127 * @scsi_cmd:	scsi command struct
3128 *
3129 * This function issues a device reset to the affected device.
3130 * A LUN reset will be sent to the device first. If that does
3131 * not work, a target reset will be sent.
3132 *
3133 * Return value:
3134 *	SUCCESS / FAILED
3135 **/
3136static int __ipr_eh_dev_reset(struct scsi_cmnd * scsi_cmd)
3137{
3138	struct ipr_cmnd *ipr_cmd;
3139	struct ipr_ioa_cfg *ioa_cfg;
3140	struct ipr_resource_entry *res;
3141	struct ipr_cmd_pkt *cmd_pkt;
3142	u32 ioasc;
3143
3144	ENTER;
3145	ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata;
3146	res = scsi_cmd->device->hostdata;
3147
3148	if (!res || (!ipr_is_gscsi(res) && !ipr_is_vset_device(res)))
3149		return FAILED;
3150
3151	/*
3152	 * If we are currently going through reset/reload, return failed. This will force the
3153	 * mid-layer to call ipr_eh_host_reset, which will then go to sleep and wait for the
3154	 * reset to complete
3155	 */
3156	if (ioa_cfg->in_reset_reload)
3157		return FAILED;
3158	if (ioa_cfg->ioa_is_dead)
3159		return FAILED;
3160
3161	list_for_each_entry(ipr_cmd, &ioa_cfg->pending_q, queue) {
3162		if (ipr_cmd->ioarcb.res_handle == res->cfgte.res_handle) {
3163			if (ipr_cmd->scsi_cmd)
3164				ipr_cmd->done = ipr_scsi_eh_done;
3165		}
3166	}
3167
3168	res->resetting_device = 1;
3169
3170	ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
3171
3172	ipr_cmd->ioarcb.res_handle = res->cfgte.res_handle;
3173	cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
3174	cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
3175	cmd_pkt->cdb[0] = IPR_RESET_DEVICE;
3176
3177	ipr_sdev_err(scsi_cmd->device, "Resetting device\n");
3178	ipr_send_blocking_cmd(ipr_cmd, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
3179
3180	ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
3181
3182	res->resetting_device = 0;
3183
3184	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
3185
3186	LEAVE;
3187	return (IPR_IOASC_SENSE_KEY(ioasc) ? FAILED : SUCCESS);
3188}
3189
3190static int ipr_eh_dev_reset(struct scsi_cmnd * cmd)
3191{
3192	int rc;
3193
3194	spin_lock_irq(cmd->device->host->host_lock);
3195	rc = __ipr_eh_dev_reset(cmd);
3196	spin_unlock_irq(cmd->device->host->host_lock);
3197
3198	return rc;
3199}
3200
3201/**
3202 * ipr_bus_reset_done - Op done function for bus reset.
3203 * @ipr_cmd:	ipr command struct
3204 *
3205 * This function is the op done function for a bus reset
3206 *
3207 * Return value:
3208 * 	none
3209 **/
3210static void ipr_bus_reset_done(struct ipr_cmnd *ipr_cmd)
3211{
3212	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
3213	struct ipr_resource_entry *res;
3214
3215	ENTER;
3216	list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
3217		if (!memcmp(&res->cfgte.res_handle, &ipr_cmd->ioarcb.res_handle,
3218			    sizeof(res->cfgte.res_handle))) {
3219			scsi_report_bus_reset(ioa_cfg->host, res->cfgte.res_addr.bus);
3220			break;
3221		}
3222	}
3223
3224	/*
3225	 * If abort has not completed, indicate the reset has, else call the
3226	 * abort's done function to wake the sleeping eh thread
3227	 */
3228	if (ipr_cmd->sibling->sibling)
3229		ipr_cmd->sibling->sibling = NULL;
3230	else
3231		ipr_cmd->sibling->done(ipr_cmd->sibling);
3232
3233	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
3234	LEAVE;
3235}
3236
3237/**
3238 * ipr_abort_timeout - An abort task has timed out
3239 * @ipr_cmd:	ipr command struct
3240 *
3241 * This function handles when an abort task times out. If this
3242 * happens we issue a bus reset since we have resources tied
3243 * up that must be freed before returning to the midlayer.
3244 *
3245 * Return value:
3246 *	none
3247 **/
3248static void ipr_abort_timeout(struct ipr_cmnd *ipr_cmd)
3249{
3250	struct ipr_cmnd *reset_cmd;
3251	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
3252	struct ipr_cmd_pkt *cmd_pkt;
3253	unsigned long lock_flags = 0;
3254
3255	ENTER;
3256	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3257	if (ipr_cmd->completion.done || ioa_cfg->in_reset_reload) {
3258		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3259		return;
3260	}
3261
3262	ipr_sdev_err(ipr_cmd->u.sdev, "Abort timed out. Resetting bus\n");
3263	reset_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
3264	ipr_cmd->sibling = reset_cmd;
3265	reset_cmd->sibling = ipr_cmd;
3266	reset_cmd->ioarcb.res_handle = ipr_cmd->ioarcb.res_handle;
3267	cmd_pkt = &reset_cmd->ioarcb.cmd_pkt;
3268	cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
3269	cmd_pkt->cdb[0] = IPR_RESET_DEVICE;
3270	cmd_pkt->cdb[2] = IPR_RESET_TYPE_SELECT | IPR_BUS_RESET;
3271
3272	ipr_do_req(reset_cmd, ipr_bus_reset_done, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
3273	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3274	LEAVE;
3275}
3276
3277/**
3278 * ipr_cancel_op - Cancel specified op
3279 * @scsi_cmd:	scsi command struct
3280 *
3281 * This function cancels specified op.
3282 *
3283 * Return value:
3284 *	SUCCESS / FAILED
3285 **/
3286static int ipr_cancel_op(struct scsi_cmnd * scsi_cmd)
3287{
3288	struct ipr_cmnd *ipr_cmd;
3289	struct ipr_ioa_cfg *ioa_cfg;
3290	struct ipr_resource_entry *res;
3291	struct ipr_cmd_pkt *cmd_pkt;
3292	u32 ioasc;
3293	int op_found = 0;
3294
3295	ENTER;
3296	ioa_cfg = (struct ipr_ioa_cfg *)scsi_cmd->device->host->hostdata;
3297	res = scsi_cmd->device->hostdata;
3298
3299	/* If we are currently going through reset/reload, return failed.
3300	 * This will force the mid-layer to call ipr_eh_host_reset,
3301	 * which will then go to sleep and wait for the reset to complete
3302	 */
3303	if (ioa_cfg->in_reset_reload || ioa_cfg->ioa_is_dead)
3304		return FAILED;
3305	if (!res || (!ipr_is_gscsi(res) && !ipr_is_vset_device(res)))
3306		return FAILED;
3307
3308	list_for_each_entry(ipr_cmd, &ioa_cfg->pending_q, queue) {
3309		if (ipr_cmd->scsi_cmd == scsi_cmd) {
3310			ipr_cmd->done = ipr_scsi_eh_done;
3311			op_found = 1;
3312			break;
3313		}
3314	}
3315
3316	if (!op_found)
3317		return SUCCESS;
3318
3319	ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
3320	ipr_cmd->ioarcb.res_handle = res->cfgte.res_handle;
3321	cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
3322	cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
3323	cmd_pkt->cdb[0] = IPR_CANCEL_ALL_REQUESTS;
3324	ipr_cmd->u.sdev = scsi_cmd->device;
3325
3326	ipr_sdev_err(scsi_cmd->device, "Aborting command: %02X\n", scsi_cmd->cmnd[0]);
3327	ipr_send_blocking_cmd(ipr_cmd, ipr_abort_timeout, IPR_CANCEL_ALL_TIMEOUT);
3328	ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
3329
3330	/*
3331	 * If the abort task timed out and we sent a bus reset, we will get
	 * one of the following responses to the abort
3333	 */
3334	if (ioasc == IPR_IOASC_BUS_WAS_RESET || ioasc == IPR_IOASC_SYNC_REQUIRED) {
3335		ioasc = 0;
3336		ipr_trace;
3337	}
3338
3339	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
3340	res->needs_sync_complete = 1;
3341
3342	LEAVE;
3343	return (IPR_IOASC_SENSE_KEY(ioasc) ? FAILED : SUCCESS);
3344}
3345
3346/**
3347 * ipr_eh_abort - Abort a single op
3348 * @scsi_cmd:	scsi command struct
3349 *
3350 * Return value:
3351 * 	SUCCESS / FAILED
3352 **/
3353static int ipr_eh_abort(struct scsi_cmnd * scsi_cmd)
3354{
3355	unsigned long flags;
3356	int rc;
3357
3358	ENTER;
3359
3360	spin_lock_irqsave(scsi_cmd->device->host->host_lock, flags);
3361	rc = ipr_cancel_op(scsi_cmd);
3362	spin_unlock_irqrestore(scsi_cmd->device->host->host_lock, flags);
3363
3364	LEAVE;
3365	return rc;
3366}
3367
3368/**
3369 * ipr_handle_other_interrupt - Handle "other" interrupts
3370 * @ioa_cfg:	ioa config struct
3371 * @int_reg:	interrupt register
3372 *
3373 * Return value:
3374 * 	IRQ_NONE / IRQ_HANDLED
3375 **/
3376static irqreturn_t ipr_handle_other_interrupt(struct ipr_ioa_cfg *ioa_cfg,
3377					      volatile u32 int_reg)
3378{
3379	irqreturn_t rc = IRQ_HANDLED;
3380
3381	if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
3382		/* Mask the interrupt */
3383		writel(IPR_PCII_IOA_TRANS_TO_OPER, ioa_cfg->regs.set_interrupt_mask_reg);
3384
3385		/* Clear the interrupt */
3386		writel(IPR_PCII_IOA_TRANS_TO_OPER, ioa_cfg->regs.clr_interrupt_reg);
3387		int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
3388
3389		list_del(&ioa_cfg->reset_cmd->queue);
3390		del_timer(&ioa_cfg->reset_cmd->timer);
3391		ipr_reset_ioa_job(ioa_cfg->reset_cmd);
3392	} else {
3393		if (int_reg & IPR_PCII_IOA_UNIT_CHECKED)
3394			ioa_cfg->ioa_unit_checked = 1;
3395		else
3396			dev_err(&ioa_cfg->pdev->dev,
3397				"Permanent IOA failure. 0x%08X\n", int_reg);
3398
3399		if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
3400			ioa_cfg->sdt_state = GET_DUMP;
3401
3402		ipr_mask_and_clear_interrupts(ioa_cfg, ~0);
3403		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
3404	}
3405
3406	return rc;
3407}
3408
3409/**
3410 * ipr_isr - Interrupt service routine
3411 * @irq:	irq number
3412 * @devp:	pointer to ioa config struct
3413 * @regs:	pt_regs struct
3414 *
3415 * Return value:
3416 * 	IRQ_NONE / IRQ_HANDLED
3417 **/
3418static irqreturn_t ipr_isr(int irq, void *devp, struct pt_regs *regs)
3419{
3420	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)devp;
3421	unsigned long lock_flags = 0;
3422	volatile u32 int_reg, int_mask_reg;
3423	u32 ioasc;
3424	u16 cmd_index;
3425	struct ipr_cmnd *ipr_cmd;
3426	irqreturn_t rc = IRQ_NONE;
3427
3428	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3429
3430	/* If interrupts are disabled, ignore the interrupt */
3431	if (!ioa_cfg->allow_interrupts) {
3432		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3433		return IRQ_NONE;
3434	}
3435
3436	int_mask_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
3437	int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg;
3438
	/* If the interrupt did not come from this adapter, ignore it */
3440	if (unlikely((int_reg & IPR_PCII_OPER_INTERRUPTS) == 0)) {
3441		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3442		return IRQ_NONE;
3443	}
3444
3445	while (1) {
3446		ipr_cmd = NULL;
3447
3448		while ((be32_to_cpu(*ioa_cfg->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) ==
3449		       ioa_cfg->toggle_bit) {
3450
3451			cmd_index = (be32_to_cpu(*ioa_cfg->hrrq_curr) &
3452				     IPR_HRRQ_REQ_RESP_HANDLE_MASK) >> IPR_HRRQ_REQ_RESP_HANDLE_SHIFT;
3453
3454			if (unlikely(cmd_index >= IPR_NUM_CMD_BLKS)) {
3455				ioa_cfg->errors_logged++;
3456				dev_err(&ioa_cfg->pdev->dev, "Invalid response handle from IOA\n");
3457
3458				if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
3459					ioa_cfg->sdt_state = GET_DUMP;
3460
3461				ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
3462				spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3463				return IRQ_HANDLED;
3464			}
3465
3466			ipr_cmd = ioa_cfg->ipr_cmnd_list[cmd_index];
3467
3468			ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
3469
3470			ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH, ioasc);
3471
3472			list_del(&ipr_cmd->queue);
3473			del_timer(&ipr_cmd->timer);
3474			ipr_cmd->done(ipr_cmd);
3475
3476			rc = IRQ_HANDLED;
3477
3478			if (ioa_cfg->hrrq_curr < ioa_cfg->hrrq_end) {
3479				ioa_cfg->hrrq_curr++;
3480			} else {
3481				ioa_cfg->hrrq_curr = ioa_cfg->hrrq_start;
3482				ioa_cfg->toggle_bit ^= 1u;
3483			}
3484		}
3485
3486		if (ipr_cmd != NULL) {
3487			/* Clear the PCI interrupt */
3488			writel(IPR_PCII_HRRQ_UPDATED, ioa_cfg->regs.clr_interrupt_reg);
3489			int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg;
3490		} else
3491			break;
3492	}
3493
3494	if (unlikely(rc == IRQ_NONE))
3495		rc = ipr_handle_other_interrupt(ioa_cfg, int_reg);
3496
3497	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3498	return rc;
3499}
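
/*
 * The inner loop of ipr_isr drains the Host Request Response Queue
 * (HRRQ): each queue word holds a response handle plus a toggle bit the
 * adapter flips on every wrap of the circular queue, so an entry is
 * valid only while its toggle bit matches ours. A minimal sketch of the
 * consumer pattern, with complete_command() standing in for the real
 * completion work (illustrative only):
 *
 *	while ((be32_to_cpu(*curr) & IPR_HRRQ_TOGGLE_BIT) == toggle) {
 *		handle = (be32_to_cpu(*curr) & IPR_HRRQ_REQ_RESP_HANDLE_MASK)
 *			 >> IPR_HRRQ_REQ_RESP_HANDLE_SHIFT;
 *		complete_command(handle);
 *		if (curr < end)
 *			curr++;
 *		else {
 *			curr = start;
 *			toggle ^= 1u;
 *		}
 *	}
 */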
3500
3501/**
3502 * ipr_build_ioadl - Build a scatter/gather list and map the buffer
3503 * @ioa_cfg:	ioa config struct
3504 * @ipr_cmd:	ipr command struct
3505 *
3506 * Return value:
3507 * 	0 on success / -1 on failure
3508 **/
3509static int ipr_build_ioadl(struct ipr_ioa_cfg *ioa_cfg,
3510			   struct ipr_cmnd *ipr_cmd)
3511{
3512	int i;
3513	struct scatterlist *sglist;
3514	u32 length;
3515	u32 ioadl_flags = 0;
3516	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
3517	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
3518	struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;
3519
3520	length = scsi_cmd->request_bufflen;
3521
3522	if (length == 0)
3523		return 0;
3524
3525	if (scsi_cmd->use_sg) {
3526		ipr_cmd->dma_use_sg = pci_map_sg(ioa_cfg->pdev,
3527						 scsi_cmd->request_buffer,
3528						 scsi_cmd->use_sg,
3529						 scsi_cmd->sc_data_direction);
3530
3531		if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) {
3532			ioadl_flags = IPR_IOADL_FLAGS_WRITE;
3533			ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
3534			ioarcb->write_data_transfer_length = cpu_to_be32(length);
3535			ioarcb->write_ioadl_len =
3536				cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
3537		} else if (scsi_cmd->sc_data_direction == DMA_FROM_DEVICE) {
3538			ioadl_flags = IPR_IOADL_FLAGS_READ;
3539			ioarcb->read_data_transfer_length = cpu_to_be32(length);
3540			ioarcb->read_ioadl_len =
3541				cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
3542		}
3543
3544		sglist = scsi_cmd->request_buffer;
3545
3546		for (i = 0; i < ipr_cmd->dma_use_sg; i++) {
3547			ioadl[i].flags_and_data_len =
3548				cpu_to_be32(ioadl_flags | sg_dma_len(&sglist[i]));
3549			ioadl[i].address =
3550				cpu_to_be32(sg_dma_address(&sglist[i]));
3551		}
3552
3553		if (likely(ipr_cmd->dma_use_sg)) {
3554			ioadl[i-1].flags_and_data_len |=
3555				cpu_to_be32(IPR_IOADL_FLAGS_LAST);
3556			return 0;
3557		} else
3558			dev_err(&ioa_cfg->pdev->dev, "pci_map_sg failed!\n");
3559	} else {
3560		if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) {
3561			ioadl_flags = IPR_IOADL_FLAGS_WRITE;
3562			ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
3563			ioarcb->write_data_transfer_length = cpu_to_be32(length);
3564			ioarcb->write_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
3565		} else if (scsi_cmd->sc_data_direction == DMA_FROM_DEVICE) {
3566			ioadl_flags = IPR_IOADL_FLAGS_READ;
3567			ioarcb->read_data_transfer_length = cpu_to_be32(length);
3568			ioarcb->read_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
3569		}
3570
3571		ipr_cmd->dma_handle = pci_map_single(ioa_cfg->pdev,
3572						     scsi_cmd->request_buffer, length,
3573						     scsi_cmd->sc_data_direction);
3574
3575		if (likely(!pci_dma_mapping_error(ipr_cmd->dma_handle))) {
3576			ipr_cmd->dma_use_sg = 1;
3577			ioadl[0].flags_and_data_len =
3578				cpu_to_be32(ioadl_flags | length | IPR_IOADL_FLAGS_LAST);
3579			ioadl[0].address = cpu_to_be32(ipr_cmd->dma_handle);
3580			return 0;
3581		} else
3582			dev_err(&ioa_cfg->pdev->dev, "pci_map_single failed!\n");
3583	}
3584
3585	return -1;
3586}
3587
3588/**
3589 * ipr_get_task_attributes - Translate SPI Q-Tag to task attributes
3590 * @scsi_cmd:	scsi command struct
3591 *
3592 * Return value:
3593 * 	task attributes
3594 **/
3595static u8 ipr_get_task_attributes(struct scsi_cmnd *scsi_cmd)
3596{
3597	u8 tag[2];
3598	u8 rc = IPR_FLAGS_LO_UNTAGGED_TASK;
3599
3600	if (scsi_populate_tag_msg(scsi_cmd, tag)) {
3601		switch (tag[0]) {
3602		case MSG_SIMPLE_TAG:
3603			rc = IPR_FLAGS_LO_SIMPLE_TASK;
3604			break;
3605		case MSG_HEAD_TAG:
3606			rc = IPR_FLAGS_LO_HEAD_OF_Q_TASK;
3607			break;
3608		case MSG_ORDERED_TAG:
3609			rc = IPR_FLAGS_LO_ORDERED_TASK;
3610			break;
		}
3612	}
3613
3614	return rc;
3615}
3616
3617/**
3618 * ipr_erp_done - Process completion of ERP for a device
3619 * @ipr_cmd:		ipr command struct
3620 *
3621 * This function copies the sense buffer into the scsi_cmd
3622 * struct and pushes the scsi_done function.
3623 *
3624 * Return value:
3625 * 	nothing
3626 **/
3627static void ipr_erp_done(struct ipr_cmnd *ipr_cmd)
3628{
3629	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
3630	struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
3631	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
3632	u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
3633
3634	if (IPR_IOASC_SENSE_KEY(ioasc) > 0) {
3635		scsi_cmd->result |= (DID_ERROR << 16);
3636		ipr_sdev_err(scsi_cmd->device,
3637			     "Request Sense failed with IOASC: 0x%08X\n", ioasc);
3638	} else {
3639		memcpy(scsi_cmd->sense_buffer, ipr_cmd->sense_buffer,
3640		       SCSI_SENSE_BUFFERSIZE);
3641	}
3642
3643	if (res) {
3644		res->needs_sync_complete = 1;
3645		res->in_erp = 0;
3646	}
3647	ipr_unmap_sglist(ioa_cfg, ipr_cmd);
3648	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
3649	scsi_cmd->scsi_done(scsi_cmd);
3650}
3651
3652/**
3653 * ipr_reinit_ipr_cmnd_for_erp - Re-initialize a cmnd block to be used for ERP
3654 * @ipr_cmd:	ipr command struct
3655 *
3656 * Return value:
3657 * 	none
3658 **/
3659static void ipr_reinit_ipr_cmnd_for_erp(struct ipr_cmnd *ipr_cmd)
3660{
3661	struct ipr_ioarcb *ioarcb;
3662	struct ipr_ioasa *ioasa;
3663
3664	ioarcb = &ipr_cmd->ioarcb;
3665	ioasa = &ipr_cmd->ioasa;
3666
3667	memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt));
3668	ioarcb->write_data_transfer_length = 0;
3669	ioarcb->read_data_transfer_length = 0;
3670	ioarcb->write_ioadl_len = 0;
3671	ioarcb->read_ioadl_len = 0;
3672	ioasa->ioasc = 0;
3673	ioasa->residual_data_len = 0;
3674}
3675
3676/**
3677 * ipr_erp_request_sense - Send request sense to a device
3678 * @ipr_cmd:	ipr command struct
3679 *
3680 * This function sends a request sense to a device as a result
3681 * of a check condition.
3682 *
3683 * Return value:
3684 * 	nothing
3685 **/
3686static void ipr_erp_request_sense(struct ipr_cmnd *ipr_cmd)
3687{
3688	struct ipr_cmd_pkt *cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
3689	u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
3690
3691	if (IPR_IOASC_SENSE_KEY(ioasc) > 0) {
3692		ipr_erp_done(ipr_cmd);
3693		return;
3694	}
3695
3696	ipr_reinit_ipr_cmnd_for_erp(ipr_cmd);
3697
3698	cmd_pkt->request_type = IPR_RQTYPE_SCSICDB;
3699	cmd_pkt->cdb[0] = REQUEST_SENSE;
3700	cmd_pkt->cdb[4] = SCSI_SENSE_BUFFERSIZE;
3701	cmd_pkt->flags_hi |= IPR_FLAGS_HI_SYNC_OVERRIDE;
3702	cmd_pkt->flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
3703	cmd_pkt->timeout = cpu_to_be16(IPR_REQUEST_SENSE_TIMEOUT / HZ);
3704
3705	ipr_cmd->ioadl[0].flags_and_data_len =
3706		cpu_to_be32(IPR_IOADL_FLAGS_READ_LAST | SCSI_SENSE_BUFFERSIZE);
3707	ipr_cmd->ioadl[0].address =
3708		cpu_to_be32(ipr_cmd->sense_buffer_dma);
3709
3710	ipr_cmd->ioarcb.read_ioadl_len =
3711		cpu_to_be32(sizeof(struct ipr_ioadl_desc));
3712	ipr_cmd->ioarcb.read_data_transfer_length =
3713		cpu_to_be32(SCSI_SENSE_BUFFERSIZE);
3714
3715	ipr_do_req(ipr_cmd, ipr_erp_done, ipr_timeout,
3716		   IPR_REQUEST_SENSE_TIMEOUT * 2);
3717}
3718
3719/**
3720 * ipr_erp_cancel_all - Send cancel all to a device
3721 * @ipr_cmd:	ipr command struct
3722 *
3723 * This function sends a cancel all to a device to clear the
3724 * queue. If we are running TCQ on the device, QERR is set to 1,
3725 * which means all outstanding ops have been dropped on the floor.
3726 * Cancel all will return them to us.
3727 *
3728 * Return value:
3729 * 	nothing
3730 **/
3731static void ipr_erp_cancel_all(struct ipr_cmnd *ipr_cmd)
3732{
3733	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
3734	struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
3735	struct ipr_cmd_pkt *cmd_pkt;
3736
3737	res->in_erp = 1;
3738
3739	ipr_reinit_ipr_cmnd_for_erp(ipr_cmd);
3740
3741	if (!scsi_get_tag_type(scsi_cmd->device)) {
3742		ipr_erp_request_sense(ipr_cmd);
3743		return;
3744	}
3745
3746	cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
3747	cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
3748	cmd_pkt->cdb[0] = IPR_CANCEL_ALL_REQUESTS;
3749
3750	ipr_do_req(ipr_cmd, ipr_erp_request_sense, ipr_timeout,
3751		   IPR_CANCEL_ALL_TIMEOUT);
3752}
3753
3754/**
3755 * ipr_dump_ioasa - Dump contents of IOASA
3756 * @ioa_cfg:	ioa config struct
3757 * @ipr_cmd:	ipr command struct
3758 *
3759 * This function is invoked by the interrupt handler when ops
3760 * fail. It will log the IOASA if appropriate. Only called
3761 * for GPDD ops.
3762 *
3763 * Return value:
3764 * 	none
3765 **/
3766static void ipr_dump_ioasa(struct ipr_ioa_cfg *ioa_cfg,
3767			   struct ipr_cmnd *ipr_cmd)
3768{
3769	int i;
3770	u16 data_len;
3771	u32 ioasc;
3772	struct ipr_ioasa *ioasa = &ipr_cmd->ioasa;
3773	__be32 *ioasa_data = (__be32 *)ioasa;
3774	int error_index;
3775
3776	ioasc = be32_to_cpu(ioasa->ioasc) & IPR_IOASC_IOASC_MASK;
3777
3778	if (0 == ioasc)
3779		return;
3780
3781	if (ioa_cfg->log_level < IPR_DEFAULT_LOG_LEVEL)
3782		return;
3783
3784	error_index = ipr_get_error(ioasc);
3785
3786	if (ioa_cfg->log_level < IPR_MAX_LOG_LEVEL) {
3787		/* Don't log an error if the IOA already logged one */
3788		if (ioasa->ilid != 0)
3789			return;
3790
3791		if (ipr_error_table[error_index].log_ioasa == 0)
3792			return;
3793	}
3794
3795	ipr_sdev_err(ipr_cmd->scsi_cmd->device, "%s\n",
3796		     ipr_error_table[error_index].error);
3797
3798	if ((ioasa->u.gpdd.end_state < ARRAY_SIZE(ipr_gpdd_dev_end_states)) &&
3799	    (ioasa->u.gpdd.bus_phase < ARRAY_SIZE(ipr_gpdd_dev_bus_phases))) {
3800		ipr_sdev_err(ipr_cmd->scsi_cmd->device,
3801			     "Device End state: %s Phase: %s\n",
3802			     ipr_gpdd_dev_end_states[ioasa->u.gpdd.end_state],
3803			     ipr_gpdd_dev_bus_phases[ioasa->u.gpdd.bus_phase]);
3804	}
3805
3806	if (sizeof(struct ipr_ioasa) < be16_to_cpu(ioasa->ret_stat_len))
3807		data_len = sizeof(struct ipr_ioasa);
3808	else
3809		data_len = be16_to_cpu(ioasa->ret_stat_len);
3810
3811	ipr_err("IOASA Dump:\n");
3812
3813	for (i = 0; i < data_len / 4; i += 4) {
3814		ipr_err("%08X: %08X %08X %08X %08X\n", i*4,
3815			be32_to_cpu(ioasa_data[i]),
3816			be32_to_cpu(ioasa_data[i+1]),
3817			be32_to_cpu(ioasa_data[i+2]),
3818			be32_to_cpu(ioasa_data[i+3]));
3819	}
3820}
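
/*
 * For reference, the loop above emits 16 bytes per row: the byte
 * offset followed by four big-endian words, e.g. (illustrative
 * values only):
 *
 *	IOASA Dump:
 *	00000000: 04448500 00000020 00000000 00000000
 *	00000010: 00000000 00000000 00000000 00000000
 */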
3821
3822/**
3823 * ipr_gen_sense - Generate SCSI sense data from an IOASA
3824 * @ipr_cmd:	ipr command struct
3826 *
3827 * Return value:
3828 * 	none
3829 **/
3830static void ipr_gen_sense(struct ipr_cmnd *ipr_cmd)
3831{
3832	u32 failing_lba;
3833	u8 *sense_buf = ipr_cmd->scsi_cmd->sense_buffer;
3834	struct ipr_resource_entry *res = ipr_cmd->scsi_cmd->device->hostdata;
3835	struct ipr_ioasa *ioasa = &ipr_cmd->ioasa;
3836	u32 ioasc = be32_to_cpu(ioasa->ioasc);
3837
3838	memset(sense_buf, 0, SCSI_SENSE_BUFFERSIZE);
3839
3840	if (ioasc >= IPR_FIRST_DRIVER_IOASC)
3841		return;
3842
3843	ipr_cmd->scsi_cmd->result = SAM_STAT_CHECK_CONDITION;
3844
3845	if (ipr_is_vset_device(res) &&
3846	    ioasc == IPR_IOASC_MED_DO_NOT_REALLOC &&
3847	    ioasa->u.vset.failing_lba_hi != 0) {
3848		sense_buf[0] = 0x72;
3849		sense_buf[1] = IPR_IOASC_SENSE_KEY(ioasc);
3850		sense_buf[2] = IPR_IOASC_SENSE_CODE(ioasc);
3851		sense_buf[3] = IPR_IOASC_SENSE_QUAL(ioasc);
3852
3853		sense_buf[7] = 12;
3854		sense_buf[8] = 0;
3855		sense_buf[9] = 0x0A;
3856		sense_buf[10] = 0x80;
3857
3858		failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_hi);
3859
3860		sense_buf[12] = (failing_lba & 0xff000000) >> 24;
3861		sense_buf[13] = (failing_lba & 0x00ff0000) >> 16;
3862		sense_buf[14] = (failing_lba & 0x0000ff00) >> 8;
3863		sense_buf[15] = failing_lba & 0x000000ff;
3864
3865		failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_lo);
3866
3867		sense_buf[16] = (failing_lba & 0xff000000) >> 24;
3868		sense_buf[17] = (failing_lba & 0x00ff0000) >> 16;
3869		sense_buf[18] = (failing_lba & 0x0000ff00) >> 8;
3870		sense_buf[19] = failing_lba & 0x000000ff;
3871	} else {
3872		sense_buf[0] = 0x70;
3873		sense_buf[2] = IPR_IOASC_SENSE_KEY(ioasc);
3874		sense_buf[12] = IPR_IOASC_SENSE_CODE(ioasc);
3875		sense_buf[13] = IPR_IOASC_SENSE_QUAL(ioasc);
3876
3877		/* Illegal request */
3878		if ((IPR_IOASC_SENSE_KEY(ioasc) == 0x05) &&
3879		    (be32_to_cpu(ioasa->ioasc_specific) & IPR_FIELD_POINTER_VALID)) {
3880			sense_buf[7] = 10;	/* additional length */
3881
3882			/* IOARCB was in error */
3883			if (IPR_IOASC_SENSE_CODE(ioasc) == 0x24)
3884				sense_buf[15] = 0xC0;
3885			else	/* Parameter data was invalid */
3886				sense_buf[15] = 0x80;
3887
3888			sense_buf[16] =
3889			    ((IPR_FIELD_POINTER_MASK &
3890			      be32_to_cpu(ioasa->ioasc_specific)) >> 8) & 0xff;
3891			sense_buf[17] =
3892			    (IPR_FIELD_POINTER_MASK &
3893			     be32_to_cpu(ioasa->ioasc_specific)) & 0xff;
3894		} else {
3895			if (ioasc == IPR_IOASC_MED_DO_NOT_REALLOC) {
3896				if (ipr_is_vset_device(res))
3897					failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_lo);
3898				else
3899					failing_lba = be32_to_cpu(ioasa->u.dasd.failing_lba);
3900
3901				sense_buf[0] |= 0x80;	/* Or in the Valid bit */
3902				sense_buf[3] = (failing_lba & 0xff000000) >> 24;
3903				sense_buf[4] = (failing_lba & 0x00ff0000) >> 16;
3904				sense_buf[5] = (failing_lba & 0x0000ff00) >> 8;
3905				sense_buf[6] = failing_lba & 0x000000ff;
3906			}
3907
3908			sense_buf[7] = 6;	/* additional length */
3909		}
3910	}
3911}
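
/*
 * Illustrative sketch (not part of the driver; the helper name is
 * hypothetical): undoing the descriptor format (0x72) encoding
 * above. The information sense data descriptor starts at byte 8,
 * and bytes 12-19 carry the 64-bit failing LBA in big-endian
 * order, so a consumer could recover it like this:
 */
static inline u64 ipr_example_sense_to_lba(const u8 *sense_buf)
{
	int i;
	u64 lba = 0;

	/* 0x72 = descriptor format; descriptor type 0x00 = information */
	if (sense_buf[0] != 0x72 || sense_buf[8] != 0x00)
		return 0;

	for (i = 12; i < 20; i++)
		lba = (lba << 8) | sense_buf[i];

	return lba;
}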
3912
3913/**
3914 * ipr_erp_start - Process an error response for a SCSI op
3915 * @ioa_cfg:	ioa config struct
3916 * @ipr_cmd:	ipr command struct
3917 *
3918 * This function determines whether or not to initiate ERP
3919 * on the affected device.
3920 *
3921 * Return value:
3922 * 	nothing
3923 **/
3924static void ipr_erp_start(struct ipr_ioa_cfg *ioa_cfg,
3925			      struct ipr_cmnd *ipr_cmd)
3926{
3927	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
3928	struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
3929	u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
3930
3931	if (!res) {
3932		ipr_scsi_eh_done(ipr_cmd);
3933		return;
3934	}
3935
3936	if (ipr_is_gscsi(res))
3937		ipr_dump_ioasa(ioa_cfg, ipr_cmd);
3938	else
3939		ipr_gen_sense(ipr_cmd);
3940
3941	switch (ioasc & IPR_IOASC_IOASC_MASK) {
3942	case IPR_IOASC_ABORTED_CMD_TERM_BY_HOST:
3943		scsi_cmd->result |= (DID_IMM_RETRY << 16);
3944		break;
3945	case IPR_IOASC_IR_RESOURCE_HANDLE:
3946	case IPR_IOASC_IR_NO_CMDS_TO_2ND_IOA:
3947		scsi_cmd->result |= (DID_NO_CONNECT << 16);
3948		break;
3949	case IPR_IOASC_HW_SEL_TIMEOUT:
3950		scsi_cmd->result |= (DID_NO_CONNECT << 16);
3951		res->needs_sync_complete = 1;
3952		break;
3953	case IPR_IOASC_SYNC_REQUIRED:
3954		if (!res->in_erp)
3955			res->needs_sync_complete = 1;
3956		scsi_cmd->result |= (DID_IMM_RETRY << 16);
3957		break;
3958	case IPR_IOASC_MED_DO_NOT_REALLOC: /* prevent retries */
3959	case IPR_IOASA_IR_DUAL_IOA_DISABLED:
3960		scsi_cmd->result |= (DID_PASSTHROUGH << 16);
3961		break;
3962	case IPR_IOASC_BUS_WAS_RESET:
3963	case IPR_IOASC_BUS_WAS_RESET_BY_OTHER:
3964		/*
3965		 * Report the bus reset and ask for a retry. The device
3966		 * will give CC/UA the next command.
3967		 */
3968		if (!res->resetting_device)
3969			scsi_report_bus_reset(ioa_cfg->host, scsi_cmd->device->channel);
3970		scsi_cmd->result |= (DID_ERROR << 16);
3971		res->needs_sync_complete = 1;
3972		break;
3973	case IPR_IOASC_HW_DEV_BUS_STATUS:
3974		scsi_cmd->result |= IPR_IOASC_SENSE_STATUS(ioasc);
3975		if (IPR_IOASC_SENSE_STATUS(ioasc) == SAM_STAT_CHECK_CONDITION) {
3976			ipr_erp_cancel_all(ipr_cmd);
3977			return;
3978		}
3979		res->needs_sync_complete = 1;
3980		break;
3981	case IPR_IOASC_NR_INIT_CMD_REQUIRED:
3982		break;
3983	default:
3984		scsi_cmd->result |= (DID_ERROR << 16);
3985		if (!ipr_is_vset_device(res))
3986			res->needs_sync_complete = 1;
3987		break;
3988	}
3989
3990	ipr_unmap_sglist(ioa_cfg, ipr_cmd);
3991	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
3992	scsi_cmd->scsi_done(scsi_cmd);
3993}
3994
3995/**
3996 * ipr_scsi_done - mid-layer done function
3997 * @ipr_cmd:	ipr command struct
3998 *
3999 * This function is invoked by the interrupt handler for
4000 * ops generated by the SCSI mid-layer
4001 *
4002 * Return value:
4003 * 	none
4004 **/
4005static void ipr_scsi_done(struct ipr_cmnd *ipr_cmd)
4006{
4007	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
4008	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
4009	u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
4010
4011	scsi_cmd->resid = be32_to_cpu(ipr_cmd->ioasa.residual_data_len);
4012
4013	if (likely(IPR_IOASC_SENSE_KEY(ioasc) == 0)) {
4014		ipr_unmap_sglist(ioa_cfg, ipr_cmd);
4015		list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
4016		scsi_cmd->scsi_done(scsi_cmd);
4017	} else
4018		ipr_erp_start(ioa_cfg, ipr_cmd);
4019}
4020
4021/**
4022 * ipr_save_ioafp_mode_select - Save adapter's mode select data
4023 * @ioa_cfg:	ioa config struct
4024 * @scsi_cmd:	scsi command struct
4025 *
4026 * This function saves mode select data for the adapter to
4027 * use following an adapter reset.
4028 *
4029 * Return value:
4030 *	0 on success / SCSI_MLQUEUE_HOST_BUSY on failure
4031 **/
4032static int ipr_save_ioafp_mode_select(struct ipr_ioa_cfg *ioa_cfg,
4033				       struct scsi_cmnd *scsi_cmd)
4034{
4035	if (!ioa_cfg->saved_mode_pages) {
4036		ioa_cfg->saved_mode_pages  = kmalloc(sizeof(struct ipr_mode_pages),
4037						     GFP_ATOMIC);
4038		if (!ioa_cfg->saved_mode_pages) {
4039			dev_err(&ioa_cfg->pdev->dev,
4040				"IOA mode select buffer allocation failed\n");
4041			return SCSI_MLQUEUE_HOST_BUSY;
4042		}
4043	}
4044
4045	memcpy(ioa_cfg->saved_mode_pages, scsi_cmd->buffer, scsi_cmd->cmnd[4]);
4046	ioa_cfg->saved_mode_page_len = scsi_cmd->cmnd[4];
4047	return 0;
4048}
4049
4050/**
4051 * ipr_queuecommand - Queue a mid-layer request
4052 * @scsi_cmd:	scsi command struct
4053 * @done:		done function
4054 *
4055 * This function queues a request generated by the mid-layer.
4056 *
4057 * Return value:
4058 *	0 on success
4059 *	SCSI_MLQUEUE_DEVICE_BUSY if device is busy
4060 *	SCSI_MLQUEUE_HOST_BUSY if host is busy
4061 **/
4062static int ipr_queuecommand(struct scsi_cmnd *scsi_cmd,
4063			    void (*done) (struct scsi_cmnd *))
4064{
4065	struct ipr_ioa_cfg *ioa_cfg;
4066	struct ipr_resource_entry *res;
4067	struct ipr_ioarcb *ioarcb;
4068	struct ipr_cmnd *ipr_cmd;
4069	int rc = 0;
4070
4071	scsi_cmd->scsi_done = done;
4072	ioa_cfg = (struct ipr_ioa_cfg *)scsi_cmd->device->host->hostdata;
4073	res = scsi_cmd->device->hostdata;
4074	scsi_cmd->result = (DID_OK << 16);
4075
4076	/*
4077	 * We are currently blocking all devices due to a host reset.
4078	 * We have told the host to stop giving us new requests, but
4079	 * ERP ops don't count. FIXME
4080	 */
4081	if (unlikely(!ioa_cfg->allow_cmds && !ioa_cfg->ioa_is_dead))
4082		return SCSI_MLQUEUE_HOST_BUSY;
4083
4084	/*
4085	 * FIXME - Create scsi_set_host_offline interface
4086	 *  and the ioa_is_dead check can be removed
4087	 */
4088	if (unlikely(ioa_cfg->ioa_is_dead || !res)) {
4089		memset(scsi_cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
4090		scsi_cmd->result = (DID_NO_CONNECT << 16);
4091		scsi_cmd->scsi_done(scsi_cmd);
4092		return 0;
4093	}
4094
4095	ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
4096	ioarcb = &ipr_cmd->ioarcb;
4097	list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);
4098
4099	memcpy(ioarcb->cmd_pkt.cdb, scsi_cmd->cmnd, scsi_cmd->cmd_len);
4100	ipr_cmd->scsi_cmd = scsi_cmd;
4101	ioarcb->res_handle = res->cfgte.res_handle;
4102	ipr_cmd->done = ipr_scsi_done;
4103	ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_GET_PHYS_LOC(res->cfgte.res_addr));
4104
4105	if (ipr_is_gscsi(res) || ipr_is_vset_device(res)) {
4106		if (scsi_cmd->underflow == 0)
4107			ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
4108
4109		if (res->needs_sync_complete) {
4110			ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_SYNC_COMPLETE;
4111			res->needs_sync_complete = 0;
4112		}
4113
4114		ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_LINK_DESC;
4115		ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_DELAY_AFTER_RST;
4116		ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_ALIGNED_BFR;
4117		ioarcb->cmd_pkt.flags_lo |= ipr_get_task_attributes(scsi_cmd);
4118	}
4119
4120	if (scsi_cmd->cmnd[0] >= 0xC0 &&
4121	    (!ipr_is_gscsi(res) || scsi_cmd->cmnd[0] == IPR_QUERY_RSRC_STATE))
4122		ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
4123
4124	if (ipr_is_ioa_resource(res) && scsi_cmd->cmnd[0] == MODE_SELECT)
4125		rc = ipr_save_ioafp_mode_select(ioa_cfg, scsi_cmd);
4126
4127	if (likely(rc == 0))
4128		rc = ipr_build_ioadl(ioa_cfg, ipr_cmd);
4129
4130	if (likely(rc == 0)) {
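		/*
		 * Order the IOARCB/IOADL setup above ahead of the IOARRIN
		 * doorbell write below so the adapter sees a complete
		 * command block.
		 */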
4131		mb();
4132		writel(be32_to_cpu(ipr_cmd->ioarcb.ioarcb_host_pci_addr),
4133		       ioa_cfg->regs.ioarrin_reg);
4134	} else {
4135		list_move_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
4136		return SCSI_MLQUEUE_HOST_BUSY;
4137	}
4138
4139	return 0;
4140}
4141
4142/**
4143 * ipr_ioa_info - Get information about the card/driver
4144 * @host:	scsi host struct
4145 *
4146 * Return value:
4147 * 	pointer to buffer with description string
4148 **/
4149static const char * ipr_ioa_info(struct Scsi_Host *host)
4150{
4151	static char buffer[512];
4152	struct ipr_ioa_cfg *ioa_cfg;
4153	unsigned long lock_flags = 0;
4154
4155	ioa_cfg = (struct ipr_ioa_cfg *) host->hostdata;
4156
4157	spin_lock_irqsave(host->host_lock, lock_flags);
4158	sprintf(buffer, "IBM %X Storage Adapter", ioa_cfg->type);
4159	spin_unlock_irqrestore(host->host_lock, lock_flags);
4160
4161	return buffer;
4162}
4163
4164static struct scsi_host_template driver_template = {
4165	.module = THIS_MODULE,
4166	.name = "IPR",
4167	.info = ipr_ioa_info,
4168	.queuecommand = ipr_queuecommand,
4169	.eh_abort_handler = ipr_eh_abort,
4170	.eh_device_reset_handler = ipr_eh_dev_reset,
4171	.eh_host_reset_handler = ipr_eh_host_reset,
4172	.slave_alloc = ipr_slave_alloc,
4173	.slave_configure = ipr_slave_configure,
4174	.slave_destroy = ipr_slave_destroy,
4175	.change_queue_depth = ipr_change_queue_depth,
4176	.change_queue_type = ipr_change_queue_type,
4177	.bios_param = ipr_biosparam,
4178	.can_queue = IPR_MAX_COMMANDS,
4179	.this_id = -1,
4180	.sg_tablesize = IPR_MAX_SGLIST,
4181	.max_sectors = IPR_IOA_MAX_SECTORS,
4182	.cmd_per_lun = IPR_MAX_CMD_PER_LUN,
4183	.use_clustering = ENABLE_CLUSTERING,
4184	.shost_attrs = ipr_ioa_attrs,
4185	.sdev_attrs = ipr_dev_attrs,
4186	.proc_name = IPR_NAME
4187};
4188
4189#ifdef CONFIG_PPC_PSERIES
4190static const u16 ipr_blocked_processors[] = {
4191	PV_NORTHSTAR,
4192	PV_PULSAR,
4193	PV_POWER4,
4194	PV_ICESTAR,
4195	PV_SSTAR,
4196	PV_POWER4p,
4197	PV_630,
4198	PV_630p
4199};
4200
4201/**
4202 * ipr_invalid_adapter - Determine if this adapter is supported on this hardware
4203 * @ioa_cfg:	ioa cfg struct
4204 *
4205 * Adapters that use Gemstone revision < 3.1 do not work reliably on
4206 * certain pSeries hardware. This function determines if the given
4207 * adapter is in one of these configurations or not.
4208 *
4209 * Return value:
4210 * 	1 if adapter is not supported / 0 if adapter is supported
4211 **/
4212static int ipr_invalid_adapter(struct ipr_ioa_cfg *ioa_cfg)
4213{
4214	u8 rev_id;
4215	int i;
4216
4217	if (ioa_cfg->type == 0x5702) {
4218		if (pci_read_config_byte(ioa_cfg->pdev, PCI_REVISION_ID,
4219					 &rev_id) == PCIBIOS_SUCCESSFUL) {
4220			if (rev_id < 4) {
4221				for (i = 0; i < ARRAY_SIZE(ipr_blocked_processors); i++){
4222					if (__is_processor(ipr_blocked_processors[i]))
4223						return 1;
4224				}
4225			}
4226		}
4227	}
4228	return 0;
4229}
4230#else
4231#define ipr_invalid_adapter(ioa_cfg) 0
4232#endif
4233
4234/**
4235 * ipr_ioa_bringdown_done - IOA bring down completion.
4236 * @ipr_cmd:	ipr command struct
4237 *
4238 * This function processes the completion of an adapter bring down.
4239 * It wakes any reset sleepers.
4240 *
4241 * Return value:
4242 * 	IPR_RC_JOB_RETURN
4243 **/
4244static int ipr_ioa_bringdown_done(struct ipr_cmnd *ipr_cmd)
4245{
4246	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
4247
4248	ENTER;
4249	ioa_cfg->in_reset_reload = 0;
4250	ioa_cfg->reset_retries = 0;
4251	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
4252	wake_up_all(&ioa_cfg->reset_wait_q);
4253
4254	spin_unlock_irq(ioa_cfg->host->host_lock);
4255	scsi_unblock_requests(ioa_cfg->host);
4256	spin_lock_irq(ioa_cfg->host->host_lock);
4257	LEAVE;
4258
4259	return IPR_RC_JOB_RETURN;
4260}
4261
4262/**
4263 * ipr_ioa_reset_done - IOA reset completion.
4264 * @ipr_cmd:	ipr command struct
4265 *
4266 * This function processes the completion of an adapter reset.
4267 * It schedules any necessary mid-layer add/removes and
4268 * wakes any reset sleepers.
4269 *
4270 * Return value:
4271 * 	IPR_RC_JOB_RETURN
4272 **/
4273static int ipr_ioa_reset_done(struct ipr_cmnd *ipr_cmd)
4274{
4275	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
4276	struct ipr_resource_entry *res;
4277	struct ipr_hostrcb *hostrcb, *temp;
4278	int i = 0;
4279
4280	ENTER;
4281	ioa_cfg->in_reset_reload = 0;
4282	ioa_cfg->allow_cmds = 1;
4283	ioa_cfg->reset_cmd = NULL;
4284	ioa_cfg->doorbell |= IPR_RUNTIME_RESET;
4285
4286	list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
4287		if (ioa_cfg->allow_ml_add_del && (res->add_to_ml || res->del_from_ml)) {
4288			ipr_trace;
4289			break;
4290		}
4291	}
4292	schedule_work(&ioa_cfg->work_q);
4293
4294	list_for_each_entry_safe(hostrcb, temp, &ioa_cfg->hostrcb_free_q, queue) {
4295		list_del(&hostrcb->queue);
4296		if (i++ < IPR_NUM_LOG_HCAMS)
4297			ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_LOG_DATA, hostrcb);
4298		else
4299			ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
4300	}
4301
4302	dev_info(&ioa_cfg->pdev->dev, "IOA initialized.\n");
4303
4304	ioa_cfg->reset_retries = 0;
4305	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
4306	wake_up_all(&ioa_cfg->reset_wait_q);
4307
4308	spin_unlock_irq(ioa_cfg->host->host_lock);
4309	scsi_unblock_requests(ioa_cfg->host);
4310	spin_lock_irq(ioa_cfg->host->host_lock);
4311
4312	if (!ioa_cfg->allow_cmds)
4313		scsi_block_requests(ioa_cfg->host);
4314
4315	LEAVE;
4316	return IPR_RC_JOB_RETURN;
4317}
4318
4319/**
4320 * ipr_set_sup_dev_dflt - Initialize a Set Supported Device buffer
4321 * @supported_dev:	supported device struct
4322 * @vpids:			vendor product id struct
4323 *
4324 * Return value:
4325 * 	none
4326 **/
4327static void ipr_set_sup_dev_dflt(struct ipr_supported_device *supported_dev,
4328				 struct ipr_std_inq_vpids *vpids)
4329{
4330	memset(supported_dev, 0, sizeof(struct ipr_supported_device));
4331	memcpy(&supported_dev->vpids, vpids, sizeof(struct ipr_std_inq_vpids));
4332	supported_dev->num_records = 1;
4333	supported_dev->data_length =
4334		cpu_to_be16(sizeof(struct ipr_supported_device));
4335	supported_dev->reserved = 0;
4336}
4337
4338/**
4339 * ipr_set_supported_devs - Send Set Supported Devices for a device
4340 * @ipr_cmd:	ipr command struct
4341 *
4342 * This function sends a Set Supported Devices command to the adapter.
4343 *
4344 * Return value:
4345 * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
4346 **/
4347static int ipr_set_supported_devs(struct ipr_cmnd *ipr_cmd)
4348{
4349	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
4350	struct ipr_supported_device *supp_dev = &ioa_cfg->vpd_cbs->supp_dev;
4351	struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;
4352	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
4353	struct ipr_resource_entry *res = ipr_cmd->u.res;
4354
4355	ipr_cmd->job_step = ipr_ioa_reset_done;
4356
4357	list_for_each_entry_continue(res, &ioa_cfg->used_res_q, queue) {
4358		if (!IPR_IS_DASD_DEVICE(res->cfgte.std_inq_data))
4359			continue;
4360
4361		ipr_cmd->u.res = res;
4362		ipr_set_sup_dev_dflt(supp_dev, &res->cfgte.std_inq_data.vpids);
4363
4364		ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
4365		ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
4366		ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
4367
4368		ioarcb->cmd_pkt.cdb[0] = IPR_SET_SUPPORTED_DEVICES;
4369		ioarcb->cmd_pkt.cdb[7] = (sizeof(struct ipr_supported_device) >> 8) & 0xff;
4370		ioarcb->cmd_pkt.cdb[8] = sizeof(struct ipr_supported_device) & 0xff;
4371
4372		ioadl->flags_and_data_len = cpu_to_be32(IPR_IOADL_FLAGS_WRITE_LAST |
4373							sizeof(struct ipr_supported_device));
4374		ioadl->address = cpu_to_be32(ioa_cfg->vpd_cbs_dma +
4375					     offsetof(struct ipr_misc_cbs, supp_dev));
4376		ioarcb->write_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
4377		ioarcb->write_data_transfer_length =
4378			cpu_to_be32(sizeof(struct ipr_supported_device));
4379
4380		ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
4381			   IPR_SET_SUP_DEVICE_TIMEOUT);
4382
4383		ipr_cmd->job_step = ipr_set_supported_devs;
4384		return IPR_RC_JOB_RETURN;
4385	}
4386
4387	return IPR_RC_JOB_CONTINUE;
4388}
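
/*
 * Note on the loop above: each pass issues one Set Supported
 * Devices command and returns. Because job_step is re-armed to
 * this function and ipr_cmd->u.res holds the cursor,
 * list_for_each_entry_continue() resumes from the last DASD
 * processed when the command completes.
 */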
4389
4390/**
4391 * ipr_setup_write_cache - Disable write cache if needed
4392 * @ipr_cmd:	ipr command struct
4393 *
4394 * This function sets up the adapter's write cache to the desired setting
4395 *
4396 * Return value:
4397 * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
4398 **/
4399static int ipr_setup_write_cache(struct ipr_cmnd *ipr_cmd)
4400{
4401	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
4402
4403	ipr_cmd->job_step = ipr_set_supported_devs;
4404	ipr_cmd->u.res = list_entry(ioa_cfg->used_res_q.next,
4405				    struct ipr_resource_entry, queue);
4406
4407	if (ioa_cfg->cache_state != CACHE_DISABLED)
4408		return IPR_RC_JOB_CONTINUE;
4409
4410	ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
4411	ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
4412	ipr_cmd->ioarcb.cmd_pkt.cdb[0] = IPR_IOA_SHUTDOWN;
4413	ipr_cmd->ioarcb.cmd_pkt.cdb[1] = IPR_SHUTDOWN_PREPARE_FOR_NORMAL;
4414
4415	ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
4416
4417	return IPR_RC_JOB_RETURN;
4418}
4419
4420/**
4421 * ipr_get_mode_page - Locate specified mode page
4422 * @mode_pages:	mode page buffer
4423 * @page_code:	page code to find
4424 * @len:		minimum required length for mode page
4425 *
4426 * Return value:
4427 * 	pointer to mode page / NULL on failure
4428 **/
4429static void *ipr_get_mode_page(struct ipr_mode_pages *mode_pages,
4430			       u32 page_code, u32 len)
4431{
4432	struct ipr_mode_page_hdr *mode_hdr;
4433	u32 page_length;
4434	u32 length;
4435
4436	if (!mode_pages || (mode_pages->hdr.length == 0))
4437		return NULL;
4438
4439	length = (mode_pages->hdr.length + 1) - 4 - mode_pages->hdr.block_desc_len;
4440	mode_hdr = (struct ipr_mode_page_hdr *)
4441		(mode_pages->data + mode_pages->hdr.block_desc_len);
4442
4443	while (length) {
4444		if (IPR_GET_MODE_PAGE_CODE(mode_hdr) == page_code) {
4445			if (mode_hdr->page_length >= (len - sizeof(struct ipr_mode_page_hdr)))
4446				return mode_hdr;
4447			break;
4448		} else {
4449			page_length = (sizeof(struct ipr_mode_page_hdr) +
4450				       mode_hdr->page_length);
4451			length -= page_length;
4452			mode_hdr = (struct ipr_mode_page_hdr *)
4453				((unsigned long)mode_hdr + page_length);
4454		}
4455	}
4456	return NULL;
4457}
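
/*
 * For reference, the buffer walked above follows the SPC mode
 * parameter list layout returned by MODE SENSE(6):
 *
 *	mode parameter header (4 bytes; hdr.length excludes itself)
 *	block descriptors     (hdr.block_desc_len bytes)
 *	mode pages            (page header + data, walked above)
 */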
4458
4459/**
4460 * ipr_check_term_power - Check for term power errors
4461 * @ioa_cfg:	ioa config struct
4462 * @mode_pages:	IOAFP mode pages buffer
4463 *
4464 * Check the IOAFP's mode page 28 for term power errors
4465 *
4466 * Return value:
4467 * 	nothing
4468 **/
4469static void ipr_check_term_power(struct ipr_ioa_cfg *ioa_cfg,
4470				 struct ipr_mode_pages *mode_pages)
4471{
4472	int i;
4473	int entry_length;
4474	struct ipr_dev_bus_entry *bus;
4475	struct ipr_mode_page28 *mode_page;
4476
4477	mode_page = ipr_get_mode_page(mode_pages, 0x28,
4478				      sizeof(struct ipr_mode_page28));
4479
4480	entry_length = mode_page->entry_length;
4481
4482	bus = mode_page->bus;
4483
4484	for (i = 0; i < mode_page->num_entries; i++) {
4485		if (bus->flags & IPR_SCSI_ATTR_NO_TERM_PWR) {
4486			dev_err(&ioa_cfg->pdev->dev,
4487				"Term power is absent on scsi bus %d\n",
4488				bus->res_addr.bus);
4489		}
4490
4491		bus = (struct ipr_dev_bus_entry *)((char *)bus + entry_length);
4492	}
4493}
4494
4495/**
4496 * ipr_scsi_bus_speed_limit - Limit the SCSI speed based on SES table
4497 * @ioa_cfg:	ioa config struct
4498 *
4499 * Looks through the config table checking for SES devices. If
4500 * the SES device is in the SES table indicating a maximum SCSI
4501 * bus speed, the speed is limited for the bus.
4502 *
4503 * Return value:
4504 * 	none
4505 **/
4506static void ipr_scsi_bus_speed_limit(struct ipr_ioa_cfg *ioa_cfg)
4507{
4508	u32 max_xfer_rate;
4509	int i;
4510
4511	for (i = 0; i < IPR_MAX_NUM_BUSES; i++) {
4512		max_xfer_rate = ipr_get_max_scsi_speed(ioa_cfg, i,
4513						       ioa_cfg->bus_attr[i].bus_width);
4514
4515		if (max_xfer_rate < ioa_cfg->bus_attr[i].max_xfer_rate)
4516			ioa_cfg->bus_attr[i].max_xfer_rate = max_xfer_rate;
4517	}
4518}
4519
4520/**
4521 * ipr_modify_ioafp_mode_page_28 - Modify IOAFP Mode Page 28
4522 * @ioa_cfg:	ioa config struct
4523 * @mode_pages:	mode page 28 buffer
4524 *
4525 * Updates mode page 28 based on driver configuration
4526 *
4527 * Return value:
4528 * 	none
4529 **/
4530static void ipr_modify_ioafp_mode_page_28(struct ipr_ioa_cfg *ioa_cfg,
4531					  	struct ipr_mode_pages *mode_pages)
4532{
4533	int i, entry_length;
4534	struct ipr_dev_bus_entry *bus;
4535	struct ipr_bus_attributes *bus_attr;
4536	struct ipr_mode_page28 *mode_page;
4537
4538	mode_page = ipr_get_mode_page(mode_pages, 0x28,
4539				      sizeof(struct ipr_mode_page28));
4540
4541	entry_length = mode_page->entry_length;
4542
4543	/* Loop for each device bus entry */
4544	for (i = 0, bus = mode_page->bus;
4545	     i < mode_page->num_entries;
4546	     i++, bus = (struct ipr_dev_bus_entry *)((u8 *)bus + entry_length)) {
4547		if (bus->res_addr.bus > IPR_MAX_NUM_BUSES) {
4548			dev_err(&ioa_cfg->pdev->dev,
4549				"Invalid resource address reported: 0x%08X\n",
4550				IPR_GET_PHYS_LOC(bus->res_addr));
4551			continue;
4552		}
4553
4554		bus_attr = &ioa_cfg->bus_attr[i];
4555		bus->extended_reset_delay = IPR_EXTENDED_RESET_DELAY;
4556		bus->bus_width = bus_attr->bus_width;
4557		bus->max_xfer_rate = cpu_to_be32(bus_attr->max_xfer_rate);
4558		bus->flags &= ~IPR_SCSI_ATTR_QAS_MASK;
4559		if (bus_attr->qas_enabled)
4560			bus->flags |= IPR_SCSI_ATTR_ENABLE_QAS;
4561		else
4562			bus->flags |= IPR_SCSI_ATTR_DISABLE_QAS;
4563	}
4564}
4565
4566/**
4567 * ipr_build_mode_select - Build a mode select command
4568 * @ipr_cmd:	ipr command struct
4569 * @res_handle:	resource handle to send command to
4570 * @parm:		Byte 1 of Mode Select command
4571 * @dma_addr:	DMA buffer address
4572 * @xfer_len:	data transfer length
4573 *
4574 * Return value:
4575 * 	none
4576 **/
4577static void ipr_build_mode_select(struct ipr_cmnd *ipr_cmd,
4578				  __be32 res_handle, u8 parm, u32 dma_addr,
4579				  u8 xfer_len)
4580{
4581	struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;
4582	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
4583
4584	ioarcb->res_handle = res_handle;
4585	ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
4586	ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
4587	ioarcb->cmd_pkt.cdb[0] = MODE_SELECT;
4588	ioarcb->cmd_pkt.cdb[1] = parm;
4589	ioarcb->cmd_pkt.cdb[4] = xfer_len;
4590
4591	ioadl->flags_and_data_len =
4592		cpu_to_be32(IPR_IOADL_FLAGS_WRITE_LAST | xfer_len);
4593	ioadl->address = cpu_to_be32(dma_addr);
4594	ioarcb->write_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
4595	ioarcb->write_data_transfer_length = cpu_to_be32(xfer_len);
4596}
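
/*
 * For reference, the MODE SELECT(6) CDB built above; per SPC, the
 * 0x11 passed by the page 28 caller sets the PF (0x10) and SP
 * (0x01) bits:
 *
 *	cdb[0] = MODE_SELECT (0x15)
 *	cdb[1] = parm
 *	cdb[4] = xfer_len
 */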
4597
4598/**
4599 * ipr_ioafp_mode_select_page28 - Issue Mode Select Page 28 to IOA
4600 * @ipr_cmd:	ipr command struct
4601 *
4602 * This function sets up the SCSI bus attributes and sends
4603 * a Mode Select for Page 28 to activate them.
4604 *
4605 * Return value:
4606 * 	IPR_RC_JOB_RETURN
4607 **/
4608static int ipr_ioafp_mode_select_page28(struct ipr_cmnd *ipr_cmd)
4609{
4610	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
4611	struct ipr_mode_pages *mode_pages = &ioa_cfg->vpd_cbs->mode_pages;
4612	int length;
4613
4614	ENTER;
4615	if (ioa_cfg->saved_mode_pages) {
4616		memcpy(mode_pages, ioa_cfg->saved_mode_pages,
4617		       ioa_cfg->saved_mode_page_len);
4618		length = ioa_cfg->saved_mode_page_len;
4619	} else {
4620		ipr_scsi_bus_speed_limit(ioa_cfg);
4621		ipr_check_term_power(ioa_cfg, mode_pages);
4622		ipr_modify_ioafp_mode_page_28(ioa_cfg, mode_pages);
4623		length = mode_pages->hdr.length + 1;
4624		mode_pages->hdr.length = 0;
4625	}
4626
4627	ipr_build_mode_select(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE), 0x11,
4628			      ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, mode_pages),
4629			      length);
4630
4631	ipr_cmd->job_step = ipr_setup_write_cache;
4632	ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
4633
4634	LEAVE;
4635	return IPR_RC_JOB_RETURN;
4636}
4637
4638/**
4639 * ipr_build_mode_sense - Builds a mode sense command
4640 * @ipr_cmd:	ipr command struct
4641 * @res_handle:	resource handle to send command to
4642 * @parm:		Byte 2 of mode sense command
4643 * @dma_addr:	DMA address of mode sense buffer
4644 * @xfer_len:	Size of DMA buffer
4645 *
4646 * Return value:
4647 * 	none
4648 **/
4649static void ipr_build_mode_sense(struct ipr_cmnd *ipr_cmd,
4650				 __be32 res_handle,
4651				 u8 parm, u32 dma_addr, u8 xfer_len)
4652{
4653	struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;
4654	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
4655
4656	ioarcb->res_handle = res_handle;
4657	ioarcb->cmd_pkt.cdb[0] = MODE_SENSE;
4658	ioarcb->cmd_pkt.cdb[2] = parm;
4659	ioarcb->cmd_pkt.cdb[4] = xfer_len;
4660	ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
4661
4662	ioadl->flags_and_data_len =
4663		cpu_to_be32(IPR_IOADL_FLAGS_READ_LAST | xfer_len);
4664	ioadl->address = cpu_to_be32(dma_addr);
4665	ioarcb->read_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
4666	ioarcb->read_data_transfer_length = cpu_to_be32(xfer_len);
4667}
4668
4669/**
4670 * ipr_ioafp_mode_sense_page28 - Issue Mode Sense Page 28 to IOA
4671 * @ipr_cmd:	ipr command struct
4672 *
4673 * This function sends a Page 28 mode sense to the IOA to
4674 * retrieve SCSI bus attributes.
4675 *
4676 * Return value:
4677 * 	IPR_RC_JOB_RETURN
4678 **/
4679static int ipr_ioafp_mode_sense_page28(struct ipr_cmnd *ipr_cmd)
4680{
4681	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
4682
4683	ENTER;
4684	ipr_build_mode_sense(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE),
4685			     0x28, ioa_cfg->vpd_cbs_dma +
4686			     offsetof(struct ipr_misc_cbs, mode_pages),
4687			     sizeof(struct ipr_mode_pages));
4688
4689	ipr_cmd->job_step = ipr_ioafp_mode_select_page28;
4690
4691	ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
4692
4693	LEAVE;
4694	return IPR_RC_JOB_RETURN;
4695}
4696
4697/**
4698 * ipr_init_res_table - Initialize the resource table
4699 * @ipr_cmd:	ipr command struct
4700 *
4701 * This function looks through the existing resource table, comparing
4702 * it with the config table. It will take care of old/new
4703 * devices and schedule adding/removing them from the mid-layer
4704 * as appropriate.
4705 *
4706 * Return value:
4707 * 	IPR_RC_JOB_CONTINUE
4708 **/
4709static int ipr_init_res_table(struct ipr_cmnd *ipr_cmd)
4710{
4711	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
4712	struct ipr_resource_entry *res, *temp;
4713	struct ipr_config_table_entry *cfgte;
4714	int found, i;
4715	LIST_HEAD(old_res);
4716
4717	ENTER;
4718	if (ioa_cfg->cfg_table->hdr.flags & IPR_UCODE_DOWNLOAD_REQ)
4719		dev_err(&ioa_cfg->pdev->dev, "Microcode download required\n");
4720
4721	list_for_each_entry_safe(res, temp, &ioa_cfg->used_res_q, queue)
4722		list_move_tail(&res->queue, &old_res);
4723
4724	for (i = 0; i < ioa_cfg->cfg_table->hdr.num_entries; i++) {
4725		cfgte = &ioa_cfg->cfg_table->dev[i];
4726		found = 0;
4727
4728		list_for_each_entry_safe(res, temp, &old_res, queue) {
4729			if (!memcmp(&res->cfgte.res_addr,
4730				    &cfgte->res_addr, sizeof(cfgte->res_addr))) {
4731				list_move_tail(&res->queue, &ioa_cfg->used_res_q);
4732				found = 1;
4733				break;
4734			}
4735		}
4736
4737		if (!found) {
4738			if (list_empty(&ioa_cfg->free_res_q)) {
4739				dev_err(&ioa_cfg->pdev->dev, "Too many devices attached\n");
4740				break;
4741			}
4742
4743			found = 1;
4744			res = list_entry(ioa_cfg->free_res_q.next,
4745					 struct ipr_resource_entry, queue);
4746			list_move_tail(&res->queue, &ioa_cfg->used_res_q);
4747			ipr_init_res_entry(res);
4748			res->add_to_ml = 1;
4749		}
4750
4751		if (found)
4752			memcpy(&res->cfgte, cfgte, sizeof(struct ipr_config_table_entry));
4753	}
4754
4755	list_for_each_entry_safe(res, temp, &old_res, queue) {
4756		if (res->sdev) {
4757			res->del_from_ml = 1;
4758			res->sdev->hostdata = NULL;
4759			list_move_tail(&res->queue, &ioa_cfg->used_res_q);
4760		} else {
4761			list_move_tail(&res->queue, &ioa_cfg->free_res_q);
4762		}
4763	}
4764
4765	ipr_cmd->job_step = ipr_ioafp_mode_sense_page28;
4766
4767	LEAVE;
4768	return IPR_RC_JOB_CONTINUE;
4769}
4770
4771/**
4772 * ipr_ioafp_query_ioa_cfg - Send a Query IOA Config to the adapter.
4773 * @ipr_cmd:	ipr command struct
4774 *
4775 * This function sends a Query IOA Configuration command
4776 * to the adapter to retrieve the IOA configuration table.
4777 *
4778 * Return value:
4779 * 	IPR_RC_JOB_RETURN
4780 **/
4781static int ipr_ioafp_query_ioa_cfg(struct ipr_cmnd *ipr_cmd)
4782{
4783	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
4784	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
4785	struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;
4786	struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
4787
4788	ENTER;
4789	dev_info(&ioa_cfg->pdev->dev, "Adapter firmware version: %02X%02X%02X%02X\n",
4790		 ucode_vpd->major_release, ucode_vpd->card_type,
4791		 ucode_vpd->minor_release[0], ucode_vpd->minor_release[1]);
4792	ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
4793	ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
4794
4795	ioarcb->cmd_pkt.cdb[0] = IPR_QUERY_IOA_CONFIG;
4796	ioarcb->cmd_pkt.cdb[7] = (sizeof(struct ipr_config_table) >> 8) & 0xff;
4797	ioarcb->cmd_pkt.cdb[8] = sizeof(struct ipr_config_table) & 0xff;
4798
4799	ioarcb->read_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
4800	ioarcb->read_data_transfer_length =
4801		cpu_to_be32(sizeof(struct ipr_config_table));
4802
4803	ioadl->address = cpu_to_be32(ioa_cfg->cfg_table_dma);
4804	ioadl->flags_and_data_len =
4805		cpu_to_be32(IPR_IOADL_FLAGS_READ_LAST | sizeof(struct ipr_config_table));
4806
4807	ipr_cmd->job_step = ipr_init_res_table;
4808
4809	ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
4810
4811	LEAVE;
4812	return IPR_RC_JOB_RETURN;
4813}
4814
4815/**
4816 * ipr_ioafp_inquiry - Send an Inquiry to the adapter.
4817 * @ipr_cmd:	ipr command struct
 * @flags:	inquiry flags byte (EVPD bit)
 * @page:	page code
 * @dma_addr:	DMA address of the inquiry buffer
 * @xfer_len:	transfer length
4818 *
4819 * This utility function sends an inquiry to the adapter.
4820 *
4821 * Return value:
4822 * 	none
4823 **/
4824static void ipr_ioafp_inquiry(struct ipr_cmnd *ipr_cmd, u8 flags, u8 page,
4825			      u32 dma_addr, u8 xfer_len)
4826{
4827	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
4828	struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;
4829
4830	ENTER;
4831	ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
4832	ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
4833
4834	ioarcb->cmd_pkt.cdb[0] = INQUIRY;
4835	ioarcb->cmd_pkt.cdb[1] = flags;
4836	ioarcb->cmd_pkt.cdb[2] = page;
4837	ioarcb->cmd_pkt.cdb[4] = xfer_len;
4838
4839	ioarcb->read_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
4840	ioarcb->read_data_transfer_length = cpu_to_be32(xfer_len);
4841
4842	ioadl->address = cpu_to_be32(dma_addr);
4843	ioadl->flags_and_data_len =
4844		cpu_to_be32(IPR_IOADL_FLAGS_READ_LAST | xfer_len);
4845
4846	ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
4847	LEAVE;
4848}
4849
4850/**
4851 * ipr_inquiry_page_supported - Is the given inquiry page supported
4852 * @page0:		inquiry page 0 buffer
4853 * @page:		page code.
4854 *
4855 * This function determines if the specified inquiry page is supported.
4856 *
4857 * Return value:
4858 *	1 if page is supported / 0 if not
4859 **/
4860static int ipr_inquiry_page_supported(struct ipr_inquiry_page0 *page0, u8 page)
4861{
4862	int i;
4863
4864	for (i = 0; i < min_t(u8, page0->len, IPR_INQUIRY_PAGE0_ENTRIES); i++)
4865		if (page0->page[i] == page)
4866			return 1;
4867
4868	return 0;
4869}
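
/*
 * For reference, INQUIRY VPD page 0x00 ("Supported VPD Pages") is a
 * header followed by one supported page code per byte, which is
 * what the scan above walks:
 *
 *	byte 0:   peripheral qualifier/device type
 *	byte 1:   page code (0x00)
 *	byte 3:   page length (count of page codes that follow)
 *	byte 4..: supported page codes
 */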
4870
4871/**
4872 * ipr_ioafp_page3_inquiry - Send a Page 3 Inquiry to the adapter.
4873 * @ipr_cmd:	ipr command struct
4874 *
4875 * This function sends a Page 3 inquiry to the adapter
4876 * to retrieve software VPD information.
4877 *
4878 * Return value:
4879 * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
4880 **/
4881static int ipr_ioafp_page3_inquiry(struct ipr_cmnd *ipr_cmd)
4882{
4883	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
4884	struct ipr_inquiry_page0 *page0 = &ioa_cfg->vpd_cbs->page0_data;
4885
4886	ENTER;
4887
4888	if (!ipr_inquiry_page_supported(page0, 1))
4889		ioa_cfg->cache_state = CACHE_NONE;
4890
4891	ipr_cmd->job_step = ipr_ioafp_query_ioa_cfg;
4892
4893	ipr_ioafp_inquiry(ipr_cmd, 1, 3,
4894			  ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, page3_data),
4895			  sizeof(struct ipr_inquiry_page3));
4896
4897	LEAVE;
4898	return IPR_RC_JOB_RETURN;
4899}
4900
4901/**
4902 * ipr_ioafp_page0_inquiry - Send a Page 0 Inquiry to the adapter.
4903 * @ipr_cmd:	ipr command struct
4904 *
4905 * This function sends a Page 0 inquiry to the adapter
4906 * to retrieve supported inquiry pages.
4907 *
4908 * Return value:
4909 * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
4910 **/
4911static int ipr_ioafp_page0_inquiry(struct ipr_cmnd *ipr_cmd)
4912{
4913	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
4914	char type[5];
4915
4916	ENTER;
4917
4918	/* Grab the type out of the VPD and store it away */
4919	memcpy(type, ioa_cfg->vpd_cbs->ioa_vpd.std_inq_data.vpids.product_id, 4);
4920	type[4] = '\0';
4921	ioa_cfg->type = simple_strtoul((char *)type, NULL, 16);
4922
4923	ipr_cmd->job_step = ipr_ioafp_page3_inquiry;
4924
4925	ipr_ioafp_inquiry(ipr_cmd, 1, 0,
4926			  ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, page0_data),
4927			  sizeof(struct ipr_inquiry_page0));
4928
4929	LEAVE;
4930	return IPR_RC_JOB_RETURN;
4931}
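
/*
 * Worked example: for a product ID beginning "5702", the code above
 * stores type[] = "5702" and simple_strtoul(..., 16) yields
 * ioa_cfg->type == 0x5702, which is the value tested in
 * ipr_invalid_adapter() above.
 */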
4932
4933/**
4934 * ipr_ioafp_std_inquiry - Send a Standard Inquiry to the adapter.
4935 * @ipr_cmd:	ipr command struct
4936 *
4937 * This function sends a standard inquiry to the adapter.
4938 *
4939 * Return value:
4940 * 	IPR_RC_JOB_RETURN
4941 **/
4942static int ipr_ioafp_std_inquiry(struct ipr_cmnd *ipr_cmd)
4943{
4944	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
4945
4946	ENTER;
4947	ipr_cmd->job_step = ipr_ioafp_page0_inquiry;
4948
4949	ipr_ioafp_inquiry(ipr_cmd, 0, 0,
4950			  ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, ioa_vpd),
4951			  sizeof(struct ipr_ioa_vpd));
4952
4953	LEAVE;
4954	return IPR_RC_JOB_RETURN;
4955}
4956
4957/**
4958 * ipr_ioafp_indentify_hrrq - Send Identify Host RRQ.
4959 * @ipr_cmd:	ipr command struct
4960 *
4961 * This function sends an Identify Host Request Response Queue
4962 * command to establish the HRRQ with the adapter.
4963 *
4964 * Return value:
4965 * 	IPR_RC_JOB_RETURN
4966 **/
4967static int ipr_ioafp_indentify_hrrq(struct ipr_cmnd *ipr_cmd)
4968{
4969	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
4970	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
4971
4972	ENTER;
4973	dev_info(&ioa_cfg->pdev->dev, "Starting IOA initialization sequence.\n");
4974
4975	ioarcb->cmd_pkt.cdb[0] = IPR_ID_HOST_RR_Q;
4976	ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
4977
4978	ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
4979	ioarcb->cmd_pkt.cdb[2] =
4980		((u32) ioa_cfg->host_rrq_dma >> 24) & 0xff;
4981	ioarcb->cmd_pkt.cdb[3] =
4982		((u32) ioa_cfg->host_rrq_dma >> 16) & 0xff;
4983	ioarcb->cmd_pkt.cdb[4] =
4984		((u32) ioa_cfg->host_rrq_dma >> 8) & 0xff;
4985	ioarcb->cmd_pkt.cdb[5] =
4986		((u32) ioa_cfg->host_rrq_dma) & 0xff;
4987	ioarcb->cmd_pkt.cdb[7] =
4988		((sizeof(u32) * IPR_NUM_CMD_BLKS) >> 8) & 0xff;
4989	ioarcb->cmd_pkt.cdb[8] =
4990		(sizeof(u32) * IPR_NUM_CMD_BLKS) & 0xff;
4991
4992	ipr_cmd->job_step = ipr_ioafp_std_inquiry;
4993
4994	ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
4995
4996	LEAVE;
4997	return IPR_RC_JOB_RETURN;
4998}
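
/*
 * Worked example (illustrative address): with host_rrq_dma =
 * 0x12345678, the shifts above load the CDB big-endian:
 * cdb[2] = 0x12, cdb[3] = 0x34, cdb[4] = 0x56, cdb[5] = 0x78.
 * cdb[7]/cdb[8] carry the queue length in bytes,
 * sizeof(u32) * IPR_NUM_CMD_BLKS, encoded the same way.
 */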
4999
5000/**
5001 * ipr_reset_timer_done - Adapter reset timer function
5002 * @ipr_cmd:	ipr command struct
5003 *
5004 * Description: This function is used in adapter reset processing
5005 * for timing events. If the reset_cmd pointer in the IOA
5006 * config struct does not point to this command, we are doing
5007 * nested resets and fail_all_ops will take care of freeing
5008 * the command block.
5009 *
5010 * Return value:
5011 * 	none
5012 **/
5013static void ipr_reset_timer_done(struct ipr_cmnd *ipr_cmd)
5014{
5015	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5016	unsigned long lock_flags = 0;
5017
5018	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
5019
5020	if (ioa_cfg->reset_cmd == ipr_cmd) {
5021		list_del(&ipr_cmd->queue);
5022		ipr_cmd->done(ipr_cmd);
5023	}
5024
5025	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5026}
5027
5028/**
5029 * ipr_reset_start_timer - Start a timer for adapter reset job
5030 * @ipr_cmd:	ipr command struct
5031 * @timeout:	timeout value
5032 *
5033 * Description: This function is used in adapter reset processing
5034 * for timing events. If the reset_cmd pointer in the IOA
5035 * config struct does not point to this command, we are doing
5036 * nested resets and fail_all_ops will take care of freeing
5037 * the command block.
5038 *
5039 * Return value:
5040 * 	none
5041 **/
5042static void ipr_reset_start_timer(struct ipr_cmnd *ipr_cmd,
5043				  unsigned long timeout)
5044{
5045	list_add_tail(&ipr_cmd->queue, &ipr_cmd->ioa_cfg->pending_q);
5046	ipr_cmd->done = ipr_reset_ioa_job;
5047
5048	ipr_cmd->timer.data = (unsigned long) ipr_cmd;
5049	ipr_cmd->timer.expires = jiffies + timeout;
5050	ipr_cmd->timer.function = (void (*)(unsigned long))ipr_reset_timer_done;
5051	add_timer(&ipr_cmd->timer);
5052}
5053
5054/**
5055 * ipr_init_ioa_mem - Initialize ioa_cfg control block
5056 * @ioa_cfg:	ioa cfg struct
5057 *
5058 * Return value:
5059 * 	nothing
5060 **/
5061static void ipr_init_ioa_mem(struct ipr_ioa_cfg *ioa_cfg)
5062{
5063	memset(ioa_cfg->host_rrq, 0, sizeof(u32) * IPR_NUM_CMD_BLKS);
5064
5065	/* Initialize Host RRQ pointers */
5066	ioa_cfg->hrrq_start = ioa_cfg->host_rrq;
5067	ioa_cfg->hrrq_end = &ioa_cfg->host_rrq[IPR_NUM_CMD_BLKS - 1];
5068	ioa_cfg->hrrq_curr = ioa_cfg->hrrq_start;
5069	ioa_cfg->toggle_bit = 1;
5070
5071	/* Zero out config table */
5072	memset(ioa_cfg->cfg_table, 0, sizeof(struct ipr_config_table));
5073}
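
/*
 * Note on toggle_bit (a sketch of the convention, inferred from the
 * initialization here): the adapter tags each response it writes to
 * the host RRQ with the current toggle bit, so a consumer can tell
 * fresh entries from stale ones after the queue wraps. The
 * advance/wrap step looks roughly like:
 *
 *	if (++hrrq_curr > hrrq_end) {
 *		hrrq_curr = hrrq_start;
 *		toggle_bit ^= 1;
 *	}
 */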
5074
5075/**
5076 * ipr_reset_enable_ioa - Enable the IOA following a reset.
5077 * @ipr_cmd:	ipr command struct
5078 *
5079 * This function reinitializes some control blocks and
5080 * enables destructive diagnostics on the adapter.
5081 *
5082 * Return value:
5083 * 	IPR_RC_JOB_RETURN
5084 **/
5085static int ipr_reset_enable_ioa(struct ipr_cmnd *ipr_cmd)
5086{
5087	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5088	volatile u32 int_reg;
5089
5090	ENTER;
5091	ipr_cmd->job_step = ipr_ioafp_indentify_hrrq;
5092	ipr_init_ioa_mem(ioa_cfg);
5093
5094	ioa_cfg->allow_interrupts = 1;
5095	int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
5096
5097	if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
5098		writel((IPR_PCII_ERROR_INTERRUPTS | IPR_PCII_HRRQ_UPDATED),
5099		       ioa_cfg->regs.clr_interrupt_mask_reg);
5100		int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
5101		return IPR_RC_JOB_CONTINUE;
5102	}
5103
5104	/* Enable destructive diagnostics on IOA */
5105	writel(ioa_cfg->doorbell, ioa_cfg->regs.set_uproc_interrupt_reg);
5106
5107	writel(IPR_PCII_OPER_INTERRUPTS, ioa_cfg->regs.clr_interrupt_mask_reg);
5108	int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
5109
5110	dev_info(&ioa_cfg->pdev->dev, "Initializing IOA.\n");
5111
5112	ipr_cmd->timer.data = (unsigned long) ipr_cmd;
5113	ipr_cmd->timer.expires = jiffies + (ipr_transop_timeout * HZ);
5114	ipr_cmd->timer.function = (void (*)(unsigned long))ipr_oper_timeout;
5115	ipr_cmd->done = ipr_reset_ioa_job;
5116	add_timer(&ipr_cmd->timer);
5117	list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);
5118
5119	LEAVE;
5120	return IPR_RC_JOB_RETURN;
5121}
5122
5123/**
5124 * ipr_reset_wait_for_dump - Wait for a dump to timeout.
5125 * @ipr_cmd:	ipr command struct
5126 *
5127 * This function is invoked when an adapter dump has run out
5128 * of processing time.
5129 *
5130 * Return value:
5131 * 	IPR_RC_JOB_CONTINUE
5132 **/
5133static int ipr_reset_wait_for_dump(struct ipr_cmnd *ipr_cmd)
5134{
5135	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5136
5137	if (ioa_cfg->sdt_state == GET_DUMP)
5138		ioa_cfg->sdt_state = ABORT_DUMP;
5139
5140	ipr_cmd->job_step = ipr_reset_alert;
5141
5142	return IPR_RC_JOB_CONTINUE;
5143}
5144
5145/**
5146 * ipr_unit_check_no_data - Log a unit check/no data error
5147 * @ioa_cfg:		ioa config struct
5148 *
5149 * Logs an error indicating the adapter unit checked, but for some
5150 * reason, we were unable to fetch the unit check buffer.
5151 *
5152 * Return value:
5153 * 	nothing
5154 **/
5155static void ipr_unit_check_no_data(struct ipr_ioa_cfg *ioa_cfg)
5156{
5157	ioa_cfg->errors_logged++;
5158	dev_err(&ioa_cfg->pdev->dev, "IOA unit check with no data\n");
5159}
5160
5161/**
5162 * ipr_get_unit_check_buffer - Get the unit check buffer from the IOA
5163 * @ioa_cfg:		ioa config struct
5164 *
5165 * Fetches the unit check buffer from the adapter by clocking the data
5166 * through the mailbox register.
5167 *
5168 * Return value:
5169 * 	nothing
5170 **/
5171static void ipr_get_unit_check_buffer(struct ipr_ioa_cfg *ioa_cfg)
5172{
5173	unsigned long mailbox;
5174	struct ipr_hostrcb *hostrcb;
5175	struct ipr_uc_sdt sdt;
5176	int rc, length;
5177
5178	mailbox = readl(ioa_cfg->ioa_mailbox);
5179
5180	if (!ipr_sdt_is_fmt2(mailbox)) {
5181		ipr_unit_check_no_data(ioa_cfg);
5182		return;
5183	}
5184
5185	memset(&sdt, 0, sizeof(struct ipr_uc_sdt));
5186	rc = ipr_get_ldump_data_section(ioa_cfg, mailbox, (__be32 *) &sdt,
5187					(sizeof(struct ipr_uc_sdt)) / sizeof(__be32));
5188
5189	if (rc || (be32_to_cpu(sdt.hdr.state) != IPR_FMT2_SDT_READY_TO_USE) ||
5190	    !(sdt.entry[0].flags & IPR_SDT_VALID_ENTRY)) {
5191		ipr_unit_check_no_data(ioa_cfg);
5192		return;
5193	}
5194
5195	/* Find length of the first sdt entry (UC buffer) */
5196	length = (be32_to_cpu(sdt.entry[0].end_offset) -
5197		  be32_to_cpu(sdt.entry[0].bar_str_offset)) & IPR_FMT2_MBX_ADDR_MASK;
5198
5199	hostrcb = list_entry(ioa_cfg->hostrcb_free_q.next,
5200			     struct ipr_hostrcb, queue);
5201	list_del(&hostrcb->queue);
5202	memset(&hostrcb->hcam, 0, sizeof(hostrcb->hcam));
5203
5204	rc = ipr_get_ldump_data_section(ioa_cfg,
5205					be32_to_cpu(sdt.entry[0].bar_str_offset),
5206					(__be32 *)&hostrcb->hcam,
5207					min(length, (int)sizeof(hostrcb->hcam)) / sizeof(__be32));
5208
5209	if (!rc)
5210		ipr_handle_log_data(ioa_cfg, hostrcb);
5211	else
5212		ipr_unit_check_no_data(ioa_cfg);
5213
5214	list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
5215}
5216
5217/**
5218 * ipr_reset_restore_cfg_space - Restore PCI config space.
5219 * @ipr_cmd:	ipr command struct
5220 *
5221 * Description: This function restores the saved PCI config space of
5222 * the adapter, fails all outstanding ops back to the callers, and
5223 * fetches the dump/unit check if applicable to this reset.
5224 *
5225 * Return value:
5226 * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
5227 **/
5228static int ipr_reset_restore_cfg_space(struct ipr_cmnd *ipr_cmd)
5229{
5230	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5231	int rc;
5232
5233	ENTER;
5234	pci_unblock_user_cfg_access(ioa_cfg->pdev);
5235	rc = pci_restore_state(ioa_cfg->pdev);
5236
5237	if (rc != PCIBIOS_SUCCESSFUL) {
5238		ipr_cmd->ioasa.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
5239		return IPR_RC_JOB_CONTINUE;
5240	}
5241
5242	if (ipr_set_pcix_cmd_reg(ioa_cfg)) {
5243		ipr_cmd->ioasa.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
5244		return IPR_RC_JOB_CONTINUE;
5245	}
5246
5247	ipr_fail_all_ops(ioa_cfg);
5248
5249	if (ioa_cfg->ioa_unit_checked) {
5250		ioa_cfg->ioa_unit_checked = 0;
5251		ipr_get_unit_check_buffer(ioa_cfg);
5252		ipr_cmd->job_step = ipr_reset_alert;
5253		ipr_reset_start_timer(ipr_cmd, 0);
5254		return IPR_RC_JOB_RETURN;
5255	}
5256
5257	if (ioa_cfg->in_ioa_bringdown) {
5258		ipr_cmd->job_step = ipr_ioa_bringdown_done;
5259	} else {
5260		ipr_cmd->job_step = ipr_reset_enable_ioa;
5261
5262		if (GET_DUMP == ioa_cfg->sdt_state) {
5263			ipr_reset_start_timer(ipr_cmd, IPR_DUMP_TIMEOUT);
5264			ipr_cmd->job_step = ipr_reset_wait_for_dump;
5265			schedule_work(&ioa_cfg->work_q);
5266			return IPR_RC_JOB_RETURN;
5267		}
5268	}
5269
5270	LEAVE;
5271	return IPR_RC_JOB_CONTINUE;
5272}
5273
5274/**
5275 * ipr_reset_start_bist - Run BIST on the adapter.
5276 * @ipr_cmd:	ipr command struct
5277 *
5278 * Description: This function runs BIST on the adapter, then delays 2 seconds.
5279 *
5280 * Return value:
5281 * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
5282 **/
5283static int ipr_reset_start_bist(struct ipr_cmnd *ipr_cmd)
5284{
5285	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5286	int rc;
5287
5288	ENTER;
5289	pci_block_user_cfg_access(ioa_cfg->pdev);
5290	rc = pci_write_config_byte(ioa_cfg->pdev, PCI_BIST, PCI_BIST_START);
5291
5292	if (rc != PCIBIOS_SUCCESSFUL) {
5293		ipr_cmd->ioasa.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
5294		rc = IPR_RC_JOB_CONTINUE;
5295	} else {
5296		ipr_cmd->job_step = ipr_reset_restore_cfg_space;
5297		ipr_reset_start_timer(ipr_cmd, IPR_WAIT_FOR_BIST_TIMEOUT);
5298		rc = IPR_RC_JOB_RETURN;
5299	}
5300
5301	LEAVE;
5302	return rc;
5303}
5304
5305/**
5306 * ipr_reset_allowed - Query whether or not IOA can be reset
5307 * @ioa_cfg:	ioa config struct
5308 *
5309 * Return value:
5310 * 	0 if reset not allowed / non-zero if reset is allowed
5311 **/
5312static int ipr_reset_allowed(struct ipr_ioa_cfg *ioa_cfg)
5313{
5314	volatile u32 temp_reg;
5315
5316	temp_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
5317	return ((temp_reg & IPR_PCII_CRITICAL_OPERATION) == 0);
5318}
5319
5320/**
5321 * ipr_reset_wait_to_start_bist - Wait for permission to reset IOA.
5322 * @ipr_cmd:	ipr command struct
5323 *
5324 * Description: This function waits for adapter permission to run BIST,
5325 * then runs BIST. If the adapter does not give permission after a
5326 * reasonable time, we will reset the adapter anyway. The impact of
5327 * resetting the adapter without warning the adapter is the risk of
5328 * losing the persistent error log on the adapter. If the adapter is
5329 * reset while it is writing to the flash on the adapter, the flash
5330 * segment will have bad ECC and be zeroed.
5331 *
5332 * Return value:
5333 * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
5334 **/
5335static int ipr_reset_wait_to_start_bist(struct ipr_cmnd *ipr_cmd)
5336{
5337	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5338	int rc = IPR_RC_JOB_RETURN;
5339
5340	if (!ipr_reset_allowed(ioa_cfg) && ipr_cmd->u.time_left) {
5341		ipr_cmd->u.time_left -= IPR_CHECK_FOR_RESET_TIMEOUT;
5342		ipr_reset_start_timer(ipr_cmd, IPR_CHECK_FOR_RESET_TIMEOUT);
5343	} else {
5344		ipr_cmd->job_step = ipr_reset_start_bist;
5345		rc = IPR_RC_JOB_CONTINUE;
5346	}
5347
5348	return rc;
5349}
5350
5351/**
5352 * ipr_reset_alert_part2 - Alert the adapter of a pending reset
5353 * @ipr_cmd:	ipr command struct
5354 *
5355 * Description: This function alerts the adapter that it will be reset.
5356 * If memory space is not currently enabled, proceed directly
5357 * to running BIST on the adapter. The timer must always be started
5358 * so we guarantee we do not run BIST from ipr_isr.
5359 *
5360 * Return value:
5361 * 	IPR_RC_JOB_RETURN
5362 **/
5363static int ipr_reset_alert(struct ipr_cmnd *ipr_cmd)
5364{
5365	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5366	u16 cmd_reg;
5367	int rc;
5368
5369	ENTER;
5370	rc = pci_read_config_word(ioa_cfg->pdev, PCI_COMMAND, &cmd_reg);
5371
5372	if ((rc == PCIBIOS_SUCCESSFUL) && (cmd_reg & PCI_COMMAND_MEMORY)) {
5373		ipr_mask_and_clear_interrupts(ioa_cfg, ~0);
5374		writel(IPR_UPROCI_RESET_ALERT, ioa_cfg->regs.set_uproc_interrupt_reg);
5375		ipr_cmd->job_step = ipr_reset_wait_to_start_bist;
5376	} else {
5377		ipr_cmd->job_step = ipr_reset_start_bist;
5378	}
5379
5380	ipr_cmd->u.time_left = IPR_WAIT_FOR_RESET_TIMEOUT;
5381	ipr_reset_start_timer(ipr_cmd, IPR_CHECK_FOR_RESET_TIMEOUT);
5382
5383	LEAVE;
5384	return IPR_RC_JOB_RETURN;
5385}
5386
5387/**
5388 * ipr_reset_ucode_download_done - Microcode download completion
5389 * @ipr_cmd:	ipr command struct
5390 *
5391 * Description: This function unmaps the microcode download buffer.
5392 *
5393 * Return value:
5394 * 	IPR_RC_JOB_CONTINUE
5395 **/
5396static int ipr_reset_ucode_download_done(struct ipr_cmnd *ipr_cmd)
5397{
5398	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5399	struct ipr_sglist *sglist = ioa_cfg->ucode_sglist;
5400
5401	pci_unmap_sg(ioa_cfg->pdev, sglist->scatterlist,
5402		     sglist->num_sg, DMA_TO_DEVICE);
5403
5404	ipr_cmd->job_step = ipr_reset_alert;
5405	return IPR_RC_JOB_CONTINUE;
5406}
5407
5408/**
5409 * ipr_reset_ucode_download - Download microcode to the adapter
5410 * @ipr_cmd:	ipr command struct
5411 *
5412 * Description: This function checks to see if there is microcode
5413 * to download to the adapter. If there is, a download is performed.
5414 *
5415 * Return value:
5416 * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
5417 **/
5418static int ipr_reset_ucode_download(struct ipr_cmnd *ipr_cmd)
5419{
5420	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5421	struct ipr_sglist *sglist = ioa_cfg->ucode_sglist;
5422
5423	ENTER;
5424	ipr_cmd->job_step = ipr_reset_alert;
5425
5426	if (!sglist)
5427		return IPR_RC_JOB_CONTINUE;
5428
5429	ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
5430	ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
5431	ipr_cmd->ioarcb.cmd_pkt.cdb[0] = WRITE_BUFFER;
5432	ipr_cmd->ioarcb.cmd_pkt.cdb[1] = IPR_WR_BUF_DOWNLOAD_AND_SAVE;
5433	ipr_cmd->ioarcb.cmd_pkt.cdb[6] = (sglist->buffer_len & 0xff0000) >> 16;
5434	ipr_cmd->ioarcb.cmd_pkt.cdb[7] = (sglist->buffer_len & 0x00ff00) >> 8;
5435	ipr_cmd->ioarcb.cmd_pkt.cdb[8] = sglist->buffer_len & 0x0000ff;
5436
5437	ipr_build_ucode_ioadl(ipr_cmd, sglist);
5438	ipr_cmd->job_step = ipr_reset_ucode_download_done;
5439
5440	ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
5441		   IPR_WRITE_BUFFER_TIMEOUT);
5442
5443	LEAVE;
5444	return IPR_RC_JOB_RETURN;
5445}
5446
5447/**
5448 * ipr_reset_shutdown_ioa - Shutdown the adapter
5449 * @ipr_cmd:	ipr command struct
5450 *
5451 * Description: This function issues an adapter shutdown of the
5452 * specified type to the specified adapter as part of the
5453 * adapter reset job.
5454 *
5455 * Return value:
5456 * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
5457 **/
5458static int ipr_reset_shutdown_ioa(struct ipr_cmnd *ipr_cmd)
5459{
5460	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5461	enum ipr_shutdown_type shutdown_type = ipr_cmd->u.shutdown_type;
5462	unsigned long timeout;
5463	int rc = IPR_RC_JOB_CONTINUE;
5464
5465	ENTER;
5466	if (shutdown_type != IPR_SHUTDOWN_NONE && !ioa_cfg->ioa_is_dead) {
5467		ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
5468		ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
5469		ipr_cmd->ioarcb.cmd_pkt.cdb[0] = IPR_IOA_SHUTDOWN;
5470		ipr_cmd->ioarcb.cmd_pkt.cdb[1] = shutdown_type;
5471
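		/* Scale the command timeout to the type of shutdown requested */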
5472		if (shutdown_type == IPR_SHUTDOWN_ABBREV)
5473			timeout = IPR_ABBREV_SHUTDOWN_TIMEOUT;
5474		else if (shutdown_type == IPR_SHUTDOWN_PREPARE_FOR_NORMAL)
5475			timeout = IPR_INTERNAL_TIMEOUT;
5476		else
5477			timeout = IPR_SHUTDOWN_TIMEOUT;
5478
5479		ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, timeout);
5480
5481		rc = IPR_RC_JOB_RETURN;
5482		ipr_cmd->job_step = ipr_reset_ucode_download;
5483	} else
5484		ipr_cmd->job_step = ipr_reset_alert;
5485
5486	LEAVE;
5487	return rc;
5488}
5489
5490/**
5491 * ipr_reset_ioa_job - Adapter reset job
5492 * @ipr_cmd:	ipr command struct
5493 *
5494 * Description: This function is the job router for the adapter reset job.
5495 *
5496 * Return value:
5497 * 	none
5498 **/
5499static void ipr_reset_ioa_job(struct ipr_cmnd *ipr_cmd)
5500{
5501	u32 rc, ioasc;
5502	unsigned long scratch = ipr_cmd->u.scratch;
5503	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5504
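	/* Run job steps back-to-back while they return IPR_RC_JOB_CONTINUE.
	 * A step that returns IPR_RC_JOB_RETURN has deferred the next step
	 * to a timer or command completion, which will re-enter this router. */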
5505	do {
5506		ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
5507
5508		if (ioa_cfg->reset_cmd != ipr_cmd) {
5509			/*
5510			 * We are doing nested adapter resets and this is
5511			 * not the current reset job.
5512			 */
5513			list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
5514			return;
5515		}
5516
5517		if (IPR_IOASC_SENSE_KEY(ioasc)) {
5518			dev_err(&ioa_cfg->pdev->dev,
5519				"0x%02X failed with IOASC: 0x%08X\n",
5520				ipr_cmd->ioarcb.cmd_pkt.cdb[0], ioasc);
5521
5522			ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
5523			list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
5524			return;
5525		}
5526
5527		ipr_reinit_ipr_cmnd(ipr_cmd);
5528		ipr_cmd->u.scratch = scratch;
5529		rc = ipr_cmd->job_step(ipr_cmd);
5530	} while (rc == IPR_RC_JOB_CONTINUE);
5531}
5532
5533/**
5534 * _ipr_initiate_ioa_reset - Initiate an adapter reset
5535 * @ioa_cfg:		ioa config struct
5536 * @job_step:		first job step of reset job
5537 * @shutdown_type:	shutdown type
5538 *
5539 * Description: This function will initiate the reset of the given adapter
5540 * starting at the selected job step.
5541 * If the caller needs to wait on the completion of the reset,
5542 * the caller must sleep on the reset_wait_q.
5543 *
5544 * Return value:
5545 * 	none
5546 **/
5547static void _ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg,
5548				    int (*job_step) (struct ipr_cmnd *),
5549				    enum ipr_shutdown_type shutdown_type)
5550{
5551	struct ipr_cmnd *ipr_cmd;
5552
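	/* Quiesce the midlayer: no new commands are accepted until the reset
	 * job completes. */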
5553	ioa_cfg->in_reset_reload = 1;
5554	ioa_cfg->allow_cmds = 0;
5555	scsi_block_requests(ioa_cfg->host);
5556
5557	ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
5558	ioa_cfg->reset_cmd = ipr_cmd;
5559	ipr_cmd->job_step = job_step;
5560	ipr_cmd->u.shutdown_type = shutdown_type;
5561
5562	ipr_reset_ioa_job(ipr_cmd);
5563}
5564
5565/**
5566 * ipr_initiate_ioa_reset - Initiate an adapter reset
5567 * @ioa_cfg:		ioa config struct
5568 * @shutdown_type:	shutdown type
5569 *
5570 * Description: This function will initiate the reset of the given adapter.
5571 * If the caller needs to wait on the completion of the reset,
5572 * the caller must sleep on the reset_wait_q.
5573 *
5574 * Return value:
5575 * 	none
5576 **/
5577static void ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg,
5578				   enum ipr_shutdown_type shutdown_type)
5579{
5580	if (ioa_cfg->ioa_is_dead)
5581		return;
5582
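	/* A new reset request while a dump is being fetched aborts the dump */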
5583	if (ioa_cfg->in_reset_reload && ioa_cfg->sdt_state == GET_DUMP)
5584		ioa_cfg->sdt_state = ABORT_DUMP;
5585
5586	if (ioa_cfg->reset_retries++ >= IPR_NUM_RESET_RELOAD_RETRIES) {
5587		dev_err(&ioa_cfg->pdev->dev,
5588			"IOA taken offline - error recovery failed\n");
5589
5590		ioa_cfg->reset_retries = 0;
5591		ioa_cfg->ioa_is_dead = 1;
5592
5593		if (ioa_cfg->in_ioa_bringdown) {
5594			ioa_cfg->reset_cmd = NULL;
5595			ioa_cfg->in_reset_reload = 0;
5596			ipr_fail_all_ops(ioa_cfg);
5597			wake_up_all(&ioa_cfg->reset_wait_q);
5598
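			/* scsi_unblock_requests() can re-enter the driver, so the
			 * host lock must be dropped around the call. */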
5599			spin_unlock_irq(ioa_cfg->host->host_lock);
5600			scsi_unblock_requests(ioa_cfg->host);
5601			spin_lock_irq(ioa_cfg->host->host_lock);
5602			return;
5603		} else {
5604			ioa_cfg->in_ioa_bringdown = 1;
5605			shutdown_type = IPR_SHUTDOWN_NONE;
5606		}
5607	}
5608
5609	_ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_shutdown_ioa,
5610				shutdown_type);
5611}
5612
5613/**
5614 * ipr_probe_ioa_part2 - Initializes IOAs found in ipr_probe_ioa(..)
5615 * @ioa_cfg:	ioa cfg struct
5616 *
5617 * Description: This is the second phase of adapter initialization.
5618 * This function takes care of initializing the adapter to the point
5619 * where it can accept new commands.
5620 *
5621 * Return value:
5622 * 	0 on success / -EIO on failure
5623 **/
5624static int __devinit ipr_probe_ioa_part2(struct ipr_ioa_cfg *ioa_cfg)
5625{
5626	int rc = 0;
5627	unsigned long host_lock_flags = 0;
5628
5629	ENTER;
5630	spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
5631	dev_dbg(&ioa_cfg->pdev->dev, "ioa_cfg adx: 0x%p\n", ioa_cfg);
5632	_ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_enable_ioa, IPR_SHUTDOWN_NONE);
5633
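	/* Drop the host lock while sleeping until the reset job completes */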
5634	spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
5635	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
5636	spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
5637
5638	if (ioa_cfg->ioa_is_dead) {
5639		rc = -EIO;
5640	} else if (ipr_invalid_adapter(ioa_cfg)) {
5641		if (!ipr_testmode)
5642			rc = -EIO;
5643
5644		dev_err(&ioa_cfg->pdev->dev,
5645			"Adapter not supported in this hardware configuration.\n");
5646	}
5647
5648	spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
5649
5650	LEAVE;
5651	return rc;
5652}
5653
5654/**
5655 * ipr_free_cmd_blks - Frees command blocks allocated for an adapter
5656 * @ioa_cfg:	ioa config struct
5657 *
5658 * Return value:
5659 * 	none
5660 **/
5661static void ipr_free_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
5662{
5663	int i;
5664
5665	for (i = 0; i < IPR_NUM_CMD_BLKS; i++) {
5666		if (ioa_cfg->ipr_cmnd_list[i])
5667			pci_pool_free(ioa_cfg->ipr_cmd_pool,
5668				      ioa_cfg->ipr_cmnd_list[i],
5669				      ioa_cfg->ipr_cmnd_list_dma[i]);
5670
5671		ioa_cfg->ipr_cmnd_list[i] = NULL;
5672	}
5673
5674	if (ioa_cfg->ipr_cmd_pool)
5675		pci_pool_destroy(ioa_cfg->ipr_cmd_pool);
5676
5677	ioa_cfg->ipr_cmd_pool = NULL;
5678}
5679
5680/**
5681 * ipr_free_mem - Frees memory allocated for an adapter
5682 * @ioa_cfg:	ioa cfg struct
5683 *
5684 * Return value:
5685 * 	nothing
5686 **/
5687static void ipr_free_mem(struct ipr_ioa_cfg *ioa_cfg)
5688{
5689	int i;
5690
5691	kfree(ioa_cfg->res_entries);
5692	pci_free_consistent(ioa_cfg->pdev, sizeof(struct ipr_misc_cbs),
5693			    ioa_cfg->vpd_cbs, ioa_cfg->vpd_cbs_dma);
5694	ipr_free_cmd_blks(ioa_cfg);
5695	pci_free_consistent(ioa_cfg->pdev, sizeof(u32) * IPR_NUM_CMD_BLKS,
5696			    ioa_cfg->host_rrq, ioa_cfg->host_rrq_dma);
5697	pci_free_consistent(ioa_cfg->pdev, sizeof(struct ipr_config_table),
5698			    ioa_cfg->cfg_table,
5699			    ioa_cfg->cfg_table_dma);
5700
5701	for (i = 0; i < IPR_NUM_HCAMS; i++) {
5702		pci_free_consistent(ioa_cfg->pdev,
5703				    sizeof(struct ipr_hostrcb),
5704				    ioa_cfg->hostrcb[i],
5705				    ioa_cfg->hostrcb_dma[i]);
5706	}
5707
5708	ipr_free_dump(ioa_cfg);
5709	kfree(ioa_cfg->saved_mode_pages);
5710	kfree(ioa_cfg->trace);
5711}
5712
5713/**
5714 * ipr_free_all_resources - Free all allocated resources for an adapter.
5715 * @ioa_cfg:	ioa config struct
5716 *
5717 * This function frees all allocated resources for the
5718 * specified adapter.
5719 *
5720 * Return value:
5721 * 	none
5722 **/
5723static void ipr_free_all_resources(struct ipr_ioa_cfg *ioa_cfg)
5724{
5725	struct pci_dev *pdev = ioa_cfg->pdev;
5726
5727	ENTER;
5728	free_irq(pdev->irq, ioa_cfg);
5729	iounmap(ioa_cfg->hdw_dma_regs);
5730	pci_release_regions(pdev);
5731	ipr_free_mem(ioa_cfg);
5732	scsi_host_put(ioa_cfg->host);
5733	pci_disable_device(pdev);
5734	LEAVE;
5735}
5736
5737/**
5738 * ipr_alloc_cmd_blks - Allocate command blocks for an adapter
5739 * @ioa_cfg:	ioa config struct
5740 *
5741 * Return value:
5742 * 	0 on success / -ENOMEM on allocation failure
5743 **/
5744static int __devinit ipr_alloc_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
5745{
5746	struct ipr_cmnd *ipr_cmd;
5747	struct ipr_ioarcb *ioarcb;
5748	dma_addr_t dma_addr;
5749	int i;
5750
5751	ioa_cfg->ipr_cmd_pool = pci_pool_create(IPR_NAME, ioa_cfg->pdev,
5752						sizeof(struct ipr_cmnd), 8, 0);
5753
5754	if (!ioa_cfg->ipr_cmd_pool)
5755		return -ENOMEM;
5756
5757	for (i = 0; i < IPR_NUM_CMD_BLKS; i++) {
5758		ipr_cmd = pci_pool_alloc(ioa_cfg->ipr_cmd_pool, SLAB_KERNEL, &dma_addr);
5759
5760		if (!ipr_cmd) {
5761			ipr_free_cmd_blks(ioa_cfg);
5762			return -ENOMEM;
5763		}
5764
5765		memset(ipr_cmd, 0, sizeof(*ipr_cmd));
5766		ioa_cfg->ipr_cmnd_list[i] = ipr_cmd;
5767		ioa_cfg->ipr_cmnd_list_dma[i] = dma_addr;
5768
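		/* Point the IOARCB at the DMA addresses of the IOADL, IOASA, and
		 * sense buffer embedded in this command block. */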
5769		ioarcb = &ipr_cmd->ioarcb;
5770		ioarcb->ioarcb_host_pci_addr = cpu_to_be32(dma_addr);
5771		ioarcb->host_response_handle = cpu_to_be32(i << 2);
5772		ioarcb->write_ioadl_addr =
5773			cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, ioadl));
5774		ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
5775		ioarcb->ioasa_host_pci_addr =
5776			cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, ioasa));
5777		ioarcb->ioasa_len = cpu_to_be16(sizeof(struct ipr_ioasa));
5778		ipr_cmd->cmd_index = i;
5779		ipr_cmd->ioa_cfg = ioa_cfg;
5780		ipr_cmd->sense_buffer_dma = dma_addr +
5781			offsetof(struct ipr_cmnd, sense_buffer);
5782
5783		list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
5784	}
5785
5786	return 0;
5787}
5788
5789/**
5790 * ipr_alloc_mem - Allocate memory for an adapter
5791 * @ioa_cfg:	ioa config struct
5792 *
5793 * Return value:
5794 * 	0 on success / non-zero for error
5795 **/
5796static int __devinit ipr_alloc_mem(struct ipr_ioa_cfg *ioa_cfg)
5797{
5798	struct pci_dev *pdev = ioa_cfg->pdev;
5799	int i, rc = -ENOMEM;
5800
5801	ENTER;
5802	ioa_cfg->res_entries = kzalloc(sizeof(struct ipr_resource_entry) *
5803				       IPR_MAX_PHYSICAL_DEVS, GFP_KERNEL);
5804
5805	if (!ioa_cfg->res_entries)
5806		goto out;
5807
5808	for (i = 0; i < IPR_MAX_PHYSICAL_DEVS; i++)
5809		list_add_tail(&ioa_cfg->res_entries[i].queue, &ioa_cfg->free_res_q);
5810
5811	ioa_cfg->vpd_cbs = pci_alloc_consistent(ioa_cfg->pdev,
5812						sizeof(struct ipr_misc_cbs),
5813						&ioa_cfg->vpd_cbs_dma);
5814
5815	if (!ioa_cfg->vpd_cbs)
5816		goto out_free_res_entries;
5817
5818	if (ipr_alloc_cmd_blks(ioa_cfg))
5819		goto out_free_vpd_cbs;
5820
5821	ioa_cfg->host_rrq = pci_alloc_consistent(ioa_cfg->pdev,
5822						 sizeof(u32) * IPR_NUM_CMD_BLKS,
5823						 &ioa_cfg->host_rrq_dma);
5824
5825	if (!ioa_cfg->host_rrq)
5826		goto out_ipr_free_cmd_blocks;
5827
5828	ioa_cfg->cfg_table = pci_alloc_consistent(ioa_cfg->pdev,
5829						  sizeof(struct ipr_config_table),
5830						  &ioa_cfg->cfg_table_dma);
5831
5832	if (!ioa_cfg->cfg_table)
5833		goto out_free_host_rrq;
5834
5835	for (i = 0; i < IPR_NUM_HCAMS; i++) {
5836		ioa_cfg->hostrcb[i] = pci_alloc_consistent(ioa_cfg->pdev,
5837							   sizeof(struct ipr_hostrcb),
5838							   &ioa_cfg->hostrcb_dma[i]);
5839
5840		if (!ioa_cfg->hostrcb[i])
5841			goto out_free_hostrcb_dma;
5842
5843		ioa_cfg->hostrcb[i]->hostrcb_dma =
5844			ioa_cfg->hostrcb_dma[i] + offsetof(struct ipr_hostrcb, hcam);
5845		list_add_tail(&ioa_cfg->hostrcb[i]->queue, &ioa_cfg->hostrcb_free_q);
5846	}
5847
5848	ioa_cfg->trace = kzalloc(sizeof(struct ipr_trace_entry) *
5849				 IPR_NUM_TRACE_ENTRIES, GFP_KERNEL);
5850
5851	if (!ioa_cfg->trace)
5852		goto out_free_hostrcb_dma;
5853
5854	rc = 0;
5855out:
5856	LEAVE;
5857	return rc;
5858
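/* Error unwind: release resources in the reverse order of allocation */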
5859out_free_hostrcb_dma:
5860	while (i-- > 0) {
5861		pci_free_consistent(pdev, sizeof(struct ipr_hostrcb),
5862				    ioa_cfg->hostrcb[i],
5863				    ioa_cfg->hostrcb_dma[i]);
5864	}
5865	pci_free_consistent(pdev, sizeof(struct ipr_config_table),
5866			    ioa_cfg->cfg_table, ioa_cfg->cfg_table_dma);
5867out_free_host_rrq:
5868	pci_free_consistent(pdev, sizeof(u32) * IPR_NUM_CMD_BLKS,
5869			    ioa_cfg->host_rrq, ioa_cfg->host_rrq_dma);
5870out_ipr_free_cmd_blocks:
5871	ipr_free_cmd_blks(ioa_cfg);
5872out_free_vpd_cbs:
5873	pci_free_consistent(pdev, sizeof(struct ipr_misc_cbs),
5874			    ioa_cfg->vpd_cbs, ioa_cfg->vpd_cbs_dma);
5875out_free_res_entries:
5876	kfree(ioa_cfg->res_entries);
5877	goto out;
5878}
5879
5880/**
5881 * ipr_initialize_bus_attr - Initialize SCSI bus attributes to default values
5882 * @ioa_cfg:	ioa config struct
5883 *
5884 * Return value:
5885 * 	none
5886 **/
5887static void __devinit ipr_initialize_bus_attr(struct ipr_ioa_cfg *ioa_cfg)
5888{
5889	int i;
5890
5891	for (i = 0; i < IPR_MAX_NUM_BUSES; i++) {
5892		ioa_cfg->bus_attr[i].bus = i;
5893		ioa_cfg->bus_attr[i].qas_enabled = 0;
5894		ioa_cfg->bus_attr[i].bus_width = IPR_DEFAULT_BUS_WIDTH;
5895		if (ipr_max_speed < ARRAY_SIZE(ipr_max_bus_speeds))
5896			ioa_cfg->bus_attr[i].max_xfer_rate = ipr_max_bus_speeds[ipr_max_speed];
5897		else
5898			ioa_cfg->bus_attr[i].max_xfer_rate = IPR_U160_SCSI_RATE;
5899	}
5900}
5901
5902/**
5903 * ipr_init_ioa_cfg - Initialize IOA config struct
5904 * @ioa_cfg:	ioa config struct
5905 * @host:		scsi host struct
5906 * @pdev:		PCI dev struct
5907 *
5908 * Return value:
5909 * 	none
5910 **/
5911static void __devinit ipr_init_ioa_cfg(struct ipr_ioa_cfg *ioa_cfg,
5912				       struct Scsi_Host *host, struct pci_dev *pdev)
5913{
5914	const struct ipr_interrupt_offsets *p;
5915	struct ipr_interrupts *t;
5916	void __iomem *base;
5917
5918	ioa_cfg->host = host;
5919	ioa_cfg->pdev = pdev;
5920	ioa_cfg->log_level = ipr_log_level;
5921	ioa_cfg->doorbell = IPR_DOORBELL;
5922	sprintf(ioa_cfg->eye_catcher, IPR_EYECATCHER);
5923	sprintf(ioa_cfg->trace_start, IPR_TRACE_START_LABEL);
5924	sprintf(ioa_cfg->ipr_free_label, IPR_FREEQ_LABEL);
5925	sprintf(ioa_cfg->ipr_pending_label, IPR_PENDQ_LABEL);
5926	sprintf(ioa_cfg->cfg_table_start, IPR_CFG_TBL_START);
5927	sprintf(ioa_cfg->resource_table_label, IPR_RES_TABLE_LABEL);
5928	sprintf(ioa_cfg->ipr_hcam_label, IPR_HCAM_LABEL);
5929	sprintf(ioa_cfg->ipr_cmd_label, IPR_CMD_LABEL);
5930
5931	INIT_LIST_HEAD(&ioa_cfg->free_q);
5932	INIT_LIST_HEAD(&ioa_cfg->pending_q);
5933	INIT_LIST_HEAD(&ioa_cfg->hostrcb_free_q);
5934	INIT_LIST_HEAD(&ioa_cfg->hostrcb_pending_q);
5935	INIT_LIST_HEAD(&ioa_cfg->free_res_q);
5936	INIT_LIST_HEAD(&ioa_cfg->used_res_q);
5937	INIT_WORK(&ioa_cfg->work_q, ipr_worker_thread, ioa_cfg);
5938	init_waitqueue_head(&ioa_cfg->reset_wait_q);
5939	ioa_cfg->sdt_state = INACTIVE;
5940	if (ipr_enable_cache)
5941		ioa_cfg->cache_state = CACHE_ENABLED;
5942	else
5943		ioa_cfg->cache_state = CACHE_DISABLED;
5944
5945	ipr_initialize_bus_attr(ioa_cfg);
5946
5947	host->max_id = IPR_MAX_NUM_TARGETS_PER_BUS;
5948	host->max_lun = IPR_MAX_NUM_LUNS_PER_TARGET;
5949	host->max_channel = IPR_MAX_BUS_TO_SCAN;
5950	host->unique_id = host->host_no;
5951	host->max_cmd_len = IPR_MAX_CDB_LEN;
5952	pci_set_drvdata(pdev, ioa_cfg);
5953
5954	p = &ioa_cfg->chip_cfg->regs;
5955	t = &ioa_cfg->regs;
5956	base = ioa_cfg->hdw_dma_regs;
5957
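	/* Convert the chip-specific register offsets into mapped MMIO addresses */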
5958	t->set_interrupt_mask_reg = base + p->set_interrupt_mask_reg;
5959	t->clr_interrupt_mask_reg = base + p->clr_interrupt_mask_reg;
5960	t->sense_interrupt_mask_reg = base + p->sense_interrupt_mask_reg;
5961	t->clr_interrupt_reg = base + p->clr_interrupt_reg;
5962	t->sense_interrupt_reg = base + p->sense_interrupt_reg;
5963	t->ioarrin_reg = base + p->ioarrin_reg;
5964	t->sense_uproc_interrupt_reg = base + p->sense_uproc_interrupt_reg;
5965	t->set_uproc_interrupt_reg = base + p->set_uproc_interrupt_reg;
5966	t->clr_uproc_interrupt_reg = base + p->clr_uproc_interrupt_reg;
5967}
5968
5969/**
5970 * ipr_get_chip_cfg - Find adapter chip configuration
5971 * @dev_id:		PCI device id struct
5972 *
5973 * Return value:
5974 * 	ptr to chip config on success / NULL on failure
5975 **/
5976static const struct ipr_chip_cfg_t * __devinit
5977ipr_get_chip_cfg(const struct pci_device_id *dev_id)
5978{
5979	int i;
5980
5981	if (dev_id->driver_data)
5982		return (const struct ipr_chip_cfg_t *)dev_id->driver_data;
5983
5984	for (i = 0; i < ARRAY_SIZE(ipr_chip); i++)
5985		if (ipr_chip[i].vendor == dev_id->vendor &&
5986		    ipr_chip[i].device == dev_id->device)
5987			return ipr_chip[i].cfg;
5988	return NULL;
5989}
5990
5991/**
5992 * ipr_probe_ioa - Allocates memory and does first stage of initialization
5993 * @pdev:		PCI device struct
5994 * @dev_id:		PCI device id struct
5995 *
5996 * Return value:
5997 * 	0 on success / non-zero on failure
5998 **/
5999static int __devinit ipr_probe_ioa(struct pci_dev *pdev,
6000				   const struct pci_device_id *dev_id)
6001{
6002	struct ipr_ioa_cfg *ioa_cfg;
6003	struct Scsi_Host *host;
6004	unsigned long ipr_regs_pci;
6005	void __iomem *ipr_regs;
6006	int rc = PCIBIOS_SUCCESSFUL;	/* must be signed for the rc < 0 checks below */
6007
6008	ENTER;
6009
6010	if ((rc = pci_enable_device(pdev))) {
6011		dev_err(&pdev->dev, "Cannot enable adapter\n");
6012		goto out;
6013	}
6014
6015	dev_info(&pdev->dev, "Found IOA with IRQ: %d\n", pdev->irq);
6016
6017	host = scsi_host_alloc(&driver_template, sizeof(*ioa_cfg));
6018
6019	if (!host) {
6020		dev_err(&pdev->dev, "call to scsi_host_alloc failed!\n");
6021		rc = -ENOMEM;
6022		goto out_disable;
6023	}
6024
6025	ioa_cfg = (struct ipr_ioa_cfg *)host->hostdata;
6026	memset(ioa_cfg, 0, sizeof(struct ipr_ioa_cfg));
6027
6028	ioa_cfg->chip_cfg = ipr_get_chip_cfg(dev_id);
6029
6030	if (!ioa_cfg->chip_cfg) {
6031		dev_err(&pdev->dev, "Unknown adapter chipset 0x%04X 0x%04X\n",
6032			dev_id->vendor, dev_id->device);
6033		rc = -EINVAL;	/* don't return success for an unknown chipset */
		goto out_scsi_host_put;
6034	}
6035
6036	ipr_regs_pci = pci_resource_start(pdev, 0);
6037
6038	rc = pci_request_regions(pdev, IPR_NAME);
6039	if (rc < 0) {
6040		dev_err(&pdev->dev,
6041			"Couldn't register memory range of registers\n");
6042		goto out_scsi_host_put;
6043	}
6044
6045	ipr_regs = ioremap(ipr_regs_pci, pci_resource_len(pdev, 0));
6046
6047	if (!ipr_regs) {
6048		dev_err(&pdev->dev,
6049			"Couldn't map memory range of registers\n");
6050		rc = -ENOMEM;
6051		goto out_release_regions;
6052	}
6053
6054	ioa_cfg->hdw_dma_regs = ipr_regs;
6055	ioa_cfg->hdw_dma_regs_pci = ipr_regs_pci;
6056	ioa_cfg->ioa_mailbox = ioa_cfg->chip_cfg->mailbox + ipr_regs;
6057
6058	ipr_init_ioa_cfg(ioa_cfg, host, pdev);
6059
6060	pci_set_master(pdev);
6061
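	/* The adapter addresses host memory with 32-bit DMA addresses only */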
6062	rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
6063	if (rc < 0) {
6064		dev_err(&pdev->dev, "Failed to set PCI DMA mask\n");
6065		goto cleanup_nomem;
6066	}
6067
6068	rc = pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE,
6069				   ioa_cfg->chip_cfg->cache_line_size);
6070
6071	if (rc != PCIBIOS_SUCCESSFUL) {
6072		dev_err(&pdev->dev, "Write of cache line size failed\n");
6073		rc = -EIO;
6074		goto cleanup_nomem;
6075	}
6076
6077	/* Save away PCI config space for use following IOA reset */
6078	rc = pci_save_state(pdev);
6079
6080	if (rc != PCIBIOS_SUCCESSFUL) {
6081		dev_err(&pdev->dev, "Failed to save PCI config space\n");
6082		rc = -EIO;
6083		goto cleanup_nomem;
6084	}
6085
6086	if ((rc = ipr_save_pcix_cmd_reg(ioa_cfg)))
6087		goto cleanup_nomem;
6088
6089	if ((rc = ipr_set_pcix_cmd_reg(ioa_cfg)))
6090		goto cleanup_nomem;
6091
6092	rc = ipr_alloc_mem(ioa_cfg);
6093	if (rc < 0) {
6094		dev_err(&pdev->dev,
6095			"Couldn't allocate enough memory for device driver!\n");
6096		goto cleanup_nomem;
6097	}
6098
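	/* Mask and clear everything except the "transition to operational"
	 * interrupt before installing the interrupt handler */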
6099	ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
6100	rc = request_irq(pdev->irq, ipr_isr, SA_SHIRQ, IPR_NAME, ioa_cfg);
6101
6102	if (rc) {
6103		dev_err(&pdev->dev, "Couldn't register IRQ %d! rc=%d\n",
6104			pdev->irq, rc);
6105		goto cleanup_nolog;
6106	}
6107
6108	spin_lock(&ipr_driver_lock);
6109	list_add_tail(&ioa_cfg->queue, &ipr_ioa_head);
6110	spin_unlock(&ipr_driver_lock);
6111
6112	LEAVE;
6113out:
6114	return rc;
6115
6116cleanup_nolog:
6117	ipr_free_mem(ioa_cfg);
6118cleanup_nomem:
6119	iounmap(ipr_regs);
6120out_release_regions:
6121	pci_release_regions(pdev);
6122out_scsi_host_put:
6123	scsi_host_put(host);
6124out_disable:
6125	pci_disable_device(pdev);
6126	goto out;
6127}
6128
6129/**
6130 * ipr_scan_vsets - Scans for VSET devices
6131 * @ioa_cfg:	ioa config struct
6132 *
6133 * Description: Since the VSET resources do not follow SAM (we can have
6134 * sparse LUNs with no LUN 0), we have to scan for these devices ourselves.
6135 *
6136 * Return value:
6137 * 	none
6138 **/
6139static void ipr_scan_vsets(struct ipr_ioa_cfg *ioa_cfg)
6140{
6141	int target, lun;
6142
6143	for (target = 0; target < IPR_MAX_NUM_TARGETS_PER_BUS; target++)
6144		for (lun = 0; lun < IPR_MAX_NUM_VSET_LUNS_PER_TARGET; lun++ )
6145			scsi_add_device(ioa_cfg->host, IPR_VSET_BUS, target, lun);
6146}
6147
6148/**
6149 * ipr_initiate_ioa_bringdown - Bring down an adapter
6150 * @ioa_cfg:		ioa config struct
6151 * @shutdown_type:	shutdown type
6152 *
6153 * Description: This function will initiate bringing down the adapter.
6154 * This consists of issuing an IOA shutdown to the adapter
6155 * to flush the cache, and running BIST.
6156 * If the caller needs to wait on the completion of the reset,
6157 * the caller must sleep on the reset_wait_q.
6158 *
6159 * Return value:
6160 * 	none
6161 **/
6162static void ipr_initiate_ioa_bringdown(struct ipr_ioa_cfg *ioa_cfg,
6163				       enum ipr_shutdown_type shutdown_type)
6164{
6165	ENTER;
6166	if (ioa_cfg->sdt_state == WAIT_FOR_DUMP)
6167		ioa_cfg->sdt_state = ABORT_DUMP;
6168	ioa_cfg->reset_retries = 0;
6169	ioa_cfg->in_ioa_bringdown = 1;
6170	ipr_initiate_ioa_reset(ioa_cfg, shutdown_type);
6171	LEAVE;
6172}
6173
6174/**
6175 * __ipr_remove - Remove a single adapter
6176 * @pdev:	pci device struct
6177 *
6178 * Adapter hot plug remove entry point.
6179 *
6180 * Return value:
6181 * 	none
6182 **/
6183static void __ipr_remove(struct pci_dev *pdev)
6184{
6185	unsigned long host_lock_flags = 0;
6186	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
6187	ENTER;
6188
6189	spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
6190	ipr_initiate_ioa_bringdown(ioa_cfg, IPR_SHUTDOWN_NORMAL);
6191
6192	spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
6193	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
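	/* Ensure any worker thread activity queued for this adapter has finished */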
6194	flush_scheduled_work();
6195	spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
6196
6197	spin_lock(&ipr_driver_lock);
6198	list_del(&ioa_cfg->queue);
6199	spin_unlock(&ipr_driver_lock);
6200
6201	if (ioa_cfg->sdt_state == ABORT_DUMP)
6202		ioa_cfg->sdt_state = WAIT_FOR_DUMP;
6203	spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
6204
6205	ipr_free_all_resources(ioa_cfg);
6206
6207	LEAVE;
6208}
6209
6210/**
6211 * ipr_remove - IOA hot plug remove entry point
6212 * @pdev:	pci device struct
6213 *
6214 * Adapter hot plug remove entry point.
6215 *
6216 * Return value:
6217 * 	none
6218 **/
6219static void ipr_remove(struct pci_dev *pdev)
6220{
6221	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
6222
6223	ENTER;
6224
6225	ipr_remove_trace_file(&ioa_cfg->host->shost_classdev.kobj,
6226			      &ipr_trace_attr);
6227	ipr_remove_dump_file(&ioa_cfg->host->shost_classdev.kobj,
6228			     &ipr_dump_attr);
6229	scsi_remove_host(ioa_cfg->host);
6230
6231	__ipr_remove(pdev);
6232
6233	LEAVE;
6234}
6235
6236/**
6237 * ipr_probe - Adapter hot plug add entry point
 * @pdev:		PCI device struct
 * @dev_id:		PCI device id struct
6238 *
6239 * Return value:
6240 * 	0 on success / non-zero on failure
6241 **/
6242static int __devinit ipr_probe(struct pci_dev *pdev,
6243			       const struct pci_device_id *dev_id)
6244{
6245	struct ipr_ioa_cfg *ioa_cfg;
6246	int rc;
6247
6248	rc = ipr_probe_ioa(pdev, dev_id);
6249
6250	if (rc)
6251		return rc;
6252
6253	ioa_cfg = pci_get_drvdata(pdev);
6254	rc = ipr_probe_ioa_part2(ioa_cfg);
6255
6256	if (rc) {
6257		__ipr_remove(pdev);
6258		return rc;
6259	}
6260
6261	rc = scsi_add_host(ioa_cfg->host, &pdev->dev);
6262
6263	if (rc) {
6264		__ipr_remove(pdev);
6265		return rc;
6266	}
6267
6268	rc = ipr_create_trace_file(&ioa_cfg->host->shost_classdev.kobj,
6269				   &ipr_trace_attr);
6270
6271	if (rc) {
6272		scsi_remove_host(ioa_cfg->host);
6273		__ipr_remove(pdev);
6274		return rc;
6275	}
6276
6277	rc = ipr_create_dump_file(&ioa_cfg->host->shost_classdev.kobj,
6278				   &ipr_dump_attr);
6279
6280	if (rc) {
6281		ipr_remove_trace_file(&ioa_cfg->host->shost_classdev.kobj,
6282				      &ipr_trace_attr);
6283		scsi_remove_host(ioa_cfg->host);
6284		__ipr_remove(pdev);
6285		return rc;
6286	}
6287
6288	scsi_scan_host(ioa_cfg->host);
6289	ipr_scan_vsets(ioa_cfg);
6290	scsi_add_device(ioa_cfg->host, IPR_IOA_BUS, IPR_IOA_TARGET, IPR_IOA_LUN);
6291	ioa_cfg->allow_ml_add_del = 1;
6292	ioa_cfg->host->max_channel = IPR_VSET_BUS;
6293	schedule_work(&ioa_cfg->work_q);
6294	return 0;
6295}
6296
6297/**
6298 * ipr_shutdown - Shutdown handler.
6299 * @pdev:	pci device struct
6300 *
6301 * This function is invoked upon system shutdown/reboot. It will issue
6302 * an adapter shutdown to the adapter to flush the write cache.
6303 *
6304 * Return value:
6305 * 	none
6306 **/
6307static void ipr_shutdown(struct pci_dev *pdev)
6308{
6309	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
6310	unsigned long lock_flags = 0;
6311
6312	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
6313	ipr_initiate_ioa_bringdown(ioa_cfg, IPR_SHUTDOWN_NORMAL);
6314	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
6315	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
6316}
6317
6318static struct pci_device_id ipr_pci_table[] __devinitdata = {
6319	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
6320		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_5702,
6321		0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
6322	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
6323		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_5703,
6324		0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
6325	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
6326		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_573D,
6327		0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
6328	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
6329		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_573E,
6330		0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
6331	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
6332		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571B,
6333		0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
6334	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
6335		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572E,
6336		0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
6337	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
6338		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571A,
6339		0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
6340	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE,
6341		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2780,
6342		0, 0, (kernel_ulong_t)&ipr_chip_cfg[1] },
6343	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
6344		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571E,
6345		0, 0, (kernel_ulong_t)&ipr_chip_cfg[1] },
6346	{ }
6347};
6348MODULE_DEVICE_TABLE(pci, ipr_pci_table);
6349
6350static struct pci_driver ipr_driver = {
6351	.name = IPR_NAME,
6352	.id_table = ipr_pci_table,
6353	.probe = ipr_probe,
6354	.remove = ipr_remove,
6355	.shutdown = ipr_shutdown,
6356};
6357
6358/**
6359 * ipr_init - Module entry point
6360 *
6361 * Return value:
6362 * 	0 on success / negative value on failure
6363 **/
6364static int __init ipr_init(void)
6365{
6366	ipr_info("IBM Power RAID SCSI Device Driver version: %s %s\n",
6367		 IPR_DRIVER_VERSION, IPR_DRIVER_DATE);
6368
6369	return pci_module_init(&ipr_driver);
6370}
6371
6372/**
6373 * ipr_exit - Module unload
6374 *
6375 * Module unload entry point.
6376 *
6377 * Return value:
6378 * 	none
6379 **/
6380static void __exit ipr_exit(void)
6381{
6382	pci_unregister_driver(&ipr_driver);
6383}
6384
6385module_init(ipr_init);
6386module_exit(ipr_exit);
6387