ipr.c revision 117d2ce1cea25fc94302ff418ccef644cd3e59af
1/*
2 * ipr.c -- driver for IBM Power Linux RAID adapters
3 *
4 * Written By: Brian King <brking@us.ibm.com>, IBM Corporation
5 *
6 * Copyright (C) 2003, 2004 IBM Corporation
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
21 *
22 */
23
24/*
25 * Notes:
26 *
27 * This driver is used to control the following SCSI adapters:
28 *
29 * IBM iSeries: 5702, 5703, 2780, 5709, 570A, 570B
30 *
31 * IBM pSeries: PCI-X Dual Channel Ultra 320 SCSI RAID Adapter
32 *              PCI-X Dual Channel Ultra 320 SCSI Adapter
33 *              PCI-X Dual Channel Ultra 320 SCSI RAID Enablement Card
34 *              Embedded SCSI adapter on p615 and p655 systems
35 *
36 * Supported Hardware Features:
37 *	- Ultra 320 SCSI controller
38 *	- PCI-X host interface
39 *	- Embedded PowerPC RISC Processor and Hardware XOR DMA Engine
40 *	- Non-Volatile Write Cache
41 *	- Supports attachment of non-RAID disks, tape, and optical devices
42 *	- RAID Levels 0, 5, 10
43 *	- Hot spare
44 *	- Background Parity Checking
45 *	- Background Data Scrubbing
46 *	- Ability to increase the capacity of an existing RAID 5 disk array
47 *		by adding disks
48 *
49 * Driver Features:
50 *	- Tagged command queuing
51 *	- Adapter microcode download
52 *	- PCI hot plug
53 *	- SCSI device hot plug
54 *
55 */
56
57#include <linux/fs.h>
58#include <linux/init.h>
59#include <linux/types.h>
60#include <linux/errno.h>
61#include <linux/kernel.h>
62#include <linux/ioport.h>
63#include <linux/delay.h>
64#include <linux/pci.h>
65#include <linux/wait.h>
66#include <linux/spinlock.h>
67#include <linux/sched.h>
68#include <linux/interrupt.h>
69#include <linux/blkdev.h>
70#include <linux/firmware.h>
71#include <linux/module.h>
72#include <linux/moduleparam.h>
73#include <asm/io.h>
74#include <asm/irq.h>
75#include <asm/processor.h>
76#include <scsi/scsi.h>
77#include <scsi/scsi_host.h>
78#include <scsi/scsi_tcq.h>
79#include <scsi/scsi_eh.h>
80#include <scsi/scsi_cmnd.h>
81#include "ipr.h"
82
83/*
84 *   Global Data
85 */
86static struct list_head ipr_ioa_head = LIST_HEAD_INIT(ipr_ioa_head);
87static unsigned int ipr_log_level = IPR_DEFAULT_LOG_LEVEL;
88static unsigned int ipr_max_speed = 1;
89static int ipr_testmode = 0;
90static unsigned int ipr_fastfail = 0;
91static unsigned int ipr_transop_timeout = IPR_OPERATIONAL_TIMEOUT;
92static unsigned int ipr_enable_cache = 1;
93static unsigned int ipr_debug = 0;
94static int ipr_auto_create = 1;
95static DEFINE_SPINLOCK(ipr_driver_lock);
96
97/* This table describes the differences between DMA controller chips */
98static const struct ipr_chip_cfg_t ipr_chip_cfg[] = {
99	{ /* Gemstone, Citrine, and Obsidian */
100		.mailbox = 0x0042C,
101		.cache_line_size = 0x20,
102		{
103			.set_interrupt_mask_reg = 0x0022C,
104			.clr_interrupt_mask_reg = 0x00230,
105			.sense_interrupt_mask_reg = 0x0022C,
106			.clr_interrupt_reg = 0x00228,
107			.sense_interrupt_reg = 0x00224,
108			.ioarrin_reg = 0x00404,
109			.sense_uproc_interrupt_reg = 0x00214,
110			.set_uproc_interrupt_reg = 0x00214,
111			.clr_uproc_interrupt_reg = 0x00218
112		}
113	},
114	{ /* Snipe and Scamp */
115		.mailbox = 0x0052C,
116		.cache_line_size = 0x20,
117		{
118			.set_interrupt_mask_reg = 0x00288,
119			.clr_interrupt_mask_reg = 0x0028C,
120			.sense_interrupt_mask_reg = 0x00288,
121			.clr_interrupt_reg = 0x00284,
122			.sense_interrupt_reg = 0x00280,
123			.ioarrin_reg = 0x00504,
124			.sense_uproc_interrupt_reg = 0x00290,
125			.set_uproc_interrupt_reg = 0x00290,
126			.clr_uproc_interrupt_reg = 0x00294
127		}
128	},
129};
130
131static const struct ipr_chip_t ipr_chip[] = {
132	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE, &ipr_chip_cfg[0] },
133	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE, &ipr_chip_cfg[0] },
134	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN, &ipr_chip_cfg[0] },
135	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN, &ipr_chip_cfg[0] },
136	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE, &ipr_chip_cfg[1] },
137	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP, &ipr_chip_cfg[1] }
138};
139
140static int ipr_max_bus_speeds [] = {
141	IPR_80MBs_SCSI_RATE, IPR_U160_SCSI_RATE, IPR_U320_SCSI_RATE
142};
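/*
 * The max_speed module parameter (0-2, declared below) is presumably
 * used as an index into this table elsewhere in the driver, mapping
 * 0 -> 80 MB/s, 1 -> U160 and 2 -> U320.
 */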
143
144MODULE_AUTHOR("Brian King <brking@us.ibm.com>");
145MODULE_DESCRIPTION("IBM Power RAID SCSI Adapter Driver");
146module_param_named(max_speed, ipr_max_speed, uint, 0);
147MODULE_PARM_DESC(max_speed, "Maximum bus speed (0-2). Default: 1=U160. Speeds: 0=80 MB/s, 1=U160, 2=U320");
148module_param_named(log_level, ipr_log_level, uint, 0);
149MODULE_PARM_DESC(log_level, "Set to 0 - 4 for increasing verbosity of device driver");
150module_param_named(testmode, ipr_testmode, int, 0);
151MODULE_PARM_DESC(testmode, "DANGEROUS!!! Allows unsupported configurations");
152module_param_named(fastfail, ipr_fastfail, int, 0);
153MODULE_PARM_DESC(fastfail, "Reduce timeouts and retries");
154module_param_named(transop_timeout, ipr_transop_timeout, int, 0);
155MODULE_PARM_DESC(transop_timeout, "Time in seconds to wait for adapter to come operational (default: 300)");
156module_param_named(enable_cache, ipr_enable_cache, int, 0);
157MODULE_PARM_DESC(enable_cache, "Enable adapter's non-volatile write cache (default: 1)");
158module_param_named(debug, ipr_debug, int, 0);
159MODULE_PARM_DESC(debug, "Enable device driver debugging logging. Set to 1 to enable. (default: 0)");
160module_param_named(auto_create, ipr_auto_create, int, 0);
161MODULE_PARM_DESC(auto_create, "Auto-create single device RAID 0 arrays when initialized (default: 1)");
162MODULE_LICENSE("GPL");
163MODULE_VERSION(IPR_DRIVER_VERSION);
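/*
 * Example (illustrative only): the parameters above are typically set
 * at module load time, e.g.:
 *
 *	modprobe ipr max_speed=2 log_level=2 enable_cache=1
 *
 * or via the kernel command line as ipr.max_speed=2 when the driver is
 * built in.
 */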
164
165/*  A constant array of IOASCs/URCs/Error Messages */
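/*
 * Each entry is { IOASC, <flag>, log_hcam, error string }.  Only the
 * log_hcam flag and the error string are referenced in this file; the
 * first flag is assumed to be log_ioasa as defined in ipr.h.
 */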
166static const
167struct ipr_error_table_t ipr_error_table[] = {
168	{0x00000000, 1, 1,
169	"8155: An unknown error was received"},
170	{0x00330000, 0, 0,
171	"Soft underlength error"},
172	{0x005A0000, 0, 0,
173	"Command to be cancelled not found"},
174	{0x00808000, 0, 0,
175	"Qualified success"},
176	{0x01080000, 1, 1,
177	"FFFE: Soft device bus error recovered by the IOA"},
178	{0x01088100, 0, 1,
179	"4101: Soft device bus fabric error"},
180	{0x01170600, 0, 1,
181	"FFF9: Device sector reassign successful"},
182	{0x01170900, 0, 1,
183	"FFF7: Media error recovered by device rewrite procedures"},
184	{0x01180200, 0, 1,
185	"7001: IOA sector reassignment successful"},
186	{0x01180500, 0, 1,
187	"FFF9: Soft media error. Sector reassignment recommended"},
188	{0x01180600, 0, 1,
189	"FFF7: Media error recovered by IOA rewrite procedures"},
190	{0x01418000, 0, 1,
191	"FF3D: Soft PCI bus error recovered by the IOA"},
192	{0x01440000, 1, 1,
193	"FFF6: Device hardware error recovered by the IOA"},
194	{0x01448100, 0, 1,
195	"FFF6: Device hardware error recovered by the device"},
196	{0x01448200, 1, 1,
197	"FF3D: Soft IOA error recovered by the IOA"},
198	{0x01448300, 0, 1,
199	"FFFA: Undefined device response recovered by the IOA"},
200	{0x014A0000, 1, 1,
201	"FFF6: Device bus error, message or command phase"},
202	{0x015D0000, 0, 1,
203	"FFF6: Failure prediction threshold exceeded"},
204	{0x015D9200, 0, 1,
205	"8009: Impending cache battery pack failure"},
206	{0x02040400, 0, 0,
207	"34FF: Disk device format in progress"},
208	{0x023F0000, 0, 0,
209	"Synchronization required"},
210	{0x024E0000, 0, 0,
211	"Not ready, IOA shutdown"},
212	{0x025A0000, 0, 0,
213	"Not ready, IOA has been shutdown"},
214	{0x02670100, 0, 1,
215	"3020: Storage subsystem configuration error"},
216	{0x03110B00, 0, 0,
217	"FFF5: Medium error, data unreadable, recommend reassign"},
218	{0x03110C00, 0, 0,
219	"7000: Medium error, data unreadable, do not reassign"},
220	{0x03310000, 0, 1,
221	"FFF3: Disk media format bad"},
222	{0x04050000, 0, 1,
223	"3002: Addressed device failed to respond to selection"},
224	{0x04080000, 1, 1,
225	"3100: Device bus error"},
226	{0x04080100, 0, 1,
227	"3109: IOA timed out a device command"},
228	{0x04088000, 0, 0,
229	"3120: SCSI bus is not operational"},
230	{0x04088100, 0, 1,
231	"4100: Hard device bus fabric error"},
232	{0x04118000, 0, 1,
233	"9000: IOA reserved area data check"},
234	{0x04118100, 0, 1,
235	"9001: IOA reserved area invalid data pattern"},
236	{0x04118200, 0, 1,
237	"9002: IOA reserved area LRC error"},
238	{0x04320000, 0, 1,
239	"102E: Out of alternate sectors for disk storage"},
240	{0x04330000, 1, 1,
241	"FFF4: Data transfer underlength error"},
242	{0x04338000, 1, 1,
243	"FFF4: Data transfer overlength error"},
244	{0x043E0100, 0, 1,
245	"3400: Logical unit failure"},
246	{0x04408500, 0, 1,
247	"FFF4: Device microcode is corrupt"},
248	{0x04418000, 1, 1,
249	"8150: PCI bus error"},
250	{0x04430000, 1, 0,
251	"Unsupported device bus message received"},
252	{0x04440000, 1, 1,
253	"FFF4: Disk device problem"},
254	{0x04448200, 1, 1,
255	"8150: Permanent IOA failure"},
256	{0x04448300, 0, 1,
257	"3010: Disk device returned wrong response to IOA"},
258	{0x04448400, 0, 1,
259	"8151: IOA microcode error"},
260	{0x04448500, 0, 0,
261	"Device bus status error"},
262	{0x04448600, 0, 1,
263	"8157: IOA error requiring IOA reset to recover"},
264	{0x04490000, 0, 0,
265	"Message reject received from the device"},
266	{0x04449200, 0, 1,
267	"8008: A permanent cache battery pack failure occurred"},
268	{0x0444A000, 0, 1,
269	"9090: Disk unit has been modified after the last known status"},
270	{0x0444A200, 0, 1,
271	"9081: IOA detected device error"},
272	{0x0444A300, 0, 1,
273	"9082: IOA detected device error"},
274	{0x044A0000, 1, 1,
275	"3110: Device bus error, message or command phase"},
276	{0x04670400, 0, 1,
277	"9091: Incorrect hardware configuration change has been detected"},
278	{0x04678000, 0, 1,
279	"9073: Invalid multi-adapter configuration"},
280	{0x04678100, 0, 1,
281	"4010: Incorrect connection between cascaded expanders"},
282	{0x04678200, 0, 1,
283	"4020: Connections exceed IOA design limits"},
284	{0x04678300, 0, 1,
285	"4030: Incorrect multipath connection"},
286	{0x04679000, 0, 1,
287	"4110: Unsupported enclosure function"},
288	{0x046E0000, 0, 1,
289	"FFF4: Command to logical unit failed"},
290	{0x05240000, 1, 0,
291	"Illegal request, invalid request type or request packet"},
292	{0x05250000, 0, 0,
293	"Illegal request, invalid resource handle"},
294	{0x05258000, 0, 0,
295	"Illegal request, commands not allowed to this device"},
296	{0x05258100, 0, 0,
297	"Illegal request, command not allowed to a secondary adapter"},
298	{0x05260000, 0, 0,
299	"Illegal request, invalid field in parameter list"},
300	{0x05260100, 0, 0,
301	"Illegal request, parameter not supported"},
302	{0x05260200, 0, 0,
303	"Illegal request, parameter value invalid"},
304	{0x052C0000, 0, 0,
305	"Illegal request, command sequence error"},
306	{0x052C8000, 1, 0,
307	"Illegal request, dual adapter support not enabled"},
308	{0x06040500, 0, 1,
309	"9031: Array protection temporarily suspended, protection resuming"},
310	{0x06040600, 0, 1,
311	"9040: Array protection temporarily suspended, protection resuming"},
312	{0x06288000, 0, 1,
313	"3140: Device bus not ready to ready transition"},
314	{0x06290000, 0, 1,
315	"FFFB: SCSI bus was reset"},
316	{0x06290500, 0, 0,
317	"FFFE: SCSI bus transition to single ended"},
318	{0x06290600, 0, 0,
319	"FFFE: SCSI bus transition to LVD"},
320	{0x06298000, 0, 1,
321	"FFFB: SCSI bus was reset by another initiator"},
322	{0x063F0300, 0, 1,
323	"3029: A device replacement has occurred"},
324	{0x064C8000, 0, 1,
325	"9051: IOA cache data exists for a missing or failed device"},
326	{0x064C8100, 0, 1,
327	"9055: Auxiliary cache IOA contains cache data needed by the primary IOA"},
328	{0x06670100, 0, 1,
329	"9025: Disk unit is not supported at its physical location"},
330	{0x06670600, 0, 1,
331	"3020: IOA detected a SCSI bus configuration error"},
332	{0x06678000, 0, 1,
333	"3150: SCSI bus configuration error"},
334	{0x06678100, 0, 1,
335	"9074: Asymmetric advanced function disk configuration"},
336	{0x06678300, 0, 1,
337	"4040: Incomplete multipath connection between IOA and enclosure"},
338	{0x06678400, 0, 1,
339	"4041: Incomplete multipath connection between enclosure and device"},
340	{0x06678500, 0, 1,
341	"9075: Incomplete multipath connection between IOA and remote IOA"},
342	{0x06678600, 0, 1,
343	"9076: Configuration error, missing remote IOA"},
344	{0x06679100, 0, 1,
345	"4050: Enclosure does not support a required multipath function"},
346	{0x06690200, 0, 1,
347	"9041: Array protection temporarily suspended"},
348	{0x06698200, 0, 1,
349	"9042: Corrupt array parity detected on specified device"},
350	{0x066B0200, 0, 1,
351	"9030: Array no longer protected due to missing or failed disk unit"},
352	{0x066B8000, 0, 1,
353	"9071: Link operational transition"},
354	{0x066B8100, 0, 1,
355	"9072: Link not operational transition"},
356	{0x066B8200, 0, 1,
357	"9032: Array exposed but still protected"},
358	{0x066B9100, 0, 1,
359	"4061: Multipath redundancy level got better"},
360	{0x066B9200, 0, 1,
361	"4060: Multipath redundancy level got worse"},
362	{0x07270000, 0, 0,
363	"Failure due to other device"},
364	{0x07278000, 0, 1,
365	"9008: IOA does not support functions expected by devices"},
366	{0x07278100, 0, 1,
367	"9010: Cache data associated with attached devices cannot be found"},
368	{0x07278200, 0, 1,
369	"9011: Cache data belongs to devices other than those attached"},
370	{0x07278400, 0, 1,
371	"9020: Array missing 2 or more devices with only 1 device present"},
372	{0x07278500, 0, 1,
373	"9021: Array missing 2 or more devices with 2 or more devices present"},
374	{0x07278600, 0, 1,
375	"9022: Exposed array is missing a required device"},
376	{0x07278700, 0, 1,
377	"9023: Array member(s) not at required physical locations"},
378	{0x07278800, 0, 1,
379	"9024: Array not functional due to present hardware configuration"},
380	{0x07278900, 0, 1,
381	"9026: Array not functional due to present hardware configuration"},
382	{0x07278A00, 0, 1,
383	"9027: Array is missing a device and parity is out of sync"},
384	{0x07278B00, 0, 1,
385	"9028: Maximum number of arrays already exist"},
386	{0x07278C00, 0, 1,
387	"9050: Required cache data cannot be located for a disk unit"},
388	{0x07278D00, 0, 1,
389	"9052: Cache data exists for a device that has been modified"},
390	{0x07278F00, 0, 1,
391	"9054: IOA resources not available due to previous problems"},
392	{0x07279100, 0, 1,
393	"9092: Disk unit requires initialization before use"},
394	{0x07279200, 0, 1,
395	"9029: Incorrect hardware configuration change has been detected"},
396	{0x07279600, 0, 1,
397	"9060: One or more disk pairs are missing from an array"},
398	{0x07279700, 0, 1,
399	"9061: One or more disks are missing from an array"},
400	{0x07279800, 0, 1,
401	"9062: One or more disks are missing from an array"},
402	{0x07279900, 0, 1,
403	"9063: Maximum number of functional arrays has been exceeded"},
404	{0x0B260000, 0, 0,
405	"Aborted command, invalid descriptor"},
406	{0x0B5A0000, 0, 0,
407	"Command terminated by host"}
408};
409
410static const struct ipr_ses_table_entry ipr_ses_table[] = {
411	{ "2104-DL1        ", "XXXXXXXXXXXXXXXX", 80 },
412	{ "2104-TL1        ", "XXXXXXXXXXXXXXXX", 80 },
413	{ "HSBP07M P U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Hidive 7 slot */
414	{ "HSBP05M P U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Hidive 5 slot */
415	{ "HSBP05M S U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Bowtie */
416	{ "HSBP06E ASU2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* MartinFenning */
417	{ "2104-DU3        ", "XXXXXXXXXXXXXXXX", 160 },
418	{ "2104-TU3        ", "XXXXXXXXXXXXXXXX", 160 },
419	{ "HSBP04C RSU2SCSI", "XXXXXXX*XXXXXXXX", 160 },
420	{ "HSBP06E RSU2SCSI", "XXXXXXX*XXXXXXXX", 160 },
421	{ "St  V1S2        ", "XXXXXXXXXXXXXXXX", 160 },
422	{ "HSBPD4M  PU3SCSI", "XXXXXXX*XXXXXXXX", 160 },
423	{ "VSBPD1H   U3SCSI", "XXXXXXX*XXXXXXXX", 160 }
424};
425
426/*
427 *  Function Prototypes
428 */
429static int ipr_reset_alert(struct ipr_cmnd *);
430static void ipr_process_ccn(struct ipr_cmnd *);
431static void ipr_process_error(struct ipr_cmnd *);
432static void ipr_reset_ioa_job(struct ipr_cmnd *);
433static void ipr_initiate_ioa_reset(struct ipr_ioa_cfg *,
434				   enum ipr_shutdown_type);
435
436#ifdef CONFIG_SCSI_IPR_TRACE
437/**
438 * ipr_trc_hook - Add a trace entry to the driver trace
439 * @ipr_cmd:	ipr command struct
440 * @type:		trace type
441 * @add_data:	additional data
442 *
443 * Return value:
444 * 	none
445 **/
446static void ipr_trc_hook(struct ipr_cmnd *ipr_cmd,
447			 u8 type, u32 add_data)
448{
449	struct ipr_trace_entry *trace_entry;
450	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
451
452	trace_entry = &ioa_cfg->trace[ioa_cfg->trace_index++];
453	trace_entry->time = jiffies;
454	trace_entry->op_code = ipr_cmd->ioarcb.cmd_pkt.cdb[0];
455	trace_entry->type = type;
456	trace_entry->cmd_index = ipr_cmd->cmd_index;
457	trace_entry->res_handle = ipr_cmd->ioarcb.res_handle;
458	trace_entry->u.add_data = add_data;
459}
460#else
461#define ipr_trc_hook(ipr_cmd, type, add_data) do { } while(0)
462#endif
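/*
 * Note: trace_index is simply incremented above; it is assumed that its
 * declaration in ipr.h is sized such that the increment wraps and the
 * trace buffer behaves as a ring.
 */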
463
464/**
465 * ipr_reinit_ipr_cmnd - Re-initialize an IPR Cmnd block for reuse
466 * @ipr_cmd:	ipr command struct
467 *
468 * Return value:
469 * 	none
470 **/
471static void ipr_reinit_ipr_cmnd(struct ipr_cmnd *ipr_cmd)
472{
473	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
474	struct ipr_ioasa *ioasa = &ipr_cmd->ioasa;
475
476	memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt));
477	ioarcb->write_data_transfer_length = 0;
478	ioarcb->read_data_transfer_length = 0;
479	ioarcb->write_ioadl_len = 0;
480	ioarcb->read_ioadl_len = 0;
481	ioasa->ioasc = 0;
482	ioasa->residual_data_len = 0;
483
484	ipr_cmd->scsi_cmd = NULL;
485	ipr_cmd->sense_buffer[0] = 0;
486	ipr_cmd->dma_use_sg = 0;
487}
488
489/**
490 * ipr_init_ipr_cmnd - Initialize an IPR Cmnd block
491 * @ipr_cmd:	ipr command struct
492 *
493 * Return value:
494 * 	none
495 **/
496static void ipr_init_ipr_cmnd(struct ipr_cmnd *ipr_cmd)
497{
498	ipr_reinit_ipr_cmnd(ipr_cmd);
499	ipr_cmd->u.scratch = 0;
500	ipr_cmd->sibling = NULL;
501	init_timer(&ipr_cmd->timer);
502}
503
504/**
505 * ipr_get_free_ipr_cmnd - Get a free IPR Cmnd block
506 * @ioa_cfg:	ioa config struct
507 *
508 * Return value:
509 * 	pointer to ipr command struct
510 **/
511static
512struct ipr_cmnd *ipr_get_free_ipr_cmnd(struct ipr_ioa_cfg *ioa_cfg)
513{
514	struct ipr_cmnd *ipr_cmd;
515
516	ipr_cmd = list_entry(ioa_cfg->free_q.next, struct ipr_cmnd, queue);
517	list_del(&ipr_cmd->queue);
518	ipr_init_ipr_cmnd(ipr_cmd);
519
520	return ipr_cmd;
521}
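/*
 * Note: ipr_get_free_ipr_cmnd() assumes free_q is non-empty; no empty
 * check is performed before list_entry(), so callers are expected to
 * guarantee that a command block is available.
 */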
522
523/**
524 * ipr_unmap_sglist - Unmap scatterlist if mapped
525 * @ioa_cfg:	ioa config struct
526 * @ipr_cmd:	ipr command struct
527 *
528 * Return value:
529 * 	nothing
530 **/
531static void ipr_unmap_sglist(struct ipr_ioa_cfg *ioa_cfg,
532			     struct ipr_cmnd *ipr_cmd)
533{
534	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
535
536	if (ipr_cmd->dma_use_sg) {
537		if (scsi_cmd->use_sg > 0) {
538			pci_unmap_sg(ioa_cfg->pdev, scsi_cmd->request_buffer,
539				     scsi_cmd->use_sg,
540				     scsi_cmd->sc_data_direction);
541		} else {
542			pci_unmap_single(ioa_cfg->pdev, ipr_cmd->dma_handle,
543					 scsi_cmd->request_bufflen,
544					 scsi_cmd->sc_data_direction);
545		}
546	}
547}
548
549/**
550 * ipr_mask_and_clear_interrupts - Mask all and clear specified interrupts
551 * @ioa_cfg:	ioa config struct
552 * @clr_ints:     interrupts to clear
553 *
554 * This function masks all interrupts on the adapter, then clears the
555 * interrupts specified in the mask
556 *
557 * Return value:
558 * 	none
559 **/
560static void ipr_mask_and_clear_interrupts(struct ipr_ioa_cfg *ioa_cfg,
561					  u32 clr_ints)
562{
563	volatile u32 int_reg;
564
565	/* Stop new interrupts */
566	ioa_cfg->allow_interrupts = 0;
567
568	/* Set interrupt mask to stop all new interrupts */
569	writel(~0, ioa_cfg->regs.set_interrupt_mask_reg);
570
571	/* Clear any pending interrupts */
572	writel(clr_ints, ioa_cfg->regs.clr_interrupt_reg);
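	/* The read below is assumed to act as a flush of the posted
	 * MMIO writes before this function returns */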
573	int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
574}
575
576/**
577 * ipr_save_pcix_cmd_reg - Save PCI-X command register
578 * @ioa_cfg:	ioa config struct
579 *
580 * Return value:
581 * 	0 on success / -EIO on failure
582 **/
583static int ipr_save_pcix_cmd_reg(struct ipr_ioa_cfg *ioa_cfg)
584{
585	int pcix_cmd_reg = pci_find_capability(ioa_cfg->pdev, PCI_CAP_ID_PCIX);
586
587	if (pcix_cmd_reg == 0) {
588		dev_err(&ioa_cfg->pdev->dev, "Failed to save PCI-X command register\n");
589		return -EIO;
590	}
591
592	if (pci_read_config_word(ioa_cfg->pdev, pcix_cmd_reg + PCI_X_CMD,
593				 &ioa_cfg->saved_pcix_cmd_reg) != PCIBIOS_SUCCESSFUL) {
594		dev_err(&ioa_cfg->pdev->dev, "Failed to save PCI-X command register\n");
595		return -EIO;
596	}
597
598	ioa_cfg->saved_pcix_cmd_reg |= PCI_X_CMD_DPERR_E | PCI_X_CMD_ERO;
599	return 0;
600}
601
602/**
603 * ipr_set_pcix_cmd_reg - Setup PCI-X command register
604 * @ioa_cfg:	ioa config struct
605 *
606 * Return value:
607 * 	0 on success / -EIO on failure
608 **/
609static int ipr_set_pcix_cmd_reg(struct ipr_ioa_cfg *ioa_cfg)
610{
611	int pcix_cmd_reg = pci_find_capability(ioa_cfg->pdev, PCI_CAP_ID_PCIX);
612
613	if (pcix_cmd_reg) {
614		if (pci_write_config_word(ioa_cfg->pdev, pcix_cmd_reg + PCI_X_CMD,
615					  ioa_cfg->saved_pcix_cmd_reg) != PCIBIOS_SUCCESSFUL) {
616			dev_err(&ioa_cfg->pdev->dev, "Failed to setup PCI-X command register\n");
617			return -EIO;
618		}
619	} else {
620		dev_err(&ioa_cfg->pdev->dev,
621			"Failed to setup PCI-X command register\n");
622		return -EIO;
623	}
624
625	return 0;
626}
627
628/**
629 * ipr_scsi_eh_done - mid-layer done function for aborted ops
630 * @ipr_cmd:	ipr command struct
631 *
632 * This function is invoked by the interrupt handler for
633 * ops generated by the SCSI mid-layer which are being aborted.
634 *
635 * Return value:
636 * 	none
637 **/
638static void ipr_scsi_eh_done(struct ipr_cmnd *ipr_cmd)
639{
640	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
641	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
642
643	scsi_cmd->result |= (DID_ERROR << 16);
644
645	ipr_unmap_sglist(ioa_cfg, ipr_cmd);
646	scsi_cmd->scsi_done(scsi_cmd);
647	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
648}
649
650/**
651 * ipr_fail_all_ops - Fails all outstanding ops.
652 * @ioa_cfg:	ioa config struct
653 *
654 * This function fails all outstanding ops.
655 *
656 * Return value:
657 * 	none
658 **/
659static void ipr_fail_all_ops(struct ipr_ioa_cfg *ioa_cfg)
660{
661	struct ipr_cmnd *ipr_cmd, *temp;
662
663	ENTER;
664	list_for_each_entry_safe(ipr_cmd, temp, &ioa_cfg->pending_q, queue) {
665		list_del(&ipr_cmd->queue);
666
667		ipr_cmd->ioasa.ioasc = cpu_to_be32(IPR_IOASC_IOA_WAS_RESET);
668		ipr_cmd->ioasa.ilid = cpu_to_be32(IPR_DRIVER_ILID);
669
670		if (ipr_cmd->scsi_cmd)
671			ipr_cmd->done = ipr_scsi_eh_done;
672
673		ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH, IPR_IOASC_IOA_WAS_RESET);
674		del_timer(&ipr_cmd->timer);
675		ipr_cmd->done(ipr_cmd);
676	}
677
678	LEAVE;
679}
680
681/**
682 * ipr_do_req -  Send driver initiated requests.
683 * @ipr_cmd:		ipr command struct
684 * @done:			done function
685 * @timeout_func:	timeout function
686 * @timeout:		timeout value
687 *
688 * This function sends the specified command to the adapter with the
689 * timeout given. The done function is invoked on command completion.
690 *
691 * Return value:
692 * 	none
693 **/
694static void ipr_do_req(struct ipr_cmnd *ipr_cmd,
695		       void (*done) (struct ipr_cmnd *),
696		       void (*timeout_func) (struct ipr_cmnd *), u32 timeout)
697{
698	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
699
700	list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);
701
702	ipr_cmd->done = done;
703
704	ipr_cmd->timer.data = (unsigned long) ipr_cmd;
705	ipr_cmd->timer.expires = jiffies + timeout;
706	ipr_cmd->timer.function = (void (*)(unsigned long))timeout_func;
707
708	add_timer(&ipr_cmd->timer);
709
710	ipr_trc_hook(ipr_cmd, IPR_TRACE_START, 0);
711
712	mb();
713	writel(be32_to_cpu(ipr_cmd->ioarcb.ioarcb_host_pci_addr),
714	       ioa_cfg->regs.ioarrin_reg);
715}
716
717/**
718 * ipr_internal_cmd_done - Op done function for an internally generated op.
719 * @ipr_cmd:	ipr command struct
720 *
721 * This function is the op done function for an internally generated,
722 * blocking op. It simply wakes the sleeping thread.
723 *
724 * Return value:
725 * 	none
726 **/
727static void ipr_internal_cmd_done(struct ipr_cmnd *ipr_cmd)
728{
729	if (ipr_cmd->sibling)
730		ipr_cmd->sibling = NULL;
731	else
732		complete(&ipr_cmd->completion);
733}
734
735/**
736 * ipr_send_blocking_cmd - Send command and sleep on its completion.
737 * @ipr_cmd:	ipr command struct
738 * @timeout_func:	function to invoke if command times out
739 * @timeout:	timeout
740 *
741 * Return value:
742 * 	none
743 **/
744static void ipr_send_blocking_cmd(struct ipr_cmnd *ipr_cmd,
745				  void (*timeout_func) (struct ipr_cmnd *ipr_cmd),
746				  u32 timeout)
747{
748	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
749
750	init_completion(&ipr_cmd->completion);
751	ipr_do_req(ipr_cmd, ipr_internal_cmd_done, timeout_func, timeout);
752
753	spin_unlock_irq(ioa_cfg->host->host_lock);
754	wait_for_completion(&ipr_cmd->completion);
755	spin_lock_irq(ioa_cfg->host->host_lock);
756}
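/*
 * Note: ipr_send_blocking_cmd() is entered with host_lock held; the
 * lock is dropped while sleeping on the completion and re-acquired
 * before returning (see the spin_unlock_irq/spin_lock_irq pair above).
 */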
757
758/**
759 * ipr_send_hcam - Send an HCAM to the adapter.
760 * @ioa_cfg:	ioa config struct
761 * @type:		HCAM type
762 * @hostrcb:	hostrcb struct
763 *
764 * This function will send a Host Controlled Async command to the adapter.
765 * If HCAMs are currently not allowed to be issued to the adapter, it will
766 * place the hostrcb on the free queue.
767 *
768 * Return value:
769 * 	none
770 **/
771static void ipr_send_hcam(struct ipr_ioa_cfg *ioa_cfg, u8 type,
772			  struct ipr_hostrcb *hostrcb)
773{
774	struct ipr_cmnd *ipr_cmd;
775	struct ipr_ioarcb *ioarcb;
776
777	if (ioa_cfg->allow_cmds) {
778		ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
779		list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);
780		list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_pending_q);
781
782		ipr_cmd->u.hostrcb = hostrcb;
783		ioarcb = &ipr_cmd->ioarcb;
784
785		ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
786		ioarcb->cmd_pkt.request_type = IPR_RQTYPE_HCAM;
787		ioarcb->cmd_pkt.cdb[0] = IPR_HOST_CONTROLLED_ASYNC;
788		ioarcb->cmd_pkt.cdb[1] = type;
789		ioarcb->cmd_pkt.cdb[7] = (sizeof(hostrcb->hcam) >> 8) & 0xff;
790		ioarcb->cmd_pkt.cdb[8] = sizeof(hostrcb->hcam) & 0xff;
791
792		ioarcb->read_data_transfer_length = cpu_to_be32(sizeof(hostrcb->hcam));
793		ioarcb->read_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
794		ipr_cmd->ioadl[0].flags_and_data_len =
795			cpu_to_be32(IPR_IOADL_FLAGS_READ_LAST | sizeof(hostrcb->hcam));
796		ipr_cmd->ioadl[0].address = cpu_to_be32(hostrcb->hostrcb_dma);
797
798		if (type == IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE)
799			ipr_cmd->done = ipr_process_ccn;
800		else
801			ipr_cmd->done = ipr_process_error;
802
803		ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_IOA_RES_ADDR);
804
805		mb();
806		writel(be32_to_cpu(ipr_cmd->ioarcb.ioarcb_host_pci_addr),
807		       ioa_cfg->regs.ioarrin_reg);
808	} else {
809		list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
810	}
811}
812
813/**
814 * ipr_init_res_entry - Initialize a resource entry struct.
815 * @res:	resource entry struct
816 *
817 * Return value:
818 * 	none
819 **/
820static void ipr_init_res_entry(struct ipr_resource_entry *res)
821{
822	res->needs_sync_complete = 0;
823	res->in_erp = 0;
824	res->add_to_ml = 0;
825	res->del_from_ml = 0;
826	res->resetting_device = 0;
827	res->sdev = NULL;
828}
829
830/**
831 * ipr_handle_config_change - Handle a config change from the adapter
832 * @ioa_cfg:	ioa config struct
833 * @hostrcb:	hostrcb
834 *
835 * Return value:
836 * 	none
837 **/
838static void ipr_handle_config_change(struct ipr_ioa_cfg *ioa_cfg,
839			      struct ipr_hostrcb *hostrcb)
840{
841	struct ipr_resource_entry *res = NULL;
842	struct ipr_config_table_entry *cfgte;
843	u32 is_ndn = 1;
844
845	cfgte = &hostrcb->hcam.u.ccn.cfgte;
846
847	list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
848		if (!memcmp(&res->cfgte.res_addr, &cfgte->res_addr,
849			    sizeof(cfgte->res_addr))) {
850			is_ndn = 0;
851			break;
852		}
853	}
854
855	if (is_ndn) {
856		if (list_empty(&ioa_cfg->free_res_q)) {
857			ipr_send_hcam(ioa_cfg,
858				      IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE,
859				      hostrcb);
860			return;
861		}
862
863		res = list_entry(ioa_cfg->free_res_q.next,
864				 struct ipr_resource_entry, queue);
865
866		list_del(&res->queue);
867		ipr_init_res_entry(res);
868		list_add_tail(&res->queue, &ioa_cfg->used_res_q);
869	}
870
871	memcpy(&res->cfgte, cfgte, sizeof(struct ipr_config_table_entry));
872
873	if (hostrcb->hcam.notify_type == IPR_HOST_RCB_NOTIF_TYPE_REM_ENTRY) {
874		if (res->sdev) {
875			res->del_from_ml = 1;
876			res->cfgte.res_handle = IPR_INVALID_RES_HANDLE;
877			if (ioa_cfg->allow_ml_add_del)
878				schedule_work(&ioa_cfg->work_q);
879		} else
880			list_move_tail(&res->queue, &ioa_cfg->free_res_q);
881	} else if (!res->sdev) {
882		res->add_to_ml = 1;
883		if (ioa_cfg->allow_ml_add_del)
884			schedule_work(&ioa_cfg->work_q);
885	}
886
887	ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
888}
889
890/**
891 * ipr_process_ccn - Op done function for a CCN.
892 * @ipr_cmd:	ipr command struct
893 *
894 * This function is the op done function for a configuration
895 * change notification host controlled async from the adapter.
896 *
897 * Return value:
898 * 	none
899 **/
900static void ipr_process_ccn(struct ipr_cmnd *ipr_cmd)
901{
902	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
903	struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb;
904	u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
905
906	list_del(&hostrcb->queue);
907	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
908
909	if (ioasc) {
910		if (ioasc != IPR_IOASC_IOA_WAS_RESET)
911			dev_err(&ioa_cfg->pdev->dev,
912				"Host RCB failed with IOASC: 0x%08X\n", ioasc);
913
914		ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
915	} else {
916		ipr_handle_config_change(ioa_cfg, hostrcb);
917	}
918}
919
920/**
921 * ipr_log_vpd - Log the passed VPD to the error log.
922 * @vpd:		vendor/product id/sn struct
923 *
924 * Return value:
925 * 	none
926 **/
927static void ipr_log_vpd(struct ipr_vpd *vpd)
928{
929	char buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN
930		    + IPR_SERIAL_NUM_LEN];
931
932	memcpy(buffer, vpd->vpids.vendor_id, IPR_VENDOR_ID_LEN);
933	memcpy(buffer + IPR_VENDOR_ID_LEN, vpd->vpids.product_id,
934	       IPR_PROD_ID_LEN);
935	buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN] = '\0';
936	ipr_err("Vendor/Product ID: %s\n", buffer);
937
938	memcpy(buffer, vpd->sn, IPR_SERIAL_NUM_LEN);
939	buffer[IPR_SERIAL_NUM_LEN] = '\0';
940	ipr_err("    Serial Number: %s\n", buffer);
941}
942
943/**
944 * ipr_log_ext_vpd - Log the passed extended VPD to the error log.
945 * @vpd:		vendor/product id/sn/wwn struct
946 *
947 * Return value:
948 * 	none
949 **/
950static void ipr_log_ext_vpd(struct ipr_ext_vpd *vpd)
951{
952	ipr_log_vpd(&vpd->vpd);
953	ipr_err("    WWN: %08X%08X\n", be32_to_cpu(vpd->wwid[0]),
954		be32_to_cpu(vpd->wwid[1]));
955}
956
957/**
958 * ipr_log_enhanced_cache_error - Log a cache error.
959 * @ioa_cfg:	ioa config struct
960 * @hostrcb:	hostrcb struct
961 *
962 * Return value:
963 * 	none
964 **/
965static void ipr_log_enhanced_cache_error(struct ipr_ioa_cfg *ioa_cfg,
966					 struct ipr_hostrcb *hostrcb)
967{
968	struct ipr_hostrcb_type_12_error *error =
969		&hostrcb->hcam.u.error.u.type_12_error;
970
971	ipr_err("-----Current Configuration-----\n");
972	ipr_err("Cache Directory Card Information:\n");
973	ipr_log_ext_vpd(&error->ioa_vpd);
974	ipr_err("Adapter Card Information:\n");
975	ipr_log_ext_vpd(&error->cfc_vpd);
976
977	ipr_err("-----Expected Configuration-----\n");
978	ipr_err("Cache Directory Card Information:\n");
979	ipr_log_ext_vpd(&error->ioa_last_attached_to_cfc_vpd);
980	ipr_err("Adapter Card Information:\n");
981	ipr_log_ext_vpd(&error->cfc_last_attached_to_ioa_vpd);
982
983	ipr_err("Additional IOA Data: %08X %08X %08X\n",
984		     be32_to_cpu(error->ioa_data[0]),
985		     be32_to_cpu(error->ioa_data[1]),
986		     be32_to_cpu(error->ioa_data[2]));
987}
988
989/**
990 * ipr_log_cache_error - Log a cache error.
991 * @ioa_cfg:	ioa config struct
992 * @hostrcb:	hostrcb struct
993 *
994 * Return value:
995 * 	none
996 **/
997static void ipr_log_cache_error(struct ipr_ioa_cfg *ioa_cfg,
998				struct ipr_hostrcb *hostrcb)
999{
1000	struct ipr_hostrcb_type_02_error *error =
1001		&hostrcb->hcam.u.error.u.type_02_error;
1002
1003	ipr_err("-----Current Configuration-----\n");
1004	ipr_err("Cache Directory Card Information:\n");
1005	ipr_log_vpd(&error->ioa_vpd);
1006	ipr_err("Adapter Card Information:\n");
1007	ipr_log_vpd(&error->cfc_vpd);
1008
1009	ipr_err("-----Expected Configuration-----\n");
1010	ipr_err("Cache Directory Card Information:\n");
1011	ipr_log_vpd(&error->ioa_last_attached_to_cfc_vpd);
1012	ipr_err("Adapter Card Information:\n");
1013	ipr_log_vpd(&error->cfc_last_attached_to_ioa_vpd);
1014
1015	ipr_err("Additional IOA Data: %08X %08X %08X\n",
1016		     be32_to_cpu(error->ioa_data[0]),
1017		     be32_to_cpu(error->ioa_data[1]),
1018		     be32_to_cpu(error->ioa_data[2]));
1019}
1020
1021/**
1022 * ipr_log_enhanced_config_error - Log a configuration error.
1023 * @ioa_cfg:	ioa config struct
1024 * @hostrcb:	hostrcb struct
1025 *
1026 * Return value:
1027 * 	none
1028 **/
1029static void ipr_log_enhanced_config_error(struct ipr_ioa_cfg *ioa_cfg,
1030					  struct ipr_hostrcb *hostrcb)
1031{
1032	int errors_logged, i;
1033	struct ipr_hostrcb_device_data_entry_enhanced *dev_entry;
1034	struct ipr_hostrcb_type_13_error *error;
1035
1036	error = &hostrcb->hcam.u.error.u.type_13_error;
1037	errors_logged = be32_to_cpu(error->errors_logged);
1038
1039	ipr_err("Device Errors Detected/Logged: %d/%d\n",
1040		be32_to_cpu(error->errors_detected), errors_logged);
1041
1042	dev_entry = error->dev;
1043
1044	for (i = 0; i < errors_logged; i++, dev_entry++) {
1045		ipr_err_separator;
1046
1047		ipr_phys_res_err(ioa_cfg, dev_entry->dev_res_addr, "Device %d", i + 1);
1048		ipr_log_ext_vpd(&dev_entry->vpd);
1049
1050		ipr_err("-----New Device Information-----\n");
1051		ipr_log_ext_vpd(&dev_entry->new_vpd);
1052
1053		ipr_err("Cache Directory Card Information:\n");
1054		ipr_log_ext_vpd(&dev_entry->ioa_last_with_dev_vpd);
1055
1056		ipr_err("Adapter Card Information:\n");
1057		ipr_log_ext_vpd(&dev_entry->cfc_last_with_dev_vpd);
1058	}
1059}
1060
1061/**
1062 * ipr_log_config_error - Log a configuration error.
1063 * @ioa_cfg:	ioa config struct
1064 * @hostrcb:	hostrcb struct
1065 *
1066 * Return value:
1067 * 	none
1068 **/
1069static void ipr_log_config_error(struct ipr_ioa_cfg *ioa_cfg,
1070				 struct ipr_hostrcb *hostrcb)
1071{
1072	int errors_logged, i;
1073	struct ipr_hostrcb_device_data_entry *dev_entry;
1074	struct ipr_hostrcb_type_03_error *error;
1075
1076	error = &hostrcb->hcam.u.error.u.type_03_error;
1077	errors_logged = be32_to_cpu(error->errors_logged);
1078
1079	ipr_err("Device Errors Detected/Logged: %d/%d\n",
1080		be32_to_cpu(error->errors_detected), errors_logged);
1081
1082	dev_entry = error->dev;
1083
1084	for (i = 0; i < errors_logged; i++, dev_entry++) {
1085		ipr_err_separator;
1086
1087		ipr_phys_res_err(ioa_cfg, dev_entry->dev_res_addr, "Device %d", i + 1);
1088		ipr_log_vpd(&dev_entry->vpd);
1089
1090		ipr_err("-----New Device Information-----\n");
1091		ipr_log_vpd(&dev_entry->new_vpd);
1092
1093		ipr_err("Cache Directory Card Information:\n");
1094		ipr_log_vpd(&dev_entry->ioa_last_with_dev_vpd);
1095
1096		ipr_err("Adapter Card Information:\n");
1097		ipr_log_vpd(&dev_entry->cfc_last_with_dev_vpd);
1098
1099		ipr_err("Additional IOA Data: %08X %08X %08X %08X %08X\n",
1100			be32_to_cpu(dev_entry->ioa_data[0]),
1101			be32_to_cpu(dev_entry->ioa_data[1]),
1102			be32_to_cpu(dev_entry->ioa_data[2]),
1103			be32_to_cpu(dev_entry->ioa_data[3]),
1104			be32_to_cpu(dev_entry->ioa_data[4]));
1105	}
1106}
1107
1108/**
1109 * ipr_log_enhanced_array_error - Log an array configuration error.
1110 * @ioa_cfg:	ioa config struct
1111 * @hostrcb:	hostrcb struct
1112 *
1113 * Return value:
1114 * 	none
1115 **/
1116static void ipr_log_enhanced_array_error(struct ipr_ioa_cfg *ioa_cfg,
1117					 struct ipr_hostrcb *hostrcb)
1118{
1119	int i, num_entries;
1120	struct ipr_hostrcb_type_14_error *error;
1121	struct ipr_hostrcb_array_data_entry_enhanced *array_entry;
1122	const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };
1123
1124	error = &hostrcb->hcam.u.error.u.type_14_error;
1125
1126	ipr_err_separator;
1127
1128	ipr_err("RAID %s Array Configuration: %d:%d:%d:%d\n",
1129		error->protection_level,
1130		ioa_cfg->host->host_no,
1131		error->last_func_vset_res_addr.bus,
1132		error->last_func_vset_res_addr.target,
1133		error->last_func_vset_res_addr.lun);
1134
1135	ipr_err_separator;
1136
1137	array_entry = error->array_member;
1138	num_entries = min_t(u32, be32_to_cpu(error->num_entries),
1139			    ARRAY_SIZE(error->array_member));
1140
1141	for (i = 0; i < num_entries; i++, array_entry++) {
1142		if (!memcmp(array_entry->vpd.vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
1143			continue;
1144
1145		if (be32_to_cpu(error->exposed_mode_adn) == i)
1146			ipr_err("Exposed Array Member %d:\n", i);
1147		else
1148			ipr_err("Array Member %d:\n", i);
1149
1150		ipr_log_ext_vpd(&array_entry->vpd);
1151		ipr_phys_res_err(ioa_cfg, array_entry->dev_res_addr, "Current Location");
1152		ipr_phys_res_err(ioa_cfg, array_entry->expected_dev_res_addr,
1153				 "Expected Location");
1154
1155		ipr_err_separator;
1156	}
1157}
1158
1159/**
1160 * ipr_log_array_error - Log an array configuration error.
1161 * @ioa_cfg:	ioa config struct
1162 * @hostrcb:	hostrcb struct
1163 *
1164 * Return value:
1165 * 	none
1166 **/
1167static void ipr_log_array_error(struct ipr_ioa_cfg *ioa_cfg,
1168				struct ipr_hostrcb *hostrcb)
1169{
1170	int i;
1171	struct ipr_hostrcb_type_04_error *error;
1172	struct ipr_hostrcb_array_data_entry *array_entry;
1173	const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };
1174
1175	error = &hostrcb->hcam.u.error.u.type_04_error;
1176
1177	ipr_err_separator;
1178
1179	ipr_err("RAID %s Array Configuration: %d:%d:%d:%d\n",
1180		error->protection_level,
1181		ioa_cfg->host->host_no,
1182		error->last_func_vset_res_addr.bus,
1183		error->last_func_vset_res_addr.target,
1184		error->last_func_vset_res_addr.lun);
1185
1186	ipr_err_separator;
1187
1188	array_entry = error->array_member;
1189
1190	for (i = 0; i < 18; i++) {
1191		if (!memcmp(array_entry->vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
1192			continue;
1193
1194		if (be32_to_cpu(error->exposed_mode_adn) == i)
1195			ipr_err("Exposed Array Member %d:\n", i);
1196		else
1197			ipr_err("Array Member %d:\n", i);
1198
1199		ipr_log_vpd(&array_entry->vpd);
1200
1201		ipr_phys_res_err(ioa_cfg, array_entry->dev_res_addr, "Current Location");
1202		ipr_phys_res_err(ioa_cfg, array_entry->expected_dev_res_addr,
1203				 "Expected Location");
1204
1205		ipr_err_separator;
1206
1207		if (i == 9)
1208			array_entry = error->array_member2;
1209		else
1210			array_entry++;
1211	}
1212}
1213
1214/**
1215 * ipr_log_hex_data - Log additional hex IOA error data.
1216 * @data:		IOA error data
1217 * @len:		data length
1218 *
1219 * Return value:
1220 * 	none
1221 **/
1222static void ipr_log_hex_data(u32 *data, int len)
1223{
1224	int i;
1225
1226	if (len == 0)
1227		return;
1228
1229	for (i = 0; i < len / 4; i += 4) {
1230		ipr_err("%08X: %08X %08X %08X %08X\n", i*4,
1231			be32_to_cpu(data[i]),
1232			be32_to_cpu(data[i+1]),
1233			be32_to_cpu(data[i+2]),
1234			be32_to_cpu(data[i+3]));
1235	}
1236}
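/*
 * Illustrative output of ipr_log_hex_data() for a 32-byte buffer
 * (values made up):
 *
 *	00000000: 00000001 00000002 00000003 00000004
 *	00000010: 00000005 00000006 00000007 00000008
 */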
1237
1238/**
1239 * ipr_log_enhanced_dual_ioa_error - Log an enhanced dual adapter error.
1240 * @ioa_cfg:	ioa config struct
1241 * @hostrcb:	hostrcb struct
1242 *
1243 * Return value:
1244 * 	none
1245 **/
1246static void ipr_log_enhanced_dual_ioa_error(struct ipr_ioa_cfg *ioa_cfg,
1247					    struct ipr_hostrcb *hostrcb)
1248{
1249	struct ipr_hostrcb_type_17_error *error;
1250
1251	error = &hostrcb->hcam.u.error.u.type_17_error;
1252	error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
1253
1254	ipr_err("%s\n", error->failure_reason);
1255	ipr_err("Remote Adapter VPD:\n");
1256	ipr_log_ext_vpd(&error->vpd);
1257	ipr_log_hex_data(error->data,
1258			 be32_to_cpu(hostrcb->hcam.length) -
1259			 (offsetof(struct ipr_hostrcb_error, u) +
1260			  offsetof(struct ipr_hostrcb_type_17_error, data)));
1261}
1262
1263/**
1264 * ipr_log_dual_ioa_error - Log a dual adapter error.
1265 * @ioa_cfg:	ioa config struct
1266 * @hostrcb:	hostrcb struct
1267 *
1268 * Return value:
1269 * 	none
1270 **/
1271static void ipr_log_dual_ioa_error(struct ipr_ioa_cfg *ioa_cfg,
1272				   struct ipr_hostrcb *hostrcb)
1273{
1274	struct ipr_hostrcb_type_07_error *error;
1275
1276	error = &hostrcb->hcam.u.error.u.type_07_error;
1277	error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
1278
1279	ipr_err("%s\n", error->failure_reason);
1280	ipr_err("Remote Adapter VPD:\n");
1281	ipr_log_vpd(&error->vpd);
1282	ipr_log_hex_data(error->data,
1283			 be32_to_cpu(hostrcb->hcam.length) -
1284			 (offsetof(struct ipr_hostrcb_error, u) +
1285			  offsetof(struct ipr_hostrcb_type_07_error, data)));
1286}
1287
1288/**
1289 * ipr_log_generic_error - Log an adapter error.
1290 * @ioa_cfg:	ioa config struct
1291 * @hostrcb:	hostrcb struct
1292 *
1293 * Return value:
1294 * 	none
1295 **/
1296static void ipr_log_generic_error(struct ipr_ioa_cfg *ioa_cfg,
1297				  struct ipr_hostrcb *hostrcb)
1298{
1299	ipr_log_hex_data(hostrcb->hcam.u.raw.data,
1300			 be32_to_cpu(hostrcb->hcam.length));
1301}
1302
1303/**
1304 * ipr_get_error - Find the specified IOASC in the ipr_error_table.
1305 * @ioasc:	IOASC
1306 *
1307 * This function will return the index into the ipr_error_table
1308 * for the specified IOASC. If the IOASC is not in the table,
1309 * 0 will be returned, which points to the entry used for unknown errors.
1310 *
1311 * Return value:
1312 * 	index into the ipr_error_table
1313 **/
1314static u32 ipr_get_error(u32 ioasc)
1315{
1316	int i;
1317
1318	for (i = 0; i < ARRAY_SIZE(ipr_error_table); i++)
1319		if (ipr_error_table[i].ioasc == ioasc)
1320			return i;
1321
1322	return 0;
1323}
1324
1325/**
1326 * ipr_handle_log_data - Log an adapter error.
1327 * @ioa_cfg:	ioa config struct
1328 * @hostrcb:	hostrcb struct
1329 *
1330 * This function logs an adapter error to the system.
1331 *
1332 * Return value:
1333 * 	none
1334 **/
1335static void ipr_handle_log_data(struct ipr_ioa_cfg *ioa_cfg,
1336				struct ipr_hostrcb *hostrcb)
1337{
1338	u32 ioasc;
1339	int error_index;
1340
1341	if (hostrcb->hcam.notify_type != IPR_HOST_RCB_NOTIF_TYPE_ERROR_LOG_ENTRY)
1342		return;
1343
1344	if (hostrcb->hcam.notifications_lost == IPR_HOST_RCB_NOTIFICATIONS_LOST)
1345		dev_err(&ioa_cfg->pdev->dev, "Error notifications lost\n");
1346
1347	ioasc = be32_to_cpu(hostrcb->hcam.u.error.failing_dev_ioasc);
1348
1349	if (ioasc == IPR_IOASC_BUS_WAS_RESET ||
1350	    ioasc == IPR_IOASC_BUS_WAS_RESET_BY_OTHER) {
1351		/* Tell the midlayer we had a bus reset so it will handle the UA properly */
1352		scsi_report_bus_reset(ioa_cfg->host,
1353				      hostrcb->hcam.u.error.failing_dev_res_addr.bus);
1354	}
1355
1356	error_index = ipr_get_error(ioasc);
1357
1358	if (!ipr_error_table[error_index].log_hcam)
1359		return;
1360
1361	if (ipr_is_device(&hostrcb->hcam.u.error.failing_dev_res_addr)) {
1362		ipr_ra_err(ioa_cfg, hostrcb->hcam.u.error.failing_dev_res_addr,
1363			   "%s\n", ipr_error_table[error_index].error);
1364	} else {
1365		dev_err(&ioa_cfg->pdev->dev, "%s\n",
1366			ipr_error_table[error_index].error);
1367	}
1368
1369	/* Set indication we have logged an error */
1370	ioa_cfg->errors_logged++;
1371
1372	if (ioa_cfg->log_level < IPR_DEFAULT_LOG_LEVEL)
1373		return;
1374	if (be32_to_cpu(hostrcb->hcam.length) > sizeof(hostrcb->hcam.u.raw))
1375		hostrcb->hcam.length = cpu_to_be32(sizeof(hostrcb->hcam.u.raw));
1376
1377	switch (hostrcb->hcam.overlay_id) {
1378	case IPR_HOST_RCB_OVERLAY_ID_2:
1379		ipr_log_cache_error(ioa_cfg, hostrcb);
1380		break;
1381	case IPR_HOST_RCB_OVERLAY_ID_3:
1382		ipr_log_config_error(ioa_cfg, hostrcb);
1383		break;
1384	case IPR_HOST_RCB_OVERLAY_ID_4:
1385	case IPR_HOST_RCB_OVERLAY_ID_6:
1386		ipr_log_array_error(ioa_cfg, hostrcb);
1387		break;
1388	case IPR_HOST_RCB_OVERLAY_ID_7:
1389		ipr_log_dual_ioa_error(ioa_cfg, hostrcb);
1390		break;
1391	case IPR_HOST_RCB_OVERLAY_ID_12:
1392		ipr_log_enhanced_cache_error(ioa_cfg, hostrcb);
1393		break;
1394	case IPR_HOST_RCB_OVERLAY_ID_13:
1395		ipr_log_enhanced_config_error(ioa_cfg, hostrcb);
1396		break;
1397	case IPR_HOST_RCB_OVERLAY_ID_14:
1398	case IPR_HOST_RCB_OVERLAY_ID_16:
1399		ipr_log_enhanced_array_error(ioa_cfg, hostrcb);
1400		break;
1401	case IPR_HOST_RCB_OVERLAY_ID_17:
1402		ipr_log_enhanced_dual_ioa_error(ioa_cfg, hostrcb);
1403		break;
1404	case IPR_HOST_RCB_OVERLAY_ID_1:
1405	case IPR_HOST_RCB_OVERLAY_ID_DEFAULT:
1406	default:
1407		ipr_log_generic_error(ioa_cfg, hostrcb);
1408		break;
1409	}
1410}
1411
1412/**
1413 * ipr_process_error - Op done function for an adapter error log.
1414 * @ipr_cmd:	ipr command struct
1415 *
1416 * This function is the op done function for an error log host
1417 * controlled async from the adapter. It will log the error and
1418 * send the HCAM back to the adapter.
1419 *
1420 * Return value:
1421 * 	none
1422 **/
1423static void ipr_process_error(struct ipr_cmnd *ipr_cmd)
1424{
1425	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
1426	struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb;
1427	u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
1428
1429	list_del(&hostrcb->queue);
1430	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
1431
1432	if (!ioasc) {
1433		ipr_handle_log_data(ioa_cfg, hostrcb);
1434	} else if (ioasc != IPR_IOASC_IOA_WAS_RESET) {
1435		dev_err(&ioa_cfg->pdev->dev,
1436			"Host RCB failed with IOASC: 0x%08X\n", ioasc);
1437	}
1438
1439	ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_LOG_DATA, hostrcb);
1440}
1441
1442/**
1443 * ipr_timeout -  An internally generated op has timed out.
1444 * @ipr_cmd:	ipr command struct
1445 *
1446 * This function blocks host requests and initiates an
1447 * adapter reset.
1448 *
1449 * Return value:
1450 * 	none
1451 **/
1452static void ipr_timeout(struct ipr_cmnd *ipr_cmd)
1453{
1454	unsigned long lock_flags = 0;
1455	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
1456
1457	ENTER;
1458	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
1459
1460	ioa_cfg->errors_logged++;
1461	dev_err(&ioa_cfg->pdev->dev,
1462		"Adapter being reset due to command timeout.\n");
1463
1464	if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
1465		ioa_cfg->sdt_state = GET_DUMP;
1466
1467	if (!ioa_cfg->in_reset_reload || ioa_cfg->reset_cmd == ipr_cmd)
1468		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
1469
1470	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
1471	LEAVE;
1472}
1473
1474/**
1475 * ipr_oper_timeout -  Adapter timed out transitioning to operational
1476 * @ipr_cmd:	ipr command struct
1477 *
1478 * This function blocks host requests and initiates an
1479 * adapter reset.
1480 *
1481 * Return value:
1482 * 	none
1483 **/
1484static void ipr_oper_timeout(struct ipr_cmnd *ipr_cmd)
1485{
1486	unsigned long lock_flags = 0;
1487	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
1488
1489	ENTER;
1490	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
1491
1492	ioa_cfg->errors_logged++;
1493	dev_err(&ioa_cfg->pdev->dev,
1494		"Adapter timed out transitioning to operational.\n");
1495
1496	if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
1497		ioa_cfg->sdt_state = GET_DUMP;
1498
1499	if (!ioa_cfg->in_reset_reload || ioa_cfg->reset_cmd == ipr_cmd) {
1500		if (ipr_fastfail)
1501			ioa_cfg->reset_retries += IPR_NUM_RESET_RELOAD_RETRIES;
1502		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
1503	}
1504
1505	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
1506	LEAVE;
1507}
1508
1509/**
1510 * ipr_reset_reload - Reset/Reload the IOA
1511 * @ioa_cfg:		ioa config struct
1512 * @shutdown_type:	shutdown type
1513 *
1514 * This function resets the adapter and re-initializes it.
1515 * This function assumes that all new host commands have been stopped.
1516 * Return value:
1517 * 	SUCCESS / FAILED
1518 **/
1519static int ipr_reset_reload(struct ipr_ioa_cfg *ioa_cfg,
1520			    enum ipr_shutdown_type shutdown_type)
1521{
1522	if (!ioa_cfg->in_reset_reload)
1523		ipr_initiate_ioa_reset(ioa_cfg, shutdown_type);
1524
1525	spin_unlock_irq(ioa_cfg->host->host_lock);
1526	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
1527	spin_lock_irq(ioa_cfg->host->host_lock);
1528
1529	/* If we got hit with a host reset while we were already resetting
1530	 the adapter for some reason and that reset failed, fail this one too. */
1531	if (ioa_cfg->ioa_is_dead) {
1532		ipr_trace;
1533		return FAILED;
1534	}
1535
1536	return SUCCESS;
1537}
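/*
 * Note: ipr_reset_reload() is called with host_lock held and drops it
 * across the wait_event() above while the reset/reload completes.
 */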
1538
1539/**
1540 * ipr_find_ses_entry - Find matching SES in SES table
1541 * @res:	resource entry struct of SES
1542 *
1543 * Return value:
1544 * 	pointer to SES table entry / NULL on failure
1545 **/
1546static const struct ipr_ses_table_entry *
1547ipr_find_ses_entry(struct ipr_resource_entry *res)
1548{
1549	int i, j, matches;
1550	const struct ipr_ses_table_entry *ste = ipr_ses_table;
1551
1552	for (i = 0; i < ARRAY_SIZE(ipr_ses_table); i++, ste++) {
1553		for (j = 0, matches = 0; j < IPR_PROD_ID_LEN; j++) {
1554			if (ste->compare_product_id_byte[j] == 'X') {
1555				if (res->cfgte.std_inq_data.vpids.product_id[j] == ste->product_id[j])
1556					matches++;
1557				else
1558					break;
1559			} else
1560				matches++;
1561		}
1562
1563		if (matches == IPR_PROD_ID_LEN)
1564			return ste;
1565	}
1566
1567	return NULL;
1568}
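/*
 * Note the comparison semantics above: an 'X' in compare_product_id_byte
 * means that byte of the product ID must match the table entry exactly;
 * any other character (e.g. '*' in ipr_ses_table) marks a don't-care
 * position.
 */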
1569
1570/**
1571 * ipr_get_max_scsi_speed - Determine max SCSI speed for a given bus
1572 * @ioa_cfg:	ioa config struct
1573 * @bus:		SCSI bus
1574 * @bus_width:	bus width
1575 *
1576 * Return value:
1577 *	SCSI bus speed in units of 100KHz, 1600 is 160 MHz
1578 *	For a 2-byte wide SCSI bus, the maximum data transfer rate in
1579 *	MB/sec is twice the bus speed in MHz (e.g. for a wide enabled
1580 *	bus, max 160MHz = max 320MB/sec).
1581 **/
1582static u32 ipr_get_max_scsi_speed(struct ipr_ioa_cfg *ioa_cfg, u8 bus, u8 bus_width)
1583{
1584	struct ipr_resource_entry *res;
1585	const struct ipr_ses_table_entry *ste;
1586	u32 max_xfer_rate = IPR_MAX_SCSI_RATE(bus_width);
1587
1588	/* Loop through each config table entry in the config table buffer */
1589	list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
1590		if (!(IPR_IS_SES_DEVICE(res->cfgte.std_inq_data)))
1591			continue;
1592
1593		if (bus != res->cfgte.res_addr.bus)
1594			continue;
1595
1596		if (!(ste = ipr_find_ses_entry(res)))
1597			continue;
1598
1599		max_xfer_rate = (ste->max_bus_speed_limit * 10) / (bus_width / 8);
1600	}
1601
1602	return max_xfer_rate;
1603}
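/*
 * Worked example (assuming the SES table limits are in MB/sec): a
 * matching enclosure limited to 160 MB/sec on a 16-bit wide bus gives
 * (160 * 10) / (16 / 8) = 800, i.e. 80 MHz in the 100KHz units
 * described above.
 */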
1604
1605/**
1606 * ipr_wait_iodbg_ack - Wait for an IODEBUG ACK from the IOA
1607 * @ioa_cfg:		ioa config struct
1608 * @max_delay:		max delay in micro-seconds to wait
1609 *
1610 * Waits for an IODEBUG ACK from the IOA, doing busy looping.
1611 *
1612 * Return value:
1613 * 	0 on success / other on failure
1614 **/
1615static int ipr_wait_iodbg_ack(struct ipr_ioa_cfg *ioa_cfg, int max_delay)
1616{
1617	volatile u32 pcii_reg;
1618	int delay = 1;
1619
1620	/* Read interrupt reg until IOA signals IO Debug Acknowledge */
1621	while (delay < max_delay) {
1622		pcii_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
1623
1624		if (pcii_reg & IPR_PCII_IO_DEBUG_ACKNOWLEDGE)
1625			return 0;
1626
1627		/* udelay cannot be used if delay is more than a few milliseconds */
1628		if ((delay / 1000) > MAX_UDELAY_MS)
1629			mdelay(delay / 1000);
1630		else
1631			udelay(delay);
1632
1633		delay += delay;
1634	}
1635	return -EIO;
1636}
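/*
 * The delay in ipr_wait_iodbg_ack() doubles on each pass (1, 2, 4, ...
 * microseconds), so the total busy-wait is bounded by roughly twice
 * max_delay before -EIO is returned.
 */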
1637
1638/**
1639 * ipr_get_ldump_data_section - Dump IOA memory
1640 * @ioa_cfg:			ioa config struct
1641 * @start_addr:			adapter address to dump
1642 * @dest:				destination kernel buffer
1643 * @length_in_words:	length to dump in 4 byte words
1644 *
1645 * Return value:
1646 * 	0 on success / -EIO on failure
1647 **/
1648static int ipr_get_ldump_data_section(struct ipr_ioa_cfg *ioa_cfg,
1649				      u32 start_addr,
1650				      __be32 *dest, u32 length_in_words)
1651{
1652	volatile u32 temp_pcii_reg;
1653	int i, delay = 0;
1654
1655	/* Write IOA interrupt reg starting LDUMP state  */
1656	writel((IPR_UPROCI_RESET_ALERT | IPR_UPROCI_IO_DEBUG_ALERT),
1657	       ioa_cfg->regs.set_uproc_interrupt_reg);
1658
1659	/* Wait for IO debug acknowledge */
1660	if (ipr_wait_iodbg_ack(ioa_cfg,
1661			       IPR_LDUMP_MAX_LONG_ACK_DELAY_IN_USEC)) {
1662		dev_err(&ioa_cfg->pdev->dev,
1663			"IOA dump long data transfer timeout\n");
1664		return -EIO;
1665	}
1666
1667	/* Signal LDUMP interlocked - clear IO debug ack */
1668	writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
1669	       ioa_cfg->regs.clr_interrupt_reg);
1670
1671	/* Write Mailbox with starting address */
1672	writel(start_addr, ioa_cfg->ioa_mailbox);
1673
1674	/* Signal address valid - clear IOA Reset alert */
1675	writel(IPR_UPROCI_RESET_ALERT,
1676	       ioa_cfg->regs.clr_uproc_interrupt_reg);
1677
1678	for (i = 0; i < length_in_words; i++) {
1679		/* Wait for IO debug acknowledge */
1680		if (ipr_wait_iodbg_ack(ioa_cfg,
1681				       IPR_LDUMP_MAX_SHORT_ACK_DELAY_IN_USEC)) {
1682			dev_err(&ioa_cfg->pdev->dev,
1683				"IOA dump short data transfer timeout\n");
1684			return -EIO;
1685		}
1686
1687		/* Read data from mailbox and increment destination pointer */
1688		*dest = cpu_to_be32(readl(ioa_cfg->ioa_mailbox));
1689		dest++;
1690
1691		/* For all but the last word of data, signal data received */
1692		if (i < (length_in_words - 1)) {
1693			/* Signal dump data received - Clear IO debug Ack */
1694			writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
1695			       ioa_cfg->regs.clr_interrupt_reg);
1696		}
1697	}
1698
1699	/* Signal end of block transfer. Set reset alert then clear IO debug ack */
1700	writel(IPR_UPROCI_RESET_ALERT,
1701	       ioa_cfg->regs.set_uproc_interrupt_reg);
1702
1703	writel(IPR_UPROCI_IO_DEBUG_ALERT,
1704	       ioa_cfg->regs.clr_uproc_interrupt_reg);
1705
1706	/* Signal dump data received - Clear IO debug Ack */
1707	writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
1708	       ioa_cfg->regs.clr_interrupt_reg);
1709
1710	/* Wait for IOA to signal LDUMP exit - IOA reset alert will be cleared */
1711	while (delay < IPR_LDUMP_MAX_SHORT_ACK_DELAY_IN_USEC) {
1712		temp_pcii_reg =
1713		    readl(ioa_cfg->regs.sense_uproc_interrupt_reg);
1714
1715		if (!(temp_pcii_reg & IPR_UPROCI_RESET_ALERT))
1716			return 0;
1717
1718		udelay(10);
1719		delay += 10;
1720	}
1721
1722	return 0;
1723}
1724
1725#ifdef CONFIG_SCSI_IPR_DUMP
1726/**
1727 * ipr_sdt_copy - Copy Smart Dump Table to kernel buffer
1728 * @ioa_cfg:		ioa config struct
1729 * @pci_address:	adapter address
1730 * @length:			length of data to copy
1731 *
1732 * Copy data from PCI adapter to kernel buffer.
1733 * Note: length MUST be a 4 byte multiple
1734 * Return value:
1735 * 	0 on success / other on failure
1736 **/
1737static int ipr_sdt_copy(struct ipr_ioa_cfg *ioa_cfg,
1738			unsigned long pci_address, u32 length)
1739{
1740	int bytes_copied = 0;
1741	int cur_len, rc, rem_len, rem_page_len;
1742	__be32 *page;
1743	unsigned long lock_flags = 0;
1744	struct ipr_ioa_dump *ioa_dump = &ioa_cfg->dump->ioa_dump;
1745
1746	while (bytes_copied < length &&
1747	       (ioa_dump->hdr.len + bytes_copied) < IPR_MAX_IOA_DUMP_SIZE) {
1748		if (ioa_dump->page_offset >= PAGE_SIZE ||
1749		    ioa_dump->page_offset == 0) {
1750			page = (__be32 *)__get_free_page(GFP_ATOMIC);
1751
1752			if (!page) {
1753				ipr_trace;
1754				return bytes_copied;
1755			}
1756
1757			ioa_dump->page_offset = 0;
1758			ioa_dump->ioa_data[ioa_dump->next_page_index] = page;
1759			ioa_dump->next_page_index++;
1760		} else
1761			page = ioa_dump->ioa_data[ioa_dump->next_page_index - 1];
1762
1763		rem_len = length - bytes_copied;
1764		rem_page_len = PAGE_SIZE - ioa_dump->page_offset;
1765		cur_len = min(rem_len, rem_page_len);
1766
1767		spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
1768		if (ioa_cfg->sdt_state == ABORT_DUMP) {
1769			rc = -EIO;
1770		} else {
1771			rc = ipr_get_ldump_data_section(ioa_cfg,
1772							pci_address + bytes_copied,
1773							&page[ioa_dump->page_offset / 4],
1774							(cur_len / sizeof(u32)));
1775		}
1776		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
1777
1778		if (!rc) {
1779			ioa_dump->page_offset += cur_len;
1780			bytes_copied += cur_len;
1781		} else {
1782			ipr_trace;
1783			break;
1784		}
1785		schedule();
1786	}
1787
1788	return bytes_copied;
1789}
1790
1791/**
1792 * ipr_init_dump_entry_hdr - Initialize a dump entry header.
1793 * @hdr:	dump entry header struct
1794 *
1795 * Return value:
1796 * 	nothing
1797 **/
1798static void ipr_init_dump_entry_hdr(struct ipr_dump_entry_header *hdr)
1799{
1800	hdr->eye_catcher = IPR_DUMP_EYE_CATCHER;
1801	hdr->num_elems = 1;
1802	hdr->offset = sizeof(*hdr);
1803	hdr->status = IPR_DUMP_STATUS_SUCCESS;
1804}
1805
1806/**
1807 * ipr_dump_ioa_type_data - Fill in the adapter type in the dump.
1808 * @ioa_cfg:	ioa config struct
1809 * @driver_dump:	driver dump struct
1810 *
1811 * Return value:
1812 * 	nothing
1813 **/
1814static void ipr_dump_ioa_type_data(struct ipr_ioa_cfg *ioa_cfg,
1815				   struct ipr_driver_dump *driver_dump)
1816{
1817	struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
1818
1819	ipr_init_dump_entry_hdr(&driver_dump->ioa_type_entry.hdr);
1820	driver_dump->ioa_type_entry.hdr.len =
1821		sizeof(struct ipr_dump_ioa_type_entry) -
1822		sizeof(struct ipr_dump_entry_header);
1823	driver_dump->ioa_type_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
1824	driver_dump->ioa_type_entry.hdr.id = IPR_DUMP_DRIVER_TYPE_ID;
1825	driver_dump->ioa_type_entry.type = ioa_cfg->type;
1826	driver_dump->ioa_type_entry.fw_version = (ucode_vpd->major_release << 24) |
1827		(ucode_vpd->card_type << 16) | (ucode_vpd->minor_release[0] << 8) |
1828		ucode_vpd->minor_release[1];
1829	driver_dump->hdr.num_entries++;
1830}
1831
1832/**
1833 * ipr_dump_version_data - Fill in the driver version in the dump.
1834 * @ioa_cfg:	ioa config struct
1835 * @driver_dump:	driver dump struct
1836 *
1837 * Return value:
1838 * 	nothing
1839 **/
1840static void ipr_dump_version_data(struct ipr_ioa_cfg *ioa_cfg,
1841				  struct ipr_driver_dump *driver_dump)
1842{
1843	ipr_init_dump_entry_hdr(&driver_dump->version_entry.hdr);
1844	driver_dump->version_entry.hdr.len =
1845		sizeof(struct ipr_dump_version_entry) -
1846		sizeof(struct ipr_dump_entry_header);
1847	driver_dump->version_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_ASCII;
1848	driver_dump->version_entry.hdr.id = IPR_DUMP_DRIVER_VERSION_ID;
1849	strcpy(driver_dump->version_entry.version, IPR_DRIVER_VERSION);
1850	driver_dump->hdr.num_entries++;
1851}
1852
1853/**
1854 * ipr_dump_trace_data - Fill in the IOA trace in the dump.
1855 * @ioa_cfg:	ioa config struct
1856 * @driver_dump:	driver dump struct
1857 *
1858 * Return value:
1859 * 	nothing
1860 **/
1861static void ipr_dump_trace_data(struct ipr_ioa_cfg *ioa_cfg,
1862				   struct ipr_driver_dump *driver_dump)
1863{
1864	ipr_init_dump_entry_hdr(&driver_dump->trace_entry.hdr);
1865	driver_dump->trace_entry.hdr.len =
1866		sizeof(struct ipr_dump_trace_entry) -
1867		sizeof(struct ipr_dump_entry_header);
1868	driver_dump->trace_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
1869	driver_dump->trace_entry.hdr.id = IPR_DUMP_TRACE_ID;
1870	memcpy(driver_dump->trace_entry.trace, ioa_cfg->trace, IPR_TRACE_SIZE);
1871	driver_dump->hdr.num_entries++;
1872}
1873
1874/**
1875 * ipr_dump_location_data - Fill in the IOA location in the dump.
1876 * @ioa_cfg:	ioa config struct
1877 * @driver_dump:	driver dump struct
1878 *
1879 * Return value:
1880 * 	nothing
1881 **/
1882static void ipr_dump_location_data(struct ipr_ioa_cfg *ioa_cfg,
1883				   struct ipr_driver_dump *driver_dump)
1884{
1885	ipr_init_dump_entry_hdr(&driver_dump->location_entry.hdr);
1886	driver_dump->location_entry.hdr.len =
1887		sizeof(struct ipr_dump_location_entry) -
1888		sizeof(struct ipr_dump_entry_header);
1889	driver_dump->location_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_ASCII;
1890	driver_dump->location_entry.hdr.id = IPR_DUMP_LOCATION_ID;
1891	strcpy(driver_dump->location_entry.location, ioa_cfg->pdev->dev.bus_id);
1892	driver_dump->hdr.num_entries++;
1893}
1894
1895/**
1896 * ipr_get_ioa_dump - Perform a dump of the driver and adapter.
1897 * @ioa_cfg:	ioa config struct
1898 * @dump:		dump struct
1899 *
1900 * Return value:
1901 * 	nothing
1902 **/
1903static void ipr_get_ioa_dump(struct ipr_ioa_cfg *ioa_cfg, struct ipr_dump *dump)
1904{
1905	unsigned long start_addr, sdt_word;
1906	unsigned long lock_flags = 0;
1907	struct ipr_driver_dump *driver_dump = &dump->driver_dump;
1908	struct ipr_ioa_dump *ioa_dump = &dump->ioa_dump;
1909	u32 num_entries, start_off, end_off;
1910	u32 bytes_to_copy, bytes_copied, rc;
1911	struct ipr_sdt *sdt;
1912	int i;
1913
1914	ENTER;
1915
1916	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
1917
1918	if (ioa_cfg->sdt_state != GET_DUMP) {
1919		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
1920		return;
1921	}
1922
1923	start_addr = readl(ioa_cfg->ioa_mailbox);
1924
1925	if (!ipr_sdt_is_fmt2(start_addr)) {
1926		dev_err(&ioa_cfg->pdev->dev,
1927			"Invalid dump table format: %lx\n", start_addr);
1928		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
1929		return;
1930	}
1931
1932	dev_err(&ioa_cfg->pdev->dev, "Dump of IOA initiated\n");
1933
1934	driver_dump->hdr.eye_catcher = IPR_DUMP_EYE_CATCHER;
1935
1936	/* Initialize the overall dump header */
1937	driver_dump->hdr.len = sizeof(struct ipr_driver_dump);
1938	driver_dump->hdr.num_entries = 1;
1939	driver_dump->hdr.first_entry_offset = sizeof(struct ipr_dump_header);
1940	driver_dump->hdr.status = IPR_DUMP_STATUS_SUCCESS;
1941	driver_dump->hdr.os = IPR_DUMP_OS_LINUX;
1942	driver_dump->hdr.driver_name = IPR_DUMP_DRIVER_NAME;
1943
1944	ipr_dump_version_data(ioa_cfg, driver_dump);
1945	ipr_dump_location_data(ioa_cfg, driver_dump);
1946	ipr_dump_ioa_type_data(ioa_cfg, driver_dump);
1947	ipr_dump_trace_data(ioa_cfg, driver_dump);
1948
1949	/* Update dump_header */
1950	driver_dump->hdr.len += sizeof(struct ipr_dump_entry_header);
1951
1952	/* IOA Dump entry */
1953	ipr_init_dump_entry_hdr(&ioa_dump->hdr);
1954	ioa_dump->format = IPR_SDT_FMT2;
1955	ioa_dump->hdr.len = 0;
1956	ioa_dump->hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
1957	ioa_dump->hdr.id = IPR_DUMP_IOA_DUMP_ID;
1958
1959	/* The first entries in the SDT are actually a list of dump addresses
1960	 * and lengths used to gather the real dump data.  sdt points to the
1961	 * IOA generated dump table.  Dump data will be extracted based on
1962	 * entries in this table. */
1963	sdt = &ioa_dump->sdt;
1964
1965	rc = ipr_get_ldump_data_section(ioa_cfg, start_addr, (__be32 *)sdt,
1966					sizeof(struct ipr_sdt) / sizeof(__be32));
1967
1968	/* Smart Dump table is ready to use and the first entry is valid */
1969	if (rc || (be32_to_cpu(sdt->hdr.state) != IPR_FMT2_SDT_READY_TO_USE)) {
1970		dev_err(&ioa_cfg->pdev->dev,
1971			"Dump of IOA failed. Dump table not valid: %d, %X.\n",
1972			rc, be32_to_cpu(sdt->hdr.state));
1973		driver_dump->hdr.status = IPR_DUMP_STATUS_FAILED;
1974		ioa_cfg->sdt_state = DUMP_OBTAINED;
1975		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
1976		return;
1977	}
1978
1979	num_entries = be32_to_cpu(sdt->hdr.num_entries_used);
1980
1981	if (num_entries > IPR_NUM_SDT_ENTRIES)
1982		num_entries = IPR_NUM_SDT_ENTRIES;
1983
1984	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
1985
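	/* Walk the SDT: each valid entry describes an adapter address range
	 * (start/end offsets) that is fetched with ipr_sdt_copy().  Oversized
	 * entries are skipped and the overall dump is capped at
	 * IPR_MAX_IOA_DUMP_SIZE; a short copy marks the dump as a qualified
	 * success and stops.
	 */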
1986	for (i = 0; i < num_entries; i++) {
1987		if (ioa_dump->hdr.len > IPR_MAX_IOA_DUMP_SIZE) {
1988			driver_dump->hdr.status = IPR_DUMP_STATUS_QUAL_SUCCESS;
1989			break;
1990		}
1991
1992		if (sdt->entry[i].flags & IPR_SDT_VALID_ENTRY) {
1993			sdt_word = be32_to_cpu(sdt->entry[i].bar_str_offset);
1994			start_off = sdt_word & IPR_FMT2_MBX_ADDR_MASK;
1995			end_off = be32_to_cpu(sdt->entry[i].end_offset);
1996
1997			if (ipr_sdt_is_fmt2(sdt_word) && sdt_word) {
1998				bytes_to_copy = end_off - start_off;
1999				if (bytes_to_copy > IPR_MAX_IOA_DUMP_SIZE) {
2000					sdt->entry[i].flags &= ~IPR_SDT_VALID_ENTRY;
2001					continue;
2002				}
2003
2004				/* Copy data from adapter to driver buffers */
2005				bytes_copied = ipr_sdt_copy(ioa_cfg, sdt_word,
2006							    bytes_to_copy);
2007
2008				ioa_dump->hdr.len += bytes_copied;
2009
2010				if (bytes_copied != bytes_to_copy) {
2011					driver_dump->hdr.status = IPR_DUMP_STATUS_QUAL_SUCCESS;
2012					break;
2013				}
2014			}
2015		}
2016	}
2017
2018	dev_err(&ioa_cfg->pdev->dev, "Dump of IOA completed.\n");
2019
2020	/* Update dump_header */
2021	driver_dump->hdr.len += ioa_dump->hdr.len;
2022	wmb();
2023	ioa_cfg->sdt_state = DUMP_OBTAINED;
2024	LEAVE;
2025}
2026
2027#else
2028#define ipr_get_ioa_dump(ioa_cfg, dump) do { } while(0)
2029#endif
2030
2031/**
2032 * ipr_release_dump - Free adapter dump memory
2033 * @kref:	kref struct
2034 *
2035 * Return value:
2036 *	nothing
2037 **/
2038static void ipr_release_dump(struct kref *kref)
2039{
2040	struct ipr_dump *dump = container_of(kref,struct ipr_dump,kref);
2041	struct ipr_ioa_cfg *ioa_cfg = dump->ioa_cfg;
2042	unsigned long lock_flags = 0;
2043	int i;
2044
2045	ENTER;
2046	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2047	ioa_cfg->dump = NULL;
2048	ioa_cfg->sdt_state = INACTIVE;
2049	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2050
2051	for (i = 0; i < dump->ioa_dump.next_page_index; i++)
2052		free_page((unsigned long) dump->ioa_dump.ioa_data[i]);
2053
2054	kfree(dump);
2055	LEAVE;
2056}
2057
2058/**
2059 * ipr_worker_thread - Worker thread
2060 * @data:		ioa config struct
2061 *
2062 * Called at task level from a work thread. This function takes care
2063 * of adding and removing devices from the mid-layer as configuration
2064 * changes are detected by the adapter.
2065 *
2066 * Return value:
2067 * 	nothing
2068 **/
2069static void ipr_worker_thread(void *data)
2070{
2071	unsigned long lock_flags;
2072	struct ipr_resource_entry *res;
2073	struct scsi_device *sdev;
2074	struct ipr_dump *dump;
2075	struct ipr_ioa_cfg *ioa_cfg = data;
2076	u8 bus, target, lun;
2077	int did_work;
2078
2079	ENTER;
2080	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2081
2082	if (ioa_cfg->sdt_state == GET_DUMP) {
2083		dump = ioa_cfg->dump;
2084		if (!dump) {
2085			spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2086			return;
2087		}
2088		kref_get(&dump->kref);
2089		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2090		ipr_get_ioa_dump(ioa_cfg, dump);
2091		kref_put(&dump->kref, ipr_release_dump);
2092
2093		spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2094		if (ioa_cfg->sdt_state == DUMP_OBTAINED)
2095			ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
2096		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2097		return;
2098	}
2099
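	/* Device add/remove below must be done with the host lock dropped,
	 * since scsi_remove_device()/scsi_add_device() can sleep.  After the
	 * lock is reacquired the resource list may have changed, so the scan
	 * is restarted from the top each time work is done.
	 */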
2100restart:
2101	do {
2102		did_work = 0;
2103		if (!ioa_cfg->allow_cmds || !ioa_cfg->allow_ml_add_del) {
2104			spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2105			return;
2106		}
2107
2108		list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
2109			if (res->del_from_ml && res->sdev) {
2110				did_work = 1;
2111				sdev = res->sdev;
2112				if (!scsi_device_get(sdev)) {
2113					list_move_tail(&res->queue, &ioa_cfg->free_res_q);
2114					spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2115					scsi_remove_device(sdev);
2116					scsi_device_put(sdev);
2117					spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2118				}
2119				break;
2120			}
2121		}
2122	} while (did_work);
2123
2124	list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
2125		if (res->add_to_ml) {
2126			bus = res->cfgte.res_addr.bus;
2127			target = res->cfgte.res_addr.target;
2128			lun = res->cfgte.res_addr.lun;
2129			res->add_to_ml = 0;
2130			spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2131			scsi_add_device(ioa_cfg->host, bus, target, lun);
2132			spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2133			goto restart;
2134		}
2135	}
2136
2137	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2138	kobject_uevent(&ioa_cfg->host->shost_classdev.kobj, KOBJ_CHANGE);
2139	LEAVE;
2140}
2141
2142#ifdef CONFIG_SCSI_IPR_TRACE
2143/**
2144 * ipr_read_trace - Dump the adapter trace
2145 * @kobj:		kobject struct
2146 * @buf:		buffer
2147 * @off:		offset
2148 * @count:		buffer size
2149 *
2150 * Return value:
2151 *	number of bytes printed to buffer
2152 **/
2153static ssize_t ipr_read_trace(struct kobject *kobj, char *buf,
2154			      loff_t off, size_t count)
2155{
2156	struct class_device *cdev = container_of(kobj,struct class_device,kobj);
2157	struct Scsi_Host *shost = class_to_shost(cdev);
2158	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2159	unsigned long lock_flags = 0;
2160	int size = IPR_TRACE_SIZE;
2161	char *src = (char *)ioa_cfg->trace;
2162
2163	if (off > size)
2164		return 0;
2165	if (off + count > size) {
2166		size -= off;
2167		count = size;
2168	}
2169
2170	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2171	memcpy(buf, &src[off], count);
2172	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2173	return count;
2174}
2175
2176static struct bin_attribute ipr_trace_attr = {
2177	.attr =	{
2178		.name = "trace",
2179		.mode = S_IRUGO,
2180	},
2181	.size = 0,
2182	.read = ipr_read_trace,
2183};
2184#endif
2185
2186static const struct {
2187	enum ipr_cache_state state;
2188	char *name;
2189} cache_state [] = {
2190	{ CACHE_NONE, "none" },
2191	{ CACHE_DISABLED, "disabled" },
2192	{ CACHE_ENABLED, "enabled" }
2193};
2194
2195/**
2196 * ipr_show_write_caching - Show the write caching attribute
2197 * @class_dev:	class device struct
2198 * @buf:		buffer
2199 *
2200 * Return value:
2201 *	number of bytes printed to buffer
2202 **/
2203static ssize_t ipr_show_write_caching(struct class_device *class_dev, char *buf)
2204{
2205	struct Scsi_Host *shost = class_to_shost(class_dev);
2206	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2207	unsigned long lock_flags = 0;
2208	int i, len = 0;
2209
2210	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2211	for (i = 0; i < ARRAY_SIZE(cache_state); i++) {
2212		if (cache_state[i].state == ioa_cfg->cache_state) {
2213			len = snprintf(buf, PAGE_SIZE, "%s\n", cache_state[i].name);
2214			break;
2215		}
2216	}
2217	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2218	return len;
2219}
2220
2221
2222/**
2223 * ipr_store_write_caching - Enable/disable adapter write cache
2224 * @class_dev:	class_device struct
2225 * @buf:		buffer
2226 * @count:		buffer size
2227 *
2228 * This function will enable/disable adapter write cache.
2229 *
2230 * Return value:
2231 * 	count on success / other on failure
2232 **/
2233static ssize_t ipr_store_write_caching(struct class_device *class_dev,
2234					const char *buf, size_t count)
2235{
2236	struct Scsi_Host *shost = class_to_shost(class_dev);
2237	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2238	unsigned long lock_flags = 0;
2239	enum ipr_cache_state new_state = CACHE_INVALID;
2240	int i;
2241
2242	if (!capable(CAP_SYS_ADMIN))
2243		return -EACCES;
2244	if (ioa_cfg->cache_state == CACHE_NONE)
2245		return -EINVAL;
2246
2247	for (i = 0; i < ARRAY_SIZE(cache_state); i++) {
2248		if (!strncmp(cache_state[i].name, buf, strlen(cache_state[i].name))) {
2249			new_state = cache_state[i].state;
2250			break;
2251		}
2252	}
2253
2254	if (new_state != CACHE_DISABLED && new_state != CACHE_ENABLED)
2255		return -EINVAL;
2256
2257	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2258	if (ioa_cfg->cache_state == new_state) {
2259		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2260		return count;
2261	}
2262
2263	ioa_cfg->cache_state = new_state;
2264	dev_info(&ioa_cfg->pdev->dev, "%s adapter write cache.\n",
2265		 new_state == CACHE_ENABLED ? "Enabling" : "Disabling");
2266	if (!ioa_cfg->in_reset_reload)
2267		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
2268	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2269	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
2270
2271	return count;
2272}
2273
2274static struct class_device_attribute ipr_ioa_cache_attr = {
2275	.attr = {
2276		.name =		"write_cache",
2277		.mode =		S_IRUGO | S_IWUSR,
2278	},
2279	.show = ipr_show_write_caching,
2280	.store = ipr_store_write_caching
2281};
2282
2283/**
2284 * ipr_show_fw_version - Show the firmware version
2285 * @class_dev:	class device struct
2286 * @buf:		buffer
2287 *
2288 * Return value:
2289 *	number of bytes printed to buffer
2290 **/
2291static ssize_t ipr_show_fw_version(struct class_device *class_dev, char *buf)
2292{
2293	struct Scsi_Host *shost = class_to_shost(class_dev);
2294	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2295	struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
2296	unsigned long lock_flags = 0;
2297	int len;
2298
2299	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2300	len = snprintf(buf, PAGE_SIZE, "%02X%02X%02X%02X\n",
2301		       ucode_vpd->major_release, ucode_vpd->card_type,
2302		       ucode_vpd->minor_release[0],
2303		       ucode_vpd->minor_release[1]);
2304	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2305	return len;
2306}
2307
2308static struct class_device_attribute ipr_fw_version_attr = {
2309	.attr = {
2310		.name =		"fw_version",
2311		.mode =		S_IRUGO,
2312	},
2313	.show = ipr_show_fw_version,
2314};
2315
2316/**
2317 * ipr_show_log_level - Show the adapter's error logging level
2318 * @class_dev:	class device struct
2319 * @buf:		buffer
2320 *
2321 * Return value:
2322 * 	number of bytes printed to buffer
2323 **/
2324static ssize_t ipr_show_log_level(struct class_device *class_dev, char *buf)
2325{
2326	struct Scsi_Host *shost = class_to_shost(class_dev);
2327	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2328	unsigned long lock_flags = 0;
2329	int len;
2330
2331	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2332	len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->log_level);
2333	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2334	return len;
2335}
2336
2337/**
2338 * ipr_store_log_level - Change the adapter's error logging level
2339 * @class_dev:	class device struct
2340 * @buf:		buffer
2341 *
2342 * Return value:
2343 * 	number of bytes consumed from the buffer
2344 **/
2345static ssize_t ipr_store_log_level(struct class_device *class_dev,
2346				   const char *buf, size_t count)
2347{
2348	struct Scsi_Host *shost = class_to_shost(class_dev);
2349	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2350	unsigned long lock_flags = 0;
2351
2352	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2353	ioa_cfg->log_level = simple_strtoul(buf, NULL, 10);
2354	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2355	return strlen(buf);
2356}
2357
2358static struct class_device_attribute ipr_log_level_attr = {
2359	.attr = {
2360		.name =		"log_level",
2361		.mode =		S_IRUGO | S_IWUSR,
2362	},
2363	.show = ipr_show_log_level,
2364	.store = ipr_store_log_level
2365};
2366
2367/**
2368 * ipr_store_diagnostics - IOA Diagnostics interface
2369 * @class_dev:	class_device struct
2370 * @buf:		buffer
2371 * @count:		buffer size
2372 *
2373 * This function will reset the adapter and wait a reasonable
2374 * amount of time for any errors that the adapter might log.
2375 *
2376 * Return value:
2377 * 	count on success / other on failure
2378 **/
2379static ssize_t ipr_store_diagnostics(struct class_device *class_dev,
2380				     const char *buf, size_t count)
2381{
2382	struct Scsi_Host *shost = class_to_shost(class_dev);
2383	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2384	unsigned long lock_flags = 0;
2385	int rc = count;
2386
2387	if (!capable(CAP_SYS_ADMIN))
2388		return -EACCES;
2389
2390	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
2391	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2392	ioa_cfg->errors_logged = 0;
2393	ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
2394
2395	if (ioa_cfg->in_reset_reload) {
2396		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2397		wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
2398
2399		/* Wait for a second for any errors to be logged */
2400		msleep(1000);
2401	} else {
2402		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2403		return -EIO;
2404	}
2405
2406	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2407	if (ioa_cfg->in_reset_reload || ioa_cfg->errors_logged)
2408		rc = -EIO;
2409	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2410
2411	return rc;
2412}
2413
2414static struct class_device_attribute ipr_diagnostics_attr = {
2415	.attr = {
2416		.name =		"run_diagnostics",
2417		.mode =		S_IWUSR,
2418	},
2419	.store = ipr_store_diagnostics
2420};
2421
2422/**
2423 * ipr_show_adapter_state - Show the adapter's state
2424 * @class_dev:	class device struct
2425 * @buf:		buffer
2426 *
2427 * Return value:
2428 * 	number of bytes printed to buffer
2429 **/
2430static ssize_t ipr_show_adapter_state(struct class_device *class_dev, char *buf)
2431{
2432	struct Scsi_Host *shost = class_to_shost(class_dev);
2433	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2434	unsigned long lock_flags = 0;
2435	int len;
2436
2437	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2438	if (ioa_cfg->ioa_is_dead)
2439		len = snprintf(buf, PAGE_SIZE, "offline\n");
2440	else
2441		len = snprintf(buf, PAGE_SIZE, "online\n");
2442	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2443	return len;
2444}
2445
2446/**
2447 * ipr_store_adapter_state - Change adapter state
2448 * @class_dev:	class_device struct
2449 * @buf:		buffer
2450 * @count:		buffer size
2451 *
2452 * This function will change the adapter's state.
2453 *
2454 * Return value:
2455 * 	count on success / other on failure
2456 **/
2457static ssize_t ipr_store_adapter_state(struct class_device *class_dev,
2458				       const char *buf, size_t count)
2459{
2460	struct Scsi_Host *shost = class_to_shost(class_dev);
2461	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2462	unsigned long lock_flags;
2463	int result = count;
2464
2465	if (!capable(CAP_SYS_ADMIN))
2466		return -EACCES;
2467
2468	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2469	if (ioa_cfg->ioa_is_dead && !strncmp(buf, "online", 6)) {
2470		ioa_cfg->ioa_is_dead = 0;
2471		ioa_cfg->reset_retries = 0;
2472		ioa_cfg->in_ioa_bringdown = 0;
2473		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
2474	}
2475	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2476	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
2477
2478	return result;
2479}
2480
2481static struct class_device_attribute ipr_ioa_state_attr = {
2482	.attr = {
2483		.name =		"state",
2484		.mode =		S_IRUGO | S_IWUSR,
2485	},
2486	.show = ipr_show_adapter_state,
2487	.store = ipr_store_adapter_state
2488};
2489
2490/**
2491 * ipr_store_reset_adapter - Reset the adapter
2492 * @class_dev:	class_device struct
2493 * @buf:		buffer
2494 * @count:		buffer size
2495 *
2496 * This function will reset the adapter.
2497 *
2498 * Return value:
2499 * 	count on success / other on failure
2500 **/
2501static ssize_t ipr_store_reset_adapter(struct class_device *class_dev,
2502				       const char *buf, size_t count)
2503{
2504	struct Scsi_Host *shost = class_to_shost(class_dev);
2505	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2506	unsigned long lock_flags;
2507	int result = count;
2508
2509	if (!capable(CAP_SYS_ADMIN))
2510		return -EACCES;
2511
2512	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2513	if (!ioa_cfg->in_reset_reload)
2514		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
2515	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2516	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
2517
2518	return result;
2519}
2520
2521static struct class_device_attribute ipr_ioa_reset_attr = {
2522	.attr = {
2523		.name =		"reset_host",
2524		.mode =		S_IWUSR,
2525	},
2526	.store = ipr_store_reset_adapter
2527};
2528
2529/**
2530 * ipr_alloc_ucode_buffer - Allocates a microcode download buffer
2531 * @buf_len:		buffer length
2532 *
2533 * Allocates a DMA'able buffer in chunks and assembles a scatter/gather
2534 * list to use for microcode download
2535 *
2536 * Return value:
2537 * 	pointer to sglist / NULL on failure
2538 **/
2539static struct ipr_sglist *ipr_alloc_ucode_buffer(int buf_len)
2540{
2541	int sg_size, order, bsize_elem, num_elem, i, j;
2542	struct ipr_sglist *sglist;
2543	struct scatterlist *scatterlist;
2544	struct page *page;
2545
2546	/* Get the minimum size per scatter/gather element */
2547	sg_size = buf_len / (IPR_MAX_SGLIST - 1);
2548
2549	/* Get the actual size per element */
2550	order = get_order(sg_size);
2551
2552	/* Determine the actual number of bytes per element */
2553	bsize_elem = PAGE_SIZE * (1 << order);
2554
2555	/* Determine the actual number of sg entries needed */
2556	if (buf_len % bsize_elem)
2557		num_elem = (buf_len / bsize_elem) + 1;
2558	else
2559		num_elem = buf_len / bsize_elem;
2560
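	/* Note: the allocation below sizes for (num_elem - 1) additional
	 * scatterlist entries, which assumes struct ipr_sglist already
	 * declares a one element scatterlist array at its end.
	 */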
2561	/* Allocate a scatter/gather list for the DMA */
2562	sglist = kzalloc(sizeof(struct ipr_sglist) +
2563			 (sizeof(struct scatterlist) * (num_elem - 1)),
2564			 GFP_KERNEL);
2565
2566	if (sglist == NULL) {
2567		ipr_trace;
2568		return NULL;
2569	}
2570
2571	scatterlist = sglist->scatterlist;
2572
2573	sglist->order = order;
2574	sglist->num_sg = num_elem;
2575
2576	/* Allocate a bunch of sg elements */
2577	for (i = 0; i < num_elem; i++) {
2578		page = alloc_pages(GFP_KERNEL, order);
2579		if (!page) {
2580			ipr_trace;
2581
2582			/* Free up what we already allocated */
2583			for (j = i - 1; j >= 0; j--)
2584				__free_pages(scatterlist[j].page, order);
2585			kfree(sglist);
2586			return NULL;
2587		}
2588
2589		scatterlist[i].page = page;
2590	}
2591
2592	return sglist;
2593}
2594
2595/**
2596 * ipr_free_ucode_buffer - Frees a microcode download buffer
2597 * @sglist:		scatter/gather list pointer
2598 *
2599 * Free a DMA'able ucode download buffer previously allocated with
2600 * ipr_alloc_ucode_buffer
2601 *
2602 * Return value:
2603 * 	nothing
2604 **/
2605static void ipr_free_ucode_buffer(struct ipr_sglist *sglist)
2606{
2607	int i;
2608
2609	for (i = 0; i < sglist->num_sg; i++)
2610		__free_pages(sglist->scatterlist[i].page, sglist->order);
2611
2612	kfree(sglist);
2613}
2614
2615/**
2616 * ipr_copy_ucode_buffer - Copy the microcode image to the download buffer
2617 * @sglist:		scatter/gather list pointer
2618 * @buffer:		buffer pointer
2619 * @len:		buffer length
2620 *
2621 * Copy a microcode image from the firmware image into a buffer allocated by
2622 * ipr_alloc_ucode_buffer
2623 *
2624 * Return value:
2625 * 	0 on success / other on failure
2626 **/
2627static int ipr_copy_ucode_buffer(struct ipr_sglist *sglist,
2628				 u8 *buffer, u32 len)
2629{
2630	int bsize_elem, i, result = 0;
2631	struct scatterlist *scatterlist;
2632	void *kaddr;
2633
2634	/* Determine the actual number of bytes per element */
2635	bsize_elem = PAGE_SIZE * (1 << sglist->order);
2636
2637	scatterlist = sglist->scatterlist;
2638
2639	for (i = 0; i < (len / bsize_elem); i++, buffer += bsize_elem) {
2640		kaddr = kmap(scatterlist[i].page);
2641		memcpy(kaddr, buffer, bsize_elem);
2642		kunmap(scatterlist[i].page);
2643
2644		scatterlist[i].length = bsize_elem;
2645
2646		if (result != 0) {
2647			ipr_trace;
2648			return result;
2649		}
2650	}
2651
2652	if (len % bsize_elem) {
2653		kaddr = kmap(scatterlist[i].page);
2654		memcpy(kaddr, buffer, len % bsize_elem);
2655		kunmap(scatterlist[i].page);
2656
2657		scatterlist[i].length = len % bsize_elem;
2658	}
2659
2660	sglist->buffer_len = len;
2661	return result;
2662}
2663
2664/**
2665 * ipr_build_ucode_ioadl - Build a microcode download IOADL
2666 * @ipr_cmd:	ipr command struct
2667 * @sglist:		scatter/gather list
2668 *
2669 * Builds a microcode download IOA data list (IOADL).
2670 *
2671 **/
2672static void ipr_build_ucode_ioadl(struct ipr_cmnd *ipr_cmd,
2673				  struct ipr_sglist *sglist)
2674{
2675	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
2676	struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;
2677	struct scatterlist *scatterlist = sglist->scatterlist;
2678	int i;
2679
2680	ipr_cmd->dma_use_sg = sglist->num_dma_sg;
2681	ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
2682	ioarcb->write_data_transfer_length = cpu_to_be32(sglist->buffer_len);
2683	ioarcb->write_ioadl_len =
2684		cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
2685
2686	for (i = 0; i < ipr_cmd->dma_use_sg; i++) {
2687		ioadl[i].flags_and_data_len =
2688			cpu_to_be32(IPR_IOADL_FLAGS_WRITE | sg_dma_len(&scatterlist[i]));
2689		ioadl[i].address =
2690			cpu_to_be32(sg_dma_address(&scatterlist[i]));
2691	}
2692
2693	ioadl[i-1].flags_and_data_len |=
2694		cpu_to_be32(IPR_IOADL_FLAGS_LAST);
2695}
2696
2697/**
2698 * ipr_update_ioa_ucode - Update IOA's microcode
2699 * @ioa_cfg:	ioa config struct
2700 * @sglist:		scatter/gather list
2701 *
2702 * Initiate an adapter reset to update the IOA's microcode
2703 *
2704 * Return value:
2705 * 	0 on success / -EIO on failure
2706 **/
2707static int ipr_update_ioa_ucode(struct ipr_ioa_cfg *ioa_cfg,
2708				struct ipr_sglist *sglist)
2709{
2710	unsigned long lock_flags;
2711
2712	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2713
2714	if (ioa_cfg->ucode_sglist) {
2715		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2716		dev_err(&ioa_cfg->pdev->dev,
2717			"Microcode download already in progress\n");
2718		return -EIO;
2719	}
2720
2721	sglist->num_dma_sg = pci_map_sg(ioa_cfg->pdev, sglist->scatterlist,
2722					sglist->num_sg, DMA_TO_DEVICE);
2723
2724	if (!sglist->num_dma_sg) {
2725		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2726		dev_err(&ioa_cfg->pdev->dev,
2727			"Failed to map microcode download buffer!\n");
2728		return -EIO;
2729	}
2730
2731	ioa_cfg->ucode_sglist = sglist;
2732	ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
2733	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2734	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
2735
2736	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2737	ioa_cfg->ucode_sglist = NULL;
2738	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2739	return 0;
2740}
2741
2742/**
2743 * ipr_store_update_fw - Update the firmware on the adapter
2744 * @class_dev:	class_device struct
2745 * @buf:		buffer
2746 * @count:		buffer size
2747 *
2748 * This function will update the firmware on the adapter.
2749 *
2750 * Return value:
2751 * 	count on success / other on failure
2752 **/
2753static ssize_t ipr_store_update_fw(struct class_device *class_dev,
2754				       const char *buf, size_t count)
2755{
2756	struct Scsi_Host *shost = class_to_shost(class_dev);
2757	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2758	struct ipr_ucode_image_header *image_hdr;
2759	const struct firmware *fw_entry;
2760	struct ipr_sglist *sglist;
2761	char fname[100];
2762	char *src;
2763	int len, result, dnld_size;
2764
2765	if (!capable(CAP_SYS_ADMIN))
2766		return -EACCES;
2767
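	/* Copy the user supplied file name.  Writes from sysfs normally end in
	 * a newline, which the fname[len-1] assignment below overwrites with a
	 * terminating NUL (assuming the typical echo-style usage).
	 */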
2768	len = snprintf(fname, 99, "%s", buf);
2769	fname[len-1] = '\0';
2770
2771	if (request_firmware(&fw_entry, fname, &ioa_cfg->pdev->dev)) {
2772		dev_err(&ioa_cfg->pdev->dev, "Firmware file %s not found\n", fname);
2773		return -EIO;
2774	}
2775
2776	image_hdr = (struct ipr_ucode_image_header *)fw_entry->data;
2777
2778	if (be32_to_cpu(image_hdr->header_length) > fw_entry->size ||
2779	    (ioa_cfg->vpd_cbs->page3_data.card_type &&
2780	     ioa_cfg->vpd_cbs->page3_data.card_type != image_hdr->card_type)) {
2781		dev_err(&ioa_cfg->pdev->dev, "Invalid microcode buffer\n");
2782		release_firmware(fw_entry);
2783		return -EINVAL;
2784	}
2785
2786	src = (u8 *)image_hdr + be32_to_cpu(image_hdr->header_length);
2787	dnld_size = fw_entry->size - be32_to_cpu(image_hdr->header_length);
2788	sglist = ipr_alloc_ucode_buffer(dnld_size);
2789
2790	if (!sglist) {
2791		dev_err(&ioa_cfg->pdev->dev, "Microcode buffer allocation failed\n");
2792		release_firmware(fw_entry);
2793		return -ENOMEM;
2794	}
2795
2796	result = ipr_copy_ucode_buffer(sglist, src, dnld_size);
2797
2798	if (result) {
2799		dev_err(&ioa_cfg->pdev->dev,
2800			"Microcode buffer copy to DMA buffer failed\n");
2801		goto out;
2802	}
2803
2804	result = ipr_update_ioa_ucode(ioa_cfg, sglist);
2805
2806	if (!result)
2807		result = count;
2808out:
2809	ipr_free_ucode_buffer(sglist);
2810	release_firmware(fw_entry);
2811	return result;
2812}
2813
2814static struct class_device_attribute ipr_update_fw_attr = {
2815	.attr = {
2816		.name =		"update_fw",
2817		.mode =		S_IWUSR,
2818	},
2819	.store = ipr_store_update_fw
2820};
2821
2822static struct class_device_attribute *ipr_ioa_attrs[] = {
2823	&ipr_fw_version_attr,
2824	&ipr_log_level_attr,
2825	&ipr_diagnostics_attr,
2826	&ipr_ioa_state_attr,
2827	&ipr_ioa_reset_attr,
2828	&ipr_update_fw_attr,
2829	&ipr_ioa_cache_attr,
2830	NULL,
2831};
2832
2833#ifdef CONFIG_SCSI_IPR_DUMP
2834/**
2835 * ipr_read_dump - Dump the adapter
2836 * @kobj:		kobject struct
2837 * @buf:		buffer
2838 * @off:		offset
2839 * @count:		buffer size
2840 *
2841 * Return value:
2842 *	number of bytes printed to buffer
2843 **/
2844static ssize_t ipr_read_dump(struct kobject *kobj, char *buf,
2845			      loff_t off, size_t count)
2846{
2847	struct class_device *cdev = container_of(kobj,struct class_device,kobj);
2848	struct Scsi_Host *shost = class_to_shost(cdev);
2849	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
2850	struct ipr_dump *dump;
2851	unsigned long lock_flags = 0;
2852	char *src;
2853	int len;
2854	size_t rc = count;
2855
2856	if (!capable(CAP_SYS_ADMIN))
2857		return -EACCES;
2858
2859	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2860	dump = ioa_cfg->dump;
2861
2862	if (ioa_cfg->sdt_state != DUMP_OBTAINED || !dump) {
2863		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2864		return 0;
2865	}
2866	kref_get(&dump->kref);
2867	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2868
2869	if (off > dump->driver_dump.hdr.len) {
2870		kref_put(&dump->kref, ipr_release_dump);
2871		return 0;
2872	}
2873
2874	if (off + count > dump->driver_dump.hdr.len) {
2875		count = dump->driver_dump.hdr.len - off;
2876		rc = count;
2877	}
2878
2879	if (count && off < sizeof(dump->driver_dump)) {
2880		if (off + count > sizeof(dump->driver_dump))
2881			len = sizeof(dump->driver_dump) - off;
2882		else
2883			len = count;
2884		src = (u8 *)&dump->driver_dump + off;
2885		memcpy(buf, src, len);
2886		buf += len;
2887		off += len;
2888		count -= len;
2889	}
2890
2891	off -= sizeof(dump->driver_dump);
2892
2893	if (count && off < offsetof(struct ipr_ioa_dump, ioa_data)) {
2894		if (off + count > offsetof(struct ipr_ioa_dump, ioa_data))
2895			len = offsetof(struct ipr_ioa_dump, ioa_data) - off;
2896		else
2897			len = count;
2898		src = (u8 *)&dump->ioa_dump + off;
2899		memcpy(buf, src, len);
2900		buf += len;
2901		off += len;
2902		count -= len;
2903	}
2904
2905	off -= offsetof(struct ipr_ioa_dump, ioa_data);
2906
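	/* What remains of 'off' now indexes the IOA dump data pages.  Each
	 * iteration copies at most up to the next page boundary, so a single
	 * memcpy never spans two non-contiguous pages in ioa_data[].
	 */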
2907	while (count) {
2908		if ((off & PAGE_MASK) != ((off + count) & PAGE_MASK))
2909			len = PAGE_ALIGN(off) - off;
2910		else
2911			len = count;
2912		src = (u8 *)dump->ioa_dump.ioa_data[(off & PAGE_MASK) >> PAGE_SHIFT];
2913		src += off & ~PAGE_MASK;
2914		memcpy(buf, src, len);
2915		buf += len;
2916		off += len;
2917		count -= len;
2918	}
2919
2920	kref_put(&dump->kref, ipr_release_dump);
2921	return rc;
2922}
2923
2924/**
2925 * ipr_alloc_dump - Prepare for adapter dump
2926 * @ioa_cfg:	ioa config struct
2927 *
2928 * Return value:
2929 *	0 on success / other on failure
2930 **/
2931static int ipr_alloc_dump(struct ipr_ioa_cfg *ioa_cfg)
2932{
2933	struct ipr_dump *dump;
2934	unsigned long lock_flags = 0;
2935
2936	ENTER;
2937	dump = kzalloc(sizeof(struct ipr_dump), GFP_KERNEL);
2938
2939	if (!dump) {
2940		ipr_err("Dump memory allocation failed\n");
2941		return -ENOMEM;
2942	}
2943
2944	kref_init(&dump->kref);
2945	dump->ioa_cfg = ioa_cfg;
2946
2947	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2948
2949	if (INACTIVE != ioa_cfg->sdt_state) {
2950		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2951		kfree(dump);
2952		return 0;
2953	}
2954
2955	ioa_cfg->dump = dump;
2956	ioa_cfg->sdt_state = WAIT_FOR_DUMP;
2957	if (ioa_cfg->ioa_is_dead && !ioa_cfg->dump_taken) {
2958		ioa_cfg->dump_taken = 1;
2959		schedule_work(&ioa_cfg->work_q);
2960	}
2961	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2962
2963	LEAVE;
2964	return 0;
2965}
2966
2967/**
2968 * ipr_free_dump - Free adapter dump memory
2969 * @ioa_cfg:	ioa config struct
2970 *
2971 * Return value:
2972 *	0 on success / other on failure
2973 **/
2974static int ipr_free_dump(struct ipr_ioa_cfg *ioa_cfg)
2975{
2976	struct ipr_dump *dump;
2977	unsigned long lock_flags = 0;
2978
2979	ENTER;
2980
2981	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2982	dump = ioa_cfg->dump;
2983	if (!dump) {
2984		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2985		return 0;
2986	}
2987
2988	ioa_cfg->dump = NULL;
2989	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2990
2991	kref_put(&dump->kref, ipr_release_dump);
2992
2993	LEAVE;
2994	return 0;
2995}
2996
2997/**
2998 * ipr_write_dump - Setup dump state of adapter
2999 * @kobj:		kobject struct
3000 * @buf:		buffer
3001 * @off:		offset
3002 * @count:		buffer size
3003 *
3004 * Return value:
3005 *	count on success / other on failure
3006 **/
3007static ssize_t ipr_write_dump(struct kobject *kobj, char *buf,
3008			      loff_t off, size_t count)
3009{
3010	struct class_device *cdev = container_of(kobj,struct class_device,kobj);
3011	struct Scsi_Host *shost = class_to_shost(cdev);
3012	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3013	int rc;
3014
3015	if (!capable(CAP_SYS_ADMIN))
3016		return -EACCES;
3017
3018	if (buf[0] == '1')
3019		rc = ipr_alloc_dump(ioa_cfg);
3020	else if (buf[0] == '0')
3021		rc = ipr_free_dump(ioa_cfg);
3022	else
3023		return -EINVAL;
3024
3025	if (rc)
3026		return rc;
3027	else
3028		return count;
3029}
3030
3031static struct bin_attribute ipr_dump_attr = {
3032	.attr =	{
3033		.name = "dump",
3034		.mode = S_IRUSR | S_IWUSR,
3035	},
3036	.size = 0,
3037	.read = ipr_read_dump,
3038	.write = ipr_write_dump
3039};
3040#else
3041static int ipr_free_dump(struct ipr_ioa_cfg *ioa_cfg) { return 0; }
3042#endif
3043
3044/**
3045 * ipr_change_queue_depth - Change the device's queue depth
3046 * @sdev:	scsi device struct
3047 * @qdepth:	depth to set
3048 *
3049 * Return value:
3050 * 	actual depth set
3051 **/
3052static int ipr_change_queue_depth(struct scsi_device *sdev, int qdepth)
3053{
3054	scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), qdepth);
3055	return sdev->queue_depth;
3056}
3057
3058/**
3059 * ipr_change_queue_type - Change the device's queue type
3060 * @sdev:		scsi device struct
3061 * @tag_type:	type of tags to use
3062 *
3063 * Return value:
3064 * 	actual queue type set
3065 **/
3066static int ipr_change_queue_type(struct scsi_device *sdev, int tag_type)
3067{
3068	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
3069	struct ipr_resource_entry *res;
3070	unsigned long lock_flags = 0;
3071
3072	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3073	res = (struct ipr_resource_entry *)sdev->hostdata;
3074
3075	if (res) {
3076		if (ipr_is_gscsi(res) && sdev->tagged_supported) {
3077			/*
3078			 * We don't bother quiescing the device here since the
3079			 * adapter firmware does it for us.
3080			 */
3081			scsi_set_tag_type(sdev, tag_type);
3082
3083			if (tag_type)
3084				scsi_activate_tcq(sdev, sdev->queue_depth);
3085			else
3086				scsi_deactivate_tcq(sdev, sdev->queue_depth);
3087		} else
3088			tag_type = 0;
3089	} else
3090		tag_type = 0;
3091
3092	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3093	return tag_type;
3094}
3095
3096/**
3097 * ipr_show_adapter_handle - Show the adapter's resource handle for this device
3098 * @dev:	device struct
3099 * @buf:	buffer
3100 *
3101 * Return value:
3102 * 	number of bytes printed to buffer
3103 **/
3104static ssize_t ipr_show_adapter_handle(struct device *dev, struct device_attribute *attr, char *buf)
3105{
3106	struct scsi_device *sdev = to_scsi_device(dev);
3107	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
3108	struct ipr_resource_entry *res;
3109	unsigned long lock_flags = 0;
3110	ssize_t len = -ENXIO;
3111
3112	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3113	res = (struct ipr_resource_entry *)sdev->hostdata;
3114	if (res)
3115		len = snprintf(buf, PAGE_SIZE, "%08X\n", res->cfgte.res_handle);
3116	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3117	return len;
3118}
3119
3120static struct device_attribute ipr_adapter_handle_attr = {
3121	.attr = {
3122		.name = 	"adapter_handle",
3123		.mode =		S_IRUSR,
3124	},
3125	.show = ipr_show_adapter_handle
3126};
3127
3128static struct device_attribute *ipr_dev_attrs[] = {
3129	&ipr_adapter_handle_attr,
3130	NULL,
3131};
3132
3133/**
3134 * ipr_biosparam - Return the HSC mapping
3135 * @sdev:			scsi device struct
3136 * @block_device:	block device pointer
3137 * @capacity:		capacity of the device
3138 * @parm:			Array containing returned HSC values.
3139 *
3140 * This function generates the HSC parms that fdisk uses.
3141 * We want to make sure we return something that places partitions
3142 * on 4k boundaries for best performance with the IOA.
3143 *
3144 * Return value:
3145 * 	0 on success
3146 **/
3147static int ipr_biosparam(struct scsi_device *sdev,
3148			 struct block_device *block_device,
3149			 sector_t capacity, int *parm)
3150{
3151	int heads, sectors;
3152	sector_t cylinders;
3153
3154	heads = 128;
3155	sectors = 32;
3156
3157	cylinders = capacity;
3158	sector_div(cylinders, (128 * 32));
3159
3160	/* return result */
3161	parm[0] = heads;
3162	parm[1] = sectors;
3163	parm[2] = cylinders;
3164
3165	return 0;
3166}
3167
3168/**
3169 * ipr_slave_destroy - Unconfigure a SCSI device
3170 * @sdev:	scsi device struct
3171 *
3172 * Return value:
3173 * 	nothing
3174 **/
3175static void ipr_slave_destroy(struct scsi_device *sdev)
3176{
3177	struct ipr_resource_entry *res;
3178	struct ipr_ioa_cfg *ioa_cfg;
3179	unsigned long lock_flags = 0;
3180
3181	ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
3182
3183	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3184	res = (struct ipr_resource_entry *) sdev->hostdata;
3185	if (res) {
3186		sdev->hostdata = NULL;
3187		res->sdev = NULL;
3188	}
3189	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3190}
3191
3192/**
3193 * ipr_slave_configure - Configure a SCSI device
3194 * @sdev:	scsi device struct
3195 *
3196 * This function configures the specified scsi device.
3197 *
3198 * Return value:
3199 * 	0 on success
3200 **/
3201static int ipr_slave_configure(struct scsi_device *sdev)
3202{
3203	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
3204	struct ipr_resource_entry *res;
3205	unsigned long lock_flags = 0;
3206
3207	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3208	res = sdev->hostdata;
3209	if (res) {
3210		if (ipr_is_af_dasd_device(res))
3211			sdev->type = TYPE_RAID;
3212		if (ipr_is_af_dasd_device(res) || ipr_is_ioa_resource(res)) {
3213			sdev->scsi_level = 4;
3214			sdev->no_uld_attach = 1;
3215		}
3216		if (ipr_is_vset_device(res)) {
3217			sdev->timeout = IPR_VSET_RW_TIMEOUT;
3218			blk_queue_max_sectors(sdev->request_queue, IPR_VSET_MAX_SECTORS);
3219		}
3220		if (ipr_is_vset_device(res) || ipr_is_scsi_disk(res))
3221			sdev->allow_restart = 1;
3222		scsi_adjust_queue_depth(sdev, 0, sdev->host->cmd_per_lun);
3223	}
3224	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3225	return 0;
3226}
3227
3228/**
3229 * ipr_slave_alloc - Prepare for commands to a device.
3230 * @sdev:	scsi device struct
3231 *
3232 * This function saves a pointer to the resource entry
3233 * in the scsi device struct if the device exists. We
3234 * can then use this pointer in ipr_queuecommand when
3235 * handling new commands.
3236 *
3237 * Return value:
3238 * 	0 on success / -ENXIO if device does not exist
3239 **/
3240static int ipr_slave_alloc(struct scsi_device *sdev)
3241{
3242	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
3243	struct ipr_resource_entry *res;
3244	unsigned long lock_flags;
3245	int rc = -ENXIO;
3246
3247	sdev->hostdata = NULL;
3248
3249	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3250
3251	list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
3252		if ((res->cfgte.res_addr.bus == sdev->channel) &&
3253		    (res->cfgte.res_addr.target == sdev->id) &&
3254		    (res->cfgte.res_addr.lun == sdev->lun)) {
3255			res->sdev = sdev;
3256			res->add_to_ml = 0;
3257			res->in_erp = 0;
3258			sdev->hostdata = res;
3259			if (!ipr_is_naca_model(res))
3260				res->needs_sync_complete = 1;
3261			rc = 0;
3262			break;
3263		}
3264	}
3265
3266	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3267
3268	return rc;
3269}
3270
3271/**
3272 * ipr_eh_host_reset - Reset the host adapter
3273 * @scsi_cmd:	scsi command struct
3274 *
3275 * Return value:
3276 * 	SUCCESS / FAILED
3277 **/
3278static int __ipr_eh_host_reset(struct scsi_cmnd * scsi_cmd)
3279{
3280	struct ipr_ioa_cfg *ioa_cfg;
3281	int rc;
3282
3283	ENTER;
3284	ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata;
3285
3286	dev_err(&ioa_cfg->pdev->dev,
3287		"Adapter being reset as a result of error recovery.\n");
3288
3289	if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
3290		ioa_cfg->sdt_state = GET_DUMP;
3291
3292	rc = ipr_reset_reload(ioa_cfg, IPR_SHUTDOWN_ABBREV);
3293
3294	LEAVE;
3295	return rc;
3296}
3297
3298static int ipr_eh_host_reset(struct scsi_cmnd * cmd)
3299{
3300	int rc;
3301
3302	spin_lock_irq(cmd->device->host->host_lock);
3303	rc = __ipr_eh_host_reset(cmd);
3304	spin_unlock_irq(cmd->device->host->host_lock);
3305
3306	return rc;
3307}
3308
3309/**
3310 * ipr_device_reset - Reset the device
3311 * @ioa_cfg:	ioa config struct
3312 * @res:		resource entry struct
3313 *
3314 * This function issues a device reset to the affected device.
3315 * If the device is a SCSI device, a LUN reset will be sent
3316 * to the device first. If that does not work, a target reset
3317 * will be sent.
3318 *
3319 * Return value:
3320 *	0 on success / non-zero on failure
3321 **/
3322static int ipr_device_reset(struct ipr_ioa_cfg *ioa_cfg,
3323			    struct ipr_resource_entry *res)
3324{
3325	struct ipr_cmnd *ipr_cmd;
3326	struct ipr_ioarcb *ioarcb;
3327	struct ipr_cmd_pkt *cmd_pkt;
3328	u32 ioasc;
3329
3330	ENTER;
3331	ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
3332	ioarcb = &ipr_cmd->ioarcb;
3333	cmd_pkt = &ioarcb->cmd_pkt;
3334
3335	ioarcb->res_handle = res->cfgte.res_handle;
3336	cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
3337	cmd_pkt->cdb[0] = IPR_RESET_DEVICE;
3338
3339	ipr_send_blocking_cmd(ipr_cmd, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
3340	ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
3341	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
3342
3343	LEAVE;
3344	return (IPR_IOASC_SENSE_KEY(ioasc) ? -EIO : 0);
3345}
3346
3347/**
3348 * ipr_eh_dev_reset - Reset the device
3349 * @scsi_cmd:	scsi command struct
3350 *
3351 * This function issues a device reset to the affected device.
3352 * A LUN reset will be sent to the device first. If that does
3353 * not work, a target reset will be sent.
3354 *
3355 * Return value:
3356 *	SUCCESS / FAILED
3357 **/
3358static int __ipr_eh_dev_reset(struct scsi_cmnd * scsi_cmd)
3359{
3360	struct ipr_cmnd *ipr_cmd;
3361	struct ipr_ioa_cfg *ioa_cfg;
3362	struct ipr_resource_entry *res;
3363	int rc;
3364
3365	ENTER;
3366	ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata;
3367	res = scsi_cmd->device->hostdata;
3368
3369	if (!res)
3370		return FAILED;
3371
3372	/*
3373	 * If we are currently going through reset/reload, return failed. This will force the
3374	 * mid-layer to call ipr_eh_host_reset, which will then go to sleep and wait for the
3375	 * reset to complete
3376	 */
3377	if (ioa_cfg->in_reset_reload)
3378		return FAILED;
3379	if (ioa_cfg->ioa_is_dead)
3380		return FAILED;
3381
3382	list_for_each_entry(ipr_cmd, &ioa_cfg->pending_q, queue) {
3383		if (ipr_cmd->ioarcb.res_handle == res->cfgte.res_handle) {
3384			if (ipr_cmd->scsi_cmd)
3385				ipr_cmd->done = ipr_scsi_eh_done;
3386		}
3387	}
3388
3389	res->resetting_device = 1;
3390	scmd_printk(KERN_ERR, scsi_cmd, "Resetting device\n");
3391	rc = ipr_device_reset(ioa_cfg, res);
3392	res->resetting_device = 0;
3393
3394	LEAVE;
3395	return (rc ? FAILED : SUCCESS);
3396}
3397
3398static int ipr_eh_dev_reset(struct scsi_cmnd * cmd)
3399{
3400	int rc;
3401
3402	spin_lock_irq(cmd->device->host->host_lock);
3403	rc = __ipr_eh_dev_reset(cmd);
3404	spin_unlock_irq(cmd->device->host->host_lock);
3405
3406	return rc;
3407}
3408
3409/**
3410 * ipr_bus_reset_done - Op done function for bus reset.
3411 * @ipr_cmd:	ipr command struct
3412 *
3413 * This function is the op done function for a bus reset
3414 *
3415 * Return value:
3416 * 	none
3417 **/
3418static void ipr_bus_reset_done(struct ipr_cmnd *ipr_cmd)
3419{
3420	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
3421	struct ipr_resource_entry *res;
3422
3423	ENTER;
3424	list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
3425		if (!memcmp(&res->cfgte.res_handle, &ipr_cmd->ioarcb.res_handle,
3426			    sizeof(res->cfgte.res_handle))) {
3427			scsi_report_bus_reset(ioa_cfg->host, res->cfgte.res_addr.bus);
3428			break;
3429		}
3430	}
3431
3432	/*
3433	 * If abort has not completed, indicate the reset has, else call the
3434	 * abort's done function to wake the sleeping eh thread
3435	 */
3436	if (ipr_cmd->sibling->sibling)
3437		ipr_cmd->sibling->sibling = NULL;
3438	else
3439		ipr_cmd->sibling->done(ipr_cmd->sibling);
3440
3441	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
3442	LEAVE;
3443}
3444
3445/**
3446 * ipr_abort_timeout - An abort task has timed out
3447 * @ipr_cmd:	ipr command struct
3448 *
3449 * This function handles when an abort task times out. If this
3450 * happens we issue a bus reset since we have resources tied
3451 * up that must be freed before returning to the midlayer.
3452 *
3453 * Return value:
3454 *	none
3455 **/
3456static void ipr_abort_timeout(struct ipr_cmnd *ipr_cmd)
3457{
3458	struct ipr_cmnd *reset_cmd;
3459	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
3460	struct ipr_cmd_pkt *cmd_pkt;
3461	unsigned long lock_flags = 0;
3462
3463	ENTER;
3464	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3465	if (ipr_cmd->completion.done || ioa_cfg->in_reset_reload) {
3466		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3467		return;
3468	}
3469
3470	sdev_printk(KERN_ERR, ipr_cmd->u.sdev, "Abort timed out. Resetting bus.\n");
3471	reset_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
3472	ipr_cmd->sibling = reset_cmd;
3473	reset_cmd->sibling = ipr_cmd;
3474	reset_cmd->ioarcb.res_handle = ipr_cmd->ioarcb.res_handle;
3475	cmd_pkt = &reset_cmd->ioarcb.cmd_pkt;
3476	cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
3477	cmd_pkt->cdb[0] = IPR_RESET_DEVICE;
3478	cmd_pkt->cdb[2] = IPR_RESET_TYPE_SELECT | IPR_BUS_RESET;
3479
3480	ipr_do_req(reset_cmd, ipr_bus_reset_done, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
3481	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3482	LEAVE;
3483}
3484
3485/**
3486 * ipr_cancel_op - Cancel specified op
3487 * @scsi_cmd:	scsi command struct
3488 *
3489 * This function cancels specified op.
3490 *
3491 * Return value:
3492 *	SUCCESS / FAILED
3493 **/
3494static int ipr_cancel_op(struct scsi_cmnd * scsi_cmd)
3495{
3496	struct ipr_cmnd *ipr_cmd;
3497	struct ipr_ioa_cfg *ioa_cfg;
3498	struct ipr_resource_entry *res;
3499	struct ipr_cmd_pkt *cmd_pkt;
3500	u32 ioasc;
3501	int op_found = 0;
3502
3503	ENTER;
3504	ioa_cfg = (struct ipr_ioa_cfg *)scsi_cmd->device->host->hostdata;
3505	res = scsi_cmd->device->hostdata;
3506
3507	/* If we are currently going through reset/reload, return failed.
3508	 * This will force the mid-layer to call ipr_eh_host_reset,
3509	 * which will then go to sleep and wait for the reset to complete
3510	 */
3511	if (ioa_cfg->in_reset_reload || ioa_cfg->ioa_is_dead)
3512		return FAILED;
3513	if (!res || (!ipr_is_gscsi(res) && !ipr_is_vset_device(res)))
3514		return FAILED;
3515
3516	list_for_each_entry(ipr_cmd, &ioa_cfg->pending_q, queue) {
3517		if (ipr_cmd->scsi_cmd == scsi_cmd) {
3518			ipr_cmd->done = ipr_scsi_eh_done;
3519			op_found = 1;
3520			break;
3521		}
3522	}
3523
3524	if (!op_found)
3525		return SUCCESS;
3526
3527	ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
3528	ipr_cmd->ioarcb.res_handle = res->cfgte.res_handle;
3529	cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
3530	cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
3531	cmd_pkt->cdb[0] = IPR_CANCEL_ALL_REQUESTS;
3532	ipr_cmd->u.sdev = scsi_cmd->device;
3533
3534	scmd_printk(KERN_ERR, scsi_cmd, "Aborting command: %02X\n",
3535		    scsi_cmd->cmnd[0]);
3536	ipr_send_blocking_cmd(ipr_cmd, ipr_abort_timeout, IPR_CANCEL_ALL_TIMEOUT);
3537	ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
3538
3539	/*
3540	 * If the abort task timed out and we sent a bus reset, we will get
3541	 * one of the following responses to the abort
3542	 */
3543	if (ioasc == IPR_IOASC_BUS_WAS_RESET || ioasc == IPR_IOASC_SYNC_REQUIRED) {
3544		ioasc = 0;
3545		ipr_trace;
3546	}
3547
3548	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
3549	if (!ipr_is_naca_model(res))
3550		res->needs_sync_complete = 1;
3551
3552	LEAVE;
3553	return (IPR_IOASC_SENSE_KEY(ioasc) ? FAILED : SUCCESS);
3554}
3555
3556/**
3557 * ipr_eh_abort - Abort a single op
3558 * @scsi_cmd:	scsi command struct
3559 *
3560 * Return value:
3561 * 	SUCCESS / FAILED
3562 **/
3563static int ipr_eh_abort(struct scsi_cmnd * scsi_cmd)
3564{
3565	unsigned long flags;
3566	int rc;
3567
3568	ENTER;
3569
3570	spin_lock_irqsave(scsi_cmd->device->host->host_lock, flags);
3571	rc = ipr_cancel_op(scsi_cmd);
3572	spin_unlock_irqrestore(scsi_cmd->device->host->host_lock, flags);
3573
3574	LEAVE;
3575	return rc;
3576}
3577
3578/**
3579 * ipr_handle_other_interrupt - Handle "other" interrupts
3580 * @ioa_cfg:	ioa config struct
3581 * @int_reg:	interrupt register
3582 *
3583 * Return value:
3584 * 	IRQ_NONE / IRQ_HANDLED
3585 **/
3586static irqreturn_t ipr_handle_other_interrupt(struct ipr_ioa_cfg *ioa_cfg,
3587					      volatile u32 int_reg)
3588{
3589	irqreturn_t rc = IRQ_HANDLED;
3590
3591	if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
3592		/* Mask the interrupt */
3593		writel(IPR_PCII_IOA_TRANS_TO_OPER, ioa_cfg->regs.set_interrupt_mask_reg);
3594
3595		/* Clear the interrupt */
3596		writel(IPR_PCII_IOA_TRANS_TO_OPER, ioa_cfg->regs.clr_interrupt_reg);
3597		int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
3598
3599		list_del(&ioa_cfg->reset_cmd->queue);
3600		del_timer(&ioa_cfg->reset_cmd->timer);
3601		ipr_reset_ioa_job(ioa_cfg->reset_cmd);
3602	} else {
3603		if (int_reg & IPR_PCII_IOA_UNIT_CHECKED)
3604			ioa_cfg->ioa_unit_checked = 1;
3605		else
3606			dev_err(&ioa_cfg->pdev->dev,
3607				"Permanent IOA failure. 0x%08X\n", int_reg);
3608
3609		if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
3610			ioa_cfg->sdt_state = GET_DUMP;
3611
3612		ipr_mask_and_clear_interrupts(ioa_cfg, ~0);
3613		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
3614	}
3615
3616	return rc;
3617}
3618
3619/**
3620 * ipr_isr - Interrupt service routine
3621 * @irq:	irq number
3622 * @devp:	pointer to ioa config struct
3623 * @regs:	pt_regs struct
3624 *
3625 * Return value:
3626 * 	IRQ_NONE / IRQ_HANDLED
3627 **/
3628static irqreturn_t ipr_isr(int irq, void *devp, struct pt_regs *regs)
3629{
3630	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)devp;
3631	unsigned long lock_flags = 0;
3632	volatile u32 int_reg, int_mask_reg;
3633	u32 ioasc;
3634	u16 cmd_index;
3635	struct ipr_cmnd *ipr_cmd;
3636	irqreturn_t rc = IRQ_NONE;
3637
3638	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3639
3640	/* If interrupts are disabled, ignore the interrupt */
3641	if (!ioa_cfg->allow_interrupts) {
3642		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3643		return IRQ_NONE;
3644	}
3645
3646	int_mask_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
3647	int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg;
3648
3649	/* If none of the adapter's operational interrupts are asserted, ignore it */
3650	if (unlikely((int_reg & IPR_PCII_OPER_INTERRUPTS) == 0)) {
3651		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3652		return IRQ_NONE;
3653	}
3654
3655	while (1) {
3656		ipr_cmd = NULL;
3657
3658		while ((be32_to_cpu(*ioa_cfg->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) ==
3659		       ioa_cfg->toggle_bit) {
3660
3661			cmd_index = (be32_to_cpu(*ioa_cfg->hrrq_curr) &
3662				     IPR_HRRQ_REQ_RESP_HANDLE_MASK) >> IPR_HRRQ_REQ_RESP_HANDLE_SHIFT;
3663
3664			if (unlikely(cmd_index >= IPR_NUM_CMD_BLKS)) {
3665				ioa_cfg->errors_logged++;
3666				dev_err(&ioa_cfg->pdev->dev, "Invalid response handle from IOA\n");
3667
3668				if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
3669					ioa_cfg->sdt_state = GET_DUMP;
3670
3671				ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
3672				spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3673				return IRQ_HANDLED;
3674			}
3675
3676			ipr_cmd = ioa_cfg->ipr_cmnd_list[cmd_index];
3677
3678			ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
3679
3680			ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH, ioasc);
3681
3682			list_del(&ipr_cmd->queue);
3683			del_timer(&ipr_cmd->timer);
3684			ipr_cmd->done(ipr_cmd);
3685
3686			rc = IRQ_HANDLED;
3687
3688			if (ioa_cfg->hrrq_curr < ioa_cfg->hrrq_end) {
3689				ioa_cfg->hrrq_curr++;
3690			} else {
3691				ioa_cfg->hrrq_curr = ioa_cfg->hrrq_start;
3692				ioa_cfg->toggle_bit ^= 1u;
3693			}
3694		}
3695
3696		if (ipr_cmd != NULL) {
3697			/* Clear the PCI interrupt */
3698			writel(IPR_PCII_HRRQ_UPDATED, ioa_cfg->regs.clr_interrupt_reg);
3699			int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg;
3700		} else
3701			break;
3702	}
3703
3704	if (unlikely(rc == IRQ_NONE))
3705		rc = ipr_handle_other_interrupt(ioa_cfg, int_reg);
3706
3707	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3708	return rc;
3709}
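
/*
 * For clarity, a small worked example of the HRRQ toggle-bit protocol that
 * ipr_isr() relies on above (the 4-entry queue is hypothetical; the real
 * queue holds IPR_NUM_CMD_BLKS entries). The adapter writes response
 * entries with the toggle bit set on its first pass over the queue, clear
 * on its second pass, set again on its third, and so on. The host starts
 * with toggle_bit = 1 and flips it each time hrrq_curr wraps from hrrq_end
 * back to hrrq_start, so an entry belongs to the host exactly while its
 * toggle bit matches ioa_cfg->toggle_bit:
 *
 *	pass 1: IOA writes toggles [1,1,1,1]; host expects 1, consumes all
 *		four entries, wraps, and flips its expectation to 0
 *	pass 2: IOA writes toggles [0,0,0,0]; host expects 0, consumes them
 *		and flips back to 1
 *
 * An entry left over from the previous pass still carries the stale toggle
 * value, so the inner loop above stops on it until the IOA rewrites it.
 */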
3710
3711/**
3712 * ipr_build_ioadl - Build a scatter/gather list and map the buffer
3713 * @ioa_cfg:	ioa config struct
3714 * @ipr_cmd:	ipr command struct
3715 *
3716 * Return value:
3717 * 	0 on success / -1 on failure
3718 **/
3719static int ipr_build_ioadl(struct ipr_ioa_cfg *ioa_cfg,
3720			   struct ipr_cmnd *ipr_cmd)
3721{
3722	int i;
3723	struct scatterlist *sglist;
3724	u32 length;
3725	u32 ioadl_flags = 0;
3726	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
3727	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
3728	struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;
3729
3730	length = scsi_cmd->request_bufflen;
3731
3732	if (length == 0)
3733		return 0;
3734
3735	if (scsi_cmd->use_sg) {
3736		ipr_cmd->dma_use_sg = pci_map_sg(ioa_cfg->pdev,
3737						 scsi_cmd->request_buffer,
3738						 scsi_cmd->use_sg,
3739						 scsi_cmd->sc_data_direction);
3740
3741		if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) {
3742			ioadl_flags = IPR_IOADL_FLAGS_WRITE;
3743			ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
3744			ioarcb->write_data_transfer_length = cpu_to_be32(length);
3745			ioarcb->write_ioadl_len =
3746				cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
3747		} else if (scsi_cmd->sc_data_direction == DMA_FROM_DEVICE) {
3748			ioadl_flags = IPR_IOADL_FLAGS_READ;
3749			ioarcb->read_data_transfer_length = cpu_to_be32(length);
3750			ioarcb->read_ioadl_len =
3751				cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
3752		}
3753
3754		sglist = scsi_cmd->request_buffer;
3755
3756		for (i = 0; i < ipr_cmd->dma_use_sg; i++) {
3757			ioadl[i].flags_and_data_len =
3758				cpu_to_be32(ioadl_flags | sg_dma_len(&sglist[i]));
3759			ioadl[i].address =
3760				cpu_to_be32(sg_dma_address(&sglist[i]));
3761		}
3762
3763		if (likely(ipr_cmd->dma_use_sg)) {
3764			ioadl[i-1].flags_and_data_len |=
3765				cpu_to_be32(IPR_IOADL_FLAGS_LAST);
3766			return 0;
3767		} else
3768			dev_err(&ioa_cfg->pdev->dev, "pci_map_sg failed!\n");
3769	} else {
3770		if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) {
3771			ioadl_flags = IPR_IOADL_FLAGS_WRITE;
3772			ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
3773			ioarcb->write_data_transfer_length = cpu_to_be32(length);
3774			ioarcb->write_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
3775		} else if (scsi_cmd->sc_data_direction == DMA_FROM_DEVICE) {
3776			ioadl_flags = IPR_IOADL_FLAGS_READ;
3777			ioarcb->read_data_transfer_length = cpu_to_be32(length);
3778			ioarcb->read_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
3779		}
3780
3781		ipr_cmd->dma_handle = pci_map_single(ioa_cfg->pdev,
3782						     scsi_cmd->request_buffer, length,
3783						     scsi_cmd->sc_data_direction);
3784
3785		if (likely(!pci_dma_mapping_error(ipr_cmd->dma_handle))) {
3786			ipr_cmd->dma_use_sg = 1;
3787			ioadl[0].flags_and_data_len =
3788				cpu_to_be32(ioadl_flags | length | IPR_IOADL_FLAGS_LAST);
3789			ioadl[0].address = cpu_to_be32(ipr_cmd->dma_handle);
3790			return 0;
3791		} else
3792			dev_err(&ioa_cfg->pdev->dev, "pci_map_single failed!\n");
3793	}
3794
3795	return -1;
3796}
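
/*
 * A minimal sketch of how a single IOADL descriptor is encoded, pulled out
 * of ipr_build_ioadl() above for illustration only (ipr_fill_ioadl_entry()
 * is a hypothetical helper, not part of the driver). Each descriptor packs
 * the transfer flags and byte count into one big-endian word and the DMA
 * address into a second; the last descriptor in a chain must also carry
 * IPR_IOADL_FLAGS_LAST.
 *
 *	static void ipr_fill_ioadl_entry(struct ipr_ioadl_desc *desc,
 *					 u32 dma_addr, u32 len,
 *					 u32 flags, int last)
 *	{
 *		if (last)
 *			flags |= IPR_IOADL_FLAGS_LAST;
 *		desc->flags_and_data_len = cpu_to_be32(flags | len);
 *		desc->address = cpu_to_be32(dma_addr);
 *	}
 */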
3797
3798/**
3799 * ipr_get_task_attributes - Translate SPI Q-Tag to task attributes
3800 * @scsi_cmd:	scsi command struct
3801 *
3802 * Return value:
3803 * 	task attributes
3804 **/
3805static u8 ipr_get_task_attributes(struct scsi_cmnd *scsi_cmd)
3806{
3807	u8 tag[2];
3808	u8 rc = IPR_FLAGS_LO_UNTAGGED_TASK;
3809
3810	if (scsi_populate_tag_msg(scsi_cmd, tag)) {
3811		switch (tag[0]) {
3812		case MSG_SIMPLE_TAG:
3813			rc = IPR_FLAGS_LO_SIMPLE_TASK;
3814			break;
3815		case MSG_HEAD_TAG:
3816			rc = IPR_FLAGS_LO_HEAD_OF_Q_TASK;
3817			break;
3818		case MSG_ORDERED_TAG:
3819			rc = IPR_FLAGS_LO_ORDERED_TASK;
3820			break;
3821		}
3822	}
3823
3824	return rc;
3825}
3826
3827/**
3828 * ipr_erp_done - Process completion of ERP for a device
3829 * @ipr_cmd:		ipr command struct
3830 *
3831 * This function copies the sense buffer into the scsi_cmd
3832 * struct and pushes the scsi_done function.
3833 *
3834 * Return value:
3835 * 	nothing
3836 **/
3837static void ipr_erp_done(struct ipr_cmnd *ipr_cmd)
3838{
3839	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
3840	struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
3841	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
3842	u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
3843
3844	if (IPR_IOASC_SENSE_KEY(ioasc) > 0) {
3845		scsi_cmd->result |= (DID_ERROR << 16);
3846		scmd_printk(KERN_ERR, scsi_cmd,
3847			    "Request Sense failed with IOASC: 0x%08X\n", ioasc);
3848	} else {
3849		memcpy(scsi_cmd->sense_buffer, ipr_cmd->sense_buffer,
3850		       SCSI_SENSE_BUFFERSIZE);
3851	}
3852
3853	if (res) {
3854		if (!ipr_is_naca_model(res))
3855			res->needs_sync_complete = 1;
3856		res->in_erp = 0;
3857	}
3858	ipr_unmap_sglist(ioa_cfg, ipr_cmd);
3859	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
3860	scsi_cmd->scsi_done(scsi_cmd);
3861}
3862
3863/**
3864 * ipr_reinit_ipr_cmnd_for_erp - Re-initialize a cmnd block to be used for ERP
3865 * @ipr_cmd:	ipr command struct
3866 *
3867 * Return value:
3868 * 	none
3869 **/
3870static void ipr_reinit_ipr_cmnd_for_erp(struct ipr_cmnd *ipr_cmd)
3871{
3872	struct ipr_ioarcb *ioarcb;
3873	struct ipr_ioasa *ioasa;
3874
3875	ioarcb = &ipr_cmd->ioarcb;
3876	ioasa = &ipr_cmd->ioasa;
3877
3878	memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt));
3879	ioarcb->write_data_transfer_length = 0;
3880	ioarcb->read_data_transfer_length = 0;
3881	ioarcb->write_ioadl_len = 0;
3882	ioarcb->read_ioadl_len = 0;
3883	ioasa->ioasc = 0;
3884	ioasa->residual_data_len = 0;
3885}
3886
3887/**
3888 * ipr_erp_request_sense - Send request sense to a device
3889 * @ipr_cmd:	ipr command struct
3890 *
3891 * This function sends a request sense to a device as a result
3892 * of a check condition.
3893 *
3894 * Return value:
3895 * 	nothing
3896 **/
3897static void ipr_erp_request_sense(struct ipr_cmnd *ipr_cmd)
3898{
3899	struct ipr_cmd_pkt *cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
3900	u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
3901
3902	if (IPR_IOASC_SENSE_KEY(ioasc) > 0) {
3903		ipr_erp_done(ipr_cmd);
3904		return;
3905	}
3906
3907	ipr_reinit_ipr_cmnd_for_erp(ipr_cmd);
3908
3909	cmd_pkt->request_type = IPR_RQTYPE_SCSICDB;
3910	cmd_pkt->cdb[0] = REQUEST_SENSE;
3911	cmd_pkt->cdb[4] = SCSI_SENSE_BUFFERSIZE;
3912	cmd_pkt->flags_hi |= IPR_FLAGS_HI_SYNC_OVERRIDE;
3913	cmd_pkt->flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
3914	cmd_pkt->timeout = cpu_to_be16(IPR_REQUEST_SENSE_TIMEOUT / HZ);
3915
3916	ipr_cmd->ioadl[0].flags_and_data_len =
3917		cpu_to_be32(IPR_IOADL_FLAGS_READ_LAST | SCSI_SENSE_BUFFERSIZE);
3918	ipr_cmd->ioadl[0].address =
3919		cpu_to_be32(ipr_cmd->sense_buffer_dma);
3920
3921	ipr_cmd->ioarcb.read_ioadl_len =
3922		cpu_to_be32(sizeof(struct ipr_ioadl_desc));
3923	ipr_cmd->ioarcb.read_data_transfer_length =
3924		cpu_to_be32(SCSI_SENSE_BUFFERSIZE);
3925
3926	ipr_do_req(ipr_cmd, ipr_erp_done, ipr_timeout,
3927		   IPR_REQUEST_SENSE_TIMEOUT * 2);
3928}
3929
3930/**
3931 * ipr_erp_cancel_all - Send cancel all to a device
3932 * @ipr_cmd:	ipr command struct
3933 *
3934 * This function sends a cancel all to a device to clear the
3935 * queue. If we are running TCQ on the device, QERR is set to 1,
3936 * which means all outstanding ops have been dropped on the floor.
3937 * Cancel all will return them to us.
3938 *
3939 * Return value:
3940 * 	nothing
3941 **/
3942static void ipr_erp_cancel_all(struct ipr_cmnd *ipr_cmd)
3943{
3944	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
3945	struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
3946	struct ipr_cmd_pkt *cmd_pkt;
3947
3948	res->in_erp = 1;
3949
3950	ipr_reinit_ipr_cmnd_for_erp(ipr_cmd);
3951
3952	if (!scsi_get_tag_type(scsi_cmd->device)) {
3953		ipr_erp_request_sense(ipr_cmd);
3954		return;
3955	}
3956
3957	cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
3958	cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
3959	cmd_pkt->cdb[0] = IPR_CANCEL_ALL_REQUESTS;
3960
3961	ipr_do_req(ipr_cmd, ipr_erp_request_sense, ipr_timeout,
3962		   IPR_CANCEL_ALL_TIMEOUT);
3963}
3964
3965/**
3966 * ipr_dump_ioasa - Dump contents of IOASA
3967 * @ioa_cfg:	ioa config struct
3968 * @ipr_cmd:	ipr command struct
3969 * @res:		resource entry struct
3970 *
3971 * This function is invoked by the interrupt handler when ops
3972 * fail. It will log the IOASA if appropriate. Only called
3973 * for GPDD ops.
3974 *
3975 * Return value:
3976 * 	none
3977 **/
3978static void ipr_dump_ioasa(struct ipr_ioa_cfg *ioa_cfg,
3979			   struct ipr_cmnd *ipr_cmd, struct ipr_resource_entry *res)
3980{
3981	int i;
3982	u16 data_len;
3983	u32 ioasc;
3984	struct ipr_ioasa *ioasa = &ipr_cmd->ioasa;
3985	__be32 *ioasa_data = (__be32 *)ioasa;
3986	int error_index;
3987
3988	ioasc = be32_to_cpu(ioasa->ioasc) & IPR_IOASC_IOASC_MASK;
3989
3990	if (0 == ioasc)
3991		return;
3992
3993	if (ioa_cfg->log_level < IPR_DEFAULT_LOG_LEVEL)
3994		return;
3995
3996	error_index = ipr_get_error(ioasc);
3997
3998	if (ioa_cfg->log_level < IPR_MAX_LOG_LEVEL) {
3999		/* Don't log an error if the IOA already logged one */
4000		if (ioasa->ilid != 0)
4001			return;
4002
4003		if (ipr_error_table[error_index].log_ioasa == 0)
4004			return;
4005	}
4006
4007	ipr_res_err(ioa_cfg, res, "%s\n", ipr_error_table[error_index].error);
4008
4009	if (sizeof(struct ipr_ioasa) < be16_to_cpu(ioasa->ret_stat_len))
4010		data_len = sizeof(struct ipr_ioasa);
4011	else
4012		data_len = be16_to_cpu(ioasa->ret_stat_len);
4013
4014	ipr_err("IOASA Dump:\n");
4015
4016	for (i = 0; i < data_len / 4; i += 4) {
4017		ipr_err("%08X: %08X %08X %08X %08X\n", i*4,
4018			be32_to_cpu(ioasa_data[i]),
4019			be32_to_cpu(ioasa_data[i+1]),
4020			be32_to_cpu(ioasa_data[i+2]),
4021			be32_to_cpu(ioasa_data[i+3]));
4022	}
4023}
4024
4025/**
4026 * ipr_gen_sense - Generate SCSI sense data from an IOASA
4027 * @ipr_cmd:	ipr command struct
4029 *
4030 * Return value:
4031 * 	none
4032 **/
4033static void ipr_gen_sense(struct ipr_cmnd *ipr_cmd)
4034{
4035	u32 failing_lba;
4036	u8 *sense_buf = ipr_cmd->scsi_cmd->sense_buffer;
4037	struct ipr_resource_entry *res = ipr_cmd->scsi_cmd->device->hostdata;
4038	struct ipr_ioasa *ioasa = &ipr_cmd->ioasa;
4039	u32 ioasc = be32_to_cpu(ioasa->ioasc);
4040
4041	memset(sense_buf, 0, SCSI_SENSE_BUFFERSIZE);
4042
4043	if (ioasc >= IPR_FIRST_DRIVER_IOASC)
4044		return;
4045
4046	ipr_cmd->scsi_cmd->result = SAM_STAT_CHECK_CONDITION;
4047
4048	if (ipr_is_vset_device(res) &&
4049	    ioasc == IPR_IOASC_MED_DO_NOT_REALLOC &&
4050	    ioasa->u.vset.failing_lba_hi != 0) {
4051		sense_buf[0] = 0x72;
4052		sense_buf[1] = IPR_IOASC_SENSE_KEY(ioasc);
4053		sense_buf[2] = IPR_IOASC_SENSE_CODE(ioasc);
4054		sense_buf[3] = IPR_IOASC_SENSE_QUAL(ioasc);
4055
4056		sense_buf[7] = 12;
4057		sense_buf[8] = 0;
4058		sense_buf[9] = 0x0A;
4059		sense_buf[10] = 0x80;
4060
4061		failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_hi);
4062
4063		sense_buf[12] = (failing_lba & 0xff000000) >> 24;
4064		sense_buf[13] = (failing_lba & 0x00ff0000) >> 16;
4065		sense_buf[14] = (failing_lba & 0x0000ff00) >> 8;
4066		sense_buf[15] = failing_lba & 0x000000ff;
4067
4068		failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_lo);
4069
4070		sense_buf[16] = (failing_lba & 0xff000000) >> 24;
4071		sense_buf[17] = (failing_lba & 0x00ff0000) >> 16;
4072		sense_buf[18] = (failing_lba & 0x0000ff00) >> 8;
4073		sense_buf[19] = failing_lba & 0x000000ff;
4074	} else {
4075		sense_buf[0] = 0x70;
4076		sense_buf[2] = IPR_IOASC_SENSE_KEY(ioasc);
4077		sense_buf[12] = IPR_IOASC_SENSE_CODE(ioasc);
4078		sense_buf[13] = IPR_IOASC_SENSE_QUAL(ioasc);
4079
4080		/* Illegal request */
4081		if ((IPR_IOASC_SENSE_KEY(ioasc) == 0x05) &&
4082		    (be32_to_cpu(ioasa->ioasc_specific) & IPR_FIELD_POINTER_VALID)) {
4083			sense_buf[7] = 10;	/* additional length */
4084
4085			/* IOARCB was in error */
4086			if (IPR_IOASC_SENSE_CODE(ioasc) == 0x24)
4087				sense_buf[15] = 0xC0;
4088			else	/* Parameter data was invalid */
4089				sense_buf[15] = 0x80;
4090
4091			sense_buf[16] =
4092			    ((IPR_FIELD_POINTER_MASK &
4093			      be32_to_cpu(ioasa->ioasc_specific)) >> 8) & 0xff;
4094			sense_buf[17] =
4095			    (IPR_FIELD_POINTER_MASK &
4096			     be32_to_cpu(ioasa->ioasc_specific)) & 0xff;
4097		} else {
4098			if (ioasc == IPR_IOASC_MED_DO_NOT_REALLOC) {
4099				if (ipr_is_vset_device(res))
4100					failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_lo);
4101				else
4102					failing_lba = be32_to_cpu(ioasa->u.dasd.failing_lba);
4103
4104				sense_buf[0] |= 0x80;	/* Or in the Valid bit */
4105				sense_buf[3] = (failing_lba & 0xff000000) >> 24;
4106				sense_buf[4] = (failing_lba & 0x00ff0000) >> 16;
4107				sense_buf[5] = (failing_lba & 0x0000ff00) >> 8;
4108				sense_buf[6] = failing_lba & 0x000000ff;
4109			}
4110
4111			sense_buf[7] = 6;	/* additional length */
4112		}
4113	}
4114}
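
/*
 * For reference, the descriptor-format (0x72) sense data built above for a
 * vset "do not reallocate" error with a non-zero high LBA word lays out as
 * follows (byte offsets into sense_buf; this summarizes the code above):
 *
 *	[0]      0x72			response code, descriptor format
 *	[1..3]   key / ASC / ASCQ	taken from the IOASC
 *	[7]      12			additional sense length
 *	[8]      0x00			information descriptor type
 *	[9]      0x0A			descriptor length
 *	[10]     0x80			VALID bit
 *	[12..19] failing LBA		64 bits, most significant byte first
 *					(failing_lba_hi then failing_lba_lo)
 */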
4115
4116/**
4117 * ipr_get_autosense - Copy autosense data to sense buffer
4118 * @ipr_cmd:	ipr command struct
4119 *
4120 * This function copies the autosense buffer to the buffer
4121 * in the scsi_cmd, if there is autosense available.
4122 *
4123 * Return value:
4124 *	1 if autosense was available / 0 if not
4125 **/
4126static int ipr_get_autosense(struct ipr_cmnd *ipr_cmd)
4127{
4128	struct ipr_ioasa *ioasa = &ipr_cmd->ioasa;
4129
4130	if ((be32_to_cpu(ioasa->ioasc_specific) & IPR_AUTOSENSE_VALID) == 0)
4131		return 0;
4132
4133	memcpy(ipr_cmd->scsi_cmd->sense_buffer, ioasa->auto_sense.data,
4134	       min_t(u16, be16_to_cpu(ioasa->auto_sense.auto_sense_len),
4135		   SCSI_SENSE_BUFFERSIZE));
4136	return 1;
4137}
4138
4139/**
4140 * ipr_erp_start - Process an error response for a SCSI op
4141 * @ioa_cfg:	ioa config struct
4142 * @ipr_cmd:	ipr command struct
4143 *
4144 * This function determines whether or not to initiate ERP
4145 * on the affected device.
4146 *
4147 * Return value:
4148 * 	nothing
4149 **/
4150static void ipr_erp_start(struct ipr_ioa_cfg *ioa_cfg,
4151			      struct ipr_cmnd *ipr_cmd)
4152{
4153	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
4154	struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
4155	u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
4156
4157	if (!res) {
4158		ipr_scsi_eh_done(ipr_cmd);
4159		return;
4160	}
4161
4162	if (ipr_is_gscsi(res))
4163		ipr_dump_ioasa(ioa_cfg, ipr_cmd, res);
4164	else
4165		ipr_gen_sense(ipr_cmd);
4166
4167	switch (ioasc & IPR_IOASC_IOASC_MASK) {
4168	case IPR_IOASC_ABORTED_CMD_TERM_BY_HOST:
4169		if (ipr_is_naca_model(res))
4170			scsi_cmd->result |= (DID_ABORT << 16);
4171		else
4172			scsi_cmd->result |= (DID_IMM_RETRY << 16);
4173		break;
4174	case IPR_IOASC_IR_RESOURCE_HANDLE:
4175	case IPR_IOASC_IR_NO_CMDS_TO_2ND_IOA:
4176		scsi_cmd->result |= (DID_NO_CONNECT << 16);
4177		break;
4178	case IPR_IOASC_HW_SEL_TIMEOUT:
4179		scsi_cmd->result |= (DID_NO_CONNECT << 16);
4180		if (!ipr_is_naca_model(res))
4181			res->needs_sync_complete = 1;
4182		break;
4183	case IPR_IOASC_SYNC_REQUIRED:
4184		if (!res->in_erp)
4185			res->needs_sync_complete = 1;
4186		scsi_cmd->result |= (DID_IMM_RETRY << 16);
4187		break;
4188	case IPR_IOASC_MED_DO_NOT_REALLOC: /* prevent retries */
4189	case IPR_IOASA_IR_DUAL_IOA_DISABLED:
4190		scsi_cmd->result |= (DID_PASSTHROUGH << 16);
4191		break;
4192	case IPR_IOASC_BUS_WAS_RESET:
4193	case IPR_IOASC_BUS_WAS_RESET_BY_OTHER:
4194		/*
4195		 * Report the bus reset and ask for a retry. The device
4196		 * will return a CHECK CONDITION/UNIT ATTENTION on the next command.
4197		 */
4198		if (!res->resetting_device)
4199			scsi_report_bus_reset(ioa_cfg->host, scsi_cmd->device->channel);
4200		scsi_cmd->result |= (DID_ERROR << 16);
4201		if (!ipr_is_naca_model(res))
4202			res->needs_sync_complete = 1;
4203		break;
4204	case IPR_IOASC_HW_DEV_BUS_STATUS:
4205		scsi_cmd->result |= IPR_IOASC_SENSE_STATUS(ioasc);
4206		if (IPR_IOASC_SENSE_STATUS(ioasc) == SAM_STAT_CHECK_CONDITION) {
4207			if (!ipr_get_autosense(ipr_cmd)) {
4208				if (!ipr_is_naca_model(res)) {
4209					ipr_erp_cancel_all(ipr_cmd);
4210					return;
4211				}
4212			}
4213		}
4214		if (!ipr_is_naca_model(res))
4215			res->needs_sync_complete = 1;
4216		break;
4217	case IPR_IOASC_NR_INIT_CMD_REQUIRED:
4218		break;
4219	default:
4220		if (IPR_IOASC_SENSE_KEY(ioasc) > RECOVERED_ERROR)
4221			scsi_cmd->result |= (DID_ERROR << 16);
4222		if (!ipr_is_vset_device(res) && !ipr_is_naca_model(res))
4223			res->needs_sync_complete = 1;
4224		break;
4225	}
4226
4227	ipr_unmap_sglist(ioa_cfg, ipr_cmd);
4228	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
4229	scsi_cmd->scsi_done(scsi_cmd);
4230}
4231
4232/**
4233 * ipr_scsi_done - mid-layer done function
4234 * @ipr_cmd:	ipr command struct
4235 *
4236 * This function is invoked by the interrupt handler for
4237 * ops generated by the SCSI mid-layer
4238 *
4239 * Return value:
4240 * 	none
4241 **/
4242static void ipr_scsi_done(struct ipr_cmnd *ipr_cmd)
4243{
4244	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
4245	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
4246	u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
4247
4248	scsi_cmd->resid = be32_to_cpu(ipr_cmd->ioasa.residual_data_len);
4249
4250	if (likely(IPR_IOASC_SENSE_KEY(ioasc) == 0)) {
4251		ipr_unmap_sglist(ioa_cfg, ipr_cmd);
4252		list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
4253		scsi_cmd->scsi_done(scsi_cmd);
4254	} else
4255		ipr_erp_start(ioa_cfg, ipr_cmd);
4256}
4257
4258/**
4259 * ipr_queuecommand - Queue a mid-layer request
4260 * @scsi_cmd:	scsi command struct
4261 * @done:		done function
4262 *
4263 * This function queues a request generated by the mid-layer.
4264 *
4265 * Return value:
4266 *	0 on success
4267 *	SCSI_MLQUEUE_DEVICE_BUSY if device is busy
4268 *	SCSI_MLQUEUE_HOST_BUSY if host is busy
4269 **/
4270static int ipr_queuecommand(struct scsi_cmnd *scsi_cmd,
4271			    void (*done) (struct scsi_cmnd *))
4272{
4273	struct ipr_ioa_cfg *ioa_cfg;
4274	struct ipr_resource_entry *res;
4275	struct ipr_ioarcb *ioarcb;
4276	struct ipr_cmnd *ipr_cmd;
4277	int rc = 0;
4278
4279	scsi_cmd->scsi_done = done;
4280	ioa_cfg = (struct ipr_ioa_cfg *)scsi_cmd->device->host->hostdata;
4281	res = scsi_cmd->device->hostdata;
4282	scsi_cmd->result = (DID_OK << 16);
4283
4284	/*
4285	 * We are currently blocking all devices due to a host reset.
4286	 * We have told the host to stop giving us new requests, but
4287	 * ERP ops don't count. FIXME
4288	 */
4289	if (unlikely(!ioa_cfg->allow_cmds && !ioa_cfg->ioa_is_dead))
4290		return SCSI_MLQUEUE_HOST_BUSY;
4291
4292	/*
4293	 * FIXME - Create scsi_set_host_offline interface
4294	 *  and the ioa_is_dead check can be removed
4295	 */
4296	if (unlikely(ioa_cfg->ioa_is_dead || !res)) {
4297		memset(scsi_cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
4298		scsi_cmd->result = (DID_NO_CONNECT << 16);
4299		scsi_cmd->scsi_done(scsi_cmd);
4300		return 0;
4301	}
4302
4303	ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
4304	ioarcb = &ipr_cmd->ioarcb;
4305	list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);
4306
4307	memcpy(ioarcb->cmd_pkt.cdb, scsi_cmd->cmnd, scsi_cmd->cmd_len);
4308	ipr_cmd->scsi_cmd = scsi_cmd;
4309	ioarcb->res_handle = res->cfgte.res_handle;
4310	ipr_cmd->done = ipr_scsi_done;
4311	ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_GET_PHYS_LOC(res->cfgte.res_addr));
4312
4313	if (ipr_is_gscsi(res) || ipr_is_vset_device(res)) {
4314		if (scsi_cmd->underflow == 0)
4315			ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
4316
4317		if (res->needs_sync_complete) {
4318			ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_SYNC_COMPLETE;
4319			res->needs_sync_complete = 0;
4320		}
4321
4322		ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_LINK_DESC;
4323		ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_DELAY_AFTER_RST;
4324		ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_ALIGNED_BFR;
4325		ioarcb->cmd_pkt.flags_lo |= ipr_get_task_attributes(scsi_cmd);
4326	}
4327
4328	if (scsi_cmd->cmnd[0] >= 0xC0 &&
4329	    (!ipr_is_gscsi(res) || scsi_cmd->cmnd[0] == IPR_QUERY_RSRC_STATE))
4330		ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
4331
4332	if (likely(rc == 0))
4333		rc = ipr_build_ioadl(ioa_cfg, ipr_cmd);
4334
4335	if (likely(rc == 0)) {
4336		mb();
4337		writel(be32_to_cpu(ipr_cmd->ioarcb.ioarcb_host_pci_addr),
4338		       ioa_cfg->regs.ioarrin_reg);
4339	} else {
4340		 list_move_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
4341		 return SCSI_MLQUEUE_HOST_BUSY;
4342	}
4343
4344	return 0;
4345}
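
/*
 * A note on the submission fast path above: the mb() orders all of the
 * IOARCB and IOADL stores ahead of the MMIO write to the IOARRIN register,
 * because that register write is what hands ownership of the command block
 * to the IOA. Condensed, the hand-off is just (illustrative excerpt, using
 * the driver's own names):
 *
 *	mb();						// flush IOARCB/IOADL writes first
 *	writel(be32_to_cpu(ipr_cmd->ioarcb.ioarcb_host_pci_addr),
 *	       ioa_cfg->regs.ioarrin_reg);		// doorbell: IOA now owns the block
 */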
4346
4347/**
4348 * ipr_ioa_info - Get information about the card/driver
4349 * @host:	scsi host struct
4350 *
4351 * Return value:
4352 * 	pointer to buffer with description string
4353 **/
4354static const char * ipr_ioa_info(struct Scsi_Host *host)
4355{
4356	static char buffer[512];
4357	struct ipr_ioa_cfg *ioa_cfg;
4358	unsigned long lock_flags = 0;
4359
4360	ioa_cfg = (struct ipr_ioa_cfg *) host->hostdata;
4361
4362	spin_lock_irqsave(host->host_lock, lock_flags);
4363	sprintf(buffer, "IBM %X Storage Adapter", ioa_cfg->type);
4364	spin_unlock_irqrestore(host->host_lock, lock_flags);
4365
4366	return buffer;
4367}
4368
4369static struct scsi_host_template driver_template = {
4370	.module = THIS_MODULE,
4371	.name = "IPR",
4372	.info = ipr_ioa_info,
4373	.queuecommand = ipr_queuecommand,
4374	.eh_abort_handler = ipr_eh_abort,
4375	.eh_device_reset_handler = ipr_eh_dev_reset,
4376	.eh_host_reset_handler = ipr_eh_host_reset,
4377	.slave_alloc = ipr_slave_alloc,
4378	.slave_configure = ipr_slave_configure,
4379	.slave_destroy = ipr_slave_destroy,
4380	.change_queue_depth = ipr_change_queue_depth,
4381	.change_queue_type = ipr_change_queue_type,
4382	.bios_param = ipr_biosparam,
4383	.can_queue = IPR_MAX_COMMANDS,
4384	.this_id = -1,
4385	.sg_tablesize = IPR_MAX_SGLIST,
4386	.max_sectors = IPR_IOA_MAX_SECTORS,
4387	.cmd_per_lun = IPR_MAX_CMD_PER_LUN,
4388	.use_clustering = ENABLE_CLUSTERING,
4389	.shost_attrs = ipr_ioa_attrs,
4390	.sdev_attrs = ipr_dev_attrs,
4391	.proc_name = IPR_NAME
4392};
4393
4394#ifdef CONFIG_PPC_PSERIES
4395static const u16 ipr_blocked_processors[] = {
4396	PV_NORTHSTAR,
4397	PV_PULSAR,
4398	PV_POWER4,
4399	PV_ICESTAR,
4400	PV_SSTAR,
4401	PV_POWER4p,
4402	PV_630,
4403	PV_630p
4404};
4405
4406/**
4407 * ipr_invalid_adapter - Determine if this adapter is supported on this hardware
4408 * @ioa_cfg:	ioa cfg struct
4409 *
4410 * Adapters that use Gemstone revision < 3.1 do not work reliably on
4411 * certain pSeries hardware. This function determines if the given
4412 * adapter is in one of these configurations or not.
4413 *
4414 * Return value:
4415 * 	1 if adapter is not supported / 0 if adapter is supported
4416 **/
4417static int ipr_invalid_adapter(struct ipr_ioa_cfg *ioa_cfg)
4418{
4419	u8 rev_id;
4420	int i;
4421
4422	if (ioa_cfg->type == 0x5702) {
4423		if (pci_read_config_byte(ioa_cfg->pdev, PCI_REVISION_ID,
4424					 &rev_id) == PCIBIOS_SUCCESSFUL) {
4425			if (rev_id < 4) {
4426				for (i = 0; i < ARRAY_SIZE(ipr_blocked_processors); i++){
4427					if (__is_processor(ipr_blocked_processors[i]))
4428						return 1;
4429				}
4430			}
4431		}
4432	}
4433	return 0;
4434}
4435#else
4436#define ipr_invalid_adapter(ioa_cfg) 0
4437#endif
4438
4439/**
4440 * ipr_ioa_bringdown_done - IOA bring down completion.
4441 * @ipr_cmd:	ipr command struct
4442 *
4443 * This function processes the completion of an adapter bring down.
4444 * It wakes any reset sleepers.
4445 *
4446 * Return value:
4447 * 	IPR_RC_JOB_RETURN
4448 **/
4449static int ipr_ioa_bringdown_done(struct ipr_cmnd *ipr_cmd)
4450{
4451	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
4452
4453	ENTER;
4454	ioa_cfg->in_reset_reload = 0;
4455	ioa_cfg->reset_retries = 0;
4456	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
4457	wake_up_all(&ioa_cfg->reset_wait_q);
4458
4459	spin_unlock_irq(ioa_cfg->host->host_lock);
4460	scsi_unblock_requests(ioa_cfg->host);
4461	spin_lock_irq(ioa_cfg->host->host_lock);
4462	LEAVE;
4463
4464	return IPR_RC_JOB_RETURN;
4465}
4466
4467/**
4468 * ipr_ioa_reset_done - IOA reset completion.
4469 * @ipr_cmd:	ipr command struct
4470 *
4471 * This function processes the completion of an adapter reset.
4472 * It schedules any necessary mid-layer add/removes and
4473 * wakes any reset sleepers.
4474 *
4475 * Return value:
4476 * 	IPR_RC_JOB_RETURN
4477 **/
4478static int ipr_ioa_reset_done(struct ipr_cmnd *ipr_cmd)
4479{
4480	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
4481	struct ipr_resource_entry *res;
4482	struct ipr_hostrcb *hostrcb, *temp;
4483	int i = 0;
4484
4485	ENTER;
4486	ioa_cfg->in_reset_reload = 0;
4487	ioa_cfg->allow_cmds = 1;
4488	ioa_cfg->reset_cmd = NULL;
4489	ioa_cfg->doorbell |= IPR_RUNTIME_RESET;
4490
4491	list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
4492		if (ioa_cfg->allow_ml_add_del && (res->add_to_ml || res->del_from_ml)) {
4493			ipr_trace;
4494			break;
4495		}
4496	}
4497	schedule_work(&ioa_cfg->work_q);
4498
4499	list_for_each_entry_safe(hostrcb, temp, &ioa_cfg->hostrcb_free_q, queue) {
4500		list_del(&hostrcb->queue);
4501		if (i++ < IPR_NUM_LOG_HCAMS)
4502			ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_LOG_DATA, hostrcb);
4503		else
4504			ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
4505	}
4506
4507	dev_info(&ioa_cfg->pdev->dev, "IOA initialized.\n");
4508
4509	ioa_cfg->reset_retries = 0;
4510	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
4511	wake_up_all(&ioa_cfg->reset_wait_q);
4512
4513	spin_unlock_irq(ioa_cfg->host->host_lock);
4514	scsi_unblock_requests(ioa_cfg->host);
4515	spin_lock_irq(ioa_cfg->host->host_lock);
4516
4517	if (!ioa_cfg->allow_cmds)
4518		scsi_block_requests(ioa_cfg->host);
4519
4520	LEAVE;
4521	return IPR_RC_JOB_RETURN;
4522}
4523
4524/**
4525 * ipr_set_sup_dev_dflt - Initialize a Set Supported Device buffer
4526 * @supported_dev:	supported device struct
4527 * @vpids:			vendor product id struct
4528 *
4529 * Return value:
4530 * 	none
4531 **/
4532static void ipr_set_sup_dev_dflt(struct ipr_supported_device *supported_dev,
4533				 struct ipr_std_inq_vpids *vpids)
4534{
4535	memset(supported_dev, 0, sizeof(struct ipr_supported_device));
4536	memcpy(&supported_dev->vpids, vpids, sizeof(struct ipr_std_inq_vpids));
4537	supported_dev->num_records = 1;
4538	supported_dev->data_length =
4539		cpu_to_be16(sizeof(struct ipr_supported_device));
4540	supported_dev->reserved = 0;
4541}
4542
4543/**
4544 * ipr_set_supported_devs - Send Set Supported Devices for a device
4545 * @ipr_cmd:	ipr command struct
4546 *
4547 * This function sends a Set Supported Devices command to the adapter.
4548 *
4549 * Return value:
4550 * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
4551 **/
4552static int ipr_set_supported_devs(struct ipr_cmnd *ipr_cmd)
4553{
4554	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
4555	struct ipr_supported_device *supp_dev = &ioa_cfg->vpd_cbs->supp_dev;
4556	struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;
4557	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
4558	struct ipr_resource_entry *res = ipr_cmd->u.res;
4559
4560	ipr_cmd->job_step = ipr_ioa_reset_done;
4561
4562	list_for_each_entry_continue(res, &ioa_cfg->used_res_q, queue) {
4563		if (!ipr_is_scsi_disk(res))
4564			continue;
4565
4566		ipr_cmd->u.res = res;
4567		ipr_set_sup_dev_dflt(supp_dev, &res->cfgte.std_inq_data.vpids);
4568
4569		ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
4570		ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
4571		ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
4572
4573		ioarcb->cmd_pkt.cdb[0] = IPR_SET_SUPPORTED_DEVICES;
4574		ioarcb->cmd_pkt.cdb[7] = (sizeof(struct ipr_supported_device) >> 8) & 0xff;
4575		ioarcb->cmd_pkt.cdb[8] = sizeof(struct ipr_supported_device) & 0xff;
4576
4577		ioadl->flags_and_data_len = cpu_to_be32(IPR_IOADL_FLAGS_WRITE_LAST |
4578							sizeof(struct ipr_supported_device));
4579		ioadl->address = cpu_to_be32(ioa_cfg->vpd_cbs_dma +
4580					     offsetof(struct ipr_misc_cbs, supp_dev));
4581		ioarcb->write_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
4582		ioarcb->write_data_transfer_length =
4583			cpu_to_be32(sizeof(struct ipr_supported_device));
4584
4585		ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
4586			   IPR_SET_SUP_DEVICE_TIMEOUT);
4587
4588		ipr_cmd->job_step = ipr_set_supported_devs;
4589		return IPR_RC_JOB_RETURN;
4590	}
4591
4592	return IPR_RC_JOB_CONTINUE;
4593}
4594
4595/**
4596 * ipr_setup_write_cache - Disable write cache if needed
4597 * @ipr_cmd:	ipr command struct
4598 *
4599 * This function sets up the adapter's write cache to the desired setting.
4600 *
4601 * Return value:
4602 * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
4603 **/
4604static int ipr_setup_write_cache(struct ipr_cmnd *ipr_cmd)
4605{
4606	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
4607
4608	ipr_cmd->job_step = ipr_set_supported_devs;
4609	ipr_cmd->u.res = list_entry(ioa_cfg->used_res_q.next,
4610				    struct ipr_resource_entry, queue);
4611
4612	if (ioa_cfg->cache_state != CACHE_DISABLED)
4613		return IPR_RC_JOB_CONTINUE;
4614
4615	ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
4616	ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
4617	ipr_cmd->ioarcb.cmd_pkt.cdb[0] = IPR_IOA_SHUTDOWN;
4618	ipr_cmd->ioarcb.cmd_pkt.cdb[1] = IPR_SHUTDOWN_PREPARE_FOR_NORMAL;
4619
4620	ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
4621
4622	return IPR_RC_JOB_RETURN;
4623}
4624
4625/**
4626 * ipr_get_mode_page - Locate specified mode page
4627 * @mode_pages:	mode page buffer
4628 * @page_code:	page code to find
4629 * @len:		minimum required length for mode page
4630 *
4631 * Return value:
4632 * 	pointer to mode page / NULL on failure
4633 **/
4634static void *ipr_get_mode_page(struct ipr_mode_pages *mode_pages,
4635			       u32 page_code, u32 len)
4636{
4637	struct ipr_mode_page_hdr *mode_hdr;
4638	u32 page_length;
4639	u32 length;
4640
4641	if (!mode_pages || (mode_pages->hdr.length == 0))
4642		return NULL;
4643
4644	length = (mode_pages->hdr.length + 1) - 4 - mode_pages->hdr.block_desc_len;
4645	mode_hdr = (struct ipr_mode_page_hdr *)
4646		(mode_pages->data + mode_pages->hdr.block_desc_len);
4647
4648	while (length) {
4649		if (IPR_GET_MODE_PAGE_CODE(mode_hdr) == page_code) {
4650			if (mode_hdr->page_length >= (len - sizeof(struct ipr_mode_page_hdr)))
4651				return mode_hdr;
4652			break;
4653		} else {
4654			page_length = (sizeof(struct ipr_mode_page_hdr) +
4655				       mode_hdr->page_length);
4656			length -= page_length;
4657			mode_hdr = (struct ipr_mode_page_hdr *)
4658				((unsigned long)mode_hdr + page_length);
4659		}
4660	}
4661	return NULL;
4662}
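
/*
 * Buffer layout assumed by ipr_get_mode_page() above (6-byte mode sense
 * format):
 *
 *	mode parameter header (4 bytes: length, medium type, dev param,
 *	                       block descriptor length)
 *	block descriptors     (hdr.block_desc_len bytes)
 *	page 0 header + data  (sizeof(struct ipr_mode_page_hdr) + page_length)
 *	page 1 header + data  ...
 *
 * The mode data length byte excludes itself, which is why the number of
 * remaining page bytes is computed as (hdr.length + 1) - 4 -
 * hdr.block_desc_len before the walk begins.
 */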
4663
4664/**
4665 * ipr_check_term_power - Check for term power errors
4666 * @ioa_cfg:	ioa config struct
4667 * @mode_pages:	IOAFP mode pages buffer
4668 *
4669 * Check the IOAFP's mode page 28 for term power errors
4670 *
4671 * Return value:
4672 * 	nothing
4673 **/
4674static void ipr_check_term_power(struct ipr_ioa_cfg *ioa_cfg,
4675				 struct ipr_mode_pages *mode_pages)
4676{
4677	int i;
4678	int entry_length;
4679	struct ipr_dev_bus_entry *bus;
4680	struct ipr_mode_page28 *mode_page;
4681
4682	mode_page = ipr_get_mode_page(mode_pages, 0x28,
4683				      sizeof(struct ipr_mode_page28));
4684
4685	entry_length = mode_page->entry_length;
4686
4687	bus = mode_page->bus;
4688
4689	for (i = 0; i < mode_page->num_entries; i++) {
4690		if (bus->flags & IPR_SCSI_ATTR_NO_TERM_PWR) {
4691			dev_err(&ioa_cfg->pdev->dev,
4692				"Term power is absent on scsi bus %d\n",
4693				bus->res_addr.bus);
4694		}
4695
4696		bus = (struct ipr_dev_bus_entry *)((char *)bus + entry_length);
4697	}
4698}
4699
4700/**
4701 * ipr_scsi_bus_speed_limit - Limit the SCSI speed based on SES table
4702 * @ioa_cfg:	ioa config struct
4703 *
4704 * Looks through the config table checking for SES devices. If
4705 * an SES device appears in the SES table with a maximum SCSI
4706 * bus speed, that bus is limited to the listed speed.
4707 *
4708 * Return value:
4709 * 	none
4710 **/
4711static void ipr_scsi_bus_speed_limit(struct ipr_ioa_cfg *ioa_cfg)
4712{
4713	u32 max_xfer_rate;
4714	int i;
4715
4716	for (i = 0; i < IPR_MAX_NUM_BUSES; i++) {
4717		max_xfer_rate = ipr_get_max_scsi_speed(ioa_cfg, i,
4718						       ioa_cfg->bus_attr[i].bus_width);
4719
4720		if (max_xfer_rate < ioa_cfg->bus_attr[i].max_xfer_rate)
4721			ioa_cfg->bus_attr[i].max_xfer_rate = max_xfer_rate;
4722	}
4723}
4724
4725/**
4726 * ipr_modify_ioafp_mode_page_28 - Modify IOAFP Mode Page 28
4727 * @ioa_cfg:	ioa config struct
4728 * @mode_pages:	mode page 28 buffer
4729 *
4730 * Updates mode page 28 based on driver configuration
4731 *
4732 * Return value:
4733 * 	none
4734 **/
4735static void ipr_modify_ioafp_mode_page_28(struct ipr_ioa_cfg *ioa_cfg,
4736					  	struct ipr_mode_pages *mode_pages)
4737{
4738	int i, entry_length;
4739	struct ipr_dev_bus_entry *bus;
4740	struct ipr_bus_attributes *bus_attr;
4741	struct ipr_mode_page28 *mode_page;
4742
4743	mode_page = ipr_get_mode_page(mode_pages, 0x28,
4744				      sizeof(struct ipr_mode_page28));
4745
4746	entry_length = mode_page->entry_length;
4747
4748	/* Loop for each device bus entry */
4749	for (i = 0, bus = mode_page->bus;
4750	     i < mode_page->num_entries;
4751	     i++, bus = (struct ipr_dev_bus_entry *)((u8 *)bus + entry_length)) {
4752		if (bus->res_addr.bus > IPR_MAX_NUM_BUSES) {
4753			dev_err(&ioa_cfg->pdev->dev,
4754				"Invalid resource address reported: 0x%08X\n",
4755				IPR_GET_PHYS_LOC(bus->res_addr));
4756			continue;
4757		}
4758
4759		bus_attr = &ioa_cfg->bus_attr[i];
4760		bus->extended_reset_delay = IPR_EXTENDED_RESET_DELAY;
4761		bus->bus_width = bus_attr->bus_width;
4762		bus->max_xfer_rate = cpu_to_be32(bus_attr->max_xfer_rate);
4763		bus->flags &= ~IPR_SCSI_ATTR_QAS_MASK;
4764		if (bus_attr->qas_enabled)
4765			bus->flags |= IPR_SCSI_ATTR_ENABLE_QAS;
4766		else
4767			bus->flags |= IPR_SCSI_ATTR_DISABLE_QAS;
4768	}
4769}
4770
4771/**
4772 * ipr_build_mode_select - Build a mode select command
4773 * @ipr_cmd:	ipr command struct
4774 * @res_handle:	resource handle to send command to
4775 * @parm:		Byte 2 of Mode Sense command
4776 * @dma_addr:	DMA buffer address
4777 * @xfer_len:	data transfer length
4778 *
4779 * Return value:
4780 * 	none
4781 **/
4782static void ipr_build_mode_select(struct ipr_cmnd *ipr_cmd,
4783				  __be32 res_handle, u8 parm, u32 dma_addr,
4784				  u8 xfer_len)
4785{
4786	struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;
4787	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
4788
4789	ioarcb->res_handle = res_handle;
4790	ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
4791	ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
4792	ioarcb->cmd_pkt.cdb[0] = MODE_SELECT;
4793	ioarcb->cmd_pkt.cdb[1] = parm;
4794	ioarcb->cmd_pkt.cdb[4] = xfer_len;
4795
4796	ioadl->flags_and_data_len =
4797		cpu_to_be32(IPR_IOADL_FLAGS_WRITE_LAST | xfer_len);
4798	ioadl->address = cpu_to_be32(dma_addr);
4799	ioarcb->write_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
4800	ioarcb->write_data_transfer_length = cpu_to_be32(xfer_len);
4801}
4802
4803/**
4804 * ipr_ioafp_mode_select_page28 - Issue Mode Select Page 28 to IOA
4805 * @ipr_cmd:	ipr command struct
4806 *
4807 * This function sets up the SCSI bus attributes and sends
4808 * a Mode Select for Page 28 to activate them.
4809 *
4810 * Return value:
4811 * 	IPR_RC_JOB_RETURN
4812 **/
4813static int ipr_ioafp_mode_select_page28(struct ipr_cmnd *ipr_cmd)
4814{
4815	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
4816	struct ipr_mode_pages *mode_pages = &ioa_cfg->vpd_cbs->mode_pages;
4817	int length;
4818
4819	ENTER;
4820	ipr_scsi_bus_speed_limit(ioa_cfg);
4821	ipr_check_term_power(ioa_cfg, mode_pages);
4822	ipr_modify_ioafp_mode_page_28(ioa_cfg, mode_pages);
4823	length = mode_pages->hdr.length + 1;
4824	mode_pages->hdr.length = 0;
4825
4826	ipr_build_mode_select(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE), 0x11,
4827			      ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, mode_pages),
4828			      length);
4829
4830	ipr_cmd->job_step = ipr_setup_write_cache;
4831	ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
4832
4833	LEAVE;
4834	return IPR_RC_JOB_RETURN;
4835}
4836
4837/**
4838 * ipr_build_mode_sense - Builds a mode sense command
4839 * @ipr_cmd:	ipr command struct
4840 * @res_handle:	resource handle to send command to
4841 * @parm:		Byte 2 of mode sense command
4842 * @dma_addr:	DMA address of mode sense buffer
4843 * @xfer_len:	Size of DMA buffer
4844 *
4845 * Return value:
4846 * 	none
4847 **/
4848static void ipr_build_mode_sense(struct ipr_cmnd *ipr_cmd,
4849				 __be32 res_handle,
4850				 u8 parm, u32 dma_addr, u8 xfer_len)
4851{
4852	struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;
4853	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
4854
4855	ioarcb->res_handle = res_handle;
4856	ioarcb->cmd_pkt.cdb[0] = MODE_SENSE;
4857	ioarcb->cmd_pkt.cdb[2] = parm;
4858	ioarcb->cmd_pkt.cdb[4] = xfer_len;
4859	ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
4860
4861	ioadl->flags_and_data_len =
4862		cpu_to_be32(IPR_IOADL_FLAGS_READ_LAST | xfer_len);
4863	ioadl->address = cpu_to_be32(dma_addr);
4864	ioarcb->read_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
4865	ioarcb->read_data_transfer_length = cpu_to_be32(xfer_len);
4866}
4867
4868/**
4869 * ipr_reset_cmd_failed - Handle failure of IOA reset command
4870 * @ipr_cmd:	ipr command struct
4871 *
4872 * This function handles the failure of an IOA bringup command.
4873 *
4874 * Return value:
4875 * 	IPR_RC_JOB_RETURN
4876 **/
4877static int ipr_reset_cmd_failed(struct ipr_cmnd *ipr_cmd)
4878{
4879	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
4880	u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
4881
4882	dev_err(&ioa_cfg->pdev->dev,
4883		"0x%02X failed with IOASC: 0x%08X\n",
4884		ipr_cmd->ioarcb.cmd_pkt.cdb[0], ioasc);
4885
4886	ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
4887	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
4888	return IPR_RC_JOB_RETURN;
4889}
4890
4891/**
4892 * ipr_reset_mode_sense_failed - Handle failure of IOAFP mode sense
4893 * @ipr_cmd:	ipr command struct
4894 *
4895 * This function handles the failure of a Mode Sense to the IOAFP.
4896 * Some adapters do not handle all mode pages.
4897 *
4898 * Return value:
4899 * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
4900 **/
4901static int ipr_reset_mode_sense_failed(struct ipr_cmnd *ipr_cmd)
4902{
4903	u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
4904
4905	if (ioasc == IPR_IOASC_IR_INVALID_REQ_TYPE_OR_PKT) {
4906		ipr_cmd->job_step = ipr_setup_write_cache;
4907		return IPR_RC_JOB_CONTINUE;
4908	}
4909
4910	return ipr_reset_cmd_failed(ipr_cmd);
4911}
4912
4913/**
4914 * ipr_ioafp_mode_sense_page28 - Issue Mode Sense Page 28 to IOA
4915 * @ipr_cmd:	ipr command struct
4916 *
4917 * This function sends a Page 28 mode sense to the IOA to
4918 * retrieve SCSI bus attributes.
4919 *
4920 * Return value:
4921 * 	IPR_RC_JOB_RETURN
4922 **/
4923static int ipr_ioafp_mode_sense_page28(struct ipr_cmnd *ipr_cmd)
4924{
4925	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
4926
4927	ENTER;
4928	ipr_build_mode_sense(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE),
4929			     0x28, ioa_cfg->vpd_cbs_dma +
4930			     offsetof(struct ipr_misc_cbs, mode_pages),
4931			     sizeof(struct ipr_mode_pages));
4932
4933	ipr_cmd->job_step = ipr_ioafp_mode_select_page28;
4934	ipr_cmd->job_step_failed = ipr_reset_mode_sense_failed;
4935
4936	ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
4937
4938	LEAVE;
4939	return IPR_RC_JOB_RETURN;
4940}
4941
4942/**
4943 * ipr_init_res_table - Initialize the resource table
4944 * @ipr_cmd:	ipr command struct
4945 *
4946 * This function looks through the existing resource table, comparing
4947 * it with the config table. It takes care of old and new
4948 * devices, scheduling them for addition to or removal from the
4949 * mid-layer as appropriate.
4950 *
4951 * Return value:
4952 * 	IPR_RC_JOB_CONTINUE
4953 **/
4954static int ipr_init_res_table(struct ipr_cmnd *ipr_cmd)
4955{
4956	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
4957	struct ipr_resource_entry *res, *temp;
4958	struct ipr_config_table_entry *cfgte;
4959	int found, i;
4960	LIST_HEAD(old_res);
4961
4962	ENTER;
4963	if (ioa_cfg->cfg_table->hdr.flags & IPR_UCODE_DOWNLOAD_REQ)
4964		dev_err(&ioa_cfg->pdev->dev, "Microcode download required\n");
4965
4966	list_for_each_entry_safe(res, temp, &ioa_cfg->used_res_q, queue)
4967		list_move_tail(&res->queue, &old_res);
4968
4969	for (i = 0; i < ioa_cfg->cfg_table->hdr.num_entries; i++) {
4970		cfgte = &ioa_cfg->cfg_table->dev[i];
4971		found = 0;
4972
4973		list_for_each_entry_safe(res, temp, &old_res, queue) {
4974			if (!memcmp(&res->cfgte.res_addr,
4975				    &cfgte->res_addr, sizeof(cfgte->res_addr))) {
4976				list_move_tail(&res->queue, &ioa_cfg->used_res_q);
4977				found = 1;
4978				break;
4979			}
4980		}
4981
4982		if (!found) {
4983			if (list_empty(&ioa_cfg->free_res_q)) {
4984				dev_err(&ioa_cfg->pdev->dev, "Too many devices attached\n");
4985				break;
4986			}
4987
4988			found = 1;
4989			res = list_entry(ioa_cfg->free_res_q.next,
4990					 struct ipr_resource_entry, queue);
4991			list_move_tail(&res->queue, &ioa_cfg->used_res_q);
4992			ipr_init_res_entry(res);
4993			res->add_to_ml = 1;
4994		}
4995
4996		if (found)
4997			memcpy(&res->cfgte, cfgte, sizeof(struct ipr_config_table_entry));
4998	}
4999
5000	list_for_each_entry_safe(res, temp, &old_res, queue) {
5001		if (res->sdev) {
5002			res->del_from_ml = 1;
5003			res->cfgte.res_handle = IPR_INVALID_RES_HANDLE;
5004			list_move_tail(&res->queue, &ioa_cfg->used_res_q);
5005		} else {
5006			list_move_tail(&res->queue, &ioa_cfg->free_res_q);
5007		}
5008	}
5009
5010	ipr_cmd->job_step = ipr_ioafp_mode_sense_page28;
5011
5012	LEAVE;
5013	return IPR_RC_JOB_CONTINUE;
5014}
5015
5016/**
5017 * ipr_ioafp_query_ioa_cfg - Send a Query IOA Config to the adapter.
5018 * @ipr_cmd:	ipr command struct
5019 *
5020 * This function sends a Query IOA Configuration command
5021 * to the adapter to retrieve the IOA configuration table.
5022 *
5023 * Return value:
5024 * 	IPR_RC_JOB_RETURN
5025 **/
5026static int ipr_ioafp_query_ioa_cfg(struct ipr_cmnd *ipr_cmd)
5027{
5028	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5029	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
5030	struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;
5031	struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
5032
5033	ENTER;
5034	dev_info(&ioa_cfg->pdev->dev, "Adapter firmware version: %02X%02X%02X%02X\n",
5035		 ucode_vpd->major_release, ucode_vpd->card_type,
5036		 ucode_vpd->minor_release[0], ucode_vpd->minor_release[1]);
5037	ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
5038	ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
5039
5040	ioarcb->cmd_pkt.cdb[0] = IPR_QUERY_IOA_CONFIG;
5041	ioarcb->cmd_pkt.cdb[7] = (sizeof(struct ipr_config_table) >> 8) & 0xff;
5042	ioarcb->cmd_pkt.cdb[8] = sizeof(struct ipr_config_table) & 0xff;
5043
5044	ioarcb->read_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
5045	ioarcb->read_data_transfer_length =
5046		cpu_to_be32(sizeof(struct ipr_config_table));
5047
5048	ioadl->address = cpu_to_be32(ioa_cfg->cfg_table_dma);
5049	ioadl->flags_and_data_len =
5050		cpu_to_be32(IPR_IOADL_FLAGS_READ_LAST | sizeof(struct ipr_config_table));
5051
5052	ipr_cmd->job_step = ipr_init_res_table;
5053
5054	ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
5055
5056	LEAVE;
5057	return IPR_RC_JOB_RETURN;
5058}
5059
5060/**
5061 * ipr_ioafp_inquiry - Send an Inquiry to the adapter.
5062 * @ipr_cmd:	ipr command struct
 * @flags:	inquiry flags (CDB byte 1)
 * @page:	inquiry page code
 * @dma_addr:	DMA address of the inquiry data buffer
 * @xfer_len:	transfer length
5063 *
5064 * This utility function sends an inquiry to the adapter.
5065 *
5066 * Return value:
5067 * 	none
5068 **/
5069static void ipr_ioafp_inquiry(struct ipr_cmnd *ipr_cmd, u8 flags, u8 page,
5070			      u32 dma_addr, u8 xfer_len)
5071{
5072	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
5073	struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;
5074
5075	ENTER;
5076	ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
5077	ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
5078
5079	ioarcb->cmd_pkt.cdb[0] = INQUIRY;
5080	ioarcb->cmd_pkt.cdb[1] = flags;
5081	ioarcb->cmd_pkt.cdb[2] = page;
5082	ioarcb->cmd_pkt.cdb[4] = xfer_len;
5083
5084	ioarcb->read_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
5085	ioarcb->read_data_transfer_length = cpu_to_be32(xfer_len);
5086
5087	ioadl->address = cpu_to_be32(dma_addr);
5088	ioadl->flags_and_data_len =
5089		cpu_to_be32(IPR_IOADL_FLAGS_READ_LAST | xfer_len);
5090
5091	ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
5092	LEAVE;
5093}
5094
5095/**
5096 * ipr_inquiry_page_supported - Is the given inquiry page supported
5097 * @page0:		inquiry page 0 buffer
5098 * @page:		page code.
5099 *
5100 * This function determines if the specified inquiry page is supported.
5101 *
5102 * Return value:
5103 *	1 if page is supported / 0 if not
5104 **/
5105static int ipr_inquiry_page_supported(struct ipr_inquiry_page0 *page0, u8 page)
5106{
5107	int i;
5108
5109	for (i = 0; i < min_t(u8, page0->len, IPR_INQUIRY_PAGE0_ENTRIES); i++)
5110		if (page0->page[i] == page)
5111			return 1;
5112
5113	return 0;
5114}
5115
5116/**
5117 * ipr_ioafp_page3_inquiry - Send a Page 3 Inquiry to the adapter.
5118 * @ipr_cmd:	ipr command struct
5119 *
5120 * This function sends a Page 3 inquiry to the adapter
5121 * to retrieve software VPD information.
5122 *
5123 * Return value:
5124 * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
5125 **/
5126static int ipr_ioafp_page3_inquiry(struct ipr_cmnd *ipr_cmd)
5127{
5128	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5129	struct ipr_inquiry_page0 *page0 = &ioa_cfg->vpd_cbs->page0_data;
5130
5131	ENTER;
5132
5133	if (!ipr_inquiry_page_supported(page0, 1))
5134		ioa_cfg->cache_state = CACHE_NONE;
5135
5136	ipr_cmd->job_step = ipr_ioafp_query_ioa_cfg;
5137
5138	ipr_ioafp_inquiry(ipr_cmd, 1, 3,
5139			  ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, page3_data),
5140			  sizeof(struct ipr_inquiry_page3));
5141
5142	LEAVE;
5143	return IPR_RC_JOB_RETURN;
5144}
5145
5146/**
5147 * ipr_ioafp_page0_inquiry - Send a Page 0 Inquiry to the adapter.
5148 * @ipr_cmd:	ipr command struct
5149 *
5150 * This function sends a Page 0 inquiry to the adapter
5151 * to retrieve supported inquiry pages.
5152 *
5153 * Return value:
5154 * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
5155 **/
5156static int ipr_ioafp_page0_inquiry(struct ipr_cmnd *ipr_cmd)
5157{
5158	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5159	char type[5];
5160
5161	ENTER;
5162
5163	/* Grab the type out of the VPD and store it away */
5164	memcpy(type, ioa_cfg->vpd_cbs->ioa_vpd.std_inq_data.vpids.product_id, 4);
5165	type[4] = '\0';
5166	ioa_cfg->type = simple_strtoul((char *)type, NULL, 16);
5167
5168	ipr_cmd->job_step = ipr_ioafp_page3_inquiry;
5169
5170	ipr_ioafp_inquiry(ipr_cmd, 1, 0,
5171			  ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, page0_data),
5172			  sizeof(struct ipr_inquiry_page0));
5173
5174	LEAVE;
5175	return IPR_RC_JOB_RETURN;
5176}
5177
5178/**
5179 * ipr_ioafp_std_inquiry - Send a Standard Inquiry to the adapter.
5180 * @ipr_cmd:	ipr command struct
5181 *
5182 * This function sends a standard inquiry to the adapter.
5183 *
5184 * Return value:
5185 * 	IPR_RC_JOB_RETURN
5186 **/
5187static int ipr_ioafp_std_inquiry(struct ipr_cmnd *ipr_cmd)
5188{
5189	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5190
5191	ENTER;
5192	ipr_cmd->job_step = ipr_ioafp_page0_inquiry;
5193
5194	ipr_ioafp_inquiry(ipr_cmd, 0, 0,
5195			  ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, ioa_vpd),
5196			  sizeof(struct ipr_ioa_vpd));
5197
5198	LEAVE;
5199	return IPR_RC_JOB_RETURN;
5200}
5201
5202/**
5203 * ipr_ioafp_indentify_hrrq - Send Identify Host RRQ.
5204 * @ipr_cmd:	ipr command struct
5205 *
5206 * This function sends an Identify Host Request Response Queue
5207 * command to establish the HRRQ with the adapter.
5208 *
5209 * Return value:
5210 * 	IPR_RC_JOB_RETURN
5211 **/
5212static int ipr_ioafp_indentify_hrrq(struct ipr_cmnd *ipr_cmd)
5213{
5214	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5215	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
5216
5217	ENTER;
5218	dev_info(&ioa_cfg->pdev->dev, "Starting IOA initialization sequence.\n");
5219
5220	ioarcb->cmd_pkt.cdb[0] = IPR_ID_HOST_RR_Q;
5221	ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
5222
5223	ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
5224	ioarcb->cmd_pkt.cdb[2] =
5225		((u32) ioa_cfg->host_rrq_dma >> 24) & 0xff;
5226	ioarcb->cmd_pkt.cdb[3] =
5227		((u32) ioa_cfg->host_rrq_dma >> 16) & 0xff;
5228	ioarcb->cmd_pkt.cdb[4] =
5229		((u32) ioa_cfg->host_rrq_dma >> 8) & 0xff;
5230	ioarcb->cmd_pkt.cdb[5] =
5231		((u32) ioa_cfg->host_rrq_dma) & 0xff;
5232	ioarcb->cmd_pkt.cdb[7] =
5233		((sizeof(u32) * IPR_NUM_CMD_BLKS) >> 8) & 0xff;
5234	ioarcb->cmd_pkt.cdb[8] =
5235		(sizeof(u32) * IPR_NUM_CMD_BLKS) & 0xff;
5236
5237	ipr_cmd->job_step = ipr_ioafp_std_inquiry;
5238
5239	ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
5240
5241	LEAVE;
5242	return IPR_RC_JOB_RETURN;
5243}
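
/*
 * For reference, the Identify Host RR Queue CDB built above carries the
 * queue address and size as big-endian byte strings (this is a summary of
 * the code, not a separate specification):
 *
 *	cdb[0]     IPR_ID_HOST_RR_Q
 *	cdb[2..5]  host_rrq_dma, most significant byte first
 *	cdb[7..8]  queue size in bytes, i.e. sizeof(u32) * IPR_NUM_CMD_BLKS,
 *	           most significant byte first
 */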
5244
5245/**
5246 * ipr_reset_timer_done - Adapter reset timer function
5247 * @ipr_cmd:	ipr command struct
5248 *
5249 * Description: This function is used in adapter reset processing
5250 * for timing events. If the reset_cmd pointer in the IOA
5251 * config struct no longer points at this command, we are doing nested
5252 * resets and fail_all_ops will take care of freeing the
5253 * command block.
5254 *
5255 * Return value:
5256 * 	none
5257 **/
5258static void ipr_reset_timer_done(struct ipr_cmnd *ipr_cmd)
5259{
5260	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5261	unsigned long lock_flags = 0;
5262
5263	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
5264
5265	if (ioa_cfg->reset_cmd == ipr_cmd) {
5266		list_del(&ipr_cmd->queue);
5267		ipr_cmd->done(ipr_cmd);
5268	}
5269
5270	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5271}
5272
5273/**
5274 * ipr_reset_start_timer - Start a timer for adapter reset job
5275 * @ipr_cmd:	ipr command struct
5276 * @timeout:	timeout value
5277 *
5278 * Description: This function is used in adapter reset processing
5279 * for timing events. If the reset_cmd pointer in the IOA
5280 * config struct no longer points at this command, we are doing nested
5281 * resets and fail_all_ops will take care of freeing the
5282 * command block.
5283 *
5284 * Return value:
5285 * 	none
5286 **/
5287static void ipr_reset_start_timer(struct ipr_cmnd *ipr_cmd,
5288				  unsigned long timeout)
5289{
5290	list_add_tail(&ipr_cmd->queue, &ipr_cmd->ioa_cfg->pending_q);
5291	ipr_cmd->done = ipr_reset_ioa_job;
5292
5293	ipr_cmd->timer.data = (unsigned long) ipr_cmd;
5294	ipr_cmd->timer.expires = jiffies + timeout;
5295	ipr_cmd->timer.function = (void (*)(unsigned long))ipr_reset_timer_done;
5296	add_timer(&ipr_cmd->timer);
5297}
5298
5299/**
5300 * ipr_init_ioa_mem - Initialize ioa_cfg control block
5301 * @ioa_cfg:	ioa cfg struct
5302 *
5303 * Return value:
5304 * 	nothing
5305 **/
5306static void ipr_init_ioa_mem(struct ipr_ioa_cfg *ioa_cfg)
5307{
5308	memset(ioa_cfg->host_rrq, 0, sizeof(u32) * IPR_NUM_CMD_BLKS);
5309
5310	/* Initialize Host RRQ pointers */
5311	ioa_cfg->hrrq_start = ioa_cfg->host_rrq;
5312	ioa_cfg->hrrq_end = &ioa_cfg->host_rrq[IPR_NUM_CMD_BLKS - 1];
5313	ioa_cfg->hrrq_curr = ioa_cfg->hrrq_start;
5314	ioa_cfg->toggle_bit = 1;
5315
5316	/* Zero out config table */
5317	memset(ioa_cfg->cfg_table, 0, sizeof(struct ipr_config_table));
5318}
5319
5320/**
5321 * ipr_reset_enable_ioa - Enable the IOA following a reset.
5322 * @ipr_cmd:	ipr command struct
5323 *
5324 * This function reinitializes some control blocks and
5325 * enables destructive diagnostics on the adapter.
5326 *
5327 * Return value:
5328 * 	IPR_RC_JOB_RETURN
5329 **/
5330static int ipr_reset_enable_ioa(struct ipr_cmnd *ipr_cmd)
5331{
5332	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5333	volatile u32 int_reg;
5334
5335	ENTER;
5336	ipr_cmd->job_step = ipr_ioafp_indentify_hrrq;
5337	ipr_init_ioa_mem(ioa_cfg);
5338
5339	ioa_cfg->allow_interrupts = 1;
5340	int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
5341
5342	if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
5343		writel((IPR_PCII_ERROR_INTERRUPTS | IPR_PCII_HRRQ_UPDATED),
5344		       ioa_cfg->regs.clr_interrupt_mask_reg);
5345		int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
5346		return IPR_RC_JOB_CONTINUE;
5347	}
5348
5349	/* Enable destructive diagnostics on IOA */
5350	writel(ioa_cfg->doorbell, ioa_cfg->regs.set_uproc_interrupt_reg);
5351
5352	writel(IPR_PCII_OPER_INTERRUPTS, ioa_cfg->regs.clr_interrupt_mask_reg);
5353	int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
5354
5355	dev_info(&ioa_cfg->pdev->dev, "Initializing IOA.\n");
5356
5357	ipr_cmd->timer.data = (unsigned long) ipr_cmd;
5358	ipr_cmd->timer.expires = jiffies + (ipr_transop_timeout * HZ);
5359	ipr_cmd->timer.function = (void (*)(unsigned long))ipr_oper_timeout;
5360	ipr_cmd->done = ipr_reset_ioa_job;
5361	add_timer(&ipr_cmd->timer);
5362	list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);
5363
5364	LEAVE;
5365	return IPR_RC_JOB_RETURN;
5366}
5367
5368/**
5369 * ipr_reset_wait_for_dump - Wait for a dump to timeout.
5370 * @ipr_cmd:	ipr command struct
5371 *
5372 * This function is invoked when an adapter dump has run out
5373 * of processing time.
5374 *
5375 * Return value:
5376 * 	IPR_RC_JOB_CONTINUE
5377 **/
5378static int ipr_reset_wait_for_dump(struct ipr_cmnd *ipr_cmd)
5379{
5380	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5381
5382	if (ioa_cfg->sdt_state == GET_DUMP)
5383		ioa_cfg->sdt_state = ABORT_DUMP;
5384
5385	ipr_cmd->job_step = ipr_reset_alert;
5386
5387	return IPR_RC_JOB_CONTINUE;
5388}
5389
5390/**
5391 * ipr_unit_check_no_data - Log a unit check/no data error log
5392 * @ioa_cfg:		ioa config struct
5393 *
5394 * Logs an error indicating the adapter unit checked, but for some
5395 * reason, we were unable to fetch the unit check buffer.
5396 *
5397 * Return value:
5398 * 	nothing
5399 **/
5400static void ipr_unit_check_no_data(struct ipr_ioa_cfg *ioa_cfg)
5401{
5402	ioa_cfg->errors_logged++;
5403	dev_err(&ioa_cfg->pdev->dev, "IOA unit check with no data\n");
5404}
5405
5406/**
5407 * ipr_get_unit_check_buffer - Get the unit check buffer from the IOA
5408 * @ioa_cfg:		ioa config struct
5409 *
5410 * Fetches the unit check buffer from the adapter by clocking the data
5411 * through the mailbox register.
5412 *
5413 * Return value:
5414 * 	nothing
5415 **/
5416static void ipr_get_unit_check_buffer(struct ipr_ioa_cfg *ioa_cfg)
5417{
5418	unsigned long mailbox;
5419	struct ipr_hostrcb *hostrcb;
5420	struct ipr_uc_sdt sdt;
5421	int rc, length;
5422
5423	mailbox = readl(ioa_cfg->ioa_mailbox);
5424
5425	if (!ipr_sdt_is_fmt2(mailbox)) {
5426		ipr_unit_check_no_data(ioa_cfg);
5427		return;
5428	}
5429
5430	memset(&sdt, 0, sizeof(struct ipr_uc_sdt));
5431	rc = ipr_get_ldump_data_section(ioa_cfg, mailbox, (__be32 *) &sdt,
5432					(sizeof(struct ipr_uc_sdt)) / sizeof(__be32));
5433
5434	if (rc || (be32_to_cpu(sdt.hdr.state) != IPR_FMT2_SDT_READY_TO_USE) ||
5435	    !(sdt.entry[0].flags & IPR_SDT_VALID_ENTRY)) {
5436		ipr_unit_check_no_data(ioa_cfg);
5437		return;
5438	}
5439
5440	/* Find length of the first sdt entry (UC buffer) */
5441	length = (be32_to_cpu(sdt.entry[0].end_offset) -
5442		  be32_to_cpu(sdt.entry[0].bar_str_offset)) & IPR_FMT2_MBX_ADDR_MASK;
5443
5444	hostrcb = list_entry(ioa_cfg->hostrcb_free_q.next,
5445			     struct ipr_hostrcb, queue);
5446	list_del(&hostrcb->queue);
5447	memset(&hostrcb->hcam, 0, sizeof(hostrcb->hcam));
5448
5449	rc = ipr_get_ldump_data_section(ioa_cfg,
5450					be32_to_cpu(sdt.entry[0].bar_str_offset),
5451					(__be32 *)&hostrcb->hcam,
5452					min(length, (int)sizeof(hostrcb->hcam)) / sizeof(__be32));
5453
5454	if (!rc)
5455		ipr_handle_log_data(ioa_cfg, hostrcb);
5456	else
5457		ipr_unit_check_no_data(ioa_cfg);
5458
5459	list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
5460}
5461
5462/**
5463 * ipr_reset_restore_cfg_space - Restore PCI config space.
5464 * @ipr_cmd:	ipr command struct
5465 *
5466 * Description: This function restores the saved PCI config space of
5467 * the adapter, fails all outstanding ops back to the callers, and
5468 * fetches the dump/unit check if applicable to this reset.
5469 *
5470 * Return value:
5471 * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
5472 **/
5473static int ipr_reset_restore_cfg_space(struct ipr_cmnd *ipr_cmd)
5474{
5475	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5476	int rc;
5477
5478	ENTER;
5479	pci_unblock_user_cfg_access(ioa_cfg->pdev);
5480	rc = pci_restore_state(ioa_cfg->pdev);
5481
5482	if (rc != PCIBIOS_SUCCESSFUL) {
5483		ipr_cmd->ioasa.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
5484		return IPR_RC_JOB_CONTINUE;
5485	}
5486
5487	if (ipr_set_pcix_cmd_reg(ioa_cfg)) {
5488		ipr_cmd->ioasa.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
5489		return IPR_RC_JOB_CONTINUE;
5490	}
5491
5492	ipr_fail_all_ops(ioa_cfg);
5493
5494	if (ioa_cfg->ioa_unit_checked) {
5495		ioa_cfg->ioa_unit_checked = 0;
5496		ipr_get_unit_check_buffer(ioa_cfg);
5497		ipr_cmd->job_step = ipr_reset_alert;
5498		ipr_reset_start_timer(ipr_cmd, 0);
5499		return IPR_RC_JOB_RETURN;
5500	}
5501
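	/*
	 * Route the job: a bringdown goes straight to completion, a
	 * normal reset re-enables the IOA, and a pending dump request
	 * is given IPR_DUMP_TIMEOUT to complete before the reset is
	 * re-alerted via ipr_reset_wait_for_dump.
	 */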
5502	if (ioa_cfg->in_ioa_bringdown) {
5503		ipr_cmd->job_step = ipr_ioa_bringdown_done;
5504	} else {
5505		ipr_cmd->job_step = ipr_reset_enable_ioa;
5506
5507		if (GET_DUMP == ioa_cfg->sdt_state) {
5508			ipr_reset_start_timer(ipr_cmd, IPR_DUMP_TIMEOUT);
5509			ipr_cmd->job_step = ipr_reset_wait_for_dump;
5510			schedule_work(&ioa_cfg->work_q);
5511			return IPR_RC_JOB_RETURN;
5512		}
5513	}
5514
5515	LEAVE;
5516	return IPR_RC_JOB_CONTINUE;
5517}
5518
5519/**
5520 * ipr_reset_start_bist - Run BIST on the adapter.
5521 * @ipr_cmd:	ipr command struct
5522 *
5523 * Description: This function runs BIST on the adapter, then delays 2 seconds.
5524 *
5525 * Return value:
5526 * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
5527 **/
5528static int ipr_reset_start_bist(struct ipr_cmnd *ipr_cmd)
5529{
5530	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5531	int rc;
5532
5533	ENTER;
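	/* Keep user-space config space accesses out while BIST resets the card */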
5534	pci_block_user_cfg_access(ioa_cfg->pdev);
5535	rc = pci_write_config_byte(ioa_cfg->pdev, PCI_BIST, PCI_BIST_START);
5536
5537	if (rc != PCIBIOS_SUCCESSFUL) {
5538		ipr_cmd->ioasa.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
5539		rc = IPR_RC_JOB_CONTINUE;
5540	} else {
5541		ipr_cmd->job_step = ipr_reset_restore_cfg_space;
5542		ipr_reset_start_timer(ipr_cmd, IPR_WAIT_FOR_BIST_TIMEOUT);
5543		rc = IPR_RC_JOB_RETURN;
5544	}
5545
5546	LEAVE;
5547	return rc;
5548}
5549
5550/**
5551 * ipr_reset_allowed - Query whether or not IOA can be reset
5552 * @ioa_cfg:	ioa config struct
5553 *
5554 * Return value:
5555 * 	0 if reset not allowed / non-zero if reset is allowed
5556 **/
5557static int ipr_reset_allowed(struct ipr_ioa_cfg *ioa_cfg)
5558{
5559	volatile u32 temp_reg;
5560
5561	temp_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
5562	return ((temp_reg & IPR_PCII_CRITICAL_OPERATION) == 0);
5563}
5564
5565/**
5566 * ipr_reset_wait_to_start_bist - Wait for permission to reset IOA.
5567 * @ipr_cmd:	ipr command struct
5568 *
5569 * Description: This function waits for adapter permission to run BIST,
5570 * then runs BIST. If the adapter does not give permission after a
5571 * reasonable time, we will reset the adapter anyway. Resetting the
5572 * adapter without warning it risks losing the adapter's persistent
5573 * error log. If the adapter is reset while it is writing to its
5574 * flash, the affected flash segment will have bad ECC and will be
5575 * zeroed.
5576 *
5577 * Return value:
5578 * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
5579 **/
5580static int ipr_reset_wait_to_start_bist(struct ipr_cmnd *ipr_cmd)
5581{
5582	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5583	int rc = IPR_RC_JOB_RETURN;
5584
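	/*
	 * Poll every IPR_CHECK_FOR_RESET_TIMEOUT until the adapter drops
	 * its critical operation indication or u.time_left is exhausted,
	 * then fall through to BIST.
	 */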
5585	if (!ipr_reset_allowed(ioa_cfg) && ipr_cmd->u.time_left) {
5586		ipr_cmd->u.time_left -= IPR_CHECK_FOR_RESET_TIMEOUT;
5587		ipr_reset_start_timer(ipr_cmd, IPR_CHECK_FOR_RESET_TIMEOUT);
5588	} else {
5589		ipr_cmd->job_step = ipr_reset_start_bist;
5590		rc = IPR_RC_JOB_CONTINUE;
5591	}
5592
5593	return rc;
5594}
5595
5596/**
5597 * ipr_reset_alert - Alert the adapter of a pending reset
5598 * @ipr_cmd:	ipr command struct
5599 *
5600 * Description: This function alerts the adapter that it will be reset.
5601 * If memory space is not currently enabled, proceed directly
5602 * to running BIST on the adapter. The timer must always be started
5603 * so we guarantee we do not run BIST from ipr_isr.
5604 *
5605 * Return value:
5606 * 	IPR_RC_JOB_RETURN
5607 **/
5608static int ipr_reset_alert(struct ipr_cmnd *ipr_cmd)
5609{
5610	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5611	u16 cmd_reg;
5612	int rc;
5613
5614	ENTER;
5615	rc = pci_read_config_word(ioa_cfg->pdev, PCI_COMMAND, &cmd_reg);
5616
5617	if ((rc == PCIBIOS_SUCCESSFUL) && (cmd_reg & PCI_COMMAND_MEMORY)) {
5618		ipr_mask_and_clear_interrupts(ioa_cfg, ~0);
5619		writel(IPR_UPROCI_RESET_ALERT, ioa_cfg->regs.set_uproc_interrupt_reg);
5620		ipr_cmd->job_step = ipr_reset_wait_to_start_bist;
5621	} else {
5622		ipr_cmd->job_step = ipr_reset_start_bist;
5623	}
5624
5625	ipr_cmd->u.time_left = IPR_WAIT_FOR_RESET_TIMEOUT;
5626	ipr_reset_start_timer(ipr_cmd, IPR_CHECK_FOR_RESET_TIMEOUT);
5627
5628	LEAVE;
5629	return IPR_RC_JOB_RETURN;
5630}
5631
5632/**
5633 * ipr_reset_ucode_download_done - Microcode download completion
5634 * @ipr_cmd:	ipr command struct
5635 *
5636 * Description: This function unmaps the microcode download buffer.
5637 *
5638 * Return value:
5639 * 	IPR_RC_JOB_CONTINUE
5640 **/
5641static int ipr_reset_ucode_download_done(struct ipr_cmnd *ipr_cmd)
5642{
5643	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5644	struct ipr_sglist *sglist = ioa_cfg->ucode_sglist;
5645
5646	pci_unmap_sg(ioa_cfg->pdev, sglist->scatterlist,
5647		     sglist->num_sg, DMA_TO_DEVICE);
5648
5649	ipr_cmd->job_step = ipr_reset_alert;
5650	return IPR_RC_JOB_CONTINUE;
5651}
5652
5653/**
5654 * ipr_reset_ucode_download - Download microcode to the adapter
5655 * @ipr_cmd:	ipr command struct
5656 *
5657 * Description: This function checks to see if there is microcode
5658 * to download to the adapter. If there is, a download is performed.
5659 *
5660 * Return value:
5661 * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
5662 **/
5663static int ipr_reset_ucode_download(struct ipr_cmnd *ipr_cmd)
5664{
5665	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5666	struct ipr_sglist *sglist = ioa_cfg->ucode_sglist;
5667
5668	ENTER;
5669	ipr_cmd->job_step = ipr_reset_alert;
5670
5671	if (!sglist)
5672		return IPR_RC_JOB_CONTINUE;
5673
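	/*
	 * Build a SCSI WRITE BUFFER (download microcode and save) CDB
	 * addressed to the IOA itself; bytes 6-8 carry the 24-bit image
	 * length taken from the ucode scatter/gather list.
	 */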
5674	ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
5675	ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
5676	ipr_cmd->ioarcb.cmd_pkt.cdb[0] = WRITE_BUFFER;
5677	ipr_cmd->ioarcb.cmd_pkt.cdb[1] = IPR_WR_BUF_DOWNLOAD_AND_SAVE;
5678	ipr_cmd->ioarcb.cmd_pkt.cdb[6] = (sglist->buffer_len & 0xff0000) >> 16;
5679	ipr_cmd->ioarcb.cmd_pkt.cdb[7] = (sglist->buffer_len & 0x00ff00) >> 8;
5680	ipr_cmd->ioarcb.cmd_pkt.cdb[8] = sglist->buffer_len & 0x0000ff;
5681
5682	ipr_build_ucode_ioadl(ipr_cmd, sglist);
5683	ipr_cmd->job_step = ipr_reset_ucode_download_done;
5684
5685	ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
5686		   IPR_WRITE_BUFFER_TIMEOUT);
5687
5688	LEAVE;
5689	return IPR_RC_JOB_RETURN;
5690}
5691
5692/**
5693 * ipr_reset_shutdown_ioa - Shutdown the adapter
5694 * @ipr_cmd:	ipr command struct
5695 *
5696 * Description: This function issues an adapter shutdown of the
5697 * specified type to the specified adapter as part of the
5698 * adapter reset job.
5699 *
5700 * Return value:
5701 * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
5702 **/
5703static int ipr_reset_shutdown_ioa(struct ipr_cmnd *ipr_cmd)
5704{
5705	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5706	enum ipr_shutdown_type shutdown_type = ipr_cmd->u.shutdown_type;
5707	unsigned long timeout;
5708	int rc = IPR_RC_JOB_CONTINUE;
5709
5710	ENTER;
5711	if (shutdown_type != IPR_SHUTDOWN_NONE && !ioa_cfg->ioa_is_dead) {
5712		ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
5713		ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
5714		ipr_cmd->ioarcb.cmd_pkt.cdb[0] = IPR_IOA_SHUTDOWN;
5715		ipr_cmd->ioarcb.cmd_pkt.cdb[1] = shutdown_type;
5716
5717		if (shutdown_type == IPR_SHUTDOWN_ABBREV)
5718			timeout = IPR_ABBREV_SHUTDOWN_TIMEOUT;
5719		else if (shutdown_type == IPR_SHUTDOWN_PREPARE_FOR_NORMAL)
5720			timeout = IPR_INTERNAL_TIMEOUT;
5721		else
5722			timeout = IPR_SHUTDOWN_TIMEOUT;
5723
5724		ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, timeout);
5725
5726		rc = IPR_RC_JOB_RETURN;
5727		ipr_cmd->job_step = ipr_reset_ucode_download;
5728	} else
5729		ipr_cmd->job_step = ipr_reset_alert;
5730
5731	LEAVE;
5732	return rc;
5733}
5734
5735/**
5736 * ipr_reset_ioa_job - Adapter reset job
5737 * @ipr_cmd:	ipr command struct
5738 *
5739 * Description: This function is the job router for the adapter reset job.
5740 *
5741 * Return value:
5742 * 	none
5743 **/
5744static void ipr_reset_ioa_job(struct ipr_cmnd *ipr_cmd)
5745{
5746	u32 rc, ioasc;
5747	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5748
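	/*
	 * Step through the reset job: each job_step either finishes
	 * synchronously (IPR_RC_JOB_CONTINUE, so we loop to the next
	 * step) or starts an asynchronous operation and returns
	 * IPR_RC_JOB_RETURN to wait for its completion.
	 */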
5749	do {
5750		ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
5751
5752		if (ioa_cfg->reset_cmd != ipr_cmd) {
5753			/*
5754			 * We are doing nested adapter resets and this is
5755			 * not the current reset job.
5756			 */
5757			list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
5758			return;
5759		}
5760
5761		if (IPR_IOASC_SENSE_KEY(ioasc)) {
5762			rc = ipr_cmd->job_step_failed(ipr_cmd);
5763			if (rc == IPR_RC_JOB_RETURN)
5764				return;
5765		}
5766
5767		ipr_reinit_ipr_cmnd(ipr_cmd);
5768		ipr_cmd->job_step_failed = ipr_reset_cmd_failed;
5769		rc = ipr_cmd->job_step(ipr_cmd);
5770	} while (rc == IPR_RC_JOB_CONTINUE);
5771}
5772
5773/**
5774 * _ipr_initiate_ioa_reset - Initiate an adapter reset
5775 * @ioa_cfg:		ioa config struct
5776 * @job_step:		first job step of reset job
5777 * @shutdown_type:	shutdown type
5778 *
5779 * Description: This function will initiate the reset of the given adapter
5780 * starting at the selected job step.
5781 * If the caller needs to wait on the completion of the reset,
5782 * the caller must sleep on the reset_wait_q.
5783 *
5784 * Return value:
5785 * 	none
5786 **/
5787static void _ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg,
5788				    int (*job_step) (struct ipr_cmnd *),
5789				    enum ipr_shutdown_type shutdown_type)
5790{
5791	struct ipr_cmnd *ipr_cmd;
5792
5793	ioa_cfg->in_reset_reload = 1;
5794	ioa_cfg->allow_cmds = 0;
5795	scsi_block_requests(ioa_cfg->host);
5796
5797	ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
5798	ioa_cfg->reset_cmd = ipr_cmd;
5799	ipr_cmd->job_step = job_step;
5800	ipr_cmd->u.shutdown_type = shutdown_type;
5801
5802	ipr_reset_ioa_job(ipr_cmd);
5803}
5804
5805/**
5806 * ipr_initiate_ioa_reset - Initiate an adapter reset
5807 * @ioa_cfg:		ioa config struct
5808 * @shutdown_type:	shutdown type
5809 *
5810 * Description: This function will initiate the reset of the given adapter.
5811 * If the caller needs to wait on the completion of the reset,
5812 * the caller must sleep on the reset_wait_q.
5813 *
5814 * Return value:
5815 * 	none
5816 **/
5817static void ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg,
5818				   enum ipr_shutdown_type shutdown_type)
5819{
5820	if (ioa_cfg->ioa_is_dead)
5821		return;
5822
5823	if (ioa_cfg->in_reset_reload && ioa_cfg->sdt_state == GET_DUMP)
5824		ioa_cfg->sdt_state = ABORT_DUMP;
5825
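	/*
	 * After IPR_NUM_RESET_RELOAD_RETRIES consecutive reset attempts
	 * the adapter is taken offline; if it was not already being
	 * brought down, one final bringdown without a shutdown command
	 * is attempted.
	 */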
5826	if (ioa_cfg->reset_retries++ >= IPR_NUM_RESET_RELOAD_RETRIES) {
5827		dev_err(&ioa_cfg->pdev->dev,
5828			"IOA taken offline - error recovery failed\n");
5829
5830		ioa_cfg->reset_retries = 0;
5831		ioa_cfg->ioa_is_dead = 1;
5832
5833		if (ioa_cfg->in_ioa_bringdown) {
5834			ioa_cfg->reset_cmd = NULL;
5835			ioa_cfg->in_reset_reload = 0;
5836			ipr_fail_all_ops(ioa_cfg);
5837			wake_up_all(&ioa_cfg->reset_wait_q);
5838
5839			spin_unlock_irq(ioa_cfg->host->host_lock);
5840			scsi_unblock_requests(ioa_cfg->host);
5841			spin_lock_irq(ioa_cfg->host->host_lock);
5842			return;
5843		} else {
5844			ioa_cfg->in_ioa_bringdown = 1;
5845			shutdown_type = IPR_SHUTDOWN_NONE;
5846		}
5847	}
5848
5849	_ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_shutdown_ioa,
5850				shutdown_type);
5851}
5852
5853/**
5854 * ipr_reset_freeze - Hold off all I/O activity
5855 * @ipr_cmd:	ipr command struct
5856 *
5857 * Description: If the PCI slot is frozen, hold off all I/O
5858 * activity; then, as soon as the slot is available again,
5859 * initiate an adapter reset.
5860 */
5861static int ipr_reset_freeze(struct ipr_cmnd *ipr_cmd)
5862{
5863	/* Disallow new interrupts, avoid loop */
5864	ipr_cmd->ioa_cfg->allow_interrupts = 0;
5865	list_add_tail(&ipr_cmd->queue, &ipr_cmd->ioa_cfg->pending_q);
5866	ipr_cmd->done = ipr_reset_ioa_job;
5867	return IPR_RC_JOB_RETURN;
5868}
5869
5870/**
5871 * ipr_pci_frozen - Called when slot has experienced a PCI bus error.
5872 * @pdev:	PCI device struct
5873 *
5874 * Description: This routine is called to tell us that the PCI bus
5875 * is down. Can't do anything here, except put the device driver
5876 * into a holding pattern, waiting for the PCI bus to come back.
5877 */
5878static void ipr_pci_frozen(struct pci_dev *pdev)
5879{
5880	unsigned long flags = 0;
5881	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
5882
5883	spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
5884	_ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_freeze, IPR_SHUTDOWN_NONE);
5885	spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
5886}
5887
5888/**
5889 * ipr_pci_slot_reset - Called when PCI slot has been reset.
5890 * @pdev:	PCI device struct
5891 *
5892 * Description: This routine is called by the pci error recovery
5893 * code after the PCI slot has been reset, just before we
5894 * should resume normal operations.
5895 */
5896static pci_ers_result_t ipr_pci_slot_reset(struct pci_dev *pdev)
5897{
5898	unsigned long flags = 0;
5899	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
5900
5901	spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
5902	_ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_restore_cfg_space,
5903	                                 IPR_SHUTDOWN_NONE);
5904	spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
5905	return PCI_ERS_RESULT_RECOVERED;
5906}
5907
5908/**
5909 * ipr_pci_perm_failure - Called when PCI slot is dead for good.
5910 * @pdev:	PCI device struct
5911 *
5912 * Description: This routine is called when the PCI bus has
5913 * permanently failed.
5914 */
5915static void ipr_pci_perm_failure(struct pci_dev *pdev)
5916{
5917	unsigned long flags = 0;
5918	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
5919
5920	spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
5921	if (ioa_cfg->sdt_state == WAIT_FOR_DUMP)
5922		ioa_cfg->sdt_state = ABORT_DUMP;
5923	ioa_cfg->reset_retries = IPR_NUM_RESET_RELOAD_RETRIES;
5924	ioa_cfg->in_ioa_bringdown = 1;
5925	ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
5926	spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
5927}
5928
5929/**
5930 * ipr_pci_error_detected - Called when a PCI error is detected.
5931 * @pdev:	PCI device struct
5932 * @state:	PCI channel state
5933 *
5934 * Description: Called when a PCI error is detected.
5935 *
5936 * Return value:
5937 * 	PCI_ERS_RESULT_NEED_RESET or PCI_ERS_RESULT_DISCONNECT
5938 */
5939static pci_ers_result_t ipr_pci_error_detected(struct pci_dev *pdev,
5940					       pci_channel_state_t state)
5941{
5942	switch (state) {
5943	case pci_channel_io_frozen:
5944		ipr_pci_frozen(pdev);
5945		return PCI_ERS_RESULT_NEED_RESET;
5946	case pci_channel_io_perm_failure:
5947		ipr_pci_perm_failure(pdev);
5948		return PCI_ERS_RESULT_DISCONNECT;
5950	default:
5951		break;
5952	}
5953	return PCI_ERS_RESULT_NEED_RESET;
5954}
5955
5956/**
5957 * ipr_probe_ioa_part2 - Initializes IOAs found in ipr_probe_ioa(..)
5958 * @ioa_cfg:	ioa cfg struct
5959 *
5960 * Description: This is the second phase of adapter initialization.
5961 * This function takes care of initializing the adapter to the point
5962 * where it can accept new commands.
5963 *
5964 * Return value:
5965 * 	0 on success / -EIO on failure
5966 **/
5967static int __devinit ipr_probe_ioa_part2(struct ipr_ioa_cfg *ioa_cfg)
5968{
5969	int rc = 0;
5970	unsigned long host_lock_flags = 0;
5971
5972	ENTER;
5973	spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
5974	dev_dbg(&ioa_cfg->pdev->dev, "ioa_cfg adx: 0x%p\n", ioa_cfg);
5975	if (ioa_cfg->needs_hard_reset) {
5976		ioa_cfg->needs_hard_reset = 0;
5977		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
5978	} else
5979		_ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_enable_ioa,
5980					IPR_SHUTDOWN_NONE);
5981
5982	spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
5983	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
5984	spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
5985
5986	if (ioa_cfg->ioa_is_dead) {
5987		rc = -EIO;
5988	} else if (ipr_invalid_adapter(ioa_cfg)) {
5989		if (!ipr_testmode)
5990			rc = -EIO;
5991
5992		dev_err(&ioa_cfg->pdev->dev,
5993			"Adapter not supported in this hardware configuration.\n");
5994	}
5995
5996	spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
5997
5998	LEAVE;
5999	return rc;
6000}
6001
6002/**
6003 * ipr_free_cmd_blks - Frees command blocks allocated for an adapter
6004 * @ioa_cfg:	ioa config struct
6005 *
6006 * Return value:
6007 * 	none
6008 **/
6009static void ipr_free_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
6010{
6011	int i;
6012
6013	for (i = 0; i < IPR_NUM_CMD_BLKS; i++) {
6014		if (ioa_cfg->ipr_cmnd_list[i])
6015			pci_pool_free(ioa_cfg->ipr_cmd_pool,
6016				      ioa_cfg->ipr_cmnd_list[i],
6017				      ioa_cfg->ipr_cmnd_list_dma[i]);
6018
6019		ioa_cfg->ipr_cmnd_list[i] = NULL;
6020	}
6021
6022	if (ioa_cfg->ipr_cmd_pool)
6023		pci_pool_destroy (ioa_cfg->ipr_cmd_pool);
6024
6025	ioa_cfg->ipr_cmd_pool = NULL;
6026}
6027
6028/**
6029 * ipr_free_mem - Frees memory allocated for an adapter
6030 * @ioa_cfg:	ioa cfg struct
6031 *
6032 * Return value:
6033 * 	nothing
6034 **/
6035static void ipr_free_mem(struct ipr_ioa_cfg *ioa_cfg)
6036{
6037	int i;
6038
6039	kfree(ioa_cfg->res_entries);
6040	pci_free_consistent(ioa_cfg->pdev, sizeof(struct ipr_misc_cbs),
6041			    ioa_cfg->vpd_cbs, ioa_cfg->vpd_cbs_dma);
6042	ipr_free_cmd_blks(ioa_cfg);
6043	pci_free_consistent(ioa_cfg->pdev, sizeof(u32) * IPR_NUM_CMD_BLKS,
6044			    ioa_cfg->host_rrq, ioa_cfg->host_rrq_dma);
6045	pci_free_consistent(ioa_cfg->pdev, sizeof(struct ipr_config_table),
6046			    ioa_cfg->cfg_table,
6047			    ioa_cfg->cfg_table_dma);
6048
6049	for (i = 0; i < IPR_NUM_HCAMS; i++) {
6050		pci_free_consistent(ioa_cfg->pdev,
6051				    sizeof(struct ipr_hostrcb),
6052				    ioa_cfg->hostrcb[i],
6053				    ioa_cfg->hostrcb_dma[i]);
6054	}
6055
6056	ipr_free_dump(ioa_cfg);
6057	kfree(ioa_cfg->trace);
6058}
6059
6060/**
6061 * ipr_free_all_resources - Free all allocated resources for an adapter.
6062 * @ioa_cfg:	ioa config struct
6063 *
6064 * This function frees all allocated resources for the
6065 * specified adapter.
6066 *
6067 * Return value:
6068 * 	none
6069 **/
6070static void ipr_free_all_resources(struct ipr_ioa_cfg *ioa_cfg)
6071{
6072	struct pci_dev *pdev = ioa_cfg->pdev;
6073
6074	ENTER;
6075	free_irq(pdev->irq, ioa_cfg);
6076	iounmap(ioa_cfg->hdw_dma_regs);
6077	pci_release_regions(pdev);
6078	ipr_free_mem(ioa_cfg);
6079	scsi_host_put(ioa_cfg->host);
6080	pci_disable_device(pdev);
6081	LEAVE;
6082}
6083
6084/**
6085 * ipr_alloc_cmd_blks - Allocate command blocks for an adapter
6086 * @ioa_cfg:	ioa config struct
6087 *
6088 * Return value:
6089 * 	0 on success / -ENOMEM on allocation failure
6090 **/
6091static int __devinit ipr_alloc_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
6092{
6093	struct ipr_cmnd *ipr_cmd;
6094	struct ipr_ioarcb *ioarcb;
6095	dma_addr_t dma_addr;
6096	int i;
6097
6098	ioa_cfg->ipr_cmd_pool = pci_pool_create (IPR_NAME, ioa_cfg->pdev,
6099						 sizeof(struct ipr_cmnd), 8, 0);
6100
6101	if (!ioa_cfg->ipr_cmd_pool)
6102		return -ENOMEM;
6103
6104	for (i = 0; i < IPR_NUM_CMD_BLKS; i++) {
6105		ipr_cmd = pci_pool_alloc (ioa_cfg->ipr_cmd_pool, SLAB_KERNEL, &dma_addr);
6106
6107		if (!ipr_cmd) {
6108			ipr_free_cmd_blks(ioa_cfg);
6109			return -ENOMEM;
6110		}
6111
6112		memset(ipr_cmd, 0, sizeof(*ipr_cmd));
6113		ioa_cfg->ipr_cmnd_list[i] = ipr_cmd;
6114		ioa_cfg->ipr_cmnd_list_dma[i] = dma_addr;
6115
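		/*
		 * Pre-compute the DMA addresses of this command's IOADL,
		 * IOASA, and sense buffer and cache them in the command
		 * block so they never need to be recomputed in the I/O path.
		 */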
6116		ioarcb = &ipr_cmd->ioarcb;
6117		ioarcb->ioarcb_host_pci_addr = cpu_to_be32(dma_addr);
6118		ioarcb->host_response_handle = cpu_to_be32(i << 2);
6119		ioarcb->write_ioadl_addr =
6120			cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, ioadl));
6121		ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
6122		ioarcb->ioasa_host_pci_addr =
6123			cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, ioasa));
6124		ioarcb->ioasa_len = cpu_to_be16(sizeof(struct ipr_ioasa));
6125		ipr_cmd->cmd_index = i;
6126		ipr_cmd->ioa_cfg = ioa_cfg;
6127		ipr_cmd->sense_buffer_dma = dma_addr +
6128			offsetof(struct ipr_cmnd, sense_buffer);
6129
6130		list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
6131	}
6132
6133	return 0;
6134}
6135
6136/**
6137 * ipr_alloc_mem - Allocate memory for an adapter
6138 * @ioa_cfg:	ioa config struct
6139 *
6140 * Return value:
6141 * 	0 on success / non-zero for error
6142 **/
6143static int __devinit ipr_alloc_mem(struct ipr_ioa_cfg *ioa_cfg)
6144{
6145	struct pci_dev *pdev = ioa_cfg->pdev;
6146	int i, rc = -ENOMEM;
6147
6148	ENTER;
6149	ioa_cfg->res_entries = kzalloc(sizeof(struct ipr_resource_entry) *
6150				       IPR_MAX_PHYSICAL_DEVS, GFP_KERNEL);
6151
6152	if (!ioa_cfg->res_entries)
6153		goto out;
6154
6155	for (i = 0; i < IPR_MAX_PHYSICAL_DEVS; i++)
6156		list_add_tail(&ioa_cfg->res_entries[i].queue, &ioa_cfg->free_res_q);
6157
6158	ioa_cfg->vpd_cbs = pci_alloc_consistent(ioa_cfg->pdev,
6159						sizeof(struct ipr_misc_cbs),
6160						&ioa_cfg->vpd_cbs_dma);
6161
6162	if (!ioa_cfg->vpd_cbs)
6163		goto out_free_res_entries;
6164
6165	if (ipr_alloc_cmd_blks(ioa_cfg))
6166		goto out_free_vpd_cbs;
6167
6168	ioa_cfg->host_rrq = pci_alloc_consistent(ioa_cfg->pdev,
6169						 sizeof(u32) * IPR_NUM_CMD_BLKS,
6170						 &ioa_cfg->host_rrq_dma);
6171
6172	if (!ioa_cfg->host_rrq)
6173		goto out_ipr_free_cmd_blocks;
6174
6175	ioa_cfg->cfg_table = pci_alloc_consistent(ioa_cfg->pdev,
6176						  sizeof(struct ipr_config_table),
6177						  &ioa_cfg->cfg_table_dma);
6178
6179	if (!ioa_cfg->cfg_table)
6180		goto out_free_host_rrq;
6181
6182	for (i = 0; i < IPR_NUM_HCAMS; i++) {
6183		ioa_cfg->hostrcb[i] = pci_alloc_consistent(ioa_cfg->pdev,
6184							   sizeof(struct ipr_hostrcb),
6185							   &ioa_cfg->hostrcb_dma[i]);
6186
6187		if (!ioa_cfg->hostrcb[i])
6188			goto out_free_hostrcb_dma;
6189
6190		ioa_cfg->hostrcb[i]->hostrcb_dma =
6191			ioa_cfg->hostrcb_dma[i] + offsetof(struct ipr_hostrcb, hcam);
6192		list_add_tail(&ioa_cfg->hostrcb[i]->queue, &ioa_cfg->hostrcb_free_q);
6193	}
6194
6195	ioa_cfg->trace = kzalloc(sizeof(struct ipr_trace_entry) *
6196				 IPR_NUM_TRACE_ENTRIES, GFP_KERNEL);
6197
6198	if (!ioa_cfg->trace)
6199		goto out_free_hostrcb_dma;
6200
6201	rc = 0;
6202out:
6203	LEAVE;
6204	return rc;
6205
6206out_free_hostrcb_dma:
6207	while (i-- > 0) {
6208		pci_free_consistent(pdev, sizeof(struct ipr_hostrcb),
6209				    ioa_cfg->hostrcb[i],
6210				    ioa_cfg->hostrcb_dma[i]);
6211	}
6212	pci_free_consistent(pdev, sizeof(struct ipr_config_table),
6213			    ioa_cfg->cfg_table, ioa_cfg->cfg_table_dma);
6214out_free_host_rrq:
6215	pci_free_consistent(pdev, sizeof(u32) * IPR_NUM_CMD_BLKS,
6216			    ioa_cfg->host_rrq, ioa_cfg->host_rrq_dma);
6217out_ipr_free_cmd_blocks:
6218	ipr_free_cmd_blks(ioa_cfg);
6219out_free_vpd_cbs:
6220	pci_free_consistent(pdev, sizeof(struct ipr_misc_cbs),
6221			    ioa_cfg->vpd_cbs, ioa_cfg->vpd_cbs_dma);
6222out_free_res_entries:
6223	kfree(ioa_cfg->res_entries);
6224	goto out;
6225}
6226
6227/**
6228 * ipr_initialize_bus_attr - Initialize SCSI bus attributes to default values
6229 * @ioa_cfg:	ioa config struct
6230 *
6231 * Return value:
6232 * 	none
6233 **/
6234static void __devinit ipr_initialize_bus_attr(struct ipr_ioa_cfg *ioa_cfg)
6235{
6236	int i;
6237
6238	for (i = 0; i < IPR_MAX_NUM_BUSES; i++) {
6239		ioa_cfg->bus_attr[i].bus = i;
6240		ioa_cfg->bus_attr[i].qas_enabled = 0;
6241		ioa_cfg->bus_attr[i].bus_width = IPR_DEFAULT_BUS_WIDTH;
6242		if (ipr_max_speed < ARRAY_SIZE(ipr_max_bus_speeds))
6243			ioa_cfg->bus_attr[i].max_xfer_rate = ipr_max_bus_speeds[ipr_max_speed];
6244		else
6245			ioa_cfg->bus_attr[i].max_xfer_rate = IPR_U160_SCSI_RATE;
6246	}
6247}
6248
6249/**
6250 * ipr_init_ioa_cfg - Initialize IOA config struct
6251 * @ioa_cfg:	ioa config struct
6252 * @host:		scsi host struct
6253 * @pdev:		PCI dev struct
6254 *
6255 * Return value:
6256 * 	none
6257 **/
6258static void __devinit ipr_init_ioa_cfg(struct ipr_ioa_cfg *ioa_cfg,
6259				       struct Scsi_Host *host, struct pci_dev *pdev)
6260{
6261	const struct ipr_interrupt_offsets *p;
6262	struct ipr_interrupts *t;
6263	void __iomem *base;
6264
6265	ioa_cfg->host = host;
6266	ioa_cfg->pdev = pdev;
6267	ioa_cfg->log_level = ipr_log_level;
6268	ioa_cfg->doorbell = IPR_DOORBELL;
6269	if (!ipr_auto_create)
6270		ioa_cfg->doorbell |= IPR_RUNTIME_RESET;
6271	sprintf(ioa_cfg->eye_catcher, IPR_EYECATCHER);
6272	sprintf(ioa_cfg->trace_start, IPR_TRACE_START_LABEL);
6273	sprintf(ioa_cfg->ipr_free_label, IPR_FREEQ_LABEL);
6274	sprintf(ioa_cfg->ipr_pending_label, IPR_PENDQ_LABEL);
6275	sprintf(ioa_cfg->cfg_table_start, IPR_CFG_TBL_START);
6276	sprintf(ioa_cfg->resource_table_label, IPR_RES_TABLE_LABEL);
6277	sprintf(ioa_cfg->ipr_hcam_label, IPR_HCAM_LABEL);
6278	sprintf(ioa_cfg->ipr_cmd_label, IPR_CMD_LABEL);
6279
6280	INIT_LIST_HEAD(&ioa_cfg->free_q);
6281	INIT_LIST_HEAD(&ioa_cfg->pending_q);
6282	INIT_LIST_HEAD(&ioa_cfg->hostrcb_free_q);
6283	INIT_LIST_HEAD(&ioa_cfg->hostrcb_pending_q);
6284	INIT_LIST_HEAD(&ioa_cfg->free_res_q);
6285	INIT_LIST_HEAD(&ioa_cfg->used_res_q);
6286	INIT_WORK(&ioa_cfg->work_q, ipr_worker_thread, ioa_cfg);
6287	init_waitqueue_head(&ioa_cfg->reset_wait_q);
6288	ioa_cfg->sdt_state = INACTIVE;
6289	if (ipr_enable_cache)
6290		ioa_cfg->cache_state = CACHE_ENABLED;
6291	else
6292		ioa_cfg->cache_state = CACHE_DISABLED;
6293
6294	ipr_initialize_bus_attr(ioa_cfg);
6295
6296	host->max_id = IPR_MAX_NUM_TARGETS_PER_BUS;
6297	host->max_lun = IPR_MAX_NUM_LUNS_PER_TARGET;
6298	host->max_channel = IPR_MAX_BUS_TO_SCAN;
6299	host->unique_id = host->host_no;
6300	host->max_cmd_len = IPR_MAX_CDB_LEN;
6301	pci_set_drvdata(pdev, ioa_cfg);
6302
6303	p = &ioa_cfg->chip_cfg->regs;
6304	t = &ioa_cfg->regs;
6305	base = ioa_cfg->hdw_dma_regs;
6306
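	/*
	 * Convert the chip-specific register offsets into absolute
	 * addresses within the ioremapped register space once, so the
	 * rest of the driver can use ioa_cfg->regs directly.
	 */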
6307	t->set_interrupt_mask_reg = base + p->set_interrupt_mask_reg;
6308	t->clr_interrupt_mask_reg = base + p->clr_interrupt_mask_reg;
6309	t->sense_interrupt_mask_reg = base + p->sense_interrupt_mask_reg;
6310	t->clr_interrupt_reg = base + p->clr_interrupt_reg;
6311	t->sense_interrupt_reg = base + p->sense_interrupt_reg;
6312	t->ioarrin_reg = base + p->ioarrin_reg;
6313	t->sense_uproc_interrupt_reg = base + p->sense_uproc_interrupt_reg;
6314	t->set_uproc_interrupt_reg = base + p->set_uproc_interrupt_reg;
6315	t->clr_uproc_interrupt_reg = base + p->clr_uproc_interrupt_reg;
6316}
6317
6318/**
6319 * ipr_get_chip_cfg - Find adapter chip configuration
6320 * @dev_id:		PCI device id struct
6321 *
6322 * Return value:
6323 * 	ptr to chip config on success / NULL on failure
6324 **/
6325static const struct ipr_chip_cfg_t * __devinit
6326ipr_get_chip_cfg(const struct pci_device_id *dev_id)
6327{
6328	int i;
6329
6330	if (dev_id->driver_data)
6331		return (const struct ipr_chip_cfg_t *)dev_id->driver_data;
6332
6333	for (i = 0; i < ARRAY_SIZE(ipr_chip); i++)
6334		if (ipr_chip[i].vendor == dev_id->vendor &&
6335		    ipr_chip[i].device == dev_id->device)
6336			return ipr_chip[i].cfg;
6337	return NULL;
6338}
6339
6340/**
6341 * ipr_probe_ioa - Allocates memory and does first stage of initialization
6342 * @pdev:		PCI device struct
6343 * @dev_id:		PCI device id struct
6344 *
6345 * Return value:
6346 * 	0 on success / non-zero on failure
6347 **/
6348static int __devinit ipr_probe_ioa(struct pci_dev *pdev,
6349				   const struct pci_device_id *dev_id)
6350{
6351	struct ipr_ioa_cfg *ioa_cfg;
6352	struct Scsi_Host *host;
6353	unsigned long ipr_regs_pci;
6354	void __iomem *ipr_regs;
6355	u32 rc = PCIBIOS_SUCCESSFUL;
6356	volatile u32 mask, uproc;
6357
6358	ENTER;
6359
6360	if ((rc = pci_enable_device(pdev))) {
6361		dev_err(&pdev->dev, "Cannot enable adapter\n");
6362		goto out;
6363	}
6364
6365	dev_info(&pdev->dev, "Found IOA with IRQ: %d\n", pdev->irq);
6366
6367	host = scsi_host_alloc(&driver_template, sizeof(*ioa_cfg));
6368
6369	if (!host) {
6370		dev_err(&pdev->dev, "call to scsi_host_alloc failed!\n");
6371		rc = -ENOMEM;
6372		goto out_disable;
6373	}
6374
6375	ioa_cfg = (struct ipr_ioa_cfg *)host->hostdata;
6376	memset(ioa_cfg, 0, sizeof(struct ipr_ioa_cfg));
6377
6378	ioa_cfg->chip_cfg = ipr_get_chip_cfg(dev_id);
6379
6380	if (!ioa_cfg->chip_cfg) {
6381		dev_err(&pdev->dev, "Unknown adapter chipset 0x%04X 0x%04X\n",
6382			dev_id->vendor, dev_id->device);
6383		goto out_scsi_host_put;
6384	}
6385
6386	ipr_regs_pci = pci_resource_start(pdev, 0);
6387
6388	rc = pci_request_regions(pdev, IPR_NAME);
6389	if (rc < 0) {
6390		dev_err(&pdev->dev,
6391			"Couldn't register memory range of registers\n");
6392		goto out_scsi_host_put;
6393	}
6394
6395	ipr_regs = ioremap(ipr_regs_pci, pci_resource_len(pdev, 0));
6396
6397	if (!ipr_regs) {
6398		dev_err(&pdev->dev,
6399			"Couldn't map memory range of registers\n");
6400		rc = -ENOMEM;
6401		goto out_release_regions;
6402	}
6403
6404	ioa_cfg->hdw_dma_regs = ipr_regs;
6405	ioa_cfg->hdw_dma_regs_pci = ipr_regs_pci;
6406	ioa_cfg->ioa_mailbox = ioa_cfg->chip_cfg->mailbox + ipr_regs;
6407
6408	ipr_init_ioa_cfg(ioa_cfg, host, pdev);
6409
6410	pci_set_master(pdev);
6411
6412	rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
6413	if (rc < 0) {
6414		dev_err(&pdev->dev, "Failed to set PCI DMA mask\n");
6415		goto cleanup_nomem;
6416	}
6417
6418	rc = pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE,
6419				   ioa_cfg->chip_cfg->cache_line_size);
6420
6421	if (rc != PCIBIOS_SUCCESSFUL) {
6422		dev_err(&pdev->dev, "Write of cache line size failed\n");
6423		rc = -EIO;
6424		goto cleanup_nomem;
6425	}
6426
6427	/* Save away PCI config space for use following IOA reset */
6428	rc = pci_save_state(pdev);
6429
6430	if (rc != PCIBIOS_SUCCESSFUL) {
6431		dev_err(&pdev->dev, "Failed to save PCI config space\n");
6432		rc = -EIO;
6433		goto cleanup_nomem;
6434	}
6435
6436	if ((rc = ipr_save_pcix_cmd_reg(ioa_cfg)))
6437		goto cleanup_nomem;
6438
6439	if ((rc = ipr_set_pcix_cmd_reg(ioa_cfg)))
6440		goto cleanup_nomem;
6441
6442	rc = ipr_alloc_mem(ioa_cfg);
6443	if (rc < 0) {
6444		dev_err(&pdev->dev,
6445			"Couldn't allocate enough memory for device driver!\n");
6446		goto cleanup_nomem;
6447	}
6448
6449	/*
6450	 * If HRRQ updated interrupt is not masked, or reset alert is set,
6451	 * the card is in an unknown state and needs a hard reset
6452	 */
6453	mask = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
6454	uproc = readl(ioa_cfg->regs.sense_uproc_interrupt_reg);
6455	if ((mask & IPR_PCII_HRRQ_UPDATED) == 0 || (uproc & IPR_UPROCI_RESET_ALERT))
6456		ioa_cfg->needs_hard_reset = 1;
6457
6458	ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
6459	rc = request_irq(pdev->irq, ipr_isr, IRQF_SHARED, IPR_NAME, ioa_cfg);
6460
6461	if (rc) {
6462		dev_err(&pdev->dev, "Couldn't register IRQ %d! rc=%d\n",
6463			pdev->irq, rc);
6464		goto cleanup_nolog;
6465	}
6466
6467	spin_lock(&ipr_driver_lock);
6468	list_add_tail(&ioa_cfg->queue, &ipr_ioa_head);
6469	spin_unlock(&ipr_driver_lock);
6470
6471	LEAVE;
6472out:
6473	return rc;
6474
6475cleanup_nolog:
6476	ipr_free_mem(ioa_cfg);
6477cleanup_nomem:
6478	iounmap(ipr_regs);
6479out_release_regions:
6480	pci_release_regions(pdev);
6481out_scsi_host_put:
6482	scsi_host_put(host);
6483out_disable:
6484	pci_disable_device(pdev);
6485	goto out;
6486}
6487
6488/**
6489 * ipr_scan_vsets - Scans for VSET devices
6490 * @ioa_cfg:	ioa config struct
6491 *
6492 * Description: Since the VSET resources do not follow SAM (LUNs may be
6493 * sparse and LUN 0 may not exist), we have to scan for these devices ourselves.
6494 *
6495 * Return value:
6496 * 	none
6497 **/
6498static void ipr_scan_vsets(struct ipr_ioa_cfg *ioa_cfg)
6499{
6500	int target, lun;
6501
6502	for (target = 0; target < IPR_MAX_NUM_TARGETS_PER_BUS; target++)
6503		for (lun = 0; lun < IPR_MAX_NUM_VSET_LUNS_PER_TARGET; lun++ )
6504			scsi_add_device(ioa_cfg->host, IPR_VSET_BUS, target, lun);
6505}
6506
6507/**
6508 * ipr_initiate_ioa_bringdown - Bring down an adapter
6509 * @ioa_cfg:		ioa config struct
6510 * @shutdown_type:	shutdown type
6511 *
6512 * Description: This function will initiate bringing down the adapter.
6513 * This consists of issuing an IOA shutdown to the adapter
6514 * to flush the cache, and running BIST.
6515 * If the caller needs to wait on the completion of the reset,
6516 * the caller must sleep on the reset_wait_q.
6517 *
6518 * Return value:
6519 * 	none
6520 **/
6521static void ipr_initiate_ioa_bringdown(struct ipr_ioa_cfg *ioa_cfg,
6522				       enum ipr_shutdown_type shutdown_type)
6523{
6524	ENTER;
6525	if (ioa_cfg->sdt_state == WAIT_FOR_DUMP)
6526		ioa_cfg->sdt_state = ABORT_DUMP;
6527	ioa_cfg->reset_retries = 0;
6528	ioa_cfg->in_ioa_bringdown = 1;
6529	ipr_initiate_ioa_reset(ioa_cfg, shutdown_type);
6530	LEAVE;
6531}
6532
6533/**
6534 * __ipr_remove - Remove a single adapter
6535 * @pdev:	pci device struct
6536 *
6537 * Adapter hot plug remove entry point.
6538 *
6539 * Return value:
6540 * 	none
6541 **/
6542static void __ipr_remove(struct pci_dev *pdev)
6543{
6544	unsigned long host_lock_flags = 0;
6545	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
6546	ENTER;
6547
6548	spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
6549	ipr_initiate_ioa_bringdown(ioa_cfg, IPR_SHUTDOWN_NORMAL);
6550
6551	spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
6552	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
6553	flush_scheduled_work();
6554	spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
6555
6556	spin_lock(&ipr_driver_lock);
6557	list_del(&ioa_cfg->queue);
6558	spin_unlock(&ipr_driver_lock);
6559
6560	if (ioa_cfg->sdt_state == ABORT_DUMP)
6561		ioa_cfg->sdt_state = WAIT_FOR_DUMP;
6562	spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
6563
6564	ipr_free_all_resources(ioa_cfg);
6565
6566	LEAVE;
6567}
6568
6569/**
6570 * ipr_remove - IOA hot plug remove entry point
6571 * @pdev:	pci device struct
6572 *
6573 * Adapter hot plug remove entry point.
6574 *
6575 * Return value:
6576 * 	none
6577 **/
6578static void ipr_remove(struct pci_dev *pdev)
6579{
6580	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
6581
6582	ENTER;
6583
6584	ipr_remove_trace_file(&ioa_cfg->host->shost_classdev.kobj,
6585			      &ipr_trace_attr);
6586	ipr_remove_dump_file(&ioa_cfg->host->shost_classdev.kobj,
6587			     &ipr_dump_attr);
6588	scsi_remove_host(ioa_cfg->host);
6589
6590	__ipr_remove(pdev);
6591
6592	LEAVE;
6593}
6594
6595/**
6596 * ipr_probe - Adapter hot plug add entry point
6597 *
6598 * Return value:
6599 * 	0 on success / non-zero on failure
6600 **/
6601static int __devinit ipr_probe(struct pci_dev *pdev,
6602			       const struct pci_device_id *dev_id)
6603{
6604	struct ipr_ioa_cfg *ioa_cfg;
6605	int rc;
6606
6607	rc = ipr_probe_ioa(pdev, dev_id);
6608
6609	if (rc)
6610		return rc;
6611
6612	ioa_cfg = pci_get_drvdata(pdev);
6613	rc = ipr_probe_ioa_part2(ioa_cfg);
6614
6615	if (rc) {
6616		__ipr_remove(pdev);
6617		return rc;
6618	}
6619
6620	rc = scsi_add_host(ioa_cfg->host, &pdev->dev);
6621
6622	if (rc) {
6623		__ipr_remove(pdev);
6624		return rc;
6625	}
6626
6627	rc = ipr_create_trace_file(&ioa_cfg->host->shost_classdev.kobj,
6628				   &ipr_trace_attr);
6629
6630	if (rc) {
6631		scsi_remove_host(ioa_cfg->host);
6632		__ipr_remove(pdev);
6633		return rc;
6634	}
6635
6636	rc = ipr_create_dump_file(&ioa_cfg->host->shost_classdev.kobj,
6637				   &ipr_dump_attr);
6638
6639	if (rc) {
6640		ipr_remove_trace_file(&ioa_cfg->host->shost_classdev.kobj,
6641				      &ipr_trace_attr);
6642		scsi_remove_host(ioa_cfg->host);
6643		__ipr_remove(pdev);
6644		return rc;
6645	}
6646
6647	scsi_scan_host(ioa_cfg->host);
6648	ipr_scan_vsets(ioa_cfg);
6649	scsi_add_device(ioa_cfg->host, IPR_IOA_BUS, IPR_IOA_TARGET, IPR_IOA_LUN);
6650	ioa_cfg->allow_ml_add_del = 1;
6651	ioa_cfg->host->max_channel = IPR_VSET_BUS;
6652	schedule_work(&ioa_cfg->work_q);
6653	return 0;
6654}
6655
6656/**
6657 * ipr_shutdown - Shutdown handler.
6658 * @pdev:	pci device struct
6659 *
6660 * This function is invoked upon system shutdown/reboot. It will issue
6661 * an adapter shutdown to the adapter to flush the write cache.
6662 *
6663 * Return value:
6664 * 	none
6665 **/
6666static void ipr_shutdown(struct pci_dev *pdev)
6667{
6668	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
6669	unsigned long lock_flags = 0;
6670
6671	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
6672	ipr_initiate_ioa_bringdown(ioa_cfg, IPR_SHUTDOWN_NORMAL);
6673	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
6674	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
6675}
6676
6677static struct pci_device_id ipr_pci_table[] __devinitdata = {
6678	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
6679		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_5702,
6680		0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
6681	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
6682		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_5703,
6683	      0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
6684	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
6685		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_573D,
6686	      0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
6687	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
6688		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_573E,
6689	      0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
6690	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
6691		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571B,
6692	      0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
6693	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
6694		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572E,
6695	      0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
6696	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
6697		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571A,
6698	      0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
6699	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
6700		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575B,
6701		0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
6702	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
6703	      PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572A,
6704	      0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
6705	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
6706	      PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572B,
6707	      0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
6708	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
6709	      PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572A,
6710	      0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
6711	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
6712	      PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572B,
6713	      0, 0, (kernel_ulong_t)&ipr_chip_cfg[0] },
6714	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE,
6715		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2780,
6716		0, 0, (kernel_ulong_t)&ipr_chip_cfg[1] },
6717	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
6718		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571E,
6719		0, 0, (kernel_ulong_t)&ipr_chip_cfg[1] },
6720	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
6721		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571F,
6722		0, 0, (kernel_ulong_t)&ipr_chip_cfg[1] },
6723	{ }
6724};
6725MODULE_DEVICE_TABLE(pci, ipr_pci_table);
6726
6727static struct pci_error_handlers ipr_err_handler = {
6728	.error_detected = ipr_pci_error_detected,
6729	.slot_reset = ipr_pci_slot_reset,
6730};
6731
6732static struct pci_driver ipr_driver = {
6733	.name = IPR_NAME,
6734	.id_table = ipr_pci_table,
6735	.probe = ipr_probe,
6736	.remove = ipr_remove,
6737	.shutdown = ipr_shutdown,
6738	.err_handler = &ipr_err_handler,
6739};
6740
6741/**
6742 * ipr_init - Module entry point
6743 *
6744 * Return value:
6745 * 	0 on success / negative value on failure
6746 **/
6747static int __init ipr_init(void)
6748{
6749	ipr_info("IBM Power RAID SCSI Device Driver version: %s %s\n",
6750		 IPR_DRIVER_VERSION, IPR_DRIVER_DATE);
6751
6752	return pci_module_init(&ipr_driver);
6753}
6754
6755/**
6756 * ipr_exit - Module unload
6757 *
6758 * Module unload entry point.
6759 *
6760 * Return value:
6761 * 	none
6762 **/
6763static void __exit ipr_exit(void)
6764{
6765	pci_unregister_driver(&ipr_driver);
6766}
6767
6768module_init(ipr_init);
6769module_exit(ipr_exit);
6770