ipr.c revision 7dacb64f49848f1f28018fd3e58af8d6ba234960
/*
 * ipr.c -- driver for IBM Power Linux RAID adapters
 *
 * Written By: Brian King <brking@us.ibm.com>, IBM Corporation
 *
 * Copyright (C) 2003, 2004 IBM Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 *
 */

/*
 * Notes:
 *
 * This driver is used to control the following SCSI adapters:
 *
 * IBM iSeries: 5702, 5703, 2780, 5709, 570A, 570B
 *
 * IBM pSeries: PCI-X Dual Channel Ultra 320 SCSI RAID Adapter
 *              PCI-X Dual Channel Ultra 320 SCSI Adapter
 *              PCI-X Dual Channel Ultra 320 SCSI RAID Enablement Card
 *              Embedded SCSI adapter on p615 and p655 systems
 *
 * Supported Hardware Features:
 *	- Ultra 320 SCSI controller
 *	- PCI-X host interface
 *	- Embedded PowerPC RISC Processor and Hardware XOR DMA Engine
 *	- Non-Volatile Write Cache
 *	- Supports attachment of non-RAID disks, tape, and optical devices
 *	- RAID Levels 0, 5, 10
 *	- Hot spare
 *	- Background Parity Checking
 *	- Background Data Scrubbing
 *	- Ability to increase the capacity of an existing RAID 5 disk array
 *		by adding disks
 *
 * Driver Features:
 *	- Tagged command queuing
 *	- Adapter microcode download
 *	- PCI hot plug
 *	- SCSI device hot plug
 *
 */

#include <linux/fs.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/pci.h>
#include <linux/wait.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/blkdev.h>
#include <linux/firmware.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/libata.h>
#include <linux/hdreg.h>
#include <linux/reboot.h>
#include <linux/stringify.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/processor.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_cmnd.h>
#include "ipr.h"

/*
 *   Global Data
 */
static LIST_HEAD(ipr_ioa_head);
static unsigned int ipr_log_level = IPR_DEFAULT_LOG_LEVEL;
static unsigned int ipr_max_speed = 1;
static int ipr_testmode = 0;
static unsigned int ipr_fastfail = 0;
static unsigned int ipr_transop_timeout = 0;
static unsigned int ipr_debug = 0;
static unsigned int ipr_max_devs = IPR_DEFAULT_SIS64_DEVS;
static unsigned int ipr_dual_ioa_raid = 1;
static DEFINE_SPINLOCK(ipr_driver_lock);

/* This table describes the differences between DMA controller chips */
static const struct ipr_chip_cfg_t ipr_chip_cfg[] = {
	{ /* Gemstone, Citrine, Obsidian, and Obsidian-E */
		.mailbox = 0x0042C,
		.cache_line_size = 0x20,
		{
			.set_interrupt_mask_reg = 0x0022C,
			.clr_interrupt_mask_reg = 0x00230,
			.clr_interrupt_mask_reg32 = 0x00230,
			.sense_interrupt_mask_reg = 0x0022C,
			.sense_interrupt_mask_reg32 = 0x0022C,
			.clr_interrupt_reg = 0x00228,
			.clr_interrupt_reg32 = 0x00228,
			.sense_interrupt_reg = 0x00224,
			.sense_interrupt_reg32 = 0x00224,
			.ioarrin_reg = 0x00404,
			.sense_uproc_interrupt_reg = 0x00214,
			.sense_uproc_interrupt_reg32 = 0x00214,
			.set_uproc_interrupt_reg = 0x00214,
			.set_uproc_interrupt_reg32 = 0x00214,
			.clr_uproc_interrupt_reg = 0x00218,
			.clr_uproc_interrupt_reg32 = 0x00218
		}
	},
	{ /* Snipe and Scamp */
		.mailbox = 0x0052C,
		.cache_line_size = 0x20,
		{
			.set_interrupt_mask_reg = 0x00288,
			.clr_interrupt_mask_reg = 0x0028C,
			.clr_interrupt_mask_reg32 = 0x0028C,
			.sense_interrupt_mask_reg = 0x00288,
			.sense_interrupt_mask_reg32 = 0x00288,
			.clr_interrupt_reg = 0x00284,
			.clr_interrupt_reg32 = 0x00284,
			.sense_interrupt_reg = 0x00280,
			.sense_interrupt_reg32 = 0x00280,
			.ioarrin_reg = 0x00504,
			.sense_uproc_interrupt_reg = 0x00290,
			.sense_uproc_interrupt_reg32 = 0x00290,
			.set_uproc_interrupt_reg = 0x00290,
			.set_uproc_interrupt_reg32 = 0x00290,
			.clr_uproc_interrupt_reg = 0x00294,
			.clr_uproc_interrupt_reg32 = 0x00294
		}
	},
	{ /* CRoC */
		.mailbox = 0x00044,
		.cache_line_size = 0x20,
		{
			.set_interrupt_mask_reg = 0x00010,
			.clr_interrupt_mask_reg = 0x00018,
			.clr_interrupt_mask_reg32 = 0x0001C,
			.sense_interrupt_mask_reg = 0x00010,
			.sense_interrupt_mask_reg32 = 0x00014,
			.clr_interrupt_reg = 0x00008,
			.clr_interrupt_reg32 = 0x0000C,
			.sense_interrupt_reg = 0x00000,
			.sense_interrupt_reg32 = 0x00004,
			.ioarrin_reg = 0x00070,
			.sense_uproc_interrupt_reg = 0x00020,
			.sense_uproc_interrupt_reg32 = 0x00024,
			.set_uproc_interrupt_reg = 0x00020,
			.set_uproc_interrupt_reg32 = 0x00024,
			.clr_uproc_interrupt_reg = 0x00028,
			.clr_uproc_interrupt_reg32 = 0x0002C,
			.init_feedback_reg = 0x0005C,
			.dump_addr_reg = 0x00064,
			.dump_data_reg = 0x00068,
			.endian_swap_reg = 0x00084
		}
	},
};

static const struct ipr_chip_t ipr_chip[] = {
	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E, IPR_USE_MSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[0] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[1] },
	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP, IPR_USE_LSI, IPR_SIS32, IPR_PCI_CFG, &ipr_chip_cfg[1] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2, IPR_USE_MSI, IPR_SIS64, IPR_MMIO, &ipr_chip_cfg[2] },
	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_ASIC_E2, IPR_USE_MSI, IPR_SIS64, IPR_MMIO, &ipr_chip_cfg[2] }
};

static int ipr_max_bus_speeds [] = {
	IPR_80MBs_SCSI_RATE, IPR_U160_SCSI_RATE, IPR_U320_SCSI_RATE
};

MODULE_AUTHOR("Brian King <brking@us.ibm.com>");
MODULE_DESCRIPTION("IBM Power RAID SCSI Adapter Driver");
module_param_named(max_speed, ipr_max_speed, uint, 0);
MODULE_PARM_DESC(max_speed, "Maximum bus speed (0-2). Default: 1=U160. Speeds: 0=80 MB/s, 1=U160, 2=U320");
module_param_named(log_level, ipr_log_level, uint, 0);
MODULE_PARM_DESC(log_level, "Set to 0 - 4 for increasing verbosity of device driver");
module_param_named(testmode, ipr_testmode, int, 0);
MODULE_PARM_DESC(testmode, "DANGEROUS!!! Allows unsupported configurations");
module_param_named(fastfail, ipr_fastfail, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(fastfail, "Reduce timeouts and retries");
module_param_named(transop_timeout, ipr_transop_timeout, int, 0);
MODULE_PARM_DESC(transop_timeout, "Time in seconds to wait for adapter to come operational (default: 300)");
module_param_named(debug, ipr_debug, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(debug, "Enable device driver debugging logging. Set to 1 to enable. (default: 0)");
module_param_named(dual_ioa_raid, ipr_dual_ioa_raid, int, 0);
MODULE_PARM_DESC(dual_ioa_raid, "Enable dual adapter RAID support. Set to 1 to enable. (default: 1)");
module_param_named(max_devs, ipr_max_devs, int, 0);
MODULE_PARM_DESC(max_devs, "Specify the maximum number of physical devices. "
		 "[Default=" __stringify(IPR_DEFAULT_SIS64_DEVS) "]");
MODULE_LICENSE("GPL");
MODULE_VERSION(IPR_DRIVER_VERSION);

/*  A constant array of IOASCs/URCs/Error Messages */
static const
struct ipr_error_table_t ipr_error_table[] = {
	{0x00000000, 1, IPR_DEFAULT_LOG_LEVEL,
	"8155: An unknown error was received"},
	{0x00330000, 0, 0,
	"Soft underlength error"},
	{0x005A0000, 0, 0,
	"Command to be cancelled not found"},
	{0x00808000, 0, 0,
	"Qualified success"},
	{0x01080000, 1, IPR_DEFAULT_LOG_LEVEL,
	"FFFE: Soft device bus error recovered by the IOA"},
	{0x01088100, 0, IPR_DEFAULT_LOG_LEVEL,
	"4101: Soft device bus fabric error"},
	{0x01100100, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFC: Logical block guard error recovered by the device"},
	{0x01100300, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFC: Logical block reference tag error recovered by the device"},
	{0x01108300, 0, IPR_DEFAULT_LOG_LEVEL,
	"4171: Recovered scatter list tag / sequence number error"},
	{0x01109000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FF3D: Recovered logical block CRC error on IOA to Host transfer"},
	{0x01109200, 0, IPR_DEFAULT_LOG_LEVEL,
	"4171: Recovered logical block sequence number error on IOA to Host transfer"},
	{0x0110A000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFD: Recovered logical block reference tag error detected by the IOA"},
	{0x0110A100, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFD: Logical block guard error recovered by the IOA"},
	{0x01170600, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF9: Device sector reassign successful"},
	{0x01170900, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF7: Media error recovered by device rewrite procedures"},
	{0x01180200, 0, IPR_DEFAULT_LOG_LEVEL,
	"7001: IOA sector reassignment successful"},
	{0x01180500, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF9: Soft media error. Sector reassignment recommended"},
	{0x01180600, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF7: Media error recovered by IOA rewrite procedures"},
	{0x01418000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FF3D: Soft PCI bus error recovered by the IOA"},
	{0x01440000, 1, IPR_DEFAULT_LOG_LEVEL,
	"FFF6: Device hardware error recovered by the IOA"},
	{0x01448100, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF6: Device hardware error recovered by the device"},
	{0x01448200, 1, IPR_DEFAULT_LOG_LEVEL,
	"FF3D: Soft IOA error recovered by the IOA"},
	{0x01448300, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFA: Undefined device response recovered by the IOA"},
	{0x014A0000, 1, IPR_DEFAULT_LOG_LEVEL,
	"FFF6: Device bus error, message or command phase"},
	{0x014A8000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFE: Task Management Function failed"},
	{0x015D0000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF6: Failure prediction threshold exceeded"},
	{0x015D9200, 0, IPR_DEFAULT_LOG_LEVEL,
	"8009: Impending cache battery pack failure"},
	{0x02040400, 0, 0,
	"34FF: Disk device format in progress"},
	{0x02048000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9070: IOA requested reset"},
	{0x023F0000, 0, 0,
	"Synchronization required"},
	{0x024E0000, 0, 0,
	"Not ready, IOA shutdown"},
	{0x025A0000, 0, 0,
	"Not ready, IOA has been shutdown"},
	{0x02670100, 0, IPR_DEFAULT_LOG_LEVEL,
	"3020: Storage subsystem configuration error"},
	{0x03110B00, 0, 0,
	"FFF5: Medium error, data unreadable, recommend reassign"},
	{0x03110C00, 0, 0,
	"7000: Medium error, data unreadable, do not reassign"},
	{0x03310000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF3: Disk media format bad"},
	{0x04050000, 0, IPR_DEFAULT_LOG_LEVEL,
	"3002: Addressed device failed to respond to selection"},
	{0x04080000, 1, IPR_DEFAULT_LOG_LEVEL,
	"3100: Device bus error"},
	{0x04080100, 0, IPR_DEFAULT_LOG_LEVEL,
	"3109: IOA timed out a device command"},
	{0x04088000, 0, 0,
	"3120: SCSI bus is not operational"},
	{0x04088100, 0, IPR_DEFAULT_LOG_LEVEL,
	"4100: Hard device bus fabric error"},
	{0x04100100, 0, IPR_DEFAULT_LOG_LEVEL,
	"310C: Logical block guard error detected by the device"},
	{0x04100300, 0, IPR_DEFAULT_LOG_LEVEL,
	"310C: Logical block reference tag error detected by the device"},
	{0x04108300, 1, IPR_DEFAULT_LOG_LEVEL,
	"4170: Scatter list tag / sequence number error"},
	{0x04109000, 1, IPR_DEFAULT_LOG_LEVEL,
	"8150: Logical block CRC error on IOA to Host transfer"},
	{0x04109200, 1, IPR_DEFAULT_LOG_LEVEL,
	"4170: Logical block sequence number error on IOA to Host transfer"},
	{0x0410A000, 0, IPR_DEFAULT_LOG_LEVEL,
	"310D: Logical block reference tag error detected by the IOA"},
	{0x0410A100, 0, IPR_DEFAULT_LOG_LEVEL,
	"310D: Logical block guard error detected by the IOA"},
	{0x04118000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9000: IOA reserved area data check"},
	{0x04118100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9001: IOA reserved area invalid data pattern"},
	{0x04118200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9002: IOA reserved area LRC error"},
	{0x04118300, 1, IPR_DEFAULT_LOG_LEVEL,
	"Hardware Error, IOA metadata access error"},
	{0x04320000, 0, IPR_DEFAULT_LOG_LEVEL,
	"102E: Out of alternate sectors for disk storage"},
	{0x04330000, 1, IPR_DEFAULT_LOG_LEVEL,
	"FFF4: Data transfer underlength error"},
	{0x04338000, 1, IPR_DEFAULT_LOG_LEVEL,
	"FFF4: Data transfer overlength error"},
	{0x043E0100, 0, IPR_DEFAULT_LOG_LEVEL,
	"3400: Logical unit failure"},
	{0x04408500, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF4: Device microcode is corrupt"},
	{0x04418000, 1, IPR_DEFAULT_LOG_LEVEL,
	"8150: PCI bus error"},
	{0x04430000, 1, 0,
	"Unsupported device bus message received"},
	{0x04440000, 1, IPR_DEFAULT_LOG_LEVEL,
	"FFF4: Disk device problem"},
	{0x04448200, 1, IPR_DEFAULT_LOG_LEVEL,
	"8150: Permanent IOA failure"},
	{0x04448300, 0, IPR_DEFAULT_LOG_LEVEL,
	"3010: Disk device returned wrong response to IOA"},
	{0x04448400, 0, IPR_DEFAULT_LOG_LEVEL,
	"8151: IOA microcode error"},
	{0x04448500, 0, 0,
	"Device bus status error"},
	{0x04448600, 0, IPR_DEFAULT_LOG_LEVEL,
	"8157: IOA error requiring IOA reset to recover"},
	{0x04448700, 0, 0,
	"ATA device status error"},
	{0x04490000, 0, 0,
	"Message reject received from the device"},
	{0x04449200, 0, IPR_DEFAULT_LOG_LEVEL,
	"8008: A permanent cache battery pack failure occurred"},
	{0x0444A000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9090: Disk unit has been modified after the last known status"},
	{0x0444A200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9081: IOA detected device error"},
	{0x0444A300, 0, IPR_DEFAULT_LOG_LEVEL,
	"9082: IOA detected device error"},
	{0x044A0000, 1, IPR_DEFAULT_LOG_LEVEL,
	"3110: Device bus error, message or command phase"},
	{0x044A8000, 1, IPR_DEFAULT_LOG_LEVEL,
	"3110: SAS Command / Task Management Function failed"},
	{0x04670400, 0, IPR_DEFAULT_LOG_LEVEL,
	"9091: Incorrect hardware configuration change has been detected"},
	{0x04678000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9073: Invalid multi-adapter configuration"},
	{0x04678100, 0, IPR_DEFAULT_LOG_LEVEL,
	"4010: Incorrect connection between cascaded expanders"},
	{0x04678200, 0, IPR_DEFAULT_LOG_LEVEL,
	"4020: Connections exceed IOA design limits"},
	{0x04678300, 0, IPR_DEFAULT_LOG_LEVEL,
	"4030: Incorrect multipath connection"},
	{0x04679000, 0, IPR_DEFAULT_LOG_LEVEL,
	"4110: Unsupported enclosure function"},
	{0x046E0000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFF4: Command to logical unit failed"},
	{0x05240000, 1, 0,
	"Illegal request, invalid request type or request packet"},
	{0x05250000, 0, 0,
	"Illegal request, invalid resource handle"},
	{0x05258000, 0, 0,
	"Illegal request, commands not allowed to this device"},
	{0x05258100, 0, 0,
	"Illegal request, command not allowed to a secondary adapter"},
	{0x05258200, 0, 0,
	"Illegal request, command not allowed to a non-optimized resource"},
	{0x05260000, 0, 0,
	"Illegal request, invalid field in parameter list"},
	{0x05260100, 0, 0,
	"Illegal request, parameter not supported"},
	{0x05260200, 0, 0,
	"Illegal request, parameter value invalid"},
	{0x052C0000, 0, 0,
	"Illegal request, command sequence error"},
	{0x052C8000, 1, 0,
	"Illegal request, dual adapter support not enabled"},
	{0x06040500, 0, IPR_DEFAULT_LOG_LEVEL,
	"9031: Array protection temporarily suspended, protection resuming"},
	{0x06040600, 0, IPR_DEFAULT_LOG_LEVEL,
	"9040: Array protection temporarily suspended, protection resuming"},
	{0x06288000, 0, IPR_DEFAULT_LOG_LEVEL,
	"3140: Device bus not ready to ready transition"},
	{0x06290000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFB: SCSI bus was reset"},
	{0x06290500, 0, 0,
	"FFFE: SCSI bus transition to single ended"},
	{0x06290600, 0, 0,
	"FFFE: SCSI bus transition to LVD"},
	{0x06298000, 0, IPR_DEFAULT_LOG_LEVEL,
	"FFFB: SCSI bus was reset by another initiator"},
	{0x063F0300, 0, IPR_DEFAULT_LOG_LEVEL,
	"3029: A device replacement has occurred"},
	{0x064C8000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9051: IOA cache data exists for a missing or failed device"},
	{0x064C8100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9055: Auxiliary cache IOA contains cache data needed by the primary IOA"},
	{0x06670100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9025: Disk unit is not supported at its physical location"},
	{0x06670600, 0, IPR_DEFAULT_LOG_LEVEL,
	"3020: IOA detected a SCSI bus configuration error"},
	{0x06678000, 0, IPR_DEFAULT_LOG_LEVEL,
	"3150: SCSI bus configuration error"},
	{0x06678100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9074: Asymmetric advanced function disk configuration"},
	{0x06678300, 0, IPR_DEFAULT_LOG_LEVEL,
	"4040: Incomplete multipath connection between IOA and enclosure"},
	{0x06678400, 0, IPR_DEFAULT_LOG_LEVEL,
	"4041: Incomplete multipath connection between enclosure and device"},
	{0x06678500, 0, IPR_DEFAULT_LOG_LEVEL,
	"9075: Incomplete multipath connection between IOA and remote IOA"},
	{0x06678600, 0, IPR_DEFAULT_LOG_LEVEL,
	"9076: Configuration error, missing remote IOA"},
	{0x06679100, 0, IPR_DEFAULT_LOG_LEVEL,
	"4050: Enclosure does not support a required multipath function"},
	{0x06690000, 0, IPR_DEFAULT_LOG_LEVEL,
	"4070: Logically bad block written on device"},
	{0x06690200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9041: Array protection temporarily suspended"},
	{0x06698200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9042: Corrupt array parity detected on specified device"},
	{0x066B0200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9030: Array no longer protected due to missing or failed disk unit"},
	{0x066B8000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9071: Link operational transition"},
	{0x066B8100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9072: Link not operational transition"},
	{0x066B8200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9032: Array exposed but still protected"},
	{0x066B8300, 0, IPR_DEFAULT_LOG_LEVEL + 1,
	"70DD: Device forced failed by disrupt device command"},
	{0x066B9100, 0, IPR_DEFAULT_LOG_LEVEL,
	"4061: Multipath redundancy level got better"},
	{0x066B9200, 0, IPR_DEFAULT_LOG_LEVEL,
	"4060: Multipath redundancy level got worse"},
	{0x07270000, 0, 0,
	"Failure due to other device"},
	{0x07278000, 0, IPR_DEFAULT_LOG_LEVEL,
	"9008: IOA does not support functions expected by devices"},
	{0x07278100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9010: Cache data associated with attached devices cannot be found"},
	{0x07278200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9011: Cache data belongs to devices other than those attached"},
	{0x07278400, 0, IPR_DEFAULT_LOG_LEVEL,
	"9020: Array missing 2 or more devices with only 1 device present"},
	{0x07278500, 0, IPR_DEFAULT_LOG_LEVEL,
	"9021: Array missing 2 or more devices with 2 or more devices present"},
	{0x07278600, 0, IPR_DEFAULT_LOG_LEVEL,
	"9022: Exposed array is missing a required device"},
	{0x07278700, 0, IPR_DEFAULT_LOG_LEVEL,
	"9023: Array member(s) not at required physical locations"},
	{0x07278800, 0, IPR_DEFAULT_LOG_LEVEL,
	"9024: Array not functional due to present hardware configuration"},
	{0x07278900, 0, IPR_DEFAULT_LOG_LEVEL,
	"9026: Array not functional due to present hardware configuration"},
	{0x07278A00, 0, IPR_DEFAULT_LOG_LEVEL,
	"9027: Array is missing a device and parity is out of sync"},
	{0x07278B00, 0, IPR_DEFAULT_LOG_LEVEL,
	"9028: Maximum number of arrays already exist"},
	{0x07278C00, 0, IPR_DEFAULT_LOG_LEVEL,
	"9050: Required cache data cannot be located for a disk unit"},
	{0x07278D00, 0, IPR_DEFAULT_LOG_LEVEL,
	"9052: Cache data exists for a device that has been modified"},
	{0x07278F00, 0, IPR_DEFAULT_LOG_LEVEL,
	"9054: IOA resources not available due to previous problems"},
	{0x07279100, 0, IPR_DEFAULT_LOG_LEVEL,
	"9092: Disk unit requires initialization before use"},
	{0x07279200, 0, IPR_DEFAULT_LOG_LEVEL,
	"9029: Incorrect hardware configuration change has been detected"},
	{0x07279600, 0, IPR_DEFAULT_LOG_LEVEL,
	"9060: One or more disk pairs are missing from an array"},
	{0x07279700, 0, IPR_DEFAULT_LOG_LEVEL,
	"9061: One or more disks are missing from an array"},
	{0x07279800, 0, IPR_DEFAULT_LOG_LEVEL,
	"9062: One or more disks are missing from an array"},
	{0x07279900, 0, IPR_DEFAULT_LOG_LEVEL,
	"9063: Maximum number of functional arrays has been exceeded"},
	{0x0B260000, 0, 0,
	"Aborted command, invalid descriptor"},
	{0x0B5A0000, 0, 0,
	"Command terminated by host"}
};

static const struct ipr_ses_table_entry ipr_ses_table[] = {
	{ "2104-DL1        ", "XXXXXXXXXXXXXXXX", 80 },
	{ "2104-TL1        ", "XXXXXXXXXXXXXXXX", 80 },
	{ "HSBP07M P U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Hidive 7 slot */
	{ "HSBP05M P U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Hidive 5 slot */
	{ "HSBP05M S U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Bowtie */
	{ "HSBP06E ASU2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* MartinFenning */
	{ "2104-DU3        ", "XXXXXXXXXXXXXXXX", 160 },
	{ "2104-TU3        ", "XXXXXXXXXXXXXXXX", 160 },
	{ "HSBP04C RSU2SCSI", "XXXXXXX*XXXXXXXX", 160 },
	{ "HSBP06E RSU2SCSI", "XXXXXXX*XXXXXXXX", 160 },
	{ "St  V1S2        ", "XXXXXXXXXXXXXXXX", 160 },
	{ "HSBPD4M  PU3SCSI", "XXXXXXX*XXXXXXXX", 160 },
	{ "VSBPD1H   U3SCSI", "XXXXXXX*XXXXXXXX", 160 }
};

/*
 *  Function Prototypes
 */
static int ipr_reset_alert(struct ipr_cmnd *);
static void ipr_process_ccn(struct ipr_cmnd *);
static void ipr_process_error(struct ipr_cmnd *);
static void ipr_reset_ioa_job(struct ipr_cmnd *);
static void ipr_initiate_ioa_reset(struct ipr_ioa_cfg *,
				   enum ipr_shutdown_type);

#ifdef CONFIG_SCSI_IPR_TRACE
/**
 * ipr_trc_hook - Add a trace entry to the driver trace
 * @ipr_cmd:	ipr command struct
 * @type:		trace type
 * @add_data:	additional data
 *
 * Return value:
 * 	none
 **/
static void ipr_trc_hook(struct ipr_cmnd *ipr_cmd,
			 u8 type, u32 add_data)
{
	struct ipr_trace_entry *trace_entry;
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	trace_entry = &ioa_cfg->trace[ioa_cfg->trace_index++];
	trace_entry->time = jiffies;
	trace_entry->op_code = ipr_cmd->ioarcb.cmd_pkt.cdb[0];
	trace_entry->type = type;
	if (ipr_cmd->ioa_cfg->sis64)
		trace_entry->ata_op_code = ipr_cmd->i.ata_ioadl.regs.command;
	else
		trace_entry->ata_op_code = ipr_cmd->ioarcb.u.add_data.u.regs.command;
	trace_entry->cmd_index = ipr_cmd->cmd_index & 0xff;
	trace_entry->res_handle = ipr_cmd->ioarcb.res_handle;
	trace_entry->u.add_data = add_data;
}
#else
#define ipr_trc_hook(ipr_cmd, type, add_data) do { } while(0)
#endif

/**
 * ipr_reinit_ipr_cmnd - Re-initialize an IPR Cmnd block for reuse
 * @ipr_cmd:	ipr command struct
 *
 * Return value:
 * 	none
 **/
static void ipr_reinit_ipr_cmnd(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
	struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
	struct ipr_ioasa64 *ioasa64 = &ipr_cmd->s.ioasa64;
	dma_addr_t dma_addr = ipr_cmd->dma_addr;

	memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt));
	ioarcb->data_transfer_length = 0;
	ioarcb->read_data_transfer_length = 0;
	ioarcb->ioadl_len = 0;
	ioarcb->read_ioadl_len = 0;

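	/* Point the IOARCB at the IOADL embedded in this command block,
	 * using the 64-bit or 32-bit layout depending on the SIS level.
	 */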
	if (ipr_cmd->ioa_cfg->sis64) {
		ioarcb->u.sis64_addr_data.data_ioadl_addr =
			cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64));
		ioasa64->u.gata.status = 0;
	} else {
		ioarcb->write_ioadl_addr =
			cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl));
		ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
		ioasa->u.gata.status = 0;
	}

	ioasa->hdr.ioasc = 0;
	ioasa->hdr.residual_data_len = 0;
	ipr_cmd->scsi_cmd = NULL;
	ipr_cmd->qc = NULL;
	ipr_cmd->sense_buffer[0] = 0;
	ipr_cmd->dma_use_sg = 0;
}

/**
 * ipr_init_ipr_cmnd - Initialize an IPR Cmnd block
 * @ipr_cmd:	ipr command struct
 *
 * Return value:
 * 	none
 **/
static void ipr_init_ipr_cmnd(struct ipr_cmnd *ipr_cmd)
{
	ipr_reinit_ipr_cmnd(ipr_cmd);
	ipr_cmd->u.scratch = 0;
	ipr_cmd->sibling = NULL;
	init_timer(&ipr_cmd->timer);
}

/**
 * ipr_get_free_ipr_cmnd - Get a free IPR Cmnd block
 * @ioa_cfg:	ioa config struct
 *
 * Return value:
 * 	pointer to ipr command struct
 **/
static
struct ipr_cmnd *ipr_get_free_ipr_cmnd(struct ipr_ioa_cfg *ioa_cfg)
{
	struct ipr_cmnd *ipr_cmd;

	ipr_cmd = list_entry(ioa_cfg->free_q.next, struct ipr_cmnd, queue);
	list_del(&ipr_cmd->queue);
	ipr_init_ipr_cmnd(ipr_cmd);

	return ipr_cmd;
}

/**
 * ipr_mask_and_clear_interrupts - Mask all and clear specified interrupts
 * @ioa_cfg:	ioa config struct
 * @clr_ints:     interrupts to clear
 *
 * This function masks all interrupts on the adapter, then clears the
 * interrupts specified in the mask
 *
 * Return value:
 * 	none
 **/
static void ipr_mask_and_clear_interrupts(struct ipr_ioa_cfg *ioa_cfg,
					  u32 clr_ints)
{
	volatile u32 int_reg;

	/* Stop new interrupts */
	ioa_cfg->allow_interrupts = 0;

	/* Set interrupt mask to stop all new interrupts */
	if (ioa_cfg->sis64)
		writeq(~0, ioa_cfg->regs.set_interrupt_mask_reg);
	else
		writel(~0, ioa_cfg->regs.set_interrupt_mask_reg);

	/* Clear any pending interrupts */
	if (ioa_cfg->sis64)
		writel(~0, ioa_cfg->regs.clr_interrupt_reg);
	writel(clr_ints, ioa_cfg->regs.clr_interrupt_reg32);
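	/* Read back the sense interrupt register so the mask/clear MMIO
	 * writes above are posted to the adapter before returning.
	 */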
	int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
}

/**
 * ipr_save_pcix_cmd_reg - Save PCI-X command register
 * @ioa_cfg:	ioa config struct
 *
 * Return value:
 * 	0 on success / -EIO on failure
 **/
static int ipr_save_pcix_cmd_reg(struct ipr_ioa_cfg *ioa_cfg)
{
	int pcix_cmd_reg = pci_find_capability(ioa_cfg->pdev, PCI_CAP_ID_PCIX);

	if (pcix_cmd_reg == 0)
		return 0;

	if (pci_read_config_word(ioa_cfg->pdev, pcix_cmd_reg + PCI_X_CMD,
				 &ioa_cfg->saved_pcix_cmd_reg) != PCIBIOS_SUCCESSFUL) {
		dev_err(&ioa_cfg->pdev->dev, "Failed to save PCI-X command register\n");
		return -EIO;
	}

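	/* Make sure data parity error recovery (DPERR_E) and relaxed
	 * ordering (ERO) are enabled when the saved value is later restored.
	 */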
	ioa_cfg->saved_pcix_cmd_reg |= PCI_X_CMD_DPERR_E | PCI_X_CMD_ERO;
	return 0;
}

/**
 * ipr_set_pcix_cmd_reg - Setup PCI-X command register
 * @ioa_cfg:	ioa config struct
 *
 * Return value:
 * 	0 on success / -EIO on failure
 **/
static int ipr_set_pcix_cmd_reg(struct ipr_ioa_cfg *ioa_cfg)
{
	int pcix_cmd_reg = pci_find_capability(ioa_cfg->pdev, PCI_CAP_ID_PCIX);

	if (pcix_cmd_reg) {
		if (pci_write_config_word(ioa_cfg->pdev, pcix_cmd_reg + PCI_X_CMD,
					  ioa_cfg->saved_pcix_cmd_reg) != PCIBIOS_SUCCESSFUL) {
			dev_err(&ioa_cfg->pdev->dev, "Failed to setup PCI-X command register\n");
			return -EIO;
		}
	}

	return 0;
}

/**
 * ipr_sata_eh_done - done function for aborted SATA commands
 * @ipr_cmd:	ipr command struct
 *
 * This function is invoked for ops generated to SATA
 * devices which are being aborted.
 *
 * Return value:
 * 	none
 **/
static void ipr_sata_eh_done(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct ata_queued_cmd *qc = ipr_cmd->qc;
	struct ipr_sata_port *sata_port = qc->ap->private_data;

	qc->err_mask |= AC_ERR_OTHER;
	sata_port->ioasa.status |= ATA_BUSY;
	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
	ata_qc_complete(qc);
}

/**
 * ipr_scsi_eh_done - mid-layer done function for aborted ops
 * @ipr_cmd:	ipr command struct
 *
 * This function is invoked by the interrupt handler for
 * ops generated by the SCSI mid-layer which are being aborted.
 *
 * Return value:
 * 	none
 **/
static void ipr_scsi_eh_done(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;

	scsi_cmd->result |= (DID_ERROR << 16);

	scsi_dma_unmap(ipr_cmd->scsi_cmd);
	scsi_cmd->scsi_done(scsi_cmd);
	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
}

/**
 * ipr_fail_all_ops - Fails all outstanding ops.
 * @ioa_cfg:	ioa config struct
 *
 * This function fails all outstanding ops.
 *
 * Return value:
 * 	none
 **/
static void ipr_fail_all_ops(struct ipr_ioa_cfg *ioa_cfg)
{
	struct ipr_cmnd *ipr_cmd, *temp;

	ENTER;
	list_for_each_entry_safe(ipr_cmd, temp, &ioa_cfg->pending_q, queue) {
		list_del(&ipr_cmd->queue);

		ipr_cmd->s.ioasa.hdr.ioasc = cpu_to_be32(IPR_IOASC_IOA_WAS_RESET);
		ipr_cmd->s.ioasa.hdr.ilid = cpu_to_be32(IPR_DRIVER_ILID);

		if (ipr_cmd->scsi_cmd)
			ipr_cmd->done = ipr_scsi_eh_done;
		else if (ipr_cmd->qc)
			ipr_cmd->done = ipr_sata_eh_done;

		ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH, IPR_IOASC_IOA_WAS_RESET);
		del_timer(&ipr_cmd->timer);
		ipr_cmd->done(ipr_cmd);
	}

	LEAVE;
}

/**
 * ipr_send_command -  Send driver initiated requests.
 * @ipr_cmd:		ipr command struct
 *
 * This function sends a command to the adapter using the correct write call.
 * In the case of sis64, calculate the ioarcb size required. Then or in the
 * appropriate bits.
 *
 * Return value:
 * 	none
 **/
static void ipr_send_command(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	dma_addr_t send_dma_addr = ipr_cmd->dma_addr;

	if (ioa_cfg->sis64) {
		/* The default size is 256 bytes */
		send_dma_addr |= 0x1;

		/* If the number of ioadls * size of ioadl > 128 bytes,
		   then use a 512 byte ioarcb */
		if (ipr_cmd->dma_use_sg * sizeof(struct ipr_ioadl64_desc) > 128 )
			send_dma_addr |= 0x4;
		writeq(send_dma_addr, ioa_cfg->regs.ioarrin_reg);
	} else
		writel(send_dma_addr, ioa_cfg->regs.ioarrin_reg);
}

/**
 * ipr_do_req -  Send driver initiated requests.
 * @ipr_cmd:		ipr command struct
 * @done:			done function
 * @timeout_func:	timeout function
 * @timeout:		timeout value
 *
 * This function sends the specified command to the adapter with the
 * timeout given. The done function is invoked on command completion.
 *
 * Return value:
 * 	none
 **/
static void ipr_do_req(struct ipr_cmnd *ipr_cmd,
		       void (*done) (struct ipr_cmnd *),
		       void (*timeout_func) (struct ipr_cmnd *), u32 timeout)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);

	ipr_cmd->done = done;

	ipr_cmd->timer.data = (unsigned long) ipr_cmd;
	ipr_cmd->timer.expires = jiffies + timeout;
	ipr_cmd->timer.function = (void (*)(unsigned long))timeout_func;

	add_timer(&ipr_cmd->timer);

	ipr_trc_hook(ipr_cmd, IPR_TRACE_START, 0);

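	/* Ensure the IOARCB updates above are visible to the adapter before
	 * the command is posted via the IOARRIN register.
	 */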
	mb();

	ipr_send_command(ipr_cmd);
}

/**
 * ipr_internal_cmd_done - Op done function for an internally generated op.
 * @ipr_cmd:	ipr command struct
 *
 * This function is the op done function for an internally generated,
 * blocking op. It simply wakes the sleeping thread.
 *
 * Return value:
 * 	none
 **/
static void ipr_internal_cmd_done(struct ipr_cmnd *ipr_cmd)
{
	if (ipr_cmd->sibling)
		ipr_cmd->sibling = NULL;
	else
		complete(&ipr_cmd->completion);
}

/**
 * ipr_init_ioadl - initialize the ioadl for the correct SIS type
 * @ipr_cmd:	ipr command struct
 * @dma_addr:	dma address
 * @len:	transfer length
 * @flags:	ioadl flag value
 *
 * This function initializes an ioadl in the case where there is only a single
 * descriptor.
 *
 * Return value:
 * 	nothing
 **/
static void ipr_init_ioadl(struct ipr_cmnd *ipr_cmd, dma_addr_t dma_addr,
			   u32 len, int flags)
{
	struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
	struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;

	ipr_cmd->dma_use_sg = 1;

	if (ipr_cmd->ioa_cfg->sis64) {
		ioadl64->flags = cpu_to_be32(flags);
		ioadl64->data_len = cpu_to_be32(len);
		ioadl64->address = cpu_to_be64(dma_addr);

		ipr_cmd->ioarcb.ioadl_len =
		       	cpu_to_be32(sizeof(struct ipr_ioadl64_desc));
		ipr_cmd->ioarcb.data_transfer_length = cpu_to_be32(len);
	} else {
		ioadl->flags_and_data_len = cpu_to_be32(flags | len);
		ioadl->address = cpu_to_be32(dma_addr);

		if (flags == IPR_IOADL_FLAGS_READ_LAST) {
			ipr_cmd->ioarcb.read_ioadl_len =
				cpu_to_be32(sizeof(struct ipr_ioadl_desc));
			ipr_cmd->ioarcb.read_data_transfer_length = cpu_to_be32(len);
		} else {
			ipr_cmd->ioarcb.ioadl_len =
			       	cpu_to_be32(sizeof(struct ipr_ioadl_desc));
			ipr_cmd->ioarcb.data_transfer_length = cpu_to_be32(len);
		}
	}
}

/**
 * ipr_send_blocking_cmd - Send command and sleep on its completion.
 * @ipr_cmd:	ipr command struct
 * @timeout_func:	function to invoke if command times out
 * @timeout:	timeout
 *
 * Return value:
 * 	none
 **/
static void ipr_send_blocking_cmd(struct ipr_cmnd *ipr_cmd,
				  void (*timeout_func) (struct ipr_cmnd *ipr_cmd),
				  u32 timeout)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;

	init_completion(&ipr_cmd->completion);
	ipr_do_req(ipr_cmd, ipr_internal_cmd_done, timeout_func, timeout);

	spin_unlock_irq(ioa_cfg->host->host_lock);
	wait_for_completion(&ipr_cmd->completion);
	spin_lock_irq(ioa_cfg->host->host_lock);
}

/**
 * ipr_send_hcam - Send an HCAM to the adapter.
 * @ioa_cfg:	ioa config struct
 * @type:		HCAM type
 * @hostrcb:	hostrcb struct
 *
 * This function will send a Host Controlled Async command to the adapter.
 * If HCAMs are currently not allowed to be issued to the adapter, it will
 * place the hostrcb on the free queue.
 *
 * Return value:
 * 	none
 **/
static void ipr_send_hcam(struct ipr_ioa_cfg *ioa_cfg, u8 type,
			  struct ipr_hostrcb *hostrcb)
{
	struct ipr_cmnd *ipr_cmd;
	struct ipr_ioarcb *ioarcb;

	if (ioa_cfg->allow_cmds) {
		ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
		list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);
		list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_pending_q);

		ipr_cmd->u.hostrcb = hostrcb;
		ioarcb = &ipr_cmd->ioarcb;

		ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
		ioarcb->cmd_pkt.request_type = IPR_RQTYPE_HCAM;
		ioarcb->cmd_pkt.cdb[0] = IPR_HOST_CONTROLLED_ASYNC;
		ioarcb->cmd_pkt.cdb[1] = type;
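		/* CDB bytes 7 and 8 carry the HCAM buffer length, MSB first */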
		ioarcb->cmd_pkt.cdb[7] = (sizeof(hostrcb->hcam) >> 8) & 0xff;
		ioarcb->cmd_pkt.cdb[8] = sizeof(hostrcb->hcam) & 0xff;

		ipr_init_ioadl(ipr_cmd, hostrcb->hostrcb_dma,
			       sizeof(hostrcb->hcam), IPR_IOADL_FLAGS_READ_LAST);

		if (type == IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE)
			ipr_cmd->done = ipr_process_ccn;
		else
			ipr_cmd->done = ipr_process_error;

		ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_IOA_RES_ADDR);

		mb();

		ipr_send_command(ipr_cmd);
	} else {
		list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
	}
}

/**
 * ipr_update_ata_class - Update the ata class in the resource entry
 * @res:	resource entry struct
 * @proto:	cfgte device bus protocol value
 *
 * Return value:
 * 	none
 **/
static void ipr_update_ata_class(struct ipr_resource_entry *res, unsigned int proto)
{
	switch(proto) {
	case IPR_PROTO_SATA:
	case IPR_PROTO_SAS_STP:
		res->ata_class = ATA_DEV_ATA;
		break;
	case IPR_PROTO_SATA_ATAPI:
	case IPR_PROTO_SAS_STP_ATAPI:
		res->ata_class = ATA_DEV_ATAPI;
		break;
	default:
		res->ata_class = ATA_DEV_UNKNOWN;
		break;
	};
}

/**
 * ipr_init_res_entry - Initialize a resource entry struct.
 * @res:	resource entry struct
 * @cfgtew:	config table entry wrapper struct
 *
 * Return value:
 * 	none
 **/
static void ipr_init_res_entry(struct ipr_resource_entry *res,
			       struct ipr_config_table_entry_wrapper *cfgtew)
{
	int found = 0;
	unsigned int proto;
	struct ipr_ioa_cfg *ioa_cfg = res->ioa_cfg;
	struct ipr_resource_entry *gscsi_res = NULL;

	res->needs_sync_complete = 0;
	res->in_erp = 0;
	res->add_to_ml = 0;
	res->del_from_ml = 0;
	res->resetting_device = 0;
	res->sdev = NULL;
	res->sata_port = NULL;

	if (ioa_cfg->sis64) {
		proto = cfgtew->u.cfgte64->proto;
		res->res_flags = cfgtew->u.cfgte64->res_flags;
		res->qmodel = IPR_QUEUEING_MODEL64(res);
		res->type = cfgtew->u.cfgte64->res_type;

		memcpy(res->res_path, &cfgtew->u.cfgte64->res_path,
			sizeof(res->res_path));

		res->bus = 0;
		memcpy(&res->dev_lun.scsi_lun, &cfgtew->u.cfgte64->lun,
			sizeof(res->dev_lun.scsi_lun));
		res->lun = scsilun_to_int(&res->dev_lun);

		if (res->type == IPR_RES_TYPE_GENERIC_SCSI) {
			list_for_each_entry(gscsi_res, &ioa_cfg->used_res_q, queue) {
				if (gscsi_res->dev_id == cfgtew->u.cfgte64->dev_id) {
					found = 1;
					res->target = gscsi_res->target;
					break;
				}
			}
			if (!found) {
				res->target = find_first_zero_bit(ioa_cfg->target_ids,
								  ioa_cfg->max_devs_supported);
				set_bit(res->target, ioa_cfg->target_ids);
			}
		} else if (res->type == IPR_RES_TYPE_IOAFP) {
			res->bus = IPR_IOAFP_VIRTUAL_BUS;
			res->target = 0;
		} else if (res->type == IPR_RES_TYPE_ARRAY) {
			res->bus = IPR_ARRAY_VIRTUAL_BUS;
			res->target = find_first_zero_bit(ioa_cfg->array_ids,
							  ioa_cfg->max_devs_supported);
			set_bit(res->target, ioa_cfg->array_ids);
		} else if (res->type == IPR_RES_TYPE_VOLUME_SET) {
			res->bus = IPR_VSET_VIRTUAL_BUS;
			res->target = find_first_zero_bit(ioa_cfg->vset_ids,
							  ioa_cfg->max_devs_supported);
			set_bit(res->target, ioa_cfg->vset_ids);
		} else {
			res->target = find_first_zero_bit(ioa_cfg->target_ids,
							  ioa_cfg->max_devs_supported);
			set_bit(res->target, ioa_cfg->target_ids);
		}
	} else {
		proto = cfgtew->u.cfgte->proto;
		res->qmodel = IPR_QUEUEING_MODEL(res);
		res->flags = cfgtew->u.cfgte->flags;
		if (res->flags & IPR_IS_IOA_RESOURCE)
			res->type = IPR_RES_TYPE_IOAFP;
		else
			res->type = cfgtew->u.cfgte->rsvd_subtype & 0x0f;

		res->bus = cfgtew->u.cfgte->res_addr.bus;
		res->target = cfgtew->u.cfgte->res_addr.target;
		res->lun = cfgtew->u.cfgte->res_addr.lun;
		res->lun_wwn = get_unaligned_be64(cfgtew->u.cfgte->lun_wwn);
	}

	ipr_update_ata_class(res, proto);
}

/**
 * ipr_is_same_device - Determine if two devices are the same.
 * @res:	resource entry struct
 * @cfgtew:	config table entry wrapper struct
 *
 * Return value:
 * 	1 if the devices are the same / 0 otherwise
 **/
static int ipr_is_same_device(struct ipr_resource_entry *res,
			      struct ipr_config_table_entry_wrapper *cfgtew)
{
	if (res->ioa_cfg->sis64) {
		if (!memcmp(&res->dev_id, &cfgtew->u.cfgte64->dev_id,
					sizeof(cfgtew->u.cfgte64->dev_id)) &&
			!memcmp(&res->dev_lun.scsi_lun, &cfgtew->u.cfgte64->lun,
					sizeof(cfgtew->u.cfgte64->lun))) {
			return 1;
		}
	} else {
		if (res->bus == cfgtew->u.cfgte->res_addr.bus &&
		    res->target == cfgtew->u.cfgte->res_addr.target &&
		    res->lun == cfgtew->u.cfgte->res_addr.lun)
			return 1;
	}

	return 0;
}

/**
 * ipr_format_res_path - Format the resource path for printing.
 * @res_path:	resource path
 * @buffer:	buffer to hold the formatted string
 * @len:	length of the buffer
 *
 * Return value:
 * 	pointer to buffer
 **/
static char *ipr_format_res_path(u8 *res_path, char *buffer, int len)
{
	int i;
	char *p = buffer;

	*p = '\0';
	p += snprintf(p, buffer + len - p, "%02X", res_path[0]);
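	/* Each remaining element is appended as "-XX"; the path is terminated by 0xff */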
	for (i = 1; res_path[i] != 0xff && ((i * 3) < len); i++)
		p += snprintf(p, buffer + len - p, "-%02X", res_path[i]);

	return buffer;
}

/**
 * ipr_update_res_entry - Update the resource entry.
 * @res:	resource entry struct
 * @cfgtew:	config table entry wrapper struct
 *
 * Return value:
 *      none
 **/
static void ipr_update_res_entry(struct ipr_resource_entry *res,
				 struct ipr_config_table_entry_wrapper *cfgtew)
{
	char buffer[IPR_MAX_RES_PATH_LENGTH];
	unsigned int proto;
	int new_path = 0;

	if (res->ioa_cfg->sis64) {
		res->flags = cfgtew->u.cfgte64->flags;
		res->res_flags = cfgtew->u.cfgte64->res_flags;
		res->type = cfgtew->u.cfgte64->res_type;

		memcpy(&res->std_inq_data, &cfgtew->u.cfgte64->std_inq_data,
			sizeof(struct ipr_std_inq_data));

		res->qmodel = IPR_QUEUEING_MODEL64(res);
		proto = cfgtew->u.cfgte64->proto;
		res->res_handle = cfgtew->u.cfgte64->res_handle;
		res->dev_id = cfgtew->u.cfgte64->dev_id;

		memcpy(&res->dev_lun.scsi_lun, &cfgtew->u.cfgte64->lun,
			sizeof(res->dev_lun.scsi_lun));

		if (memcmp(res->res_path, &cfgtew->u.cfgte64->res_path,
					sizeof(res->res_path))) {
			memcpy(res->res_path, &cfgtew->u.cfgte64->res_path,
				sizeof(res->res_path));
			new_path = 1;
		}

		if (res->sdev && new_path)
			sdev_printk(KERN_INFO, res->sdev, "Resource path: %s\n",
				    ipr_format_res_path(res->res_path, buffer,
							sizeof(buffer)));
	} else {
		res->flags = cfgtew->u.cfgte->flags;
		if (res->flags & IPR_IS_IOA_RESOURCE)
			res->type = IPR_RES_TYPE_IOAFP;
		else
			res->type = cfgtew->u.cfgte->rsvd_subtype & 0x0f;

		memcpy(&res->std_inq_data, &cfgtew->u.cfgte->std_inq_data,
			sizeof(struct ipr_std_inq_data));

		res->qmodel = IPR_QUEUEING_MODEL(res);
		proto = cfgtew->u.cfgte->proto;
		res->res_handle = cfgtew->u.cfgte->res_handle;
	}

	ipr_update_ata_class(res, proto);
}

/**
 * ipr_clear_res_target - Clear the bit in the bit map representing the target
 * 			  for the resource.
 * @res:	resource entry struct
 *
 * Return value:
 *      none
 **/
static void ipr_clear_res_target(struct ipr_resource_entry *res)
{
	struct ipr_resource_entry *gscsi_res = NULL;
	struct ipr_ioa_cfg *ioa_cfg = res->ioa_cfg;

	if (!ioa_cfg->sis64)
		return;

	if (res->bus == IPR_ARRAY_VIRTUAL_BUS)
		clear_bit(res->target, ioa_cfg->array_ids);
	else if (res->bus == IPR_VSET_VIRTUAL_BUS)
		clear_bit(res->target, ioa_cfg->vset_ids);
	else if (res->bus == 0 && res->type == IPR_RES_TYPE_GENERIC_SCSI) {
		list_for_each_entry(gscsi_res, &ioa_cfg->used_res_q, queue)
			if (gscsi_res->dev_id == res->dev_id && gscsi_res != res)
				return;
		clear_bit(res->target, ioa_cfg->target_ids);

	} else if (res->bus == 0)
		clear_bit(res->target, ioa_cfg->target_ids);
}

/**
 * ipr_handle_config_change - Handle a config change from the adapter
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb
 *
 * Return value:
 * 	none
 **/
static void ipr_handle_config_change(struct ipr_ioa_cfg *ioa_cfg,
				     struct ipr_hostrcb *hostrcb)
{
	struct ipr_resource_entry *res = NULL;
	struct ipr_config_table_entry_wrapper cfgtew;
	__be32 cc_res_handle;

	u32 is_ndn = 1;

	if (ioa_cfg->sis64) {
		cfgtew.u.cfgte64 = &hostrcb->hcam.u.ccn.u.cfgte64;
		cc_res_handle = cfgtew.u.cfgte64->res_handle;
	} else {
		cfgtew.u.cfgte = &hostrcb->hcam.u.ccn.u.cfgte;
		cc_res_handle = cfgtew.u.cfgte->res_handle;
	}

	list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
		if (res->res_handle == cc_res_handle) {
			is_ndn = 0;
			break;
		}
	}

	if (is_ndn) {
		if (list_empty(&ioa_cfg->free_res_q)) {
			ipr_send_hcam(ioa_cfg,
				      IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE,
				      hostrcb);
			return;
		}

		res = list_entry(ioa_cfg->free_res_q.next,
				 struct ipr_resource_entry, queue);

		list_del(&res->queue);
		ipr_init_res_entry(res, &cfgtew);
		list_add_tail(&res->queue, &ioa_cfg->used_res_q);
	}

	ipr_update_res_entry(res, &cfgtew);

	if (hostrcb->hcam.notify_type == IPR_HOST_RCB_NOTIF_TYPE_REM_ENTRY) {
		if (res->sdev) {
			res->del_from_ml = 1;
			res->res_handle = IPR_INVALID_RES_HANDLE;
			if (ioa_cfg->allow_ml_add_del)
				schedule_work(&ioa_cfg->work_q);
		} else {
			ipr_clear_res_target(res);
			list_move_tail(&res->queue, &ioa_cfg->free_res_q);
		}
	} else if (!res->sdev || res->del_from_ml) {
		res->add_to_ml = 1;
		if (ioa_cfg->allow_ml_add_del)
			schedule_work(&ioa_cfg->work_q);
	}

	ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
}

/**
 * ipr_process_ccn - Op done function for a CCN.
 * @ipr_cmd:	ipr command struct
 *
 * This function is the op done function for a configuration
 * change notification host controlled async from the adapter.
 *
 * Return value:
 * 	none
 **/
static void ipr_process_ccn(struct ipr_cmnd *ipr_cmd)
{
	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
	struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb;
	u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);

	list_del(&hostrcb->queue);
	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);

	if (ioasc) {
		if (ioasc != IPR_IOASC_IOA_WAS_RESET)
			dev_err(&ioa_cfg->pdev->dev,
				"Host RCB failed with IOASC: 0x%08X\n", ioasc);

		ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
	} else {
		ipr_handle_config_change(ioa_cfg, hostrcb);
	}
}

/**
 * strip_and_pad_whitespace - Strip and pad trailing whitespace.
 * @i:		index into buffer
 * @buf:		string to modify
 *
 * This function will strip all trailing whitespace, pad the end
 * of the string with a single space, and NULL terminate the string.
 *
 * Return value:
 * 	new length of string
 **/
static int strip_and_pad_whitespace(int i, char *buf)
{
	while (i && buf[i] == ' ')
		i--;
	buf[i+1] = ' ';
	buf[i+2] = '\0';
	return i + 2;
}

/**
 * ipr_log_vpd_compact - Log the passed extended VPD compactly.
 * @prefix:		string to print at start of printk
 * @hostrcb:	hostrcb pointer
 * @vpd:		vendor/product id/sn struct
 *
 * Return value:
 * 	none
 **/
static void ipr_log_vpd_compact(char *prefix, struct ipr_hostrcb *hostrcb,
				struct ipr_vpd *vpd)
{
	char buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN + IPR_SERIAL_NUM_LEN + 3];
	int i = 0;

	memcpy(buffer, vpd->vpids.vendor_id, IPR_VENDOR_ID_LEN);
	i = strip_and_pad_whitespace(IPR_VENDOR_ID_LEN - 1, buffer);

	memcpy(&buffer[i], vpd->vpids.product_id, IPR_PROD_ID_LEN);
	i = strip_and_pad_whitespace(i + IPR_PROD_ID_LEN - 1, buffer);

	memcpy(&buffer[i], vpd->sn, IPR_SERIAL_NUM_LEN);
	buffer[IPR_SERIAL_NUM_LEN + i] = '\0';

	ipr_hcam_err(hostrcb, "%s VPID/SN: %s\n", prefix, buffer);
}

/**
 * ipr_log_vpd - Log the passed VPD to the error log.
 * @vpd:		vendor/product id/sn struct
 *
 * Return value:
 * 	none
 **/
static void ipr_log_vpd(struct ipr_vpd *vpd)
{
	char buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN
		    + IPR_SERIAL_NUM_LEN];

	memcpy(buffer, vpd->vpids.vendor_id, IPR_VENDOR_ID_LEN);
	memcpy(buffer + IPR_VENDOR_ID_LEN, vpd->vpids.product_id,
	       IPR_PROD_ID_LEN);
	buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN] = '\0';
	ipr_err("Vendor/Product ID: %s\n", buffer);

	memcpy(buffer, vpd->sn, IPR_SERIAL_NUM_LEN);
	buffer[IPR_SERIAL_NUM_LEN] = '\0';
	ipr_err("    Serial Number: %s\n", buffer);
}

/**
 * ipr_log_ext_vpd_compact - Log the passed extended VPD compactly.
 * @prefix:		string to print at start of printk
 * @hostrcb:	hostrcb pointer
 * @vpd:		vendor/product id/sn/wwn struct
 *
 * Return value:
 * 	none
 **/
static void ipr_log_ext_vpd_compact(char *prefix, struct ipr_hostrcb *hostrcb,
				    struct ipr_ext_vpd *vpd)
{
	ipr_log_vpd_compact(prefix, hostrcb, &vpd->vpd);
	ipr_hcam_err(hostrcb, "%s WWN: %08X%08X\n", prefix,
		     be32_to_cpu(vpd->wwid[0]), be32_to_cpu(vpd->wwid[1]));
}

/**
 * ipr_log_ext_vpd - Log the passed extended VPD to the error log.
 * @vpd:		vendor/product id/sn/wwn struct
 *
 * Return value:
 * 	none
 **/
static void ipr_log_ext_vpd(struct ipr_ext_vpd *vpd)
{
	ipr_log_vpd(&vpd->vpd);
	ipr_err("    WWN: %08X%08X\n", be32_to_cpu(vpd->wwid[0]),
		be32_to_cpu(vpd->wwid[1]));
}

/**
 * ipr_log_enhanced_cache_error - Log a cache error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * Return value:
 * 	none
 **/
static void ipr_log_enhanced_cache_error(struct ipr_ioa_cfg *ioa_cfg,
					 struct ipr_hostrcb *hostrcb)
{
	struct ipr_hostrcb_type_12_error *error;

	if (ioa_cfg->sis64)
		error = &hostrcb->hcam.u.error64.u.type_12_error;
	else
		error = &hostrcb->hcam.u.error.u.type_12_error;

	ipr_err("-----Current Configuration-----\n");
	ipr_err("Cache Directory Card Information:\n");
	ipr_log_ext_vpd(&error->ioa_vpd);
	ipr_err("Adapter Card Information:\n");
	ipr_log_ext_vpd(&error->cfc_vpd);

	ipr_err("-----Expected Configuration-----\n");
	ipr_err("Cache Directory Card Information:\n");
	ipr_log_ext_vpd(&error->ioa_last_attached_to_cfc_vpd);
	ipr_err("Adapter Card Information:\n");
	ipr_log_ext_vpd(&error->cfc_last_attached_to_ioa_vpd);

	ipr_err("Additional IOA Data: %08X %08X %08X\n",
		     be32_to_cpu(error->ioa_data[0]),
		     be32_to_cpu(error->ioa_data[1]),
		     be32_to_cpu(error->ioa_data[2]));
}

/**
 * ipr_log_cache_error - Log a cache error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * Return value:
 * 	none
 **/
static void ipr_log_cache_error(struct ipr_ioa_cfg *ioa_cfg,
				struct ipr_hostrcb *hostrcb)
{
	struct ipr_hostrcb_type_02_error *error =
		&hostrcb->hcam.u.error.u.type_02_error;

	ipr_err("-----Current Configuration-----\n");
	ipr_err("Cache Directory Card Information:\n");
	ipr_log_vpd(&error->ioa_vpd);
	ipr_err("Adapter Card Information:\n");
	ipr_log_vpd(&error->cfc_vpd);

	ipr_err("-----Expected Configuration-----\n");
	ipr_err("Cache Directory Card Information:\n");
	ipr_log_vpd(&error->ioa_last_attached_to_cfc_vpd);
	ipr_err("Adapter Card Information:\n");
	ipr_log_vpd(&error->cfc_last_attached_to_ioa_vpd);

	ipr_err("Additional IOA Data: %08X %08X %08X\n",
		     be32_to_cpu(error->ioa_data[0]),
		     be32_to_cpu(error->ioa_data[1]),
		     be32_to_cpu(error->ioa_data[2]));
}

/**
 * ipr_log_enhanced_config_error - Log a configuration error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * Return value:
 * 	none
 **/
static void ipr_log_enhanced_config_error(struct ipr_ioa_cfg *ioa_cfg,
					  struct ipr_hostrcb *hostrcb)
{
	int errors_logged, i;
	struct ipr_hostrcb_device_data_entry_enhanced *dev_entry;
	struct ipr_hostrcb_type_13_error *error;

	error = &hostrcb->hcam.u.error.u.type_13_error;
	errors_logged = be32_to_cpu(error->errors_logged);

	ipr_err("Device Errors Detected/Logged: %d/%d\n",
		be32_to_cpu(error->errors_detected), errors_logged);

	dev_entry = error->dev;

	for (i = 0; i < errors_logged; i++, dev_entry++) {
		ipr_err_separator;

		ipr_phys_res_err(ioa_cfg, dev_entry->dev_res_addr, "Device %d", i + 1);
		ipr_log_ext_vpd(&dev_entry->vpd);

		ipr_err("-----New Device Information-----\n");
		ipr_log_ext_vpd(&dev_entry->new_vpd);

		ipr_err("Cache Directory Card Information:\n");
		ipr_log_ext_vpd(&dev_entry->ioa_last_with_dev_vpd);

		ipr_err("Adapter Card Information:\n");
		ipr_log_ext_vpd(&dev_entry->cfc_last_with_dev_vpd);
	}
}

/**
 * ipr_log_sis64_config_error - Log a device error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * Return value:
 * 	none
 **/
static void ipr_log_sis64_config_error(struct ipr_ioa_cfg *ioa_cfg,
				       struct ipr_hostrcb *hostrcb)
{
	int errors_logged, i;
	struct ipr_hostrcb64_device_data_entry_enhanced *dev_entry;
	struct ipr_hostrcb_type_23_error *error;
	char buffer[IPR_MAX_RES_PATH_LENGTH];

	error = &hostrcb->hcam.u.error64.u.type_23_error;
	errors_logged = be32_to_cpu(error->errors_logged);

	ipr_err("Device Errors Detected/Logged: %d/%d\n",
		be32_to_cpu(error->errors_detected), errors_logged);

	dev_entry = error->dev;

	for (i = 0; i < errors_logged; i++, dev_entry++) {
		ipr_err_separator;

		ipr_err("Device %d : %s", i + 1,
			 ipr_format_res_path(dev_entry->res_path, buffer,
					     sizeof(buffer)));
		ipr_log_ext_vpd(&dev_entry->vpd);

		ipr_err("-----New Device Information-----\n");
		ipr_log_ext_vpd(&dev_entry->new_vpd);

		ipr_err("Cache Directory Card Information:\n");
		ipr_log_ext_vpd(&dev_entry->ioa_last_with_dev_vpd);

		ipr_err("Adapter Card Information:\n");
		ipr_log_ext_vpd(&dev_entry->cfc_last_with_dev_vpd);
	}
}

/**
 * ipr_log_config_error - Log a configuration error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * Return value:
 * 	none
 **/
static void ipr_log_config_error(struct ipr_ioa_cfg *ioa_cfg,
				 struct ipr_hostrcb *hostrcb)
{
	int errors_logged, i;
	struct ipr_hostrcb_device_data_entry *dev_entry;
	struct ipr_hostrcb_type_03_error *error;

	error = &hostrcb->hcam.u.error.u.type_03_error;
	errors_logged = be32_to_cpu(error->errors_logged);

	ipr_err("Device Errors Detected/Logged: %d/%d\n",
		be32_to_cpu(error->errors_detected), errors_logged);

	dev_entry = error->dev;

	for (i = 0; i < errors_logged; i++, dev_entry++) {
		ipr_err_separator;

		ipr_phys_res_err(ioa_cfg, dev_entry->dev_res_addr, "Device %d", i + 1);
		ipr_log_vpd(&dev_entry->vpd);

		ipr_err("-----New Device Information-----\n");
		ipr_log_vpd(&dev_entry->new_vpd);

		ipr_err("Cache Directory Card Information:\n");
		ipr_log_vpd(&dev_entry->ioa_last_with_dev_vpd);

		ipr_err("Adapter Card Information:\n");
		ipr_log_vpd(&dev_entry->cfc_last_with_dev_vpd);

		ipr_err("Additional IOA Data: %08X %08X %08X %08X %08X\n",
			be32_to_cpu(dev_entry->ioa_data[0]),
			be32_to_cpu(dev_entry->ioa_data[1]),
			be32_to_cpu(dev_entry->ioa_data[2]),
			be32_to_cpu(dev_entry->ioa_data[3]),
			be32_to_cpu(dev_entry->ioa_data[4]));
	}
}

/**
 * ipr_log_enhanced_array_error - Log an array configuration error.
 * @ioa_cfg:	ioa config struct
 * @hostrcb:	hostrcb struct
 *
 * Return value:
 * 	none
 **/
static void ipr_log_enhanced_array_error(struct ipr_ioa_cfg *ioa_cfg,
					 struct ipr_hostrcb *hostrcb)
{
	int i, num_entries;
	struct ipr_hostrcb_type_14_error *error;
	struct ipr_hostrcb_array_data_entry_enhanced *array_entry;
1656	const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };
1657
1658	error = &hostrcb->hcam.u.error.u.type_14_error;
1659
1660	ipr_err_separator;
1661
1662	ipr_err("RAID %s Array Configuration: %d:%d:%d:%d\n",
1663		error->protection_level,
1664		ioa_cfg->host->host_no,
1665		error->last_func_vset_res_addr.bus,
1666		error->last_func_vset_res_addr.target,
1667		error->last_func_vset_res_addr.lun);
1668
1669	ipr_err_separator;
1670
1671	array_entry = error->array_member;
1672	num_entries = min_t(u32, be32_to_cpu(error->num_entries),
1673			    ARRAY_SIZE(error->array_member));
1674
1675	for (i = 0; i < num_entries; i++, array_entry++) {
1676		if (!memcmp(array_entry->vpd.vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
1677			continue;
1678
1679		if (be32_to_cpu(error->exposed_mode_adn) == i)
1680			ipr_err("Exposed Array Member %d:\n", i);
1681		else
1682			ipr_err("Array Member %d:\n", i);
1683
1684		ipr_log_ext_vpd(&array_entry->vpd);
1685		ipr_phys_res_err(ioa_cfg, array_entry->dev_res_addr, "Current Location");
1686		ipr_phys_res_err(ioa_cfg, array_entry->expected_dev_res_addr,
1687				 "Expected Location");
1688
1689		ipr_err_separator;
1690	}
1691}
1692
1693/**
1694 * ipr_log_array_error - Log an array configuration error.
1695 * @ioa_cfg:	ioa config struct
1696 * @hostrcb:	hostrcb struct
1697 *
1698 * Return value:
1699 * 	none
1700 **/
1701static void ipr_log_array_error(struct ipr_ioa_cfg *ioa_cfg,
1702				struct ipr_hostrcb *hostrcb)
1703{
1704	int i;
1705	struct ipr_hostrcb_type_04_error *error;
1706	struct ipr_hostrcb_array_data_entry *array_entry;
1707	const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };
1708
1709	error = &hostrcb->hcam.u.error.u.type_04_error;
1710
1711	ipr_err_separator;
1712
1713	ipr_err("RAID %s Array Configuration: %d:%d:%d:%d\n",
1714		error->protection_level,
1715		ioa_cfg->host->host_no,
1716		error->last_func_vset_res_addr.bus,
1717		error->last_func_vset_res_addr.target,
1718		error->last_func_vset_res_addr.lun);
1719
1720	ipr_err_separator;
1721
1722	array_entry = error->array_member;
1723
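	/*
	 * The type 04 overlay stores its 18 array members in two fixed
	 * arrays (assumed to be 10 entries in array_member and 8 more in
	 * array_member2), which is why the walk pointer switches to
	 * array_member2 after index 9 at the bottom of this loop.
	 */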
1724	for (i = 0; i < 18; i++) {
1725		if (!memcmp(array_entry->vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
1726			continue;
1727
1728		if (be32_to_cpu(error->exposed_mode_adn) == i)
1729			ipr_err("Exposed Array Member %d:\n", i);
1730		else
1731			ipr_err("Array Member %d:\n", i);
1732
1733		ipr_log_vpd(&array_entry->vpd);
1734
1735		ipr_phys_res_err(ioa_cfg, array_entry->dev_res_addr, "Current Location");
1736		ipr_phys_res_err(ioa_cfg, array_entry->expected_dev_res_addr,
1737				 "Expected Location");
1738
1739		ipr_err_separator;
1740
1741		if (i == 9)
1742			array_entry = error->array_member2;
1743		else
1744			array_entry++;
1745	}
1746}
1747
1748/**
1749 * ipr_log_hex_data - Log additional hex IOA error data.
1750 * @ioa_cfg:	ioa config struct
1751 * @data:		IOA error data
1752 * @len:		data length in bytes
1753 *
1754 * Return value:
1755 * 	none
1756 **/
1757static void ipr_log_hex_data(struct ipr_ioa_cfg *ioa_cfg, u32 *data, int len)
1758{
1759	int i;
1760
1761	if (len == 0)
1762		return;
1763
1764	if (ioa_cfg->log_level <= IPR_DEFAULT_LOG_LEVEL)
1765		len = min_t(int, len, IPR_DEFAULT_MAX_ERROR_DUMP);
1766
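	/* i indexes 32-bit words; each line prints four words (16 bytes),
	 * labelled with the starting byte offset.
	 */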
1767	for (i = 0; i < len / 4; i += 4) {
1768		ipr_err("%08X: %08X %08X %08X %08X\n", i*4,
1769			be32_to_cpu(data[i]),
1770			be32_to_cpu(data[i+1]),
1771			be32_to_cpu(data[i+2]),
1772			be32_to_cpu(data[i+3]));
1773	}
1774}
1775
1776/**
1777 * ipr_log_enhanced_dual_ioa_error - Log an enhanced dual adapter error.
1778 * @ioa_cfg:	ioa config struct
1779 * @hostrcb:	hostrcb struct
1780 *
1781 * Return value:
1782 * 	none
1783 **/
1784static void ipr_log_enhanced_dual_ioa_error(struct ipr_ioa_cfg *ioa_cfg,
1785					    struct ipr_hostrcb *hostrcb)
1786{
1787	struct ipr_hostrcb_type_17_error *error;
1788
1789	if (ioa_cfg->sis64)
1790		error = &hostrcb->hcam.u.error64.u.type_17_error;
1791	else
1792		error = &hostrcb->hcam.u.error.u.type_17_error;
1793
1794	error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
1795	strim(error->failure_reason);
1796
1797	ipr_hcam_err(hostrcb, "%s [PRC: %08X]\n", error->failure_reason,
1798		     be32_to_cpu(hostrcb->hcam.u.error.prc));
1799	ipr_log_ext_vpd_compact("Remote IOA", hostrcb, &error->vpd);
1800	ipr_log_hex_data(ioa_cfg, error->data,
1801			 be32_to_cpu(hostrcb->hcam.length) -
1802			 (offsetof(struct ipr_hostrcb_error, u) +
1803			  offsetof(struct ipr_hostrcb_type_17_error, data)));
1804}
1805
1806/**
1807 * ipr_log_dual_ioa_error - Log a dual adapter error.
1808 * @ioa_cfg:	ioa config struct
1809 * @hostrcb:	hostrcb struct
1810 *
1811 * Return value:
1812 * 	none
1813 **/
1814static void ipr_log_dual_ioa_error(struct ipr_ioa_cfg *ioa_cfg,
1815				   struct ipr_hostrcb *hostrcb)
1816{
1817	struct ipr_hostrcb_type_07_error *error;
1818
1819	error = &hostrcb->hcam.u.error.u.type_07_error;
1820	error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
1821	strim(error->failure_reason);
1822
1823	ipr_hcam_err(hostrcb, "%s [PRC: %08X]\n", error->failure_reason,
1824		     be32_to_cpu(hostrcb->hcam.u.error.prc));
1825	ipr_log_vpd_compact("Remote IOA", hostrcb, &error->vpd);
1826	ipr_log_hex_data(ioa_cfg, error->data,
1827			 be32_to_cpu(hostrcb->hcam.length) -
1828			 (offsetof(struct ipr_hostrcb_error, u) +
1829			  offsetof(struct ipr_hostrcb_type_07_error, data)));
1830}
1831
1832static const struct {
1833	u8 active;
1834	char *desc;
1835} path_active_desc[] = {
1836	{ IPR_PATH_NO_INFO, "Path" },
1837	{ IPR_PATH_ACTIVE, "Active path" },
1838	{ IPR_PATH_NOT_ACTIVE, "Inactive path" }
1839};
1840
1841static const struct {
1842	u8 state;
1843	char *desc;
1844} path_state_desc[] = {
1845	{ IPR_PATH_STATE_NO_INFO, "has no path state information available" },
1846	{ IPR_PATH_HEALTHY, "is healthy" },
1847	{ IPR_PATH_DEGRADED, "is degraded" },
1848	{ IPR_PATH_FAILED, "is failed" }
1849};
1850
1851/**
1852 * ipr_log_fabric_path - Log a fabric path error
1853 * @hostrcb:	hostrcb struct
1854 * @fabric:		fabric descriptor
1855 *
1856 * Return value:
1857 * 	none
1858 **/
1859static void ipr_log_fabric_path(struct ipr_hostrcb *hostrcb,
1860				struct ipr_hostrcb_fabric_desc *fabric)
1861{
1862	int i, j;
1863	u8 path_state = fabric->path_state;
1864	u8 active = path_state & IPR_PATH_ACTIVE_MASK;
1865	u8 state = path_state & IPR_PATH_STATE_MASK;
1866
1867	for (i = 0; i < ARRAY_SIZE(path_active_desc); i++) {
1868		if (path_active_desc[i].active != active)
1869			continue;
1870
1871		for (j = 0; j < ARRAY_SIZE(path_state_desc); j++) {
1872			if (path_state_desc[j].state != state)
1873				continue;
1874
1875			if (fabric->cascaded_expander == 0xff && fabric->phy == 0xff) {
1876				ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d\n",
1877					     path_active_desc[i].desc, path_state_desc[j].desc,
1878					     fabric->ioa_port);
1879			} else if (fabric->cascaded_expander == 0xff) {
1880				ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Phy=%d\n",
1881					     path_active_desc[i].desc, path_state_desc[j].desc,
1882					     fabric->ioa_port, fabric->phy);
1883			} else if (fabric->phy == 0xff) {
1884				ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Cascade=%d\n",
1885					     path_active_desc[i].desc, path_state_desc[j].desc,
1886					     fabric->ioa_port, fabric->cascaded_expander);
1887			} else {
1888				ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Cascade=%d, Phy=%d\n",
1889					     path_active_desc[i].desc, path_state_desc[j].desc,
1890					     fabric->ioa_port, fabric->cascaded_expander, fabric->phy);
1891			}
1892			return;
1893		}
1894	}
1895
1896	ipr_err("Path state=%02X IOA Port=%d Cascade=%d Phy=%d\n", path_state,
1897		fabric->ioa_port, fabric->cascaded_expander, fabric->phy);
1898}
1899
1900/**
1901 * ipr_log64_fabric_path - Log a fabric path error
1902 * @hostrcb:	hostrcb struct
1903 * @fabric:		fabric descriptor
1904 *
1905 * Return value:
1906 * 	none
1907 **/
1908static void ipr_log64_fabric_path(struct ipr_hostrcb *hostrcb,
1909				  struct ipr_hostrcb64_fabric_desc *fabric)
1910{
1911	int i, j;
1912	u8 path_state = fabric->path_state;
1913	u8 active = path_state & IPR_PATH_ACTIVE_MASK;
1914	u8 state = path_state & IPR_PATH_STATE_MASK;
1915	char buffer[IPR_MAX_RES_PATH_LENGTH];
1916
1917	for (i = 0; i < ARRAY_SIZE(path_active_desc); i++) {
1918		if (path_active_desc[i].active != active)
1919			continue;
1920
1921		for (j = 0; j < ARRAY_SIZE(path_state_desc); j++) {
1922			if (path_state_desc[j].state != state)
1923				continue;
1924
1925			ipr_hcam_err(hostrcb, "%s %s: Resource Path=%s\n",
1926				     path_active_desc[i].desc, path_state_desc[j].desc,
1927				     ipr_format_res_path(fabric->res_path, buffer,
1928							 sizeof(buffer)));
1929			return;
1930		}
1931	}
1932
1933	ipr_err("Path state=%02X Resource Path=%s\n", path_state,
1934		ipr_format_res_path(fabric->res_path, buffer, sizeof(buffer)));
1935}
1936
1937static const struct {
1938	u8 type;
1939	char *desc;
1940} path_type_desc[] = {
1941	{ IPR_PATH_CFG_IOA_PORT, "IOA port" },
1942	{ IPR_PATH_CFG_EXP_PORT, "Expander port" },
1943	{ IPR_PATH_CFG_DEVICE_PORT, "Device port" },
1944	{ IPR_PATH_CFG_DEVICE_LUN, "Device LUN" }
1945};
1946
1947static const struct {
1948	u8 status;
1949	char *desc;
1950} path_status_desc[] = {
1951	{ IPR_PATH_CFG_NO_PROB, "Functional" },
1952	{ IPR_PATH_CFG_DEGRADED, "Degraded" },
1953	{ IPR_PATH_CFG_FAILED, "Failed" },
1954	{ IPR_PATH_CFG_SUSPECT, "Suspect" },
1955	{ IPR_PATH_NOT_DETECTED, "Missing" },
1956	{ IPR_PATH_INCORRECT_CONN, "Incorrectly connected" }
1957};
1958
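/* Printable link rates, indexed by the masked link_rate field of a path
 * element; values 0x8 and 0x9 correspond to the negotiated SAS rates of
 * 1.5Gbps and 3.0Gbps.
 */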
1959static const char *link_rate[] = {
1960	"unknown",
1961	"disabled",
1962	"phy reset problem",
1963	"spinup hold",
1964	"port selector",
1965	"unknown",
1966	"unknown",
1967	"unknown",
1968	"1.5Gbps",
1969	"3.0Gbps",
1970	"unknown",
1971	"unknown",
1972	"unknown",
1973	"unknown",
1974	"unknown",
1975	"unknown"
1976};
1977
1978/**
1979 * ipr_log_path_elem - Log a fabric path element.
1980 * @hostrcb:	hostrcb struct
1981 * @cfg:		fabric path element struct
1982 *
1983 * Return value:
1984 * 	none
1985 **/
1986static void ipr_log_path_elem(struct ipr_hostrcb *hostrcb,
1987			      struct ipr_hostrcb_config_element *cfg)
1988{
1989	int i, j;
1990	u8 type = cfg->type_status & IPR_PATH_CFG_TYPE_MASK;
1991	u8 status = cfg->type_status & IPR_PATH_CFG_STATUS_MASK;
1992
1993	if (type == IPR_PATH_CFG_NOT_EXIST)
1994		return;
1995
1996	for (i = 0; i < ARRAY_SIZE(path_type_desc); i++) {
1997		if (path_type_desc[i].type != type)
1998			continue;
1999
2000		for (j = 0; j < ARRAY_SIZE(path_status_desc); j++) {
2001			if (path_status_desc[j].status != status)
2002				continue;
2003
2004			if (type == IPR_PATH_CFG_IOA_PORT) {
2005				ipr_hcam_err(hostrcb, "%s %s: Phy=%d, Link rate=%s, WWN=%08X%08X\n",
2006					     path_status_desc[j].desc, path_type_desc[i].desc,
2007					     cfg->phy, link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2008					     be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2009			} else {
2010				if (cfg->cascaded_expander == 0xff && cfg->phy == 0xff) {
2011					ipr_hcam_err(hostrcb, "%s %s: Link rate=%s, WWN=%08X%08X\n",
2012						     path_status_desc[j].desc, path_type_desc[i].desc,
2013						     link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2014						     be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2015				} else if (cfg->cascaded_expander == 0xff) {
2016					ipr_hcam_err(hostrcb, "%s %s: Phy=%d, Link rate=%s, "
2017						     "WWN=%08X%08X\n", path_status_desc[j].desc,
2018						     path_type_desc[i].desc, cfg->phy,
2019						     link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2020						     be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2021				} else if (cfg->phy == 0xff) {
2022					ipr_hcam_err(hostrcb, "%s %s: Cascade=%d, Link rate=%s, "
2023						     "WWN=%08X%08X\n", path_status_desc[j].desc,
2024						     path_type_desc[i].desc, cfg->cascaded_expander,
2025						     link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2026						     be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2027				} else {
2028					ipr_hcam_err(hostrcb, "%s %s: Cascade=%d, Phy=%d, Link rate=%s "
2029						     "WWN=%08X%08X\n", path_status_desc[j].desc,
2030						     path_type_desc[i].desc, cfg->cascaded_expander, cfg->phy,
2031						     link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2032						     be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2033				}
2034			}
2035			return;
2036		}
2037	}
2038
2039	ipr_hcam_err(hostrcb, "Path element=%02X: Cascade=%d Phy=%d Link rate=%s "
2040		     "WWN=%08X%08X\n", cfg->type_status, cfg->cascaded_expander, cfg->phy,
2041		     link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2042		     be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2043}
2044
2045/**
2046 * ipr_log64_path_elem - Log a fabric path element.
2047 * @hostrcb:	hostrcb struct
2048 * @cfg:		fabric path element struct
2049 *
2050 * Return value:
2051 * 	none
2052 **/
2053static void ipr_log64_path_elem(struct ipr_hostrcb *hostrcb,
2054				struct ipr_hostrcb64_config_element *cfg)
2055{
2056	int i, j;
2057	u8 desc_id = cfg->descriptor_id & IPR_DESCRIPTOR_MASK;
2058	u8 type = cfg->type_status & IPR_PATH_CFG_TYPE_MASK;
2059	u8 status = cfg->type_status & IPR_PATH_CFG_STATUS_MASK;
2060	char buffer[IPR_MAX_RES_PATH_LENGTH];
2061
2062	if (type == IPR_PATH_CFG_NOT_EXIST || desc_id != IPR_DESCRIPTOR_SIS64)
2063		return;
2064
2065	for (i = 0; i < ARRAY_SIZE(path_type_desc); i++) {
2066		if (path_type_desc[i].type != type)
2067			continue;
2068
2069		for (j = 0; j < ARRAY_SIZE(path_status_desc); j++) {
2070			if (path_status_desc[j].status != status)
2071				continue;
2072
2073			ipr_hcam_err(hostrcb, "%s %s: Resource Path=%s, Link rate=%s, WWN=%08X%08X\n",
2074				     path_status_desc[j].desc, path_type_desc[i].desc,
2075				     ipr_format_res_path(cfg->res_path, buffer,
2076							 sizeof(buffer)),
2077				     link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2078				     be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2079			return;
2080		}
2081	}
2082	ipr_hcam_err(hostrcb, "Path element=%02X: Resource Path=%s, Link rate=%s "
2083		     "WWN=%08X%08X\n", cfg->type_status,
2084		     ipr_format_res_path(cfg->res_path, buffer, sizeof(buffer)),
2085		     link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2086		     be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2087}
2088
2089/**
2090 * ipr_log_fabric_error - Log a fabric error.
2091 * @ioa_cfg:	ioa config struct
2092 * @hostrcb:	hostrcb struct
2093 *
2094 * Return value:
2095 * 	none
2096 **/
2097static void ipr_log_fabric_error(struct ipr_ioa_cfg *ioa_cfg,
2098				 struct ipr_hostrcb *hostrcb)
2099{
2100	struct ipr_hostrcb_type_20_error *error;
2101	struct ipr_hostrcb_fabric_desc *fabric;
2102	struct ipr_hostrcb_config_element *cfg;
2103	int i, add_len;
2104
2105	error = &hostrcb->hcam.u.error.u.type_20_error;
2106	error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
2107	ipr_hcam_err(hostrcb, "%s\n", error->failure_reason);
2108
2109	add_len = be32_to_cpu(hostrcb->hcam.length) -
2110		(offsetof(struct ipr_hostrcb_error, u) +
2111		 offsetof(struct ipr_hostrcb_type_20_error, desc));
2112
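	/* Walk each variable-length fabric descriptor and its path elements;
	 * whatever length is left over after the descriptors is dumped as
	 * raw hex data below.
	 */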
2113	for (i = 0, fabric = error->desc; i < error->num_entries; i++) {
2114		ipr_log_fabric_path(hostrcb, fabric);
2115		for_each_fabric_cfg(fabric, cfg)
2116			ipr_log_path_elem(hostrcb, cfg);
2117
2118		add_len -= be16_to_cpu(fabric->length);
2119		fabric = (struct ipr_hostrcb_fabric_desc *)
2120			((unsigned long)fabric + be16_to_cpu(fabric->length));
2121	}
2122
2123	ipr_log_hex_data(ioa_cfg, (u32 *)fabric, add_len);
2124}
2125
2126/**
2127 * ipr_log_sis64_array_error - Log a sis64 array error.
2128 * @ioa_cfg:	ioa config struct
2129 * @hostrcb:	hostrcb struct
2130 *
2131 * Return value:
2132 * 	none
2133 **/
2134static void ipr_log_sis64_array_error(struct ipr_ioa_cfg *ioa_cfg,
2135				      struct ipr_hostrcb *hostrcb)
2136{
2137	int i, num_entries;
2138	struct ipr_hostrcb_type_24_error *error;
2139	struct ipr_hostrcb64_array_data_entry *array_entry;
2140	char buffer[IPR_MAX_RES_PATH_LENGTH];
2141	const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };
2142
2143	error = &hostrcb->hcam.u.error64.u.type_24_error;
2144
2145	ipr_err_separator;
2146
2147	ipr_err("RAID %s Array Configuration: %s\n",
2148		error->protection_level,
2149		ipr_format_res_path(error->last_res_path, buffer, sizeof(buffer)));
2150
2151	ipr_err_separator;
2152
2153	array_entry = error->array_member;
2154	num_entries = min_t(u32, error->num_entries,
2155			    ARRAY_SIZE(error->array_member));
2156
2157	for (i = 0; i < num_entries; i++, array_entry++) {
2158
2159		if (!memcmp(array_entry->vpd.vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
2160			continue;
2161
2162		if (error->exposed_mode_adn == i)
2163			ipr_err("Exposed Array Member %d:\n", i);
2164		else
2165			ipr_err("Array Member %d:\n", i);
2166
2168		ipr_log_ext_vpd(&array_entry->vpd);
2169		ipr_err("Current Location: %s\n",
2170			 ipr_format_res_path(array_entry->res_path, buffer,
2171					     sizeof(buffer)));
2172		ipr_err("Expected Location: %s\n",
2173			 ipr_format_res_path(array_entry->expected_res_path,
2174					     buffer, sizeof(buffer)));
2175
2176		ipr_err_separator;
2177	}
2178}
2179
2180/**
2181 * ipr_log_sis64_fabric_error - Log a sis64 fabric error.
2182 * @ioa_cfg:	ioa config struct
2183 * @hostrcb:	hostrcb struct
2184 *
2185 * Return value:
2186 * 	none
2187 **/
2188static void ipr_log_sis64_fabric_error(struct ipr_ioa_cfg *ioa_cfg,
2189				       struct ipr_hostrcb *hostrcb)
2190{
2191	struct ipr_hostrcb_type_30_error *error;
2192	struct ipr_hostrcb64_fabric_desc *fabric;
2193	struct ipr_hostrcb64_config_element *cfg;
2194	int i, add_len;
2195
2196	error = &hostrcb->hcam.u.error64.u.type_30_error;
2197
2198	error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
2199	ipr_hcam_err(hostrcb, "%s\n", error->failure_reason);
2200
2201	add_len = be32_to_cpu(hostrcb->hcam.length) -
2202		(offsetof(struct ipr_hostrcb64_error, u) +
2203		 offsetof(struct ipr_hostrcb_type_30_error, desc));
2204
2205	for (i = 0, fabric = error->desc; i < error->num_entries; i++) {
2206		ipr_log64_fabric_path(hostrcb, fabric);
2207		for_each_fabric_cfg(fabric, cfg)
2208			ipr_log64_path_elem(hostrcb, cfg);
2209
2210		add_len -= be16_to_cpu(fabric->length);
2211		fabric = (struct ipr_hostrcb64_fabric_desc *)
2212			((unsigned long)fabric + be16_to_cpu(fabric->length));
2213	}
2214
2215	ipr_log_hex_data(ioa_cfg, (u32 *)fabric, add_len);
2216}
2217
2218/**
2219 * ipr_log_generic_error - Log an adapter error.
2220 * @ioa_cfg:	ioa config struct
2221 * @hostrcb:	hostrcb struct
2222 *
2223 * Return value:
2224 * 	none
2225 **/
2226static void ipr_log_generic_error(struct ipr_ioa_cfg *ioa_cfg,
2227				  struct ipr_hostrcb *hostrcb)
2228{
2229	ipr_log_hex_data(ioa_cfg, hostrcb->hcam.u.raw.data,
2230			 be32_to_cpu(hostrcb->hcam.length));
2231}
2232
2233/**
2234 * ipr_get_error - Find the specified IOASC in the ipr_error_table.
2235 * @ioasc:	IOASC
2236 *
2237 * This function will return the index into the ipr_error_table
2238 * for the specified IOASC. If the IOASC is not in the table,
2239 * 0 will be returned, which points to the entry used for unknown errors.
2240 *
2241 * Return value:
2242 * 	index into the ipr_error_table
2243 **/
2244static u32 ipr_get_error(u32 ioasc)
2245{
2246	int i;
2247
2248	for (i = 0; i < ARRAY_SIZE(ipr_error_table); i++)
2249		if (ipr_error_table[i].ioasc == (ioasc & IPR_IOASC_IOASC_MASK))
2250			return i;
2251
2252	return 0;
2253}
2254
2255/**
2256 * ipr_handle_log_data - Log an adapter error.
2257 * @ioa_cfg:	ioa config struct
2258 * @hostrcb:	hostrcb struct
2259 *
2260 * This function logs an adapter error to the system.
2261 *
2262 * Return value:
2263 * 	none
2264 **/
2265static void ipr_handle_log_data(struct ipr_ioa_cfg *ioa_cfg,
2266				struct ipr_hostrcb *hostrcb)
2267{
2268	u32 ioasc;
2269	int error_index;
2270
2271	if (hostrcb->hcam.notify_type != IPR_HOST_RCB_NOTIF_TYPE_ERROR_LOG_ENTRY)
2272		return;
2273
2274	if (hostrcb->hcam.notifications_lost == IPR_HOST_RCB_NOTIFICATIONS_LOST)
2275		dev_err(&ioa_cfg->pdev->dev, "Error notifications lost\n");
2276
2277	if (ioa_cfg->sis64)
2278		ioasc = be32_to_cpu(hostrcb->hcam.u.error64.fd_ioasc);
2279	else
2280		ioasc = be32_to_cpu(hostrcb->hcam.u.error.fd_ioasc);
2281
2282	if (!ioa_cfg->sis64 && (ioasc == IPR_IOASC_BUS_WAS_RESET ||
2283	    ioasc == IPR_IOASC_BUS_WAS_RESET_BY_OTHER)) {
2284		/* Tell the midlayer we had a bus reset so it will handle the UA properly */
2285		scsi_report_bus_reset(ioa_cfg->host,
2286				      hostrcb->hcam.u.error.fd_res_addr.bus);
2287	}
2288
2289	error_index = ipr_get_error(ioasc);
2290
2291	if (!ipr_error_table[error_index].log_hcam)
2292		return;
2293
2294	ipr_hcam_err(hostrcb, "%s\n", ipr_error_table[error_index].error);
2295
2296	/* Set indication we have logged an error */
2297	ioa_cfg->errors_logged++;
2298
2299	if (ioa_cfg->log_level < ipr_error_table[error_index].log_hcam)
2300		return;
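	/* Clamp the reported length so the overlay loggers below never read
	 * past the raw HCAM buffer.
	 */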
2301	if (be32_to_cpu(hostrcb->hcam.length) > sizeof(hostrcb->hcam.u.raw))
2302		hostrcb->hcam.length = cpu_to_be32(sizeof(hostrcb->hcam.u.raw));
2303
2304	switch (hostrcb->hcam.overlay_id) {
2305	case IPR_HOST_RCB_OVERLAY_ID_2:
2306		ipr_log_cache_error(ioa_cfg, hostrcb);
2307		break;
2308	case IPR_HOST_RCB_OVERLAY_ID_3:
2309		ipr_log_config_error(ioa_cfg, hostrcb);
2310		break;
2311	case IPR_HOST_RCB_OVERLAY_ID_4:
2312	case IPR_HOST_RCB_OVERLAY_ID_6:
2313		ipr_log_array_error(ioa_cfg, hostrcb);
2314		break;
2315	case IPR_HOST_RCB_OVERLAY_ID_7:
2316		ipr_log_dual_ioa_error(ioa_cfg, hostrcb);
2317		break;
2318	case IPR_HOST_RCB_OVERLAY_ID_12:
2319		ipr_log_enhanced_cache_error(ioa_cfg, hostrcb);
2320		break;
2321	case IPR_HOST_RCB_OVERLAY_ID_13:
2322		ipr_log_enhanced_config_error(ioa_cfg, hostrcb);
2323		break;
2324	case IPR_HOST_RCB_OVERLAY_ID_14:
2325	case IPR_HOST_RCB_OVERLAY_ID_16:
2326		ipr_log_enhanced_array_error(ioa_cfg, hostrcb);
2327		break;
2328	case IPR_HOST_RCB_OVERLAY_ID_17:
2329		ipr_log_enhanced_dual_ioa_error(ioa_cfg, hostrcb);
2330		break;
2331	case IPR_HOST_RCB_OVERLAY_ID_20:
2332		ipr_log_fabric_error(ioa_cfg, hostrcb);
2333		break;
2334	case IPR_HOST_RCB_OVERLAY_ID_23:
2335		ipr_log_sis64_config_error(ioa_cfg, hostrcb);
2336		break;
2337	case IPR_HOST_RCB_OVERLAY_ID_24:
2338	case IPR_HOST_RCB_OVERLAY_ID_26:
2339		ipr_log_sis64_array_error(ioa_cfg, hostrcb);
2340		break;
2341	case IPR_HOST_RCB_OVERLAY_ID_30:
2342		ipr_log_sis64_fabric_error(ioa_cfg, hostrcb);
2343		break;
2344	case IPR_HOST_RCB_OVERLAY_ID_1:
2345	case IPR_HOST_RCB_OVERLAY_ID_DEFAULT:
2346	default:
2347		ipr_log_generic_error(ioa_cfg, hostrcb);
2348		break;
2349	}
2350}
2351
2352/**
2353 * ipr_process_error - Op done function for an adapter error log.
2354 * @ipr_cmd:	ipr command struct
2355 *
2356 * This function is the op done function for an error log host controlled
2357 * async message (HCAM) from the adapter. It will log the error and
2358 * send the HCAM back to the adapter.
2359 *
2360 * Return value:
2361 * 	none
2362 **/
2363static void ipr_process_error(struct ipr_cmnd *ipr_cmd)
2364{
2365	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
2366	struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb;
2367	u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
2368	u32 fd_ioasc;
2369
2370	if (ioa_cfg->sis64)
2371		fd_ioasc = be32_to_cpu(hostrcb->hcam.u.error64.fd_ioasc);
2372	else
2373		fd_ioasc = be32_to_cpu(hostrcb->hcam.u.error.fd_ioasc);
2374
2375	list_del(&hostrcb->queue);
2376	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
2377
2378	if (!ioasc) {
2379		ipr_handle_log_data(ioa_cfg, hostrcb);
2380		if (fd_ioasc == IPR_IOASC_NR_IOA_RESET_REQUIRED)
2381			ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_ABBREV);
2382	} else if (ioasc != IPR_IOASC_IOA_WAS_RESET) {
2383		dev_err(&ioa_cfg->pdev->dev,
2384			"Host RCB failed with IOASC: 0x%08X\n", ioasc);
2385	}
2386
2387	ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_LOG_DATA, hostrcb);
2388}
2389
2390/**
2391 * ipr_timeout -  An internally generated op has timed out.
2392 * @ipr_cmd:	ipr command struct
2393 *
2394 * This function blocks host requests and initiates an
2395 * adapter reset.
2396 *
2397 * Return value:
2398 * 	none
2399 **/
2400static void ipr_timeout(struct ipr_cmnd *ipr_cmd)
2401{
2402	unsigned long lock_flags = 0;
2403	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
2404
2405	ENTER;
2406	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2407
2408	ioa_cfg->errors_logged++;
2409	dev_err(&ioa_cfg->pdev->dev,
2410		"Adapter being reset due to command timeout.\n");
2411
2412	if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
2413		ioa_cfg->sdt_state = GET_DUMP;
2414
2415	if (!ioa_cfg->in_reset_reload || ioa_cfg->reset_cmd == ipr_cmd)
2416		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
2417
2418	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2419	LEAVE;
2420}
2421
2422/**
2423 * ipr_oper_timeout -  Adapter timed out transitioning to operational
2424 * @ipr_cmd:	ipr command struct
2425 *
2426 * This function blocks host requests and initiates an
2427 * adapter reset.
2428 *
2429 * Return value:
2430 * 	none
2431 **/
2432static void ipr_oper_timeout(struct ipr_cmnd *ipr_cmd)
2433{
2434	unsigned long lock_flags = 0;
2435	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
2436
2437	ENTER;
2438	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2439
2440	ioa_cfg->errors_logged++;
2441	dev_err(&ioa_cfg->pdev->dev,
2442		"Adapter timed out transitioning to operational.\n");
2443
2444	if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
2445		ioa_cfg->sdt_state = GET_DUMP;
2446
2447	if (!ioa_cfg->in_reset_reload || ioa_cfg->reset_cmd == ipr_cmd) {
2448		if (ipr_fastfail)
2449			ioa_cfg->reset_retries += IPR_NUM_RESET_RELOAD_RETRIES;
2450		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
2451	}
2452
2453	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2454	LEAVE;
2455}
2456
2457/**
2458 * ipr_reset_reload - Reset/Reload the IOA
2459 * @ioa_cfg:		ioa config struct
2460 * @shutdown_type:	shutdown type
2461 *
2462 * This function resets the adapter and re-initializes it.
2463 * This function assumes that all new host commands have been stopped.
2464 * Return value:
2465 * 	SUCCESS / FAILED
2466 **/
2467static int ipr_reset_reload(struct ipr_ioa_cfg *ioa_cfg,
2468			    enum ipr_shutdown_type shutdown_type)
2469{
2470	if (!ioa_cfg->in_reset_reload)
2471		ipr_initiate_ioa_reset(ioa_cfg, shutdown_type);
2472
2473	spin_unlock_irq(ioa_cfg->host->host_lock);
2474	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
2475	spin_lock_irq(ioa_cfg->host->host_lock);
2476
2477	/* If we got hit with a host reset while we were already resetting the
2478	 adapter for some reason and that reset failed, the adapter is now dead. */
2479	if (ioa_cfg->ioa_is_dead) {
2480		ipr_trace;
2481		return FAILED;
2482	}
2483
2484	return SUCCESS;
2485}
2486
2487/**
2488 * ipr_find_ses_entry - Find matching SES in SES table
2489 * @res:	resource entry struct of SES
2490 *
2491 * Return value:
2492 * 	pointer to SES table entry / NULL on failure
2493 **/
2494static const struct ipr_ses_table_entry *
2495ipr_find_ses_entry(struct ipr_resource_entry *res)
2496{
2497	int i, j, matches;
2498	struct ipr_std_inq_vpids *vpids;
2499	const struct ipr_ses_table_entry *ste = ipr_ses_table;
2500
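	/* A table entry matches when every byte flagged 'X' in
	 * compare_product_id_byte equals the corresponding byte of the
	 * device's product ID; positions not flagged 'X' are don't-cares.
	 */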
2501	for (i = 0; i < ARRAY_SIZE(ipr_ses_table); i++, ste++) {
2502		for (j = 0, matches = 0; j < IPR_PROD_ID_LEN; j++) {
2503			if (ste->compare_product_id_byte[j] == 'X') {
2504				vpids = &res->std_inq_data.vpids;
2505				if (vpids->product_id[j] == ste->product_id[j])
2506					matches++;
2507				else
2508					break;
2509			} else
2510				matches++;
2511		}
2512
2513		if (matches == IPR_PROD_ID_LEN)
2514			return ste;
2515	}
2516
2517	return NULL;
2518}
2519
2520/**
2521 * ipr_get_max_scsi_speed - Determine max SCSI speed for a given bus
2522 * @ioa_cfg:	ioa config struct
2523 * @bus:		SCSI bus
2524 * @bus_width:	bus width
2525 *
2526 * Return value:
2527 *	SCSI bus speed in units of 100KHz, 1600 is 160 MHz
2528 *	For a 2-byte wide SCSI bus, the maximum transfer rate in MB/sec is
2529 *	twice the bus speed in MHz (e.g. for a wide enabled bus running at
2530 *	160MHz, the maximum transfer rate is 320MB/sec).
2531 **/
2532static u32 ipr_get_max_scsi_speed(struct ipr_ioa_cfg *ioa_cfg, u8 bus, u8 bus_width)
2533{
2534	struct ipr_resource_entry *res;
2535	const struct ipr_ses_table_entry *ste;
2536	u32 max_xfer_rate = IPR_MAX_SCSI_RATE(bus_width);
2537
2538	/* Loop through each config table entry in the config table buffer */
2539	list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
2540		if (!(IPR_IS_SES_DEVICE(res->std_inq_data)))
2541			continue;
2542
2543		if (bus != res->bus)
2544			continue;
2545
2546		if (!(ste = ipr_find_ses_entry(res)))
2547			continue;
2548
2549		max_xfer_rate = (ste->max_bus_speed_limit * 10) / (bus_width / 8);
2550	}
2551
2552	return max_xfer_rate;
2553}
2554
2555/**
2556 * ipr_wait_iodbg_ack - Wait for an IODEBUG ACK from the IOA
2557 * @ioa_cfg:		ioa config struct
2558 * @max_delay:		max delay in micro-seconds to wait
2559 *
2560 * Waits for an IODEBUG ACK from the IOA, doing busy looping.
2561 *
2562 * Return value:
2563 * 	0 on success / other on failure
2564 **/
2565static int ipr_wait_iodbg_ack(struct ipr_ioa_cfg *ioa_cfg, int max_delay)
2566{
2567	volatile u32 pcii_reg;
2568	int delay = 1;
2569
2570	/* Read interrupt reg until IOA signals IO Debug Acknowledge */
2571	while (delay < max_delay) {
2572		pcii_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
2573
2574		if (pcii_reg & IPR_PCII_IO_DEBUG_ACKNOWLEDGE)
2575			return 0;
2576
2577		/* udelay cannot be used if delay is more than a few milliseconds */
2578		if ((delay / 1000) > MAX_UDELAY_MS)
2579			mdelay(delay / 1000);
2580		else
2581			udelay(delay);
2582
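		/* Exponential backoff: double the polling delay each pass */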
2583		delay += delay;
2584	}
2585	return -EIO;
2586}
2587
2588/**
2589 * ipr_get_sis64_dump_data_section - Dump IOA memory
2590 * @ioa_cfg:			ioa config struct
2591 * @start_addr:			adapter address to dump
2592 * @dest:			destination kernel buffer
2593 * @length_in_words:		length to dump in 4 byte words
2594 *
2595 * Return value:
2596 * 	0 on success
2597 **/
2598static int ipr_get_sis64_dump_data_section(struct ipr_ioa_cfg *ioa_cfg,
2599					   u32 start_addr,
2600					   __be32 *dest, u32 length_in_words)
2601{
2602	int i;
2603
2604	for (i = 0; i < length_in_words; i++) {
2605		writel(start_addr+(i*4), ioa_cfg->regs.dump_addr_reg);
2606		*dest = cpu_to_be32(readl(ioa_cfg->regs.dump_data_reg));
2607		dest++;
2608	}
2609
2610	return 0;
2611}
2612
2613/**
2614 * ipr_get_ldump_data_section - Dump IOA memory
2615 * @ioa_cfg:			ioa config struct
2616 * @start_addr:			adapter address to dump
2617 * @dest:				destination kernel buffer
2618 * @length_in_words:	length to dump in 4 byte words
2619 *
2620 * Return value:
2621 * 	0 on success / -EIO on failure
2622 **/
2623static int ipr_get_ldump_data_section(struct ipr_ioa_cfg *ioa_cfg,
2624				      u32 start_addr,
2625				      __be32 *dest, u32 length_in_words)
2626{
2627	volatile u32 temp_pcii_reg;
2628	int i, delay = 0;
2629
2630	if (ioa_cfg->sis64)
2631		return ipr_get_sis64_dump_data_section(ioa_cfg, start_addr,
2632						       dest, length_in_words);
2633
2634	/* Write IOA interrupt reg starting LDUMP state  */
2635	writel((IPR_UPROCI_RESET_ALERT | IPR_UPROCI_IO_DEBUG_ALERT),
2636	       ioa_cfg->regs.set_uproc_interrupt_reg32);
2637
2638	/* Wait for IO debug acknowledge */
2639	if (ipr_wait_iodbg_ack(ioa_cfg,
2640			       IPR_LDUMP_MAX_LONG_ACK_DELAY_IN_USEC)) {
2641		dev_err(&ioa_cfg->pdev->dev,
2642			"IOA dump long data transfer timeout\n");
2643		return -EIO;
2644	}
2645
2646	/* Signal LDUMP interlocked - clear IO debug ack */
2647	writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
2648	       ioa_cfg->regs.clr_interrupt_reg);
2649
2650	/* Write Mailbox with starting address */
2651	writel(start_addr, ioa_cfg->ioa_mailbox);
2652
2653	/* Signal address valid - clear IOA Reset alert */
2654	writel(IPR_UPROCI_RESET_ALERT,
2655	       ioa_cfg->regs.clr_uproc_interrupt_reg32);
2656
2657	for (i = 0; i < length_in_words; i++) {
2658		/* Wait for IO debug acknowledge */
2659		if (ipr_wait_iodbg_ack(ioa_cfg,
2660				       IPR_LDUMP_MAX_SHORT_ACK_DELAY_IN_USEC)) {
2661			dev_err(&ioa_cfg->pdev->dev,
2662				"IOA dump short data transfer timeout\n");
2663			return -EIO;
2664		}
2665
2666		/* Read data from mailbox and increment destination pointer */
2667		*dest = cpu_to_be32(readl(ioa_cfg->ioa_mailbox));
2668		dest++;
2669
2670		/* For all but the last word of data, signal data received */
2671		if (i < (length_in_words - 1)) {
2672			/* Signal dump data received - Clear IO debug Ack */
2673			writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
2674			       ioa_cfg->regs.clr_interrupt_reg);
2675		}
2676	}
2677
2678	/* Signal end of block transfer. Set reset alert then clear IO debug ack */
2679	writel(IPR_UPROCI_RESET_ALERT,
2680	       ioa_cfg->regs.set_uproc_interrupt_reg32);
2681
2682	writel(IPR_UPROCI_IO_DEBUG_ALERT,
2683	       ioa_cfg->regs.clr_uproc_interrupt_reg32);
2684
2685	/* Signal dump data received - Clear IO debug Ack */
2686	writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
2687	       ioa_cfg->regs.clr_interrupt_reg);
2688
2689	/* Wait for IOA to signal LDUMP exit - IOA reset alert will be cleared */
2690	while (delay < IPR_LDUMP_MAX_SHORT_ACK_DELAY_IN_USEC) {
2691		temp_pcii_reg =
2692		    readl(ioa_cfg->regs.sense_uproc_interrupt_reg32);
2693
2694		if (!(temp_pcii_reg & IPR_UPROCI_RESET_ALERT))
2695			return 0;
2696
2697		udelay(10);
2698		delay += 10;
2699	}
2700
2701	return 0;
2702}
2703
2704#ifdef CONFIG_SCSI_IPR_DUMP
2705/**
2706 * ipr_sdt_copy - Copy Smart Dump Table to kernel buffer
2707 * @ioa_cfg:		ioa config struct
2708 * @pci_address:	adapter address
2709 * @length:			length of data to copy
2710 *
2711 * Copy data from PCI adapter to kernel buffer.
2712 * Note: length MUST be a 4 byte multiple
2713 * Return value:
2714 * 	0 on success / other on failure
2715 **/
2716static int ipr_sdt_copy(struct ipr_ioa_cfg *ioa_cfg,
2717			unsigned long pci_address, u32 length)
2718{
2719	int bytes_copied = 0;
2720	int cur_len, rc, rem_len, rem_page_len;
2721	__be32 *page;
2722	unsigned long lock_flags = 0;
2723	struct ipr_ioa_dump *ioa_dump = &ioa_cfg->dump->ioa_dump;
2724
2725	while (bytes_copied < length &&
2726	       (ioa_dump->hdr.len + bytes_copied) < IPR_MAX_IOA_DUMP_SIZE) {
2727		if (ioa_dump->page_offset >= PAGE_SIZE ||
2728		    ioa_dump->page_offset == 0) {
2729			page = (__be32 *)__get_free_page(GFP_ATOMIC);
2730
2731			if (!page) {
2732				ipr_trace;
2733				return bytes_copied;
2734			}
2735
2736			ioa_dump->page_offset = 0;
2737			ioa_dump->ioa_data[ioa_dump->next_page_index] = page;
2738			ioa_dump->next_page_index++;
2739		} else
2740			page = ioa_dump->ioa_data[ioa_dump->next_page_index - 1];
2741
2742		rem_len = length - bytes_copied;
2743		rem_page_len = PAGE_SIZE - ioa_dump->page_offset;
2744		cur_len = min(rem_len, rem_page_len);
2745
2746		spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2747		if (ioa_cfg->sdt_state == ABORT_DUMP) {
2748			rc = -EIO;
2749		} else {
2750			rc = ipr_get_ldump_data_section(ioa_cfg,
2751							pci_address + bytes_copied,
2752							&page[ioa_dump->page_offset / 4],
2753							(cur_len / sizeof(u32)));
2754		}
2755		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2756
2757		if (!rc) {
2758			ioa_dump->page_offset += cur_len;
2759			bytes_copied += cur_len;
2760		} else {
2761			ipr_trace;
2762			break;
2763		}
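		/* The dump can be large; give up the CPU between chunks */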
2764		schedule();
2765	}
2766
2767	return bytes_copied;
2768}
2769
2770/**
2771 * ipr_init_dump_entry_hdr - Initialize a dump entry header.
2772 * @hdr:	dump entry header struct
2773 *
2774 * Return value:
2775 * 	nothing
2776 **/
2777static void ipr_init_dump_entry_hdr(struct ipr_dump_entry_header *hdr)
2778{
2779	hdr->eye_catcher = IPR_DUMP_EYE_CATCHER;
2780	hdr->num_elems = 1;
2781	hdr->offset = sizeof(*hdr);
2782	hdr->status = IPR_DUMP_STATUS_SUCCESS;
2783}
2784
2785/**
2786 * ipr_dump_ioa_type_data - Fill in the adapter type in the dump.
2787 * @ioa_cfg:	ioa config struct
2788 * @driver_dump:	driver dump struct
2789 *
2790 * Return value:
2791 * 	nothing
2792 **/
2793static void ipr_dump_ioa_type_data(struct ipr_ioa_cfg *ioa_cfg,
2794				   struct ipr_driver_dump *driver_dump)
2795{
2796	struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
2797
2798	ipr_init_dump_entry_hdr(&driver_dump->ioa_type_entry.hdr);
2799	driver_dump->ioa_type_entry.hdr.len =
2800		sizeof(struct ipr_dump_ioa_type_entry) -
2801		sizeof(struct ipr_dump_entry_header);
2802	driver_dump->ioa_type_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
2803	driver_dump->ioa_type_entry.hdr.id = IPR_DUMP_DRIVER_TYPE_ID;
2804	driver_dump->ioa_type_entry.type = ioa_cfg->type;
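	/* Pack the microcode level (major release, card type and the two
	 * minor release bytes) into a single 32-bit firmware version word.
	 */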
2805	driver_dump->ioa_type_entry.fw_version = (ucode_vpd->major_release << 24) |
2806		(ucode_vpd->card_type << 16) | (ucode_vpd->minor_release[0] << 8) |
2807		ucode_vpd->minor_release[1];
2808	driver_dump->hdr.num_entries++;
2809}
2810
2811/**
2812 * ipr_dump_version_data - Fill in the driver version in the dump.
2813 * @ioa_cfg:	ioa config struct
2814 * @driver_dump:	driver dump struct
2815 *
2816 * Return value:
2817 * 	nothing
2818 **/
2819static void ipr_dump_version_data(struct ipr_ioa_cfg *ioa_cfg,
2820				  struct ipr_driver_dump *driver_dump)
2821{
2822	ipr_init_dump_entry_hdr(&driver_dump->version_entry.hdr);
2823	driver_dump->version_entry.hdr.len =
2824		sizeof(struct ipr_dump_version_entry) -
2825		sizeof(struct ipr_dump_entry_header);
2826	driver_dump->version_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_ASCII;
2827	driver_dump->version_entry.hdr.id = IPR_DUMP_DRIVER_VERSION_ID;
2828	strcpy(driver_dump->version_entry.version, IPR_DRIVER_VERSION);
2829	driver_dump->hdr.num_entries++;
2830}
2831
2832/**
2833 * ipr_dump_trace_data - Fill in the IOA trace in the dump.
2834 * @ioa_cfg:	ioa config struct
2835 * @driver_dump:	driver dump struct
2836 *
2837 * Return value:
2838 * 	nothing
2839 **/
2840static void ipr_dump_trace_data(struct ipr_ioa_cfg *ioa_cfg,
2841				   struct ipr_driver_dump *driver_dump)
2842{
2843	ipr_init_dump_entry_hdr(&driver_dump->trace_entry.hdr);
2844	driver_dump->trace_entry.hdr.len =
2845		sizeof(struct ipr_dump_trace_entry) -
2846		sizeof(struct ipr_dump_entry_header);
2847	driver_dump->trace_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
2848	driver_dump->trace_entry.hdr.id = IPR_DUMP_TRACE_ID;
2849	memcpy(driver_dump->trace_entry.trace, ioa_cfg->trace, IPR_TRACE_SIZE);
2850	driver_dump->hdr.num_entries++;
2851}
2852
2853/**
2854 * ipr_dump_location_data - Fill in the IOA location in the dump.
2855 * @ioa_cfg:	ioa config struct
2856 * @driver_dump:	driver dump struct
2857 *
2858 * Return value:
2859 * 	nothing
2860 **/
2861static void ipr_dump_location_data(struct ipr_ioa_cfg *ioa_cfg,
2862				   struct ipr_driver_dump *driver_dump)
2863{
2864	ipr_init_dump_entry_hdr(&driver_dump->location_entry.hdr);
2865	driver_dump->location_entry.hdr.len =
2866		sizeof(struct ipr_dump_location_entry) -
2867		sizeof(struct ipr_dump_entry_header);
2868	driver_dump->location_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_ASCII;
2869	driver_dump->location_entry.hdr.id = IPR_DUMP_LOCATION_ID;
2870	strcpy(driver_dump->location_entry.location, dev_name(&ioa_cfg->pdev->dev));
2871	driver_dump->hdr.num_entries++;
2872}
2873
2874/**
2875 * ipr_get_ioa_dump - Perform a dump of the driver and adapter.
2876 * @ioa_cfg:	ioa config struct
2877 * @dump:		dump struct
2878 *
2879 * Return value:
2880 * 	nothing
2881 **/
2882static void ipr_get_ioa_dump(struct ipr_ioa_cfg *ioa_cfg, struct ipr_dump *dump)
2883{
2884	unsigned long start_addr, sdt_word;
2885	unsigned long lock_flags = 0;
2886	struct ipr_driver_dump *driver_dump = &dump->driver_dump;
2887	struct ipr_ioa_dump *ioa_dump = &dump->ioa_dump;
2888	u32 num_entries, start_off, end_off;
2889	u32 bytes_to_copy, bytes_copied, rc;
2890	struct ipr_sdt *sdt;
2891	int valid = 1;
2892	int i;
2893
2894	ENTER;
2895
2896	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2897
2898	if (ioa_cfg->sdt_state != GET_DUMP) {
2899		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2900		return;
2901	}
2902
2903	if (ioa_cfg->sis64) {
2904		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2905		ssleep(IPR_DUMP_DELAY_SECONDS);
2906		spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2907	}
2908
2909	start_addr = readl(ioa_cfg->ioa_mailbox);
2910
2911	if (!ioa_cfg->sis64 && !ipr_sdt_is_fmt2(start_addr)) {
2912		dev_err(&ioa_cfg->pdev->dev,
2913			"Invalid dump table format: %lx\n", start_addr);
2914		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2915		return;
2916	}
2917
2918	dev_err(&ioa_cfg->pdev->dev, "Dump of IOA initiated\n");
2919
2920	driver_dump->hdr.eye_catcher = IPR_DUMP_EYE_CATCHER;
2921
2922	/* Initialize the overall dump header */
2923	driver_dump->hdr.len = sizeof(struct ipr_driver_dump);
2924	driver_dump->hdr.num_entries = 1;
2925	driver_dump->hdr.first_entry_offset = sizeof(struct ipr_dump_header);
2926	driver_dump->hdr.status = IPR_DUMP_STATUS_SUCCESS;
2927	driver_dump->hdr.os = IPR_DUMP_OS_LINUX;
2928	driver_dump->hdr.driver_name = IPR_DUMP_DRIVER_NAME;
2929
2930	ipr_dump_version_data(ioa_cfg, driver_dump);
2931	ipr_dump_location_data(ioa_cfg, driver_dump);
2932	ipr_dump_ioa_type_data(ioa_cfg, driver_dump);
2933	ipr_dump_trace_data(ioa_cfg, driver_dump);
2934
2935	/* Update dump_header */
2936	driver_dump->hdr.len += sizeof(struct ipr_dump_entry_header);
2937
2938	/* IOA Dump entry */
2939	ipr_init_dump_entry_hdr(&ioa_dump->hdr);
2940	ioa_dump->hdr.len = 0;
2941	ioa_dump->hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
2942	ioa_dump->hdr.id = IPR_DUMP_IOA_DUMP_ID;
2943
2944	/* The first entries in the sdt are actually a list of dump addresses and
2945	 lengths used to gather the real dump data.  sdt points to the IOA
2946	 generated dump table.  Dump data will be extracted based on the
2947	 entries in this table. */
2948	sdt = &ioa_dump->sdt;
2949
2950	rc = ipr_get_ldump_data_section(ioa_cfg, start_addr, (__be32 *)sdt,
2951					sizeof(struct ipr_sdt) / sizeof(__be32));
2952
2953	/* Smart Dump table is ready to use and the first entry is valid */
2954	if (rc || ((be32_to_cpu(sdt->hdr.state) != IPR_FMT3_SDT_READY_TO_USE) &&
2955	    (be32_to_cpu(sdt->hdr.state) != IPR_FMT2_SDT_READY_TO_USE))) {
2956		dev_err(&ioa_cfg->pdev->dev,
2957			"Dump of IOA failed. Dump table not valid: %d, %X.\n",
2958			rc, be32_to_cpu(sdt->hdr.state));
2959		driver_dump->hdr.status = IPR_DUMP_STATUS_FAILED;
2960		ioa_cfg->sdt_state = DUMP_OBTAINED;
2961		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2962		return;
2963	}
2964
2965	num_entries = be32_to_cpu(sdt->hdr.num_entries_used);
2966
2967	if (num_entries > IPR_NUM_SDT_ENTRIES)
2968		num_entries = IPR_NUM_SDT_ENTRIES;
2969
2970	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2971
2972	for (i = 0; i < num_entries; i++) {
2973		if (ioa_dump->hdr.len > IPR_MAX_IOA_DUMP_SIZE) {
2974			driver_dump->hdr.status = IPR_DUMP_STATUS_QUAL_SUCCESS;
2975			break;
2976		}
2977
2978		if (sdt->entry[i].flags & IPR_SDT_VALID_ENTRY) {
2979			sdt_word = be32_to_cpu(sdt->entry[i].start_token);
2980			if (ioa_cfg->sis64)
2981				bytes_to_copy = be32_to_cpu(sdt->entry[i].end_token);
2982			else {
2983				start_off = sdt_word & IPR_FMT2_MBX_ADDR_MASK;
2984				end_off = be32_to_cpu(sdt->entry[i].end_token);
2985
2986				if (ipr_sdt_is_fmt2(sdt_word) && sdt_word)
2987					bytes_to_copy = end_off - start_off;
2988				else
2989					valid = 0;
2990			}
2991			if (valid) {
2992				if (bytes_to_copy > IPR_MAX_IOA_DUMP_SIZE) {
2993					sdt->entry[i].flags &= ~IPR_SDT_VALID_ENTRY;
2994					continue;
2995				}
2996
2997				/* Copy data from adapter to driver buffers */
2998				bytes_copied = ipr_sdt_copy(ioa_cfg, sdt_word,
2999							    bytes_to_copy);
3000
3001				ioa_dump->hdr.len += bytes_copied;
3002
3003				if (bytes_copied != bytes_to_copy) {
3004					driver_dump->hdr.status = IPR_DUMP_STATUS_QUAL_SUCCESS;
3005					break;
3006				}
3007			}
3008		}
3009	}
3010
3011	dev_err(&ioa_cfg->pdev->dev, "Dump of IOA completed.\n");
3012
3013	/* Update dump_header */
3014	driver_dump->hdr.len += ioa_dump->hdr.len;
3015	wmb();
3016	ioa_cfg->sdt_state = DUMP_OBTAINED;
3017	LEAVE;
3018}
3019
3020#else
3021#define ipr_get_ioa_dump(ioa_cfg, dump) do { } while (0)
3022#endif
3023
3024/**
3025 * ipr_release_dump - Free adapter dump memory
3026 * @kref:	kref struct
3027 *
3028 * Return value:
3029 *	nothing
3030 **/
3031static void ipr_release_dump(struct kref *kref)
3032{
3033	struct ipr_dump *dump = container_of(kref, struct ipr_dump, kref);
3034	struct ipr_ioa_cfg *ioa_cfg = dump->ioa_cfg;
3035	unsigned long lock_flags = 0;
3036	int i;
3037
3038	ENTER;
3039	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3040	ioa_cfg->dump = NULL;
3041	ioa_cfg->sdt_state = INACTIVE;
3042	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3043
3044	for (i = 0; i < dump->ioa_dump.next_page_index; i++)
3045		free_page((unsigned long) dump->ioa_dump.ioa_data[i]);
3046
3047	kfree(dump);
3048	LEAVE;
3049}
3050
3051/**
3052 * ipr_worker_thread - Worker thread
3053 * @work:		ioa config struct
3054 *
3055 * Called at task level from a work thread. This function takes care
3056 * of adding and removing devices from the mid-layer as configuration
3057 * changes are detected by the adapter.
3058 *
3059 * Return value:
3060 * 	nothing
3061 **/
3062static void ipr_worker_thread(struct work_struct *work)
3063{
3064	unsigned long lock_flags;
3065	struct ipr_resource_entry *res;
3066	struct scsi_device *sdev;
3067	struct ipr_dump *dump;
3068	struct ipr_ioa_cfg *ioa_cfg =
3069		container_of(work, struct ipr_ioa_cfg, work_q);
3070	u8 bus, target, lun;
3071	int did_work;
3072
3073	ENTER;
3074	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3075
3076	if (ioa_cfg->sdt_state == GET_DUMP) {
3077		dump = ioa_cfg->dump;
3078		if (!dump) {
3079			spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3080			return;
3081		}
3082		kref_get(&dump->kref);
3083		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3084		ipr_get_ioa_dump(ioa_cfg, dump);
3085		kref_put(&dump->kref, ipr_release_dump);
3086
3087		spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3088		if (ioa_cfg->sdt_state == DUMP_OBTAINED)
3089			ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
3090		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3091		return;
3092	}
3093
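	/*
	 * scsi_remove_device()/scsi_add_device() may sleep and must be called
	 * without the host lock held. Since the lock is dropped around each
	 * mid-layer call, the resource list can change underneath us, so the
	 * scan restarts from the top after every device add or remove.
	 */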
3094restart:
3095	do {
3096		did_work = 0;
3097		if (!ioa_cfg->allow_cmds || !ioa_cfg->allow_ml_add_del) {
3098			spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3099			return;
3100		}
3101
3102		list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
3103			if (res->del_from_ml && res->sdev) {
3104				did_work = 1;
3105				sdev = res->sdev;
3106				if (!scsi_device_get(sdev)) {
3107					if (!res->add_to_ml)
3108						list_move_tail(&res->queue, &ioa_cfg->free_res_q);
3109					else
3110						res->del_from_ml = 0;
3111					spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3112					scsi_remove_device(sdev);
3113					scsi_device_put(sdev);
3114					spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3115				}
3116				break;
3117			}
3118		}
3119	} while (did_work);
3120
3121	list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
3122		if (res->add_to_ml) {
3123			bus = res->bus;
3124			target = res->target;
3125			lun = res->lun;
3126			res->add_to_ml = 0;
3127			spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3128			scsi_add_device(ioa_cfg->host, bus, target, lun);
3129			spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3130			goto restart;
3131		}
3132	}
3133
3134	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3135	kobject_uevent(&ioa_cfg->host->shost_dev.kobj, KOBJ_CHANGE);
3136	LEAVE;
3137}
3138
3139#ifdef CONFIG_SCSI_IPR_TRACE
3140/**
3141 * ipr_read_trace - Dump the adapter trace
3142 * @filp:		open sysfs file
3143 * @kobj:		kobject struct
3144 * @bin_attr:		bin_attribute struct
3145 * @buf:		buffer
3146 * @off:		offset
3147 * @count:		buffer size
3148 *
3149 * Return value:
3150 *	number of bytes printed to buffer
3151 **/
3152static ssize_t ipr_read_trace(struct file *filp, struct kobject *kobj,
3153			      struct bin_attribute *bin_attr,
3154			      char *buf, loff_t off, size_t count)
3155{
3156	struct device *dev = container_of(kobj, struct device, kobj);
3157	struct Scsi_Host *shost = class_to_shost(dev);
3158	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3159	unsigned long lock_flags = 0;
3160	ssize_t ret;
3161
3162	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3163	ret = memory_read_from_buffer(buf, count, &off, ioa_cfg->trace,
3164				IPR_TRACE_SIZE);
3165	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3166
3167	return ret;
3168}
3169
3170static struct bin_attribute ipr_trace_attr = {
3171	.attr =	{
3172		.name = "trace",
3173		.mode = S_IRUGO,
3174	},
3175	.size = 0,
3176	.read = ipr_read_trace,
3177};
3178#endif
3179
3180/**
3181 * ipr_show_fw_version - Show the firmware version
3182 * @dev:	class device struct
3183 * @buf:	buffer
3184 *
3185 * Return value:
3186 *	number of bytes printed to buffer
3187 **/
3188static ssize_t ipr_show_fw_version(struct device *dev,
3189				   struct device_attribute *attr, char *buf)
3190{
3191	struct Scsi_Host *shost = class_to_shost(dev);
3192	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3193	struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
3194	unsigned long lock_flags = 0;
3195	int len;
3196
3197	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3198	len = snprintf(buf, PAGE_SIZE, "%02X%02X%02X%02X\n",
3199		       ucode_vpd->major_release, ucode_vpd->card_type,
3200		       ucode_vpd->minor_release[0],
3201		       ucode_vpd->minor_release[1]);
3202	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3203	return len;
3204}
3205
3206static struct device_attribute ipr_fw_version_attr = {
3207	.attr = {
3208		.name =		"fw_version",
3209		.mode =		S_IRUGO,
3210	},
3211	.show = ipr_show_fw_version,
3212};
3213
3214/**
3215 * ipr_show_log_level - Show the adapter's error logging level
3216 * @dev:	class device struct
3217 * @buf:	buffer
3218 *
3219 * Return value:
3220 * 	number of bytes printed to buffer
3221 **/
3222static ssize_t ipr_show_log_level(struct device *dev,
3223				   struct device_attribute *attr, char *buf)
3224{
3225	struct Scsi_Host *shost = class_to_shost(dev);
3226	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3227	unsigned long lock_flags = 0;
3228	int len;
3229
3230	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3231	len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->log_level);
3232	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3233	return len;
3234}
3235
3236/**
3237 * ipr_store_log_level - Change the adapter's error logging level
3238 * @dev:	class device struct
3239 * @buf:	buffer
3240 *
3241 * Return value:
3242 * 	number of bytes consumed from buffer on success
3243 **/
3244static ssize_t ipr_store_log_level(struct device *dev,
3245			           struct device_attribute *attr,
3246				   const char *buf, size_t count)
3247{
3248	struct Scsi_Host *shost = class_to_shost(dev);
3249	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3250	unsigned long lock_flags = 0;
3251
3252	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3253	ioa_cfg->log_level = simple_strtoul(buf, NULL, 10);
3254	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3255	return strlen(buf);
3256}
3257
3258static struct device_attribute ipr_log_level_attr = {
3259	.attr = {
3260		.name =		"log_level",
3261		.mode =		S_IRUGO | S_IWUSR,
3262	},
3263	.show = ipr_show_log_level,
3264	.store = ipr_store_log_level
3265};
3266
3267/**
3268 * ipr_store_diagnostics - IOA Diagnostics interface
3269 * @dev:	device struct
3270 * @buf:	buffer
3271 * @count:	buffer size
3272 *
3273 * This function will reset the adapter and wait a reasonable
3274 * amount of time for any errors that the adapter might log.
3275 *
3276 * Return value:
3277 * 	count on success / other on failure
3278 **/
3279static ssize_t ipr_store_diagnostics(struct device *dev,
3280				     struct device_attribute *attr,
3281				     const char *buf, size_t count)
3282{
3283	struct Scsi_Host *shost = class_to_shost(dev);
3284	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3285	unsigned long lock_flags = 0;
3286	int rc = count;
3287
3288	if (!capable(CAP_SYS_ADMIN))
3289		return -EACCES;
3290
3291	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3292	while (ioa_cfg->in_reset_reload) {
3293		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3294		wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3295		spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3296	}
3297
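	/* Clear the error count, request a normal shutdown/reset, and report
	 * failure if the adapter logs any new errors while coming back up.
	 */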
3298	ioa_cfg->errors_logged = 0;
3299	ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
3300
3301	if (ioa_cfg->in_reset_reload) {
3302		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3303		wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3304
3305		/* Wait for a second for any errors to be logged */
3306		msleep(1000);
3307	} else {
3308		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3309		return -EIO;
3310	}
3311
3312	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3313	if (ioa_cfg->in_reset_reload || ioa_cfg->errors_logged)
3314		rc = -EIO;
3315	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3316
3317	return rc;
3318}
3319
3320static struct device_attribute ipr_diagnostics_attr = {
3321	.attr = {
3322		.name =		"run_diagnostics",
3323		.mode =		S_IWUSR,
3324	},
3325	.store = ipr_store_diagnostics
3326};
3327
3328/**
3329 * ipr_show_adapter_state - Show the adapter's state
3330 * @dev:	device struct
3331 * @buf:	buffer
3332 *
3333 * Return value:
3334 * 	number of bytes printed to buffer
3335 **/
3336static ssize_t ipr_show_adapter_state(struct device *dev,
3337				      struct device_attribute *attr, char *buf)
3338{
3339	struct Scsi_Host *shost = class_to_shost(dev);
3340	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3341	unsigned long lock_flags = 0;
3342	int len;
3343
3344	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3345	if (ioa_cfg->ioa_is_dead)
3346		len = snprintf(buf, PAGE_SIZE, "offline\n");
3347	else
3348		len = snprintf(buf, PAGE_SIZE, "online\n");
3349	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3350	return len;
3351}
3352
3353/**
3354 * ipr_store_adapter_state - Change adapter state
3355 * @dev:	device struct
3356 * @buf:	buffer
3357 * @count:	buffer size
3358 *
3359 * This function will change the adapter's state.
3360 *
3361 * Return value:
3362 * 	count on success / other on failure
3363 **/
3364static ssize_t ipr_store_adapter_state(struct device *dev,
3365				       struct device_attribute *attr,
3366				       const char *buf, size_t count)
3367{
3368	struct Scsi_Host *shost = class_to_shost(dev);
3369	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3370	unsigned long lock_flags;
3371	int result = count;
3372
3373	if (!capable(CAP_SYS_ADMIN))
3374		return -EACCES;
3375
3376	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3377	if (ioa_cfg->ioa_is_dead && !strncmp(buf, "online", 6)) {
3378		ioa_cfg->ioa_is_dead = 0;
3379		ioa_cfg->reset_retries = 0;
3380		ioa_cfg->in_ioa_bringdown = 0;
3381		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
3382	}
3383	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3384	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3385
3386	return result;
3387}
3388
3389static struct device_attribute ipr_ioa_state_attr = {
3390	.attr = {
3391		.name =		"online_state",
3392		.mode =		S_IRUGO | S_IWUSR,
3393	},
3394	.show = ipr_show_adapter_state,
3395	.store = ipr_store_adapter_state
3396};
3397
3398/**
3399 * ipr_store_reset_adapter - Reset the adapter
3400 * @dev:	device struct
3401 * @buf:	buffer
3402 * @count:	buffer size
3403 *
3404 * This function will reset the adapter.
3405 *
3406 * Return value:
3407 * 	count on success / other on failure
3408 **/
3409static ssize_t ipr_store_reset_adapter(struct device *dev,
3410				       struct device_attribute *attr,
3411				       const char *buf, size_t count)
3412{
3413	struct Scsi_Host *shost = class_to_shost(dev);
3414	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3415	unsigned long lock_flags;
3416	int result = count;
3417
3418	if (!capable(CAP_SYS_ADMIN))
3419		return -EACCES;
3420
3421	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3422	if (!ioa_cfg->in_reset_reload)
3423		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
3424	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3425	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3426
3427	return result;
3428}
3429
3430static struct device_attribute ipr_ioa_reset_attr = {
3431	.attr = {
3432		.name =		"reset_host",
3433		.mode =		S_IWUSR,
3434	},
3435	.store = ipr_store_reset_adapter
3436};
3437
3438/**
3439 * ipr_alloc_ucode_buffer - Allocates a microcode download buffer
3440 * @buf_len:		buffer length
3441 *
3442 * Allocates a DMA'able buffer in chunks and assembles a scatter/gather
3443 * list to use for microcode download
3444 *
3445 * Return value:
3446 * 	pointer to sglist / NULL on failure
3447 **/
3448static struct ipr_sglist *ipr_alloc_ucode_buffer(int buf_len)
3449{
3450	int sg_size, order, bsize_elem, num_elem, i, j;
3451	struct ipr_sglist *sglist;
3452	struct scatterlist *scatterlist;
3453	struct page *page;
3454
3455	/* Get the minimum size per scatter/gather element */
3456	sg_size = buf_len / (IPR_MAX_SGLIST - 1);
3457
3458	/* Get the actual size per element */
3459	order = get_order(sg_size);
3460
3461	/* Determine the actual number of bytes per element */
3462	bsize_elem = PAGE_SIZE * (1 << order);
3463
3464	/* Determine the actual number of sg entries needed */
3465	if (buf_len % bsize_elem)
3466		num_elem = (buf_len / bsize_elem) + 1;
3467	else
3468		num_elem = buf_len / bsize_elem;
3469
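	/*
	 * Sizing example (illustrative, assuming 4K pages and
	 * IPR_MAX_SGLIST == 64): a 600KB image yields an sg_size of
	 * roughly 9.5KB, get_order() rounds that up to an order-2
	 * (16KB) allocation, so bsize_elem is 16KB and num_elem is 38.
	 */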
3470	/* Allocate a scatter/gather list for the DMA */
3471	sglist = kzalloc(sizeof(struct ipr_sglist) +
3472			 (sizeof(struct scatterlist) * (num_elem - 1)),
3473			 GFP_KERNEL);
3474
3475	if (sglist == NULL) {
3476		ipr_trace;
3477		return NULL;
3478	}
3479
3480	scatterlist = sglist->scatterlist;
3481	sg_init_table(scatterlist, num_elem);
3482
3483	sglist->order = order;
3484	sglist->num_sg = num_elem;
3485
3486	/* Allocate a bunch of sg elements */
3487	for (i = 0; i < num_elem; i++) {
3488		page = alloc_pages(GFP_KERNEL, order);
3489		if (!page) {
3490			ipr_trace;
3491
3492			/* Free up what we already allocated */
3493			for (j = i - 1; j >= 0; j--)
3494				__free_pages(sg_page(&scatterlist[j]), order);
3495			kfree(sglist);
3496			return NULL;
3497		}
3498
3499		sg_set_page(&scatterlist[i], page, 0, 0);
3500	}
3501
3502	return sglist;
3503}
3504
3505/**
3506 * ipr_free_ucode_buffer - Frees a microcode download buffer
3507 * @sglist:		scatter/gather list pointer
3508 *
3509 * Free a DMA'able ucode download buffer previously allocated with
3510 * ipr_alloc_ucode_buffer
3511 *
3512 * Return value:
3513 * 	nothing
3514 **/
3515static void ipr_free_ucode_buffer(struct ipr_sglist *sglist)
3516{
3517	int i;
3518
3519	for (i = 0; i < sglist->num_sg; i++)
3520		__free_pages(sg_page(&sglist->scatterlist[i]), sglist->order);
3521
3522	kfree(sglist);
3523}
3524
3525/**
3526 * ipr_copy_ucode_buffer - Copy user buffer to kernel buffer
3527 * @sglist:		scatter/gather list pointer
3528 * @buffer:		buffer pointer
3529 * @len:		buffer length
3530 *
3531 * Copy a microcode image from a user buffer into a buffer allocated by
3532 * ipr_alloc_ucode_buffer
3533 *
3534 * Return value:
3535 * 	0 on success / other on failure
3536 **/
3537static int ipr_copy_ucode_buffer(struct ipr_sglist *sglist,
3538				 u8 *buffer, u32 len)
3539{
3540	int bsize_elem, i, result = 0;
3541	struct scatterlist *scatterlist;
3542	void *kaddr;
3543
3544	/* Determine the actual number of bytes per element */
3545	bsize_elem = PAGE_SIZE * (1 << sglist->order);
3546
3547	scatterlist = sglist->scatterlist;
3548
3549	for (i = 0; i < (len / bsize_elem); i++, buffer += bsize_elem) {
3550		struct page *page = sg_page(&scatterlist[i]);
3551
3552		kaddr = kmap(page);
3553		memcpy(kaddr, buffer, bsize_elem);
3554		kunmap(page);
3555
3556		scatterlist[i].length = bsize_elem;
3557
3558		if (result != 0) {
3559			ipr_trace;
3560			return result;
3561		}
3562	}
3563
3564	if (len % bsize_elem) {
3565		struct page *page = sg_page(&scatterlist[i]);
3566
3567		kaddr = kmap(page);
3568		memcpy(kaddr, buffer, len % bsize_elem);
3569		kunmap(page);
3570
3571		scatterlist[i].length = len % bsize_elem;
3572	}
3573
3574	sglist->buffer_len = len;
3575	return result;
3576}
3577
3578/**
3579 * ipr_build_ucode_ioadl64 - Build a microcode download IOADL
3580 * @ipr_cmd:		ipr command struct
3581 * @sglist:		scatter/gather list
3582 *
3583 * Builds a microcode download IOA data list (IOADL).
3584 *
3585 **/
3586static void ipr_build_ucode_ioadl64(struct ipr_cmnd *ipr_cmd,
3587				    struct ipr_sglist *sglist)
3588{
3589	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
3590	struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;
3591	struct scatterlist *scatterlist = sglist->scatterlist;
3592	int i;
3593
3594	ipr_cmd->dma_use_sg = sglist->num_dma_sg;
3595	ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
3596	ioarcb->data_transfer_length = cpu_to_be32(sglist->buffer_len);
3597
3598	ioarcb->ioadl_len =
3599		cpu_to_be32(sizeof(struct ipr_ioadl64_desc) * ipr_cmd->dma_use_sg);
3600	for (i = 0; i < ipr_cmd->dma_use_sg; i++) {
3601		ioadl64[i].flags = cpu_to_be32(IPR_IOADL_FLAGS_WRITE);
3602		ioadl64[i].data_len = cpu_to_be32(sg_dma_len(&scatterlist[i]));
3603		ioadl64[i].address = cpu_to_be64(sg_dma_address(&scatterlist[i]));
3604	}
3605
3606	ioadl64[i-1].flags |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
3607}
3608
3609/**
3610 * ipr_build_ucode_ioadl - Build a microcode download IOADL
3611 * @ipr_cmd:	ipr command struct
3612 * @sglist:		scatter/gather list
3613 *
3614 * Builds a microcode download IOA data list (IOADL).
3615 *
3616 **/
3617static void ipr_build_ucode_ioadl(struct ipr_cmnd *ipr_cmd,
3618				  struct ipr_sglist *sglist)
3619{
3620	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
3621	struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
3622	struct scatterlist *scatterlist = sglist->scatterlist;
3623	int i;
3624
3625	ipr_cmd->dma_use_sg = sglist->num_dma_sg;
3626	ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
3627	ioarcb->data_transfer_length = cpu_to_be32(sglist->buffer_len);
3628
3629	ioarcb->ioadl_len =
3630		cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
3631
3632	for (i = 0; i < ipr_cmd->dma_use_sg; i++) {
3633		ioadl[i].flags_and_data_len =
3634			cpu_to_be32(IPR_IOADL_FLAGS_WRITE | sg_dma_len(&scatterlist[i]));
3635		ioadl[i].address =
3636			cpu_to_be32(sg_dma_address(&scatterlist[i]));
3637	}
3638
3639	ioadl[i-1].flags_and_data_len |=
3640		cpu_to_be32(IPR_IOADL_FLAGS_LAST);
3641}
3642
3643/**
3644 * ipr_update_ioa_ucode - Update IOA's microcode
3645 * @ioa_cfg:	ioa config struct
3646 * @sglist:		scatter/gather list
3647 *
3648 * Initiate an adapter reset to update the IOA's microcode
3649 *
3650 * Return value:
3651 * 	0 on success / -EIO on failure
3652 **/
3653static int ipr_update_ioa_ucode(struct ipr_ioa_cfg *ioa_cfg,
3654				struct ipr_sglist *sglist)
3655{
3656	unsigned long lock_flags;
3657
3658	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3659	while (ioa_cfg->in_reset_reload) {
3660		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3661		wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3662		spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3663	}
3664
3665	if (ioa_cfg->ucode_sglist) {
3666		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3667		dev_err(&ioa_cfg->pdev->dev,
3668			"Microcode download already in progress\n");
3669		return -EIO;
3670	}
3671
3672	sglist->num_dma_sg = pci_map_sg(ioa_cfg->pdev, sglist->scatterlist,
3673					sglist->num_sg, DMA_TO_DEVICE);
3674
3675	if (!sglist->num_dma_sg) {
3676		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3677		dev_err(&ioa_cfg->pdev->dev,
3678			"Failed to map microcode download buffer!\n");
3679		return -EIO;
3680	}
3681
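	/*
	 * Stash the mapped sglist where the adapter reset job can find it;
	 * the actual transfer of the microcode image to the IOA happens as
	 * part of the reset sequence driven elsewhere in the driver.
	 */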
3682	ioa_cfg->ucode_sglist = sglist;
3683	ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
3684	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3685	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3686
3687	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3688	ioa_cfg->ucode_sglist = NULL;
3689	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3690	return 0;
3691}
3692
3693/**
3694 * ipr_store_update_fw - Update the firmware on the adapter
3695 * @class_dev:	device struct
3696 * @buf:	buffer
3697 * @count:	buffer size
3698 *
3699 * This function will update the firmware on the adapter.
3700 *
3701 * Return value:
3702 * 	count on success / other on failure
3703 **/
3704static ssize_t ipr_store_update_fw(struct device *dev,
3705				   struct device_attribute *attr,
3706				   const char *buf, size_t count)
3707{
3708	struct Scsi_Host *shost = class_to_shost(dev);
3709	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3710	struct ipr_ucode_image_header *image_hdr;
3711	const struct firmware *fw_entry;
3712	struct ipr_sglist *sglist;
3713	char fname[100];
3714	char *src;
3715	int len, result, dnld_size;
3716
3717	if (!capable(CAP_SYS_ADMIN))
3718		return -EACCES;
3719
3720	len = snprintf(fname, 99, "%s", buf);
3721	fname[len-1] = '\0';
3722
3723	if (request_firmware(&fw_entry, fname, &ioa_cfg->pdev->dev)) {
3724		dev_err(&ioa_cfg->pdev->dev, "Firmware file %s not found\n", fname);
3725		return -EIO;
3726	}
3727
3728	image_hdr = (struct ipr_ucode_image_header *)fw_entry->data;
3729
3730	if (be32_to_cpu(image_hdr->header_length) > fw_entry->size ||
3731	    (ioa_cfg->vpd_cbs->page3_data.card_type &&
3732	     ioa_cfg->vpd_cbs->page3_data.card_type != image_hdr->card_type)) {
3733		dev_err(&ioa_cfg->pdev->dev, "Invalid microcode buffer\n");
3734		release_firmware(fw_entry);
3735		return -EINVAL;
3736	}
3737
3738	src = (u8 *)image_hdr + be32_to_cpu(image_hdr->header_length);
3739	dnld_size = fw_entry->size - be32_to_cpu(image_hdr->header_length);
3740	sglist = ipr_alloc_ucode_buffer(dnld_size);
3741
3742	if (!sglist) {
3743		dev_err(&ioa_cfg->pdev->dev, "Microcode buffer allocation failed\n");
3744		release_firmware(fw_entry);
3745		return -ENOMEM;
3746	}
3747
3748	result = ipr_copy_ucode_buffer(sglist, src, dnld_size);
3749
3750	if (result) {
3751		dev_err(&ioa_cfg->pdev->dev,
3752			"Microcode buffer copy to DMA buffer failed\n");
3753		goto out;
3754	}
3755
3756	result = ipr_update_ioa_ucode(ioa_cfg, sglist);
3757
3758	if (!result)
3759		result = count;
3760out:
3761	ipr_free_ucode_buffer(sglist);
3762	release_firmware(fw_entry);
3763	return result;
3764}
3765
3766static struct device_attribute ipr_update_fw_attr = {
3767	.attr = {
3768		.name =		"update_fw",
3769		.mode =		S_IWUSR,
3770	},
3771	.store = ipr_store_update_fw
3772};
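/*
 * Usage note (illustrative): writing a firmware image file name to the
 * "update_fw" sysfs attribute makes the driver fetch that image via
 * request_firmware() (typically from /lib/firmware), copy it into the
 * scatter/gather buffer built above, and reset the adapter to apply it.
 */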
3773
3774/**
3775 * ipr_show_fw_type - Show the adapter's firmware type.
3776 * @dev:	device struct
3777 * @buf:	buffer
3778 *
3779 * Return value:
3780 *	number of bytes printed to buffer
3781 **/
3782static ssize_t ipr_show_fw_type(struct device *dev,
3783				struct device_attribute *attr, char *buf)
3784{
3785	struct Scsi_Host *shost = class_to_shost(dev);
3786	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3787	unsigned long lock_flags = 0;
3788	int len;
3789
3790	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3791	len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->sis64);
3792	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3793	return len;
3794}
3795
3796static struct device_attribute ipr_ioa_fw_type_attr = {
3797	.attr = {
3798		.name =		"fw_type",
3799		.mode =		S_IRUGO,
3800	},
3801	.show = ipr_show_fw_type
3802};
3803
3804static struct device_attribute *ipr_ioa_attrs[] = {
3805	&ipr_fw_version_attr,
3806	&ipr_log_level_attr,
3807	&ipr_diagnostics_attr,
3808	&ipr_ioa_state_attr,
3809	&ipr_ioa_reset_attr,
3810	&ipr_update_fw_attr,
3811	&ipr_ioa_fw_type_attr,
3812	NULL,
3813};
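/*
 * The attributes above are host-level controls; they are referenced from
 * the driver's scsi_host_template so each adapter exposes them through its
 * Scsi_Host sysfs directory.
 */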
3814
3815#ifdef CONFIG_SCSI_IPR_DUMP
3816/**
3817 * ipr_read_dump - Dump the adapter
3818 * @filp:		open sysfs file
3819 * @kobj:		kobject struct
3820 * @bin_attr:		bin_attribute struct
3821 * @buf:		buffer
3822 * @off:		offset
3823 * @count:		buffer size
3824 *
3825 * Return value:
3826 *	number of bytes printed to buffer
3827 **/
3828static ssize_t ipr_read_dump(struct file *filp, struct kobject *kobj,
3829			     struct bin_attribute *bin_attr,
3830			     char *buf, loff_t off, size_t count)
3831{
3832	struct device *cdev = container_of(kobj, struct device, kobj);
3833	struct Scsi_Host *shost = class_to_shost(cdev);
3834	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3835	struct ipr_dump *dump;
3836	unsigned long lock_flags = 0;
3837	char *src;
3838	int len;
3839	size_t rc = count;
3840
3841	if (!capable(CAP_SYS_ADMIN))
3842		return -EACCES;
3843
3844	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3845	dump = ioa_cfg->dump;
3846
3847	if (ioa_cfg->sdt_state != DUMP_OBTAINED || !dump) {
3848		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3849		return 0;
3850	}
3851	kref_get(&dump->kref);
3852	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3853
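	/*
	 * The dump image handed to user space is laid out as the driver
	 * dump header, then the IOA dump header, then the page-sized
	 * chunks in ioa_data[].  Each of the copy blocks below handles
	 * one of those regions, advancing off and shrinking count as it
	 * goes.
	 */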
3854	if (off > dump->driver_dump.hdr.len) {
3855		kref_put(&dump->kref, ipr_release_dump);
3856		return 0;
3857	}
3858
3859	if (off + count > dump->driver_dump.hdr.len) {
3860		count = dump->driver_dump.hdr.len - off;
3861		rc = count;
3862	}
3863
3864	if (count && off < sizeof(dump->driver_dump)) {
3865		if (off + count > sizeof(dump->driver_dump))
3866			len = sizeof(dump->driver_dump) - off;
3867		else
3868			len = count;
3869		src = (u8 *)&dump->driver_dump + off;
3870		memcpy(buf, src, len);
3871		buf += len;
3872		off += len;
3873		count -= len;
3874	}
3875
3876	off -= sizeof(dump->driver_dump);
3877
3878	if (count && off < offsetof(struct ipr_ioa_dump, ioa_data)) {
3879		if (off + count > offsetof(struct ipr_ioa_dump, ioa_data))
3880			len = offsetof(struct ipr_ioa_dump, ioa_data) - off;
3881		else
3882			len = count;
3883		src = (u8 *)&dump->ioa_dump + off;
3884		memcpy(buf, src, len);
3885		buf += len;
3886		off += len;
3887		count -= len;
3888	}
3889
3890	off -= offsetof(struct ipr_ioa_dump, ioa_data);
3891
3892	while (count) {
3893		if ((off & PAGE_MASK) != ((off + count) & PAGE_MASK))
3894			len = PAGE_ALIGN(off) - off;
3895		else
3896			len = count;
3897		src = (u8 *)dump->ioa_dump.ioa_data[(off & PAGE_MASK) >> PAGE_SHIFT];
3898		src += off & ~PAGE_MASK;
3899		memcpy(buf, src, len);
3900		buf += len;
3901		off += len;
3902		count -= len;
3903	}
3904
3905	kref_put(&dump->kref, ipr_release_dump);
3906	return rc;
3907}
3908
3909/**
3910 * ipr_alloc_dump - Prepare for adapter dump
3911 * @ioa_cfg:	ioa config struct
3912 *
3913 * Return value:
3914 *	0 on success / other on failure
3915 **/
3916static int ipr_alloc_dump(struct ipr_ioa_cfg *ioa_cfg)
3917{
3918	struct ipr_dump *dump;
3919	unsigned long lock_flags = 0;
3920
3921	dump = kzalloc(sizeof(struct ipr_dump), GFP_KERNEL);
3922
3923	if (!dump) {
3924		ipr_err("Dump memory allocation failed\n");
3925		return -ENOMEM;
3926	}
3927
3928	kref_init(&dump->kref);
3929	dump->ioa_cfg = ioa_cfg;
3930
3931	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3932
3933	if (INACTIVE != ioa_cfg->sdt_state) {
3934		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3935		kfree(dump);
3936		return 0;
3937	}
3938
3939	ioa_cfg->dump = dump;
3940	ioa_cfg->sdt_state = WAIT_FOR_DUMP;
3941	if (ioa_cfg->ioa_is_dead && !ioa_cfg->dump_taken) {
3942		ioa_cfg->dump_taken = 1;
3943		schedule_work(&ioa_cfg->work_q);
3944	}
3945	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3946
3947	return 0;
3948}
3949
3950/**
3951 * ipr_free_dump - Free adapter dump memory
3952 * @ioa_cfg:	ioa config struct
3953 *
3954 * Return value:
3955 *	0 on success / other on failure
3956 **/
3957static int ipr_free_dump(struct ipr_ioa_cfg *ioa_cfg)
3958{
3959	struct ipr_dump *dump;
3960	unsigned long lock_flags = 0;
3961
3962	ENTER;
3963
3964	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3965	dump = ioa_cfg->dump;
3966	if (!dump) {
3967		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3968		return 0;
3969	}
3970
3971	ioa_cfg->dump = NULL;
3972	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3973
3974	kref_put(&dump->kref, ipr_release_dump);
3975
3976	LEAVE;
3977	return 0;
3978}
3979
3980/**
3981 * ipr_write_dump - Setup dump state of adapter
3982 * @filp:		open sysfs file
3983 * @kobj:		kobject struct
3984 * @bin_attr:		bin_attribute struct
3985 * @buf:		buffer
3986 * @off:		offset
3987 * @count:		buffer size
3988 *
3989 * Return value:
3990 *	count on success / other on failure
3991 **/
3992static ssize_t ipr_write_dump(struct file *filp, struct kobject *kobj,
3993			      struct bin_attribute *bin_attr,
3994			      char *buf, loff_t off, size_t count)
3995{
3996	struct device *cdev = container_of(kobj, struct device, kobj);
3997	struct Scsi_Host *shost = class_to_shost(cdev);
3998	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3999	int rc;
4000
4001	if (!capable(CAP_SYS_ADMIN))
4002		return -EACCES;
4003
4004	if (buf[0] == '1')
4005		rc = ipr_alloc_dump(ioa_cfg);
4006	else if (buf[0] == '0')
4007		rc = ipr_free_dump(ioa_cfg);
4008	else
4009		return -EINVAL;
4010
4011	if (rc)
4012		return rc;
4013	else
4014		return count;
4015}
4016
4017static struct bin_attribute ipr_dump_attr = {
4018	.attr =	{
4019		.name = "dump",
4020		.mode = S_IRUSR | S_IWUSR,
4021	},
4022	.size = 0,
4023	.read = ipr_read_dump,
4024	.write = ipr_write_dump
4025};
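/*
 * Usage note (illustrative): writing '1' to the "dump" binary sysfs
 * attribute allocates dump memory and arms dump collection, writing '0'
 * frees it, and reading the attribute back returns the collected dump
 * image.
 */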
4026#else
4027static int ipr_free_dump(struct ipr_ioa_cfg *ioa_cfg) { return 0; }
4028#endif
4029
4030/**
4031 * ipr_change_queue_depth - Change the device's queue depth
4032 * @sdev:	scsi device struct
4033 * @qdepth:	depth to set
4034 * @reason:	calling context
4035 *
4036 * Return value:
4037 * 	actual depth set
4038 **/
4039static int ipr_change_queue_depth(struct scsi_device *sdev, int qdepth,
4040				  int reason)
4041{
4042	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4043	struct ipr_resource_entry *res;
4044	unsigned long lock_flags = 0;
4045
4046	if (reason != SCSI_QDEPTH_DEFAULT)
4047		return -EOPNOTSUPP;
4048
4049	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4050	res = (struct ipr_resource_entry *)sdev->hostdata;
4051
4052	if (res && ipr_is_gata(res) && qdepth > IPR_MAX_CMD_PER_ATA_LUN)
4053		qdepth = IPR_MAX_CMD_PER_ATA_LUN;
4054	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4055
4056	scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), qdepth);
4057	return sdev->queue_depth;
4058}
4059
4060/**
4061 * ipr_change_queue_type - Change the device's queue type
4062 * @sdev:		scsi device struct
4063 * @tag_type:	type of tags to use
4064 *
4065 * Return value:
4066 * 	actual queue type set
4067 **/
4068static int ipr_change_queue_type(struct scsi_device *sdev, int tag_type)
4069{
4070	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4071	struct ipr_resource_entry *res;
4072	unsigned long lock_flags = 0;
4073
4074	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4075	res = (struct ipr_resource_entry *)sdev->hostdata;
4076
4077	if (res) {
4078		if (ipr_is_gscsi(res) && sdev->tagged_supported) {
4079			/*
4080			 * We don't bother quiescing the device here since the
4081			 * adapter firmware does it for us.
4082			 */
4083			scsi_set_tag_type(sdev, tag_type);
4084
4085			if (tag_type)
4086				scsi_activate_tcq(sdev, sdev->queue_depth);
4087			else
4088				scsi_deactivate_tcq(sdev, sdev->queue_depth);
4089		} else
4090			tag_type = 0;
4091	} else
4092		tag_type = 0;
4093
4094	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4095	return tag_type;
4096}
4097
4098/**
4099 * ipr_show_adapter_handle - Show the adapter's resource handle for this device
4100 * @dev:	device struct
4101 * @attr:	device attribute structure
4102 * @buf:	buffer
4103 *
4104 * Return value:
4105 * 	number of bytes printed to buffer
4106 **/
4107static ssize_t ipr_show_adapter_handle(struct device *dev, struct device_attribute *attr, char *buf)
4108{
4109	struct scsi_device *sdev = to_scsi_device(dev);
4110	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4111	struct ipr_resource_entry *res;
4112	unsigned long lock_flags = 0;
4113	ssize_t len = -ENXIO;
4114
4115	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4116	res = (struct ipr_resource_entry *)sdev->hostdata;
4117	if (res)
4118		len = snprintf(buf, PAGE_SIZE, "%08X\n", res->res_handle);
4119	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4120	return len;
4121}
4122
4123static struct device_attribute ipr_adapter_handle_attr = {
4124	.attr = {
4125		.name = 	"adapter_handle",
4126		.mode =		S_IRUSR,
4127	},
4128	.show = ipr_show_adapter_handle
4129};
4130
4131/**
4132 * ipr_show_resource_path - Show the resource path or the resource address for
4133 *			    this device.
4134 * @dev:	device struct
4135 * @attr:	device attribute structure
4136 * @buf:	buffer
4137 *
4138 * Return value:
4139 * 	number of bytes printed to buffer
4140 **/
4141static ssize_t ipr_show_resource_path(struct device *dev, struct device_attribute *attr, char *buf)
4142{
4143	struct scsi_device *sdev = to_scsi_device(dev);
4144	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4145	struct ipr_resource_entry *res;
4146	unsigned long lock_flags = 0;
4147	ssize_t len = -ENXIO;
4148	char buffer[IPR_MAX_RES_PATH_LENGTH];
4149
4150	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4151	res = (struct ipr_resource_entry *)sdev->hostdata;
4152	if (res && ioa_cfg->sis64)
4153		len = snprintf(buf, PAGE_SIZE, "%s\n",
4154			       ipr_format_res_path(res->res_path, buffer,
4155						   sizeof(buffer)));
4156	else if (res)
4157		len = snprintf(buf, PAGE_SIZE, "%d:%d:%d:%d\n", ioa_cfg->host->host_no,
4158			       res->bus, res->target, res->lun);
4159
4160	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4161	return len;
4162}
4163
4164static struct device_attribute ipr_resource_path_attr = {
4165	.attr = {
4166		.name = 	"resource_path",
4167		.mode =		S_IRUGO,
4168	},
4169	.show = ipr_show_resource_path
4170};
4171
4172/**
4173 * ipr_show_device_id - Show the device_id for this device.
4174 * @dev:	device struct
4175 * @attr:	device attribute structure
4176 * @buf:	buffer
4177 *
4178 * Return value:
4179 *	number of bytes printed to buffer
4180 **/
4181static ssize_t ipr_show_device_id(struct device *dev, struct device_attribute *attr, char *buf)
4182{
4183	struct scsi_device *sdev = to_scsi_device(dev);
4184	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4185	struct ipr_resource_entry *res;
4186	unsigned long lock_flags = 0;
4187	ssize_t len = -ENXIO;
4188
4189	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4190	res = (struct ipr_resource_entry *)sdev->hostdata;
4191	if (res && ioa_cfg->sis64)
4192		len = snprintf(buf, PAGE_SIZE, "0x%llx\n", res->dev_id);
4193	else if (res)
4194		len = snprintf(buf, PAGE_SIZE, "0x%llx\n", res->lun_wwn);
4195
4196	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4197	return len;
4198}
4199
4200static struct device_attribute ipr_device_id_attr = {
4201	.attr = {
4202		.name =		"device_id",
4203		.mode =		S_IRUGO,
4204	},
4205	.show = ipr_show_device_id
4206};
4207
4208/**
4209 * ipr_show_resource_type - Show the resource type for this device.
4210 * @dev:	device struct
4211 * @attr:	device attribute structure
4212 * @buf:	buffer
4213 *
4214 * Return value:
4215 *	number of bytes printed to buffer
4216 **/
4217static ssize_t ipr_show_resource_type(struct device *dev, struct device_attribute *attr, char *buf)
4218{
4219	struct scsi_device *sdev = to_scsi_device(dev);
4220	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4221	struct ipr_resource_entry *res;
4222	unsigned long lock_flags = 0;
4223	ssize_t len = -ENXIO;
4224
4225	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4226	res = (struct ipr_resource_entry *)sdev->hostdata;
4227
4228	if (res)
4229		len = snprintf(buf, PAGE_SIZE, "%x\n", res->type);
4230
4231	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4232	return len;
4233}
4234
4235static struct device_attribute ipr_resource_type_attr = {
4236	.attr = {
4237		.name =		"resource_type",
4238		.mode =		S_IRUGO,
4239	},
4240	.show = ipr_show_resource_type
4241};
4242
4243static struct device_attribute *ipr_dev_attrs[] = {
4244	&ipr_adapter_handle_attr,
4245	&ipr_resource_path_attr,
4246	&ipr_device_id_attr,
4247	&ipr_resource_type_attr,
4248	NULL,
4249};
4250
4251/**
4252 * ipr_biosparam - Return the HSC mapping
4253 * @sdev:			scsi device struct
4254 * @block_device:	block device pointer
4255 * @capacity:		capacity of the device
4256 * @parm:			Array containing returned HSC values.
4257 *
4258 * This function generates the HSC parms that fdisk uses.
4259 * We want to make sure we return something that places partitions
4260 * on 4k boundaries for best performance with the IOA.
4261 *
4262 * Return value:
4263 * 	0 on success
4264 **/
4265static int ipr_biosparam(struct scsi_device *sdev,
4266			 struct block_device *block_device,
4267			 sector_t capacity, int *parm)
4268{
4269	int heads, sectors;
4270	sector_t cylinders;
4271
4272	heads = 128;
4273	sectors = 32;
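	/*
	 * 128 heads * 32 sectors/track = 4096 sectors (2MB) per cylinder,
	 * so cylinder-aligned partitions land on 4K boundaries as the
	 * comment above requires.
	 */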
4274
4275	cylinders = capacity;
4276	sector_div(cylinders, (128 * 32));
4277
4278	/* return result */
4279	parm[0] = heads;
4280	parm[1] = sectors;
4281	parm[2] = cylinders;
4282
4283	return 0;
4284}
4285
4286/**
4287 * ipr_find_starget - Find target based on bus/target.
4288 * @starget:	scsi target struct
4289 *
4290 * Return value:
4291 * 	resource entry pointer if found / NULL if not found
4292 **/
4293static struct ipr_resource_entry *ipr_find_starget(struct scsi_target *starget)
4294{
4295	struct Scsi_Host *shost = dev_to_shost(&starget->dev);
4296	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
4297	struct ipr_resource_entry *res;
4298
4299	list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
4300		if ((res->bus == starget->channel) &&
4301		    (res->target == starget->id) &&
4302		    (res->lun == 0)) {
4303			return res;
4304		}
4305	}
4306
4307	return NULL;
4308}
4309
4310static struct ata_port_info sata_port_info;
4311
4312/**
4313 * ipr_target_alloc - Prepare for commands to a SCSI target
4314 * @starget:	scsi target struct
4315 *
4316 * If the device is a SATA device, this function allocates an
4317 * ATA port with libata, else it does nothing.
4318 *
4319 * Return value:
4320 * 	0 on success / non-0 on failure
4321 **/
4322static int ipr_target_alloc(struct scsi_target *starget)
4323{
4324	struct Scsi_Host *shost = dev_to_shost(&starget->dev);
4325	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
4326	struct ipr_sata_port *sata_port;
4327	struct ata_port *ap;
4328	struct ipr_resource_entry *res;
4329	unsigned long lock_flags;
4330
4331	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4332	res = ipr_find_starget(starget);
4333	starget->hostdata = NULL;
4334
4335	if (res && ipr_is_gata(res)) {
4336		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4337		sata_port = kzalloc(sizeof(*sata_port), GFP_KERNEL);
4338		if (!sata_port)
4339			return -ENOMEM;
4340
4341		ap = ata_sas_port_alloc(&ioa_cfg->ata_host, &sata_port_info, shost);
4342		if (ap) {
4343			spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4344			sata_port->ioa_cfg = ioa_cfg;
4345			sata_port->ap = ap;
4346			sata_port->res = res;
4347
4348			res->sata_port = sata_port;
4349			ap->private_data = sata_port;
4350			starget->hostdata = sata_port;
4351		} else {
4352			kfree(sata_port);
4353			return -ENOMEM;
4354		}
4355	}
4356	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4357
4358	return 0;
4359}
4360
4361/**
4362 * ipr_target_destroy - Destroy a SCSI target
4363 * @starget:	scsi target struct
4364 *
4365 * If the device was a SATA device, this function frees the libata
4366 * ATA port, else it does nothing.
4367 *
4368 **/
4369static void ipr_target_destroy(struct scsi_target *starget)
4370{
4371	struct ipr_sata_port *sata_port = starget->hostdata;
4372	struct Scsi_Host *shost = dev_to_shost(&starget->dev);
4373	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
4374
4375	if (ioa_cfg->sis64) {
4376		if (starget->channel == IPR_ARRAY_VIRTUAL_BUS)
4377			clear_bit(starget->id, ioa_cfg->array_ids);
4378		else if (starget->channel == IPR_VSET_VIRTUAL_BUS)
4379			clear_bit(starget->id, ioa_cfg->vset_ids);
4380		else if (starget->channel == 0)
4381			clear_bit(starget->id, ioa_cfg->target_ids);
4382	}
4383
4384	if (sata_port) {
4385		starget->hostdata = NULL;
4386		ata_sas_port_destroy(sata_port->ap);
4387		kfree(sata_port);
4388	}
4389}
4390
4391/**
4392 * ipr_find_sdev - Find device based on bus/target/lun.
4393 * @sdev:	scsi device struct
4394 *
4395 * Return value:
4396 * 	resource entry pointer if found / NULL if not found
4397 **/
4398static struct ipr_resource_entry *ipr_find_sdev(struct scsi_device *sdev)
4399{
4400	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
4401	struct ipr_resource_entry *res;
4402
4403	list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
4404		if ((res->bus == sdev->channel) &&
4405		    (res->target == sdev->id) &&
4406		    (res->lun == sdev->lun))
4407			return res;
4408	}
4409
4410	return NULL;
4411}
4412
4413/**
4414 * ipr_slave_destroy - Unconfigure a SCSI device
4415 * @sdev:	scsi device struct
4416 *
4417 * Return value:
4418 * 	nothing
4419 **/
4420static void ipr_slave_destroy(struct scsi_device *sdev)
4421{
4422	struct ipr_resource_entry *res;
4423	struct ipr_ioa_cfg *ioa_cfg;
4424	unsigned long lock_flags = 0;
4425
4426	ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
4427
4428	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4429	res = (struct ipr_resource_entry *) sdev->hostdata;
4430	if (res) {
4431		if (res->sata_port)
4432			res->sata_port->ap->link.device[0].class = ATA_DEV_NONE;
4433		sdev->hostdata = NULL;
4434		res->sdev = NULL;
4435		res->sata_port = NULL;
4436	}
4437	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4438}
4439
4440/**
4441 * ipr_slave_configure - Configure a SCSI device
4442 * @sdev:	scsi device struct
4443 *
4444 * This function configures the specified scsi device.
4445 *
4446 * Return value:
4447 * 	0 on success
4448 **/
4449static int ipr_slave_configure(struct scsi_device *sdev)
4450{
4451	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
4452	struct ipr_resource_entry *res;
4453	struct ata_port *ap = NULL;
4454	unsigned long lock_flags = 0;
4455	char buffer[IPR_MAX_RES_PATH_LENGTH];
4456
4457	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4458	res = sdev->hostdata;
4459	if (res) {
4460		if (ipr_is_af_dasd_device(res))
4461			sdev->type = TYPE_RAID;
4462		if (ipr_is_af_dasd_device(res) || ipr_is_ioa_resource(res)) {
4463			sdev->scsi_level = 4;
4464			sdev->no_uld_attach = 1;
4465		}
4466		if (ipr_is_vset_device(res)) {
4467			blk_queue_rq_timeout(sdev->request_queue,
4468					     IPR_VSET_RW_TIMEOUT);
4469			blk_queue_max_hw_sectors(sdev->request_queue, IPR_VSET_MAX_SECTORS);
4470		}
4471		if (ipr_is_gata(res) && res->sata_port)
4472			ap = res->sata_port->ap;
4473		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4474
4475		if (ap) {
4476			scsi_adjust_queue_depth(sdev, 0, IPR_MAX_CMD_PER_ATA_LUN);
4477			ata_sas_slave_configure(sdev, ap);
4478		} else
4479			scsi_adjust_queue_depth(sdev, 0, sdev->host->cmd_per_lun);
4480		if (ioa_cfg->sis64)
4481			sdev_printk(KERN_INFO, sdev, "Resource path: %s\n",
4482				    ipr_format_res_path(res->res_path, buffer,
4483							sizeof(buffer)));
4484		return 0;
4485	}
4486	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4487	return 0;
4488}
4489
4490/**
4491 * ipr_ata_slave_alloc - Prepare for commands to a SATA device
4492 * @sdev:	scsi device struct
4493 *
4494 * This function initializes an ATA port so that future commands
4495 * sent through queuecommand will work.
4496 *
4497 * Return value:
4498 * 	0 on success
4499 **/
4500static int ipr_ata_slave_alloc(struct scsi_device *sdev)
4501{
4502	struct ipr_sata_port *sata_port = NULL;
4503	int rc = -ENXIO;
4504
4505	ENTER;
4506	if (sdev->sdev_target)
4507		sata_port = sdev->sdev_target->hostdata;
4508	if (sata_port)
4509		rc = ata_sas_port_init(sata_port->ap);
4510	if (rc)
4511		ipr_slave_destroy(sdev);
4512
4513	LEAVE;
4514	return rc;
4515}
4516
4517/**
4518 * ipr_slave_alloc - Prepare for commands to a device.
4519 * @sdev:	scsi device struct
4520 *
4521 * This function saves a pointer to the resource entry
4522 * in the scsi device struct if the device exists. We
4523 * can then use this pointer in ipr_queuecommand when
4524 * handling new commands.
4525 *
4526 * Return value:
4527 * 	0 on success / -ENXIO if device does not exist
4528 **/
4529static int ipr_slave_alloc(struct scsi_device *sdev)
4530{
4531	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
4532	struct ipr_resource_entry *res;
4533	unsigned long lock_flags;
4534	int rc = -ENXIO;
4535
4536	sdev->hostdata = NULL;
4537
4538	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4539
4540	res = ipr_find_sdev(sdev);
4541	if (res) {
4542		res->sdev = sdev;
4543		res->add_to_ml = 0;
4544		res->in_erp = 0;
4545		sdev->hostdata = res;
4546		if (!ipr_is_naca_model(res))
4547			res->needs_sync_complete = 1;
4548		rc = 0;
4549		if (ipr_is_gata(res)) {
4550			spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4551			return ipr_ata_slave_alloc(sdev);
4552		}
4553	}
4554
4555	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4556
4557	return rc;
4558}
4559
4560/**
4561 * ipr_eh_host_reset - Reset the host adapter
4562 * @scsi_cmd:	scsi command struct
4563 *
4564 * Return value:
4565 * 	SUCCESS / FAILED
4566 **/
4567static int __ipr_eh_host_reset(struct scsi_cmnd * scsi_cmd)
4568{
4569	struct ipr_ioa_cfg *ioa_cfg;
4570	int rc;
4571
4572	ENTER;
4573	ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata;
4574
4575	dev_err(&ioa_cfg->pdev->dev,
4576		"Adapter being reset as a result of error recovery.\n");
4577
4578	if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
4579		ioa_cfg->sdt_state = GET_DUMP;
4580
4581	rc = ipr_reset_reload(ioa_cfg, IPR_SHUTDOWN_ABBREV);
4582
4583	LEAVE;
4584	return rc;
4585}
4586
4587static int ipr_eh_host_reset(struct scsi_cmnd * cmd)
4588{
4589	int rc;
4590
4591	spin_lock_irq(cmd->device->host->host_lock);
4592	rc = __ipr_eh_host_reset(cmd);
4593	spin_unlock_irq(cmd->device->host->host_lock);
4594
4595	return rc;
4596}
4597
4598/**
4599 * ipr_device_reset - Reset the device
4600 * @ioa_cfg:	ioa config struct
4601 * @res:		resource entry struct
4602 *
4603 * This function issues a device reset to the affected device.
4604 * If the device is a SCSI device, a LUN reset will be sent
4605 * to the device first. If that does not work, a target reset
4606 * will be sent. If the device is a SATA device, a PHY reset will
4607 * be sent.
4608 *
4609 * Return value:
4610 *	0 on success / non-zero on failure
4611 **/
4612static int ipr_device_reset(struct ipr_ioa_cfg *ioa_cfg,
4613			    struct ipr_resource_entry *res)
4614{
4615	struct ipr_cmnd *ipr_cmd;
4616	struct ipr_ioarcb *ioarcb;
4617	struct ipr_cmd_pkt *cmd_pkt;
4618	struct ipr_ioarcb_ata_regs *regs;
4619	u32 ioasc;
4620
4621	ENTER;
4622	ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
4623	ioarcb = &ipr_cmd->ioarcb;
4624	cmd_pkt = &ioarcb->cmd_pkt;
4625
4626	if (ipr_cmd->ioa_cfg->sis64) {
4627		regs = &ipr_cmd->i.ata_ioadl.regs;
4628		ioarcb->add_cmd_parms_offset = cpu_to_be16(sizeof(*ioarcb));
4629	} else
4630		regs = &ioarcb->u.add_data.u.regs;
4631
4632	ioarcb->res_handle = res->res_handle;
4633	cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
4634	cmd_pkt->cdb[0] = IPR_RESET_DEVICE;
4635	if (ipr_is_gata(res)) {
4636		cmd_pkt->cdb[2] = IPR_ATA_PHY_RESET;
4637		ioarcb->add_cmd_parms_len = cpu_to_be16(sizeof(regs->flags));
4638		regs->flags |= IPR_ATA_FLAG_STATUS_ON_GOOD_COMPLETION;
4639	}
4640
4641	ipr_send_blocking_cmd(ipr_cmd, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
4642	ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
4643	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
4644	if (ipr_is_gata(res) && res->sata_port && ioasc != IPR_IOASC_IOA_WAS_RESET) {
4645		if (ipr_cmd->ioa_cfg->sis64)
4646			memcpy(&res->sata_port->ioasa, &ipr_cmd->s.ioasa64.u.gata,
4647			       sizeof(struct ipr_ioasa_gata));
4648		else
4649			memcpy(&res->sata_port->ioasa, &ipr_cmd->s.ioasa.u.gata,
4650			       sizeof(struct ipr_ioasa_gata));
4651	}
4652
4653	LEAVE;
4654	return (IPR_IOASC_SENSE_KEY(ioasc) ? -EIO : 0);
4655}
4656
4657/**
4658 * ipr_sata_reset - Reset the SATA port
4659 * @link:	SATA link to reset
4660 * @classes:	class of the attached device
4661 *
4662 * This function issues a SATA phy reset to the affected ATA link.
4663 *
4664 * Return value:
4665 *	0 on success / non-zero on failure
4666 **/
4667static int ipr_sata_reset(struct ata_link *link, unsigned int *classes,
4668				unsigned long deadline)
4669{
4670	struct ipr_sata_port *sata_port = link->ap->private_data;
4671	struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
4672	struct ipr_resource_entry *res;
4673	unsigned long lock_flags = 0;
4674	int rc = -ENXIO;
4675
4676	ENTER;
4677	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4678	while (ioa_cfg->in_reset_reload) {
4679		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4680		wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
4681		spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4682	}
4683
4684	res = sata_port->res;
4685	if (res) {
4686		rc = ipr_device_reset(ioa_cfg, res);
4687		*classes = res->ata_class;
4688	}
4689
4690	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4691	LEAVE;
4692	return rc;
4693}
4694
4695/**
4696 * __ipr_eh_dev_reset - Reset the device
4697 * @scsi_cmd:	scsi command struct
4698 *
4699 * This function issues a device reset to the affected device.
4700 * A LUN reset will be sent to the device first. If that does
4701 * not work, a target reset will be sent.
4702 *
4703 * Return value:
4704 *	SUCCESS / FAILED
4705 **/
4706static int __ipr_eh_dev_reset(struct scsi_cmnd * scsi_cmd)
4707{
4708	struct ipr_cmnd *ipr_cmd;
4709	struct ipr_ioa_cfg *ioa_cfg;
4710	struct ipr_resource_entry *res;
4711	struct ata_port *ap;
4712	int rc = 0;
4713
4714	ENTER;
4715	ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata;
4716	res = scsi_cmd->device->hostdata;
4717
4718	if (!res)
4719		return FAILED;
4720
4721	/*
4722	 * If we are currently going through reset/reload, return failed. This will force the
4723	 * mid-layer to call ipr_eh_host_reset, which will then go to sleep and wait for the
4724	 * reset to complete
4725	 */
4726	if (ioa_cfg->in_reset_reload)
4727		return FAILED;
4728	if (ioa_cfg->ioa_is_dead)
4729		return FAILED;
4730
4731	list_for_each_entry(ipr_cmd, &ioa_cfg->pending_q, queue) {
4732		if (ipr_cmd->ioarcb.res_handle == res->res_handle) {
4733			if (ipr_cmd->scsi_cmd)
4734				ipr_cmd->done = ipr_scsi_eh_done;
4735			if (ipr_cmd->qc)
4736				ipr_cmd->done = ipr_sata_eh_done;
4737			if (ipr_cmd->qc && !(ipr_cmd->qc->flags & ATA_QCFLAG_FAILED)) {
4738				ipr_cmd->qc->err_mask |= AC_ERR_TIMEOUT;
4739				ipr_cmd->qc->flags |= ATA_QCFLAG_FAILED;
4740			}
4741		}
4742	}
4743
4744	res->resetting_device = 1;
4745	scmd_printk(KERN_ERR, scsi_cmd, "Resetting device\n");
4746
4747	if (ipr_is_gata(res) && res->sata_port) {
4748		ap = res->sata_port->ap;
4749		spin_unlock_irq(scsi_cmd->device->host->host_lock);
4750		ata_std_error_handler(ap);
4751		spin_lock_irq(scsi_cmd->device->host->host_lock);
4752
4753		list_for_each_entry(ipr_cmd, &ioa_cfg->pending_q, queue) {
4754			if (ipr_cmd->ioarcb.res_handle == res->res_handle) {
4755				rc = -EIO;
4756				break;
4757			}
4758		}
4759	} else
4760		rc = ipr_device_reset(ioa_cfg, res);
4761	res->resetting_device = 0;
4762
4763	LEAVE;
4764	return (rc ? FAILED : SUCCESS);
4765}
4766
4767static int ipr_eh_dev_reset(struct scsi_cmnd * cmd)
4768{
4769	int rc;
4770
4771	spin_lock_irq(cmd->device->host->host_lock);
4772	rc = __ipr_eh_dev_reset(cmd);
4773	spin_unlock_irq(cmd->device->host->host_lock);
4774
4775	return rc;
4776}
4777
4778/**
4779 * ipr_bus_reset_done - Op done function for bus reset.
4780 * @ipr_cmd:	ipr command struct
4781 *
4782 * This function is the op done function for a bus reset
4783 *
4784 * Return value:
4785 * 	none
4786 **/
4787static void ipr_bus_reset_done(struct ipr_cmnd *ipr_cmd)
4788{
4789	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
4790	struct ipr_resource_entry *res;
4791
4792	ENTER;
4793	if (!ioa_cfg->sis64)
4794		list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
4795			if (res->res_handle == ipr_cmd->ioarcb.res_handle) {
4796				scsi_report_bus_reset(ioa_cfg->host, res->bus);
4797				break;
4798			}
4799		}
4800
4801	/*
4802	 * If abort has not completed, indicate the reset has, else call the
4803	 * abort's done function to wake the sleeping eh thread
4804	 */
4805	if (ipr_cmd->sibling->sibling)
4806		ipr_cmd->sibling->sibling = NULL;
4807	else
4808		ipr_cmd->sibling->done(ipr_cmd->sibling);
4809
4810	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
4811	LEAVE;
4812}
4813
4814/**
4815 * ipr_abort_timeout - An abort task has timed out
4816 * @ipr_cmd:	ipr command struct
4817 *
4818 * This function handles when an abort task times out. If this
4819 * happens we issue a bus reset since we have resources tied
4820 * up that must be freed before returning to the midlayer.
4821 *
4822 * Return value:
4823 *	none
4824 **/
4825static void ipr_abort_timeout(struct ipr_cmnd *ipr_cmd)
4826{
4827	struct ipr_cmnd *reset_cmd;
4828	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
4829	struct ipr_cmd_pkt *cmd_pkt;
4830	unsigned long lock_flags = 0;
4831
4832	ENTER;
4833	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4834	if (ipr_cmd->completion.done || ioa_cfg->in_reset_reload) {
4835		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4836		return;
4837	}
4838
4839	sdev_printk(KERN_ERR, ipr_cmd->u.sdev, "Abort timed out. Resetting bus.\n");
4840	reset_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
4841	ipr_cmd->sibling = reset_cmd;
4842	reset_cmd->sibling = ipr_cmd;
4843	reset_cmd->ioarcb.res_handle = ipr_cmd->ioarcb.res_handle;
4844	cmd_pkt = &reset_cmd->ioarcb.cmd_pkt;
4845	cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
4846	cmd_pkt->cdb[0] = IPR_RESET_DEVICE;
4847	cmd_pkt->cdb[2] = IPR_RESET_TYPE_SELECT | IPR_BUS_RESET;
4848
4849	ipr_do_req(reset_cmd, ipr_bus_reset_done, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
4850	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4851	LEAVE;
4852}
4853
4854/**
4855 * ipr_cancel_op - Cancel specified op
4856 * @scsi_cmd:	scsi command struct
4857 *
4858 * This function cancels specified op.
4859 *
4860 * Return value:
4861 *	SUCCESS / FAILED
4862 **/
4863static int ipr_cancel_op(struct scsi_cmnd * scsi_cmd)
4864{
4865	struct ipr_cmnd *ipr_cmd;
4866	struct ipr_ioa_cfg *ioa_cfg;
4867	struct ipr_resource_entry *res;
4868	struct ipr_cmd_pkt *cmd_pkt;
4869	u32 ioasc;
4870	int op_found = 0;
4871
4872	ENTER;
4873	ioa_cfg = (struct ipr_ioa_cfg *)scsi_cmd->device->host->hostdata;
4874	res = scsi_cmd->device->hostdata;
4875
4876	/* If we are currently going through reset/reload, return failed.
4877	 * This will force the mid-layer to call ipr_eh_host_reset,
4878	 * which will then go to sleep and wait for the reset to complete
4879	 */
4880	if (ioa_cfg->in_reset_reload || ioa_cfg->ioa_is_dead)
4881		return FAILED;
4882	if (!res || !ipr_is_gscsi(res))
4883		return FAILED;
4884
4885	list_for_each_entry(ipr_cmd, &ioa_cfg->pending_q, queue) {
4886		if (ipr_cmd->scsi_cmd == scsi_cmd) {
4887			ipr_cmd->done = ipr_scsi_eh_done;
4888			op_found = 1;
4889			break;
4890		}
4891	}
4892
4893	if (!op_found)
4894		return SUCCESS;
4895
4896	ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
4897	ipr_cmd->ioarcb.res_handle = res->res_handle;
4898	cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
4899	cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
4900	cmd_pkt->cdb[0] = IPR_CANCEL_ALL_REQUESTS;
4901	ipr_cmd->u.sdev = scsi_cmd->device;
4902
4903	scmd_printk(KERN_ERR, scsi_cmd, "Aborting command: %02X\n",
4904		    scsi_cmd->cmnd[0]);
4905	ipr_send_blocking_cmd(ipr_cmd, ipr_abort_timeout, IPR_CANCEL_ALL_TIMEOUT);
4906	ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
4907
4908	/*
4909	 * If the abort task timed out and we sent a bus reset, we will get
4910	 * one the following responses to the abort
4911	 */
4912	if (ioasc == IPR_IOASC_BUS_WAS_RESET || ioasc == IPR_IOASC_SYNC_REQUIRED) {
4913		ioasc = 0;
4914		ipr_trace;
4915	}
4916
4917	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
4918	if (!ipr_is_naca_model(res))
4919		res->needs_sync_complete = 1;
4920
4921	LEAVE;
4922	return (IPR_IOASC_SENSE_KEY(ioasc) ? FAILED : SUCCESS);
4923}
4924
4925/**
4926 * ipr_eh_abort - Abort a single op
4927 * @scsi_cmd:	scsi command struct
4928 *
4929 * Return value:
4930 * 	SUCCESS / FAILED
4931 **/
4932static int ipr_eh_abort(struct scsi_cmnd * scsi_cmd)
4933{
4934	unsigned long flags;
4935	int rc;
4936
4937	ENTER;
4938
4939	spin_lock_irqsave(scsi_cmd->device->host->host_lock, flags);
4940	rc = ipr_cancel_op(scsi_cmd);
4941	spin_unlock_irqrestore(scsi_cmd->device->host->host_lock, flags);
4942
4943	LEAVE;
4944	return rc;
4945}
4946
4947/**
4948 * ipr_handle_other_interrupt - Handle "other" interrupts
4949 * @ioa_cfg:	ioa config struct
4950 * @int_reg:	interrupt register
4951 *
4952 * Return value:
4953 * 	IRQ_NONE / IRQ_HANDLED
4954 **/
4955static irqreturn_t ipr_handle_other_interrupt(struct ipr_ioa_cfg *ioa_cfg,
4956					      u32 int_reg)
4957{
4958	irqreturn_t rc = IRQ_HANDLED;
4959	u32 int_mask_reg;
4960
4961	int_mask_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg32);
4962	int_reg &= ~int_mask_reg;
4963
4964	/* If an interrupt on the adapter did not occur, ignore it.
4965	 * Or in the case of SIS 64, check for a stage change interrupt.
4966	 */
4967	if ((int_reg & IPR_PCII_OPER_INTERRUPTS) == 0) {
4968		if (ioa_cfg->sis64) {
4969			int_mask_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
4970			int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg;
4971			if (int_reg & IPR_PCII_IPL_STAGE_CHANGE) {
4972
4973				/* clear stage change */
4974				writel(IPR_PCII_IPL_STAGE_CHANGE, ioa_cfg->regs.clr_interrupt_reg);
4975				int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg;
4976				list_del(&ioa_cfg->reset_cmd->queue);
4977				del_timer(&ioa_cfg->reset_cmd->timer);
4978				ipr_reset_ioa_job(ioa_cfg->reset_cmd);
4979				return IRQ_HANDLED;
4980			}
4981		}
4982
4983		return IRQ_NONE;
4984	}
4985
4986	if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
4987		/* Mask the interrupt */
4988		writel(IPR_PCII_IOA_TRANS_TO_OPER, ioa_cfg->regs.set_interrupt_mask_reg);
4989
4990		/* Clear the interrupt */
4991		writel(IPR_PCII_IOA_TRANS_TO_OPER, ioa_cfg->regs.clr_interrupt_reg);
4992		int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
4993
4994		list_del(&ioa_cfg->reset_cmd->queue);
4995		del_timer(&ioa_cfg->reset_cmd->timer);
4996		ipr_reset_ioa_job(ioa_cfg->reset_cmd);
4997	} else if ((int_reg & IPR_PCII_HRRQ_UPDATED) == int_reg) {
4998		if (ipr_debug && printk_ratelimit())
4999			dev_err(&ioa_cfg->pdev->dev,
5000				"Spurious interrupt detected. 0x%08X\n", int_reg);
5001		writel(IPR_PCII_HRRQ_UPDATED, ioa_cfg->regs.clr_interrupt_reg32);
5002		int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
5003		return IRQ_NONE;
5004	} else {
5005		if (int_reg & IPR_PCII_IOA_UNIT_CHECKED)
5006			ioa_cfg->ioa_unit_checked = 1;
5007		else
5008			dev_err(&ioa_cfg->pdev->dev,
5009				"Permanent IOA failure. 0x%08X\n", int_reg);
5010
5011		if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
5012			ioa_cfg->sdt_state = GET_DUMP;
5013
5014		ipr_mask_and_clear_interrupts(ioa_cfg, ~0);
5015		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
5016	}
5017
5018	return rc;
5019}
5020
5021/**
5022 * ipr_isr_eh - Interrupt service routine error handler
5023 * @ioa_cfg:	ioa config struct
5024 * @msg:	message to log
5025 *
5026 * Return value:
5027 * 	none
5028 **/
5029static void ipr_isr_eh(struct ipr_ioa_cfg *ioa_cfg, char *msg)
5030{
5031	ioa_cfg->errors_logged++;
5032	dev_err(&ioa_cfg->pdev->dev, "%s\n", msg);
5033
5034	if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
5035		ioa_cfg->sdt_state = GET_DUMP;
5036
5037	ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
5038}
5039
5040/**
5041 * ipr_isr - Interrupt service routine
5042 * @irq:	irq number
5043 * @devp:	pointer to ioa config struct
5044 *
5045 * Return value:
5046 * 	IRQ_NONE / IRQ_HANDLED
5047 **/
5048static irqreturn_t ipr_isr(int irq, void *devp)
5049{
5050	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)devp;
5051	unsigned long lock_flags = 0;
5052	u32 int_reg = 0;
5053	u32 ioasc;
5054	u16 cmd_index;
5055	int num_hrrq = 0;
5056	int irq_none = 0;
5057	struct ipr_cmnd *ipr_cmd;
5058	irqreturn_t rc = IRQ_NONE;
5059
5060	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
5061
5062	/* If interrupts are disabled, ignore the interrupt */
5063	if (!ioa_cfg->allow_interrupts) {
5064		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5065		return IRQ_NONE;
5066	}
5067
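	/*
	 * Each host RRQ entry is valid only while its toggle bit matches
	 * ioa_cfg->toggle_bit; when the queue wraps, the driver flips its
	 * expected toggle so entries left over from the previous pass are
	 * not processed again.
	 */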
5068	while (1) {
5069		ipr_cmd = NULL;
5070
5071		while ((be32_to_cpu(*ioa_cfg->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) ==
5072		       ioa_cfg->toggle_bit) {
5073
5074			cmd_index = (be32_to_cpu(*ioa_cfg->hrrq_curr) &
5075				     IPR_HRRQ_REQ_RESP_HANDLE_MASK) >> IPR_HRRQ_REQ_RESP_HANDLE_SHIFT;
5076
5077			if (unlikely(cmd_index >= IPR_NUM_CMD_BLKS)) {
5078				ipr_isr_eh(ioa_cfg, "Invalid response handle from IOA");
5079				spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5080				return IRQ_HANDLED;
5081			}
5082
5083			ipr_cmd = ioa_cfg->ipr_cmnd_list[cmd_index];
5084
5085			ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
5086
5087			ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH, ioasc);
5088
5089			list_del(&ipr_cmd->queue);
5090			del_timer(&ipr_cmd->timer);
5091			ipr_cmd->done(ipr_cmd);
5092
5093			rc = IRQ_HANDLED;
5094
5095			if (ioa_cfg->hrrq_curr < ioa_cfg->hrrq_end) {
5096				ioa_cfg->hrrq_curr++;
5097			} else {
5098				ioa_cfg->hrrq_curr = ioa_cfg->hrrq_start;
5099				ioa_cfg->toggle_bit ^= 1u;
5100			}
5101		}
5102
5103		if (ipr_cmd != NULL) {
5104			/* Clear the PCI interrupt */
5105			do {
5106				writel(IPR_PCII_HRRQ_UPDATED, ioa_cfg->regs.clr_interrupt_reg32);
5107				int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
5108			} while (int_reg & IPR_PCII_HRRQ_UPDATED &&
5109					num_hrrq++ < IPR_MAX_HRRQ_RETRIES);
5110
5111			if (int_reg & IPR_PCII_HRRQ_UPDATED) {
5112				ipr_isr_eh(ioa_cfg, "Error clearing HRRQ");
5113				spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5114				return IRQ_HANDLED;
5115			}
5116
5117		} else if (rc == IRQ_NONE && irq_none == 0) {
5118			int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
5119			irq_none++;
5120		} else
5121			break;
5122	}
5123
5124	if (unlikely(rc == IRQ_NONE))
5125		rc = ipr_handle_other_interrupt(ioa_cfg, int_reg);
5126
5127	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
5128	return rc;
5129}
5130
5131/**
5132 * ipr_build_ioadl64 - Build a scatter/gather list and map the buffer
5133 * @ioa_cfg:	ioa config struct
5134 * @ipr_cmd:	ipr command struct
5135 *
5136 * Return value:
5137 * 	0 on success / -1 on failure
5138 **/
5139static int ipr_build_ioadl64(struct ipr_ioa_cfg *ioa_cfg,
5140			     struct ipr_cmnd *ipr_cmd)
5141{
5142	int i, nseg;
5143	struct scatterlist *sg;
5144	u32 length;
5145	u32 ioadl_flags = 0;
5146	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
5147	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
5148	struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;
5149
5150	length = scsi_bufflen(scsi_cmd);
5151	if (!length)
5152		return 0;
5153
5154	nseg = scsi_dma_map(scsi_cmd);
5155	if (nseg < 0) {
5156		dev_err(&ioa_cfg->pdev->dev, "pci_map_sg failed!\n");
5157		return -1;
5158	}
5159
5160	ipr_cmd->dma_use_sg = nseg;
5161
5162	ioarcb->data_transfer_length = cpu_to_be32(length);
5163	ioarcb->ioadl_len =
5164		cpu_to_be32(sizeof(struct ipr_ioadl64_desc) * ipr_cmd->dma_use_sg);
5165
5166	if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) {
5167		ioadl_flags = IPR_IOADL_FLAGS_WRITE;
5168		ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
5169	} else if (scsi_cmd->sc_data_direction == DMA_FROM_DEVICE)
5170		ioadl_flags = IPR_IOADL_FLAGS_READ;
5171
5172	scsi_for_each_sg(scsi_cmd, sg, ipr_cmd->dma_use_sg, i) {
5173		ioadl64[i].flags = cpu_to_be32(ioadl_flags);
5174		ioadl64[i].data_len = cpu_to_be32(sg_dma_len(sg));
5175		ioadl64[i].address = cpu_to_be64(sg_dma_address(sg));
5176	}
5177
5178	ioadl64[i-1].flags |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
5179	return 0;
5180}
5181
5182/**
5183 * ipr_build_ioadl - Build a scatter/gather list and map the buffer
5184 * @ioa_cfg:	ioa config struct
5185 * @ipr_cmd:	ipr command struct
5186 *
5187 * Return value:
5188 * 	0 on success / -1 on failure
5189 **/
5190static int ipr_build_ioadl(struct ipr_ioa_cfg *ioa_cfg,
5191			   struct ipr_cmnd *ipr_cmd)
5192{
5193	int i, nseg;
5194	struct scatterlist *sg;
5195	u32 length;
5196	u32 ioadl_flags = 0;
5197	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
5198	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
5199	struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
5200
5201	length = scsi_bufflen(scsi_cmd);
5202	if (!length)
5203		return 0;
5204
5205	nseg = scsi_dma_map(scsi_cmd);
5206	if (nseg < 0) {
5207		dev_err(&ioa_cfg->pdev->dev, "pci_map_sg failed!\n");
5208		return -1;
5209	}
5210
5211	ipr_cmd->dma_use_sg = nseg;
5212
5213	if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) {
5214		ioadl_flags = IPR_IOADL_FLAGS_WRITE;
5215		ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
5216		ioarcb->data_transfer_length = cpu_to_be32(length);
5217		ioarcb->ioadl_len =
5218			cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
5219	} else if (scsi_cmd->sc_data_direction == DMA_FROM_DEVICE) {
5220		ioadl_flags = IPR_IOADL_FLAGS_READ;
5221		ioarcb->read_data_transfer_length = cpu_to_be32(length);
5222		ioarcb->read_ioadl_len =
5223			cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
5224	}
5225
5226	if (ipr_cmd->dma_use_sg <= ARRAY_SIZE(ioarcb->u.add_data.u.ioadl)) {
5227		ioadl = ioarcb->u.add_data.u.ioadl;
5228		ioarcb->write_ioadl_addr = cpu_to_be32((ipr_cmd->dma_addr) +
5229				    offsetof(struct ipr_ioarcb, u.add_data));
5230		ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
5231	}
5232
5233	scsi_for_each_sg(scsi_cmd, sg, ipr_cmd->dma_use_sg, i) {
5234		ioadl[i].flags_and_data_len =
5235			cpu_to_be32(ioadl_flags | sg_dma_len(sg));
5236		ioadl[i].address = cpu_to_be32(sg_dma_address(sg));
5237	}
5238
5239	ioadl[i-1].flags_and_data_len |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
5240	return 0;
5241}
5242
5243/**
5244 * ipr_get_task_attributes - Translate SPI Q-Tag to task attributes
5245 * @scsi_cmd:	scsi command struct
5246 *
5247 * Return value:
5248 * 	task attributes
5249 **/
5250static u8 ipr_get_task_attributes(struct scsi_cmnd *scsi_cmd)
5251{
5252	u8 tag[2];
5253	u8 rc = IPR_FLAGS_LO_UNTAGGED_TASK;
5254
5255	if (scsi_populate_tag_msg(scsi_cmd, tag)) {
5256		switch (tag[0]) {
5257		case MSG_SIMPLE_TAG:
5258			rc = IPR_FLAGS_LO_SIMPLE_TASK;
5259			break;
5260		case MSG_HEAD_TAG:
5261			rc = IPR_FLAGS_LO_HEAD_OF_Q_TASK;
5262			break;
5263		case MSG_ORDERED_TAG:
5264			rc = IPR_FLAGS_LO_ORDERED_TASK;
5265			break;
5266		}
5267	}
5268
5269	return rc;
5270}
5271
5272/**
5273 * ipr_erp_done - Process completion of ERP for a device
5274 * @ipr_cmd:		ipr command struct
5275 *
5276 * This function copies the sense buffer into the scsi_cmd
5277 * struct and pushes the scsi_done function.
5278 *
5279 * Return value:
5280 * 	nothing
5281 **/
5282static void ipr_erp_done(struct ipr_cmnd *ipr_cmd)
5283{
5284	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
5285	struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
5286	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5287	u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
5288
5289	if (IPR_IOASC_SENSE_KEY(ioasc) > 0) {
5290		scsi_cmd->result |= (DID_ERROR << 16);
5291		scmd_printk(KERN_ERR, scsi_cmd,
5292			    "Request Sense failed with IOASC: 0x%08X\n", ioasc);
5293	} else {
5294		memcpy(scsi_cmd->sense_buffer, ipr_cmd->sense_buffer,
5295		       SCSI_SENSE_BUFFERSIZE);
5296	}
5297
5298	if (res) {
5299		if (!ipr_is_naca_model(res))
5300			res->needs_sync_complete = 1;
5301		res->in_erp = 0;
5302	}
5303	scsi_dma_unmap(ipr_cmd->scsi_cmd);
5304	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
5305	scsi_cmd->scsi_done(scsi_cmd);
5306}
5307
5308/**
5309 * ipr_reinit_ipr_cmnd_for_erp - Re-initialize a cmnd block to be used for ERP
5310 * @ipr_cmd:	ipr command struct
5311 *
5312 * Return value:
5313 * 	none
5314 **/
5315static void ipr_reinit_ipr_cmnd_for_erp(struct ipr_cmnd *ipr_cmd)
5316{
5317	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
5318	struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
5319	dma_addr_t dma_addr = ipr_cmd->dma_addr;
5320
5321	memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt));
5322	ioarcb->data_transfer_length = 0;
5323	ioarcb->read_data_transfer_length = 0;
5324	ioarcb->ioadl_len = 0;
5325	ioarcb->read_ioadl_len = 0;
5326	ioasa->hdr.ioasc = 0;
5327	ioasa->hdr.residual_data_len = 0;
5328
5329	if (ipr_cmd->ioa_cfg->sis64)
5330		ioarcb->u.sis64_addr_data.data_ioadl_addr =
5331			cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64));
5332	else {
5333		ioarcb->write_ioadl_addr =
5334			cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl));
5335		ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
5336	}
5337}
5338
5339/**
5340 * ipr_erp_request_sense - Send request sense to a device
5341 * @ipr_cmd:	ipr command struct
5342 *
5343 * This function sends a request sense to a device as a result
5344 * of a check condition.
5345 *
5346 * Return value:
5347 * 	nothing
5348 **/
5349static void ipr_erp_request_sense(struct ipr_cmnd *ipr_cmd)
5350{
5351	struct ipr_cmd_pkt *cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
5352	u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
5353
5354	if (IPR_IOASC_SENSE_KEY(ioasc) > 0) {
5355		ipr_erp_done(ipr_cmd);
5356		return;
5357	}
5358
5359	ipr_reinit_ipr_cmnd_for_erp(ipr_cmd);
5360
5361	cmd_pkt->request_type = IPR_RQTYPE_SCSICDB;
5362	cmd_pkt->cdb[0] = REQUEST_SENSE;
5363	cmd_pkt->cdb[4] = SCSI_SENSE_BUFFERSIZE;
5364	cmd_pkt->flags_hi |= IPR_FLAGS_HI_SYNC_OVERRIDE;
5365	cmd_pkt->flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
5366	cmd_pkt->timeout = cpu_to_be16(IPR_REQUEST_SENSE_TIMEOUT / HZ);
5367
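	/* Read the sense data directly into this command's preallocated
	 * sense buffer using a single-element IOADL. */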
5368	ipr_init_ioadl(ipr_cmd, ipr_cmd->sense_buffer_dma,
5369		       SCSI_SENSE_BUFFERSIZE, IPR_IOADL_FLAGS_READ_LAST);
5370
5371	ipr_do_req(ipr_cmd, ipr_erp_done, ipr_timeout,
5372		   IPR_REQUEST_SENSE_TIMEOUT * 2);
5373}
5374
5375/**
5376 * ipr_erp_cancel_all - Send cancel all to a device
5377 * @ipr_cmd:	ipr command struct
5378 *
5379 * This function sends a cancel all to a device to clear the
5380 * queue. If we are running TCQ on the device, QERR is set to 1,
5381 * which means all outstanding ops have been dropped on the floor.
5382 * Cancel all will return them to us.
5383 *
5384 * Return value:
5385 * 	nothing
5386 **/
5387static void ipr_erp_cancel_all(struct ipr_cmnd *ipr_cmd)
5388{
5389	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
5390	struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
5391	struct ipr_cmd_pkt *cmd_pkt;
5392
5393	res->in_erp = 1;
5394
5395	ipr_reinit_ipr_cmnd_for_erp(ipr_cmd);
5396
5397	if (!scsi_get_tag_type(scsi_cmd->device)) {
5398		ipr_erp_request_sense(ipr_cmd);
5399		return;
5400	}
5401
5402	cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
5403	cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
5404	cmd_pkt->cdb[0] = IPR_CANCEL_ALL_REQUESTS;
5405
5406	ipr_do_req(ipr_cmd, ipr_erp_request_sense, ipr_timeout,
5407		   IPR_CANCEL_ALL_TIMEOUT);
5408}
5409
5410/**
5411 * ipr_dump_ioasa - Dump contents of IOASA
5412 * @ioa_cfg:	ioa config struct
5413 * @ipr_cmd:	ipr command struct
5414 * @res:		resource entry struct
5415 *
5416 * This function is invoked by the interrupt handler when ops
5417 * fail. It will log the IOASA if appropriate. Only called
5418 * for GPDD ops.
5419 *
5420 * Return value:
5421 * 	none
5422 **/
5423static void ipr_dump_ioasa(struct ipr_ioa_cfg *ioa_cfg,
5424			   struct ipr_cmnd *ipr_cmd, struct ipr_resource_entry *res)
5425{
5426	int i;
5427	u16 data_len;
5428	u32 ioasc, fd_ioasc;
5429	struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
5430	__be32 *ioasa_data = (__be32 *)ioasa;
5431	int error_index;
5432
5433	ioasc = be32_to_cpu(ioasa->hdr.ioasc) & IPR_IOASC_IOASC_MASK;
5434	fd_ioasc = be32_to_cpu(ioasa->hdr.fd_ioasc) & IPR_IOASC_IOASC_MASK;
5435
5436	if (0 == ioasc)
5437		return;
5438
5439	if (ioa_cfg->log_level < IPR_DEFAULT_LOG_LEVEL)
5440		return;
5441
5442	if (ioasc == IPR_IOASC_BUS_WAS_RESET && fd_ioasc)
5443		error_index = ipr_get_error(fd_ioasc);
5444	else
5445		error_index = ipr_get_error(ioasc);
5446
5447	if (ioa_cfg->log_level < IPR_MAX_LOG_LEVEL) {
5448		/* Don't log an error if the IOA already logged one */
5449		if (ioasa->hdr.ilid != 0)
5450			return;
5451
5452		if (!ipr_is_gscsi(res))
5453			return;
5454
5455		if (ipr_error_table[error_index].log_ioasa == 0)
5456			return;
5457	}
5458
5459	ipr_res_err(ioa_cfg, res, "%s\n", ipr_error_table[error_index].error);
5460
5461	data_len = be16_to_cpu(ioasa->hdr.ret_stat_len);
5462	if (ioa_cfg->sis64 && sizeof(struct ipr_ioasa64) < data_len)
5463		data_len = sizeof(struct ipr_ioasa64);
5464	else if (!ioa_cfg->sis64 && sizeof(struct ipr_ioasa) < data_len)
5465		data_len = sizeof(struct ipr_ioasa);
5466
5467	ipr_err("IOASA Dump:\n");
5468
5469	for (i = 0; i < data_len / 4; i += 4) {
5470		ipr_err("%08X: %08X %08X %08X %08X\n", i*4,
5471			be32_to_cpu(ioasa_data[i]),
5472			be32_to_cpu(ioasa_data[i+1]),
5473			be32_to_cpu(ioasa_data[i+2]),
5474			be32_to_cpu(ioasa_data[i+3]));
5475	}
5476}
5477
5478/**
5479 * ipr_gen_sense - Generate SCSI sense data from an IOASA
5480 * @ipr_cmd:	ipr command struct
5482 *
5483 * Return value:
5484 * 	none
5485 **/
5486static void ipr_gen_sense(struct ipr_cmnd *ipr_cmd)
5487{
5488	u32 failing_lba;
5489	u8 *sense_buf = ipr_cmd->scsi_cmd->sense_buffer;
5490	struct ipr_resource_entry *res = ipr_cmd->scsi_cmd->device->hostdata;
5491	struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
5492	u32 ioasc = be32_to_cpu(ioasa->hdr.ioasc);
5493
5494	memset(sense_buf, 0, SCSI_SENSE_BUFFERSIZE);
5495
5496	if (ioasc >= IPR_FIRST_DRIVER_IOASC)
5497		return;
5498
5499	ipr_cmd->scsi_cmd->result = SAM_STAT_CHECK_CONDITION;
5500
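	/*
	 * A medium error on a volume set with a failing LBA above 32 bits
	 * gets descriptor format sense data (response code 0x72) carrying
	 * the full 8 byte LBA in an information descriptor; all other
	 * cases get fixed format sense data (response code 0x70).
	 */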
5501	if (ipr_is_vset_device(res) &&
5502	    ioasc == IPR_IOASC_MED_DO_NOT_REALLOC &&
5503	    ioasa->u.vset.failing_lba_hi != 0) {
5504		sense_buf[0] = 0x72;
5505		sense_buf[1] = IPR_IOASC_SENSE_KEY(ioasc);
5506		sense_buf[2] = IPR_IOASC_SENSE_CODE(ioasc);
5507		sense_buf[3] = IPR_IOASC_SENSE_QUAL(ioasc);
5508
5509		sense_buf[7] = 12;
5510		sense_buf[8] = 0;
5511		sense_buf[9] = 0x0A;
5512		sense_buf[10] = 0x80;
5513
5514		failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_hi);
5515
5516		sense_buf[12] = (failing_lba & 0xff000000) >> 24;
5517		sense_buf[13] = (failing_lba & 0x00ff0000) >> 16;
5518		sense_buf[14] = (failing_lba & 0x0000ff00) >> 8;
5519		sense_buf[15] = failing_lba & 0x000000ff;
5520
5521		failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_lo);
5522
5523		sense_buf[16] = (failing_lba & 0xff000000) >> 24;
5524		sense_buf[17] = (failing_lba & 0x00ff0000) >> 16;
5525		sense_buf[18] = (failing_lba & 0x0000ff00) >> 8;
5526		sense_buf[19] = failing_lba & 0x000000ff;
5527	} else {
5528		sense_buf[0] = 0x70;
5529		sense_buf[2] = IPR_IOASC_SENSE_KEY(ioasc);
5530		sense_buf[12] = IPR_IOASC_SENSE_CODE(ioasc);
5531		sense_buf[13] = IPR_IOASC_SENSE_QUAL(ioasc);
5532
5533		/* Illegal request */
5534		if ((IPR_IOASC_SENSE_KEY(ioasc) == 0x05) &&
5535		    (be32_to_cpu(ioasa->hdr.ioasc_specific) & IPR_FIELD_POINTER_VALID)) {
5536			sense_buf[7] = 10;	/* additional length */
5537
5538			/* IOARCB was in error */
5539			if (IPR_IOASC_SENSE_CODE(ioasc) == 0x24)
5540				sense_buf[15] = 0xC0;
5541			else	/* Parameter data was invalid */
5542				sense_buf[15] = 0x80;
5543
5544			sense_buf[16] =
5545			    ((IPR_FIELD_POINTER_MASK &
5546			      be32_to_cpu(ioasa->hdr.ioasc_specific)) >> 8) & 0xff;
5547			sense_buf[17] =
5548			    (IPR_FIELD_POINTER_MASK &
5549			     be32_to_cpu(ioasa->hdr.ioasc_specific)) & 0xff;
5550		} else {
5551			if (ioasc == IPR_IOASC_MED_DO_NOT_REALLOC) {
5552				if (ipr_is_vset_device(res))
5553					failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_lo);
5554				else
5555					failing_lba = be32_to_cpu(ioasa->u.dasd.failing_lba);
5556
5557				sense_buf[0] |= 0x80;	/* Or in the Valid bit */
5558				sense_buf[3] = (failing_lba & 0xff000000) >> 24;
5559				sense_buf[4] = (failing_lba & 0x00ff0000) >> 16;
5560				sense_buf[5] = (failing_lba & 0x0000ff00) >> 8;
5561				sense_buf[6] = failing_lba & 0x000000ff;
5562			}
5563
5564			sense_buf[7] = 6;	/* additional length */
5565		}
5566	}
5567}
5568
5569/**
5570 * ipr_get_autosense - Copy autosense data to sense buffer
5571 * @ipr_cmd:	ipr command struct
5572 *
5573 * This function copies the autosense buffer to the buffer
5574 * in the scsi_cmd, if there is autosense available.
5575 *
5576 * Return value:
5577 *	1 if autosense was available / 0 if not
5578 **/
5579static int ipr_get_autosense(struct ipr_cmnd *ipr_cmd)
5580{
5581	struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
5582	struct ipr_ioasa64 *ioasa64 = &ipr_cmd->s.ioasa64;
5583
5584	if ((be32_to_cpu(ioasa->hdr.ioasc_specific) & IPR_AUTOSENSE_VALID) == 0)
5585		return 0;
5586
5587	if (ipr_cmd->ioa_cfg->sis64)
5588		memcpy(ipr_cmd->scsi_cmd->sense_buffer, ioasa64->auto_sense.data,
5589		       min_t(u16, be16_to_cpu(ioasa64->auto_sense.auto_sense_len),
5590			   SCSI_SENSE_BUFFERSIZE));
5591	else
5592		memcpy(ipr_cmd->scsi_cmd->sense_buffer, ioasa->auto_sense.data,
5593		       min_t(u16, be16_to_cpu(ioasa->auto_sense.auto_sense_len),
5594			   SCSI_SENSE_BUFFERSIZE));
5595	return 1;
5596}
5597
5598/**
5599 * ipr_erp_start - Process an error response for a SCSI op
5600 * @ioa_cfg:	ioa config struct
5601 * @ipr_cmd:	ipr command struct
5602 *
5603 * This function determines whether or not to initiate ERP
5604 * on the affected device.
5605 *
5606 * Return value:
5607 * 	nothing
5608 **/
5609static void ipr_erp_start(struct ipr_ioa_cfg *ioa_cfg,
5610			      struct ipr_cmnd *ipr_cmd)
5611{
5612	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
5613	struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
5614	u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
5615	u32 masked_ioasc = ioasc & IPR_IOASC_IOASC_MASK;
5616
5617	if (!res) {
5618		ipr_scsi_eh_done(ipr_cmd);
5619		return;
5620	}
5621
5622	if (!ipr_is_gscsi(res) && masked_ioasc != IPR_IOASC_HW_DEV_BUS_STATUS)
5623		ipr_gen_sense(ipr_cmd);
5624
5625	ipr_dump_ioasa(ioa_cfg, ipr_cmd, res);
5626
5627	switch (masked_ioasc) {
5628	case IPR_IOASC_ABORTED_CMD_TERM_BY_HOST:
5629		if (ipr_is_naca_model(res))
5630			scsi_cmd->result |= (DID_ABORT << 16);
5631		else
5632			scsi_cmd->result |= (DID_IMM_RETRY << 16);
5633		break;
5634	case IPR_IOASC_IR_RESOURCE_HANDLE:
5635	case IPR_IOASC_IR_NO_CMDS_TO_2ND_IOA:
5636		scsi_cmd->result |= (DID_NO_CONNECT << 16);
5637		break;
5638	case IPR_IOASC_HW_SEL_TIMEOUT:
5639		scsi_cmd->result |= (DID_NO_CONNECT << 16);
5640		if (!ipr_is_naca_model(res))
5641			res->needs_sync_complete = 1;
5642		break;
5643	case IPR_IOASC_SYNC_REQUIRED:
5644		if (!res->in_erp)
5645			res->needs_sync_complete = 1;
5646		scsi_cmd->result |= (DID_IMM_RETRY << 16);
5647		break;
5648	case IPR_IOASC_MED_DO_NOT_REALLOC: /* prevent retries */
5649	case IPR_IOASA_IR_DUAL_IOA_DISABLED:
5650		scsi_cmd->result |= (DID_PASSTHROUGH << 16);
5651		break;
5652	case IPR_IOASC_BUS_WAS_RESET:
5653	case IPR_IOASC_BUS_WAS_RESET_BY_OTHER:
5654		/*
5655		 * Report the bus reset and ask for a retry. The device
5656		 * will return CC/UA on the next command.
5657		 */
5658		if (!res->resetting_device)
5659			scsi_report_bus_reset(ioa_cfg->host, scsi_cmd->device->channel);
5660		scsi_cmd->result |= (DID_ERROR << 16);
5661		if (!ipr_is_naca_model(res))
5662			res->needs_sync_complete = 1;
5663		break;
5664	case IPR_IOASC_HW_DEV_BUS_STATUS:
5665		scsi_cmd->result |= IPR_IOASC_SENSE_STATUS(ioasc);
5666		if (IPR_IOASC_SENSE_STATUS(ioasc) == SAM_STAT_CHECK_CONDITION) {
5667			if (!ipr_get_autosense(ipr_cmd)) {
5668				if (!ipr_is_naca_model(res)) {
5669					ipr_erp_cancel_all(ipr_cmd);
5670					return;
5671				}
5672			}
5673		}
5674		if (!ipr_is_naca_model(res))
5675			res->needs_sync_complete = 1;
5676		break;
5677	case IPR_IOASC_NR_INIT_CMD_REQUIRED:
5678		break;
5679	default:
5680		if (IPR_IOASC_SENSE_KEY(ioasc) > RECOVERED_ERROR)
5681			scsi_cmd->result |= (DID_ERROR << 16);
5682		if (!ipr_is_vset_device(res) && !ipr_is_naca_model(res))
5683			res->needs_sync_complete = 1;
5684		break;
5685	}
5686
5687	scsi_dma_unmap(ipr_cmd->scsi_cmd);
5688	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
5689	scsi_cmd->scsi_done(scsi_cmd);
5690}
5691
5692/**
5693 * ipr_scsi_done - mid-layer done function
5694 * @ipr_cmd:	ipr command struct
5695 *
5696 * This function is invoked by the interrupt handler for
5697 * ops generated by the SCSI mid-layer
5698 *
5699 * Return value:
5700 * 	none
5701 **/
5702static void ipr_scsi_done(struct ipr_cmnd *ipr_cmd)
5703{
5704	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5705	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
5706	u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
5707
5708	scsi_set_resid(scsi_cmd, be32_to_cpu(ipr_cmd->s.ioasa.hdr.residual_data_len));
5709
5710	if (likely(IPR_IOASC_SENSE_KEY(ioasc) == 0)) {
5711		scsi_dma_unmap(ipr_cmd->scsi_cmd);
5712		list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
5713		scsi_cmd->scsi_done(scsi_cmd);
5714	} else
5715		ipr_erp_start(ioa_cfg, ipr_cmd);
5716}
5717
5718/**
5719 * ipr_queuecommand - Queue a mid-layer request
5720 * @scsi_cmd:	scsi command struct
5721 * @done:		done function
5722 *
5723 * This function queues a request generated by the mid-layer.
5724 *
5725 * Return value:
5726 *	0 on success
5727 *	SCSI_MLQUEUE_DEVICE_BUSY if device is busy
5728 *	SCSI_MLQUEUE_HOST_BUSY if host is busy
5729 **/
5730static int ipr_queuecommand_lck(struct scsi_cmnd *scsi_cmd,
5731			    void (*done) (struct scsi_cmnd *))
5732{
5733	struct ipr_ioa_cfg *ioa_cfg;
5734	struct ipr_resource_entry *res;
5735	struct ipr_ioarcb *ioarcb;
5736	struct ipr_cmnd *ipr_cmd;
5737	int rc = 0;
5738
5739	scsi_cmd->scsi_done = done;
5740	ioa_cfg = (struct ipr_ioa_cfg *)scsi_cmd->device->host->hostdata;
5741	res = scsi_cmd->device->hostdata;
5742	scsi_cmd->result = (DID_OK << 16);
5743
5744	/*
5745	 * We are currently blocking all devices due to a host reset.
5746	 * We have told the host to stop giving us new requests, but
5747	 * ERP ops don't count. FIXME
5748	 */
5749	if (unlikely(!ioa_cfg->allow_cmds && !ioa_cfg->ioa_is_dead))
5750		return SCSI_MLQUEUE_HOST_BUSY;
5751
5752	/*
5753	 * FIXME - Create scsi_set_host_offline interface
5754	 *  and the ioa_is_dead check can be removed
5755	 */
5756	if (unlikely(ioa_cfg->ioa_is_dead || !res)) {
5757		memset(scsi_cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
5758		scsi_cmd->result = (DID_NO_CONNECT << 16);
5759		scsi_cmd->scsi_done(scsi_cmd);
5760		return 0;
5761	}
5762
5763	if (ipr_is_gata(res) && res->sata_port)
5764		return ata_sas_queuecmd(scsi_cmd, res->sata_port->ap);
5765
5766	ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
5767	ioarcb = &ipr_cmd->ioarcb;
5768	list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);
5769
5770	memcpy(ioarcb->cmd_pkt.cdb, scsi_cmd->cmnd, scsi_cmd->cmd_len);
5771	ipr_cmd->scsi_cmd = scsi_cmd;
5772	ioarcb->res_handle = res->res_handle;
5773	ipr_cmd->done = ipr_scsi_done;
5774	ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_GET_RES_PHYS_LOC(res));
5775
5776	if (ipr_is_gscsi(res) || ipr_is_vset_device(res)) {
5777		if (scsi_cmd->underflow == 0)
5778			ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
5779
5780		if (res->needs_sync_complete) {
5781			ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_SYNC_COMPLETE;
5782			res->needs_sync_complete = 0;
5783		}
5784
5785		ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_LINK_DESC;
5786		if (ipr_is_gscsi(res))
5787			ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_DELAY_AFTER_RST;
5788		ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_ALIGNED_BFR;
5789		ioarcb->cmd_pkt.flags_lo |= ipr_get_task_attributes(scsi_cmd);
5790	}
5791
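	/* Vendor unique CDBs (opcodes 0xC0 and above) are issued as IOA
	 * commands rather than passed through as SCSI CDBs when the target
	 * is not a generic SCSI device, or when the opcode is
	 * IPR_QUERY_RSRC_STATE. */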
5792	if (scsi_cmd->cmnd[0] >= 0xC0 &&
5793	    (!ipr_is_gscsi(res) || scsi_cmd->cmnd[0] == IPR_QUERY_RSRC_STATE))
5794		ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
5795
5796	if (likely(rc == 0)) {
5797		if (ioa_cfg->sis64)
5798			rc = ipr_build_ioadl64(ioa_cfg, ipr_cmd);
5799		else
5800			rc = ipr_build_ioadl(ioa_cfg, ipr_cmd);
5801	}
5802
5803	if (likely(rc == 0)) {
5804		mb();
5805		ipr_send_command(ipr_cmd);
5806	} else {
5807		list_move_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
5808		return SCSI_MLQUEUE_HOST_BUSY;
5809	}
5810
5811	return 0;
5812}
5813
5814static DEF_SCSI_QCMD(ipr_queuecommand)
5815
5816/**
5817 * ipr_ioctl - IOCTL handler
5818 * @sdev:	scsi device struct
5819 * @cmd:	IOCTL cmd
5820 * @arg:	IOCTL arg
5821 *
5822 * Return value:
5823 * 	0 on success / other on failure
5824 **/
5825static int ipr_ioctl(struct scsi_device *sdev, int cmd, void __user *arg)
5826{
5827	struct ipr_resource_entry *res;
5828
5829	res = (struct ipr_resource_entry *)sdev->hostdata;
5830	if (res && ipr_is_gata(res)) {
5831		if (cmd == HDIO_GET_IDENTITY)
5832			return -ENOTTY;
5833		return ata_sas_scsi_ioctl(res->sata_port->ap, sdev, cmd, arg);
5834	}
5835
5836	return -EINVAL;
5837}
5838
5839/**
5840 * ipr_ioa_info - Get information about the card/driver
5841 * @host:	scsi host struct
5842 *
5843 * Return value:
5844 * 	pointer to buffer with description string
5845 **/
5846static const char *ipr_ioa_info(struct Scsi_Host *host)
5847{
5848	static char buffer[512];
5849	struct ipr_ioa_cfg *ioa_cfg;
5850	unsigned long lock_flags = 0;
5851
5852	ioa_cfg = (struct ipr_ioa_cfg *) host->hostdata;
5853
5854	spin_lock_irqsave(host->host_lock, lock_flags);
5855	sprintf(buffer, "IBM %X Storage Adapter", ioa_cfg->type);
5856	spin_unlock_irqrestore(host->host_lock, lock_flags);
5857
5858	return buffer;
5859}
5860
5861static struct scsi_host_template driver_template = {
5862	.module = THIS_MODULE,
5863	.name = "IPR",
5864	.info = ipr_ioa_info,
5865	.ioctl = ipr_ioctl,
5866	.queuecommand = ipr_queuecommand,
5867	.eh_abort_handler = ipr_eh_abort,
5868	.eh_device_reset_handler = ipr_eh_dev_reset,
5869	.eh_host_reset_handler = ipr_eh_host_reset,
5870	.slave_alloc = ipr_slave_alloc,
5871	.slave_configure = ipr_slave_configure,
5872	.slave_destroy = ipr_slave_destroy,
5873	.target_alloc = ipr_target_alloc,
5874	.target_destroy = ipr_target_destroy,
5875	.change_queue_depth = ipr_change_queue_depth,
5876	.change_queue_type = ipr_change_queue_type,
5877	.bios_param = ipr_biosparam,
5878	.can_queue = IPR_MAX_COMMANDS,
5879	.this_id = -1,
5880	.sg_tablesize = IPR_MAX_SGLIST,
5881	.max_sectors = IPR_IOA_MAX_SECTORS,
5882	.cmd_per_lun = IPR_MAX_CMD_PER_LUN,
5883	.use_clustering = ENABLE_CLUSTERING,
5884	.shost_attrs = ipr_ioa_attrs,
5885	.sdev_attrs = ipr_dev_attrs,
5886	.proc_name = IPR_NAME
5887};
5888
5889/**
5890 * ipr_ata_phy_reset - libata phy_reset handler
5891 * @ap:		ata port to reset
5892 *
5893 **/
5894static void ipr_ata_phy_reset(struct ata_port *ap)
5895{
5896	unsigned long flags;
5897	struct ipr_sata_port *sata_port = ap->private_data;
5898	struct ipr_resource_entry *res = sata_port->res;
5899	struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
5900	int rc;
5901
5902	ENTER;
5903	spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
5904	while (ioa_cfg->in_reset_reload) {
5905		spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
5906		wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
5907		spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
5908	}
5909
5910	if (!ioa_cfg->allow_cmds)
5911		goto out_unlock;
5912
5913	rc = ipr_device_reset(ioa_cfg, res);
5914
5915	if (rc) {
5916		ap->link.device[0].class = ATA_DEV_NONE;
5917		goto out_unlock;
5918	}
5919
5920	ap->link.device[0].class = res->ata_class;
5921	if (ap->link.device[0].class == ATA_DEV_UNKNOWN)
5922		ap->link.device[0].class = ATA_DEV_NONE;
5923
5924out_unlock:
5925	spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
5926	LEAVE;
5927}
5928
5929/**
5930 * ipr_ata_post_internal - Cleanup after an internal command
5931 * @qc:	ATA queued command
5932 *
5933 * Return value:
5934 * 	none
5935 **/
5936static void ipr_ata_post_internal(struct ata_queued_cmd *qc)
5937{
5938	struct ipr_sata_port *sata_port = qc->ap->private_data;
5939	struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
5940	struct ipr_cmnd *ipr_cmd;
5941	unsigned long flags;
5942
5943	spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
5944	while (ioa_cfg->in_reset_reload) {
5945		spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
5946		wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
5947		spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
5948	}
5949
5950	list_for_each_entry(ipr_cmd, &ioa_cfg->pending_q, queue) {
5951		if (ipr_cmd->qc == qc) {
5952			ipr_device_reset(ioa_cfg, sata_port->res);
5953			break;
5954		}
5955	}
5956	spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
5957}
5958
5959/**
5960 * ipr_copy_sata_tf - Copy a SATA taskfile to an IOA data structure
5961 * @regs:	destination
5962 * @tf:	source ATA taskfile
5963 *
5964 * Return value:
5965 * 	none
5966 **/
5967static void ipr_copy_sata_tf(struct ipr_ioarcb_ata_regs *regs,
5968			     struct ata_taskfile *tf)
5969{
5970	regs->feature = tf->feature;
5971	regs->nsect = tf->nsect;
5972	regs->lbal = tf->lbal;
5973	regs->lbam = tf->lbam;
5974	regs->lbah = tf->lbah;
5975	regs->device = tf->device;
5976	regs->command = tf->command;
5977	regs->hob_feature = tf->hob_feature;
5978	regs->hob_nsect = tf->hob_nsect;
5979	regs->hob_lbal = tf->hob_lbal;
5980	regs->hob_lbam = tf->hob_lbam;
5981	regs->hob_lbah = tf->hob_lbah;
5982	regs->ctl = tf->ctl;
5983}
5984
5985/**
5986 * ipr_sata_done - done function for SATA commands
5987 * @ipr_cmd:	ipr command struct
5988 *
5989 * This function is invoked by the interrupt handler for
5990 * ops generated by the SCSI mid-layer to SATA devices
5991 *
5992 * Return value:
5993 * 	none
5994 **/
5995static void ipr_sata_done(struct ipr_cmnd *ipr_cmd)
5996{
5997	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5998	struct ata_queued_cmd *qc = ipr_cmd->qc;
5999	struct ipr_sata_port *sata_port = qc->ap->private_data;
6000	struct ipr_resource_entry *res = sata_port->res;
6001	u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
6002
6003	if (ipr_cmd->ioa_cfg->sis64)
6004		memcpy(&sata_port->ioasa, &ipr_cmd->s.ioasa64.u.gata,
6005		       sizeof(struct ipr_ioasa_gata));
6006	else
6007		memcpy(&sata_port->ioasa, &ipr_cmd->s.ioasa.u.gata,
6008		       sizeof(struct ipr_ioasa_gata));
6009	ipr_dump_ioasa(ioa_cfg, ipr_cmd, res);
6010
6011	if (be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc_specific) & IPR_ATA_DEVICE_WAS_RESET)
6012		scsi_report_device_reset(ioa_cfg->host, res->bus, res->target);
6013
6014	if (IPR_IOASC_SENSE_KEY(ioasc) > RECOVERED_ERROR)
6015		qc->err_mask |= __ac_err_mask(sata_port->ioasa.status);
6016	else
6017		qc->err_mask |= ac_err_mask(sata_port->ioasa.status);
6018	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
6019	ata_qc_complete(qc);
6020}
6021
6022/**
6023 * ipr_build_ata_ioadl64 - Build an ATA scatter/gather list
6024 * @ipr_cmd:	ipr command struct
6025 * @qc:		ATA queued command
6026 *
6027 **/
6028static void ipr_build_ata_ioadl64(struct ipr_cmnd *ipr_cmd,
6029				  struct ata_queued_cmd *qc)
6030{
6031	u32 ioadl_flags = 0;
6032	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
6033	struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;
6034	struct ipr_ioadl64_desc *last_ioadl64 = NULL;
6035	int len = qc->nbytes;
6036	struct scatterlist *sg;
6037	unsigned int si;
6038	dma_addr_t dma_addr = ipr_cmd->dma_addr;
6039
6040	if (len == 0)
6041		return;
6042
6043	if (qc->dma_dir == DMA_TO_DEVICE) {
6044		ioadl_flags = IPR_IOADL_FLAGS_WRITE;
6045		ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
6046	} else if (qc->dma_dir == DMA_FROM_DEVICE)
6047		ioadl_flags = IPR_IOADL_FLAGS_READ;
6048
6049	ioarcb->data_transfer_length = cpu_to_be32(len);
6050	ioarcb->ioadl_len =
6051		cpu_to_be32(sizeof(struct ipr_ioadl64_desc) * ipr_cmd->dma_use_sg);
6052	ioarcb->u.sis64_addr_data.data_ioadl_addr =
6053		cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ata_ioadl));
6054
6055	for_each_sg(qc->sg, sg, qc->n_elem, si) {
6056		ioadl64->flags = cpu_to_be32(ioadl_flags);
6057		ioadl64->data_len = cpu_to_be32(sg_dma_len(sg));
6058		ioadl64->address = cpu_to_be64(sg_dma_address(sg));
6059
6060		last_ioadl64 = ioadl64;
6061		ioadl64++;
6062	}
6063
6064	if (likely(last_ioadl64))
6065		last_ioadl64->flags |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
6066}
6067
6068/**
6069 * ipr_build_ata_ioadl - Build an ATA scatter/gather list
6070 * @ipr_cmd:	ipr command struct
6071 * @qc:		ATA queued command
6072 *
6073 **/
6074static void ipr_build_ata_ioadl(struct ipr_cmnd *ipr_cmd,
6075				struct ata_queued_cmd *qc)
6076{
6077	u32 ioadl_flags = 0;
6078	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
6079	struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
6080	struct ipr_ioadl_desc *last_ioadl = NULL;
6081	int len = qc->nbytes;
6082	struct scatterlist *sg;
6083	unsigned int si;
6084
6085	if (len == 0)
6086		return;
6087
6088	if (qc->dma_dir == DMA_TO_DEVICE) {
6089		ioadl_flags = IPR_IOADL_FLAGS_WRITE;
6090		ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
6091		ioarcb->data_transfer_length = cpu_to_be32(len);
6092		ioarcb->ioadl_len =
6093			cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
6094	} else if (qc->dma_dir == DMA_FROM_DEVICE) {
6095		ioadl_flags = IPR_IOADL_FLAGS_READ;
6096		ioarcb->read_data_transfer_length = cpu_to_be32(len);
6097		ioarcb->read_ioadl_len =
6098			cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
6099	}
6100
6101	for_each_sg(qc->sg, sg, qc->n_elem, si) {
6102		ioadl->flags_and_data_len = cpu_to_be32(ioadl_flags | sg_dma_len(sg));
6103		ioadl->address = cpu_to_be32(sg_dma_address(sg));
6104
6105		last_ioadl = ioadl;
6106		ioadl++;
6107	}
6108
6109	if (likely(last_ioadl))
6110		last_ioadl->flags_and_data_len |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
6111}
6112
6113/**
6114 * ipr_qc_issue - Issue a SATA qc to a device
6115 * @qc:	queued command
6116 *
6117 * Return value:
6118 * 	0 on success / AC_ERR_* on failure
6119 **/
6120static unsigned int ipr_qc_issue(struct ata_queued_cmd *qc)
6121{
6122	struct ata_port *ap = qc->ap;
6123	struct ipr_sata_port *sata_port = ap->private_data;
6124	struct ipr_resource_entry *res = sata_port->res;
6125	struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
6126	struct ipr_cmnd *ipr_cmd;
6127	struct ipr_ioarcb *ioarcb;
6128	struct ipr_ioarcb_ata_regs *regs;
6129
6130	if (unlikely(!ioa_cfg->allow_cmds || ioa_cfg->ioa_is_dead))
6131		return AC_ERR_SYSTEM;
6132
6133	ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
6134	ioarcb = &ipr_cmd->ioarcb;
6135
6136	if (ioa_cfg->sis64) {
6137		regs = &ipr_cmd->i.ata_ioadl.regs;
6138		ioarcb->add_cmd_parms_offset = cpu_to_be16(sizeof(*ioarcb));
6139	} else
6140		regs = &ioarcb->u.add_data.u.regs;
6141
6142	memset(regs, 0, sizeof(*regs));
6143	ioarcb->add_cmd_parms_len = cpu_to_be16(sizeof(*regs));
6144
6145	list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);
6146	ipr_cmd->qc = qc;
6147	ipr_cmd->done = ipr_sata_done;
6148	ipr_cmd->ioarcb.res_handle = res->res_handle;
6149	ioarcb->cmd_pkt.request_type = IPR_RQTYPE_ATA_PASSTHRU;
6150	ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_LINK_DESC;
6151	ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
6152	ipr_cmd->dma_use_sg = qc->n_elem;
6153
6154	if (ioa_cfg->sis64)
6155		ipr_build_ata_ioadl64(ipr_cmd, qc);
6156	else
6157		ipr_build_ata_ioadl(ipr_cmd, qc);
6158
6159	regs->flags |= IPR_ATA_FLAG_STATUS_ON_GOOD_COMPLETION;
6160	ipr_copy_sata_tf(regs, &qc->tf);
6161	memcpy(ioarcb->cmd_pkt.cdb, qc->cdb, IPR_MAX_CDB_LEN);
6162	ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_GET_RES_PHYS_LOC(res));
6163
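	/* Map the libata protocol onto IOA ATA passthrough flags: DMA
	 * protocols need the DMA transfer type flag and ATAPI protocols
	 * need the packet command flag; anything else is rejected. */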
6164	switch (qc->tf.protocol) {
6165	case ATA_PROT_NODATA:
6166	case ATA_PROT_PIO:
6167		break;
6168
6169	case ATA_PROT_DMA:
6170		regs->flags |= IPR_ATA_FLAG_XFER_TYPE_DMA;
6171		break;
6172
6173	case ATAPI_PROT_PIO:
6174	case ATAPI_PROT_NODATA:
6175		regs->flags |= IPR_ATA_FLAG_PACKET_CMD;
6176		break;
6177
6178	case ATAPI_PROT_DMA:
6179		regs->flags |= IPR_ATA_FLAG_PACKET_CMD;
6180		regs->flags |= IPR_ATA_FLAG_XFER_TYPE_DMA;
6181		break;
6182
6183	default:
6184		WARN_ON(1);
6185		return AC_ERR_INVALID;
6186	}
6187
6188	mb();
6189
6190	ipr_send_command(ipr_cmd);
6191
6192	return 0;
6193}
6194
6195/**
6196 * ipr_qc_fill_rtf - Read result TF
6197 * @qc: ATA queued command
6198 *
6199 * Return value:
6200 * 	true
6201 **/
6202static bool ipr_qc_fill_rtf(struct ata_queued_cmd *qc)
6203{
6204	struct ipr_sata_port *sata_port = qc->ap->private_data;
6205	struct ipr_ioasa_gata *g = &sata_port->ioasa;
6206	struct ata_taskfile *tf = &qc->result_tf;
6207
6208	tf->feature = g->error;
6209	tf->nsect = g->nsect;
6210	tf->lbal = g->lbal;
6211	tf->lbam = g->lbam;
6212	tf->lbah = g->lbah;
6213	tf->device = g->device;
6214	tf->command = g->status;
6215	tf->hob_nsect = g->hob_nsect;
6216	tf->hob_lbal = g->hob_lbal;
6217	tf->hob_lbam = g->hob_lbam;
6218	tf->hob_lbah = g->hob_lbah;
6219	tf->ctl = g->alt_status;
6220
6221	return true;
6222}
6223
6224static struct ata_port_operations ipr_sata_ops = {
6225	.phy_reset = ipr_ata_phy_reset,
6226	.hardreset = ipr_sata_reset,
6227	.post_internal_cmd = ipr_ata_post_internal,
6228	.qc_prep = ata_noop_qc_prep,
6229	.qc_issue = ipr_qc_issue,
6230	.qc_fill_rtf = ipr_qc_fill_rtf,
6231	.port_start = ata_sas_port_start,
6232	.port_stop = ata_sas_port_stop
6233};
6234
6235static struct ata_port_info sata_port_info = {
6236	.flags		= ATA_FLAG_SATA | ATA_FLAG_PIO_DMA,
6237	.pio_mask	= ATA_PIO4_ONLY,
6238	.mwdma_mask	= ATA_MWDMA2,
6239	.udma_mask	= ATA_UDMA6,
6240	.port_ops	= &ipr_sata_ops
6241};
6242
6243#ifdef CONFIG_PPC_PSERIES
6244static const u16 ipr_blocked_processors[] = {
6245	PV_NORTHSTAR,
6246	PV_PULSAR,
6247	PV_POWER4,
6248	PV_ICESTAR,
6249	PV_SSTAR,
6250	PV_POWER4p,
6251	PV_630,
6252	PV_630p
6253};
6254
6255/**
6256 * ipr_invalid_adapter - Determine if this adapter is supported on this hardware
6257 * @ioa_cfg:	ioa cfg struct
6258 *
6259 * Adapters that use Gemstone revision < 3.1 do not work reliably on
6260 * certain pSeries hardware. This function determines if the given
6261 * adapter is in one of these configurations or not.
6262 *
6263 * Return value:
6264 * 	1 if adapter is not supported / 0 if adapter is supported
6265 **/
6266static int ipr_invalid_adapter(struct ipr_ioa_cfg *ioa_cfg)
6267{
6268	int i;
6269
6270	if ((ioa_cfg->type == 0x5702) && (ioa_cfg->pdev->revision < 4)) {
6271		for (i = 0; i < ARRAY_SIZE(ipr_blocked_processors); i++) {
6272			if (__is_processor(ipr_blocked_processors[i]))
6273				return 1;
6274		}
6275	}
6276	return 0;
6277}
6278#else
6279#define ipr_invalid_adapter(ioa_cfg) 0
6280#endif
6281
6282/**
6283 * ipr_ioa_bringdown_done - IOA bring down completion.
6284 * @ipr_cmd:	ipr command struct
6285 *
6286 * This function processes the completion of an adapter bring down.
6287 * It wakes any reset sleepers.
6288 *
6289 * Return value:
6290 * 	IPR_RC_JOB_RETURN
6291 **/
6292static int ipr_ioa_bringdown_done(struct ipr_cmnd *ipr_cmd)
6293{
6294	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6295
6296	ENTER;
6297	ioa_cfg->in_reset_reload = 0;
6298	ioa_cfg->reset_retries = 0;
6299	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
6300	wake_up_all(&ioa_cfg->reset_wait_q);
6301
6302	spin_unlock_irq(ioa_cfg->host->host_lock);
6303	scsi_unblock_requests(ioa_cfg->host);
6304	spin_lock_irq(ioa_cfg->host->host_lock);
6305	LEAVE;
6306
6307	return IPR_RC_JOB_RETURN;
6308}
6309
6310/**
6311 * ipr_ioa_reset_done - IOA reset completion.
6312 * @ipr_cmd:	ipr command struct
6313 *
6314 * This function processes the completion of an adapter reset.
6315 * It schedules any necessary mid-layer add/removes and
6316 * wakes any reset sleepers.
6317 *
6318 * Return value:
6319 * 	IPR_RC_JOB_RETURN
6320 **/
6321static int ipr_ioa_reset_done(struct ipr_cmnd *ipr_cmd)
6322{
6323	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6324	struct ipr_resource_entry *res;
6325	struct ipr_hostrcb *hostrcb, *temp;
6326	int i = 0;
6327
6328	ENTER;
6329	ioa_cfg->in_reset_reload = 0;
6330	ioa_cfg->allow_cmds = 1;
6331	ioa_cfg->reset_cmd = NULL;
6332	ioa_cfg->doorbell |= IPR_RUNTIME_RESET;
6333
6334	list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
6335		if (ioa_cfg->allow_ml_add_del && (res->add_to_ml || res->del_from_ml)) {
6336			ipr_trace;
6337			break;
6338		}
6339	}
6340	schedule_work(&ioa_cfg->work_q);
6341
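	/* Give the free host RCBs back to the adapter: the first
	 * IPR_NUM_LOG_HCAMS are re-registered as error log HCAMs and the
	 * remainder as configuration change HCAMs. */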
6342	list_for_each_entry_safe(hostrcb, temp, &ioa_cfg->hostrcb_free_q, queue) {
6343		list_del(&hostrcb->queue);
6344		if (i++ < IPR_NUM_LOG_HCAMS)
6345			ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_LOG_DATA, hostrcb);
6346		else
6347			ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
6348	}
6349
6350	scsi_report_bus_reset(ioa_cfg->host, IPR_VSET_BUS);
6351	dev_info(&ioa_cfg->pdev->dev, "IOA initialized.\n");
6352
6353	ioa_cfg->reset_retries = 0;
6354	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
6355	wake_up_all(&ioa_cfg->reset_wait_q);
6356
6357	spin_unlock(ioa_cfg->host->host_lock);
6358	scsi_unblock_requests(ioa_cfg->host);
6359	spin_lock(ioa_cfg->host->host_lock);
6360
6361	if (!ioa_cfg->allow_cmds)
6362		scsi_block_requests(ioa_cfg->host);
6363
6364	LEAVE;
6365	return IPR_RC_JOB_RETURN;
6366}
6367
6368/**
6369 * ipr_set_sup_dev_dflt - Initialize a Set Supported Device buffer
6370 * @supported_dev:	supported device struct
6371 * @vpids:			vendor product id struct
6372 *
6373 * Return value:
6374 * 	none
6375 **/
6376static void ipr_set_sup_dev_dflt(struct ipr_supported_device *supported_dev,
6377				 struct ipr_std_inq_vpids *vpids)
6378{
6379	memset(supported_dev, 0, sizeof(struct ipr_supported_device));
6380	memcpy(&supported_dev->vpids, vpids, sizeof(struct ipr_std_inq_vpids));
6381	supported_dev->num_records = 1;
6382	supported_dev->data_length =
6383		cpu_to_be16(sizeof(struct ipr_supported_device));
6384	supported_dev->reserved = 0;
6385}
6386
6387/**
6388 * ipr_set_supported_devs - Send Set Supported Devices for a device
6389 * @ipr_cmd:	ipr command struct
6390 *
6391 * This function sends a Set Supported Devices to the adapter
6392 *
6393 * Return value:
6394 * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
6395 **/
6396static int ipr_set_supported_devs(struct ipr_cmnd *ipr_cmd)
6397{
6398	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6399	struct ipr_supported_device *supp_dev = &ioa_cfg->vpd_cbs->supp_dev;
6400	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
6401	struct ipr_resource_entry *res = ipr_cmd->u.res;
6402
6403	ipr_cmd->job_step = ipr_ioa_reset_done;
6404
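	/*
	 * Issue one Set Supported Devices per SCSI disk. On pre-SIS64
	 * adapters the job step is pointed back at this function so the
	 * next disk is handled when the current request completes; once
	 * the resource list is exhausted the job falls through to
	 * ipr_ioa_reset_done.
	 */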
6405	list_for_each_entry_continue(res, &ioa_cfg->used_res_q, queue) {
6406		if (!ipr_is_scsi_disk(res))
6407			continue;
6408
6409		ipr_cmd->u.res = res;
6410		ipr_set_sup_dev_dflt(supp_dev, &res->std_inq_data.vpids);
6411
6412		ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
6413		ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
6414		ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
6415
6416		ioarcb->cmd_pkt.cdb[0] = IPR_SET_SUPPORTED_DEVICES;
6417		ioarcb->cmd_pkt.cdb[1] = IPR_SET_ALL_SUPPORTED_DEVICES;
6418		ioarcb->cmd_pkt.cdb[7] = (sizeof(struct ipr_supported_device) >> 8) & 0xff;
6419		ioarcb->cmd_pkt.cdb[8] = sizeof(struct ipr_supported_device) & 0xff;
6420
6421		ipr_init_ioadl(ipr_cmd,
6422			       ioa_cfg->vpd_cbs_dma +
6423				 offsetof(struct ipr_misc_cbs, supp_dev),
6424			       sizeof(struct ipr_supported_device),
6425			       IPR_IOADL_FLAGS_WRITE_LAST);
6426
6427		ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
6428			   IPR_SET_SUP_DEVICE_TIMEOUT);
6429
6430		if (!ioa_cfg->sis64)
6431			ipr_cmd->job_step = ipr_set_supported_devs;
6432		return IPR_RC_JOB_RETURN;
6433	}
6434
6435	return IPR_RC_JOB_CONTINUE;
6436}
6437
6438/**
6439 * ipr_get_mode_page - Locate specified mode page
6440 * @mode_pages:	mode page buffer
6441 * @page_code:	page code to find
6442 * @len:		minimum required length for mode page
6443 *
6444 * Return value:
6445 * 	pointer to mode page / NULL on failure
6446 **/
6447static void *ipr_get_mode_page(struct ipr_mode_pages *mode_pages,
6448			       u32 page_code, u32 len)
6449{
6450	struct ipr_mode_page_hdr *mode_hdr;
6451	u32 page_length;
6452	u32 length;
6453
6454	if (!mode_pages || (mode_pages->hdr.length == 0))
6455		return NULL;
6456
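	/*
	 * The mode data length field does not include itself, so the total
	 * mode data is hdr.length + 1 bytes; subtracting the 4 byte header
	 * and the block descriptors leaves the number of mode page bytes
	 * to walk.
	 */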
6457	length = (mode_pages->hdr.length + 1) - 4 - mode_pages->hdr.block_desc_len;
6458	mode_hdr = (struct ipr_mode_page_hdr *)
6459		(mode_pages->data + mode_pages->hdr.block_desc_len);
6460
6461	while (length) {
6462		if (IPR_GET_MODE_PAGE_CODE(mode_hdr) == page_code) {
6463			if (mode_hdr->page_length >= (len - sizeof(struct ipr_mode_page_hdr)))
6464				return mode_hdr;
6465			break;
6466		} else {
6467			page_length = (sizeof(struct ipr_mode_page_hdr) +
6468				       mode_hdr->page_length);
6469			length -= page_length;
6470			mode_hdr = (struct ipr_mode_page_hdr *)
6471				((unsigned long)mode_hdr + page_length);
6472		}
6473	}
6474	return NULL;
6475}
6476
6477/**
6478 * ipr_check_term_power - Check for term power errors
6479 * @ioa_cfg:	ioa config struct
6480 * @mode_pages:	IOAFP mode pages buffer
6481 *
6482 * Check the IOAFP's mode page 28 for term power errors
6483 *
6484 * Return value:
6485 * 	nothing
6486 **/
6487static void ipr_check_term_power(struct ipr_ioa_cfg *ioa_cfg,
6488				 struct ipr_mode_pages *mode_pages)
6489{
6490	int i;
6491	int entry_length;
6492	struct ipr_dev_bus_entry *bus;
6493	struct ipr_mode_page28 *mode_page;
6494
6495	mode_page = ipr_get_mode_page(mode_pages, 0x28,
6496				      sizeof(struct ipr_mode_page28));
6497
6498	entry_length = mode_page->entry_length;
6499
6500	bus = mode_page->bus;
6501
6502	for (i = 0; i < mode_page->num_entries; i++) {
6503		if (bus->flags & IPR_SCSI_ATTR_NO_TERM_PWR) {
6504			dev_err(&ioa_cfg->pdev->dev,
6505				"Term power is absent on scsi bus %d\n",
6506				bus->res_addr.bus);
6507		}
6508
6509		bus = (struct ipr_dev_bus_entry *)((char *)bus + entry_length);
6510	}
6511}
6512
6513/**
6514 * ipr_scsi_bus_speed_limit - Limit the SCSI speed based on SES table
6515 * @ioa_cfg:	ioa config struct
6516 *
6517 * Looks through the config table checking for SES devices. If
6518 * an SES device is listed in the SES table with a maximum SCSI
6519 * bus speed, the speed of that bus is limited accordingly.
6520 *
6521 * Return value:
6522 * 	none
6523 **/
6524static void ipr_scsi_bus_speed_limit(struct ipr_ioa_cfg *ioa_cfg)
6525{
6526	u32 max_xfer_rate;
6527	int i;
6528
6529	for (i = 0; i < IPR_MAX_NUM_BUSES; i++) {
6530		max_xfer_rate = ipr_get_max_scsi_speed(ioa_cfg, i,
6531						       ioa_cfg->bus_attr[i].bus_width);
6532
6533		if (max_xfer_rate < ioa_cfg->bus_attr[i].max_xfer_rate)
6534			ioa_cfg->bus_attr[i].max_xfer_rate = max_xfer_rate;
6535	}
6536}
6537
6538/**
6539 * ipr_modify_ioafp_mode_page_28 - Modify IOAFP Mode Page 28
6540 * @ioa_cfg:	ioa config struct
6541 * @mode_pages:	mode page 28 buffer
6542 *
6543 * Updates mode page 28 based on driver configuration
6544 *
6545 * Return value:
6546 * 	none
6547 **/
6548static void ipr_modify_ioafp_mode_page_28(struct ipr_ioa_cfg *ioa_cfg,
6549					  struct ipr_mode_pages *mode_pages)
6550{
6551	int i, entry_length;
6552	struct ipr_dev_bus_entry *bus;
6553	struct ipr_bus_attributes *bus_attr;
6554	struct ipr_mode_page28 *mode_page;
6555
6556	mode_page = ipr_get_mode_page(mode_pages, 0x28,
6557				      sizeof(struct ipr_mode_page28));
6558
6559	entry_length = mode_page->entry_length;
6560
6561	/* Loop for each device bus entry */
6562	for (i = 0, bus = mode_page->bus;
6563	     i < mode_page->num_entries;
6564	     i++, bus = (struct ipr_dev_bus_entry *)((u8 *)bus + entry_length)) {
6565		if (bus->res_addr.bus > IPR_MAX_NUM_BUSES) {
6566			dev_err(&ioa_cfg->pdev->dev,
6567				"Invalid resource address reported: 0x%08X\n",
6568				IPR_GET_PHYS_LOC(bus->res_addr));
6569			continue;
6570		}
6571
6572		bus_attr = &ioa_cfg->bus_attr[i];
6573		bus->extended_reset_delay = IPR_EXTENDED_RESET_DELAY;
6574		bus->bus_width = bus_attr->bus_width;
6575		bus->max_xfer_rate = cpu_to_be32(bus_attr->max_xfer_rate);
6576		bus->flags &= ~IPR_SCSI_ATTR_QAS_MASK;
6577		if (bus_attr->qas_enabled)
6578			bus->flags |= IPR_SCSI_ATTR_ENABLE_QAS;
6579		else
6580			bus->flags |= IPR_SCSI_ATTR_DISABLE_QAS;
6581	}
6582}
6583
6584/**
6585 * ipr_build_mode_select - Build a mode select command
6586 * @ipr_cmd:	ipr command struct
6587 * @res_handle:	resource handle to send command to
6588 * @parm:		Byte 1 of Mode Select command
6589 * @dma_addr:	DMA buffer address
6590 * @xfer_len:	data transfer length
6591 *
6592 * Return value:
6593 * 	none
6594 **/
6595static void ipr_build_mode_select(struct ipr_cmnd *ipr_cmd,
6596				  __be32 res_handle, u8 parm,
6597				  dma_addr_t dma_addr, u8 xfer_len)
6598{
6599	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
6600
6601	ioarcb->res_handle = res_handle;
6602	ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
6603	ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
6604	ioarcb->cmd_pkt.cdb[0] = MODE_SELECT;
6605	ioarcb->cmd_pkt.cdb[1] = parm;
6606	ioarcb->cmd_pkt.cdb[4] = xfer_len;
6607
6608	ipr_init_ioadl(ipr_cmd, dma_addr, xfer_len, IPR_IOADL_FLAGS_WRITE_LAST);
6609}
6610
6611/**
6612 * ipr_ioafp_mode_select_page28 - Issue Mode Select Page 28 to IOA
6613 * @ipr_cmd:	ipr command struct
6614 *
6615 * This function sets up the SCSI bus attributes and sends
6616 * a Mode Select for Page 28 to activate them.
6617 *
6618 * Return value:
6619 * 	IPR_RC_JOB_RETURN
6620 **/
6621static int ipr_ioafp_mode_select_page28(struct ipr_cmnd *ipr_cmd)
6622{
6623	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6624	struct ipr_mode_pages *mode_pages = &ioa_cfg->vpd_cbs->mode_pages;
6625	int length;
6626
6627	ENTER;
6628	ipr_scsi_bus_speed_limit(ioa_cfg);
6629	ipr_check_term_power(ioa_cfg, mode_pages);
6630	ipr_modify_ioafp_mode_page_28(ioa_cfg, mode_pages);
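	/* The transfer length comes from the mode data length field, which
	 * is then zeroed because that field is reserved in the MODE SELECT
	 * parameter list. */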
6631	length = mode_pages->hdr.length + 1;
6632	mode_pages->hdr.length = 0;
6633
6634	ipr_build_mode_select(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE), 0x11,
6635			      ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, mode_pages),
6636			      length);
6637
6638	ipr_cmd->job_step = ipr_set_supported_devs;
6639	ipr_cmd->u.res = list_entry(ioa_cfg->used_res_q.next,
6640				    struct ipr_resource_entry, queue);
6641	ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
6642
6643	LEAVE;
6644	return IPR_RC_JOB_RETURN;
6645}
6646
6647/**
6648 * ipr_build_mode_sense - Builds a mode sense command
6649 * @ipr_cmd:	ipr command struct
6650 * @res_handle:	resource handle to send command to
6651 * @parm:		Byte 2 of mode sense command
6652 * @dma_addr:	DMA address of mode sense buffer
6653 * @xfer_len:	Size of DMA buffer
6654 *
6655 * Return value:
6656 * 	none
6657 **/
6658static void ipr_build_mode_sense(struct ipr_cmnd *ipr_cmd,
6659				 __be32 res_handle,
6660				 u8 parm, dma_addr_t dma_addr, u8 xfer_len)
6661{
6662	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
6663
6664	ioarcb->res_handle = res_handle;
6665	ioarcb->cmd_pkt.cdb[0] = MODE_SENSE;
6666	ioarcb->cmd_pkt.cdb[2] = parm;
6667	ioarcb->cmd_pkt.cdb[4] = xfer_len;
6668	ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
6669
6670	ipr_init_ioadl(ipr_cmd, dma_addr, xfer_len, IPR_IOADL_FLAGS_READ_LAST);
6671}
6672
6673/**
6674 * ipr_reset_cmd_failed - Handle failure of IOA reset command
6675 * @ipr_cmd:	ipr command struct
6676 *
6677 * This function handles the failure of an IOA bringup command.
6678 *
6679 * Return value:
6680 * 	IPR_RC_JOB_RETURN
6681 **/
6682static int ipr_reset_cmd_failed(struct ipr_cmnd *ipr_cmd)
6683{
6684	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6685	u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
6686
6687	dev_err(&ioa_cfg->pdev->dev,
6688		"0x%02X failed with IOASC: 0x%08X\n",
6689		ipr_cmd->ioarcb.cmd_pkt.cdb[0], ioasc);
6690
6691	ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
6692	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
6693	return IPR_RC_JOB_RETURN;
6694}
6695
6696/**
6697 * ipr_reset_mode_sense_failed - Handle failure of IOAFP mode sense
6698 * @ipr_cmd:	ipr command struct
6699 *
6700 * This function handles the failure of a Mode Sense to the IOAFP.
6701 * Some adapters do not handle all mode pages.
6702 *
6703 * Return value:
6704 * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
6705 **/
6706static int ipr_reset_mode_sense_failed(struct ipr_cmnd *ipr_cmd)
6707{
6708	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6709	u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
6710
6711	if (ioasc == IPR_IOASC_IR_INVALID_REQ_TYPE_OR_PKT) {
6712		ipr_cmd->job_step = ipr_set_supported_devs;
6713		ipr_cmd->u.res = list_entry(ioa_cfg->used_res_q.next,
6714					    struct ipr_resource_entry, queue);
6715		return IPR_RC_JOB_CONTINUE;
6716	}
6717
6718	return ipr_reset_cmd_failed(ipr_cmd);
6719}
6720
6721/**
6722 * ipr_ioafp_mode_sense_page28 - Issue Mode Sense Page 28 to IOA
6723 * @ipr_cmd:	ipr command struct
6724 *
6725 * This function sends a Page 28 mode sense to the IOA to
6726 * retrieve SCSI bus attributes.
6727 *
6728 * Return value:
6729 * 	IPR_RC_JOB_RETURN
6730 **/
6731static int ipr_ioafp_mode_sense_page28(struct ipr_cmnd *ipr_cmd)
6732{
6733	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6734
6735	ENTER;
6736	ipr_build_mode_sense(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE),
6737			     0x28, ioa_cfg->vpd_cbs_dma +
6738			     offsetof(struct ipr_misc_cbs, mode_pages),
6739			     sizeof(struct ipr_mode_pages));
6740
6741	ipr_cmd->job_step = ipr_ioafp_mode_select_page28;
6742	ipr_cmd->job_step_failed = ipr_reset_mode_sense_failed;
6743
6744	ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
6745
6746	LEAVE;
6747	return IPR_RC_JOB_RETURN;
6748}
6749
6750/**
6751 * ipr_ioafp_mode_select_page24 - Issue Mode Select to IOA
6752 * @ipr_cmd:	ipr command struct
6753 *
6754 * This function enables dual IOA RAID support if possible.
6755 *
6756 * Return value:
6757 * 	IPR_RC_JOB_RETURN
6758 **/
6759static int ipr_ioafp_mode_select_page24(struct ipr_cmnd *ipr_cmd)
6760{
6761	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6762	struct ipr_mode_pages *mode_pages = &ioa_cfg->vpd_cbs->mode_pages;
6763	struct ipr_mode_page24 *mode_page;
6764	int length;
6765
6766	ENTER;
6767	mode_page = ipr_get_mode_page(mode_pages, 0x24,
6768				      sizeof(struct ipr_mode_page24));
6769
6770	if (mode_page)
6771		mode_page->flags |= IPR_ENABLE_DUAL_IOA_AF;
6772
6773	length = mode_pages->hdr.length + 1;
6774	mode_pages->hdr.length = 0;
6775
6776	ipr_build_mode_select(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE), 0x11,
6777			      ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, mode_pages),
6778			      length);
6779
6780	ipr_cmd->job_step = ipr_ioafp_mode_sense_page28;
6781	ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
6782
6783	LEAVE;
6784	return IPR_RC_JOB_RETURN;
6785}
6786
6787/**
6788 * ipr_reset_mode_sense_page24_failed - Handle failure of IOAFP mode sense
6789 * @ipr_cmd:	ipr command struct
6790 *
6791 * This function handles the failure of a Mode Sense to the IOAFP.
6792 * Some adapters do not handle all mode pages.
6793 *
6794 * Return value:
6795 * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
6796 **/
6797static int ipr_reset_mode_sense_page24_failed(struct ipr_cmnd *ipr_cmd)
6798{
6799	u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
6800
6801	if (ioasc == IPR_IOASC_IR_INVALID_REQ_TYPE_OR_PKT) {
6802		ipr_cmd->job_step = ipr_ioafp_mode_sense_page28;
6803		return IPR_RC_JOB_CONTINUE;
6804	}
6805
6806	return ipr_reset_cmd_failed(ipr_cmd);
6807}
6808
6809/**
6810 * ipr_ioafp_mode_sense_page24 - Issue Page 24 Mode Sense to IOA
6811 * @ipr_cmd:	ipr command struct
6812 *
6813 * This function sends a mode sense to the IOA to retrieve
6814 * the IOA Advanced Function Control mode page.
6815 *
6816 * Return value:
6817 * 	IPR_RC_JOB_RETURN
6818 **/
6819static int ipr_ioafp_mode_sense_page24(struct ipr_cmnd *ipr_cmd)
6820{
6821	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6822
6823	ENTER;
6824	ipr_build_mode_sense(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE),
6825			     0x24, ioa_cfg->vpd_cbs_dma +
6826			     offsetof(struct ipr_misc_cbs, mode_pages),
6827			     sizeof(struct ipr_mode_pages));
6828
6829	ipr_cmd->job_step = ipr_ioafp_mode_select_page24;
6830	ipr_cmd->job_step_failed = ipr_reset_mode_sense_page24_failed;
6831
6832	ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
6833
6834	LEAVE;
6835	return IPR_RC_JOB_RETURN;
6836}
6837
6838/**
6839 * ipr_init_res_table - Initialize the resource table
6840 * @ipr_cmd:	ipr command struct
6841 *
6842 * This function looks through the existing resource table, comparing
6843 * it with the config table. This function will take care of old/new
6844 * devices and schedule adding/removing them from the mid-layer
6845 * as appropriate.
6846 *
6847 * Return value:
6848 * 	IPR_RC_JOB_CONTINUE
6849 **/
6850static int ipr_init_res_table(struct ipr_cmnd *ipr_cmd)
6851{
6852	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6853	struct ipr_resource_entry *res, *temp;
6854	struct ipr_config_table_entry_wrapper cfgtew;
6855	int entries, found, flag, i;
6856	LIST_HEAD(old_res);
6857
6858	ENTER;
6859	if (ioa_cfg->sis64)
6860		flag = ioa_cfg->u.cfg_table64->hdr64.flags;
6861	else
6862		flag = ioa_cfg->u.cfg_table->hdr.flags;
6863
6864	if (flag & IPR_UCODE_DOWNLOAD_REQ)
6865		dev_err(&ioa_cfg->pdev->dev, "Microcode download required\n");
6866
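	/*
	 * Park every known resource on old_res, then walk the new config
	 * table: matching entries move back to used_res_q, new entries are
	 * taken from free_res_q and flagged for addition to the mid-layer,
	 * and whatever is left on old_res afterwards is either scheduled
	 * for removal from the mid-layer or returned to the free list.
	 */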
6867	list_for_each_entry_safe(res, temp, &ioa_cfg->used_res_q, queue)
6868		list_move_tail(&res->queue, &old_res);
6869
6870	if (ioa_cfg->sis64)
6871		entries = be16_to_cpu(ioa_cfg->u.cfg_table64->hdr64.num_entries);
6872	else
6873		entries = ioa_cfg->u.cfg_table->hdr.num_entries;
6874
6875	for (i = 0; i < entries; i++) {
6876		if (ioa_cfg->sis64)
6877			cfgtew.u.cfgte64 = &ioa_cfg->u.cfg_table64->dev[i];
6878		else
6879			cfgtew.u.cfgte = &ioa_cfg->u.cfg_table->dev[i];
6880		found = 0;
6881
6882		list_for_each_entry_safe(res, temp, &old_res, queue) {
6883			if (ipr_is_same_device(res, &cfgtew)) {
6884				list_move_tail(&res->queue, &ioa_cfg->used_res_q);
6885				found = 1;
6886				break;
6887			}
6888		}
6889
6890		if (!found) {
6891			if (list_empty(&ioa_cfg->free_res_q)) {
6892				dev_err(&ioa_cfg->pdev->dev, "Too many devices attached\n");
6893				break;
6894			}
6895
6896			found = 1;
6897			res = list_entry(ioa_cfg->free_res_q.next,
6898					 struct ipr_resource_entry, queue);
6899			list_move_tail(&res->queue, &ioa_cfg->used_res_q);
6900			ipr_init_res_entry(res, &cfgtew);
6901			res->add_to_ml = 1;
6902		} else if (res->sdev && (ipr_is_vset_device(res) || ipr_is_scsi_disk(res)))
6903			res->sdev->allow_restart = 1;
6904
6905		if (found)
6906			ipr_update_res_entry(res, &cfgtew);
6907	}
6908
6909	list_for_each_entry_safe(res, temp, &old_res, queue) {
6910		if (res->sdev) {
6911			res->del_from_ml = 1;
6912			res->res_handle = IPR_INVALID_RES_HANDLE;
6913			list_move_tail(&res->queue, &ioa_cfg->used_res_q);
6914		}
6915	}
6916
6917	list_for_each_entry_safe(res, temp, &old_res, queue) {
6918		ipr_clear_res_target(res);
6919		list_move_tail(&res->queue, &ioa_cfg->free_res_q);
6920	}
6921
6922	if (ioa_cfg->dual_raid && ipr_dual_ioa_raid)
6923		ipr_cmd->job_step = ipr_ioafp_mode_sense_page24;
6924	else
6925		ipr_cmd->job_step = ipr_ioafp_mode_sense_page28;
6926
6927	LEAVE;
6928	return IPR_RC_JOB_CONTINUE;
6929}
6930
6931/**
6932 * ipr_ioafp_query_ioa_cfg - Send a Query IOA Config to the adapter.
6933 * @ipr_cmd:	ipr command struct
6934 *
6935 * This function sends a Query IOA Configuration command
6936 * to the adapter to retrieve the IOA configuration table.
6937 *
6938 * Return value:
6939 * 	IPR_RC_JOB_RETURN
6940 **/
6941static int ipr_ioafp_query_ioa_cfg(struct ipr_cmnd *ipr_cmd)
6942{
6943	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6944	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
6945	struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
6946	struct ipr_inquiry_cap *cap = &ioa_cfg->vpd_cbs->cap;
6947
6948	ENTER;
6949	if (cap->cap & IPR_CAP_DUAL_IOA_RAID)
6950		ioa_cfg->dual_raid = 1;
6951	dev_info(&ioa_cfg->pdev->dev, "Adapter firmware version: %02X%02X%02X%02X\n",
6952		 ucode_vpd->major_release, ucode_vpd->card_type,
6953		 ucode_vpd->minor_release[0], ucode_vpd->minor_release[1]);
6954	ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
6955	ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
6956
6957	ioarcb->cmd_pkt.cdb[0] = IPR_QUERY_IOA_CONFIG;
6958	ioarcb->cmd_pkt.cdb[6] = (ioa_cfg->cfg_table_size >> 16) & 0xff;
6959	ioarcb->cmd_pkt.cdb[7] = (ioa_cfg->cfg_table_size >> 8) & 0xff;
6960	ioarcb->cmd_pkt.cdb[8] = ioa_cfg->cfg_table_size & 0xff;
6961
6962	ipr_init_ioadl(ipr_cmd, ioa_cfg->cfg_table_dma, ioa_cfg->cfg_table_size,
6963		       IPR_IOADL_FLAGS_READ_LAST);
6964
6965	ipr_cmd->job_step = ipr_init_res_table;
6966
6967	ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
6968
6969	LEAVE;
6970	return IPR_RC_JOB_RETURN;
6971}
6972
6973/**
6974 * ipr_ioafp_inquiry - Send an Inquiry to the adapter.
6975 * @ipr_cmd:	ipr command struct
6976 *
6977 * This utility function sends an inquiry to the adapter.
6978 *
6979 * Return value:
6980 * 	none
6981 **/
6982static void ipr_ioafp_inquiry(struct ipr_cmnd *ipr_cmd, u8 flags, u8 page,
6983			      dma_addr_t dma_addr, u8 xfer_len)
6984{
6985	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
6986
6987	ENTER;
6988	ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
6989	ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
6990
6991	ioarcb->cmd_pkt.cdb[0] = INQUIRY;
6992	ioarcb->cmd_pkt.cdb[1] = flags;
6993	ioarcb->cmd_pkt.cdb[2] = page;
6994	ioarcb->cmd_pkt.cdb[4] = xfer_len;
6995
6996	ipr_init_ioadl(ipr_cmd, dma_addr, xfer_len, IPR_IOADL_FLAGS_READ_LAST);
6997
6998	ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
6999	LEAVE;
7000}
7001
7002/**
7003 * ipr_inquiry_page_supported - Is the given inquiry page supported
7004 * @page0:		inquiry page 0 buffer
7005 * @page:		page code.
7006 *
7007 * This function determines if the specified inquiry page is supported.
7008 *
7009 * Return value:
7010 *	1 if page is supported / 0 if not
7011 **/
7012static int ipr_inquiry_page_supported(struct ipr_inquiry_page0 *page0, u8 page)
7013{
7014	int i;
7015
7016	for (i = 0; i < min_t(u8, page0->len, IPR_INQUIRY_PAGE0_ENTRIES); i++)
7017		if (page0->page[i] == page)
7018			return 1;
7019
7020	return 0;
7021}
7022
7023/**
7024 * ipr_ioafp_cap_inquiry - Send a Page 0xD0 Inquiry to the adapter.
7025 * @ipr_cmd:	ipr command struct
7026 *
7027 * This function sends a Page 0xD0 inquiry to the adapter
7028 * to retrieve adapter capabilities.
7029 *
7030 * Return value:
7031 * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7032 **/
7033static int ipr_ioafp_cap_inquiry(struct ipr_cmnd *ipr_cmd)
7034{
7035	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7036	struct ipr_inquiry_page0 *page0 = &ioa_cfg->vpd_cbs->page0_data;
7037	struct ipr_inquiry_cap *cap = &ioa_cfg->vpd_cbs->cap;
7038
7039	ENTER;
7040	ipr_cmd->job_step = ipr_ioafp_query_ioa_cfg;
7041	memset(cap, 0, sizeof(*cap));
7042
7043	if (ipr_inquiry_page_supported(page0, 0xD0)) {
7044		ipr_ioafp_inquiry(ipr_cmd, 1, 0xD0,
7045				  ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, cap),
7046				  sizeof(struct ipr_inquiry_cap));
7047		return IPR_RC_JOB_RETURN;
7048	}
7049
7050	LEAVE;
7051	return IPR_RC_JOB_CONTINUE;
7052}
7053
7054/**
7055 * ipr_ioafp_page3_inquiry - Send a Page 3 Inquiry to the adapter.
7056 * @ipr_cmd:	ipr command struct
7057 *
7058 * This function sends a Page 3 inquiry to the adapter
7059 * to retrieve software VPD information.
7060 *
7061 * Return value:
7062 * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7063 **/
7064static int ipr_ioafp_page3_inquiry(struct ipr_cmnd *ipr_cmd)
7065{
7066	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7067
7068	ENTER;
7069
7070	ipr_cmd->job_step = ipr_ioafp_cap_inquiry;
7071
7072	ipr_ioafp_inquiry(ipr_cmd, 1, 3,
7073			  ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, page3_data),
7074			  sizeof(struct ipr_inquiry_page3));
7075
7076	LEAVE;
7077	return IPR_RC_JOB_RETURN;
7078}
7079
7080/**
7081 * ipr_ioafp_page0_inquiry - Send a Page 0 Inquiry to the adapter.
7082 * @ipr_cmd:	ipr command struct
7083 *
7084 * This function sends a Page 0 inquiry to the adapter
7085 * to retrieve supported inquiry pages.
7086 *
7087 * Return value:
7088 * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7089 **/
7090static int ipr_ioafp_page0_inquiry(struct ipr_cmnd *ipr_cmd)
7091{
7092	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7093	char type[5];
7094
7095	ENTER;
7096
7097	/* Grab the type out of the VPD and store it away */
7098	memcpy(type, ioa_cfg->vpd_cbs->ioa_vpd.std_inq_data.vpids.product_id, 4);
7099	type[4] = '\0';
7100	ioa_cfg->type = simple_strtoul((char *)type, NULL, 16);
7101
7102	ipr_cmd->job_step = ipr_ioafp_page3_inquiry;
7103
7104	ipr_ioafp_inquiry(ipr_cmd, 1, 0,
7105			  ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, page0_data),
7106			  sizeof(struct ipr_inquiry_page0));
7107
7108	LEAVE;
7109	return IPR_RC_JOB_RETURN;
7110}
7111
7112/**
7113 * ipr_ioafp_std_inquiry - Send a Standard Inquiry to the adapter.
7114 * @ipr_cmd:	ipr command struct
7115 *
7116 * This function sends a standard inquiry to the adapter.
7117 *
7118 * Return value:
7119 * 	IPR_RC_JOB_RETURN
7120 **/
7121static int ipr_ioafp_std_inquiry(struct ipr_cmnd *ipr_cmd)
7122{
7123	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7124
7125	ENTER;
7126	ipr_cmd->job_step = ipr_ioafp_page0_inquiry;
7127
7128	ipr_ioafp_inquiry(ipr_cmd, 0, 0,
7129			  ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, ioa_vpd),
7130			  sizeof(struct ipr_ioa_vpd));
7131
7132	LEAVE;
7133	return IPR_RC_JOB_RETURN;
7134}
7135
7136/**
7137 * ipr_ioafp_identify_hrrq - Send Identify Host RRQ.
7138 * @ipr_cmd:	ipr command struct
7139 *
7140 * This function sends an Identify Host Request Response Queue
7141 * command to establish the HRRQ with the adapter.
7142 *
7143 * Return value:
7144 * 	IPR_RC_JOB_RETURN
7145 **/
7146static int ipr_ioafp_identify_hrrq(struct ipr_cmnd *ipr_cmd)
7147{
7148	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7149	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
7150
7151	ENTER;
7152	dev_info(&ioa_cfg->pdev->dev, "Starting IOA initialization sequence.\n");
7153
7154	ioarcb->cmd_pkt.cdb[0] = IPR_ID_HOST_RR_Q;
7155	ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
7156
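	/*
	 * The Identify HRRQ CDB carries the host RRQ DMA address in
	 * bytes 2-5 (bytes 10-13 hold the upper 32 bits on SIS-64
	 * adapters) and the length of the queue, in bytes, in CDB
	 * bytes 7-8.
	 */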
7157	ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
7158	if (ioa_cfg->sis64)
7159		ioarcb->cmd_pkt.cdb[1] = 0x1;
7160	ioarcb->cmd_pkt.cdb[2] =
7161		((u64) ioa_cfg->host_rrq_dma >> 24) & 0xff;
7162	ioarcb->cmd_pkt.cdb[3] =
7163		((u64) ioa_cfg->host_rrq_dma >> 16) & 0xff;
7164	ioarcb->cmd_pkt.cdb[4] =
7165		((u64) ioa_cfg->host_rrq_dma >> 8) & 0xff;
7166	ioarcb->cmd_pkt.cdb[5] =
7167		((u64) ioa_cfg->host_rrq_dma) & 0xff;
7168	ioarcb->cmd_pkt.cdb[7] =
7169		((sizeof(u32) * IPR_NUM_CMD_BLKS) >> 8) & 0xff;
7170	ioarcb->cmd_pkt.cdb[8] =
7171		(sizeof(u32) * IPR_NUM_CMD_BLKS) & 0xff;
7172
7173	if (ioa_cfg->sis64) {
7174		ioarcb->cmd_pkt.cdb[10] =
7175			((u64) ioa_cfg->host_rrq_dma >> 56) & 0xff;
7176		ioarcb->cmd_pkt.cdb[11] =
7177			((u64) ioa_cfg->host_rrq_dma >> 48) & 0xff;
7178		ioarcb->cmd_pkt.cdb[12] =
7179			((u64) ioa_cfg->host_rrq_dma >> 40) & 0xff;
7180		ioarcb->cmd_pkt.cdb[13] =
7181			((u64) ioa_cfg->host_rrq_dma >> 32) & 0xff;
7182	}
7183
7184	ipr_cmd->job_step = ipr_ioafp_std_inquiry;
7185
7186	ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7187
7188	LEAVE;
7189	return IPR_RC_JOB_RETURN;
7190}
7191
7192/**
7193 * ipr_reset_timer_done - Adapter reset timer function
7194 * @ipr_cmd:	ipr command struct
7195 *
7196 * Description: This function is used in adapter reset processing
7197 * for timing events. If the reset_cmd pointer in the IOA
7198 * config struct is not this adapter's we are doing nested
7199 * resets and fail_all_ops will take care of freeing the
7200 * command block.
7201 *
7202 * Return value:
7203 * 	none
7204 **/
7205static void ipr_reset_timer_done(struct ipr_cmnd *ipr_cmd)
7206{
7207	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7208	unsigned long lock_flags = 0;
7209
7210	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
7211
7212	if (ioa_cfg->reset_cmd == ipr_cmd) {
7213		list_del(&ipr_cmd->queue);
7214		ipr_cmd->done(ipr_cmd);
7215	}
7216
7217	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
7218}
7219
7220/**
7221 * ipr_reset_start_timer - Start a timer for adapter reset job
7222 * @ipr_cmd:	ipr command struct
7223 * @timeout:	timeout value
7224 *
7225 * Description: This function is used in adapter reset processing
7226 * for timing events. If the reset_cmd pointer in the IOA
7227 * config struct is not this adapter's we are doing nested
7228 * resets and fail_all_ops will take care of freeing the
7229 * command block.
7230 *
7231 * Return value:
7232 * 	none
7233 **/
7234static void ipr_reset_start_timer(struct ipr_cmnd *ipr_cmd,
7235				  unsigned long timeout)
7236{
7237	list_add_tail(&ipr_cmd->queue, &ipr_cmd->ioa_cfg->pending_q);
7238	ipr_cmd->done = ipr_reset_ioa_job;
7239
7240	ipr_cmd->timer.data = (unsigned long) ipr_cmd;
7241	ipr_cmd->timer.expires = jiffies + timeout;
7242	ipr_cmd->timer.function = (void (*)(unsigned long))ipr_reset_timer_done;
7243	add_timer(&ipr_cmd->timer);
7244}
7245
7246/**
7247 * ipr_init_ioa_mem - Initialize ioa_cfg control block
7248 * @ioa_cfg:	ioa cfg struct
7249 *
7250 * Return value:
7251 * 	nothing
7252 **/
7253static void ipr_init_ioa_mem(struct ipr_ioa_cfg *ioa_cfg)
7254{
7255	memset(ioa_cfg->host_rrq, 0, sizeof(u32) * IPR_NUM_CMD_BLKS);
7256
7257	/* Initialize Host RRQ pointers */
7258	ioa_cfg->hrrq_start = ioa_cfg->host_rrq;
7259	ioa_cfg->hrrq_end = &ioa_cfg->host_rrq[IPR_NUM_CMD_BLKS - 1];
7260	ioa_cfg->hrrq_curr = ioa_cfg->hrrq_start;
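	/*
	 * The toggle bit flips each time the queue wraps, letting the
	 * interrupt handler tell newly written responses from entries
	 * left over from the previous pass through the queue.
	 */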
7261	ioa_cfg->toggle_bit = 1;
7262
7263	/* Zero out config table */
7264	memset(ioa_cfg->u.cfg_table, 0, ioa_cfg->cfg_table_size);
7265}
7266
7267/**
7268 * ipr_reset_next_stage - Process IPL stage change based on feedback register.
7269 * @ipr_cmd:	ipr command struct
7270 *
7271 * Return value:
7272 * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7273 **/
7274static int ipr_reset_next_stage(struct ipr_cmnd *ipr_cmd)
7275{
7276	unsigned long stage, stage_time;
7277	u32 feedback;
7278	volatile u32 int_reg;
7279	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7280	u64 maskval = 0;
7281
7282	feedback = readl(ioa_cfg->regs.init_feedback_reg);
7283	stage = feedback & IPR_IPL_INIT_STAGE_MASK;
7284	stage_time = feedback & IPR_IPL_INIT_STAGE_TIME_MASK;
7285
7286	ipr_dbg("IPL stage = 0x%lx, IPL stage time = %ld\n", stage, stage_time);
7287
7288	/* sanity check the stage_time value */
7289	if (stage_time == 0)
7290		stage_time = IPR_IPL_INIT_DEFAULT_STAGE_TIME;
7291	else if (stage_time < IPR_IPL_INIT_MIN_STAGE_TIME)
7292		stage_time = IPR_IPL_INIT_MIN_STAGE_TIME;
7293	else if (stage_time > IPR_LONG_OPERATIONAL_TIMEOUT)
7294		stage_time = IPR_LONG_OPERATIONAL_TIMEOUT;
7295
7296	if (stage == IPR_IPL_INIT_STAGE_UNKNOWN) {
7297		writel(IPR_PCII_IPL_STAGE_CHANGE, ioa_cfg->regs.set_interrupt_mask_reg);
7298		int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
7299		stage_time = ioa_cfg->transop_timeout;
7300		ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
7301	} else if (stage == IPR_IPL_INIT_STAGE_TRANSOP) {
7302		int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
7303		if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
7304			ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
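			/*
			 * Mask both sources with a single 64-bit write:
			 * stage change in the upper 32 bits, trans to
			 * operational in the lower 32 bits.
			 */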
7305			maskval = IPR_PCII_IPL_STAGE_CHANGE;
7306			maskval = (maskval << 32) | IPR_PCII_IOA_TRANS_TO_OPER;
7307			writeq(maskval, ioa_cfg->regs.set_interrupt_mask_reg);
7308			int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
7309			return IPR_RC_JOB_CONTINUE;
7310		}
7311	}
7312
7313	ipr_cmd->timer.data = (unsigned long) ipr_cmd;
7314	ipr_cmd->timer.expires = jiffies + stage_time * HZ;
7315	ipr_cmd->timer.function = (void (*)(unsigned long))ipr_oper_timeout;
7316	ipr_cmd->done = ipr_reset_ioa_job;
7317	add_timer(&ipr_cmd->timer);
7318	list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);
7319
7320	return IPR_RC_JOB_RETURN;
7321}
7322
7323/**
7324 * ipr_reset_enable_ioa - Enable the IOA following a reset.
7325 * @ipr_cmd:	ipr command struct
7326 *
7327 * This function reinitializes some control blocks and
7328 * enables destructive diagnostics on the adapter.
7329 *
7330 * Return value:
7331 * 	IPR_RC_JOB_RETURN
7332 **/
7333static int ipr_reset_enable_ioa(struct ipr_cmnd *ipr_cmd)
7334{
7335	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7336	volatile u32 int_reg;
7337	volatile u64 maskval;
7338
7339	ENTER;
7340	ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
7341	ipr_init_ioa_mem(ioa_cfg);
7342
7343	ioa_cfg->allow_interrupts = 1;
7344	if (ioa_cfg->sis64) {
7345		/* Set the adapter to the correct endian mode. */
7346		writel(IPR_ENDIAN_SWAP_KEY, ioa_cfg->regs.endian_swap_reg);
7347		int_reg = readl(ioa_cfg->regs.endian_swap_reg);
7348	}
7349
7350	int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
7351
7352	if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
7353		writel((IPR_PCII_ERROR_INTERRUPTS | IPR_PCII_HRRQ_UPDATED),
7354		       ioa_cfg->regs.clr_interrupt_mask_reg32);
7355		int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
7356		return IPR_RC_JOB_CONTINUE;
7357	}
7358
7359	/* Enable destructive diagnostics on IOA */
7360	writel(ioa_cfg->doorbell, ioa_cfg->regs.set_uproc_interrupt_reg32);
7361
7362	if (ioa_cfg->sis64) {
7363		maskval = IPR_PCII_IPL_STAGE_CHANGE;
7364		maskval = (maskval << 32) | IPR_PCII_OPER_INTERRUPTS;
7365		writeq(maskval, ioa_cfg->regs.clr_interrupt_mask_reg);
7366	} else
7367		writel(IPR_PCII_OPER_INTERRUPTS, ioa_cfg->regs.clr_interrupt_mask_reg32);
7368
7369	int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
7370
7371	dev_info(&ioa_cfg->pdev->dev, "Initializing IOA.\n");
7372
7373	if (ioa_cfg->sis64) {
7374		ipr_cmd->job_step = ipr_reset_next_stage;
7375		return IPR_RC_JOB_CONTINUE;
7376	}
7377
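	/*
	 * Arm a timer so ipr_oper_timeout fires if the adapter never
	 * signals that it has become operational.
	 */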
7378	ipr_cmd->timer.data = (unsigned long) ipr_cmd;
7379	ipr_cmd->timer.expires = jiffies + (ioa_cfg->transop_timeout * HZ);
7380	ipr_cmd->timer.function = (void (*)(unsigned long))ipr_oper_timeout;
7381	ipr_cmd->done = ipr_reset_ioa_job;
7382	add_timer(&ipr_cmd->timer);
7383	list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);
7384
7385	LEAVE;
7386	return IPR_RC_JOB_RETURN;
7387}
7388
7389/**
7390 * ipr_reset_wait_for_dump - Wait for a dump to timeout.
7391 * @ipr_cmd:	ipr command struct
7392 *
7393 * This function is invoked when an adapter dump has run out
7394 * of processing time.
7395 *
7396 * Return value:
7397 * 	IPR_RC_JOB_CONTINUE
7398 **/
7399static int ipr_reset_wait_for_dump(struct ipr_cmnd *ipr_cmd)
7400{
7401	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7402
7403	if (ioa_cfg->sdt_state == GET_DUMP)
7404		ioa_cfg->sdt_state = ABORT_DUMP;
7405
7406	ipr_cmd->job_step = ipr_reset_alert;
7407
7408	return IPR_RC_JOB_CONTINUE;
7409}
7410
7411/**
7412 * ipr_unit_check_no_data - Log a unit check/no data error log
7413 * @ioa_cfg:		ioa config struct
7414 *
7415 * Logs an error indicating the adapter unit checked, but for some
7416 * reason, we were unable to fetch the unit check buffer.
7417 *
7418 * Return value:
7419 * 	nothing
7420 **/
7421static void ipr_unit_check_no_data(struct ipr_ioa_cfg *ioa_cfg)
7422{
7423	ioa_cfg->errors_logged++;
7424	dev_err(&ioa_cfg->pdev->dev, "IOA unit check with no data\n");
7425}
7426
7427/**
7428 * ipr_get_unit_check_buffer - Get the unit check buffer from the IOA
7429 * @ioa_cfg:		ioa config struct
7430 *
7431 * Fetches the unit check buffer from the adapter by clocking the data
7432 * through the mailbox register.
7433 *
7434 * Return value:
7435 * 	nothing
7436 **/
7437static void ipr_get_unit_check_buffer(struct ipr_ioa_cfg *ioa_cfg)
7438{
7439	unsigned long mailbox;
7440	struct ipr_hostrcb *hostrcb;
7441	struct ipr_uc_sdt sdt;
7442	int rc, length;
7443	u32 ioasc;
7444
7445	mailbox = readl(ioa_cfg->ioa_mailbox);
7446
7447	if (!ioa_cfg->sis64 && !ipr_sdt_is_fmt2(mailbox)) {
7448		ipr_unit_check_no_data(ioa_cfg);
7449		return;
7450	}
7451
7452	memset(&sdt, 0, sizeof(struct ipr_uc_sdt));
7453	rc = ipr_get_ldump_data_section(ioa_cfg, mailbox, (__be32 *) &sdt,
7454					(sizeof(struct ipr_uc_sdt)) / sizeof(__be32));
7455
7456	if (rc || !(sdt.entry[0].flags & IPR_SDT_VALID_ENTRY) ||
7457	    ((be32_to_cpu(sdt.hdr.state) != IPR_FMT3_SDT_READY_TO_USE) &&
7458	    (be32_to_cpu(sdt.hdr.state) != IPR_FMT2_SDT_READY_TO_USE))) {
7459		ipr_unit_check_no_data(ioa_cfg);
7460		return;
7461	}
7462
7463	/* Find length of the first sdt entry (UC buffer) */
7464	if (be32_to_cpu(sdt.hdr.state) == IPR_FMT3_SDT_READY_TO_USE)
7465		length = be32_to_cpu(sdt.entry[0].end_token);
7466	else
7467		length = (be32_to_cpu(sdt.entry[0].end_token) -
7468			  be32_to_cpu(sdt.entry[0].start_token)) &
7469			  IPR_FMT2_MBX_ADDR_MASK;
7470
7471	hostrcb = list_entry(ioa_cfg->hostrcb_free_q.next,
7472			     struct ipr_hostrcb, queue);
7473	list_del(&hostrcb->queue);
7474	memset(&hostrcb->hcam, 0, sizeof(hostrcb->hcam));
7475
7476	rc = ipr_get_ldump_data_section(ioa_cfg,
7477					be32_to_cpu(sdt.entry[0].start_token),
7478					(__be32 *)&hostrcb->hcam,
7479					min(length, (int)sizeof(hostrcb->hcam)) / sizeof(__be32));
7480
7481	if (!rc) {
7482		ipr_handle_log_data(ioa_cfg, hostrcb);
7483		ioasc = be32_to_cpu(hostrcb->hcam.u.error.fd_ioasc);
7484		if (ioasc == IPR_IOASC_NR_IOA_RESET_REQUIRED &&
7485		    ioa_cfg->sdt_state == GET_DUMP)
7486			ioa_cfg->sdt_state = WAIT_FOR_DUMP;
7487	} else
7488		ipr_unit_check_no_data(ioa_cfg);
7489
7490	list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
7491}
7492
7493/**
7494 * ipr_reset_get_unit_check_job - Call to get the unit check buffer.
7495 * @ipr_cmd:	ipr command struct
7496 *
7497 * Description: This function fetches the unit check buffer from the adapter.
7498 *
7499 * Return value:
7500 *	IPR_RC_JOB_RETURN
7501 **/
7502static int ipr_reset_get_unit_check_job(struct ipr_cmnd *ipr_cmd)
7503{
7504	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7505
7506	ENTER;
7507	ioa_cfg->ioa_unit_checked = 0;
7508	ipr_get_unit_check_buffer(ioa_cfg);
7509	ipr_cmd->job_step = ipr_reset_alert;
7510	ipr_reset_start_timer(ipr_cmd, 0);
7511
7512	LEAVE;
7513	return IPR_RC_JOB_RETURN;
7514}
7515
7516/**
7517 * ipr_reset_restore_cfg_space - Restore PCI config space.
7518 * @ipr_cmd:	ipr command struct
7519 *
7520 * Description: This function restores the saved PCI config space of
7521 * the adapter, fails all outstanding ops back to the callers, and
7522 * fetches the dump/unit check if applicable to this reset.
7523 *
7524 * Return value:
7525 * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7526 **/
7527static int ipr_reset_restore_cfg_space(struct ipr_cmnd *ipr_cmd)
7528{
7529	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7530	u32 int_reg;
7531
7532	ENTER;
7533	ioa_cfg->pdev->state_saved = true;
7534	pci_restore_state(ioa_cfg->pdev);
7535
7536	if (ipr_set_pcix_cmd_reg(ioa_cfg)) {
7537		ipr_cmd->s.ioasa.hdr.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
7538		return IPR_RC_JOB_CONTINUE;
7539	}
7540
7541	ipr_fail_all_ops(ioa_cfg);
7542
7543	if (ioa_cfg->sis64) {
7544		/* Set the adapter to the correct endian mode. */
7545		writel(IPR_ENDIAN_SWAP_KEY, ioa_cfg->regs.endian_swap_reg);
7546		int_reg = readl(ioa_cfg->regs.endian_swap_reg);
7547	}
7548
7549	if (ioa_cfg->ioa_unit_checked) {
7550		if (ioa_cfg->sis64) {
7551			ipr_cmd->job_step = ipr_reset_get_unit_check_job;
7552			ipr_reset_start_timer(ipr_cmd, IPR_DUMP_DELAY_TIMEOUT);
7553			return IPR_RC_JOB_RETURN;
7554		} else {
7555			ioa_cfg->ioa_unit_checked = 0;
7556			ipr_get_unit_check_buffer(ioa_cfg);
7557			ipr_cmd->job_step = ipr_reset_alert;
7558			ipr_reset_start_timer(ipr_cmd, 0);
7559			return IPR_RC_JOB_RETURN;
7560		}
7561	}
7562
7563	if (ioa_cfg->in_ioa_bringdown) {
7564		ipr_cmd->job_step = ipr_ioa_bringdown_done;
7565	} else {
7566		ipr_cmd->job_step = ipr_reset_enable_ioa;
7567
7568		if (GET_DUMP == ioa_cfg->sdt_state) {
7569			ipr_reset_start_timer(ipr_cmd, IPR_DUMP_TIMEOUT);
7570			ipr_cmd->job_step = ipr_reset_wait_for_dump;
7571			schedule_work(&ioa_cfg->work_q);
7572			return IPR_RC_JOB_RETURN;
7573		}
7574	}
7575
7576	LEAVE;
7577	return IPR_RC_JOB_CONTINUE;
7578}
7579
7580/**
7581 * ipr_reset_bist_done - BIST has completed on the adapter.
7582 * @ipr_cmd:	ipr command struct
7583 *
7584 * Description: Unblock config space and resume the reset process.
7585 *
7586 * Return value:
7587 * 	IPR_RC_JOB_CONTINUE
7588 **/
7589static int ipr_reset_bist_done(struct ipr_cmnd *ipr_cmd)
7590{
7591	ENTER;
7592	pci_unblock_user_cfg_access(ipr_cmd->ioa_cfg->pdev);
7593	ipr_cmd->job_step = ipr_reset_restore_cfg_space;
7594	LEAVE;
7595	return IPR_RC_JOB_CONTINUE;
7596}
7597
7598/**
7599 * ipr_reset_start_bist - Run BIST on the adapter.
7600 * @ipr_cmd:	ipr command struct
7601 *
7602 * Description: This function runs BIST on the adapter, then delays 2 seconds.
7603 *
7604 * Return value:
7605 * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7606 **/
7607static int ipr_reset_start_bist(struct ipr_cmnd *ipr_cmd)
7608{
7609	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7610	int rc = PCIBIOS_SUCCESSFUL;
7611
7612	ENTER;
7613	pci_block_user_cfg_access(ioa_cfg->pdev);
7614
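	/*
	 * Chips using the MMIO BIST method are started via the
	 * uprocessor interrupt register; the rest use the standard
	 * PCI BIST register in config space.
	 */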
7615	if (ioa_cfg->ipr_chip->bist_method == IPR_MMIO)
7616		writel(IPR_UPROCI_SIS64_START_BIST,
7617		       ioa_cfg->regs.set_uproc_interrupt_reg32);
7618	else
7619		rc = pci_write_config_byte(ioa_cfg->pdev, PCI_BIST, PCI_BIST_START);
7620
7621	if (rc == PCIBIOS_SUCCESSFUL) {
7622		ipr_cmd->job_step = ipr_reset_bist_done;
7623		ipr_reset_start_timer(ipr_cmd, IPR_WAIT_FOR_BIST_TIMEOUT);
7624		rc = IPR_RC_JOB_RETURN;
7625	} else {
7626		pci_unblock_user_cfg_access(ipr_cmd->ioa_cfg->pdev);
7627		ipr_cmd->s.ioasa.hdr.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
7628		rc = IPR_RC_JOB_CONTINUE;
7629	}
7630
7631	LEAVE;
7632	return rc;
7633}
7634
7635/**
7636 * ipr_reset_slot_reset_done - Clear PCI reset to the adapter
7637 * @ipr_cmd:	ipr command struct
7638 *
7639 * Description: This clears PCI reset to the adapter and delays two seconds.
7640 *
7641 * Return value:
7642 * 	IPR_RC_JOB_RETURN
7643 **/
7644static int ipr_reset_slot_reset_done(struct ipr_cmnd *ipr_cmd)
7645{
7646	ENTER;
7647	pci_set_pcie_reset_state(ipr_cmd->ioa_cfg->pdev, pcie_deassert_reset);
7648	ipr_cmd->job_step = ipr_reset_bist_done;
7649	ipr_reset_start_timer(ipr_cmd, IPR_WAIT_FOR_BIST_TIMEOUT);
7650	LEAVE;
7651	return IPR_RC_JOB_RETURN;
7652}
7653
7654/**
7655 * ipr_reset_slot_reset - Reset the PCI slot of the adapter.
7656 * @ipr_cmd:	ipr command struct
7657 *
7658 * Description: This asserts PCI reset to the adapter.
7659 *
7660 * Return value:
7661 * 	IPR_RC_JOB_RETURN
7662 **/
7663static int ipr_reset_slot_reset(struct ipr_cmnd *ipr_cmd)
7664{
7665	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7666	struct pci_dev *pdev = ioa_cfg->pdev;
7667
7668	ENTER;
7669	pci_block_user_cfg_access(pdev);
7670	pci_set_pcie_reset_state(pdev, pcie_warm_reset);
7671	ipr_cmd->job_step = ipr_reset_slot_reset_done;
7672	ipr_reset_start_timer(ipr_cmd, IPR_PCI_RESET_TIMEOUT);
7673	LEAVE;
7674	return IPR_RC_JOB_RETURN;
7675}
7676
7677/**
7678 * ipr_reset_allowed - Query whether or not IOA can be reset
7679 * @ioa_cfg:	ioa config struct
7680 *
7681 * Return value:
7682 * 	0 if reset not allowed / non-zero if reset is allowed
7683 **/
7684static int ipr_reset_allowed(struct ipr_ioa_cfg *ioa_cfg)
7685{
7686	volatile u32 temp_reg;
7687
7688	temp_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
7689	return ((temp_reg & IPR_PCII_CRITICAL_OPERATION) == 0);
7690}
7691
7692/**
7693 * ipr_reset_wait_to_start_bist - Wait for permission to reset IOA.
7694 * @ipr_cmd:	ipr command struct
7695 *
7696 * Description: This function waits for adapter permission to run BIST,
7697 * then runs BIST. If the adapter does not give permission after a
7698 * reasonable time, we will reset the adapter anyway. The risk of
7699 * resetting the adapter without warning is that the adapter's
7700 * persistent error log may be lost. If the adapter is reset while
7701 * it is writing to its flash, that flash segment will have bad ECC
7702 * and be zeroed.
7703 *
7704 * Return value:
7705 * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7706 **/
7707static int ipr_reset_wait_to_start_bist(struct ipr_cmnd *ipr_cmd)
7708{
7709	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7710	int rc = IPR_RC_JOB_RETURN;
7711
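	/*
	 * Poll until the adapter drops its critical operation
	 * indication or the wait budget is exhausted; either way we
	 * eventually proceed to run BIST.
	 */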
7712	if (!ipr_reset_allowed(ioa_cfg) && ipr_cmd->u.time_left) {
7713		ipr_cmd->u.time_left -= IPR_CHECK_FOR_RESET_TIMEOUT;
7714		ipr_reset_start_timer(ipr_cmd, IPR_CHECK_FOR_RESET_TIMEOUT);
7715	} else {
7716		ipr_cmd->job_step = ioa_cfg->reset;
7717		rc = IPR_RC_JOB_CONTINUE;
7718	}
7719
7720	return rc;
7721}
7722
7723/**
7724 * ipr_reset_alert - Alert the adapter of a pending reset
7725 * @ipr_cmd:	ipr command struct
7726 *
7727 * Description: This function alerts the adapter that it will be reset.
7728 * If memory space is not currently enabled, proceed directly
7729 * to running BIST on the adapter. The timer must always be started
7730 * so we guarantee we do not run BIST from ipr_isr.
7731 *
7732 * Return value:
7733 * 	IPR_RC_JOB_RETURN
7734 **/
7735static int ipr_reset_alert(struct ipr_cmnd *ipr_cmd)
7736{
7737	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7738	u16 cmd_reg;
7739	int rc;
7740
7741	ENTER;
7742	rc = pci_read_config_word(ioa_cfg->pdev, PCI_COMMAND, &cmd_reg);
7743
7744	if ((rc == PCIBIOS_SUCCESSFUL) && (cmd_reg & PCI_COMMAND_MEMORY)) {
7745		ipr_mask_and_clear_interrupts(ioa_cfg, ~0);
7746		writel(IPR_UPROCI_RESET_ALERT, ioa_cfg->regs.set_uproc_interrupt_reg32);
7747		ipr_cmd->job_step = ipr_reset_wait_to_start_bist;
7748	} else {
7749		ipr_cmd->job_step = ioa_cfg->reset;
7750	}
7751
7752	ipr_cmd->u.time_left = IPR_WAIT_FOR_RESET_TIMEOUT;
7753	ipr_reset_start_timer(ipr_cmd, IPR_CHECK_FOR_RESET_TIMEOUT);
7754
7755	LEAVE;
7756	return IPR_RC_JOB_RETURN;
7757}
7758
7759/**
7760 * ipr_reset_ucode_download_done - Microcode download completion
7761 * @ipr_cmd:	ipr command struct
7762 *
7763 * Description: This function unmaps the microcode download buffer.
7764 *
7765 * Return value:
7766 * 	IPR_RC_JOB_CONTINUE
7767 **/
7768static int ipr_reset_ucode_download_done(struct ipr_cmnd *ipr_cmd)
7769{
7770	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7771	struct ipr_sglist *sglist = ioa_cfg->ucode_sglist;
7772
7773	pci_unmap_sg(ioa_cfg->pdev, sglist->scatterlist,
7774		     sglist->num_sg, DMA_TO_DEVICE);
7775
7776	ipr_cmd->job_step = ipr_reset_alert;
7777	return IPR_RC_JOB_CONTINUE;
7778}
7779
7780/**
7781 * ipr_reset_ucode_download - Download microcode to the adapter
7782 * @ipr_cmd:	ipr command struct
7783 *
7784 * Description: This function checks to see if it there is microcode
7785 * to download to the adapter. If there is, a download is performed.
7786 *
7787 * Return value:
7788 * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7789 **/
7790static int ipr_reset_ucode_download(struct ipr_cmnd *ipr_cmd)
7791{
7792	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7793	struct ipr_sglist *sglist = ioa_cfg->ucode_sglist;
7794
7795	ENTER;
7796	ipr_cmd->job_step = ipr_reset_alert;
7797
7798	if (!sglist)
7799		return IPR_RC_JOB_CONTINUE;
7800
7801	ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
7802	ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
7803	ipr_cmd->ioarcb.cmd_pkt.cdb[0] = WRITE_BUFFER;
7804	ipr_cmd->ioarcb.cmd_pkt.cdb[1] = IPR_WR_BUF_DOWNLOAD_AND_SAVE;
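	/*
	 * CDB bytes 6-8 hold the 24-bit parameter list length (the
	 * size of the microcode image being downloaded).
	 */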
7805	ipr_cmd->ioarcb.cmd_pkt.cdb[6] = (sglist->buffer_len & 0xff0000) >> 16;
7806	ipr_cmd->ioarcb.cmd_pkt.cdb[7] = (sglist->buffer_len & 0x00ff00) >> 8;
7807	ipr_cmd->ioarcb.cmd_pkt.cdb[8] = sglist->buffer_len & 0x0000ff;
7808
7809	if (ioa_cfg->sis64)
7810		ipr_build_ucode_ioadl64(ipr_cmd, sglist);
7811	else
7812		ipr_build_ucode_ioadl(ipr_cmd, sglist);
7813	ipr_cmd->job_step = ipr_reset_ucode_download_done;
7814
7815	ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
7816		   IPR_WRITE_BUFFER_TIMEOUT);
7817
7818	LEAVE;
7819	return IPR_RC_JOB_RETURN;
7820}
7821
7822/**
7823 * ipr_reset_shutdown_ioa - Shutdown the adapter
7824 * @ipr_cmd:	ipr command struct
7825 *
7826 * Description: This function issues an adapter shutdown of the
7827 * specified type to the specified adapter as part of the
7828 * adapter reset job.
7829 *
7830 * Return value:
7831 * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7832 **/
7833static int ipr_reset_shutdown_ioa(struct ipr_cmnd *ipr_cmd)
7834{
7835	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7836	enum ipr_shutdown_type shutdown_type = ipr_cmd->u.shutdown_type;
7837	unsigned long timeout;
7838	int rc = IPR_RC_JOB_CONTINUE;
7839
7840	ENTER;
7841	if (shutdown_type != IPR_SHUTDOWN_NONE && !ioa_cfg->ioa_is_dead) {
7842		ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
7843		ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
7844		ipr_cmd->ioarcb.cmd_pkt.cdb[0] = IPR_IOA_SHUTDOWN;
7845		ipr_cmd->ioarcb.cmd_pkt.cdb[1] = shutdown_type;
7846
7847		if (shutdown_type == IPR_SHUTDOWN_NORMAL)
7848			timeout = IPR_SHUTDOWN_TIMEOUT;
7849		else if (shutdown_type == IPR_SHUTDOWN_PREPARE_FOR_NORMAL)
7850			timeout = IPR_INTERNAL_TIMEOUT;
7851		else if (ioa_cfg->dual_raid && ipr_dual_ioa_raid)
7852			timeout = IPR_DUAL_IOA_ABBR_SHUTDOWN_TO;
7853		else
7854			timeout = IPR_ABBREV_SHUTDOWN_TIMEOUT;
7855
7856		ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, timeout);
7857
7858		rc = IPR_RC_JOB_RETURN;
7859		ipr_cmd->job_step = ipr_reset_ucode_download;
7860	} else
7861		ipr_cmd->job_step = ipr_reset_alert;
7862
7863	LEAVE;
7864	return rc;
7865}
7866
7867/**
7868 * ipr_reset_ioa_job - Adapter reset job
7869 * @ipr_cmd:	ipr command struct
7870 *
7871 * Description: This function is the job router for the adapter reset job.
7872 *
7873 * Return value:
7874 * 	none
7875 **/
7876static void ipr_reset_ioa_job(struct ipr_cmnd *ipr_cmd)
7877{
7878	u32 rc, ioasc;
7879	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7880
7881	do {
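	/*
	 * Drive the reset job state machine: each job_step either
	 * completes inline and returns IPR_RC_JOB_CONTINUE, or queues
	 * further work and returns IPR_RC_JOB_RETURN.
	 */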
7882		ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
7883
7884		if (ioa_cfg->reset_cmd != ipr_cmd) {
7885			/*
7886			 * We are doing nested adapter resets and this is
7887			 * not the current reset job.
7888			 */
7889			list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
7890			return;
7891		}
7892
7893		if (IPR_IOASC_SENSE_KEY(ioasc)) {
7894			rc = ipr_cmd->job_step_failed(ipr_cmd);
7895			if (rc == IPR_RC_JOB_RETURN)
7896				return;
7897		}
7898
7899		ipr_reinit_ipr_cmnd(ipr_cmd);
7900		ipr_cmd->job_step_failed = ipr_reset_cmd_failed;
7901		rc = ipr_cmd->job_step(ipr_cmd);
7902	} while(rc == IPR_RC_JOB_CONTINUE);
7903}
7904
7905/**
7906 * _ipr_initiate_ioa_reset - Initiate an adapter reset
7907 * @ioa_cfg:		ioa config struct
7908 * @job_step:		first job step of reset job
7909 * @shutdown_type:	shutdown type
7910 *
7911 * Description: This function will initiate the reset of the given adapter
7912 * starting at the selected job step.
7913 * If the caller needs to wait on the completion of the reset,
7914 * the caller must sleep on the reset_wait_q.
7915 *
7916 * Return value:
7917 * 	none
7918 **/
7919static void _ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg,
7920				    int (*job_step) (struct ipr_cmnd *),
7921				    enum ipr_shutdown_type shutdown_type)
7922{
7923	struct ipr_cmnd *ipr_cmd;
7924
7925	ioa_cfg->in_reset_reload = 1;
7926	ioa_cfg->allow_cmds = 0;
7927	scsi_block_requests(ioa_cfg->host);
7928
7929	ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
7930	ioa_cfg->reset_cmd = ipr_cmd;
7931	ipr_cmd->job_step = job_step;
7932	ipr_cmd->u.shutdown_type = shutdown_type;
7933
7934	ipr_reset_ioa_job(ipr_cmd);
7935}
7936
7937/**
7938 * ipr_initiate_ioa_reset - Initiate an adapter reset
7939 * @ioa_cfg:		ioa config struct
7940 * @shutdown_type:	shutdown type
7941 *
7942 * Description: This function will initiate the reset of the given adapter.
7943 * If the caller needs to wait on the completion of the reset,
7944 * the caller must sleep on the reset_wait_q.
7945 *
7946 * Return value:
7947 * 	none
7948 **/
7949static void ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg,
7950				   enum ipr_shutdown_type shutdown_type)
7951{
7952	if (ioa_cfg->ioa_is_dead)
7953		return;
7954
7955	if (ioa_cfg->in_reset_reload && ioa_cfg->sdt_state == GET_DUMP)
7956		ioa_cfg->sdt_state = ABORT_DUMP;
7957
7958	if (ioa_cfg->reset_retries++ >= IPR_NUM_RESET_RELOAD_RETRIES) {
7959		dev_err(&ioa_cfg->pdev->dev,
7960			"IOA taken offline - error recovery failed\n");
7961
7962		ioa_cfg->reset_retries = 0;
7963		ioa_cfg->ioa_is_dead = 1;
7964
7965		if (ioa_cfg->in_ioa_bringdown) {
7966			ioa_cfg->reset_cmd = NULL;
7967			ioa_cfg->in_reset_reload = 0;
7968			ipr_fail_all_ops(ioa_cfg);
7969			wake_up_all(&ioa_cfg->reset_wait_q);
7970
7971			spin_unlock_irq(ioa_cfg->host->host_lock);
7972			scsi_unblock_requests(ioa_cfg->host);
7973			spin_lock_irq(ioa_cfg->host->host_lock);
7974			return;
7975		} else {
7976			ioa_cfg->in_ioa_bringdown = 1;
7977			shutdown_type = IPR_SHUTDOWN_NONE;
7978		}
7979	}
7980
7981	_ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_shutdown_ioa,
7982				shutdown_type);
7983}
7984
7985/**
7986 * ipr_reset_freeze - Hold off all I/O activity
7987 * @ipr_cmd:	ipr command struct
7988 *
7989 * Description: If the PCI slot is frozen, hold off all I/O
7990 * activity; then, as soon as the slot is available again,
7991 * initiate an adapter reset.
7992 */
7993static int ipr_reset_freeze(struct ipr_cmnd *ipr_cmd)
7994{
7995	/* Disallow new interrupts, avoid loop */
7996	ipr_cmd->ioa_cfg->allow_interrupts = 0;
7997	list_add_tail(&ipr_cmd->queue, &ipr_cmd->ioa_cfg->pending_q);
7998	ipr_cmd->done = ipr_reset_ioa_job;
7999	return IPR_RC_JOB_RETURN;
8000}
8001
8002/**
8003 * ipr_pci_frozen - Called when slot has experienced a PCI bus error.
8004 * @pdev:	PCI device struct
8005 *
8006 * Description: This routine is called to tell us that the PCI bus
8007 * is down. Can't do anything here, except put the device driver
8008 * into a holding pattern, waiting for the PCI bus to come back.
8009 */
8010static void ipr_pci_frozen(struct pci_dev *pdev)
8011{
8012	unsigned long flags = 0;
8013	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
8014
8015	spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
8016	_ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_freeze, IPR_SHUTDOWN_NONE);
8017	spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
8018}
8019
8020/**
8021 * ipr_pci_slot_reset - Called when PCI slot has been reset.
8022 * @pdev:	PCI device struct
8023 *
8024 * Description: This routine is called by the pci error recovery
8025 * code after the PCI slot has been reset, just before we
8026 * should resume normal operations.
8027 */
8028static pci_ers_result_t ipr_pci_slot_reset(struct pci_dev *pdev)
8029{
8030	unsigned long flags = 0;
8031	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
8032
8033	spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
8034	if (ioa_cfg->needs_warm_reset)
8035		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
8036	else
8037		_ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_restore_cfg_space,
8038					IPR_SHUTDOWN_NONE);
8039	spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
8040	return PCI_ERS_RESULT_RECOVERED;
8041}
8042
8043/**
8044 * ipr_pci_perm_failure - Called when PCI slot is dead for good.
8045 * @pdev:	PCI device struct
8046 *
8047 * Description: This routine is called when the PCI bus has
8048 * permanently failed.
8049 */
8050static void ipr_pci_perm_failure(struct pci_dev *pdev)
8051{
8052	unsigned long flags = 0;
8053	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
8054
8055	spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
8056	if (ioa_cfg->sdt_state == WAIT_FOR_DUMP)
8057		ioa_cfg->sdt_state = ABORT_DUMP;
8058	ioa_cfg->reset_retries = IPR_NUM_RESET_RELOAD_RETRIES;
8059	ioa_cfg->in_ioa_bringdown = 1;
8060	ioa_cfg->allow_cmds = 0;
8061	ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
8062	spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
8063}
8064
8065/**
8066 * ipr_pci_error_detected - Called when a PCI error is detected.
8067 * @pdev:	PCI device struct
8068 * @state:	PCI channel state
8069 *
8070 * Description: Called when a PCI error is detected.
8071 *
8072 * Return value:
8073 * 	PCI_ERS_RESULT_NEED_RESET or PCI_ERS_RESULT_DISCONNECT
8074 */
8075static pci_ers_result_t ipr_pci_error_detected(struct pci_dev *pdev,
8076					       pci_channel_state_t state)
8077{
8078	switch (state) {
8079	case pci_channel_io_frozen:
8080		ipr_pci_frozen(pdev);
8081		return PCI_ERS_RESULT_NEED_RESET;
8082	case pci_channel_io_perm_failure:
8083		ipr_pci_perm_failure(pdev);
8084		return PCI_ERS_RESULT_DISCONNECT;
8086	default:
8087		break;
8088	}
8089	return PCI_ERS_RESULT_NEED_RESET;
8090}
8091
8092/**
8093 * ipr_probe_ioa_part2 - Initializes IOAs found in ipr_probe_ioa(..)
8094 * @ioa_cfg:	ioa cfg struct
8095 *
8096 * Description: This is the second phase of adapter intialization
8097 * This function takes care of initilizing the adapter to the point
8098 * where it can accept new commands.
8099
8100 * Return value:
8101 * 	0 on success / -EIO on failure
8102 **/
8103static int __devinit ipr_probe_ioa_part2(struct ipr_ioa_cfg *ioa_cfg)
8104{
8105	int rc = 0;
8106	unsigned long host_lock_flags = 0;
8107
8108	ENTER;
8109	spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
8110	dev_dbg(&ioa_cfg->pdev->dev, "ioa_cfg adx: 0x%p\n", ioa_cfg);
8111	if (ioa_cfg->needs_hard_reset) {
8112		ioa_cfg->needs_hard_reset = 0;
8113		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
8114	} else
8115		_ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_enable_ioa,
8116					IPR_SHUTDOWN_NONE);
8117
8118	spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
8119	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
8120	spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
8121
8122	if (ioa_cfg->ioa_is_dead) {
8123		rc = -EIO;
8124	} else if (ipr_invalid_adapter(ioa_cfg)) {
8125		if (!ipr_testmode)
8126			rc = -EIO;
8127
8128		dev_err(&ioa_cfg->pdev->dev,
8129			"Adapter not supported in this hardware configuration.\n");
8130	}
8131
8132	spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
8133
8134	LEAVE;
8135	return rc;
8136}
8137
8138/**
8139 * ipr_free_cmd_blks - Frees command blocks allocated for an adapter
8140 * @ioa_cfg:	ioa config struct
8141 *
8142 * Return value:
8143 * 	none
8144 **/
8145static void ipr_free_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
8146{
8147	int i;
8148
8149	for (i = 0; i < IPR_NUM_CMD_BLKS; i++) {
8150		if (ioa_cfg->ipr_cmnd_list[i])
8151			pci_pool_free(ioa_cfg->ipr_cmd_pool,
8152				      ioa_cfg->ipr_cmnd_list[i],
8153				      ioa_cfg->ipr_cmnd_list_dma[i]);
8154
8155		ioa_cfg->ipr_cmnd_list[i] = NULL;
8156	}
8157
8158	if (ioa_cfg->ipr_cmd_pool)
8159		pci_pool_destroy (ioa_cfg->ipr_cmd_pool);
8160
8161	ioa_cfg->ipr_cmd_pool = NULL;
8162}
8163
8164/**
8165 * ipr_free_mem - Frees memory allocated for an adapter
8166 * @ioa_cfg:	ioa cfg struct
8167 *
8168 * Return value:
8169 * 	nothing
8170 **/
8171static void ipr_free_mem(struct ipr_ioa_cfg *ioa_cfg)
8172{
8173	int i;
8174
8175	kfree(ioa_cfg->res_entries);
8176	pci_free_consistent(ioa_cfg->pdev, sizeof(struct ipr_misc_cbs),
8177			    ioa_cfg->vpd_cbs, ioa_cfg->vpd_cbs_dma);
8178	ipr_free_cmd_blks(ioa_cfg);
8179	pci_free_consistent(ioa_cfg->pdev, sizeof(u32) * IPR_NUM_CMD_BLKS,
8180			    ioa_cfg->host_rrq, ioa_cfg->host_rrq_dma);
8181	pci_free_consistent(ioa_cfg->pdev, ioa_cfg->cfg_table_size,
8182			    ioa_cfg->u.cfg_table,
8183			    ioa_cfg->cfg_table_dma);
8184
8185	for (i = 0; i < IPR_NUM_HCAMS; i++) {
8186		pci_free_consistent(ioa_cfg->pdev,
8187				    sizeof(struct ipr_hostrcb),
8188				    ioa_cfg->hostrcb[i],
8189				    ioa_cfg->hostrcb_dma[i]);
8190	}
8191
8192	ipr_free_dump(ioa_cfg);
8193	kfree(ioa_cfg->trace);
8194}
8195
8196/**
8197 * ipr_free_all_resources - Free all allocated resources for an adapter.
8198 * @ioa_cfg:	ioa config struct
8199 *
8200 * This function frees all allocated resources for the
8201 * specified adapter.
8202 *
8203 * Return value:
8204 * 	none
8205 **/
8206static void ipr_free_all_resources(struct ipr_ioa_cfg *ioa_cfg)
8207{
8208	struct pci_dev *pdev = ioa_cfg->pdev;
8209
8210	ENTER;
8211	free_irq(pdev->irq, ioa_cfg);
8212	pci_disable_msi(pdev);
8213	iounmap(ioa_cfg->hdw_dma_regs);
8214	pci_release_regions(pdev);
8215	ipr_free_mem(ioa_cfg);
8216	scsi_host_put(ioa_cfg->host);
8217	pci_disable_device(pdev);
8218	LEAVE;
8219}
8220
8221/**
8222 * ipr_alloc_cmd_blks - Allocate command blocks for an adapter
8223 * @ioa_cfg:	ioa config struct
8224 *
8225 * Return value:
8226 * 	0 on success / -ENOMEM on allocation failure
8227 **/
8228static int __devinit ipr_alloc_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
8229{
8230	struct ipr_cmnd *ipr_cmd;
8231	struct ipr_ioarcb *ioarcb;
8232	dma_addr_t dma_addr;
8233	int i;
8234
8235	ioa_cfg->ipr_cmd_pool = pci_pool_create (IPR_NAME, ioa_cfg->pdev,
8236						 sizeof(struct ipr_cmnd), 16, 0);
8237
8238	if (!ioa_cfg->ipr_cmd_pool)
8239		return -ENOMEM;
8240
8241	for (i = 0; i < IPR_NUM_CMD_BLKS; i++) {
8242		ipr_cmd = pci_pool_alloc (ioa_cfg->ipr_cmd_pool, GFP_KERNEL, &dma_addr);
8243
8244		if (!ipr_cmd) {
8245			ipr_free_cmd_blks(ioa_cfg);
8246			return -ENOMEM;
8247		}
8248
8249		memset(ipr_cmd, 0, sizeof(*ipr_cmd));
8250		ioa_cfg->ipr_cmnd_list[i] = ipr_cmd;
8251		ioa_cfg->ipr_cmnd_list_dma[i] = dma_addr;
8252
8253		ioarcb = &ipr_cmd->ioarcb;
8254		ipr_cmd->dma_addr = dma_addr;
8255		if (ioa_cfg->sis64)
8256			ioarcb->a.ioarcb_host_pci_addr64 = cpu_to_be64(dma_addr);
8257		else
8258			ioarcb->a.ioarcb_host_pci_addr = cpu_to_be32(dma_addr);
8259
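		/*
		 * The low two bits of the handle returned in the HRRQ are
		 * used for status flags, so the command index is stored
		 * shifted left by two.
		 */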
8260		ioarcb->host_response_handle = cpu_to_be32(i << 2);
8261		if (ioa_cfg->sis64) {
8262			ioarcb->u.sis64_addr_data.data_ioadl_addr =
8263				cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64));
8264			ioarcb->u.sis64_addr_data.ioasa_host_pci_addr =
8265				cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, s.ioasa64));
8266		} else {
8267			ioarcb->write_ioadl_addr =
8268				cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl));
8269			ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
8270			ioarcb->ioasa_host_pci_addr =
8271				cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, s.ioasa));
8272		}
8273		ioarcb->ioasa_len = cpu_to_be16(sizeof(struct ipr_ioasa));
8274		ipr_cmd->cmd_index = i;
8275		ipr_cmd->ioa_cfg = ioa_cfg;
8276		ipr_cmd->sense_buffer_dma = dma_addr +
8277			offsetof(struct ipr_cmnd, sense_buffer);
8278
8279		list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
8280	}
8281
8282	return 0;
8283}
8284
8285/**
8286 * ipr_alloc_mem - Allocate memory for an adapter
8287 * @ioa_cfg:	ioa config struct
8288 *
8289 * Return value:
8290 * 	0 on success / non-zero for error
8291 **/
8292static int __devinit ipr_alloc_mem(struct ipr_ioa_cfg *ioa_cfg)
8293{
8294	struct pci_dev *pdev = ioa_cfg->pdev;
8295	int i, rc = -ENOMEM;
8296
8297	ENTER;
8298	ioa_cfg->res_entries = kzalloc(sizeof(struct ipr_resource_entry) *
8299				       ioa_cfg->max_devs_supported, GFP_KERNEL);
8300
8301	if (!ioa_cfg->res_entries)
8302		goto out;
8303
8304	if (ioa_cfg->sis64) {
8305		ioa_cfg->target_ids = kzalloc(sizeof(unsigned long) *
8306					      BITS_TO_LONGS(ioa_cfg->max_devs_supported), GFP_KERNEL);
8307		ioa_cfg->array_ids = kzalloc(sizeof(unsigned long) *
8308					     BITS_TO_LONGS(ioa_cfg->max_devs_supported), GFP_KERNEL);
8309		ioa_cfg->vset_ids = kzalloc(sizeof(unsigned long) *
8310					    BITS_TO_LONGS(ioa_cfg->max_devs_supported), GFP_KERNEL);
8311	}
8312
8313	for (i = 0; i < ioa_cfg->max_devs_supported; i++) {
8314		list_add_tail(&ioa_cfg->res_entries[i].queue, &ioa_cfg->free_res_q);
8315		ioa_cfg->res_entries[i].ioa_cfg = ioa_cfg;
8316	}
8317
8318	ioa_cfg->vpd_cbs = pci_alloc_consistent(ioa_cfg->pdev,
8319						sizeof(struct ipr_misc_cbs),
8320						&ioa_cfg->vpd_cbs_dma);
8321
8322	if (!ioa_cfg->vpd_cbs)
8323		goto out_free_res_entries;
8324
8325	if (ipr_alloc_cmd_blks(ioa_cfg))
8326		goto out_free_vpd_cbs;
8327
8328	ioa_cfg->host_rrq = pci_alloc_consistent(ioa_cfg->pdev,
8329						 sizeof(u32) * IPR_NUM_CMD_BLKS,
8330						 &ioa_cfg->host_rrq_dma);
8331
8332	if (!ioa_cfg->host_rrq)
8333		goto out_ipr_free_cmd_blocks;
8334
8335	ioa_cfg->u.cfg_table = pci_alloc_consistent(ioa_cfg->pdev,
8336						    ioa_cfg->cfg_table_size,
8337						    &ioa_cfg->cfg_table_dma);
8338
8339	if (!ioa_cfg->u.cfg_table)
8340		goto out_free_host_rrq;
8341
8342	for (i = 0; i < IPR_NUM_HCAMS; i++) {
8343		ioa_cfg->hostrcb[i] = pci_alloc_consistent(ioa_cfg->pdev,
8344							   sizeof(struct ipr_hostrcb),
8345							   &ioa_cfg->hostrcb_dma[i]);
8346
8347		if (!ioa_cfg->hostrcb[i])
8348			goto out_free_hostrcb_dma;
8349
8350		ioa_cfg->hostrcb[i]->hostrcb_dma =
8351			ioa_cfg->hostrcb_dma[i] + offsetof(struct ipr_hostrcb, hcam);
8352		ioa_cfg->hostrcb[i]->ioa_cfg = ioa_cfg;
8353		list_add_tail(&ioa_cfg->hostrcb[i]->queue, &ioa_cfg->hostrcb_free_q);
8354	}
8355
8356	ioa_cfg->trace = kzalloc(sizeof(struct ipr_trace_entry) *
8357				 IPR_NUM_TRACE_ENTRIES, GFP_KERNEL);
8358
8359	if (!ioa_cfg->trace)
8360		goto out_free_hostrcb_dma;
8361
8362	rc = 0;
8363out:
8364	LEAVE;
8365	return rc;
8366
8367out_free_hostrcb_dma:
8368	while (i-- > 0) {
8369		pci_free_consistent(pdev, sizeof(struct ipr_hostrcb),
8370				    ioa_cfg->hostrcb[i],
8371				    ioa_cfg->hostrcb_dma[i]);
8372	}
8373	pci_free_consistent(pdev, ioa_cfg->cfg_table_size,
8374			    ioa_cfg->u.cfg_table,
8375			    ioa_cfg->cfg_table_dma);
8376out_free_host_rrq:
8377	pci_free_consistent(pdev, sizeof(u32) * IPR_NUM_CMD_BLKS,
8378			    ioa_cfg->host_rrq, ioa_cfg->host_rrq_dma);
8379out_ipr_free_cmd_blocks:
8380	ipr_free_cmd_blks(ioa_cfg);
8381out_free_vpd_cbs:
8382	pci_free_consistent(pdev, sizeof(struct ipr_misc_cbs),
8383			    ioa_cfg->vpd_cbs, ioa_cfg->vpd_cbs_dma);
8384out_free_res_entries:
8385	kfree(ioa_cfg->res_entries);
8386	goto out;
8387}
8388
8389/**
8390 * ipr_initialize_bus_attr - Initialize SCSI bus attributes to default values
8391 * @ioa_cfg:	ioa config struct
8392 *
8393 * Return value:
8394 * 	none
8395 **/
8396static void __devinit ipr_initialize_bus_attr(struct ipr_ioa_cfg *ioa_cfg)
8397{
8398	int i;
8399
8400	for (i = 0; i < IPR_MAX_NUM_BUSES; i++) {
8401		ioa_cfg->bus_attr[i].bus = i;
8402		ioa_cfg->bus_attr[i].qas_enabled = 0;
8403		ioa_cfg->bus_attr[i].bus_width = IPR_DEFAULT_BUS_WIDTH;
8404		if (ipr_max_speed < ARRAY_SIZE(ipr_max_bus_speeds))
8405			ioa_cfg->bus_attr[i].max_xfer_rate = ipr_max_bus_speeds[ipr_max_speed];
8406		else
8407			ioa_cfg->bus_attr[i].max_xfer_rate = IPR_U160_SCSI_RATE;
8408	}
8409}
8410
8411/**
8412 * ipr_init_ioa_cfg - Initialize IOA config struct
8413 * @ioa_cfg:	ioa config struct
8414 * @host:		scsi host struct
8415 * @pdev:		PCI dev struct
8416 *
8417 * Return value:
8418 * 	none
8419 **/
8420static void __devinit ipr_init_ioa_cfg(struct ipr_ioa_cfg *ioa_cfg,
8421				       struct Scsi_Host *host, struct pci_dev *pdev)
8422{
8423	const struct ipr_interrupt_offsets *p;
8424	struct ipr_interrupts *t;
8425	void __iomem *base;
8426
8427	ioa_cfg->host = host;
8428	ioa_cfg->pdev = pdev;
8429	ioa_cfg->log_level = ipr_log_level;
8430	ioa_cfg->doorbell = IPR_DOORBELL;
8431	sprintf(ioa_cfg->eye_catcher, IPR_EYECATCHER);
8432	sprintf(ioa_cfg->trace_start, IPR_TRACE_START_LABEL);
8433	sprintf(ioa_cfg->ipr_free_label, IPR_FREEQ_LABEL);
8434	sprintf(ioa_cfg->ipr_pending_label, IPR_PENDQ_LABEL);
8435	sprintf(ioa_cfg->cfg_table_start, IPR_CFG_TBL_START);
8436	sprintf(ioa_cfg->resource_table_label, IPR_RES_TABLE_LABEL);
8437	sprintf(ioa_cfg->ipr_hcam_label, IPR_HCAM_LABEL);
8438	sprintf(ioa_cfg->ipr_cmd_label, IPR_CMD_LABEL);
8439
8440	INIT_LIST_HEAD(&ioa_cfg->free_q);
8441	INIT_LIST_HEAD(&ioa_cfg->pending_q);
8442	INIT_LIST_HEAD(&ioa_cfg->hostrcb_free_q);
8443	INIT_LIST_HEAD(&ioa_cfg->hostrcb_pending_q);
8444	INIT_LIST_HEAD(&ioa_cfg->free_res_q);
8445	INIT_LIST_HEAD(&ioa_cfg->used_res_q);
8446	INIT_WORK(&ioa_cfg->work_q, ipr_worker_thread);
8447	init_waitqueue_head(&ioa_cfg->reset_wait_q);
8448	init_waitqueue_head(&ioa_cfg->msi_wait_q);
8449	ioa_cfg->sdt_state = INACTIVE;
8450
8451	ipr_initialize_bus_attr(ioa_cfg);
8452	ioa_cfg->max_devs_supported = ipr_max_devs;
8453
8454	if (ioa_cfg->sis64) {
8455		host->max_id = IPR_MAX_SIS64_TARGETS_PER_BUS;
8456		host->max_lun = IPR_MAX_SIS64_LUNS_PER_TARGET;
8457		if (ipr_max_devs > IPR_MAX_SIS64_DEVS)
8458			ioa_cfg->max_devs_supported = IPR_MAX_SIS64_DEVS;
8459	} else {
8460		host->max_id = IPR_MAX_NUM_TARGETS_PER_BUS;
8461		host->max_lun = IPR_MAX_NUM_LUNS_PER_TARGET;
8462		if (ipr_max_devs > IPR_MAX_PHYSICAL_DEVS)
8463			ioa_cfg->max_devs_supported = IPR_MAX_PHYSICAL_DEVS;
8464	}
8465	host->max_channel = IPR_MAX_BUS_TO_SCAN;
8466	host->unique_id = host->host_no;
8467	host->max_cmd_len = IPR_MAX_CDB_LEN;
8468	pci_set_drvdata(pdev, ioa_cfg);
8469
8470	p = &ioa_cfg->chip_cfg->regs;
8471	t = &ioa_cfg->regs;
8472	base = ioa_cfg->hdw_dma_regs;
8473
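	/* Translate chip-specific register offsets into absolute MMIO addresses */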
8474	t->set_interrupt_mask_reg = base + p->set_interrupt_mask_reg;
8475	t->clr_interrupt_mask_reg = base + p->clr_interrupt_mask_reg;
8476	t->clr_interrupt_mask_reg32 = base + p->clr_interrupt_mask_reg32;
8477	t->sense_interrupt_mask_reg = base + p->sense_interrupt_mask_reg;
8478	t->sense_interrupt_mask_reg32 = base + p->sense_interrupt_mask_reg32;
8479	t->clr_interrupt_reg = base + p->clr_interrupt_reg;
8480	t->clr_interrupt_reg32 = base + p->clr_interrupt_reg32;
8481	t->sense_interrupt_reg = base + p->sense_interrupt_reg;
8482	t->sense_interrupt_reg32 = base + p->sense_interrupt_reg32;
8483	t->ioarrin_reg = base + p->ioarrin_reg;
8484	t->sense_uproc_interrupt_reg = base + p->sense_uproc_interrupt_reg;
8485	t->sense_uproc_interrupt_reg32 = base + p->sense_uproc_interrupt_reg32;
8486	t->set_uproc_interrupt_reg = base + p->set_uproc_interrupt_reg;
8487	t->set_uproc_interrupt_reg32 = base + p->set_uproc_interrupt_reg32;
8488	t->clr_uproc_interrupt_reg = base + p->clr_uproc_interrupt_reg;
8489	t->clr_uproc_interrupt_reg32 = base + p->clr_uproc_interrupt_reg32;
8490
8491	if (ioa_cfg->sis64) {
8492		t->init_feedback_reg = base + p->init_feedback_reg;
8493		t->dump_addr_reg = base + p->dump_addr_reg;
8494		t->dump_data_reg = base + p->dump_data_reg;
8495		t->endian_swap_reg = base + p->endian_swap_reg;
8496	}
8497}
8498
8499/**
8500 * ipr_get_chip_info - Find adapter chip information
8501 * @dev_id:		PCI device id struct
8502 *
8503 * Return value:
8504 * 	ptr to chip information on success / NULL on failure
8505 **/
8506static const struct ipr_chip_t * __devinit
8507ipr_get_chip_info(const struct pci_device_id *dev_id)
8508{
8509	int i;
8510
8511	for (i = 0; i < ARRAY_SIZE(ipr_chip); i++)
8512		if (ipr_chip[i].vendor == dev_id->vendor &&
8513		    ipr_chip[i].device == dev_id->device)
8514			return &ipr_chip[i];
8515	return NULL;
8516}
8517
8518/**
8519 * ipr_test_intr - Handle the interrupt generated in ipr_test_msi().
8520 * @irq:		interrupt number
 * @devp:		pointer to the ioa config struct
8521 *
8522 * Description: Simply set the msi_received flag to 1 indicating that
8523 * Message Signaled Interrupts are supported.
8524 *
8525 * Return value:
8526 * 	IRQ_HANDLED
8527 **/
8528static irqreturn_t __devinit ipr_test_intr(int irq, void *devp)
8529{
8530	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)devp;
8531	unsigned long lock_flags = 0;
8532	irqreturn_t rc = IRQ_HANDLED;
8533
8534	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
8535
8536	ioa_cfg->msi_received = 1;
8537	wake_up(&ioa_cfg->msi_wait_q);
8538
8539	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
8540	return rc;
8541}
8542
8543/**
8544 * ipr_test_msi - Test for Message Signaled Interrupt (MSI) support.
8545 * @ioa_cfg:	ioa config struct
 * @pdev:		PCI device struct
8546 *
8547 * Description: The return value from pci_enable_msi() cannot always be
8548 * trusted.  This routine sets up and initiates a test interrupt to determine
8549 * if the interrupt is received via the ipr_test_intr() service routine.
8550 * If the test fails, the driver will fall back to LSI.
8551 *
8552 * Return value:
8553 * 	0 on success / non-zero on failure
8554 **/
8555static int __devinit ipr_test_msi(struct ipr_ioa_cfg *ioa_cfg,
8556				  struct pci_dev *pdev)
8557{
8558	int rc;
8559	volatile u32 int_reg;
8560	unsigned long lock_flags = 0;
8561
8562	ENTER;
8563
8564	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
8565	init_waitqueue_head(&ioa_cfg->msi_wait_q);
8566	ioa_cfg->msi_received = 0;
8567	ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
8568	writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE, ioa_cfg->regs.clr_interrupt_mask_reg32);
8569	int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
8570	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
8571
8572	rc = request_irq(pdev->irq, ipr_test_intr, 0, IPR_NAME, ioa_cfg);
8573	if (rc) {
8574		dev_err(&pdev->dev, "Can not assign irq %d\n", pdev->irq);
8575		return rc;
8576	} else if (ipr_debug)
8577		dev_info(&pdev->dev, "IRQ assigned: %d\n", pdev->irq);
8578
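	/*
	 * Generate a test interrupt by setting the I/O debug acknowledge
	 * bit, then wait up to one second for ipr_test_intr() to see it.
	 */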
8579	writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE, ioa_cfg->regs.sense_interrupt_reg32);
8580	int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
8581	wait_event_timeout(ioa_cfg->msi_wait_q, ioa_cfg->msi_received, HZ);
8582	ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
8583
8584	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
8585	if (!ioa_cfg->msi_received) {
8586		/* MSI test failed */
8587		dev_info(&pdev->dev, "MSI test failed.  Falling back to LSI.\n");
8588		rc = -EOPNOTSUPP;
8589	} else if (ipr_debug)
8590		dev_info(&pdev->dev, "MSI test succeeded.\n");
8591
8592	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
8593
8594	free_irq(pdev->irq, ioa_cfg);
8595
8596	LEAVE;
8597
8598	return rc;
8599}
8600
8601/**
8602 * ipr_probe_ioa - Allocates memory and does first stage of initialization
8603 * @pdev:		PCI device struct
8604 * @dev_id:		PCI device id struct
8605 *
8606 * Return value:
8607 * 	0 on success / non-zero on failure
8608 **/
8609static int __devinit ipr_probe_ioa(struct pci_dev *pdev,
8610				   const struct pci_device_id *dev_id)
8611{
8612	struct ipr_ioa_cfg *ioa_cfg;
8613	struct Scsi_Host *host;
8614	unsigned long ipr_regs_pci;
8615	void __iomem *ipr_regs;
8616	int rc = PCIBIOS_SUCCESSFUL;
8617	volatile u32 mask, uproc, interrupts;
8618
8619	ENTER;
8620
8621	if ((rc = pci_enable_device(pdev))) {
8622		dev_err(&pdev->dev, "Cannot enable adapter\n");
8623		goto out;
8624	}
8625
8626	dev_info(&pdev->dev, "Found IOA with IRQ: %d\n", pdev->irq);
8627
8628	host = scsi_host_alloc(&driver_template, sizeof(*ioa_cfg));
8629
8630	if (!host) {
8631		dev_err(&pdev->dev, "call to scsi_host_alloc failed!\n");
8632		rc = -ENOMEM;
8633		goto out_disable;
8634	}
8635
8636	ioa_cfg = (struct ipr_ioa_cfg *)host->hostdata;
8637	memset(ioa_cfg, 0, sizeof(struct ipr_ioa_cfg));
8638	ata_host_init(&ioa_cfg->ata_host, &pdev->dev,
8639		      sata_port_info.flags, &ipr_sata_ops);
8640
8641	ioa_cfg->ipr_chip = ipr_get_chip_info(dev_id);
8642
8643	if (!ioa_cfg->ipr_chip) {
8644		dev_err(&pdev->dev, "Unknown adapter chipset 0x%04X 0x%04X\n",
8645			dev_id->vendor, dev_id->device);
8646		goto out_scsi_host_put;
8647	}
8648
8649	/* set SIS 32 or SIS 64 */
8650	ioa_cfg->sis64 = ioa_cfg->ipr_chip->sis_type == IPR_SIS64 ? 1 : 0;
8651	ioa_cfg->chip_cfg = ioa_cfg->ipr_chip->cfg;
8652
8653	if (ipr_transop_timeout)
8654		ioa_cfg->transop_timeout = ipr_transop_timeout;
8655	else if (dev_id->driver_data & IPR_USE_LONG_TRANSOP_TIMEOUT)
8656		ioa_cfg->transop_timeout = IPR_LONG_OPERATIONAL_TIMEOUT;
8657	else
8658		ioa_cfg->transop_timeout = IPR_OPERATIONAL_TIMEOUT;
8659
8660	ioa_cfg->revid = pdev->revision;
8661
8662	ipr_regs_pci = pci_resource_start(pdev, 0);
8663
8664	rc = pci_request_regions(pdev, IPR_NAME);
8665	if (rc < 0) {
8666		dev_err(&pdev->dev,
8667			"Couldn't register memory range of registers\n");
8668		goto out_scsi_host_put;
8669	}
8670
8671	ipr_regs = pci_ioremap_bar(pdev, 0);
8672
8673	if (!ipr_regs) {
8674		dev_err(&pdev->dev,
8675			"Couldn't map memory range of registers\n");
8676		rc = -ENOMEM;
8677		goto out_release_regions;
8678	}
8679
8680	ioa_cfg->hdw_dma_regs = ipr_regs;
8681	ioa_cfg->hdw_dma_regs_pci = ipr_regs_pci;
8682	ioa_cfg->ioa_mailbox = ioa_cfg->chip_cfg->mailbox + ipr_regs;
8683
8684	ipr_init_ioa_cfg(ioa_cfg, host, pdev);
8685
8686	pci_set_master(pdev);
8687
8688	if (ioa_cfg->sis64) {
8689		rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
8690		if (rc < 0) {
8691			dev_dbg(&pdev->dev, "Failed to set 64 bit PCI DMA mask\n");
8692			rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
8693		}
8694
8695	} else
8696		rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
8697
8698	if (rc < 0) {
8699		dev_err(&pdev->dev, "Failed to set PCI DMA mask\n");
8700		goto cleanup_nomem;
8701	}
8702
8703	rc = pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE,
8704				   ioa_cfg->chip_cfg->cache_line_size);
8705
8706	if (rc != PCIBIOS_SUCCESSFUL) {
8707		dev_err(&pdev->dev, "Write of cache line size failed\n");
8708		rc = -EIO;
8709		goto cleanup_nomem;
8710	}
8711
8712	/* Enable MSI style interrupts if they are supported. */
8713	if (ioa_cfg->ipr_chip->intr_type == IPR_USE_MSI && !pci_enable_msi(pdev)) {
8714		rc = ipr_test_msi(ioa_cfg, pdev);
8715		if (rc == -EOPNOTSUPP)
8716			pci_disable_msi(pdev);
8717		else if (rc)
8718			goto out_msi_disable;
8719		else
8720			dev_info(&pdev->dev, "MSI enabled with IRQ: %d\n", pdev->irq);
8721	} else if (ipr_debug)
8722		dev_info(&pdev->dev, "Cannot enable MSI.\n");
8723
8724	/* Save away PCI config space for use following IOA reset */
8725	rc = pci_save_state(pdev);
8726
8727	if (rc != PCIBIOS_SUCCESSFUL) {
8728		dev_err(&pdev->dev, "Failed to save PCI config space\n");
8729		rc = -EIO;
8730		goto cleanup_nomem;
8731	}
8732
8733	if ((rc = ipr_save_pcix_cmd_reg(ioa_cfg)))
8734		goto cleanup_nomem;
8735
8736	if ((rc = ipr_set_pcix_cmd_reg(ioa_cfg)))
8737		goto cleanup_nomem;
8738
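	/*
	 * Size the config table for a header plus one entry per supported
	 * device. SIS-64 adapters use the larger 64-bit table format.
	 */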
8739	if (ioa_cfg->sis64)
8740		ioa_cfg->cfg_table_size = (sizeof(struct ipr_config_table_hdr64)
8741				+ ((sizeof(struct ipr_config_table_entry64)
8742				* ioa_cfg->max_devs_supported)));
8743	else
8744		ioa_cfg->cfg_table_size = (sizeof(struct ipr_config_table_hdr)
8745				+ ((sizeof(struct ipr_config_table_entry)
8746				* ioa_cfg->max_devs_supported)));
8747
8748	rc = ipr_alloc_mem(ioa_cfg);
8749	if (rc < 0) {
8750		dev_err(&pdev->dev,
8751			"Couldn't allocate enough memory for device driver!\n");
8752		goto cleanup_nomem;
8753	}
8754
8755	/*
8756	 * If HRRQ updated interrupt is not masked, or reset alert is set,
8757	 * the card is in an unknown state and needs a hard reset
8758	 */
8759	mask = readl(ioa_cfg->regs.sense_interrupt_mask_reg32);
8760	interrupts = readl(ioa_cfg->regs.sense_interrupt_reg32);
8761	uproc = readl(ioa_cfg->regs.sense_uproc_interrupt_reg32);
8762	if ((mask & IPR_PCII_HRRQ_UPDATED) == 0 || (uproc & IPR_UPROCI_RESET_ALERT))
8763		ioa_cfg->needs_hard_reset = 1;
8764	if (interrupts & IPR_PCII_ERROR_INTERRUPTS)
8765		ioa_cfg->needs_hard_reset = 1;
8766	if (interrupts & IPR_PCII_IOA_UNIT_CHECKED)
8767		ioa_cfg->ioa_unit_checked = 1;
8768
8769	ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
8770	rc = request_irq(pdev->irq, ipr_isr,
8771			 ioa_cfg->msi_received ? 0 : IRQF_SHARED,
8772			 IPR_NAME, ioa_cfg);
8773
8774	if (rc) {
8775		dev_err(&pdev->dev, "Couldn't register IRQ %d! rc=%d\n",
8776			pdev->irq, rc);
8777		goto cleanup_nolog;
8778	}
8779
8780	if ((dev_id->driver_data & IPR_USE_PCI_WARM_RESET) ||
8781	    (dev_id->device == PCI_DEVICE_ID_IBM_OBSIDIAN_E && !ioa_cfg->revid)) {
8782		ioa_cfg->needs_warm_reset = 1;
8783		ioa_cfg->reset = ipr_reset_slot_reset;
8784	} else
8785		ioa_cfg->reset = ipr_reset_start_bist;
8786
8787	spin_lock(&ipr_driver_lock);
8788	list_add_tail(&ioa_cfg->queue, &ipr_ioa_head);
8789	spin_unlock(&ipr_driver_lock);
8790
8791	LEAVE;
8792out:
8793	return rc;
8794
8795cleanup_nolog:
8796	ipr_free_mem(ioa_cfg);
8797cleanup_nomem:
8798	iounmap(ipr_regs);
8799out_msi_disable:
8800	pci_disable_msi(pdev);
8801out_release_regions:
8802	pci_release_regions(pdev);
8803out_scsi_host_put:
8804	scsi_host_put(host);
8805out_disable:
8806	pci_disable_device(pdev);
8807	goto out;
8808}
8809
8810/**
8811 * ipr_scan_vsets - Scans for VSET devices
8812 * @ioa_cfg:	ioa config struct
8813 *
8814 * Description: Since the VSET resources do not follow SAM in that we can have
8815 * sparse LUNs with no LUN 0, we have to scan for these ourselves.
8816 *
8817 * Return value:
8818 * 	none
8819 **/
8820static void ipr_scan_vsets(struct ipr_ioa_cfg *ioa_cfg)
8821{
8822	int target, lun;
8823
8824	for (target = 0; target < IPR_MAX_NUM_TARGETS_PER_BUS; target++)
8825		for (lun = 0; lun < IPR_MAX_NUM_VSET_LUNS_PER_TARGET; lun++ )
8826			scsi_add_device(ioa_cfg->host, IPR_VSET_BUS, target, lun);
8827}
8828
8829/**
8830 * ipr_initiate_ioa_bringdown - Bring down an adapter
8831 * @ioa_cfg:		ioa config struct
8832 * @shutdown_type:	shutdown type
8833 *
8834 * Description: This function will initiate bringing down the adapter.
8835 * This consists of issuing an IOA shutdown to the adapter
8836 * to flush the cache, and running BIST.
8837 * If the caller needs to wait on the completion of the reset,
8838 * the caller must sleep on the reset_wait_q.
8839 *
8840 * Return value:
8841 * 	none
8842 **/
8843static void ipr_initiate_ioa_bringdown(struct ipr_ioa_cfg *ioa_cfg,
8844				       enum ipr_shutdown_type shutdown_type)
8845{
8846	ENTER;
8847	if (ioa_cfg->sdt_state == WAIT_FOR_DUMP)
8848		ioa_cfg->sdt_state = ABORT_DUMP;
8849	ioa_cfg->reset_retries = 0;
8850	ioa_cfg->in_ioa_bringdown = 1;
8851	ipr_initiate_ioa_reset(ioa_cfg, shutdown_type);
8852	LEAVE;
8853}
8854
8855/**
8856 * __ipr_remove - Remove a single adapter
8857 * @pdev:	pci device struct
8858 *
8859 * Adapter hot plug remove entry point.
8860 *
8861 * Return value:
8862 * 	none
8863 **/
8864static void __ipr_remove(struct pci_dev *pdev)
8865{
8866	unsigned long host_lock_flags = 0;
8867	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
8868	ENTER;
8869
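	/*
	 * Wait for any reset/reload already in progress to finish before
	 * initiating the bringdown, dropping the host lock while sleeping.
	 */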
8870	spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
8871	while (ioa_cfg->in_reset_reload) {
8872		spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
8873		wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
8874		spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
8875	}
8876
8877	ipr_initiate_ioa_bringdown(ioa_cfg, IPR_SHUTDOWN_NORMAL);
8878
8879	spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
8880	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
8881	flush_work_sync(&ioa_cfg->work_q);
8882	spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
8883
8884	spin_lock(&ipr_driver_lock);
8885	list_del(&ioa_cfg->queue);
8886	spin_unlock(&ipr_driver_lock);
8887
8888	if (ioa_cfg->sdt_state == ABORT_DUMP)
8889		ioa_cfg->sdt_state = WAIT_FOR_DUMP;
8890	spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
8891
8892	ipr_free_all_resources(ioa_cfg);
8893
8894	LEAVE;
8895}
8896
8897/**
8898 * ipr_remove - IOA hot plug remove entry point
8899 * @pdev:	pci device struct
8900 *
8901 * Adapter hot plug remove entry point.
8902 *
8903 * Return value:
8904 * 	none
8905 **/
8906static void __devexit ipr_remove(struct pci_dev *pdev)
8907{
8908	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
8909
8910	ENTER;
8911
8912	ipr_remove_trace_file(&ioa_cfg->host->shost_dev.kobj,
8913			      &ipr_trace_attr);
8914	ipr_remove_dump_file(&ioa_cfg->host->shost_dev.kobj,
8915			     &ipr_dump_attr);
8916	scsi_remove_host(ioa_cfg->host);
8917
8918	__ipr_remove(pdev);
8919
8920	LEAVE;
8921}
8922
8923/**
8924 * ipr_probe - Adapter hot plug add entry point
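 * @pdev:	pci device struct
 * @dev_id:	pci device id struct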
8925 *
8926 * Return value:
8927 * 	0 on success / non-zero on failure
8928 **/
8929static int __devinit ipr_probe(struct pci_dev *pdev,
8930			       const struct pci_device_id *dev_id)
8931{
8932	struct ipr_ioa_cfg *ioa_cfg;
8933	int rc;
8934
8935	rc = ipr_probe_ioa(pdev, dev_id);
8936
8937	if (rc)
8938		return rc;
8939
8940	ioa_cfg = pci_get_drvdata(pdev);
8941	rc = ipr_probe_ioa_part2(ioa_cfg);
8942
8943	if (rc) {
8944		__ipr_remove(pdev);
8945		return rc;
8946	}
8947
8948	rc = scsi_add_host(ioa_cfg->host, &pdev->dev);
8949
8950	if (rc) {
8951		__ipr_remove(pdev);
8952		return rc;
8953	}
8954
8955	rc = ipr_create_trace_file(&ioa_cfg->host->shost_dev.kobj,
8956				   &ipr_trace_attr);
8957
8958	if (rc) {
8959		scsi_remove_host(ioa_cfg->host);
8960		__ipr_remove(pdev);
8961		return rc;
8962	}
8963
8964	rc = ipr_create_dump_file(&ioa_cfg->host->shost_dev.kobj,
8965				   &ipr_dump_attr);
8966
8967	if (rc) {
8968		ipr_remove_trace_file(&ioa_cfg->host->shost_dev.kobj,
8969				      &ipr_trace_attr);
8970		scsi_remove_host(ioa_cfg->host);
8971		__ipr_remove(pdev);
8972		return rc;
8973	}
8974
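	/*
	 * Scan for attached devices: standard SCSI scan first, then the
	 * sparse VSET LUNs, and finally add the IOA itself as a device.
	 */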
8975	scsi_scan_host(ioa_cfg->host);
8976	ipr_scan_vsets(ioa_cfg);
8977	scsi_add_device(ioa_cfg->host, IPR_IOA_BUS, IPR_IOA_TARGET, IPR_IOA_LUN);
8978	ioa_cfg->allow_ml_add_del = 1;
8979	ioa_cfg->host->max_channel = IPR_VSET_BUS;
8980	schedule_work(&ioa_cfg->work_q);
8981	return 0;
8982}
8983
8984/**
8985 * ipr_shutdown - Shutdown handler.
8986 * @pdev:	pci device struct
8987 *
8988 * This function is invoked upon system shutdown/reboot. It will issue
8989 * an adapter shutdown to the adapter to flush the write cache.
8990 *
8991 * Return value:
8992 * 	none
8993 **/
8994static void ipr_shutdown(struct pci_dev *pdev)
8995{
8996	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
8997	unsigned long lock_flags = 0;
8998
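	/*
	 * Wait for any reset/reload in progress, then issue a normal
	 * shutdown to the adapter so the write cache is flushed.
	 */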
8999	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
9000	while (ioa_cfg->in_reset_reload) {
9001		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
9002		wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
9003		spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
9004	}
9005
9006	ipr_initiate_ioa_bringdown(ioa_cfg, IPR_SHUTDOWN_NORMAL);
9007	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
9008	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
9009}
9010
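/*
 * PCI IDs claimed by this driver.  The driver_data field carries
 * per-adapter quirk flags (e.g. IPR_USE_LONG_TRANSOP_TIMEOUT,
 * IPR_USE_PCI_WARM_RESET) consumed at probe time.
 */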
9011static struct pci_device_id ipr_pci_table[] __devinitdata = {
9012	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
9013		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_5702, 0, 0, 0 },
9014	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
9015		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_5703, 0, 0, 0 },
9016	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
9017		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_573D, 0, 0, 0 },
9018	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
9019		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_573E, 0, 0, 0 },
9020	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
9021		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571B, 0, 0, 0 },
9022	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
9023		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572E, 0, 0, 0 },
9024	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
9025		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571A, 0, 0, 0 },
9026	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
9027		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575B, 0, 0,
9028		IPR_USE_LONG_TRANSOP_TIMEOUT },
9029	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
9030	      PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572A, 0, 0, 0 },
9031	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
9032	      PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572B, 0, 0,
9033	      IPR_USE_LONG_TRANSOP_TIMEOUT },
9034	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
9035	      PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575C, 0, 0,
9036	      IPR_USE_LONG_TRANSOP_TIMEOUT },
9037	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
9038	      PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572A, 0, 0, 0 },
9039	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
9040	      PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572B, 0, 0,
9041	      IPR_USE_LONG_TRANSOP_TIMEOUT},
9042	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
9043	      PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575C, 0, 0,
9044	      IPR_USE_LONG_TRANSOP_TIMEOUT },
9045	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
9046	      PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_574E, 0, 0,
9047	      IPR_USE_LONG_TRANSOP_TIMEOUT },
9048	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
9049	      PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B3, 0, 0, 0 },
9050	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
9051	      PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57CC, 0, 0, 0 },
9052	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
9053	      PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B7, 0, 0,
9054	      IPR_USE_LONG_TRANSOP_TIMEOUT | IPR_USE_PCI_WARM_RESET },
9055	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE,
9056		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2780, 0, 0, 0 },
9057	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
9058		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571E, 0, 0, 0 },
9059	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
9060		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571F, 0, 0,
9061		IPR_USE_LONG_TRANSOP_TIMEOUT },
9062	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
9063		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572F, 0, 0,
9064		IPR_USE_LONG_TRANSOP_TIMEOUT },
9065	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
9066		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B5, 0, 0, 0 },
9067	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
9068		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_574D, 0, 0, 0 },
9069	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
9070		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B2, 0, 0, 0 },
9071	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
9072		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C4, 0, 0, 0 },
9073	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_ASIC_E2,
9074		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B4, 0, 0, 0 },
9075	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_ASIC_E2,
9076		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B1, 0, 0, 0 },
9077	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_ASIC_E2,
9078		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C6, 0, 0, 0 },
9079	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_ASIC_E2,
9080		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575D, 0, 0, 0 },
9081	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_ASIC_E2,
9082		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57CE, 0, 0, 0 },
9083	{ }
9084};
9085MODULE_DEVICE_TABLE(pci, ipr_pci_table);
9086
9087static struct pci_error_handlers ipr_err_handler = {
9088	.error_detected = ipr_pci_error_detected,
9089	.slot_reset = ipr_pci_slot_reset,
9090};
9091
9092static struct pci_driver ipr_driver = {
9093	.name = IPR_NAME,
9094	.id_table = ipr_pci_table,
9095	.probe = ipr_probe,
9096	.remove = __devexit_p(ipr_remove),
9097	.shutdown = ipr_shutdown,
9098	.err_handler = &ipr_err_handler,
9099};
9100
9101/**
9102 * ipr_halt_done - Shutdown prepare completion
9103 *
9104 * Return value:
9105 * 	none
9106 **/
9107static void ipr_halt_done(struct ipr_cmnd *ipr_cmd)
9108{
9109	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
9110
9111	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
9112}
9113
9114/**
9115 * ipr_halt - Issue shutdown prepare to all adapters
9116 *
9117 * Return value:
9118 * 	NOTIFY_OK on success / NOTIFY_DONE on failure
9119 * 	NOTIFY_OK if the event was handled / NOTIFY_DONE otherwise
9120static int ipr_halt(struct notifier_block *nb, ulong event, void *buf)
9121{
9122	struct ipr_cmnd *ipr_cmd;
9123	struct ipr_ioa_cfg *ioa_cfg;
9124	unsigned long flags = 0;
9125
9126	if (event != SYS_RESTART && event != SYS_HALT && event != SYS_POWER_OFF)
9127		return NOTIFY_DONE;
9128
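	/*
	 * Send a "prepare for normal shutdown" IOA command to every
	 * adapter that is currently accepting commands.
	 */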
9129	spin_lock(&ipr_driver_lock);
9130
9131	list_for_each_entry(ioa_cfg, &ipr_ioa_head, queue) {
9132		spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
9133		if (!ioa_cfg->allow_cmds) {
9134			spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
9135			continue;
9136		}
9137
9138		ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
9139		ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
9140		ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
9141		ipr_cmd->ioarcb.cmd_pkt.cdb[0] = IPR_IOA_SHUTDOWN;
9142		ipr_cmd->ioarcb.cmd_pkt.cdb[1] = IPR_SHUTDOWN_PREPARE_FOR_NORMAL;
9143
9144		ipr_do_req(ipr_cmd, ipr_halt_done, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
9145		spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
9146	}
9147	spin_unlock(&ipr_driver_lock);
9148
9149	return NOTIFY_OK;
9150}
9151
9152static struct notifier_block ipr_notifier = {
9153	.notifier_call = ipr_halt,
9154};
9155
9156/**
9157 * ipr_init - Module entry point
9158 *
9159 * Return value:
9160 * 	0 on success / negative value on failure
9161 **/
9162static int __init ipr_init(void)
9163{
9164	ipr_info("IBM Power RAID SCSI Device Driver version: %s %s\n",
9165		 IPR_DRIVER_VERSION, IPR_DRIVER_DATE);
9166
9167	register_reboot_notifier(&ipr_notifier);
9168	return pci_register_driver(&ipr_driver);
9169}
9170
9171/**
9172 * ipr_exit - Module unload
9173 *
9174 * Module unload entry point.
9175 *
9176 * Return value:
9177 * 	none
9178 **/
9179static void __exit ipr_exit(void)
9180{
9181	unregister_reboot_notifier(&ipr_notifier);
9182	pci_unregister_driver(&ipr_driver);
9183}
9184
9185module_init(ipr_init);
9186module_exit(ipr_exit);
9187