ipr.c revision 96d21f00ab59c9f27fad191d12a2ccfeff3c9108
1/*
2 * ipr.c -- driver for IBM Power Linux RAID adapters
3 *
4 * Written By: Brian King <brking@us.ibm.com>, IBM Corporation
5 *
6 * Copyright (C) 2003, 2004 IBM Corporation
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
21 *
22 */
23
24/*
25 * Notes:
26 *
27 * This driver is used to control the following SCSI adapters:
28 *
29 * IBM iSeries: 5702, 5703, 2780, 5709, 570A, 570B
30 *
31 * IBM pSeries: PCI-X Dual Channel Ultra 320 SCSI RAID Adapter
32 *              PCI-X Dual Channel Ultra 320 SCSI Adapter
33 *              PCI-X Dual Channel Ultra 320 SCSI RAID Enablement Card
34 *              Embedded SCSI adapter on p615 and p655 systems
35 *
36 * Supported Hardware Features:
37 *	- Ultra 320 SCSI controller
38 *	- PCI-X host interface
39 *	- Embedded PowerPC RISC Processor and Hardware XOR DMA Engine
40 *	- Non-Volatile Write Cache
41 *	- Supports attachment of non-RAID disks, tape, and optical devices
42 *	- RAID Levels 0, 5, 10
43 *	- Hot spare
44 *	- Background Parity Checking
45 *	- Background Data Scrubbing
46 *	- Ability to increase the capacity of an existing RAID 5 disk array
47 *		by adding disks
48 *
49 * Driver Features:
50 *	- Tagged command queuing
51 *	- Adapter microcode download
52 *	- PCI hot plug
53 *	- SCSI device hot plug
54 *
55 */
56
57#include <linux/fs.h>
58#include <linux/init.h>
59#include <linux/types.h>
60#include <linux/errno.h>
61#include <linux/kernel.h>
62#include <linux/slab.h>
63#include <linux/ioport.h>
64#include <linux/delay.h>
65#include <linux/pci.h>
66#include <linux/wait.h>
67#include <linux/spinlock.h>
68#include <linux/sched.h>
69#include <linux/interrupt.h>
70#include <linux/blkdev.h>
71#include <linux/firmware.h>
72#include <linux/module.h>
73#include <linux/moduleparam.h>
74#include <linux/libata.h>
75#include <linux/hdreg.h>
76#include <linux/reboot.h>
77#include <linux/stringify.h>
78#include <asm/io.h>
79#include <asm/irq.h>
80#include <asm/processor.h>
81#include <scsi/scsi.h>
82#include <scsi/scsi_host.h>
83#include <scsi/scsi_tcq.h>
84#include <scsi/scsi_eh.h>
85#include <scsi/scsi_cmnd.h>
86#include "ipr.h"
87
88/*
89 *   Global Data
90 */
91static LIST_HEAD(ipr_ioa_head);
92static unsigned int ipr_log_level = IPR_DEFAULT_LOG_LEVEL;
93static unsigned int ipr_max_speed = 1;
94static int ipr_testmode = 0;
95static unsigned int ipr_fastfail = 0;
96static unsigned int ipr_transop_timeout = 0;
97static unsigned int ipr_debug = 0;
98static unsigned int ipr_max_devs = IPR_DEFAULT_SIS64_DEVS;
99static unsigned int ipr_dual_ioa_raid = 1;
100static DEFINE_SPINLOCK(ipr_driver_lock);
101
102/* This table describes the differences between DMA controller chips */
103static const struct ipr_chip_cfg_t ipr_chip_cfg[] = {
104	{ /* Gemstone, Citrine, Obsidian, and Obsidian-E */
105		.mailbox = 0x0042C,
106		.cache_line_size = 0x20,
107		{
108			.set_interrupt_mask_reg = 0x0022C,
109			.clr_interrupt_mask_reg = 0x00230,
110			.clr_interrupt_mask_reg32 = 0x00230,
111			.sense_interrupt_mask_reg = 0x0022C,
112			.sense_interrupt_mask_reg32 = 0x0022C,
113			.clr_interrupt_reg = 0x00228,
114			.clr_interrupt_reg32 = 0x00228,
115			.sense_interrupt_reg = 0x00224,
116			.sense_interrupt_reg32 = 0x00224,
117			.ioarrin_reg = 0x00404,
118			.sense_uproc_interrupt_reg = 0x00214,
119			.sense_uproc_interrupt_reg32 = 0x00214,
120			.set_uproc_interrupt_reg = 0x00214,
121			.set_uproc_interrupt_reg32 = 0x00214,
122			.clr_uproc_interrupt_reg = 0x00218,
123			.clr_uproc_interrupt_reg32 = 0x00218
124		}
125	},
126	{ /* Snipe and Scamp */
127		.mailbox = 0x0052C,
128		.cache_line_size = 0x20,
129		{
130			.set_interrupt_mask_reg = 0x00288,
131			.clr_interrupt_mask_reg = 0x0028C,
132			.clr_interrupt_mask_reg32 = 0x0028C,
133			.sense_interrupt_mask_reg = 0x00288,
134			.sense_interrupt_mask_reg32 = 0x00288,
135			.clr_interrupt_reg = 0x00284,
136			.clr_interrupt_reg32 = 0x00284,
137			.sense_interrupt_reg = 0x00280,
138			.sense_interrupt_reg32 = 0x00280,
139			.ioarrin_reg = 0x00504,
140			.sense_uproc_interrupt_reg = 0x00290,
141			.sense_uproc_interrupt_reg32 = 0x00290,
142			.set_uproc_interrupt_reg = 0x00290,
143			.set_uproc_interrupt_reg32 = 0x00290,
144			.clr_uproc_interrupt_reg = 0x00294,
145			.clr_uproc_interrupt_reg32 = 0x00294
146		}
147	},
148	{ /* CRoC */
149		.mailbox = 0x00040,
150		.cache_line_size = 0x20,
151		{
152			.set_interrupt_mask_reg = 0x00010,
153			.clr_interrupt_mask_reg = 0x00018,
154			.clr_interrupt_mask_reg32 = 0x0001C,
155			.sense_interrupt_mask_reg = 0x00010,
156			.sense_interrupt_mask_reg32 = 0x00014,
157			.clr_interrupt_reg = 0x00008,
158			.clr_interrupt_reg32 = 0x0000C,
159			.sense_interrupt_reg = 0x00000,
160			.sense_interrupt_reg32 = 0x00004,
161			.ioarrin_reg = 0x00070,
162			.sense_uproc_interrupt_reg = 0x00020,
163			.sense_uproc_interrupt_reg32 = 0x00024,
164			.set_uproc_interrupt_reg = 0x00020,
165			.set_uproc_interrupt_reg32 = 0x00024,
166			.clr_uproc_interrupt_reg = 0x00028,
167			.clr_uproc_interrupt_reg32 = 0x0002C,
168			.init_feedback_reg = 0x0005C,
169			.dump_addr_reg = 0x00064,
170			.dump_data_reg = 0x00068
171		}
172	},
173};
174
175static const struct ipr_chip_t ipr_chip[] = {
176	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE, IPR_USE_LSI, IPR_SIS32, &ipr_chip_cfg[0] },
177	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE, IPR_USE_LSI, IPR_SIS32, &ipr_chip_cfg[0] },
178	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN, IPR_USE_LSI, IPR_SIS32, &ipr_chip_cfg[0] },
179	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN, IPR_USE_LSI, IPR_SIS32, &ipr_chip_cfg[0] },
180	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E, IPR_USE_MSI, IPR_SIS32, &ipr_chip_cfg[0] },
181	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE, IPR_USE_LSI, IPR_SIS32, &ipr_chip_cfg[1] },
182	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP, IPR_USE_LSI, IPR_SIS32, &ipr_chip_cfg[1] },
183	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2, IPR_USE_MSI, IPR_SIS64, &ipr_chip_cfg[2] },
184	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_ASIC_E2, IPR_USE_MSI, IPR_SIS64, &ipr_chip_cfg[2] }
185};
186
187static int ipr_max_bus_speeds[] = {
188	IPR_80MBs_SCSI_RATE, IPR_U160_SCSI_RATE, IPR_U320_SCSI_RATE
189};
190
191MODULE_AUTHOR("Brian King <brking@us.ibm.com>");
192MODULE_DESCRIPTION("IBM Power RAID SCSI Adapter Driver");
193module_param_named(max_speed, ipr_max_speed, uint, 0);
194MODULE_PARM_DESC(max_speed, "Maximum bus speed (0-2). Default: 1=U160. Speeds: 0=80 MB/s, 1=U160, 2=U320");
195module_param_named(log_level, ipr_log_level, uint, 0);
196MODULE_PARM_DESC(log_level, "Set to 0 - 4 for increasing verbosity of device driver");
197module_param_named(testmode, ipr_testmode, int, 0);
198MODULE_PARM_DESC(testmode, "DANGEROUS!!! Allows unsupported configurations");
199module_param_named(fastfail, ipr_fastfail, int, S_IRUGO | S_IWUSR);
200MODULE_PARM_DESC(fastfail, "Reduce timeouts and retries");
201module_param_named(transop_timeout, ipr_transop_timeout, int, 0);
202MODULE_PARM_DESC(transop_timeout, "Time in seconds to wait for adapter to come operational (default: 300)");
203module_param_named(debug, ipr_debug, int, S_IRUGO | S_IWUSR);
204MODULE_PARM_DESC(debug, "Enable device driver debug logging. Set to 1 to enable. (default: 0)");
205module_param_named(dual_ioa_raid, ipr_dual_ioa_raid, int, 0);
206MODULE_PARM_DESC(dual_ioa_raid, "Enable dual adapter RAID support. Set to 1 to enable. (default: 1)");
207module_param_named(max_devs, ipr_max_devs, int, 0);
208MODULE_PARM_DESC(max_devs, "Specify the maximum number of physical devices. "
209		 "[Default=" __stringify(IPR_DEFAULT_SIS64_DEVS) "]");
210MODULE_LICENSE("GPL");
211MODULE_VERSION(IPR_DRIVER_VERSION);
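/*
 * The parameters above are typically supplied at load time; for example
 * (values purely illustrative):
 *
 *	modprobe ipr max_speed=2 log_level=2 fastfail=1
 *
 * or, when the driver is built in, on the kernel command line as
 * ipr.max_speed=2.  fastfail and debug are also writable at runtime
 * through sysfs (S_IRUGO | S_IWUSR above).
 */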
212
213/*  A constant array of IOASCs/URCs/Error Messages */
214static const
215struct ipr_error_table_t ipr_error_table[] = {
216	{0x00000000, 1, IPR_DEFAULT_LOG_LEVEL,
217	"8155: An unknown error was received"},
218	{0x00330000, 0, 0,
219	"Soft underlength error"},
220	{0x005A0000, 0, 0,
221	"Command to be cancelled not found"},
222	{0x00808000, 0, 0,
223	"Qualified success"},
224	{0x01080000, 1, IPR_DEFAULT_LOG_LEVEL,
225	"FFFE: Soft device bus error recovered by the IOA"},
226	{0x01088100, 0, IPR_DEFAULT_LOG_LEVEL,
227	"4101: Soft device bus fabric error"},
228	{0x01100100, 0, IPR_DEFAULT_LOG_LEVEL,
229	"FFFC: Logical block guard error recovered by the device"},
230	{0x01100300, 0, IPR_DEFAULT_LOG_LEVEL,
231	"FFFC: Logical block reference tag error recovered by the device"},
232	{0x01108300, 0, IPR_DEFAULT_LOG_LEVEL,
233	"4171: Recovered scatter list tag / sequence number error"},
234	{0x01109000, 0, IPR_DEFAULT_LOG_LEVEL,
235	"FF3D: Recovered logical block CRC error on IOA to Host transfer"},
236	{0x01109200, 0, IPR_DEFAULT_LOG_LEVEL,
237	"4171: Recovered logical block sequence number error on IOA to Host transfer"},
238	{0x0110A000, 0, IPR_DEFAULT_LOG_LEVEL,
239	"FFFD: Recovered logical block reference tag error detected by the IOA"},
240	{0x0110A100, 0, IPR_DEFAULT_LOG_LEVEL,
241	"FFFD: Logical block guard error recovered by the IOA"},
242	{0x01170600, 0, IPR_DEFAULT_LOG_LEVEL,
243	"FFF9: Device sector reassign successful"},
244	{0x01170900, 0, IPR_DEFAULT_LOG_LEVEL,
245	"FFF7: Media error recovered by device rewrite procedures"},
246	{0x01180200, 0, IPR_DEFAULT_LOG_LEVEL,
247	"7001: IOA sector reassignment successful"},
248	{0x01180500, 0, IPR_DEFAULT_LOG_LEVEL,
249	"FFF9: Soft media error. Sector reassignment recommended"},
250	{0x01180600, 0, IPR_DEFAULT_LOG_LEVEL,
251	"FFF7: Media error recovered by IOA rewrite procedures"},
252	{0x01418000, 0, IPR_DEFAULT_LOG_LEVEL,
253	"FF3D: Soft PCI bus error recovered by the IOA"},
254	{0x01440000, 1, IPR_DEFAULT_LOG_LEVEL,
255	"FFF6: Device hardware error recovered by the IOA"},
256	{0x01448100, 0, IPR_DEFAULT_LOG_LEVEL,
257	"FFF6: Device hardware error recovered by the device"},
258	{0x01448200, 1, IPR_DEFAULT_LOG_LEVEL,
259	"FF3D: Soft IOA error recovered by the IOA"},
260	{0x01448300, 0, IPR_DEFAULT_LOG_LEVEL,
261	"FFFA: Undefined device response recovered by the IOA"},
262	{0x014A0000, 1, IPR_DEFAULT_LOG_LEVEL,
263	"FFF6: Device bus error, message or command phase"},
264	{0x014A8000, 0, IPR_DEFAULT_LOG_LEVEL,
265	"FFFE: Task Management Function failed"},
266	{0x015D0000, 0, IPR_DEFAULT_LOG_LEVEL,
267	"FFF6: Failure prediction threshold exceeded"},
268	{0x015D9200, 0, IPR_DEFAULT_LOG_LEVEL,
269	"8009: Impending cache battery pack failure"},
270	{0x02040400, 0, 0,
271	"34FF: Disk device format in progress"},
272	{0x02048000, 0, IPR_DEFAULT_LOG_LEVEL,
273	"9070: IOA requested reset"},
274	{0x023F0000, 0, 0,
275	"Synchronization required"},
276	{0x024E0000, 0, 0,
277	"Not ready, IOA shutdown"},
278	{0x025A0000, 0, 0,
279	"Not ready, IOA has been shutdown"},
280	{0x02670100, 0, IPR_DEFAULT_LOG_LEVEL,
281	"3020: Storage subsystem configuration error"},
282	{0x03110B00, 0, 0,
283	"FFF5: Medium error, data unreadable, recommend reassign"},
284	{0x03110C00, 0, 0,
285	"7000: Medium error, data unreadable, do not reassign"},
286	{0x03310000, 0, IPR_DEFAULT_LOG_LEVEL,
287	"FFF3: Disk media format bad"},
288	{0x04050000, 0, IPR_DEFAULT_LOG_LEVEL,
289	"3002: Addressed device failed to respond to selection"},
290	{0x04080000, 1, IPR_DEFAULT_LOG_LEVEL,
291	"3100: Device bus error"},
292	{0x04080100, 0, IPR_DEFAULT_LOG_LEVEL,
293	"3109: IOA timed out a device command"},
294	{0x04088000, 0, 0,
295	"3120: SCSI bus is not operational"},
296	{0x04088100, 0, IPR_DEFAULT_LOG_LEVEL,
297	"4100: Hard device bus fabric error"},
298	{0x04100100, 0, IPR_DEFAULT_LOG_LEVEL,
299	"310C: Logical block guard error detected by the device"},
300	{0x04100300, 0, IPR_DEFAULT_LOG_LEVEL,
301	"310C: Logical block reference tag error detected by the device"},
302	{0x04108300, 1, IPR_DEFAULT_LOG_LEVEL,
303	"4170: Scatter list tag / sequence number error"},
304	{0x04109000, 1, IPR_DEFAULT_LOG_LEVEL,
305	"8150: Logical block CRC error on IOA to Host transfer"},
306	{0x04109200, 1, IPR_DEFAULT_LOG_LEVEL,
307	"4170: Logical block sequence number error on IOA to Host transfer"},
308	{0x0410A000, 0, IPR_DEFAULT_LOG_LEVEL,
309	"310D: Logical block reference tag error detected by the IOA"},
310	{0x0410A100, 0, IPR_DEFAULT_LOG_LEVEL,
311	"310D: Logical block guard error detected by the IOA"},
312	{0x04118000, 0, IPR_DEFAULT_LOG_LEVEL,
313	"9000: IOA reserved area data check"},
314	{0x04118100, 0, IPR_DEFAULT_LOG_LEVEL,
315	"9001: IOA reserved area invalid data pattern"},
316	{0x04118200, 0, IPR_DEFAULT_LOG_LEVEL,
317	"9002: IOA reserved area LRC error"},
318	{0x04118300, 1, IPR_DEFAULT_LOG_LEVEL,
319	"Hardware Error, IOA metadata access error"},
320	{0x04320000, 0, IPR_DEFAULT_LOG_LEVEL,
321	"102E: Out of alternate sectors for disk storage"},
322	{0x04330000, 1, IPR_DEFAULT_LOG_LEVEL,
323	"FFF4: Data transfer underlength error"},
324	{0x04338000, 1, IPR_DEFAULT_LOG_LEVEL,
325	"FFF4: Data transfer overlength error"},
326	{0x043E0100, 0, IPR_DEFAULT_LOG_LEVEL,
327	"3400: Logical unit failure"},
328	{0x04408500, 0, IPR_DEFAULT_LOG_LEVEL,
329	"FFF4: Device microcode is corrupt"},
330	{0x04418000, 1, IPR_DEFAULT_LOG_LEVEL,
331	"8150: PCI bus error"},
332	{0x04430000, 1, 0,
333	"Unsupported device bus message received"},
334	{0x04440000, 1, IPR_DEFAULT_LOG_LEVEL,
335	"FFF4: Disk device problem"},
336	{0x04448200, 1, IPR_DEFAULT_LOG_LEVEL,
337	"8150: Permanent IOA failure"},
338	{0x04448300, 0, IPR_DEFAULT_LOG_LEVEL,
339	"3010: Disk device returned wrong response to IOA"},
340	{0x04448400, 0, IPR_DEFAULT_LOG_LEVEL,
341	"8151: IOA microcode error"},
342	{0x04448500, 0, 0,
343	"Device bus status error"},
344	{0x04448600, 0, IPR_DEFAULT_LOG_LEVEL,
345	"8157: IOA error requiring IOA reset to recover"},
346	{0x04448700, 0, 0,
347	"ATA device status error"},
348	{0x04490000, 0, 0,
349	"Message reject received from the device"},
350	{0x04449200, 0, IPR_DEFAULT_LOG_LEVEL,
351	"8008: A permanent cache battery pack failure occurred"},
352	{0x0444A000, 0, IPR_DEFAULT_LOG_LEVEL,
353	"9090: Disk unit has been modified after the last known status"},
354	{0x0444A200, 0, IPR_DEFAULT_LOG_LEVEL,
355	"9081: IOA detected device error"},
356	{0x0444A300, 0, IPR_DEFAULT_LOG_LEVEL,
357	"9082: IOA detected device error"},
358	{0x044A0000, 1, IPR_DEFAULT_LOG_LEVEL,
359	"3110: Device bus error, message or command phase"},
360	{0x044A8000, 1, IPR_DEFAULT_LOG_LEVEL,
361	"3110: SAS Command / Task Management Function failed"},
362	{0x04670400, 0, IPR_DEFAULT_LOG_LEVEL,
363	"9091: Incorrect hardware configuration change has been detected"},
364	{0x04678000, 0, IPR_DEFAULT_LOG_LEVEL,
365	"9073: Invalid multi-adapter configuration"},
366	{0x04678100, 0, IPR_DEFAULT_LOG_LEVEL,
367	"4010: Incorrect connection between cascaded expanders"},
368	{0x04678200, 0, IPR_DEFAULT_LOG_LEVEL,
369	"4020: Connections exceed IOA design limits"},
370	{0x04678300, 0, IPR_DEFAULT_LOG_LEVEL,
371	"4030: Incorrect multipath connection"},
372	{0x04679000, 0, IPR_DEFAULT_LOG_LEVEL,
373	"4110: Unsupported enclosure function"},
374	{0x046E0000, 0, IPR_DEFAULT_LOG_LEVEL,
375	"FFF4: Command to logical unit failed"},
376	{0x05240000, 1, 0,
377	"Illegal request, invalid request type or request packet"},
378	{0x05250000, 0, 0,
379	"Illegal request, invalid resource handle"},
380	{0x05258000, 0, 0,
381	"Illegal request, commands not allowed to this device"},
382	{0x05258100, 0, 0,
383	"Illegal request, command not allowed to a secondary adapter"},
384	{0x05258200, 0, 0,
385	"Illegal request, command not allowed to a non-optimized resource"},
386	{0x05260000, 0, 0,
387	"Illegal request, invalid field in parameter list"},
388	{0x05260100, 0, 0,
389	"Illegal request, parameter not supported"},
390	{0x05260200, 0, 0,
391	"Illegal request, parameter value invalid"},
392	{0x052C0000, 0, 0,
393	"Illegal request, command sequence error"},
394	{0x052C8000, 1, 0,
395	"Illegal request, dual adapter support not enabled"},
396	{0x06040500, 0, IPR_DEFAULT_LOG_LEVEL,
397	"9031: Array protection temporarily suspended, protection resuming"},
398	{0x06040600, 0, IPR_DEFAULT_LOG_LEVEL,
399	"9040: Array protection temporarily suspended, protection resuming"},
400	{0x06288000, 0, IPR_DEFAULT_LOG_LEVEL,
401	"3140: Device bus not ready to ready transition"},
402	{0x06290000, 0, IPR_DEFAULT_LOG_LEVEL,
403	"FFFB: SCSI bus was reset"},
404	{0x06290500, 0, 0,
405	"FFFE: SCSI bus transition to single ended"},
406	{0x06290600, 0, 0,
407	"FFFE: SCSI bus transition to LVD"},
408	{0x06298000, 0, IPR_DEFAULT_LOG_LEVEL,
409	"FFFB: SCSI bus was reset by another initiator"},
410	{0x063F0300, 0, IPR_DEFAULT_LOG_LEVEL,
411	"3029: A device replacement has occurred"},
412	{0x064C8000, 0, IPR_DEFAULT_LOG_LEVEL,
413	"9051: IOA cache data exists for a missing or failed device"},
414	{0x064C8100, 0, IPR_DEFAULT_LOG_LEVEL,
415	"9055: Auxiliary cache IOA contains cache data needed by the primary IOA"},
416	{0x06670100, 0, IPR_DEFAULT_LOG_LEVEL,
417	"9025: Disk unit is not supported at its physical location"},
418	{0x06670600, 0, IPR_DEFAULT_LOG_LEVEL,
419	"3020: IOA detected a SCSI bus configuration error"},
420	{0x06678000, 0, IPR_DEFAULT_LOG_LEVEL,
421	"3150: SCSI bus configuration error"},
422	{0x06678100, 0, IPR_DEFAULT_LOG_LEVEL,
423	"9074: Asymmetric advanced function disk configuration"},
424	{0x06678300, 0, IPR_DEFAULT_LOG_LEVEL,
425	"4040: Incomplete multipath connection between IOA and enclosure"},
426	{0x06678400, 0, IPR_DEFAULT_LOG_LEVEL,
427	"4041: Incomplete multipath connection between enclosure and device"},
428	{0x06678500, 0, IPR_DEFAULT_LOG_LEVEL,
429	"9075: Incomplete multipath connection between IOA and remote IOA"},
430	{0x06678600, 0, IPR_DEFAULT_LOG_LEVEL,
431	"9076: Configuration error, missing remote IOA"},
432	{0x06679100, 0, IPR_DEFAULT_LOG_LEVEL,
433	"4050: Enclosure does not support a required multipath function"},
434	{0x06690000, 0, IPR_DEFAULT_LOG_LEVEL,
435	"4070: Logically bad block written on device"},
436	{0x06690200, 0, IPR_DEFAULT_LOG_LEVEL,
437	"9041: Array protection temporarily suspended"},
438	{0x06698200, 0, IPR_DEFAULT_LOG_LEVEL,
439	"9042: Corrupt array parity detected on specified device"},
440	{0x066B0200, 0, IPR_DEFAULT_LOG_LEVEL,
441	"9030: Array no longer protected due to missing or failed disk unit"},
442	{0x066B8000, 0, IPR_DEFAULT_LOG_LEVEL,
443	"9071: Link operational transition"},
444	{0x066B8100, 0, IPR_DEFAULT_LOG_LEVEL,
445	"9072: Link not operational transition"},
446	{0x066B8200, 0, IPR_DEFAULT_LOG_LEVEL,
447	"9032: Array exposed but still protected"},
448	{0x066B8300, 0, IPR_DEFAULT_LOG_LEVEL + 1,
449	"70DD: Device forced failed by disrupt device command"},
450	{0x066B9100, 0, IPR_DEFAULT_LOG_LEVEL,
451	"4061: Multipath redundancy level got better"},
452	{0x066B9200, 0, IPR_DEFAULT_LOG_LEVEL,
453	"4060: Multipath redundancy level got worse"},
454	{0x07270000, 0, 0,
455	"Failure due to other device"},
456	{0x07278000, 0, IPR_DEFAULT_LOG_LEVEL,
457	"9008: IOA does not support functions expected by devices"},
458	{0x07278100, 0, IPR_DEFAULT_LOG_LEVEL,
459	"9010: Cache data associated with attached devices cannot be found"},
460	{0x07278200, 0, IPR_DEFAULT_LOG_LEVEL,
461	"9011: Cache data belongs to devices other than those attached"},
462	{0x07278400, 0, IPR_DEFAULT_LOG_LEVEL,
463	"9020: Array missing 2 or more devices with only 1 device present"},
464	{0x07278500, 0, IPR_DEFAULT_LOG_LEVEL,
465	"9021: Array missing 2 or more devices with 2 or more devices present"},
466	{0x07278600, 0, IPR_DEFAULT_LOG_LEVEL,
467	"9022: Exposed array is missing a required device"},
468	{0x07278700, 0, IPR_DEFAULT_LOG_LEVEL,
469	"9023: Array member(s) not at required physical locations"},
470	{0x07278800, 0, IPR_DEFAULT_LOG_LEVEL,
471	"9024: Array not functional due to present hardware configuration"},
472	{0x07278900, 0, IPR_DEFAULT_LOG_LEVEL,
473	"9026: Array not functional due to present hardware configuration"},
474	{0x07278A00, 0, IPR_DEFAULT_LOG_LEVEL,
475	"9027: Array is missing a device and parity is out of sync"},
476	{0x07278B00, 0, IPR_DEFAULT_LOG_LEVEL,
477	"9028: Maximum number of arrays already exist"},
478	{0x07278C00, 0, IPR_DEFAULT_LOG_LEVEL,
479	"9050: Required cache data cannot be located for a disk unit"},
480	{0x07278D00, 0, IPR_DEFAULT_LOG_LEVEL,
481	"9052: Cache data exists for a device that has been modified"},
482	{0x07278F00, 0, IPR_DEFAULT_LOG_LEVEL,
483	"9054: IOA resources not available due to previous problems"},
484	{0x07279100, 0, IPR_DEFAULT_LOG_LEVEL,
485	"9092: Disk unit requires initialization before use"},
486	{0x07279200, 0, IPR_DEFAULT_LOG_LEVEL,
487	"9029: Incorrect hardware configuration change has been detected"},
488	{0x07279600, 0, IPR_DEFAULT_LOG_LEVEL,
489	"9060: One or more disk pairs are missing from an array"},
490	{0x07279700, 0, IPR_DEFAULT_LOG_LEVEL,
491	"9061: One or more disks are missing from an array"},
492	{0x07279800, 0, IPR_DEFAULT_LOG_LEVEL,
493	"9062: One or more disks are missing from an array"},
494	{0x07279900, 0, IPR_DEFAULT_LOG_LEVEL,
495	"9063: Maximum number of functional arrays has been exceeded"},
496	{0x0B260000, 0, 0,
497	"Aborted command, invalid descriptor"},
498	{0x0B5A0000, 0, 0,
499	"Command terminated by host"}
500};
501
502static const struct ipr_ses_table_entry ipr_ses_table[] = {
503	{ "2104-DL1        ", "XXXXXXXXXXXXXXXX", 80 },
504	{ "2104-TL1        ", "XXXXXXXXXXXXXXXX", 80 },
505	{ "HSBP07M P U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Hidive 7 slot */
506	{ "HSBP05M P U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Hidive 5 slot */
507	{ "HSBP05M S U2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* Bowtie */
508	{ "HSBP06E ASU2SCSI", "XXXXXXXXXXXXXXXX", 80 }, /* MartinFenning */
509	{ "2104-DU3        ", "XXXXXXXXXXXXXXXX", 160 },
510	{ "2104-TU3        ", "XXXXXXXXXXXXXXXX", 160 },
511	{ "HSBP04C RSU2SCSI", "XXXXXXX*XXXXXXXX", 160 },
512	{ "HSBP06E RSU2SCSI", "XXXXXXX*XXXXXXXX", 160 },
513	{ "St  V1S2        ", "XXXXXXXXXXXXXXXX", 160 },
514	{ "HSBPD4M  PU3SCSI", "XXXXXXX*XXXXXXXX", 160 },
515	{ "VSBPD1H   U3SCSI", "XXXXXXX*XXXXXXXX", 160 }
516};
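/*
 * Note on the table above (interpretation per the SES matching code later
 * in the driver): the second field is a per-byte compare mask for the
 * enclosure product ID -- bytes marked 'X' must match the first field,
 * any other byte (e.g. '*') is treated as a wildcard.  The trailing value
 * is the maximum SCSI bus speed, in MB/s, permitted with that enclosure
 * attached.
 */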
517
518/*
519 *  Function Prototypes
520 */
521static int ipr_reset_alert(struct ipr_cmnd *);
522static void ipr_process_ccn(struct ipr_cmnd *);
523static void ipr_process_error(struct ipr_cmnd *);
524static void ipr_reset_ioa_job(struct ipr_cmnd *);
525static void ipr_initiate_ioa_reset(struct ipr_ioa_cfg *,
526				   enum ipr_shutdown_type);
527
528#ifdef CONFIG_SCSI_IPR_TRACE
529/**
530 * ipr_trc_hook - Add a trace entry to the driver trace
531 * @ipr_cmd:	ipr command struct
532 * @type:		trace type
533 * @add_data:	additional data
534 *
535 * Return value:
536 * 	none
537 **/
538static void ipr_trc_hook(struct ipr_cmnd *ipr_cmd,
539			 u8 type, u32 add_data)
540{
541	struct ipr_trace_entry *trace_entry;
542	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
543
544	trace_entry = &ioa_cfg->trace[ioa_cfg->trace_index++];
545	trace_entry->time = jiffies;
546	trace_entry->op_code = ipr_cmd->ioarcb.cmd_pkt.cdb[0];
547	trace_entry->type = type;
548	if (ipr_cmd->ioa_cfg->sis64)
549		trace_entry->ata_op_code = ipr_cmd->i.ata_ioadl.regs.command;
550	else
551		trace_entry->ata_op_code = ipr_cmd->ioarcb.u.add_data.u.regs.command;
552	trace_entry->cmd_index = ipr_cmd->cmd_index & 0xff;
553	trace_entry->res_handle = ipr_cmd->ioarcb.res_handle;
554	trace_entry->u.add_data = add_data;
555}
556#else
557#define ipr_trc_hook(ipr_cmd, type, add_data) do { } while(0)
558#endif
559
560/**
561 * ipr_reinit_ipr_cmnd - Re-initialize an IPR Cmnd block for reuse
562 * @ipr_cmd:	ipr command struct
563 *
564 * Return value:
565 * 	none
566 **/
567static void ipr_reinit_ipr_cmnd(struct ipr_cmnd *ipr_cmd)
568{
569	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
570	struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
571	struct ipr_ioasa64 *ioasa64 = &ipr_cmd->s.ioasa64;
572	dma_addr_t dma_addr = ipr_cmd->dma_addr;
573
574	memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt));
575	ioarcb->data_transfer_length = 0;
576	ioarcb->read_data_transfer_length = 0;
577	ioarcb->ioadl_len = 0;
578	ioarcb->read_ioadl_len = 0;
579
580	if (ipr_cmd->ioa_cfg->sis64) {
581		ioarcb->u.sis64_addr_data.data_ioadl_addr =
582			cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64));
583		ioasa64->u.gata.status = 0;
584	} else {
585		ioarcb->write_ioadl_addr =
586			cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl));
587		ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
588		ioasa->u.gata.status = 0;
589	}
590
591	ioasa->hdr.ioasc = 0;
592	ioasa->hdr.residual_data_len = 0;
593	ipr_cmd->scsi_cmd = NULL;
594	ipr_cmd->qc = NULL;
595	ipr_cmd->sense_buffer[0] = 0;
596	ipr_cmd->dma_use_sg = 0;
597}
598
599/**
600 * ipr_init_ipr_cmnd - Initialize an IPR Cmnd block
601 * @ipr_cmd:	ipr command struct
602 *
603 * Return value:
604 * 	none
605 **/
606static void ipr_init_ipr_cmnd(struct ipr_cmnd *ipr_cmd)
607{
608	ipr_reinit_ipr_cmnd(ipr_cmd);
609	ipr_cmd->u.scratch = 0;
610	ipr_cmd->sibling = NULL;
611	init_timer(&ipr_cmd->timer);
612}
613
614/**
615 * ipr_get_free_ipr_cmnd - Get a free IPR Cmnd block
616 * @ioa_cfg:	ioa config struct
617 *
618 * Return value:
619 * 	pointer to ipr command struct
620 **/
621static
622struct ipr_cmnd *ipr_get_free_ipr_cmnd(struct ipr_ioa_cfg *ioa_cfg)
623{
624	struct ipr_cmnd *ipr_cmd;
625
626	ipr_cmd = list_entry(ioa_cfg->free_q.next, struct ipr_cmnd, queue);
627	list_del(&ipr_cmd->queue);
628	ipr_init_ipr_cmnd(ipr_cmd);
629
630	return ipr_cmd;
631}
632
633/**
634 * ipr_mask_and_clear_interrupts - Mask all and clear specified interrupts
635 * @ioa_cfg:	ioa config struct
636 * @clr_ints:     interrupts to clear
637 *
638 * This function masks all interrupts on the adapter, then clears the
639 * interrupts specified in the mask
640 *
641 * Return value:
642 * 	none
643 **/
644static void ipr_mask_and_clear_interrupts(struct ipr_ioa_cfg *ioa_cfg,
645					  u32 clr_ints)
646{
647	volatile u32 int_reg;
648
649	/* Stop new interrupts */
650	ioa_cfg->allow_interrupts = 0;
651
652	/* Set interrupt mask to stop all new interrupts */
653	if (ioa_cfg->sis64)
654		writeq(~0, ioa_cfg->regs.set_interrupt_mask_reg);
655	else
656		writel(~0, ioa_cfg->regs.set_interrupt_mask_reg);
657
658	/* Clear any pending interrupts */
659	if (ioa_cfg->sis64)
660		writel(~0, ioa_cfg->regs.clr_interrupt_reg);
661	writel(clr_ints, ioa_cfg->regs.clr_interrupt_reg32);
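	/* The readl below flushes the posted MMIO writes above before we return */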
662	int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
663}
664
665/**
666 * ipr_save_pcix_cmd_reg - Save PCI-X command register
667 * @ioa_cfg:	ioa config struct
668 *
669 * Return value:
670 * 	0 on success / -EIO on failure
671 **/
672static int ipr_save_pcix_cmd_reg(struct ipr_ioa_cfg *ioa_cfg)
673{
674	int pcix_cmd_reg = pci_find_capability(ioa_cfg->pdev, PCI_CAP_ID_PCIX);
675
676	if (pcix_cmd_reg == 0)
677		return 0;
678
679	if (pci_read_config_word(ioa_cfg->pdev, pcix_cmd_reg + PCI_X_CMD,
680				 &ioa_cfg->saved_pcix_cmd_reg) != PCIBIOS_SUCCESSFUL) {
681		dev_err(&ioa_cfg->pdev->dev, "Failed to save PCI-X command register\n");
682		return -EIO;
683	}
684
685	ioa_cfg->saved_pcix_cmd_reg |= PCI_X_CMD_DPERR_E | PCI_X_CMD_ERO;
686	return 0;
687}
688
689/**
690 * ipr_set_pcix_cmd_reg - Setup PCI-X command register
691 * @ioa_cfg:	ioa config struct
692 *
693 * Return value:
694 * 	0 on success / -EIO on failure
695 **/
696static int ipr_set_pcix_cmd_reg(struct ipr_ioa_cfg *ioa_cfg)
697{
698	int pcix_cmd_reg = pci_find_capability(ioa_cfg->pdev, PCI_CAP_ID_PCIX);
699
700	if (pcix_cmd_reg) {
701		if (pci_write_config_word(ioa_cfg->pdev, pcix_cmd_reg + PCI_X_CMD,
702					  ioa_cfg->saved_pcix_cmd_reg) != PCIBIOS_SUCCESSFUL) {
703			dev_err(&ioa_cfg->pdev->dev, "Failed to setup PCI-X command register\n");
704			return -EIO;
705		}
706	}
707
708	return 0;
709}
710
711/**
712 * ipr_sata_eh_done - done function for aborted SATA commands
713 * @ipr_cmd:	ipr command struct
714 *
715 * This function is invoked for ops generated to SATA
716 * devices which are being aborted.
717 *
718 * Return value:
719 * 	none
720 **/
721static void ipr_sata_eh_done(struct ipr_cmnd *ipr_cmd)
722{
723	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
724	struct ata_queued_cmd *qc = ipr_cmd->qc;
725	struct ipr_sata_port *sata_port = qc->ap->private_data;
726
727	qc->err_mask |= AC_ERR_OTHER;
728	sata_port->ioasa.status |= ATA_BUSY;
729	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
730	ata_qc_complete(qc);
731}
732
733/**
734 * ipr_scsi_eh_done - mid-layer done function for aborted ops
735 * @ipr_cmd:	ipr command struct
736 *
737 * This function is invoked by the interrupt handler for
738 * ops generated by the SCSI mid-layer which are being aborted.
739 *
740 * Return value:
741 * 	none
742 **/
743static void ipr_scsi_eh_done(struct ipr_cmnd *ipr_cmd)
744{
745	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
746	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
747
748	scsi_cmd->result |= (DID_ERROR << 16);
749
750	scsi_dma_unmap(ipr_cmd->scsi_cmd);
751	scsi_cmd->scsi_done(scsi_cmd);
752	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
753}
754
755/**
756 * ipr_fail_all_ops - Fails all outstanding ops.
757 * @ioa_cfg:	ioa config struct
758 *
759 * This function fails all outstanding ops.
760 *
761 * Return value:
762 * 	none
763 **/
764static void ipr_fail_all_ops(struct ipr_ioa_cfg *ioa_cfg)
765{
766	struct ipr_cmnd *ipr_cmd, *temp;
767
768	ENTER;
769	list_for_each_entry_safe(ipr_cmd, temp, &ioa_cfg->pending_q, queue) {
770		list_del(&ipr_cmd->queue);
771
772		ipr_cmd->s.ioasa.hdr.ioasc = cpu_to_be32(IPR_IOASC_IOA_WAS_RESET);
773		ipr_cmd->s.ioasa.hdr.ilid = cpu_to_be32(IPR_DRIVER_ILID);
774
775		if (ipr_cmd->scsi_cmd)
776			ipr_cmd->done = ipr_scsi_eh_done;
777		else if (ipr_cmd->qc)
778			ipr_cmd->done = ipr_sata_eh_done;
779
780		ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH, IPR_IOASC_IOA_WAS_RESET);
781		del_timer(&ipr_cmd->timer);
782		ipr_cmd->done(ipr_cmd);
783	}
784
785	LEAVE;
786}
787
788/**
789 * ipr_send_command -  Send driver initiated requests.
790 * @ipr_cmd:		ipr command struct
791 *
792 * This function sends a command to the adapter using the correct write call.
793 * In the case of sis64, calculate the ioarcb size required. Then or in the
794 * appropriate bits.
795 *
796 * Return value:
797 * 	none
798 **/
799static void ipr_send_command(struct ipr_cmnd *ipr_cmd)
800{
801	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
802	dma_addr_t send_dma_addr = ipr_cmd->dma_addr;
803
804	if (ioa_cfg->sis64) {
805		/* The default size is 256 bytes */
806		send_dma_addr |= 0x1;
807
808		/* If the number of ioadls * size of ioadl > 128 bytes,
809		   then use a 512 byte ioarcb */
810		if (ipr_cmd->dma_use_sg * sizeof(struct ipr_ioadl64_desc) > 128)
811			send_dma_addr |= 0x4;
812		writeq(send_dma_addr, ioa_cfg->regs.ioarrin_reg);
813	} else
814		writel(send_dma_addr, ioa_cfg->regs.ioarrin_reg);
815}
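/*
 * Worked example of the sis64 size encoding above (illustrative only):
 * a struct ipr_ioadl64_desc is 16 bytes, so more than 8 descriptors
 * crosses the 128 byte threshold.  With dma_use_sg = 10:
 *
 *	10 * 16 = 160 > 128  =>  0x4 is ORed in to request a 512 byte ioarcb
 *
 * Bit 0x1 (the default 256 byte size) is always set for sis64; the adapter
 * takes the size hint from these low-order bits of the IOARCB address.
 */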
816
817/**
818 * ipr_do_req -  Send driver initiated requests.
819 * @ipr_cmd:		ipr command struct
820 * @done:			done function
821 * @timeout_func:	timeout function
822 * @timeout:		timeout value
823 *
824 * This function sends the specified command to the adapter with the
825 * timeout given. The done function is invoked on command completion.
826 *
827 * Return value:
828 * 	none
829 **/
830static void ipr_do_req(struct ipr_cmnd *ipr_cmd,
831		       void (*done) (struct ipr_cmnd *),
832		       void (*timeout_func) (struct ipr_cmnd *), u32 timeout)
833{
834	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
835
836	list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);
837
838	ipr_cmd->done = done;
839
840	ipr_cmd->timer.data = (unsigned long) ipr_cmd;
841	ipr_cmd->timer.expires = jiffies + timeout;
842	ipr_cmd->timer.function = (void (*)(unsigned long))timeout_func;
843
844	add_timer(&ipr_cmd->timer);
845
846	ipr_trc_hook(ipr_cmd, IPR_TRACE_START, 0);
847
848	mb();
849
850	ipr_send_command(ipr_cmd);
851}
852
853/**
854 * ipr_internal_cmd_done - Op done function for an internally generated op.
855 * @ipr_cmd:	ipr command struct
856 *
857 * This function is the op done function for an internally generated,
858 * blocking op. It simply wakes the sleeping thread.
859 *
860 * Return value:
861 * 	none
862 **/
863static void ipr_internal_cmd_done(struct ipr_cmnd *ipr_cmd)
864{
865	if (ipr_cmd->sibling)
866		ipr_cmd->sibling = NULL;
867	else
868		complete(&ipr_cmd->completion);
869}
870
871/**
872 * ipr_init_ioadl - initialize the ioadl for the correct SIS type
873 * @ipr_cmd:	ipr command struct
874 * @dma_addr:	dma address
875 * @len:	transfer length
876 * @flags:	ioadl flag value
877 *
878 * This function initializes an ioadl in the case where there is only a single
879 * descriptor.
880 *
881 * Return value:
882 * 	nothing
883 **/
884static void ipr_init_ioadl(struct ipr_cmnd *ipr_cmd, dma_addr_t dma_addr,
885			   u32 len, int flags)
886{
887	struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
888	struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;
889
890	ipr_cmd->dma_use_sg = 1;
891
892	if (ipr_cmd->ioa_cfg->sis64) {
893		ioadl64->flags = cpu_to_be32(flags);
894		ioadl64->data_len = cpu_to_be32(len);
895		ioadl64->address = cpu_to_be64(dma_addr);
896
897		ipr_cmd->ioarcb.ioadl_len =
898		       	cpu_to_be32(sizeof(struct ipr_ioadl64_desc));
899		ipr_cmd->ioarcb.data_transfer_length = cpu_to_be32(len);
900	} else {
901		ioadl->flags_and_data_len = cpu_to_be32(flags | len);
902		ioadl->address = cpu_to_be32(dma_addr);
903
904		if (flags == IPR_IOADL_FLAGS_READ_LAST) {
905			ipr_cmd->ioarcb.read_ioadl_len =
906				cpu_to_be32(sizeof(struct ipr_ioadl_desc));
907			ipr_cmd->ioarcb.read_data_transfer_length = cpu_to_be32(len);
908		} else {
909			ipr_cmd->ioarcb.ioadl_len =
910			       	cpu_to_be32(sizeof(struct ipr_ioadl_desc));
911			ipr_cmd->ioarcb.data_transfer_length = cpu_to_be32(len);
912		}
913	}
914}
915
916/**
917 * ipr_send_blocking_cmd - Send command and sleep on its completion.
918 * @ipr_cmd:	ipr command struct
919 * @timeout_func:	function to invoke if command times out
920 * @timeout:	timeout
921 *
922 * Return value:
923 * 	none
924 **/
925static void ipr_send_blocking_cmd(struct ipr_cmnd *ipr_cmd,
926				  void (*timeout_func) (struct ipr_cmnd *ipr_cmd),
927				  u32 timeout)
928{
929	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
930
931	init_completion(&ipr_cmd->completion);
932	ipr_do_req(ipr_cmd, ipr_internal_cmd_done, timeout_func, timeout);
933
934	spin_unlock_irq(ioa_cfg->host->host_lock);
935	wait_for_completion(&ipr_cmd->completion);
936	spin_lock_irq(ioa_cfg->host->host_lock);
937}
938
939/**
940 * ipr_send_hcam - Send an HCAM to the adapter.
941 * @ioa_cfg:	ioa config struct
942 * @type:		HCAM type
943 * @hostrcb:	hostrcb struct
944 *
945 * This function will send a Host Controlled Async command to the adapter.
946 * If HCAMs are currently not allowed to be issued to the adapter, it will
947 * place the hostrcb on the free queue.
948 *
949 * Return value:
950 * 	none
951 **/
952static void ipr_send_hcam(struct ipr_ioa_cfg *ioa_cfg, u8 type,
953			  struct ipr_hostrcb *hostrcb)
954{
955	struct ipr_cmnd *ipr_cmd;
956	struct ipr_ioarcb *ioarcb;
957
958	if (ioa_cfg->allow_cmds) {
959		ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
960		list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);
961		list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_pending_q);
962
963		ipr_cmd->u.hostrcb = hostrcb;
964		ioarcb = &ipr_cmd->ioarcb;
965
966		ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
967		ioarcb->cmd_pkt.request_type = IPR_RQTYPE_HCAM;
968		ioarcb->cmd_pkt.cdb[0] = IPR_HOST_CONTROLLED_ASYNC;
969		ioarcb->cmd_pkt.cdb[1] = type;
970		ioarcb->cmd_pkt.cdb[7] = (sizeof(hostrcb->hcam) >> 8) & 0xff;
971		ioarcb->cmd_pkt.cdb[8] = sizeof(hostrcb->hcam) & 0xff;
972
973		ipr_init_ioadl(ipr_cmd, hostrcb->hostrcb_dma,
974			       sizeof(hostrcb->hcam), IPR_IOADL_FLAGS_READ_LAST);
975
976		if (type == IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE)
977			ipr_cmd->done = ipr_process_ccn;
978		else
979			ipr_cmd->done = ipr_process_error;
980
981		ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_IOA_RES_ADDR);
982
983		mb();
984
985		ipr_send_command(ipr_cmd);
986	} else {
987		list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
988	}
989}
990
991/**
992 * ipr_update_ata_class - Update the ata class in the resource entry
993 * @res:	resource entry struct
994 * @proto:	cfgte device bus protocol value
995 *
996 * Return value:
997 * 	none
998 **/
999static void ipr_update_ata_class(struct ipr_resource_entry *res, unsigned int proto)
1000{
1001	switch (proto) {
1002	case IPR_PROTO_SATA:
1003	case IPR_PROTO_SAS_STP:
1004		res->ata_class = ATA_DEV_ATA;
1005		break;
1006	case IPR_PROTO_SATA_ATAPI:
1007	case IPR_PROTO_SAS_STP_ATAPI:
1008		res->ata_class = ATA_DEV_ATAPI;
1009		break;
1010	default:
1011		res->ata_class = ATA_DEV_UNKNOWN;
1012		break;
1013	}
1014}
1015
1016/**
1017 * ipr_init_res_entry - Initialize a resource entry struct.
1018 * @res:	resource entry struct
1019 * @cfgtew:	config table entry wrapper struct
1020 *
1021 * Return value:
1022 * 	none
1023 **/
1024static void ipr_init_res_entry(struct ipr_resource_entry *res,
1025			       struct ipr_config_table_entry_wrapper *cfgtew)
1026{
1027	int found = 0;
1028	unsigned int proto;
1029	struct ipr_ioa_cfg *ioa_cfg = res->ioa_cfg;
1030	struct ipr_resource_entry *gscsi_res = NULL;
1031
1032	res->needs_sync_complete = 0;
1033	res->in_erp = 0;
1034	res->add_to_ml = 0;
1035	res->del_from_ml = 0;
1036	res->resetting_device = 0;
1037	res->sdev = NULL;
1038	res->sata_port = NULL;
1039
1040	if (ioa_cfg->sis64) {
1041		proto = cfgtew->u.cfgte64->proto;
1042		res->res_flags = cfgtew->u.cfgte64->res_flags;
1043		res->qmodel = IPR_QUEUEING_MODEL64(res);
1044		res->type = cfgtew->u.cfgte64->res_type;
1045
1046		memcpy(res->res_path, &cfgtew->u.cfgte64->res_path,
1047			sizeof(res->res_path));
1048
1049		res->bus = 0;
1050		res->lun = scsilun_to_int(&res->dev_lun);
1051
1052		if (res->type == IPR_RES_TYPE_GENERIC_SCSI) {
1053			list_for_each_entry(gscsi_res, &ioa_cfg->used_res_q, queue) {
1054				if (gscsi_res->dev_id == cfgtew->u.cfgte64->dev_id) {
1055					found = 1;
1056					res->target = gscsi_res->target;
1057					break;
1058				}
1059			}
1060			if (!found) {
1061				res->target = find_first_zero_bit(ioa_cfg->target_ids,
1062								  ioa_cfg->max_devs_supported);
1063				set_bit(res->target, ioa_cfg->target_ids);
1064			}
1065
1066			memcpy(&res->dev_lun.scsi_lun, &cfgtew->u.cfgte64->lun,
1067				sizeof(res->dev_lun.scsi_lun));
1068		} else if (res->type == IPR_RES_TYPE_IOAFP) {
1069			res->bus = IPR_IOAFP_VIRTUAL_BUS;
1070			res->target = 0;
1071		} else if (res->type == IPR_RES_TYPE_ARRAY) {
1072			res->bus = IPR_ARRAY_VIRTUAL_BUS;
1073			res->target = find_first_zero_bit(ioa_cfg->array_ids,
1074							  ioa_cfg->max_devs_supported);
1075			set_bit(res->target, ioa_cfg->array_ids);
1076		} else if (res->type == IPR_RES_TYPE_VOLUME_SET) {
1077			res->bus = IPR_VSET_VIRTUAL_BUS;
1078			res->target = find_first_zero_bit(ioa_cfg->vset_ids,
1079							  ioa_cfg->max_devs_supported);
1080			set_bit(res->target, ioa_cfg->vset_ids);
1081		} else {
1082			res->target = find_first_zero_bit(ioa_cfg->target_ids,
1083							  ioa_cfg->max_devs_supported);
1084			set_bit(res->target, ioa_cfg->target_ids);
1085		}
1086	} else {
1087		proto = cfgtew->u.cfgte->proto;
1088		res->qmodel = IPR_QUEUEING_MODEL(res);
1089		res->flags = cfgtew->u.cfgte->flags;
1090		if (res->flags & IPR_IS_IOA_RESOURCE)
1091			res->type = IPR_RES_TYPE_IOAFP;
1092		else
1093			res->type = cfgtew->u.cfgte->rsvd_subtype & 0x0f;
1094
1095		res->bus = cfgtew->u.cfgte->res_addr.bus;
1096		res->target = cfgtew->u.cfgte->res_addr.target;
1097		res->lun = cfgtew->u.cfgte->res_addr.lun;
1098	}
1099
1100	ipr_update_ata_class(res, proto);
1101}
1102
1103/**
1104 * ipr_is_same_device - Determine if two devices are the same.
1105 * @res:	resource entry struct
1106 * @cfgtew:	config table entry wrapper struct
1107 *
1108 * Return value:
1109 * 	1 if the devices are the same / 0 otherwise
1110 **/
1111static int ipr_is_same_device(struct ipr_resource_entry *res,
1112			      struct ipr_config_table_entry_wrapper *cfgtew)
1113{
1114	if (res->ioa_cfg->sis64) {
1115		if (!memcmp(&res->dev_id, &cfgtew->u.cfgte64->dev_id,
1116					sizeof(cfgtew->u.cfgte64->dev_id)) &&
1117			!memcmp(&res->lun, &cfgtew->u.cfgte64->lun,
1118					sizeof(cfgtew->u.cfgte64->lun))) {
1119			return 1;
1120		}
1121	} else {
1122		if (res->bus == cfgtew->u.cfgte->res_addr.bus &&
1123		    res->target == cfgtew->u.cfgte->res_addr.target &&
1124		    res->lun == cfgtew->u.cfgte->res_addr.lun)
1125			return 1;
1126	}
1127
1128	return 0;
1129}
1130
1131/**
1132 * ipr_format_resource_path - Format the resource path for printing.
1133 * @res_path:	resource path
1134 * @buf:	buffer
1135 *
1136 * Return value:
1137 * 	pointer to buffer
1138 **/
1139static char *ipr_format_resource_path(u8 *res_path, char *buffer)
1140{
1141	int i, len;
1142
1143	len = sprintf(buffer, "%02X", res_path[0]);
1144	for (i = 1; res_path[i] != 0xff; i++)
1145		len += sprintf(buffer + len, "-%02X", res_path[i]);
1146
1147	return buffer;
1148}
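/*
 * For example (illustrative only): a res_path of { 0x00, 0x01, 0xff, ... }
 * is formatted as "00-01"; formatting stops at the first 0xff terminator.
 */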
1149
1150/**
1151 * ipr_update_res_entry - Update the resource entry.
1152 * @res:	resource entry struct
1153 * @cfgtew:	config table entry wrapper struct
1154 *
1155 * Return value:
1156 *      none
1157 **/
1158static void ipr_update_res_entry(struct ipr_resource_entry *res,
1159				 struct ipr_config_table_entry_wrapper *cfgtew)
1160{
1161	char buffer[IPR_MAX_RES_PATH_LENGTH];
1162	unsigned int proto;
1163	int new_path = 0;
1164
1165	if (res->ioa_cfg->sis64) {
1166		res->flags = cfgtew->u.cfgte64->flags;
1167		res->res_flags = cfgtew->u.cfgte64->res_flags;
1168		res->type = cfgtew->u.cfgte64->res_type & 0x0f;
1169
1170		memcpy(&res->std_inq_data, &cfgtew->u.cfgte64->std_inq_data,
1171			sizeof(struct ipr_std_inq_data));
1172
1173		res->qmodel = IPR_QUEUEING_MODEL64(res);
1174		proto = cfgtew->u.cfgte64->proto;
1175		res->res_handle = cfgtew->u.cfgte64->res_handle;
1176		res->dev_id = cfgtew->u.cfgte64->dev_id;
1177
1178		memcpy(&res->dev_lun.scsi_lun, &cfgtew->u.cfgte64->lun,
1179			sizeof(res->dev_lun.scsi_lun));
1180
1181		if (memcmp(res->res_path, &cfgtew->u.cfgte64->res_path,
1182					sizeof(res->res_path))) {
1183			memcpy(res->res_path, &cfgtew->u.cfgte64->res_path,
1184				sizeof(res->res_path));
1185			new_path = 1;
1186		}
1187
1188		if (res->sdev && new_path)
1189			sdev_printk(KERN_INFO, res->sdev, "Resource path: %s\n",
1190				    ipr_format_resource_path(&res->res_path[0], &buffer[0]));
1191	} else {
1192		res->flags = cfgtew->u.cfgte->flags;
1193		if (res->flags & IPR_IS_IOA_RESOURCE)
1194			res->type = IPR_RES_TYPE_IOAFP;
1195		else
1196			res->type = cfgtew->u.cfgte->rsvd_subtype & 0x0f;
1197
1198		memcpy(&res->std_inq_data, &cfgtew->u.cfgte->std_inq_data,
1199			sizeof(struct ipr_std_inq_data));
1200
1201		res->qmodel = IPR_QUEUEING_MODEL(res);
1202		proto = cfgtew->u.cfgte->proto;
1203		res->res_handle = cfgtew->u.cfgte->res_handle;
1204	}
1205
1206	ipr_update_ata_class(res, proto);
1207}
1208
1209/**
1210 * ipr_clear_res_target - Clear the bit in the bit map representing the target
1211 * 			  for the resource.
1212 * @res:	resource entry struct
1214 *
1215 * Return value:
1216 *      none
1217 **/
1218static void ipr_clear_res_target(struct ipr_resource_entry *res)
1219{
1220	struct ipr_resource_entry *gscsi_res = NULL;
1221	struct ipr_ioa_cfg *ioa_cfg = res->ioa_cfg;
1222
1223	if (!ioa_cfg->sis64)
1224		return;
1225
1226	if (res->bus == IPR_ARRAY_VIRTUAL_BUS)
1227		clear_bit(res->target, ioa_cfg->array_ids);
1228	else if (res->bus == IPR_VSET_VIRTUAL_BUS)
1229		clear_bit(res->target, ioa_cfg->vset_ids);
1230	else if (res->bus == 0 && res->type == IPR_RES_TYPE_GENERIC_SCSI) {
1231		list_for_each_entry(gscsi_res, &ioa_cfg->used_res_q, queue)
1232			if (gscsi_res->dev_id == res->dev_id && gscsi_res != res)
1233				return;
1234		clear_bit(res->target, ioa_cfg->target_ids);
1235
1236	} else if (res->bus == 0)
1237		clear_bit(res->target, ioa_cfg->target_ids);
1238}
1239
1240/**
1241 * ipr_handle_config_change - Handle a config change from the adapter
1242 * @ioa_cfg:	ioa config struct
1243 * @hostrcb:	hostrcb
1244 *
1245 * Return value:
1246 * 	none
1247 **/
1248static void ipr_handle_config_change(struct ipr_ioa_cfg *ioa_cfg,
1249				     struct ipr_hostrcb *hostrcb)
1250{
1251	struct ipr_resource_entry *res = NULL;
1252	struct ipr_config_table_entry_wrapper cfgtew;
1253	__be32 cc_res_handle;
1254
1255	u32 is_ndn = 1;
1256
1257	if (ioa_cfg->sis64) {
1258		cfgtew.u.cfgte64 = &hostrcb->hcam.u.ccn.u.cfgte64;
1259		cc_res_handle = cfgtew.u.cfgte64->res_handle;
1260	} else {
1261		cfgtew.u.cfgte = &hostrcb->hcam.u.ccn.u.cfgte;
1262		cc_res_handle = cfgtew.u.cfgte->res_handle;
1263	}
1264
1265	list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
1266		if (res->res_handle == cc_res_handle) {
1267			is_ndn = 0;
1268			break;
1269		}
1270	}
1271
1272	if (is_ndn) {
1273		if (list_empty(&ioa_cfg->free_res_q)) {
1274			ipr_send_hcam(ioa_cfg,
1275				      IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE,
1276				      hostrcb);
1277			return;
1278		}
1279
1280		res = list_entry(ioa_cfg->free_res_q.next,
1281				 struct ipr_resource_entry, queue);
1282
1283		list_del(&res->queue);
1284		ipr_init_res_entry(res, &cfgtew);
1285		list_add_tail(&res->queue, &ioa_cfg->used_res_q);
1286	}
1287
1288	ipr_update_res_entry(res, &cfgtew);
1289
1290	if (hostrcb->hcam.notify_type == IPR_HOST_RCB_NOTIF_TYPE_REM_ENTRY) {
1291		if (res->sdev) {
1292			res->del_from_ml = 1;
1293			res->res_handle = IPR_INVALID_RES_HANDLE;
1294			if (ioa_cfg->allow_ml_add_del)
1295				schedule_work(&ioa_cfg->work_q);
1296		} else {
1297			ipr_clear_res_target(res);
1298			list_move_tail(&res->queue, &ioa_cfg->free_res_q);
1299		}
1300	} else if (!res->sdev) {
1301		res->add_to_ml = 1;
1302		if (ioa_cfg->allow_ml_add_del)
1303			schedule_work(&ioa_cfg->work_q);
1304	}
1305
1306	ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
1307}
1308
1309/**
1310 * ipr_process_ccn - Op done function for a CCN.
1311 * @ipr_cmd:	ipr command struct
1312 *
1313 * This function is the op done function for a configuration
1314 * change notification host controlled async from the adapter.
1315 *
1316 * Return value:
1317 * 	none
1318 **/
1319static void ipr_process_ccn(struct ipr_cmnd *ipr_cmd)
1320{
1321	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
1322	struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb;
1323	u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
1324
1325	list_del(&hostrcb->queue);
1326	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
1327
1328	if (ioasc) {
1329		if (ioasc != IPR_IOASC_IOA_WAS_RESET)
1330			dev_err(&ioa_cfg->pdev->dev,
1331				"Host RCB failed with IOASC: 0x%08X\n", ioasc);
1332
1333		ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
1334	} else {
1335		ipr_handle_config_change(ioa_cfg, hostrcb);
1336	}
1337}
1338
1339/**
1340 * strip_and_pad_whitespace - Strip and pad trailing whitespace.
1341 * @i:		index into buffer
1342 * @buf:		string to modify
1343 *
1344 * This function will strip all trailing whitespace, pad the end
1345 * of the string with a single space, and NULL terminate the string.
1346 *
1347 * Return value:
1348 * 	new length of string
1349 **/
1350static int strip_and_pad_whitespace(int i, char *buf)
1351{
1352	while (i && buf[i] == ' ')
1353		i--;
1354	buf[i+1] = ' ';
1355	buf[i+2] = '\0';
1356	return i + 2;
1357}
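/*
 * Worked example (illustrative only): for the 8 byte vendor id "IBM     "
 * and a starting index of 7, the loop backs up to the 'M' at index 2,
 * a single space is written at index 3 and a '\0' at index 4, and 4 is
 * returned -- the offset at which ipr_log_vpd_compact() below appends
 * the product id.
 */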
1358
1359/**
1360 * ipr_log_vpd_compact - Log the passed extended VPD compactly.
1361 * @prefix:		string to print at start of printk
1362 * @hostrcb:	hostrcb pointer
1363 * @vpd:		vendor/product id/sn struct
1364 *
1365 * Return value:
1366 * 	none
1367 **/
1368static void ipr_log_vpd_compact(char *prefix, struct ipr_hostrcb *hostrcb,
1369				struct ipr_vpd *vpd)
1370{
1371	char buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN + IPR_SERIAL_NUM_LEN + 3];
1372	int i = 0;
1373
1374	memcpy(buffer, vpd->vpids.vendor_id, IPR_VENDOR_ID_LEN);
1375	i = strip_and_pad_whitespace(IPR_VENDOR_ID_LEN - 1, buffer);
1376
1377	memcpy(&buffer[i], vpd->vpids.product_id, IPR_PROD_ID_LEN);
1378	i = strip_and_pad_whitespace(i + IPR_PROD_ID_LEN - 1, buffer);
1379
1380	memcpy(&buffer[i], vpd->sn, IPR_SERIAL_NUM_LEN);
1381	buffer[IPR_SERIAL_NUM_LEN + i] = '\0';
1382
1383	ipr_hcam_err(hostrcb, "%s VPID/SN: %s\n", prefix, buffer);
1384}
1385
1386/**
1387 * ipr_log_vpd - Log the passed VPD to the error log.
1388 * @vpd:		vendor/product id/sn struct
1389 *
1390 * Return value:
1391 * 	none
1392 **/
1393static void ipr_log_vpd(struct ipr_vpd *vpd)
1394{
1395	char buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN
1396		    + IPR_SERIAL_NUM_LEN];
1397
1398	memcpy(buffer, vpd->vpids.vendor_id, IPR_VENDOR_ID_LEN);
1399	memcpy(buffer + IPR_VENDOR_ID_LEN, vpd->vpids.product_id,
1400	       IPR_PROD_ID_LEN);
1401	buffer[IPR_VENDOR_ID_LEN + IPR_PROD_ID_LEN] = '\0';
1402	ipr_err("Vendor/Product ID: %s\n", buffer);
1403
1404	memcpy(buffer, vpd->sn, IPR_SERIAL_NUM_LEN);
1405	buffer[IPR_SERIAL_NUM_LEN] = '\0';
1406	ipr_err("    Serial Number: %s\n", buffer);
1407}
1408
1409/**
1410 * ipr_log_ext_vpd_compact - Log the passed extended VPD compactly.
1411 * @prefix:		string to print at start of printk
1412 * @hostrcb:	hostrcb pointer
1413 * @vpd:		vendor/product id/sn/wwn struct
1414 *
1415 * Return value:
1416 * 	none
1417 **/
1418static void ipr_log_ext_vpd_compact(char *prefix, struct ipr_hostrcb *hostrcb,
1419				    struct ipr_ext_vpd *vpd)
1420{
1421	ipr_log_vpd_compact(prefix, hostrcb, &vpd->vpd);
1422	ipr_hcam_err(hostrcb, "%s WWN: %08X%08X\n", prefix,
1423		     be32_to_cpu(vpd->wwid[0]), be32_to_cpu(vpd->wwid[1]));
1424}
1425
1426/**
1427 * ipr_log_ext_vpd - Log the passed extended VPD to the error log.
1428 * @vpd:		vendor/product id/sn/wwn struct
1429 *
1430 * Return value:
1431 * 	none
1432 **/
1433static void ipr_log_ext_vpd(struct ipr_ext_vpd *vpd)
1434{
1435	ipr_log_vpd(&vpd->vpd);
1436	ipr_err("    WWN: %08X%08X\n", be32_to_cpu(vpd->wwid[0]),
1437		be32_to_cpu(vpd->wwid[1]));
1438}
1439
1440/**
1441 * ipr_log_enhanced_cache_error - Log a cache error.
1442 * @ioa_cfg:	ioa config struct
1443 * @hostrcb:	hostrcb struct
1444 *
1445 * Return value:
1446 * 	none
1447 **/
1448static void ipr_log_enhanced_cache_error(struct ipr_ioa_cfg *ioa_cfg,
1449					 struct ipr_hostrcb *hostrcb)
1450{
1451	struct ipr_hostrcb_type_12_error *error;
1452
1453	if (ioa_cfg->sis64)
1454		error = &hostrcb->hcam.u.error64.u.type_12_error;
1455	else
1456		error = &hostrcb->hcam.u.error.u.type_12_error;
1457
1458	ipr_err("-----Current Configuration-----\n");
1459	ipr_err("Cache Directory Card Information:\n");
1460	ipr_log_ext_vpd(&error->ioa_vpd);
1461	ipr_err("Adapter Card Information:\n");
1462	ipr_log_ext_vpd(&error->cfc_vpd);
1463
1464	ipr_err("-----Expected Configuration-----\n");
1465	ipr_err("Cache Directory Card Information:\n");
1466	ipr_log_ext_vpd(&error->ioa_last_attached_to_cfc_vpd);
1467	ipr_err("Adapter Card Information:\n");
1468	ipr_log_ext_vpd(&error->cfc_last_attached_to_ioa_vpd);
1469
1470	ipr_err("Additional IOA Data: %08X %08X %08X\n",
1471		     be32_to_cpu(error->ioa_data[0]),
1472		     be32_to_cpu(error->ioa_data[1]),
1473		     be32_to_cpu(error->ioa_data[2]));
1474}
1475
1476/**
1477 * ipr_log_cache_error - Log a cache error.
1478 * @ioa_cfg:	ioa config struct
1479 * @hostrcb:	hostrcb struct
1480 *
1481 * Return value:
1482 * 	none
1483 **/
1484static void ipr_log_cache_error(struct ipr_ioa_cfg *ioa_cfg,
1485				struct ipr_hostrcb *hostrcb)
1486{
1487	struct ipr_hostrcb_type_02_error *error =
1488		&hostrcb->hcam.u.error.u.type_02_error;
1489
1490	ipr_err("-----Current Configuration-----\n");
1491	ipr_err("Cache Directory Card Information:\n");
1492	ipr_log_vpd(&error->ioa_vpd);
1493	ipr_err("Adapter Card Information:\n");
1494	ipr_log_vpd(&error->cfc_vpd);
1495
1496	ipr_err("-----Expected Configuration-----\n");
1497	ipr_err("Cache Directory Card Information:\n");
1498	ipr_log_vpd(&error->ioa_last_attached_to_cfc_vpd);
1499	ipr_err("Adapter Card Information:\n");
1500	ipr_log_vpd(&error->cfc_last_attached_to_ioa_vpd);
1501
1502	ipr_err("Additional IOA Data: %08X %08X %08X\n",
1503		     be32_to_cpu(error->ioa_data[0]),
1504		     be32_to_cpu(error->ioa_data[1]),
1505		     be32_to_cpu(error->ioa_data[2]));
1506}
1507
1508/**
1509 * ipr_log_enhanced_config_error - Log a configuration error.
1510 * @ioa_cfg:	ioa config struct
1511 * @hostrcb:	hostrcb struct
1512 *
1513 * Return value:
1514 * 	none
1515 **/
1516static void ipr_log_enhanced_config_error(struct ipr_ioa_cfg *ioa_cfg,
1517					  struct ipr_hostrcb *hostrcb)
1518{
1519	int errors_logged, i;
1520	struct ipr_hostrcb_device_data_entry_enhanced *dev_entry;
1521	struct ipr_hostrcb_type_13_error *error;
1522
1523	error = &hostrcb->hcam.u.error.u.type_13_error;
1524	errors_logged = be32_to_cpu(error->errors_logged);
1525
1526	ipr_err("Device Errors Detected/Logged: %d/%d\n",
1527		be32_to_cpu(error->errors_detected), errors_logged);
1528
1529	dev_entry = error->dev;
1530
1531	for (i = 0; i < errors_logged; i++, dev_entry++) {
1532		ipr_err_separator;
1533
1534		ipr_phys_res_err(ioa_cfg, dev_entry->dev_res_addr, "Device %d", i + 1);
1535		ipr_log_ext_vpd(&dev_entry->vpd);
1536
1537		ipr_err("-----New Device Information-----\n");
1538		ipr_log_ext_vpd(&dev_entry->new_vpd);
1539
1540		ipr_err("Cache Directory Card Information:\n");
1541		ipr_log_ext_vpd(&dev_entry->ioa_last_with_dev_vpd);
1542
1543		ipr_err("Adapter Card Information:\n");
1544		ipr_log_ext_vpd(&dev_entry->cfc_last_with_dev_vpd);
1545	}
1546}
1547
1548/**
1549 * ipr_log_sis64_config_error - Log a device error.
1550 * @ioa_cfg:	ioa config struct
1551 * @hostrcb:	hostrcb struct
1552 *
1553 * Return value:
1554 * 	none
1555 **/
1556static void ipr_log_sis64_config_error(struct ipr_ioa_cfg *ioa_cfg,
1557				       struct ipr_hostrcb *hostrcb)
1558{
1559	int errors_logged, i;
1560	struct ipr_hostrcb64_device_data_entry_enhanced *dev_entry;
1561	struct ipr_hostrcb_type_23_error *error;
1562	char buffer[IPR_MAX_RES_PATH_LENGTH];
1563
1564	error = &hostrcb->hcam.u.error64.u.type_23_error;
1565	errors_logged = be32_to_cpu(error->errors_logged);
1566
1567	ipr_err("Device Errors Detected/Logged: %d/%d\n",
1568		be32_to_cpu(error->errors_detected), errors_logged);
1569
1570	dev_entry = error->dev;
1571
1572	for (i = 0; i < errors_logged; i++, dev_entry++) {
1573		ipr_err_separator;
1574
1575		ipr_err("Device %d : %s", i + 1,
1576			 ipr_format_resource_path(&dev_entry->res_path[0], &buffer[0]));
1577		ipr_log_ext_vpd(&dev_entry->vpd);
1578
1579		ipr_err("-----New Device Information-----\n");
1580		ipr_log_ext_vpd(&dev_entry->new_vpd);
1581
1582		ipr_err("Cache Directory Card Information:\n");
1583		ipr_log_ext_vpd(&dev_entry->ioa_last_with_dev_vpd);
1584
1585		ipr_err("Adapter Card Information:\n");
1586		ipr_log_ext_vpd(&dev_entry->cfc_last_with_dev_vpd);
1587	}
1588}
1589
1590/**
1591 * ipr_log_config_error - Log a configuration error.
1592 * @ioa_cfg:	ioa config struct
1593 * @hostrcb:	hostrcb struct
1594 *
1595 * Return value:
1596 * 	none
1597 **/
1598static void ipr_log_config_error(struct ipr_ioa_cfg *ioa_cfg,
1599				 struct ipr_hostrcb *hostrcb)
1600{
1601	int errors_logged, i;
1602	struct ipr_hostrcb_device_data_entry *dev_entry;
1603	struct ipr_hostrcb_type_03_error *error;
1604
1605	error = &hostrcb->hcam.u.error.u.type_03_error;
1606	errors_logged = be32_to_cpu(error->errors_logged);
1607
1608	ipr_err("Device Errors Detected/Logged: %d/%d\n",
1609		be32_to_cpu(error->errors_detected), errors_logged);
1610
1611	dev_entry = error->dev;
1612
1613	for (i = 0; i < errors_logged; i++, dev_entry++) {
1614		ipr_err_separator;
1615
1616		ipr_phys_res_err(ioa_cfg, dev_entry->dev_res_addr, "Device %d", i + 1);
1617		ipr_log_vpd(&dev_entry->vpd);
1618
1619		ipr_err("-----New Device Information-----\n");
1620		ipr_log_vpd(&dev_entry->new_vpd);
1621
1622		ipr_err("Cache Directory Card Information:\n");
1623		ipr_log_vpd(&dev_entry->ioa_last_with_dev_vpd);
1624
1625		ipr_err("Adapter Card Information:\n");
1626		ipr_log_vpd(&dev_entry->cfc_last_with_dev_vpd);
1627
1628		ipr_err("Additional IOA Data: %08X %08X %08X %08X %08X\n",
1629			be32_to_cpu(dev_entry->ioa_data[0]),
1630			be32_to_cpu(dev_entry->ioa_data[1]),
1631			be32_to_cpu(dev_entry->ioa_data[2]),
1632			be32_to_cpu(dev_entry->ioa_data[3]),
1633			be32_to_cpu(dev_entry->ioa_data[4]));
1634	}
1635}
1636
1637/**
1638 * ipr_log_enhanced_array_error - Log an array configuration error.
1639 * @ioa_cfg:	ioa config struct
1640 * @hostrcb:	hostrcb struct
1641 *
1642 * Return value:
1643 * 	none
1644 **/
1645static void ipr_log_enhanced_array_error(struct ipr_ioa_cfg *ioa_cfg,
1646					 struct ipr_hostrcb *hostrcb)
1647{
1648	int i, num_entries;
1649	struct ipr_hostrcb_type_14_error *error;
1650	struct ipr_hostrcb_array_data_entry_enhanced *array_entry;
1651	const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };
1652
1653	error = &hostrcb->hcam.u.error.u.type_14_error;
1654
1655	ipr_err_separator;
1656
1657	ipr_err("RAID %s Array Configuration: %d:%d:%d:%d\n",
1658		error->protection_level,
1659		ioa_cfg->host->host_no,
1660		error->last_func_vset_res_addr.bus,
1661		error->last_func_vset_res_addr.target,
1662		error->last_func_vset_res_addr.lun);
1663
1664	ipr_err_separator;
1665
1666	array_entry = error->array_member;
1667	num_entries = min_t(u32, be32_to_cpu(error->num_entries),
1668			    ARRAY_SIZE(error->array_member));
1669
1670	for (i = 0; i < num_entries; i++, array_entry++) {
1671		if (!memcmp(array_entry->vpd.vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
1672			continue;
1673
1674		if (be32_to_cpu(error->exposed_mode_adn) == i)
1675			ipr_err("Exposed Array Member %d:\n", i);
1676		else
1677			ipr_err("Array Member %d:\n", i);
1678
1679		ipr_log_ext_vpd(&array_entry->vpd);
1680		ipr_phys_res_err(ioa_cfg, array_entry->dev_res_addr, "Current Location");
1681		ipr_phys_res_err(ioa_cfg, array_entry->expected_dev_res_addr,
1682				 "Expected Location");
1683
1684		ipr_err_separator;
1685	}
1686}
1687
1688/**
1689 * ipr_log_array_error - Log an array configuration error.
1690 * @ioa_cfg:	ioa config struct
1691 * @hostrcb:	hostrcb struct
1692 *
1693 * Return value:
1694 * 	none
1695 **/
1696static void ipr_log_array_error(struct ipr_ioa_cfg *ioa_cfg,
1697				struct ipr_hostrcb *hostrcb)
1698{
1699	int i;
1700	struct ipr_hostrcb_type_04_error *error;
1701	struct ipr_hostrcb_array_data_entry *array_entry;
1702	const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };
1703
1704	error = &hostrcb->hcam.u.error.u.type_04_error;
1705
1706	ipr_err_separator;
1707
1708	ipr_err("RAID %s Array Configuration: %d:%d:%d:%d\n",
1709		error->protection_level,
1710		ioa_cfg->host->host_no,
1711		error->last_func_vset_res_addr.bus,
1712		error->last_func_vset_res_addr.target,
1713		error->last_func_vset_res_addr.lun);
1714
1715	ipr_err_separator;
1716
1717	array_entry = error->array_member;
1718
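	/* The hostrcb splits up to 18 array member entries across two fixed
	 * arrays: the walk below takes the first ten from array_member and
	 * the remaining eight from array_member2 (the i == 9 check).
	 */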
1719	for (i = 0; i < 18; i++) {
1720		if (!memcmp(array_entry->vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
1721			continue;
1722
1723		if (be32_to_cpu(error->exposed_mode_adn) == i)
1724			ipr_err("Exposed Array Member %d:\n", i);
1725		else
1726			ipr_err("Array Member %d:\n", i);
1727
1728		ipr_log_vpd(&array_entry->vpd);
1729
1730		ipr_phys_res_err(ioa_cfg, array_entry->dev_res_addr, "Current Location");
1731		ipr_phys_res_err(ioa_cfg, array_entry->expected_dev_res_addr,
1732				 "Expected Location");
1733
1734		ipr_err_separator;
1735
1736		if (i == 9)
1737			array_entry = error->array_member2;
1738		else
1739			array_entry++;
1740	}
1741}
1742
1743/**
1744 * ipr_log_hex_data - Log additional hex IOA error data.
1745 * @ioa_cfg:	ioa config struct
1746 * @data:		IOA error data
1747 * @len:		data length
1748 *
1749 * Return value:
1750 * 	none
1751 **/
1752static void ipr_log_hex_data(struct ipr_ioa_cfg *ioa_cfg, u32 *data, int len)
1753{
1754	int i;
1755
1756	if (len == 0)
1757		return;
1758
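	/* At the default log level, cap how much raw data is dumped; the
	 * loop below prints four 32-bit words per line, prefixed with the
	 * byte offset into the error data.
	 */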
1759	if (ioa_cfg->log_level <= IPR_DEFAULT_LOG_LEVEL)
1760		len = min_t(int, len, IPR_DEFAULT_MAX_ERROR_DUMP);
1761
1762	for (i = 0; i < len / 4; i += 4) {
1763		ipr_err("%08X: %08X %08X %08X %08X\n", i*4,
1764			be32_to_cpu(data[i]),
1765			be32_to_cpu(data[i+1]),
1766			be32_to_cpu(data[i+2]),
1767			be32_to_cpu(data[i+3]));
1768	}
1769}
1770
1771/**
1772 * ipr_log_enhanced_dual_ioa_error - Log an enhanced dual adapter error.
1773 * @ioa_cfg:	ioa config struct
1774 * @hostrcb:	hostrcb struct
1775 *
1776 * Return value:
1777 * 	none
1778 **/
1779static void ipr_log_enhanced_dual_ioa_error(struct ipr_ioa_cfg *ioa_cfg,
1780					    struct ipr_hostrcb *hostrcb)
1781{
1782	struct ipr_hostrcb_type_17_error *error;
1783
1784	if (ioa_cfg->sis64)
1785		error = &hostrcb->hcam.u.error64.u.type_17_error;
1786	else
1787		error = &hostrcb->hcam.u.error.u.type_17_error;
1788
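	/* The failure reason comes straight from the adapter; make sure it
	 * is NULL terminated and strip trailing whitespace before logging.
	 */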
1789	error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
1790	strim(error->failure_reason);
1791
1792	ipr_hcam_err(hostrcb, "%s [PRC: %08X]\n", error->failure_reason,
1793		     be32_to_cpu(hostrcb->hcam.u.error.prc));
1794	ipr_log_ext_vpd_compact("Remote IOA", hostrcb, &error->vpd);
1795	ipr_log_hex_data(ioa_cfg, error->data,
1796			 be32_to_cpu(hostrcb->hcam.length) -
1797			 (offsetof(struct ipr_hostrcb_error, u) +
1798			  offsetof(struct ipr_hostrcb_type_17_error, data)));
1799}
1800
1801/**
1802 * ipr_log_dual_ioa_error - Log a dual adapter error.
1803 * @ioa_cfg:	ioa config struct
1804 * @hostrcb:	hostrcb struct
1805 *
1806 * Return value:
1807 * 	none
1808 **/
1809static void ipr_log_dual_ioa_error(struct ipr_ioa_cfg *ioa_cfg,
1810				   struct ipr_hostrcb *hostrcb)
1811{
1812	struct ipr_hostrcb_type_07_error *error;
1813
1814	error = &hostrcb->hcam.u.error.u.type_07_error;
1815	error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
1816	strim(error->failure_reason);
1817
1818	ipr_hcam_err(hostrcb, "%s [PRC: %08X]\n", error->failure_reason,
1819		     be32_to_cpu(hostrcb->hcam.u.error.prc));
1820	ipr_log_vpd_compact("Remote IOA", hostrcb, &error->vpd);
1821	ipr_log_hex_data(ioa_cfg, error->data,
1822			 be32_to_cpu(hostrcb->hcam.length) -
1823			 (offsetof(struct ipr_hostrcb_error, u) +
1824			  offsetof(struct ipr_hostrcb_type_07_error, data)));
1825}
1826
1827static const struct {
1828	u8 active;
1829	char *desc;
1830} path_active_desc[] = {
1831	{ IPR_PATH_NO_INFO, "Path" },
1832	{ IPR_PATH_ACTIVE, "Active path" },
1833	{ IPR_PATH_NOT_ACTIVE, "Inactive path" }
1834};
1835
1836static const struct {
1837	u8 state;
1838	char *desc;
1839} path_state_desc[] = {
1840	{ IPR_PATH_STATE_NO_INFO, "has no path state information available" },
1841	{ IPR_PATH_HEALTHY, "is healthy" },
1842	{ IPR_PATH_DEGRADED, "is degraded" },
1843	{ IPR_PATH_FAILED, "is failed" }
1844};
1845
1846/**
1847 * ipr_log_fabric_path - Log a fabric path error
1848 * @hostrcb:	hostrcb struct
1849 * @fabric:		fabric descriptor
1850 *
1851 * Return value:
1852 * 	none
1853 **/
1854static void ipr_log_fabric_path(struct ipr_hostrcb *hostrcb,
1855				struct ipr_hostrcb_fabric_desc *fabric)
1856{
1857	int i, j;
1858	u8 path_state = fabric->path_state;
1859	u8 active = path_state & IPR_PATH_ACTIVE_MASK;
1860	u8 state = path_state & IPR_PATH_STATE_MASK;
1861
1862	for (i = 0; i < ARRAY_SIZE(path_active_desc); i++) {
1863		if (path_active_desc[i].active != active)
1864			continue;
1865
1866		for (j = 0; j < ARRAY_SIZE(path_state_desc); j++) {
1867			if (path_state_desc[j].state != state)
1868				continue;
1869
1870			if (fabric->cascaded_expander == 0xff && fabric->phy == 0xff) {
1871				ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d\n",
1872					     path_active_desc[i].desc, path_state_desc[j].desc,
1873					     fabric->ioa_port);
1874			} else if (fabric->cascaded_expander == 0xff) {
1875				ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Phy=%d\n",
1876					     path_active_desc[i].desc, path_state_desc[j].desc,
1877					     fabric->ioa_port, fabric->phy);
1878			} else if (fabric->phy == 0xff) {
1879				ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Cascade=%d\n",
1880					     path_active_desc[i].desc, path_state_desc[j].desc,
1881					     fabric->ioa_port, fabric->cascaded_expander);
1882			} else {
1883				ipr_hcam_err(hostrcb, "%s %s: IOA Port=%d, Cascade=%d, Phy=%d\n",
1884					     path_active_desc[i].desc, path_state_desc[j].desc,
1885					     fabric->ioa_port, fabric->cascaded_expander, fabric->phy);
1886			}
1887			return;
1888		}
1889	}
1890
1891	ipr_err("Path state=%02X IOA Port=%d Cascade=%d Phy=%d\n", path_state,
1892		fabric->ioa_port, fabric->cascaded_expander, fabric->phy);
1893}
1894
1895/**
1896 * ipr_log64_fabric_path - Log a fabric path error
1897 * @hostrcb:	hostrcb struct
1898 * @fabric:		fabric descriptor
1899 *
1900 * Return value:
1901 * 	none
1902 **/
1903static void ipr_log64_fabric_path(struct ipr_hostrcb *hostrcb,
1904				  struct ipr_hostrcb64_fabric_desc *fabric)
1905{
1906	int i, j;
1907	u8 path_state = fabric->path_state;
1908	u8 active = path_state & IPR_PATH_ACTIVE_MASK;
1909	u8 state = path_state & IPR_PATH_STATE_MASK;
1910	char buffer[IPR_MAX_RES_PATH_LENGTH];
1911
1912	for (i = 0; i < ARRAY_SIZE(path_active_desc); i++) {
1913		if (path_active_desc[i].active != active)
1914			continue;
1915
1916		for (j = 0; j < ARRAY_SIZE(path_state_desc); j++) {
1917			if (path_state_desc[j].state != state)
1918				continue;
1919
1920			ipr_hcam_err(hostrcb, "%s %s: Resource Path=%s\n",
1921				     path_active_desc[i].desc, path_state_desc[j].desc,
1922				     ipr_format_resource_path(&fabric->res_path[0], &buffer[0]));
1923			return;
1924		}
1925	}
1926
1927	ipr_err("Path state=%02X Resource Path=%s\n", path_state,
1928		ipr_format_resource_path(&fabric->res_path[0], &buffer[0]));
1929}
1930
1931static const struct {
1932	u8 type;
1933	char *desc;
1934} path_type_desc[] = {
1935	{ IPR_PATH_CFG_IOA_PORT, "IOA port" },
1936	{ IPR_PATH_CFG_EXP_PORT, "Expander port" },
1937	{ IPR_PATH_CFG_DEVICE_PORT, "Device port" },
1938	{ IPR_PATH_CFG_DEVICE_LUN, "Device LUN" }
1939};
1940
1941static const struct {
1942	u8 status;
1943	char *desc;
1944} path_status_desc[] = {
1945	{ IPR_PATH_CFG_NO_PROB, "Functional" },
1946	{ IPR_PATH_CFG_DEGRADED, "Degraded" },
1947	{ IPR_PATH_CFG_FAILED, "Failed" },
1948	{ IPR_PATH_CFG_SUSPECT, "Suspect" },
1949	{ IPR_PATH_NOT_DETECTED, "Missing" },
1950	{ IPR_PATH_INCORRECT_CONN, "Incorrectly connected" }
1951};
1952
1953static const char *link_rate[] = {
1954	"unknown",
1955	"disabled",
1956	"phy reset problem",
1957	"spinup hold",
1958	"port selector",
1959	"unknown",
1960	"unknown",
1961	"unknown",
1962	"1.5Gbps",
1963	"3.0Gbps",
1964	"unknown",
1965	"unknown",
1966	"unknown",
1967	"unknown",
1968	"unknown",
1969	"unknown"
1970};
1971
1972/**
1973 * ipr_log_path_elem - Log a fabric path element.
1974 * @hostrcb:	hostrcb struct
1975 * @cfg:		fabric path element struct
1976 *
1977 * Return value:
1978 * 	none
1979 **/
1980static void ipr_log_path_elem(struct ipr_hostrcb *hostrcb,
1981			      struct ipr_hostrcb_config_element *cfg)
1982{
1983	int i, j;
1984	u8 type = cfg->type_status & IPR_PATH_CFG_TYPE_MASK;
1985	u8 status = cfg->type_status & IPR_PATH_CFG_STATUS_MASK;
1986
1987	if (type == IPR_PATH_CFG_NOT_EXIST)
1988		return;
1989
1990	for (i = 0; i < ARRAY_SIZE(path_type_desc); i++) {
1991		if (path_type_desc[i].type != type)
1992			continue;
1993
1994		for (j = 0; j < ARRAY_SIZE(path_status_desc); j++) {
1995			if (path_status_desc[j].status != status)
1996				continue;
1997
1998			if (type == IPR_PATH_CFG_IOA_PORT) {
1999				ipr_hcam_err(hostrcb, "%s %s: Phy=%d, Link rate=%s, WWN=%08X%08X\n",
2000					     path_status_desc[j].desc, path_type_desc[i].desc,
2001					     cfg->phy, link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2002					     be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2003			} else {
2004				if (cfg->cascaded_expander == 0xff && cfg->phy == 0xff) {
2005					ipr_hcam_err(hostrcb, "%s %s: Link rate=%s, WWN=%08X%08X\n",
2006						     path_status_desc[j].desc, path_type_desc[i].desc,
2007						     link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2008						     be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2009				} else if (cfg->cascaded_expander == 0xff) {
2010					ipr_hcam_err(hostrcb, "%s %s: Phy=%d, Link rate=%s, "
2011						     "WWN=%08X%08X\n", path_status_desc[j].desc,
2012						     path_type_desc[i].desc, cfg->phy,
2013						     link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2014						     be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2015				} else if (cfg->phy == 0xff) {
2016					ipr_hcam_err(hostrcb, "%s %s: Cascade=%d, Link rate=%s, "
2017						     "WWN=%08X%08X\n", path_status_desc[j].desc,
2018						     path_type_desc[i].desc, cfg->cascaded_expander,
2019						     link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2020						     be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2021				} else {
2022					ipr_hcam_err(hostrcb, "%s %s: Cascade=%d, Phy=%d, Link rate=%s "
2023						     "WWN=%08X%08X\n", path_status_desc[j].desc,
2024						     path_type_desc[i].desc, cfg->cascaded_expander, cfg->phy,
2025						     link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2026						     be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2027				}
2028			}
2029			return;
2030		}
2031	}
2032
2033	ipr_hcam_err(hostrcb, "Path element=%02X: Cascade=%d Phy=%d Link rate=%s "
2034		     "WWN=%08X%08X\n", cfg->type_status, cfg->cascaded_expander, cfg->phy,
2035		     link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2036		     be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2037}
2038
2039/**
2040 * ipr_log64_path_elem - Log a fabric path element.
2041 * @hostrcb:	hostrcb struct
2042 * @cfg:		fabric path element struct
2043 *
2044 * Return value:
2045 * 	none
2046 **/
2047static void ipr_log64_path_elem(struct ipr_hostrcb *hostrcb,
2048				struct ipr_hostrcb64_config_element *cfg)
2049{
2050	int i, j;
2051	u8 desc_id = cfg->descriptor_id & IPR_DESCRIPTOR_MASK;
2052	u8 type = cfg->type_status & IPR_PATH_CFG_TYPE_MASK;
2053	u8 status = cfg->type_status & IPR_PATH_CFG_STATUS_MASK;
2054	char buffer[IPR_MAX_RES_PATH_LENGTH];
2055
2056	if (type == IPR_PATH_CFG_NOT_EXIST || desc_id != IPR_DESCRIPTOR_SIS64)
2057		return;
2058
2059	for (i = 0; i < ARRAY_SIZE(path_type_desc); i++) {
2060		if (path_type_desc[i].type != type)
2061			continue;
2062
2063		for (j = 0; j < ARRAY_SIZE(path_status_desc); j++) {
2064			if (path_status_desc[j].status != status)
2065				continue;
2066
2067			ipr_hcam_err(hostrcb, "%s %s: Resource Path=%s, Link rate=%s, WWN=%08X%08X\n",
2068				     path_status_desc[j].desc, path_type_desc[i].desc,
2069				     ipr_format_resource_path(&cfg->res_path[0], &buffer[0]),
2070				     link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2071				     be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2072			return;
2073		}
2074	}
2075	ipr_hcam_err(hostrcb, "Path element=%02X: Resource Path=%s, Link rate=%s "
2076		     "WWN=%08X%08X\n", cfg->type_status,
2077		     ipr_format_resource_path(&cfg->res_path[0], &buffer[0]),
2078		     link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
2079		     be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
2080}
2081
2082/**
2083 * ipr_log_fabric_error - Log a fabric error.
2084 * @ioa_cfg:	ioa config struct
2085 * @hostrcb:	hostrcb struct
2086 *
2087 * Return value:
2088 * 	none
2089 **/
2090static void ipr_log_fabric_error(struct ipr_ioa_cfg *ioa_cfg,
2091				 struct ipr_hostrcb *hostrcb)
2092{
2093	struct ipr_hostrcb_type_20_error *error;
2094	struct ipr_hostrcb_fabric_desc *fabric;
2095	struct ipr_hostrcb_config_element *cfg;
2096	int i, add_len;
2097
2098	error = &hostrcb->hcam.u.error.u.type_20_error;
2099	error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
2100	ipr_hcam_err(hostrcb, "%s\n", error->failure_reason);
2101
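	/* add_len starts as everything after the fixed part of the error;
	 * each fabric descriptor logged below is subtracted, and whatever
	 * remains is dumped as raw hex at the end.
	 */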
2102	add_len = be32_to_cpu(hostrcb->hcam.length) -
2103		(offsetof(struct ipr_hostrcb_error, u) +
2104		 offsetof(struct ipr_hostrcb_type_20_error, desc));
2105
2106	for (i = 0, fabric = error->desc; i < error->num_entries; i++) {
2107		ipr_log_fabric_path(hostrcb, fabric);
2108		for_each_fabric_cfg(fabric, cfg)
2109			ipr_log_path_elem(hostrcb, cfg);
2110
2111		add_len -= be16_to_cpu(fabric->length);
2112		fabric = (struct ipr_hostrcb_fabric_desc *)
2113			((unsigned long)fabric + be16_to_cpu(fabric->length));
2114	}
2115
2116	ipr_log_hex_data(ioa_cfg, (u32 *)fabric, add_len);
2117}
2118
2119/**
2120 * ipr_log_sis64_array_error - Log a sis64 array error.
2121 * @ioa_cfg:	ioa config struct
2122 * @hostrcb:	hostrcb struct
2123 *
2124 * Return value:
2125 * 	none
2126 **/
2127static void ipr_log_sis64_array_error(struct ipr_ioa_cfg *ioa_cfg,
2128				      struct ipr_hostrcb *hostrcb)
2129{
2130	int i, num_entries;
2131	struct ipr_hostrcb_type_24_error *error;
2132	struct ipr_hostrcb64_array_data_entry *array_entry;
2133	char buffer[IPR_MAX_RES_PATH_LENGTH];
2134	const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };
2135
2136	error = &hostrcb->hcam.u.error64.u.type_24_error;
2137
2138	ipr_err_separator;
2139
2140	ipr_err("RAID %s Array Configuration: %s\n",
2141		error->protection_level,
2142		ipr_format_resource_path(&error->last_res_path[0], &buffer[0]));
2143
2144	ipr_err_separator;
2145
2146	array_entry = error->array_member;
2147	num_entries = min_t(u32, be32_to_cpu(error->num_entries),
2148			    ARRAY_SIZE(error->array_member));
2149
2150	for (i = 0; i < num_entries; i++, array_entry++) {
2151
2152		if (!memcmp(array_entry->vpd.vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
2153			continue;
2154
2155		if (error->exposed_mode_adn == i)
2156			ipr_err("Exposed Array Member %d:\n", i);
2157		else
2158			ipr_err("Array Member %d:\n", i);
2159
2161		ipr_log_ext_vpd(&array_entry->vpd);
2162		ipr_err("Current Location: %s\n",
2163			 ipr_format_resource_path(&array_entry->res_path[0], &buffer[0]));
2164		ipr_err("Expected Location: %s\n",
2165			 ipr_format_resource_path(&array_entry->expected_res_path[0], &buffer[0]));
2166
2167		ipr_err_separator;
2168	}
2169}
2170
2171/**
2172 * ipr_log_sis64_fabric_error - Log a sis64 fabric error.
2173 * @ioa_cfg:	ioa config struct
2174 * @hostrcb:	hostrcb struct
2175 *
2176 * Return value:
2177 * 	none
2178 **/
2179static void ipr_log_sis64_fabric_error(struct ipr_ioa_cfg *ioa_cfg,
2180				       struct ipr_hostrcb *hostrcb)
2181{
2182	struct ipr_hostrcb_type_30_error *error;
2183	struct ipr_hostrcb64_fabric_desc *fabric;
2184	struct ipr_hostrcb64_config_element *cfg;
2185	int i, add_len;
2186
2187	error = &hostrcb->hcam.u.error64.u.type_30_error;
2188
2189	error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
2190	ipr_hcam_err(hostrcb, "%s\n", error->failure_reason);
2191
2192	add_len = be32_to_cpu(hostrcb->hcam.length) -
2193		(offsetof(struct ipr_hostrcb64_error, u) +
2194		 offsetof(struct ipr_hostrcb_type_30_error, desc));
2195
2196	for (i = 0, fabric = error->desc; i < error->num_entries; i++) {
2197		ipr_log64_fabric_path(hostrcb, fabric);
2198		for_each_fabric_cfg(fabric, cfg)
2199			ipr_log64_path_elem(hostrcb, cfg);
2200
2201		add_len -= be16_to_cpu(fabric->length);
2202		fabric = (struct ipr_hostrcb64_fabric_desc *)
2203			((unsigned long)fabric + be16_to_cpu(fabric->length));
2204	}
2205
2206	ipr_log_hex_data(ioa_cfg, (u32 *)fabric, add_len);
2207}
2208
2209/**
2210 * ipr_log_generic_error - Log an adapter error.
2211 * @ioa_cfg:	ioa config struct
2212 * @hostrcb:	hostrcb struct
2213 *
2214 * Return value:
2215 * 	none
2216 **/
2217static void ipr_log_generic_error(struct ipr_ioa_cfg *ioa_cfg,
2218				  struct ipr_hostrcb *hostrcb)
2219{
2220	ipr_log_hex_data(ioa_cfg, hostrcb->hcam.u.raw.data,
2221			 be32_to_cpu(hostrcb->hcam.length));
2222}
2223
2224/**
2225 * ipr_get_error - Find the specified IOASC in the ipr_error_table.
2226 * @ioasc:	IOASC
2227 *
2228 * This function will return the index into the ipr_error_table
2229 * for the specified IOASC. If the IOASC is not in the table,
2230 * 0 will be returned, which points to the entry used for unknown errors.
2231 *
2232 * Return value:
2233 * 	index into the ipr_error_table
2234 **/
2235static u32 ipr_get_error(u32 ioasc)
2236{
2237	int i;
2238
2239	for (i = 0; i < ARRAY_SIZE(ipr_error_table); i++)
2240		if (ipr_error_table[i].ioasc == (ioasc & IPR_IOASC_IOASC_MASK))
2241			return i;
2242
2243	return 0;
2244}
2245
2246/**
2247 * ipr_handle_log_data - Log an adapter error.
2248 * @ioa_cfg:	ioa config struct
2249 * @hostrcb:	hostrcb struct
2250 *
2251 * This function logs an adapter error to the system.
2252 *
2253 * Return value:
2254 * 	none
2255 **/
2256static void ipr_handle_log_data(struct ipr_ioa_cfg *ioa_cfg,
2257				struct ipr_hostrcb *hostrcb)
2258{
2259	u32 ioasc;
2260	int error_index;
2261
2262	if (hostrcb->hcam.notify_type != IPR_HOST_RCB_NOTIF_TYPE_ERROR_LOG_ENTRY)
2263		return;
2264
2265	if (hostrcb->hcam.notifications_lost == IPR_HOST_RCB_NOTIFICATIONS_LOST)
2266		dev_err(&ioa_cfg->pdev->dev, "Error notifications lost\n");
2267
2268	if (ioa_cfg->sis64)
2269		ioasc = be32_to_cpu(hostrcb->hcam.u.error64.fd_ioasc);
2270	else
2271		ioasc = be32_to_cpu(hostrcb->hcam.u.error.fd_ioasc);
2272
2273	if (!ioa_cfg->sis64 && (ioasc == IPR_IOASC_BUS_WAS_RESET ||
2274	    ioasc == IPR_IOASC_BUS_WAS_RESET_BY_OTHER)) {
2275		/* Tell the midlayer we had a bus reset so it will handle the UA properly */
2276		scsi_report_bus_reset(ioa_cfg->host,
2277				      hostrcb->hcam.u.error.fd_res_addr.bus);
2278	}
2279
2280	error_index = ipr_get_error(ioasc);
2281
2282	if (!ipr_error_table[error_index].log_hcam)
2283		return;
2284
2285	ipr_hcam_err(hostrcb, "%s\n", ipr_error_table[error_index].error);
2286
2287	/* Set indication we have logged an error */
2288	ioa_cfg->errors_logged++;
2289
2290	if (ioa_cfg->log_level < ipr_error_table[error_index].log_hcam)
2291		return;
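	/* Clamp the reported length so the overlay handlers below never
	 * read past the raw hostrcb buffer.
	 */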
2292	if (be32_to_cpu(hostrcb->hcam.length) > sizeof(hostrcb->hcam.u.raw))
2293		hostrcb->hcam.length = cpu_to_be32(sizeof(hostrcb->hcam.u.raw));
2294
2295	switch (hostrcb->hcam.overlay_id) {
2296	case IPR_HOST_RCB_OVERLAY_ID_2:
2297		ipr_log_cache_error(ioa_cfg, hostrcb);
2298		break;
2299	case IPR_HOST_RCB_OVERLAY_ID_3:
2300		ipr_log_config_error(ioa_cfg, hostrcb);
2301		break;
2302	case IPR_HOST_RCB_OVERLAY_ID_4:
2303	case IPR_HOST_RCB_OVERLAY_ID_6:
2304		ipr_log_array_error(ioa_cfg, hostrcb);
2305		break;
2306	case IPR_HOST_RCB_OVERLAY_ID_7:
2307		ipr_log_dual_ioa_error(ioa_cfg, hostrcb);
2308		break;
2309	case IPR_HOST_RCB_OVERLAY_ID_12:
2310		ipr_log_enhanced_cache_error(ioa_cfg, hostrcb);
2311		break;
2312	case IPR_HOST_RCB_OVERLAY_ID_13:
2313		ipr_log_enhanced_config_error(ioa_cfg, hostrcb);
2314		break;
2315	case IPR_HOST_RCB_OVERLAY_ID_14:
2316	case IPR_HOST_RCB_OVERLAY_ID_16:
2317		ipr_log_enhanced_array_error(ioa_cfg, hostrcb);
2318		break;
2319	case IPR_HOST_RCB_OVERLAY_ID_17:
2320		ipr_log_enhanced_dual_ioa_error(ioa_cfg, hostrcb);
2321		break;
2322	case IPR_HOST_RCB_OVERLAY_ID_20:
2323		ipr_log_fabric_error(ioa_cfg, hostrcb);
2324		break;
2325	case IPR_HOST_RCB_OVERLAY_ID_23:
2326		ipr_log_sis64_config_error(ioa_cfg, hostrcb);
2327		break;
2328	case IPR_HOST_RCB_OVERLAY_ID_24:
2329	case IPR_HOST_RCB_OVERLAY_ID_26:
2330		ipr_log_sis64_array_error(ioa_cfg, hostrcb);
2331		break;
2332	case IPR_HOST_RCB_OVERLAY_ID_30:
2333		ipr_log_sis64_fabric_error(ioa_cfg, hostrcb);
2334		break;
2335	case IPR_HOST_RCB_OVERLAY_ID_1:
2336	case IPR_HOST_RCB_OVERLAY_ID_DEFAULT:
2337	default:
2338		ipr_log_generic_error(ioa_cfg, hostrcb);
2339		break;
2340	}
2341}
2342
2343/**
2344 * ipr_process_error - Op done function for an adapter error log.
2345 * @ipr_cmd:	ipr command struct
2346 *
2347 * This function is the op done function for an error log host
2348 * controlled async message (HCAM) from the adapter. It will log the
2349 * error and send the HCAM back to the adapter.
2350 *
2351 * Return value:
2352 * 	none
2353 **/
2354static void ipr_process_error(struct ipr_cmnd *ipr_cmd)
2355{
2356	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
2357	struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb;
2358	u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
2359	u32 fd_ioasc;
2360
2361	if (ioa_cfg->sis64)
2362		fd_ioasc = be32_to_cpu(hostrcb->hcam.u.error64.fd_ioasc);
2363	else
2364		fd_ioasc = be32_to_cpu(hostrcb->hcam.u.error.fd_ioasc);
2365
2366	list_del(&hostrcb->queue);
2367	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
2368
2369	if (!ioasc) {
2370		ipr_handle_log_data(ioa_cfg, hostrcb);
2371		if (fd_ioasc == IPR_IOASC_NR_IOA_RESET_REQUIRED)
2372			ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_ABBREV);
2373	} else if (ioasc != IPR_IOASC_IOA_WAS_RESET) {
2374		dev_err(&ioa_cfg->pdev->dev,
2375			"Host RCB failed with IOASC: 0x%08X\n", ioasc);
2376	}
2377
2378	ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_LOG_DATA, hostrcb);
2379}
2380
2381/**
2382 * ipr_timeout -  An internally generated op has timed out.
2383 * @ipr_cmd:	ipr command struct
2384 *
2385 * This function blocks host requests and initiates an
2386 * adapter reset.
2387 *
2388 * Return value:
2389 * 	none
2390 **/
2391static void ipr_timeout(struct ipr_cmnd *ipr_cmd)
2392{
2393	unsigned long lock_flags = 0;
2394	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
2395
2396	ENTER;
2397	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2398
2399	ioa_cfg->errors_logged++;
2400	dev_err(&ioa_cfg->pdev->dev,
2401		"Adapter being reset due to command timeout.\n");
2402
2403	if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
2404		ioa_cfg->sdt_state = GET_DUMP;
2405
2406	if (!ioa_cfg->in_reset_reload || ioa_cfg->reset_cmd == ipr_cmd)
2407		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
2408
2409	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2410	LEAVE;
2411}
2412
2413/**
2414 * ipr_oper_timeout -  Adapter timed out transitioning to operational
2415 * @ipr_cmd:	ipr command struct
2416 *
2417 * This function blocks host requests and initiates an
2418 * adapter reset.
2419 *
2420 * Return value:
2421 * 	none
2422 **/
2423static void ipr_oper_timeout(struct ipr_cmnd *ipr_cmd)
2424{
2425	unsigned long lock_flags = 0;
2426	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
2427
2428	ENTER;
2429	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2430
2431	ioa_cfg->errors_logged++;
2432	dev_err(&ioa_cfg->pdev->dev,
2433		"Adapter timed out transitioning to operational.\n");
2434
2435	if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
2436		ioa_cfg->sdt_state = GET_DUMP;
2437
2438	if (!ioa_cfg->in_reset_reload || ioa_cfg->reset_cmd == ipr_cmd) {
2439		if (ipr_fastfail)
2440			ioa_cfg->reset_retries += IPR_NUM_RESET_RELOAD_RETRIES;
2441		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
2442	}
2443
2444	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2445	LEAVE;
2446}
2447
2448/**
2449 * ipr_reset_reload - Reset/Reload the IOA
2450 * @ioa_cfg:		ioa config struct
2451 * @shutdown_type:	shutdown type
2452 *
2453 * This function resets the adapter and re-initializes it.
2454 * This function assumes that all new host commands have been stopped.
2455 * Return value:
2456 * 	SUCCESS / FAILED
2457 **/
2458static int ipr_reset_reload(struct ipr_ioa_cfg *ioa_cfg,
2459			    enum ipr_shutdown_type shutdown_type)
2460{
2461	if (!ioa_cfg->in_reset_reload)
2462		ipr_initiate_ioa_reset(ioa_cfg, shutdown_type);
2463
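	/* Drop the host lock while waiting for the reset/reload to finish */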
2464	spin_unlock_irq(ioa_cfg->host->host_lock);
2465	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
2466	spin_lock_irq(ioa_cfg->host->host_lock);
2467
2468	/* If we got hit with a host reset while we were already resetting
2469	 the adapter for some reason, and that reset failed, the adapter
	 will have been marked dead. Fail the host reset in that case. */
2470	if (ioa_cfg->ioa_is_dead) {
2471		ipr_trace;
2472		return FAILED;
2473	}
2474
2475	return SUCCESS;
2476}
2477
2478/**
2479 * ipr_find_ses_entry - Find matching SES in SES table
2480 * @res:	resource entry struct of SES
2481 *
2482 * Return value:
2483 * 	pointer to SES table entry / NULL on failure
2484 **/
2485static const struct ipr_ses_table_entry *
2486ipr_find_ses_entry(struct ipr_resource_entry *res)
2487{
2488	int i, j, matches;
2489	struct ipr_std_inq_vpids *vpids;
2490	const struct ipr_ses_table_entry *ste = ipr_ses_table;
2491
2492	for (i = 0; i < ARRAY_SIZE(ipr_ses_table); i++, ste++) {
2493		for (j = 0, matches = 0; j < IPR_PROD_ID_LEN; j++) {
2494			if (ste->compare_product_id_byte[j] == 'X') {
2495				vpids = &res->std_inq_data.vpids;
2496				if (vpids->product_id[j] == ste->product_id[j])
2497					matches++;
2498				else
2499					break;
2500			} else
2501				matches++;
2502		}
2503
2504		if (matches == IPR_PROD_ID_LEN)
2505			return ste;
2506	}
2507
2508	return NULL;
2509}
2510
2511/**
2512 * ipr_get_max_scsi_speed - Determine max SCSI speed for a given bus
2513 * @ioa_cfg:	ioa config struct
2514 * @bus:		SCSI bus
2515 * @bus_width:	bus width
2516 *
2517 * Return value:
2518 *	SCSI bus speed in units of 100KHz, 1600 is 160 MHz.
2519 *	For a 2-byte wide SCSI bus, the maximum throughput in MB/sec
2520 *	is twice the transfer rate in MHz (e.g. a wide enabled bus
2521 *	running at a max of 160MHz can move up to 320MB/sec).
2522 **/
2523static u32 ipr_get_max_scsi_speed(struct ipr_ioa_cfg *ioa_cfg, u8 bus, u8 bus_width)
2524{
2525	struct ipr_resource_entry *res;
2526	const struct ipr_ses_table_entry *ste;
2527	u32 max_xfer_rate = IPR_MAX_SCSI_RATE(bus_width);
2528
2529	/* Loop through each config table entry in the config table buffer */
2530	list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
2531		if (!(IPR_IS_SES_DEVICE(res->std_inq_data)))
2532			continue;
2533
2534		if (bus != res->bus)
2535			continue;
2536
2537		if (!(ste = ipr_find_ses_entry(res)))
2538			continue;
2539
2540		max_xfer_rate = (ste->max_bus_speed_limit * 10) / (bus_width / 8);
2541	}
2542
2543	return max_xfer_rate;
2544}
2545
2546/**
2547 * ipr_wait_iodbg_ack - Wait for an IODEBUG ACK from the IOA
2548 * @ioa_cfg:		ioa config struct
2549 * @max_delay:		max delay in micro-seconds to wait
2550 *
2551 * Waits for an IODEBUG ACK from the IOA, doing busy looping.
2552 *
2553 * Return value:
2554 * 	0 on success / other on failure
2555 **/
2556static int ipr_wait_iodbg_ack(struct ipr_ioa_cfg *ioa_cfg, int max_delay)
2557{
2558	volatile u32 pcii_reg;
2559	int delay = 1;
2560
2561	/* Read interrupt reg until IOA signals IO Debug Acknowledge */
2562	while (delay < max_delay) {
2563		pcii_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
2564
2565		if (pcii_reg & IPR_PCII_IO_DEBUG_ACKNOWLEDGE)
2566			return 0;
2567
2568		/* udelay cannot be used if delay is more than a few milliseconds */
2569		if ((delay / 1000) > MAX_UDELAY_MS)
2570			mdelay(delay / 1000);
2571		else
2572			udelay(delay);
2573
2574		delay += delay;
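		/* Double the delay each iteration (simple exponential backoff) */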
2575	}
2576	return -EIO;
2577}
2578
2579/**
2580 * ipr_get_sis64_dump_data_section - Dump IOA memory
2581 * @ioa_cfg:			ioa config struct
2582 * @start_addr:			adapter address to dump
2583 * @dest:			destination kernel buffer
2584 * @length_in_words:		length to dump in 4 byte words
2585 *
2586 * Return value:
2587 * 	0 on success
2588 **/
2589static int ipr_get_sis64_dump_data_section(struct ipr_ioa_cfg *ioa_cfg,
2590					   u32 start_addr,
2591					   __be32 *dest, u32 length_in_words)
2592{
2593	int i;
2594
2595	for (i = 0; i < length_in_words; i++) {
2596		writel(start_addr+(i*4), ioa_cfg->regs.dump_addr_reg);
2597		*dest = cpu_to_be32(readl(ioa_cfg->regs.dump_data_reg));
2598		dest++;
2599	}
2600
2601	return 0;
2602}
2603
2604/**
2605 * ipr_get_ldump_data_section - Dump IOA memory
2606 * @ioa_cfg:			ioa config struct
2607 * @start_addr:			adapter address to dump
2608 * @dest:				destination kernel buffer
2609 * @length_in_words:	length to dump in 4 byte words
2610 *
2611 * Return value:
2612 * 	0 on success / -EIO on failure
2613 **/
2614static int ipr_get_ldump_data_section(struct ipr_ioa_cfg *ioa_cfg,
2615				      u32 start_addr,
2616				      __be32 *dest, u32 length_in_words)
2617{
2618	volatile u32 temp_pcii_reg;
2619	int i, delay = 0;
2620
2621	if (ioa_cfg->sis64)
2622		return ipr_get_sis64_dump_data_section(ioa_cfg, start_addr,
2623						       dest, length_in_words);
2624
2625	/* Write IOA interrupt reg starting LDUMP state  */
2626	writel((IPR_UPROCI_RESET_ALERT | IPR_UPROCI_IO_DEBUG_ALERT),
2627	       ioa_cfg->regs.set_uproc_interrupt_reg32);
2628
2629	/* Wait for IO debug acknowledge */
2630	if (ipr_wait_iodbg_ack(ioa_cfg,
2631			       IPR_LDUMP_MAX_LONG_ACK_DELAY_IN_USEC)) {
2632		dev_err(&ioa_cfg->pdev->dev,
2633			"IOA dump long data transfer timeout\n");
2634		return -EIO;
2635	}
2636
2637	/* Signal LDUMP interlocked - clear IO debug ack */
2638	writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
2639	       ioa_cfg->regs.clr_interrupt_reg);
2640
2641	/* Write Mailbox with starting address */
2642	writel(start_addr, ioa_cfg->ioa_mailbox);
2643
2644	/* Signal address valid - clear IOA Reset alert */
2645	writel(IPR_UPROCI_RESET_ALERT,
2646	       ioa_cfg->regs.clr_uproc_interrupt_reg32);
2647
2648	for (i = 0; i < length_in_words; i++) {
2649		/* Wait for IO debug acknowledge */
2650		if (ipr_wait_iodbg_ack(ioa_cfg,
2651				       IPR_LDUMP_MAX_SHORT_ACK_DELAY_IN_USEC)) {
2652			dev_err(&ioa_cfg->pdev->dev,
2653				"IOA dump short data transfer timeout\n");
2654			return -EIO;
2655		}
2656
2657		/* Read data from mailbox and increment destination pointer */
2658		*dest = cpu_to_be32(readl(ioa_cfg->ioa_mailbox));
2659		dest++;
2660
2661		/* For all but the last word of data, signal data received */
2662		if (i < (length_in_words - 1)) {
2663			/* Signal dump data received - Clear IO debug Ack */
2664			writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
2665			       ioa_cfg->regs.clr_interrupt_reg);
2666		}
2667	}
2668
2669	/* Signal end of block transfer. Set reset alert then clear IO debug ack */
2670	writel(IPR_UPROCI_RESET_ALERT,
2671	       ioa_cfg->regs.set_uproc_interrupt_reg32);
2672
2673	writel(IPR_UPROCI_IO_DEBUG_ALERT,
2674	       ioa_cfg->regs.clr_uproc_interrupt_reg32);
2675
2676	/* Signal dump data received - Clear IO debug Ack */
2677	writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
2678	       ioa_cfg->regs.clr_interrupt_reg);
2679
2680	/* Wait for IOA to signal LDUMP exit - IOA reset alert will be cleared */
2681	while (delay < IPR_LDUMP_MAX_SHORT_ACK_DELAY_IN_USEC) {
2682		temp_pcii_reg =
2683		    readl(ioa_cfg->regs.sense_uproc_interrupt_reg32);
2684
2685		if (!(temp_pcii_reg & IPR_UPROCI_RESET_ALERT))
2686			return 0;
2687
2688		udelay(10);
2689		delay += 10;
2690	}
2691
2692	return 0;
2693}
2694
2695#ifdef CONFIG_SCSI_IPR_DUMP
2696/**
2697 * ipr_sdt_copy - Copy Smart Dump Table to kernel buffer
2698 * @ioa_cfg:		ioa config struct
2699 * @pci_address:	adapter address
2700 * @length:			length of data to copy
2701 *
2702 * Copy data from PCI adapter to kernel buffer.
2703 * Note: length MUST be a 4 byte multiple
2704 * Return value:
2705 * 	0 on success / other on failure
2706 **/
2707static int ipr_sdt_copy(struct ipr_ioa_cfg *ioa_cfg,
2708			unsigned long pci_address, u32 length)
2709{
2710	int bytes_copied = 0;
2711	int cur_len, rc, rem_len, rem_page_len;
2712	__be32 *page;
2713	unsigned long lock_flags = 0;
2714	struct ipr_ioa_dump *ioa_dump = &ioa_cfg->dump->ioa_dump;
2715
2716	while (bytes_copied < length &&
2717	       (ioa_dump->hdr.len + bytes_copied) < IPR_MAX_IOA_DUMP_SIZE) {
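		/* Start a new page when the current one is full (or on the
		 * very first pass through the loop).
		 */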
2718		if (ioa_dump->page_offset >= PAGE_SIZE ||
2719		    ioa_dump->page_offset == 0) {
2720			page = (__be32 *)__get_free_page(GFP_ATOMIC);
2721
2722			if (!page) {
2723				ipr_trace;
2724				return bytes_copied;
2725			}
2726
2727			ioa_dump->page_offset = 0;
2728			ioa_dump->ioa_data[ioa_dump->next_page_index] = page;
2729			ioa_dump->next_page_index++;
2730		} else
2731			page = ioa_dump->ioa_data[ioa_dump->next_page_index - 1];
2732
2733		rem_len = length - bytes_copied;
2734		rem_page_len = PAGE_SIZE - ioa_dump->page_offset;
2735		cur_len = min(rem_len, rem_page_len);
2736
2737		spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2738		if (ioa_cfg->sdt_state == ABORT_DUMP) {
2739			rc = -EIO;
2740		} else {
2741			rc = ipr_get_ldump_data_section(ioa_cfg,
2742							pci_address + bytes_copied,
2743							&page[ioa_dump->page_offset / 4],
2744							(cur_len / sizeof(u32)));
2745		}
2746		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2747
2748		if (!rc) {
2749			ioa_dump->page_offset += cur_len;
2750			bytes_copied += cur_len;
2751		} else {
2752			ipr_trace;
2753			break;
2754		}
2755		schedule();
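		/* Yield the CPU between chunks so a long dump does not hog it */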
2756	}
2757
2758	return bytes_copied;
2759}
2760
2761/**
2762 * ipr_init_dump_entry_hdr - Initialize a dump entry header.
2763 * @hdr:	dump entry header struct
2764 *
2765 * Return value:
2766 * 	nothing
2767 **/
2768static void ipr_init_dump_entry_hdr(struct ipr_dump_entry_header *hdr)
2769{
2770	hdr->eye_catcher = IPR_DUMP_EYE_CATCHER;
2771	hdr->num_elems = 1;
2772	hdr->offset = sizeof(*hdr);
2773	hdr->status = IPR_DUMP_STATUS_SUCCESS;
2774}
2775
2776/**
2777 * ipr_dump_ioa_type_data - Fill in the adapter type in the dump.
2778 * @ioa_cfg:	ioa config struct
2779 * @driver_dump:	driver dump struct
2780 *
2781 * Return value:
2782 * 	nothing
2783 **/
2784static void ipr_dump_ioa_type_data(struct ipr_ioa_cfg *ioa_cfg,
2785				   struct ipr_driver_dump *driver_dump)
2786{
2787	struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
2788
2789	ipr_init_dump_entry_hdr(&driver_dump->ioa_type_entry.hdr);
2790	driver_dump->ioa_type_entry.hdr.len =
2791		sizeof(struct ipr_dump_ioa_type_entry) -
2792		sizeof(struct ipr_dump_entry_header);
2793	driver_dump->ioa_type_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
2794	driver_dump->ioa_type_entry.hdr.id = IPR_DUMP_DRIVER_TYPE_ID;
2795	driver_dump->ioa_type_entry.type = ioa_cfg->type;
2796	driver_dump->ioa_type_entry.fw_version = (ucode_vpd->major_release << 24) |
2797		(ucode_vpd->card_type << 16) | (ucode_vpd->minor_release[0] << 8) |
2798		ucode_vpd->minor_release[1];
2799	driver_dump->hdr.num_entries++;
2800}
2801
2802/**
2803 * ipr_dump_version_data - Fill in the driver version in the dump.
2804 * @ioa_cfg:	ioa config struct
2805 * @driver_dump:	driver dump struct
2806 *
2807 * Return value:
2808 * 	nothing
2809 **/
2810static void ipr_dump_version_data(struct ipr_ioa_cfg *ioa_cfg,
2811				  struct ipr_driver_dump *driver_dump)
2812{
2813	ipr_init_dump_entry_hdr(&driver_dump->version_entry.hdr);
2814	driver_dump->version_entry.hdr.len =
2815		sizeof(struct ipr_dump_version_entry) -
2816		sizeof(struct ipr_dump_entry_header);
2817	driver_dump->version_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_ASCII;
2818	driver_dump->version_entry.hdr.id = IPR_DUMP_DRIVER_VERSION_ID;
2819	strcpy(driver_dump->version_entry.version, IPR_DRIVER_VERSION);
2820	driver_dump->hdr.num_entries++;
2821}
2822
2823/**
2824 * ipr_dump_trace_data - Fill in the IOA trace in the dump.
2825 * @ioa_cfg:	ioa config struct
2826 * @driver_dump:	driver dump struct
2827 *
2828 * Return value:
2829 * 	nothing
2830 **/
2831static void ipr_dump_trace_data(struct ipr_ioa_cfg *ioa_cfg,
2832				   struct ipr_driver_dump *driver_dump)
2833{
2834	ipr_init_dump_entry_hdr(&driver_dump->trace_entry.hdr);
2835	driver_dump->trace_entry.hdr.len =
2836		sizeof(struct ipr_dump_trace_entry) -
2837		sizeof(struct ipr_dump_entry_header);
2838	driver_dump->trace_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
2839	driver_dump->trace_entry.hdr.id = IPR_DUMP_TRACE_ID;
2840	memcpy(driver_dump->trace_entry.trace, ioa_cfg->trace, IPR_TRACE_SIZE);
2841	driver_dump->hdr.num_entries++;
2842}
2843
2844/**
2845 * ipr_dump_location_data - Fill in the IOA location in the dump.
2846 * @ioa_cfg:	ioa config struct
2847 * @driver_dump:	driver dump struct
2848 *
2849 * Return value:
2850 * 	nothing
2851 **/
2852static void ipr_dump_location_data(struct ipr_ioa_cfg *ioa_cfg,
2853				   struct ipr_driver_dump *driver_dump)
2854{
2855	ipr_init_dump_entry_hdr(&driver_dump->location_entry.hdr);
2856	driver_dump->location_entry.hdr.len =
2857		sizeof(struct ipr_dump_location_entry) -
2858		sizeof(struct ipr_dump_entry_header);
2859	driver_dump->location_entry.hdr.data_type = IPR_DUMP_DATA_TYPE_ASCII;
2860	driver_dump->location_entry.hdr.id = IPR_DUMP_LOCATION_ID;
2861	strcpy(driver_dump->location_entry.location, dev_name(&ioa_cfg->pdev->dev));
2862	driver_dump->hdr.num_entries++;
2863}
2864
2865/**
2866 * ipr_get_ioa_dump - Perform a dump of the driver and adapter.
2867 * @ioa_cfg:	ioa config struct
2868 * @dump:		dump struct
2869 *
2870 * Return value:
2871 * 	nothing
2872 **/
2873static void ipr_get_ioa_dump(struct ipr_ioa_cfg *ioa_cfg, struct ipr_dump *dump)
2874{
2875	unsigned long start_addr, sdt_word;
2876	unsigned long lock_flags = 0;
2877	struct ipr_driver_dump *driver_dump = &dump->driver_dump;
2878	struct ipr_ioa_dump *ioa_dump = &dump->ioa_dump;
2879	u32 num_entries, start_off, end_off;
2880	u32 bytes_to_copy, bytes_copied, rc;
2881	struct ipr_sdt *sdt;
2882	int valid = 1;
2883	int i;
2884
2885	ENTER;
2886
2887	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
2888
2889	if (ioa_cfg->sdt_state != GET_DUMP) {
2890		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2891		return;
2892	}
2893
2894	start_addr = readl(ioa_cfg->ioa_mailbox);
2895
2896	if (!ioa_cfg->sis64 && !ipr_sdt_is_fmt2(start_addr)) {
2897		dev_err(&ioa_cfg->pdev->dev,
2898			"Invalid dump table format: %lx\n", start_addr);
2899		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2900		return;
2901	}
2902
2903	dev_err(&ioa_cfg->pdev->dev, "Dump of IOA initiated\n");
2904
2905	driver_dump->hdr.eye_catcher = IPR_DUMP_EYE_CATCHER;
2906
2907	/* Initialize the overall dump header */
2908	driver_dump->hdr.len = sizeof(struct ipr_driver_dump);
2909	driver_dump->hdr.num_entries = 1;
2910	driver_dump->hdr.first_entry_offset = sizeof(struct ipr_dump_header);
2911	driver_dump->hdr.status = IPR_DUMP_STATUS_SUCCESS;
2912	driver_dump->hdr.os = IPR_DUMP_OS_LINUX;
2913	driver_dump->hdr.driver_name = IPR_DUMP_DRIVER_NAME;
2914
2915	ipr_dump_version_data(ioa_cfg, driver_dump);
2916	ipr_dump_location_data(ioa_cfg, driver_dump);
2917	ipr_dump_ioa_type_data(ioa_cfg, driver_dump);
2918	ipr_dump_trace_data(ioa_cfg, driver_dump);
2919
2920	/* Update dump_header */
2921	driver_dump->hdr.len += sizeof(struct ipr_dump_entry_header);
2922
2923	/* IOA Dump entry */
2924	ipr_init_dump_entry_hdr(&ioa_dump->hdr);
2925	ioa_dump->hdr.len = 0;
2926	ioa_dump->hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
2927	ioa_dump->hdr.id = IPR_DUMP_IOA_DUMP_ID;
2928
2929	/* First entries in sdt are actually a list of dump addresses and
2930	 lengths to gather the real dump data.  sdt represents the pointer
2931	 to the ioa generated dump table.  Dump data will be extracted based
2932	 on entries in this table */
2933	sdt = &ioa_dump->sdt;
2934
2935	rc = ipr_get_ldump_data_section(ioa_cfg, start_addr, (__be32 *)sdt,
2936					sizeof(struct ipr_sdt) / sizeof(__be32));
2937
2938	/* Smart Dump table is ready to use and the first entry is valid */
2939	if (rc || ((be32_to_cpu(sdt->hdr.state) != IPR_FMT3_SDT_READY_TO_USE) &&
2940	    (be32_to_cpu(sdt->hdr.state) != IPR_FMT2_SDT_READY_TO_USE))) {
2941		dev_err(&ioa_cfg->pdev->dev,
2942			"Dump of IOA failed. Dump table not valid: %d, %X.\n",
2943			rc, be32_to_cpu(sdt->hdr.state));
2944		driver_dump->hdr.status = IPR_DUMP_STATUS_FAILED;
2945		ioa_cfg->sdt_state = DUMP_OBTAINED;
2946		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2947		return;
2948	}
2949
2950	num_entries = be32_to_cpu(sdt->hdr.num_entries_used);
2951
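	/* Never walk more SDT entries than the table can hold */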
2952	if (num_entries > IPR_NUM_SDT_ENTRIES)
2953		num_entries = IPR_NUM_SDT_ENTRIES;
2954
2955	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
2956
2957	for (i = 0; i < num_entries; i++) {
2958		if (ioa_dump->hdr.len > IPR_MAX_IOA_DUMP_SIZE) {
2959			driver_dump->hdr.status = IPR_DUMP_STATUS_QUAL_SUCCESS;
2960			break;
2961		}
2962
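		/* For SIS64 the end token is the length in bytes to copy; for
		 * format 2 the tokens are bus addresses and the length is the
		 * difference between the end and start offsets.
		 */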
2963		if (sdt->entry[i].flags & IPR_SDT_VALID_ENTRY) {
2964			sdt_word = be32_to_cpu(sdt->entry[i].start_token);
2965			if (ioa_cfg->sis64)
2966				bytes_to_copy = be32_to_cpu(sdt->entry[i].end_token);
2967			else {
2968				start_off = sdt_word & IPR_FMT2_MBX_ADDR_MASK;
2969				end_off = be32_to_cpu(sdt->entry[i].end_token);
2970
2971				if (ipr_sdt_is_fmt2(sdt_word) && sdt_word)
2972					bytes_to_copy = end_off - start_off;
2973				else
2974					valid = 0;
2975			}
2976			if (valid) {
2977				if (bytes_to_copy > IPR_MAX_IOA_DUMP_SIZE) {
2978					sdt->entry[i].flags &= ~IPR_SDT_VALID_ENTRY;
2979					continue;
2980				}
2981
2982				/* Copy data from adapter to driver buffers */
2983				bytes_copied = ipr_sdt_copy(ioa_cfg, sdt_word,
2984							    bytes_to_copy);
2985
2986				ioa_dump->hdr.len += bytes_copied;
2987
2988				if (bytes_copied != bytes_to_copy) {
2989					driver_dump->hdr.status = IPR_DUMP_STATUS_QUAL_SUCCESS;
2990					break;
2991				}
2992			}
2993		}
2994	}
2995
2996	dev_err(&ioa_cfg->pdev->dev, "Dump of IOA completed.\n");
2997
2998	/* Update dump_header */
2999	driver_dump->hdr.len += ioa_dump->hdr.len;
3000	wmb();
3001	ioa_cfg->sdt_state = DUMP_OBTAINED;
3002	LEAVE;
3003}
3004
3005#else
3006#define ipr_get_ioa_dump(ioa_cfg, dump) do { } while(0)
3007#endif
3008
3009/**
3010 * ipr_release_dump - Free adapter dump memory
3011 * @kref:	kref struct
3012 *
3013 * Return value:
3014 *	nothing
3015 **/
3016static void ipr_release_dump(struct kref *kref)
3017{
3018	struct ipr_dump *dump = container_of(kref, struct ipr_dump, kref);
3019	struct ipr_ioa_cfg *ioa_cfg = dump->ioa_cfg;
3020	unsigned long lock_flags = 0;
3021	int i;
3022
3023	ENTER;
3024	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3025	ioa_cfg->dump = NULL;
3026	ioa_cfg->sdt_state = INACTIVE;
3027	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3028
3029	for (i = 0; i < dump->ioa_dump.next_page_index; i++)
3030		free_page((unsigned long) dump->ioa_dump.ioa_data[i]);
3031
3032	kfree(dump);
3033	LEAVE;
3034}
3035
3036/**
3037 * ipr_worker_thread - Worker thread
3038 * @work:		ioa config struct
3039 *
3040 * Called at task level from a work thread. This function takes care
3041 * of adding and removing devices from the mid-layer as configuration
3042 * changes are detected by the adapter.
3043 *
3044 * Return value:
3045 * 	nothing
3046 **/
3047static void ipr_worker_thread(struct work_struct *work)
3048{
3049	unsigned long lock_flags;
3050	struct ipr_resource_entry *res;
3051	struct scsi_device *sdev;
3052	struct ipr_dump *dump;
3053	struct ipr_ioa_cfg *ioa_cfg =
3054		container_of(work, struct ipr_ioa_cfg, work_q);
3055	u8 bus, target, lun;
3056	int did_work;
3057
3058	ENTER;
3059	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3060
3061	if (ioa_cfg->sdt_state == GET_DUMP) {
3062		dump = ioa_cfg->dump;
3063		if (!dump) {
3064			spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3065			return;
3066		}
3067		kref_get(&dump->kref);
3068		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3069		ipr_get_ioa_dump(ioa_cfg, dump);
3070		kref_put(&dump->kref, ipr_release_dump);
3071
3072		spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3073		if (ioa_cfg->sdt_state == DUMP_OBTAINED)
3074			ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
3075		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3076		return;
3077	}
3078
3079restart:
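	/* First remove any devices that have gone away; the host lock must
	 * be dropped around scsi_remove_device(), so rescan the list after
	 * each removal until no more work is found.
	 */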
3080	do {
3081		did_work = 0;
3082		if (!ioa_cfg->allow_cmds || !ioa_cfg->allow_ml_add_del) {
3083			spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3084			return;
3085		}
3086
3087		list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
3088			if (res->del_from_ml && res->sdev) {
3089				did_work = 1;
3090				sdev = res->sdev;
3091				if (!scsi_device_get(sdev)) {
3092					list_move_tail(&res->queue, &ioa_cfg->free_res_q);
3093					spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3094					scsi_remove_device(sdev);
3095					scsi_device_put(sdev);
3096					spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3097				}
3098				break;
3099			}
3100		}
3101	} while (did_work);
3102
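	/* Now report any newly detected devices to the midlayer; the lock is
	 * dropped around scsi_add_device() and the scan restarted afterwards.
	 */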
3103	list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
3104		if (res->add_to_ml) {
3105			bus = res->bus;
3106			target = res->target;
3107			lun = res->lun;
3108			res->add_to_ml = 0;
3109			spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3110			scsi_add_device(ioa_cfg->host, bus, target, lun);
3111			spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3112			goto restart;
3113		}
3114	}
3115
3116	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3117	kobject_uevent(&ioa_cfg->host->shost_dev.kobj, KOBJ_CHANGE);
3118	LEAVE;
3119}
3120
3121#ifdef CONFIG_SCSI_IPR_TRACE
3122/**
3123 * ipr_read_trace - Dump the adapter trace
3124 * @filp:		open sysfs file
3125 * @kobj:		kobject struct
3126 * @bin_attr:		bin_attribute struct
3127 * @buf:		buffer
3128 * @off:		offset
3129 * @count:		buffer size
3130 *
3131 * Return value:
3132 *	number of bytes printed to buffer
3133 **/
3134static ssize_t ipr_read_trace(struct file *filp, struct kobject *kobj,
3135			      struct bin_attribute *bin_attr,
3136			      char *buf, loff_t off, size_t count)
3137{
3138	struct device *dev = container_of(kobj, struct device, kobj);
3139	struct Scsi_Host *shost = class_to_shost(dev);
3140	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3141	unsigned long lock_flags = 0;
3142	ssize_t ret;
3143
3144	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3145	ret = memory_read_from_buffer(buf, count, &off, ioa_cfg->trace,
3146				IPR_TRACE_SIZE);
3147	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3148
3149	return ret;
3150}
3151
3152static struct bin_attribute ipr_trace_attr = {
3153	.attr =	{
3154		.name = "trace",
3155		.mode = S_IRUGO,
3156	},
3157	.size = 0,
3158	.read = ipr_read_trace,
3159};
3160#endif
3161
3162/**
3163 * ipr_show_fw_version - Show the firmware version
3164 * @dev:	class device struct
3165 * @buf:	buffer
3166 *
3167 * Return value:
3168 *	number of bytes printed to buffer
3169 **/
3170static ssize_t ipr_show_fw_version(struct device *dev,
3171				   struct device_attribute *attr, char *buf)
3172{
3173	struct Scsi_Host *shost = class_to_shost(dev);
3174	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3175	struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
3176	unsigned long lock_flags = 0;
3177	int len;
3178
3179	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3180	len = snprintf(buf, PAGE_SIZE, "%02X%02X%02X%02X\n",
3181		       ucode_vpd->major_release, ucode_vpd->card_type,
3182		       ucode_vpd->minor_release[0],
3183		       ucode_vpd->minor_release[1]);
3184	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3185	return len;
3186}
3187
3188static struct device_attribute ipr_fw_version_attr = {
3189	.attr = {
3190		.name =		"fw_version",
3191		.mode =		S_IRUGO,
3192	},
3193	.show = ipr_show_fw_version,
3194};
3195
3196/**
3197 * ipr_show_log_level - Show the adapter's error logging level
3198 * @dev:	class device struct
3199 * @buf:	buffer
3200 *
3201 * Return value:
3202 * 	number of bytes printed to buffer
3203 **/
3204static ssize_t ipr_show_log_level(struct device *dev,
3205				   struct device_attribute *attr, char *buf)
3206{
3207	struct Scsi_Host *shost = class_to_shost(dev);
3208	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3209	unsigned long lock_flags = 0;
3210	int len;
3211
3212	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3213	len = snprintf(buf, PAGE_SIZE, "%d\n", ioa_cfg->log_level);
3214	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3215	return len;
3216}
3217
3218/**
3219 * ipr_store_log_level - Change the adapter's error logging level
3220 * @dev:	class device struct
3221 * @buf:	buffer
 * @count:	buffer size
3222 *
3223 * Return value:
3224 * 	number of bytes consumed from the buffer
3225 **/
3226static ssize_t ipr_store_log_level(struct device *dev,
3227			           struct device_attribute *attr,
3228				   const char *buf, size_t count)
3229{
3230	struct Scsi_Host *shost = class_to_shost(dev);
3231	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3232	unsigned long lock_flags = 0;
3233
3234	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3235	ioa_cfg->log_level = simple_strtoul(buf, NULL, 10);
3236	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3237	return strlen(buf);
3238}
3239
3240static struct device_attribute ipr_log_level_attr = {
3241	.attr = {
3242		.name =		"log_level",
3243		.mode =		S_IRUGO | S_IWUSR,
3244	},
3245	.show = ipr_show_log_level,
3246	.store = ipr_store_log_level
3247};
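/*
 * Usage sketch (host number illustrative; assumes the attribute is exposed
 * under the SCSI host class device in sysfs):
 *
 *	cat /sys/class/scsi_host/host0/log_level
 *	echo 4 > /sys/class/scsi_host/host0/log_level
 */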
3248
3249/**
3250 * ipr_store_diagnostics - IOA Diagnostics interface
3251 * @dev:	device struct
3252 * @buf:	buffer
3253 * @count:	buffer size
3254 *
3255 * This function will reset the adapter and wait a reasonable
3256 * amount of time for any errors that the adapter might log.
3257 *
3258 * Return value:
3259 * 	count on success / other on failure
3260 **/
3261static ssize_t ipr_store_diagnostics(struct device *dev,
3262				     struct device_attribute *attr,
3263				     const char *buf, size_t count)
3264{
3265	struct Scsi_Host *shost = class_to_shost(dev);
3266	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3267	unsigned long lock_flags = 0;
3268	int rc = count;
3269
3270	if (!capable(CAP_SYS_ADMIN))
3271		return -EACCES;
3272
3273	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3274	while (ioa_cfg->in_reset_reload) {
3275		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3276		wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3277		spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3278	}
3279
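	/* Clear the error count, reset the adapter, then check below whether
	 * the reset surfaced any new errors.
	 */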
3280	ioa_cfg->errors_logged = 0;
3281	ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
3282
3283	if (ioa_cfg->in_reset_reload) {
3284		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3285		wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3286
3287		/* Wait for a second for any errors to be logged */
3288		msleep(1000);
3289	} else {
3290		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3291		return -EIO;
3292	}
3293
3294	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3295	if (ioa_cfg->in_reset_reload || ioa_cfg->errors_logged)
3296		rc = -EIO;
3297	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3298
3299	return rc;
3300}
3301
3302static struct device_attribute ipr_diagnostics_attr = {
3303	.attr = {
3304		.name =		"run_diagnostics",
3305		.mode =		S_IWUSR,
3306	},
3307	.store = ipr_store_diagnostics
3308};
3309
3310/**
3311 * ipr_show_adapter_state - Show the adapter's state
3312 * @dev:	device struct
3313 * @buf:	buffer
3314 *
3315 * Return value:
3316 * 	number of bytes printed to buffer
3317 **/
3318static ssize_t ipr_show_adapter_state(struct device *dev,
3319				      struct device_attribute *attr, char *buf)
3320{
3321	struct Scsi_Host *shost = class_to_shost(dev);
3322	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3323	unsigned long lock_flags = 0;
3324	int len;
3325
3326	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3327	if (ioa_cfg->ioa_is_dead)
3328		len = snprintf(buf, PAGE_SIZE, "offline\n");
3329	else
3330		len = snprintf(buf, PAGE_SIZE, "online\n");
3331	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3332	return len;
3333}
3334
3335/**
3336 * ipr_store_adapter_state - Change adapter state
3337 * @dev:	device struct
3338 * @buf:	buffer
3339 * @count:	buffer size
3340 *
3341 * This function will change the adapter's state.
3342 *
3343 * Return value:
3344 * 	count on success / other on failure
3345 **/
3346static ssize_t ipr_store_adapter_state(struct device *dev,
3347				       struct device_attribute *attr,
3348				       const char *buf, size_t count)
3349{
3350	struct Scsi_Host *shost = class_to_shost(dev);
3351	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3352	unsigned long lock_flags;
3353	int result = count;
3354
3355	if (!capable(CAP_SYS_ADMIN))
3356		return -EACCES;
3357
3358	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3359	if (ioa_cfg->ioa_is_dead && !strncmp(buf, "online", 6)) {
3360		ioa_cfg->ioa_is_dead = 0;
3361		ioa_cfg->reset_retries = 0;
3362		ioa_cfg->in_ioa_bringdown = 0;
3363		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
3364	}
3365	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3366	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3367
3368	return result;
3369}
3370
3371static struct device_attribute ipr_ioa_state_attr = {
3372	.attr = {
3373		.name =		"online_state",
3374		.mode =		S_IRUGO | S_IWUSR,
3375	},
3376	.show = ipr_show_adapter_state,
3377	.store = ipr_store_adapter_state
3378};
3379
3380/**
3381 * ipr_store_reset_adapter - Reset the adapter
3382 * @dev:	device struct
3383 * @buf:	buffer
3384 * @count:	buffer size
3385 *
3386 * This function will reset the adapter.
3387 *
3388 * Return value:
3389 * 	count on success / other on failure
3390 **/
3391static ssize_t ipr_store_reset_adapter(struct device *dev,
3392				       struct device_attribute *attr,
3393				       const char *buf, size_t count)
3394{
3395	struct Scsi_Host *shost = class_to_shost(dev);
3396	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3397	unsigned long lock_flags;
3398	int result = count;
3399
3400	if (!capable(CAP_SYS_ADMIN))
3401		return -EACCES;
3402
3403	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3404	if (!ioa_cfg->in_reset_reload)
3405		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
3406	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3407	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3408
3409	return result;
3410}
3411
3412static struct device_attribute ipr_ioa_reset_attr = {
3413	.attr = {
3414		.name =		"reset_host",
3415		.mode =		S_IWUSR,
3416	},
3417	.store = ipr_store_reset_adapter
3418};
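
/*
 * Illustrative usage: any write triggers a normal shutdown and reset,
 * and the write blocks until the reset/reload has completed:
 *   echo 1 > /sys/class/scsi_host/host0/reset_host
 */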
3419
3420/**
3421 * ipr_alloc_ucode_buffer - Allocates a microcode download buffer
3422 * @buf_len:		buffer length
3423 *
3424 * Allocates a DMA'able buffer in chunks and assembles a scatter/gather
3425 * list to use for microcode download
3426 *
3427 * Return value:
3428 * 	pointer to sglist / NULL on failure
3429 **/
3430static struct ipr_sglist *ipr_alloc_ucode_buffer(int buf_len)
3431{
3432	int sg_size, order, bsize_elem, num_elem, i, j;
3433	struct ipr_sglist *sglist;
3434	struct scatterlist *scatterlist;
3435	struct page *page;
3436
3437	/* Get the minimum size per scatter/gather element */
3438	sg_size = buf_len / (IPR_MAX_SGLIST - 1);
3439
3440	/* Get the actual size per element */
3441	order = get_order(sg_size);
3442
3443	/* Determine the actual number of bytes per element */
3444	bsize_elem = PAGE_SIZE * (1 << order);
3445
3446	/* Determine the actual number of sg entries needed */
3447	if (buf_len % bsize_elem)
3448		num_elem = (buf_len / bsize_elem) + 1;
3449	else
3450		num_elem = buf_len / bsize_elem;
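
	/*
	 * Worked example (assuming 4 KiB pages and IPR_MAX_SGLIST == 64):
	 * a 1 MiB image gives sg_size = 1048576 / 63 = 16644 bytes, so
	 * order = 3 (32 KiB per element) and num_elem = 32 entries.
	 */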
3451
3452	/* Allocate a scatter/gather list for the DMA */
3453	sglist = kzalloc(sizeof(struct ipr_sglist) +
3454			 (sizeof(struct scatterlist) * (num_elem - 1)),
3455			 GFP_KERNEL);
3456
3457	if (sglist == NULL) {
3458		ipr_trace;
3459		return NULL;
3460	}
3461
3462	scatterlist = sglist->scatterlist;
3463	sg_init_table(scatterlist, num_elem);
3464
3465	sglist->order = order;
3466	sglist->num_sg = num_elem;
3467
3468	/* Allocate a bunch of sg elements */
3469	for (i = 0; i < num_elem; i++) {
3470		page = alloc_pages(GFP_KERNEL, order);
3471		if (!page) {
3472			ipr_trace;
3473
3474			/* Free up what we already allocated */
3475			for (j = i - 1; j >= 0; j--)
3476				__free_pages(sg_page(&scatterlist[j]), order);
3477			kfree(sglist);
3478			return NULL;
3479		}
3480
3481		sg_set_page(&scatterlist[i], page, 0, 0);
3482	}
3483
3484	return sglist;
3485}
3486
3487/**
3488 * ipr_free_ucode_buffer - Frees a microcode download buffer
3489 * @sglist:		scatter/gather list pointer
3490 *
3491 * Free a DMA'able ucode download buffer previously allocated with
3492 * ipr_alloc_ucode_buffer
3493 *
3494 * Return value:
3495 * 	nothing
3496 **/
3497static void ipr_free_ucode_buffer(struct ipr_sglist *sglist)
3498{
3499	int i;
3500
3501	for (i = 0; i < sglist->num_sg; i++)
3502		__free_pages(sg_page(&sglist->scatterlist[i]), sglist->order);
3503
3504	kfree(sglist);
3505}
3506
3507/**
3508 * ipr_copy_ucode_buffer - Copy user buffer to kernel buffer
3509 * @sglist:		scatter/gather list pointer
3510 * @buffer:		buffer pointer
3511 * @len:		buffer length
3512 *
3513 * Copy a microcode image from a user buffer into a buffer allocated by
3514 * ipr_alloc_ucode_buffer
3515 *
3516 * Return value:
3517 * 	0 on success / other on failure
3518 **/
3519static int ipr_copy_ucode_buffer(struct ipr_sglist *sglist,
3520				 u8 *buffer, u32 len)
3521{
3522	int bsize_elem, i, result = 0;
3523	struct scatterlist *scatterlist;
3524	void *kaddr;
3525
3526	/* Determine the actual number of bytes per element */
3527	bsize_elem = PAGE_SIZE * (1 << sglist->order);
3528
3529	scatterlist = sglist->scatterlist;
3530
3531	for (i = 0; i < (len / bsize_elem); i++, buffer += bsize_elem) {
3532		struct page *page = sg_page(&scatterlist[i]);
3533
3534		kaddr = kmap(page);
3535		memcpy(kaddr, buffer, bsize_elem);
3536		kunmap(page);
3537
3538		scatterlist[i].length = bsize_elem;
3539
3540		if (result != 0) {
3541			ipr_trace;
3542			return result;
3543		}
3544	}
3545
3546	if (len % bsize_elem) {
3547		struct page *page = sg_page(&scatterlist[i]);
3548
3549		kaddr = kmap(page);
3550		memcpy(kaddr, buffer, len % bsize_elem);
3551		kunmap(page);
3552
3553		scatterlist[i].length = len % bsize_elem;
3554	}
3555
3556	sglist->buffer_len = len;
3557	return result;
3558}
3559
3560/**
3561 * ipr_build_ucode_ioadl64 - Build a microcode download IOADL
3562 * @ipr_cmd:		ipr command struct
3563 * @sglist:		scatter/gather list
3564 *
3565 * Builds a microcode download IOA data list (IOADL).
3566 *
3567 **/
3568static void ipr_build_ucode_ioadl64(struct ipr_cmnd *ipr_cmd,
3569				    struct ipr_sglist *sglist)
3570{
3571	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
3572	struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;
3573	struct scatterlist *scatterlist = sglist->scatterlist;
3574	int i;
3575
3576	ipr_cmd->dma_use_sg = sglist->num_dma_sg;
3577	ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
3578	ioarcb->data_transfer_length = cpu_to_be32(sglist->buffer_len);
3579
3580	ioarcb->ioadl_len =
3581		cpu_to_be32(sizeof(struct ipr_ioadl64_desc) * ipr_cmd->dma_use_sg);
3582	for (i = 0; i < ipr_cmd->dma_use_sg; i++) {
3583		ioadl64[i].flags = cpu_to_be32(IPR_IOADL_FLAGS_WRITE);
3584		ioadl64[i].data_len = cpu_to_be32(sg_dma_len(&scatterlist[i]));
3585		ioadl64[i].address = cpu_to_be64(sg_dma_address(&scatterlist[i]));
3586	}
3587
3588	ioadl64[i-1].flags |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
3589}
3590
3591/**
3592 * ipr_build_ucode_ioadl - Build a microcode download IOADL
3593 * @ipr_cmd:	ipr command struct
3594 * @sglist:		scatter/gather list
3595 *
3596 * Builds a microcode download IOA data list (IOADL).
3597 *
3598 **/
3599static void ipr_build_ucode_ioadl(struct ipr_cmnd *ipr_cmd,
3600				  struct ipr_sglist *sglist)
3601{
3602	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
3603	struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
3604	struct scatterlist *scatterlist = sglist->scatterlist;
3605	int i;
3606
3607	ipr_cmd->dma_use_sg = sglist->num_dma_sg;
3608	ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
3609	ioarcb->data_transfer_length = cpu_to_be32(sglist->buffer_len);
3610
3611	ioarcb->ioadl_len =
3612		cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
3613
3614	for (i = 0; i < ipr_cmd->dma_use_sg; i++) {
3615		ioadl[i].flags_and_data_len =
3616			cpu_to_be32(IPR_IOADL_FLAGS_WRITE | sg_dma_len(&scatterlist[i]));
3617		ioadl[i].address =
3618			cpu_to_be32(sg_dma_address(&scatterlist[i]));
3619	}
3620
3621	ioadl[i-1].flags_and_data_len |=
3622		cpu_to_be32(IPR_IOADL_FLAGS_LAST);
3623}
3624
3625/**
3626 * ipr_update_ioa_ucode - Update IOA's microcode
3627 * @ioa_cfg:	ioa config struct
3628 * @sglist:		scatter/gather list
3629 *
3630 * Initiate an adapter reset to update the IOA's microcode
3631 *
3632 * Return value:
3633 * 	0 on success / -EIO on failure
3634 **/
3635static int ipr_update_ioa_ucode(struct ipr_ioa_cfg *ioa_cfg,
3636				struct ipr_sglist *sglist)
3637{
3638	unsigned long lock_flags;
3639
3640	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3641	while (ioa_cfg->in_reset_reload) {
3642		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3643		wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3644		spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3645	}
3646
3647	if (ioa_cfg->ucode_sglist) {
3648		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3649		dev_err(&ioa_cfg->pdev->dev,
3650			"Microcode download already in progress\n");
3651		return -EIO;
3652	}
3653
3654	sglist->num_dma_sg = pci_map_sg(ioa_cfg->pdev, sglist->scatterlist,
3655					sglist->num_sg, DMA_TO_DEVICE);
3656
3657	if (!sglist->num_dma_sg) {
3658		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3659		dev_err(&ioa_cfg->pdev->dev,
3660			"Failed to map microcode download buffer!\n");
3661		return -EIO;
3662	}
3663
3664	ioa_cfg->ucode_sglist = sglist;
3665	ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
3666	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3667	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
3668
3669	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3670	ioa_cfg->ucode_sglist = NULL;
3671	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3672	return 0;
3673}
3674
3675/**
3676 * ipr_store_update_fw - Update the firmware on the adapter
3677 * @dev:	device struct
3678 * @buf:	buffer
3679 * @count:	buffer size
3680 *
3681 * This function will update the firmware on the adapter.
3682 *
3683 * Return value:
3684 * 	count on success / other on failure
3685 **/
3686static ssize_t ipr_store_update_fw(struct device *dev,
3687				   struct device_attribute *attr,
3688				   const char *buf, size_t count)
3689{
3690	struct Scsi_Host *shost = class_to_shost(dev);
3691	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3692	struct ipr_ucode_image_header *image_hdr;
3693	const struct firmware *fw_entry;
3694	struct ipr_sglist *sglist;
3695	char fname[100];
3696	char *src;
3697	int len, result, dnld_size;
3698
3699	if (!capable(CAP_SYS_ADMIN))
3700		return -EACCES;
3701
3702	len = scnprintf(fname, sizeof(fname), "%s", buf);
3703	if (len && fname[len - 1] == '\n')
3704		fname[len - 1] = '\0';
3705	if (request_firmware(&fw_entry, fname, &ioa_cfg->pdev->dev)) {
3706		dev_err(&ioa_cfg->pdev->dev, "Firmware file %s not found\n", fname);
3707		return -EIO;
3708	}
3709
3710	image_hdr = (struct ipr_ucode_image_header *)fw_entry->data;
3711
3712	if (be32_to_cpu(image_hdr->header_length) > fw_entry->size ||
3713	    (ioa_cfg->vpd_cbs->page3_data.card_type &&
3714	     ioa_cfg->vpd_cbs->page3_data.card_type != image_hdr->card_type)) {
3715		dev_err(&ioa_cfg->pdev->dev, "Invalid microcode buffer\n");
3716		release_firmware(fw_entry);
3717		return -EINVAL;
3718	}
3719
3720	src = (u8 *)image_hdr + be32_to_cpu(image_hdr->header_length);
3721	dnld_size = fw_entry->size - be32_to_cpu(image_hdr->header_length);
3722	sglist = ipr_alloc_ucode_buffer(dnld_size);
3723
3724	if (!sglist) {
3725		dev_err(&ioa_cfg->pdev->dev, "Microcode buffer allocation failed\n");
3726		release_firmware(fw_entry);
3727		return -ENOMEM;
3728	}
3729
3730	result = ipr_copy_ucode_buffer(sglist, src, dnld_size);
3731
3732	if (result) {
3733		dev_err(&ioa_cfg->pdev->dev,
3734			"Microcode buffer copy to DMA buffer failed\n");
3735		goto out;
3736	}
3737
3738	result = ipr_update_ioa_ucode(ioa_cfg, sglist);
3739
3740	if (!result)
3741		result = count;
3742out:
3743	ipr_free_ucode_buffer(sglist);
3744	release_firmware(fw_entry);
3745	return result;
3746}
3747
3748static struct device_attribute ipr_update_fw_attr = {
3749	.attr = {
3750		.name =		"update_fw",
3751		.mode =		S_IWUSR,
3752	},
3753	.store = ipr_store_update_fw
3754};
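
/*
 * Illustrative usage: the image name is passed to request_firmware(),
 * so the file must be visible to the firmware loader (typically under
 * /lib/firmware); the file name below is made up:
 *   echo 534973.img > /sys/class/scsi_host/host0/update_fw
 * The write blocks while the adapter resets to activate the new image.
 */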
3755
3756static struct device_attribute *ipr_ioa_attrs[] = {
3757	&ipr_fw_version_attr,
3758	&ipr_log_level_attr,
3759	&ipr_diagnostics_attr,
3760	&ipr_ioa_state_attr,
3761	&ipr_ioa_reset_attr,
3762	&ipr_update_fw_attr,
3763	NULL,
3764};
3765
3766#ifdef CONFIG_SCSI_IPR_DUMP
3767/**
3768 * ipr_read_dump - Dump the adapter
3769 * @filp:		open sysfs file
3770 * @kobj:		kobject struct
3771 * @bin_attr:		bin_attribute struct
3772 * @buf:		buffer
3773 * @off:		offset
3774 * @count:		buffer size
3775 *
3776 * Return value:
3777 *	number of bytes printed to buffer
3778 **/
3779static ssize_t ipr_read_dump(struct file *filp, struct kobject *kobj,
3780			     struct bin_attribute *bin_attr,
3781			     char *buf, loff_t off, size_t count)
3782{
3783	struct device *cdev = container_of(kobj, struct device, kobj);
3784	struct Scsi_Host *shost = class_to_shost(cdev);
3785	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3786	struct ipr_dump *dump;
3787	unsigned long lock_flags = 0;
3788	char *src;
3789	int len;
3790	size_t rc = count;
3791
3792	if (!capable(CAP_SYS_ADMIN))
3793		return -EACCES;
3794
3795	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3796	dump = ioa_cfg->dump;
3797
3798	if (ioa_cfg->sdt_state != DUMP_OBTAINED || !dump) {
3799		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3800		return 0;
3801	}
3802	kref_get(&dump->kref);
3803	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3804
3805	if (off > dump->driver_dump.hdr.len) {
3806		kref_put(&dump->kref, ipr_release_dump);
3807		return 0;
3808	}
3809
3810	if (off + count > dump->driver_dump.hdr.len) {
3811		count = dump->driver_dump.hdr.len - off;
3812		rc = count;
3813	}
3814
3815	if (count && off < sizeof(dump->driver_dump)) {
3816		if (off + count > sizeof(dump->driver_dump))
3817			len = sizeof(dump->driver_dump) - off;
3818		else
3819			len = count;
3820		src = (u8 *)&dump->driver_dump + off;
3821		memcpy(buf, src, len);
3822		buf += len;
3823		off += len;
3824		count -= len;
3825	}
3826
3827	off -= sizeof(dump->driver_dump);
3828
3829	if (count && off < offsetof(struct ipr_ioa_dump, ioa_data)) {
3830		if (off + count > offsetof(struct ipr_ioa_dump, ioa_data))
3831			len = offsetof(struct ipr_ioa_dump, ioa_data) - off;
3832		else
3833			len = count;
3834		src = (u8 *)&dump->ioa_dump + off;
3835		memcpy(buf, src, len);
3836		buf += len;
3837		off += len;
3838		count -= len;
3839	}
3840
3841	off -= offsetof(struct ipr_ioa_dump, ioa_data);
3842
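	/*
	 * The remaining IOA dump data lives in separately allocated pages
	 * (ioa_dump.ioa_data[]), so copy it out one page fragment at a
	 * time and never cross a page boundary within a single memcpy.
	 */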
3843	while (count) {
3844		if ((off & PAGE_MASK) != ((off + count) & PAGE_MASK))
3845			len = PAGE_ALIGN(off) - off;
3846		else
3847			len = count;
3848		src = (u8 *)dump->ioa_dump.ioa_data[(off & PAGE_MASK) >> PAGE_SHIFT];
3849		src += off & ~PAGE_MASK;
3850		memcpy(buf, src, len);
3851		buf += len;
3852		off += len;
3853		count -= len;
3854	}
3855
3856	kref_put(&dump->kref, ipr_release_dump);
3857	return rc;
3858}
3859
3860/**
3861 * ipr_alloc_dump - Prepare for adapter dump
3862 * @ioa_cfg:	ioa config struct
3863 *
3864 * Return value:
3865 *	0 on success / other on failure
3866 **/
3867static int ipr_alloc_dump(struct ipr_ioa_cfg *ioa_cfg)
3868{
3869	struct ipr_dump *dump;
3870	unsigned long lock_flags = 0;
3871
3872	dump = kzalloc(sizeof(struct ipr_dump), GFP_KERNEL);
3873
3874	if (!dump) {
3875		ipr_err("Dump memory allocation failed\n");
3876		return -ENOMEM;
3877	}
3878
3879	kref_init(&dump->kref);
3880	dump->ioa_cfg = ioa_cfg;
3881
3882	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3883
3884	if (INACTIVE != ioa_cfg->sdt_state) {
3885		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3886		kfree(dump);
3887		return 0;
3888	}
3889
3890	ioa_cfg->dump = dump;
3891	ioa_cfg->sdt_state = WAIT_FOR_DUMP;
3892	if (ioa_cfg->ioa_is_dead && !ioa_cfg->dump_taken) {
3893		ioa_cfg->dump_taken = 1;
3894		schedule_work(&ioa_cfg->work_q);
3895	}
3896	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3897
3898	return 0;
3899}
3900
3901/**
3902 * ipr_free_dump - Free adapter dump memory
3903 * @ioa_cfg:	ioa config struct
3904 *
3905 * Return value:
3906 *	0 on success / other on failure
3907 **/
3908static int ipr_free_dump(struct ipr_ioa_cfg *ioa_cfg)
3909{
3910	struct ipr_dump *dump;
3911	unsigned long lock_flags = 0;
3912
3913	ENTER;
3914
3915	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
3916	dump = ioa_cfg->dump;
3917	if (!dump) {
3918		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3919		return 0;
3920	}
3921
3922	ioa_cfg->dump = NULL;
3923	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
3924
3925	kref_put(&dump->kref, ipr_release_dump);
3926
3927	LEAVE;
3928	return 0;
3929}
3930
3931/**
3932 * ipr_write_dump - Setup dump state of adapter
3933 * @filp:		open sysfs file
3934 * @kobj:		kobject struct
3935 * @bin_attr:		bin_attribute struct
3936 * @buf:		buffer
3937 * @off:		offset
3938 * @count:		buffer size
3939 *
3940 * Return value:
3941 *	number of bytes written on success / other on failure
3942 **/
3943static ssize_t ipr_write_dump(struct file *filp, struct kobject *kobj,
3944			      struct bin_attribute *bin_attr,
3945			      char *buf, loff_t off, size_t count)
3946{
3947	struct device *cdev = container_of(kobj, struct device, kobj);
3948	struct Scsi_Host *shost = class_to_shost(cdev);
3949	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
3950	int rc;
3951
3952	if (!capable(CAP_SYS_ADMIN))
3953		return -EACCES;
3954
3955	if (buf[0] == '1')
3956		rc = ipr_alloc_dump(ioa_cfg);
3957	else if (buf[0] == '0')
3958		rc = ipr_free_dump(ioa_cfg);
3959	else
3960		return -EINVAL;
3961
3962	if (rc)
3963		return rc;
3964	else
3965		return count;
3966}
3967
3968static struct bin_attribute ipr_dump_attr = {
3969	.attr =	{
3970		.name = "dump",
3971		.mode = S_IRUSR | S_IWUSR,
3972	},
3973	.size = 0,
3974	.read = ipr_read_dump,
3975	.write = ipr_write_dump
3976};
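
/*
 * Illustrative usage: prepare a dump, read it out, then free the memory
 * (paths assume the adapter is host0):
 *   echo 1 > /sys/class/scsi_host/host0/dump
 *   dd if=/sys/class/scsi_host/host0/dump of=/tmp/ipr_dump bs=4k
 *   echo 0 > /sys/class/scsi_host/host0/dump
 */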
3977#else
3978static int ipr_free_dump(struct ipr_ioa_cfg *ioa_cfg) { return 0; }
3979#endif
3980
3981/**
3982 * ipr_change_queue_depth - Change the device's queue depth
3983 * @sdev:	scsi device struct
3984 * @qdepth:	depth to set
3985 * @reason:	calling context
3986 *
3987 * Return value:
3988 * 	actual depth set
3989 **/
3990static int ipr_change_queue_depth(struct scsi_device *sdev, int qdepth,
3991				  int reason)
3992{
3993	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
3994	struct ipr_resource_entry *res;
3995	unsigned long lock_flags = 0;
3996
3997	if (reason != SCSI_QDEPTH_DEFAULT)
3998		return -EOPNOTSUPP;
3999
4000	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4001	res = (struct ipr_resource_entry *)sdev->hostdata;
4002
4003	if (res && ipr_is_gata(res) && qdepth > IPR_MAX_CMD_PER_ATA_LUN)
4004		qdepth = IPR_MAX_CMD_PER_ATA_LUN;
4005	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4006
4007	scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), qdepth);
4008	return sdev->queue_depth;
4009}
4010
4011/**
4012 * ipr_change_queue_type - Change the device's queue type
4013 * @sdev:		scsi device struct
4014 * @tag_type:	type of tags to use
4015 *
4016 * Return value:
4017 * 	actual queue type set
4018 **/
4019static int ipr_change_queue_type(struct scsi_device *sdev, int tag_type)
4020{
4021	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4022	struct ipr_resource_entry *res;
4023	unsigned long lock_flags = 0;
4024
4025	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4026	res = (struct ipr_resource_entry *)sdev->hostdata;
4027
4028	if (res) {
4029		if (ipr_is_gscsi(res) && sdev->tagged_supported) {
4030			/*
4031			 * We don't bother quiescing the device here since the
4032			 * adapter firmware does it for us.
4033			 */
4034			scsi_set_tag_type(sdev, tag_type);
4035
4036			if (tag_type)
4037				scsi_activate_tcq(sdev, sdev->queue_depth);
4038			else
4039				scsi_deactivate_tcq(sdev, sdev->queue_depth);
4040		} else
4041			tag_type = 0;
4042	} else
4043		tag_type = 0;
4044
4045	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4046	return tag_type;
4047}
4048
4049/**
4050 * ipr_show_adapter_handle - Show the adapter's resource handle for this device
4051 * @dev:	device struct
4052 * @buf:	buffer
4053 *
4054 * Return value:
4055 * 	number of bytes printed to buffer
4056 **/
4057static ssize_t ipr_show_adapter_handle(struct device *dev, struct device_attribute *attr, char *buf)
4058{
4059	struct scsi_device *sdev = to_scsi_device(dev);
4060	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4061	struct ipr_resource_entry *res;
4062	unsigned long lock_flags = 0;
4063	ssize_t len = -ENXIO;
4064
4065	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4066	res = (struct ipr_resource_entry *)sdev->hostdata;
4067	if (res)
4068		len = snprintf(buf, PAGE_SIZE, "%08X\n", res->res_handle);
4069	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4070	return len;
4071}
4072
4073static struct device_attribute ipr_adapter_handle_attr = {
4074	.attr = {
4075		.name = 	"adapter_handle",
4076		.mode =		S_IRUSR,
4077	},
4078	.show = ipr_show_adapter_handle
4079};
4080
4081/**
4082 * ipr_show_resource_path - Show the resource path for this device.
4083 * @dev:	device struct
4084 * @buf:	buffer
4085 *
4086 * Return value:
4087 * 	number of bytes printed to buffer
4088 **/
4089static ssize_t ipr_show_resource_path(struct device *dev, struct device_attribute *attr, char *buf)
4090{
4091	struct scsi_device *sdev = to_scsi_device(dev);
4092	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
4093	struct ipr_resource_entry *res;
4094	unsigned long lock_flags = 0;
4095	ssize_t len = -ENXIO;
4096	char buffer[IPR_MAX_RES_PATH_LENGTH];
4097
4098	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4099	res = (struct ipr_resource_entry *)sdev->hostdata;
4100	if (res)
4101		len = snprintf(buf, PAGE_SIZE, "%s\n",
4102			       ipr_format_resource_path(&res->res_path[0], &buffer[0]));
4103	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4104	return len;
4105}
4106
4107static struct device_attribute ipr_resource_path_attr = {
4108	.attr = {
4109		.name = 	"resource_path",
4110		.mode =		S_IRUSR,
4111	},
4112	.show = ipr_show_resource_path
4113};
4114
4115static struct device_attribute *ipr_dev_attrs[] = {
4116	&ipr_adapter_handle_attr,
4117	&ipr_resource_path_attr,
4118	NULL,
4119};
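
/*
 * Illustrative usage: the per-device attributes appear under the SCSI
 * device's sysfs directory, e.g. for a disk known as sdb:
 *   cat /sys/block/sdb/device/adapter_handle
 *   cat /sys/block/sdb/device/resource_path   (meaningful on SIS-64 adapters)
 */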
4120
4121/**
4122 * ipr_biosparam - Return the HSC mapping
4123 * @sdev:			scsi device struct
4124 * @block_device:	block device pointer
4125 * @capacity:		capacity of the device
4126 * @parm:			Array containing returned HSC values.
4127 *
4128 * This function generates the HSC parms that fdisk uses.
4129 * We want to make sure we return something that places partitions
4130 * on 4k boundaries for best performance with the IOA.
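 * With 128 heads and 32 sectors per track a cylinder is 128 * 32 = 4096
 * blocks, i.e. 2 MiB with 512-byte blocks, so cylinder-aligned partitions
 * always begin on 4k boundaries.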
4131 *
4132 * Return value:
4133 * 	0 on success
4134 **/
4135static int ipr_biosparam(struct scsi_device *sdev,
4136			 struct block_device *block_device,
4137			 sector_t capacity, int *parm)
4138{
4139	int heads, sectors;
4140	sector_t cylinders;
4141
4142	heads = 128;
4143	sectors = 32;
4144
4145	cylinders = capacity;
4146	sector_div(cylinders, (128 * 32));
4147
4148	/* return result */
4149	parm[0] = heads;
4150	parm[1] = sectors;
4151	parm[2] = cylinders;
4152
4153	return 0;
4154}
4155
4156/**
4157 * ipr_find_starget - Find target based on bus/target.
4158 * @starget:	scsi target struct
4159 *
4160 * Return value:
4161 * 	resource entry pointer if found / NULL if not found
4162 **/
4163static struct ipr_resource_entry *ipr_find_starget(struct scsi_target *starget)
4164{
4165	struct Scsi_Host *shost = dev_to_shost(&starget->dev);
4166	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
4167	struct ipr_resource_entry *res;
4168
4169	list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
4170		if ((res->bus == starget->channel) &&
4171		    (res->target == starget->id) &&
4172		    (res->lun == 0)) {
4173			return res;
4174		}
4175	}
4176
4177	return NULL;
4178}
4179
4180static struct ata_port_info sata_port_info;
4181
4182/**
4183 * ipr_target_alloc - Prepare for commands to a SCSI target
4184 * @starget:	scsi target struct
4185 *
4186 * If the device is a SATA device, this function allocates an
4187 * ATA port with libata, else it does nothing.
4188 *
4189 * Return value:
4190 * 	0 on success / non-0 on failure
4191 **/
4192static int ipr_target_alloc(struct scsi_target *starget)
4193{
4194	struct Scsi_Host *shost = dev_to_shost(&starget->dev);
4195	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
4196	struct ipr_sata_port *sata_port;
4197	struct ata_port *ap;
4198	struct ipr_resource_entry *res;
4199	unsigned long lock_flags;
4200
4201	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4202	res = ipr_find_starget(starget);
4203	starget->hostdata = NULL;
4204
4205	if (res && ipr_is_gata(res)) {
4206		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4207		sata_port = kzalloc(sizeof(*sata_port), GFP_KERNEL);
4208		if (!sata_port)
4209			return -ENOMEM;
4210
4211		ap = ata_sas_port_alloc(&ioa_cfg->ata_host, &sata_port_info, shost);
4212		if (ap) {
4213			spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4214			sata_port->ioa_cfg = ioa_cfg;
4215			sata_port->ap = ap;
4216			sata_port->res = res;
4217
4218			res->sata_port = sata_port;
4219			ap->private_data = sata_port;
4220			starget->hostdata = sata_port;
4221		} else {
4222			kfree(sata_port);
4223			return -ENOMEM;
4224		}
4225	}
4226	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4227
4228	return 0;
4229}
4230
4231/**
4232 * ipr_target_destroy - Destroy a SCSI target
4233 * @starget:	scsi target struct
4234 *
4235 * If the device was a SATA device, this function frees the libata
4236 * ATA port, else it does nothing.
4237 *
4238 **/
4239static void ipr_target_destroy(struct scsi_target *starget)
4240{
4241	struct ipr_sata_port *sata_port = starget->hostdata;
4242	struct Scsi_Host *shost = dev_to_shost(&starget->dev);
4243	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
4244
4245	if (ioa_cfg->sis64) {
4246		if (starget->channel == IPR_ARRAY_VIRTUAL_BUS)
4247			clear_bit(starget->id, ioa_cfg->array_ids);
4248		else if (starget->channel == IPR_VSET_VIRTUAL_BUS)
4249			clear_bit(starget->id, ioa_cfg->vset_ids);
4250		else if (starget->channel == 0)
4251			clear_bit(starget->id, ioa_cfg->target_ids);
4252	}
4253
4254	if (sata_port) {
4255		starget->hostdata = NULL;
4256		ata_sas_port_destroy(sata_port->ap);
4257		kfree(sata_port);
4258	}
4259}
4260
4261/**
4262 * ipr_find_sdev - Find device based on bus/target/lun.
4263 * @sdev:	scsi device struct
4264 *
4265 * Return value:
4266 * 	resource entry pointer if found / NULL if not found
4267 **/
4268static struct ipr_resource_entry *ipr_find_sdev(struct scsi_device *sdev)
4269{
4270	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
4271	struct ipr_resource_entry *res;
4272
4273	list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
4274		if ((res->bus == sdev->channel) &&
4275		    (res->target == sdev->id) &&
4276		    (res->lun == sdev->lun))
4277			return res;
4278	}
4279
4280	return NULL;
4281}
4282
4283/**
4284 * ipr_slave_destroy - Unconfigure a SCSI device
4285 * @sdev:	scsi device struct
4286 *
4287 * Return value:
4288 * 	nothing
4289 **/
4290static void ipr_slave_destroy(struct scsi_device *sdev)
4291{
4292	struct ipr_resource_entry *res;
4293	struct ipr_ioa_cfg *ioa_cfg;
4294	unsigned long lock_flags = 0;
4295
4296	ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
4297
4298	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4299	res = (struct ipr_resource_entry *) sdev->hostdata;
4300	if (res) {
4301		if (res->sata_port)
4302			res->sata_port->ap->link.device[0].class = ATA_DEV_NONE;
4303		sdev->hostdata = NULL;
4304		res->sdev = NULL;
4305		res->sata_port = NULL;
4306	}
4307	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4308}
4309
4310/**
4311 * ipr_slave_configure - Configure a SCSI device
4312 * @sdev:	scsi device struct
4313 *
4314 * This function configures the specified scsi device.
4315 *
4316 * Return value:
4317 * 	0 on success
4318 **/
4319static int ipr_slave_configure(struct scsi_device *sdev)
4320{
4321	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
4322	struct ipr_resource_entry *res;
4323	struct ata_port *ap = NULL;
4324	unsigned long lock_flags = 0;
4325	char buffer[IPR_MAX_RES_PATH_LENGTH];
4326
4327	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4328	res = sdev->hostdata;
4329	if (res) {
4330		if (ipr_is_af_dasd_device(res))
4331			sdev->type = TYPE_RAID;
4332		if (ipr_is_af_dasd_device(res) || ipr_is_ioa_resource(res)) {
4333			sdev->scsi_level = 4;
4334			sdev->no_uld_attach = 1;
4335		}
4336		if (ipr_is_vset_device(res)) {
4337			blk_queue_rq_timeout(sdev->request_queue,
4338					     IPR_VSET_RW_TIMEOUT);
4339			blk_queue_max_hw_sectors(sdev->request_queue, IPR_VSET_MAX_SECTORS);
4340		}
4341		if (ipr_is_vset_device(res) || ipr_is_scsi_disk(res))
4342			sdev->allow_restart = 1;
4343		if (ipr_is_gata(res) && res->sata_port)
4344			ap = res->sata_port->ap;
4345		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4346
4347		if (ap) {
4348			scsi_adjust_queue_depth(sdev, 0, IPR_MAX_CMD_PER_ATA_LUN);
4349			ata_sas_slave_configure(sdev, ap);
4350		} else
4351			scsi_adjust_queue_depth(sdev, 0, sdev->host->cmd_per_lun);
4352		if (ioa_cfg->sis64)
4353			sdev_printk(KERN_INFO, sdev, "Resource path: %s\n",
4354			            ipr_format_resource_path(&res->res_path[0], &buffer[0]));
4355		return 0;
4356	}
4357	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4358	return 0;
4359}
4360
4361/**
4362 * ipr_ata_slave_alloc - Prepare for commands to a SATA device
4363 * @sdev:	scsi device struct
4364 *
4365 * This function initializes an ATA port so that future commands
4366 * sent through queuecommand will work.
4367 *
4368 * Return value:
4369 * 	0 on success
4370 **/
4371static int ipr_ata_slave_alloc(struct scsi_device *sdev)
4372{
4373	struct ipr_sata_port *sata_port = NULL;
4374	int rc = -ENXIO;
4375
4376	ENTER;
4377	if (sdev->sdev_target)
4378		sata_port = sdev->sdev_target->hostdata;
4379	if (sata_port)
4380		rc = ata_sas_port_init(sata_port->ap);
4381	if (rc)
4382		ipr_slave_destroy(sdev);
4383
4384	LEAVE;
4385	return rc;
4386}
4387
4388/**
4389 * ipr_slave_alloc - Prepare for commands to a device.
4390 * @sdev:	scsi device struct
4391 *
4392 * This function saves a pointer to the resource entry
4393 * in the scsi device struct if the device exists. We
4394 * can then use this pointer in ipr_queuecommand when
4395 * handling new commands.
4396 *
4397 * Return value:
4398 * 	0 on success / -ENXIO if device does not exist
4399 **/
4400static int ipr_slave_alloc(struct scsi_device *sdev)
4401{
4402	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) sdev->host->hostdata;
4403	struct ipr_resource_entry *res;
4404	unsigned long lock_flags;
4405	int rc = -ENXIO;
4406
4407	sdev->hostdata = NULL;
4408
4409	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4410
4411	res = ipr_find_sdev(sdev);
4412	if (res) {
4413		res->sdev = sdev;
4414		res->add_to_ml = 0;
4415		res->in_erp = 0;
4416		sdev->hostdata = res;
4417		if (!ipr_is_naca_model(res))
4418			res->needs_sync_complete = 1;
4419		rc = 0;
4420		if (ipr_is_gata(res)) {
4421			spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4422			return ipr_ata_slave_alloc(sdev);
4423		}
4424	}
4425
4426	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4427
4428	return rc;
4429}
4430
4431/**
4432 * ipr_eh_host_reset - Reset the host adapter
4433 * @scsi_cmd:	scsi command struct
4434 *
4435 * Return value:
4436 * 	SUCCESS / FAILED
4437 **/
4438static int __ipr_eh_host_reset(struct scsi_cmnd * scsi_cmd)
4439{
4440	struct ipr_ioa_cfg *ioa_cfg;
4441	int rc;
4442
4443	ENTER;
4444	ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata;
4445
4446	dev_err(&ioa_cfg->pdev->dev,
4447		"Adapter being reset as a result of error recovery.\n");
4448
4449	if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
4450		ioa_cfg->sdt_state = GET_DUMP;
4451
4452	rc = ipr_reset_reload(ioa_cfg, IPR_SHUTDOWN_ABBREV);
4453
4454	LEAVE;
4455	return rc;
4456}
4457
4458static int ipr_eh_host_reset(struct scsi_cmnd * cmd)
4459{
4460	int rc;
4461
4462	spin_lock_irq(cmd->device->host->host_lock);
4463	rc = __ipr_eh_host_reset(cmd);
4464	spin_unlock_irq(cmd->device->host->host_lock);
4465
4466	return rc;
4467}
4468
4469/**
4470 * ipr_device_reset - Reset the device
4471 * @ioa_cfg:	ioa config struct
4472 * @res:		resource entry struct
4473 *
4474 * This function issues a device reset to the affected device.
4475 * If the device is a SCSI device, a LUN reset will be sent
4476 * to the device first. If that does not work, a target reset
4477 * will be sent. If the device is a SATA device, a PHY reset will
4478 * be sent.
4479 *
4480 * Return value:
4481 *	0 on success / non-zero on failure
4482 **/
4483static int ipr_device_reset(struct ipr_ioa_cfg *ioa_cfg,
4484			    struct ipr_resource_entry *res)
4485{
4486	struct ipr_cmnd *ipr_cmd;
4487	struct ipr_ioarcb *ioarcb;
4488	struct ipr_cmd_pkt *cmd_pkt;
4489	struct ipr_ioarcb_ata_regs *regs;
4490	u32 ioasc;
4491
4492	ENTER;
4493	ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
4494	ioarcb = &ipr_cmd->ioarcb;
4495	cmd_pkt = &ioarcb->cmd_pkt;
4496
4497	if (ipr_cmd->ioa_cfg->sis64) {
4498		regs = &ipr_cmd->i.ata_ioadl.regs;
4499		ioarcb->add_cmd_parms_offset = cpu_to_be16(sizeof(*ioarcb));
4500	} else
4501		regs = &ioarcb->u.add_data.u.regs;
4502
4503	ioarcb->res_handle = res->res_handle;
4504	cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
4505	cmd_pkt->cdb[0] = IPR_RESET_DEVICE;
4506	if (ipr_is_gata(res)) {
4507		cmd_pkt->cdb[2] = IPR_ATA_PHY_RESET;
4508		ioarcb->add_cmd_parms_len = cpu_to_be16(sizeof(regs->flags));
4509		regs->flags |= IPR_ATA_FLAG_STATUS_ON_GOOD_COMPLETION;
4510	}
4511
4512	ipr_send_blocking_cmd(ipr_cmd, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
4513	ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
4514	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
4515	if (ipr_is_gata(res) && res->sata_port && ioasc != IPR_IOASC_IOA_WAS_RESET) {
4516		if (ipr_cmd->ioa_cfg->sis64)
4517			memcpy(&res->sata_port->ioasa, &ipr_cmd->s.ioasa64.u.gata,
4518			       sizeof(struct ipr_ioasa_gata));
4519		else
4520			memcpy(&res->sata_port->ioasa, &ipr_cmd->s.ioasa.u.gata,
4521			       sizeof(struct ipr_ioasa_gata));
4522	}
4523
4524	LEAVE;
4525	return (IPR_IOASC_SENSE_KEY(ioasc) ? -EIO : 0);
4526}
4527
4528/**
4529 * ipr_sata_reset - Reset the SATA port
4530 * @link:	SATA link to reset
4531 * @classes:	class of the attached device
4532 *
4533 * This function issues a SATA phy reset to the affected ATA link.
4534 *
4535 * Return value:
4536 *	0 on success / non-zero on failure
4537 **/
4538static int ipr_sata_reset(struct ata_link *link, unsigned int *classes,
4539				unsigned long deadline)
4540{
4541	struct ipr_sata_port *sata_port = link->ap->private_data;
4542	struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
4543	struct ipr_resource_entry *res;
4544	unsigned long lock_flags = 0;
4545	int rc = -ENXIO;
4546
4547	ENTER;
4548	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4549	while (ioa_cfg->in_reset_reload) {
4550		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4551		wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
4552		spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4553	}
4554
4555	res = sata_port->res;
4556	if (res) {
4557		rc = ipr_device_reset(ioa_cfg, res);
4558		*classes = res->ata_class;
4559	}
4560
4561	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4562	LEAVE;
4563	return rc;
4564}
4565
4566/**
4567 * ipr_eh_dev_reset - Reset the device
4568 * @scsi_cmd:	scsi command struct
4569 *
4570 * This function issues a device reset to the affected device.
4571 * A LUN reset will be sent to the device first. If that does
4572 * not work, a target reset will be sent.
4573 *
4574 * Return value:
4575 *	SUCCESS / FAILED
4576 **/
4577static int __ipr_eh_dev_reset(struct scsi_cmnd * scsi_cmd)
4578{
4579	struct ipr_cmnd *ipr_cmd;
4580	struct ipr_ioa_cfg *ioa_cfg;
4581	struct ipr_resource_entry *res;
4582	struct ata_port *ap;
4583	int rc = 0;
4584
4585	ENTER;
4586	ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata;
4587	res = scsi_cmd->device->hostdata;
4588
4589	if (!res)
4590		return FAILED;
4591
4592	/*
4593	 * If we are currently going through reset/reload, return failed. This will force the
4594	 * mid-layer to call ipr_eh_host_reset, which will then go to sleep and wait for the
4595	 * reset to complete
4596	 */
4597	if (ioa_cfg->in_reset_reload)
4598		return FAILED;
4599	if (ioa_cfg->ioa_is_dead)
4600		return FAILED;
4601
4602	list_for_each_entry(ipr_cmd, &ioa_cfg->pending_q, queue) {
4603		if (ipr_cmd->ioarcb.res_handle == res->res_handle) {
4604			if (ipr_cmd->scsi_cmd)
4605				ipr_cmd->done = ipr_scsi_eh_done;
4606			if (ipr_cmd->qc)
4607				ipr_cmd->done = ipr_sata_eh_done;
4608			if (ipr_cmd->qc && !(ipr_cmd->qc->flags & ATA_QCFLAG_FAILED)) {
4609				ipr_cmd->qc->err_mask |= AC_ERR_TIMEOUT;
4610				ipr_cmd->qc->flags |= ATA_QCFLAG_FAILED;
4611			}
4612		}
4613	}
4614
4615	res->resetting_device = 1;
4616	scmd_printk(KERN_ERR, scsi_cmd, "Resetting device\n");
4617
4618	if (ipr_is_gata(res) && res->sata_port) {
4619		ap = res->sata_port->ap;
4620		spin_unlock_irq(scsi_cmd->device->host->host_lock);
4621		ata_std_error_handler(ap);
4622		spin_lock_irq(scsi_cmd->device->host->host_lock);
4623
4624		list_for_each_entry(ipr_cmd, &ioa_cfg->pending_q, queue) {
4625			if (ipr_cmd->ioarcb.res_handle == res->res_handle) {
4626				rc = -EIO;
4627				break;
4628			}
4629		}
4630	} else
4631		rc = ipr_device_reset(ioa_cfg, res);
4632	res->resetting_device = 0;
4633
4634	LEAVE;
4635	return (rc ? FAILED : SUCCESS);
4636}
4637
4638static int ipr_eh_dev_reset(struct scsi_cmnd * cmd)
4639{
4640	int rc;
4641
4642	spin_lock_irq(cmd->device->host->host_lock);
4643	rc = __ipr_eh_dev_reset(cmd);
4644	spin_unlock_irq(cmd->device->host->host_lock);
4645
4646	return rc;
4647}
4648
4649/**
4650 * ipr_bus_reset_done - Op done function for bus reset.
4651 * @ipr_cmd:	ipr command struct
4652 *
4653 * This function is the op done function for a bus reset
4654 *
4655 * Return value:
4656 * 	none
4657 **/
4658static void ipr_bus_reset_done(struct ipr_cmnd *ipr_cmd)
4659{
4660	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
4661	struct ipr_resource_entry *res;
4662
4663	ENTER;
4664	if (!ioa_cfg->sis64)
4665		list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
4666			if (res->res_handle == ipr_cmd->ioarcb.res_handle) {
4667				scsi_report_bus_reset(ioa_cfg->host, res->bus);
4668				break;
4669			}
4670		}
4671
4672	/*
4673	 * If abort has not completed, indicate the reset has, else call the
4674	 * abort's done function to wake the sleeping eh thread
4675	 */
4676	if (ipr_cmd->sibling->sibling)
4677		ipr_cmd->sibling->sibling = NULL;
4678	else
4679		ipr_cmd->sibling->done(ipr_cmd->sibling);
4680
4681	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
4682	LEAVE;
4683}
4684
4685/**
4686 * ipr_abort_timeout - An abort task has timed out
4687 * @ipr_cmd:	ipr command struct
4688 *
4689 * This function handles when an abort task times out. If this
4690 * happens we issue a bus reset since we have resources tied
4691 * up that must be freed before returning to the midlayer.
4692 *
4693 * Return value:
4694 *	none
4695 **/
4696static void ipr_abort_timeout(struct ipr_cmnd *ipr_cmd)
4697{
4698	struct ipr_cmnd *reset_cmd;
4699	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
4700	struct ipr_cmd_pkt *cmd_pkt;
4701	unsigned long lock_flags = 0;
4702
4703	ENTER;
4704	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4705	if (ipr_cmd->completion.done || ioa_cfg->in_reset_reload) {
4706		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4707		return;
4708	}
4709
4710	sdev_printk(KERN_ERR, ipr_cmd->u.sdev, "Abort timed out. Resetting bus.\n");
4711	reset_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
4712	ipr_cmd->sibling = reset_cmd;
4713	reset_cmd->sibling = ipr_cmd;
4714	reset_cmd->ioarcb.res_handle = ipr_cmd->ioarcb.res_handle;
4715	cmd_pkt = &reset_cmd->ioarcb.cmd_pkt;
4716	cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
4717	cmd_pkt->cdb[0] = IPR_RESET_DEVICE;
4718	cmd_pkt->cdb[2] = IPR_RESET_TYPE_SELECT | IPR_BUS_RESET;
4719
4720	ipr_do_req(reset_cmd, ipr_bus_reset_done, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
4721	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4722	LEAVE;
4723}
4724
4725/**
4726 * ipr_cancel_op - Cancel specified op
4727 * @scsi_cmd:	scsi command struct
4728 *
4729 * This function cancels specified op.
4730 *
4731 * Return value:
4732 *	SUCCESS / FAILED
4733 **/
4734static int ipr_cancel_op(struct scsi_cmnd * scsi_cmd)
4735{
4736	struct ipr_cmnd *ipr_cmd;
4737	struct ipr_ioa_cfg *ioa_cfg;
4738	struct ipr_resource_entry *res;
4739	struct ipr_cmd_pkt *cmd_pkt;
4740	u32 ioasc;
4741	int op_found = 0;
4742
4743	ENTER;
4744	ioa_cfg = (struct ipr_ioa_cfg *)scsi_cmd->device->host->hostdata;
4745	res = scsi_cmd->device->hostdata;
4746
4747	/* If we are currently going through reset/reload, return failed.
4748	 * This will force the mid-layer to call ipr_eh_host_reset,
4749	 * which will then go to sleep and wait for the reset to complete
4750	 */
4751	if (ioa_cfg->in_reset_reload || ioa_cfg->ioa_is_dead)
4752		return FAILED;
4753	if (!res || !ipr_is_gscsi(res))
4754		return FAILED;
4755
4756	list_for_each_entry(ipr_cmd, &ioa_cfg->pending_q, queue) {
4757		if (ipr_cmd->scsi_cmd == scsi_cmd) {
4758			ipr_cmd->done = ipr_scsi_eh_done;
4759			op_found = 1;
4760			break;
4761		}
4762	}
4763
4764	if (!op_found)
4765		return SUCCESS;
4766
4767	ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
4768	ipr_cmd->ioarcb.res_handle = res->res_handle;
4769	cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
4770	cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
4771	cmd_pkt->cdb[0] = IPR_CANCEL_ALL_REQUESTS;
4772	ipr_cmd->u.sdev = scsi_cmd->device;
4773
4774	scmd_printk(KERN_ERR, scsi_cmd, "Aborting command: %02X\n",
4775		    scsi_cmd->cmnd[0]);
4776	ipr_send_blocking_cmd(ipr_cmd, ipr_abort_timeout, IPR_CANCEL_ALL_TIMEOUT);
4777	ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
4778
4779	/*
4780	 * If the abort task timed out and we sent a bus reset, we will get
4781	 * one the following responses to the abort
4782	 */
4783	if (ioasc == IPR_IOASC_BUS_WAS_RESET || ioasc == IPR_IOASC_SYNC_REQUIRED) {
4784		ioasc = 0;
4785		ipr_trace;
4786	}
4787
4788	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
4789	if (!ipr_is_naca_model(res))
4790		res->needs_sync_complete = 1;
4791
4792	LEAVE;
4793	return (IPR_IOASC_SENSE_KEY(ioasc) ? FAILED : SUCCESS);
4794}
4795
4796/**
4797 * ipr_eh_abort - Abort a single op
4798 * @scsi_cmd:	scsi command struct
4799 *
4800 * Return value:
4801 * 	SUCCESS / FAILED
4802 **/
4803static int ipr_eh_abort(struct scsi_cmnd * scsi_cmd)
4804{
4805	unsigned long flags;
4806	int rc;
4807
4808	ENTER;
4809
4810	spin_lock_irqsave(scsi_cmd->device->host->host_lock, flags);
4811	rc = ipr_cancel_op(scsi_cmd);
4812	spin_unlock_irqrestore(scsi_cmd->device->host->host_lock, flags);
4813
4814	LEAVE;
4815	return rc;
4816}
4817
4818/**
4819 * ipr_handle_other_interrupt - Handle "other" interrupts
4820 * @ioa_cfg:	ioa config struct
4821 * @int_reg:	interrupt register
4822 *
4823 * Return value:
4824 * 	IRQ_NONE / IRQ_HANDLED
4825 **/
4826static irqreturn_t ipr_handle_other_interrupt(struct ipr_ioa_cfg *ioa_cfg,
4827					      volatile u32 int_reg)
4828{
4829	irqreturn_t rc = IRQ_HANDLED;
4830
4831	if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
4832		/* Mask the interrupt */
4833		writel(IPR_PCII_IOA_TRANS_TO_OPER, ioa_cfg->regs.set_interrupt_mask_reg);
4834
4835		/* Clear the interrupt */
4836		writel(IPR_PCII_IOA_TRANS_TO_OPER, ioa_cfg->regs.clr_interrupt_reg);
4837		int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
4838
4839		list_del(&ioa_cfg->reset_cmd->queue);
4840		del_timer(&ioa_cfg->reset_cmd->timer);
4841		ipr_reset_ioa_job(ioa_cfg->reset_cmd);
4842	} else {
4843		if (int_reg & IPR_PCII_IOA_UNIT_CHECKED)
4844			ioa_cfg->ioa_unit_checked = 1;
4845		else
4846			dev_err(&ioa_cfg->pdev->dev,
4847				"Permanent IOA failure. 0x%08X\n", int_reg);
4848
4849		if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
4850			ioa_cfg->sdt_state = GET_DUMP;
4851
4852		ipr_mask_and_clear_interrupts(ioa_cfg, ~0);
4853		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
4854	}
4855
4856	return rc;
4857}
4858
4859/**
4860 * ipr_isr_eh - Interrupt service routine error handler
4861 * @ioa_cfg:	ioa config struct
4862 * @msg:	message to log
4863 *
4864 * Return value:
4865 * 	none
4866 **/
4867static void ipr_isr_eh(struct ipr_ioa_cfg *ioa_cfg, char *msg)
4868{
4869	ioa_cfg->errors_logged++;
4870	dev_err(&ioa_cfg->pdev->dev, "%s\n", msg);
4871
4872	if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
4873		ioa_cfg->sdt_state = GET_DUMP;
4874
4875	ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
4876}
4877
4878/**
4879 * ipr_isr - Interrupt service routine
4880 * @irq:	irq number
4881 * @devp:	pointer to ioa config struct
4882 *
4883 * Return value:
4884 * 	IRQ_NONE / IRQ_HANDLED
4885 **/
4886static irqreturn_t ipr_isr(int irq, void *devp)
4887{
4888	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)devp;
4889	unsigned long lock_flags = 0;
4890	volatile u32 int_reg, int_mask_reg;
4891	u32 ioasc;
4892	u16 cmd_index;
4893	int num_hrrq = 0;
4894	struct ipr_cmnd *ipr_cmd;
4895	irqreturn_t rc = IRQ_NONE;
4896
4897	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
4898
4899	/* If interrupts are disabled, ignore the interrupt */
4900	if (!ioa_cfg->allow_interrupts) {
4901		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4902		return IRQ_NONE;
4903	}
4904
4905	int_mask_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg32);
4906	int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32) & ~int_mask_reg;
4907
4908	/* If an interrupt on the adapter did not occur, ignore it.
4909	 * Or in the case of SIS 64, check for a stage change interrupt.
4910	 */
4911	if (unlikely((int_reg & IPR_PCII_OPER_INTERRUPTS) == 0)) {
4912		if (ioa_cfg->sis64) {
4913			int_mask_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
4914			int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg;
4915			if (int_reg & IPR_PCII_IPL_STAGE_CHANGE) {
4916
4917				/* clear stage change */
4918				writel(IPR_PCII_IPL_STAGE_CHANGE, ioa_cfg->regs.clr_interrupt_reg);
4919				int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg;
4920				list_del(&ioa_cfg->reset_cmd->queue);
4921				del_timer(&ioa_cfg->reset_cmd->timer);
4922				ipr_reset_ioa_job(ioa_cfg->reset_cmd);
4923				spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4924				return IRQ_HANDLED;
4925			}
4926		}
4927
4928		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4929		return IRQ_NONE;
4930	}
4931
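	/*
	 * Drain the host RRQ.  Each 32-bit entry packs a toggle bit and a
	 * command block index; an entry is valid only while its toggle bit
	 * matches ioa_cfg->toggle_bit, which the driver flips every time
	 * hrrq_curr wraps from hrrq_end back to hrrq_start.
	 */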
4932	while (1) {
4933		ipr_cmd = NULL;
4934
4935		while ((be32_to_cpu(*ioa_cfg->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) ==
4936		       ioa_cfg->toggle_bit) {
4937
4938			cmd_index = (be32_to_cpu(*ioa_cfg->hrrq_curr) &
4939				     IPR_HRRQ_REQ_RESP_HANDLE_MASK) >> IPR_HRRQ_REQ_RESP_HANDLE_SHIFT;
4940
4941			if (unlikely(cmd_index >= IPR_NUM_CMD_BLKS)) {
4942				ipr_isr_eh(ioa_cfg, "Invalid response handle from IOA");
4943				spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4944				return IRQ_HANDLED;
4945			}
4946
4947			ipr_cmd = ioa_cfg->ipr_cmnd_list[cmd_index];
4948
4949			ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
4950
4951			ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH, ioasc);
4952
4953			list_del(&ipr_cmd->queue);
4954			del_timer(&ipr_cmd->timer);
4955			ipr_cmd->done(ipr_cmd);
4956
4957			rc = IRQ_HANDLED;
4958
4959			if (ioa_cfg->hrrq_curr < ioa_cfg->hrrq_end) {
4960				ioa_cfg->hrrq_curr++;
4961			} else {
4962				ioa_cfg->hrrq_curr = ioa_cfg->hrrq_start;
4963				ioa_cfg->toggle_bit ^= 1u;
4964			}
4965		}
4966
4967		if (ipr_cmd != NULL) {
4968			/* Clear the PCI interrupt */
4969			do {
4970				writel(IPR_PCII_HRRQ_UPDATED, ioa_cfg->regs.clr_interrupt_reg32);
4971				int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32) & ~int_mask_reg;
4972			} while (int_reg & IPR_PCII_HRRQ_UPDATED &&
4973					num_hrrq++ < IPR_MAX_HRRQ_RETRIES);
4974
4975			if (int_reg & IPR_PCII_HRRQ_UPDATED) {
4976				ipr_isr_eh(ioa_cfg, "Error clearing HRRQ");
4977				spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4978				return IRQ_HANDLED;
4979			}
4980
4981		} else
4982			break;
4983	}
4984
4985	if (unlikely(rc == IRQ_NONE))
4986		rc = ipr_handle_other_interrupt(ioa_cfg, int_reg);
4987
4988	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
4989	return rc;
4990}
4991
4992/**
4993 * ipr_build_ioadl64 - Build a scatter/gather list and map the buffer
4994 * @ioa_cfg:	ioa config struct
4995 * @ipr_cmd:	ipr command struct
4996 *
4997 * Return value:
4998 * 	0 on success / -1 on failure
4999 **/
5000static int ipr_build_ioadl64(struct ipr_ioa_cfg *ioa_cfg,
5001			     struct ipr_cmnd *ipr_cmd)
5002{
5003	int i, nseg;
5004	struct scatterlist *sg;
5005	u32 length;
5006	u32 ioadl_flags = 0;
5007	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
5008	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
5009	struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;
5010
5011	length = scsi_bufflen(scsi_cmd);
5012	if (!length)
5013		return 0;
5014
5015	nseg = scsi_dma_map(scsi_cmd);
5016	if (nseg < 0) {
5017		dev_err(&ioa_cfg->pdev->dev, "scsi_dma_map failed!\n");
5018		return -1;
5019	}
5020
5021	ipr_cmd->dma_use_sg = nseg;
5022
5023	ioarcb->data_transfer_length = cpu_to_be32(length);
5024
5025	if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) {
5026		ioadl_flags = IPR_IOADL_FLAGS_WRITE;
5027		ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
5028	} else if (scsi_cmd->sc_data_direction == DMA_FROM_DEVICE)
5029		ioadl_flags = IPR_IOADL_FLAGS_READ;
5030
5031	scsi_for_each_sg(scsi_cmd, sg, ipr_cmd->dma_use_sg, i) {
5032		ioadl64[i].flags = cpu_to_be32(ioadl_flags);
5033		ioadl64[i].data_len = cpu_to_be32(sg_dma_len(sg));
5034		ioadl64[i].address = cpu_to_be64(sg_dma_address(sg));
5035	}
5036
5037	ioadl64[i-1].flags |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
5038	return 0;
5039}
5040
5041/**
5042 * ipr_build_ioadl - Build a scatter/gather list and map the buffer
5043 * @ioa_cfg:	ioa config struct
5044 * @ipr_cmd:	ipr command struct
5045 *
5046 * Return value:
5047 * 	0 on success / -1 on failure
5048 **/
5049static int ipr_build_ioadl(struct ipr_ioa_cfg *ioa_cfg,
5050			   struct ipr_cmnd *ipr_cmd)
5051{
5052	int i, nseg;
5053	struct scatterlist *sg;
5054	u32 length;
5055	u32 ioadl_flags = 0;
5056	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
5057	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
5058	struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
5059
5060	length = scsi_bufflen(scsi_cmd);
5061	if (!length)
5062		return 0;
5063
5064	nseg = scsi_dma_map(scsi_cmd);
5065	if (nseg < 0) {
5066		dev_err(&ioa_cfg->pdev->dev, "scsi_dma_map failed!\n");
5067		return -1;
5068	}
5069
5070	ipr_cmd->dma_use_sg = nseg;
5071
5072	if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) {
5073		ioadl_flags = IPR_IOADL_FLAGS_WRITE;
5074		ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
5075		ioarcb->data_transfer_length = cpu_to_be32(length);
5076		ioarcb->ioadl_len =
5077			cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
5078	} else if (scsi_cmd->sc_data_direction == DMA_FROM_DEVICE) {
5079		ioadl_flags = IPR_IOADL_FLAGS_READ;
5080		ioarcb->read_data_transfer_length = cpu_to_be32(length);
5081		ioarcb->read_ioadl_len =
5082			cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
5083	}
5084
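	/*
	 * A small S/G list fits in the unused add_data area of the IOARCB
	 * itself, saving the adapter a separate DMA fetch for the IOADL.
	 */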
5085	if (ipr_cmd->dma_use_sg <= ARRAY_SIZE(ioarcb->u.add_data.u.ioadl)) {
5086		ioadl = ioarcb->u.add_data.u.ioadl;
5087		ioarcb->write_ioadl_addr = cpu_to_be32((ipr_cmd->dma_addr) +
5088				    offsetof(struct ipr_ioarcb, u.add_data));
5089		ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
5090	}
5091
5092	scsi_for_each_sg(scsi_cmd, sg, ipr_cmd->dma_use_sg, i) {
5093		ioadl[i].flags_and_data_len =
5094			cpu_to_be32(ioadl_flags | sg_dma_len(sg));
5095		ioadl[i].address = cpu_to_be32(sg_dma_address(sg));
5096	}
5097
5098	ioadl[i-1].flags_and_data_len |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
5099	return 0;
5100}
5101
5102/**
5103 * ipr_get_task_attributes - Translate SPI Q-Tag to task attributes
5104 * @scsi_cmd:	scsi command struct
5105 *
5106 * Return value:
5107 * 	task attributes
5108 **/
5109static u8 ipr_get_task_attributes(struct scsi_cmnd *scsi_cmd)
5110{
5111	u8 tag[2];
5112	u8 rc = IPR_FLAGS_LO_UNTAGGED_TASK;
5113
5114	if (scsi_populate_tag_msg(scsi_cmd, tag)) {
5115		switch (tag[0]) {
5116		case MSG_SIMPLE_TAG:
5117			rc = IPR_FLAGS_LO_SIMPLE_TASK;
5118			break;
5119		case MSG_HEAD_TAG:
5120			rc = IPR_FLAGS_LO_HEAD_OF_Q_TASK;
5121			break;
5122		case MSG_ORDERED_TAG:
5123			rc = IPR_FLAGS_LO_ORDERED_TASK;
5124			break;
5125		}
5126	}
5127
5128	return rc;
5129}
5130
5131/**
5132 * ipr_erp_done - Process completion of ERP for a device
5133 * @ipr_cmd:		ipr command struct
5134 *
5135 * This function copies the sense buffer into the scsi_cmd
5136 * struct and calls the scsi_done function.
5137 *
5138 * Return value:
5139 * 	nothing
5140 **/
5141static void ipr_erp_done(struct ipr_cmnd *ipr_cmd)
5142{
5143	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
5144	struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
5145	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5146	u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
5147
5148	if (IPR_IOASC_SENSE_KEY(ioasc) > 0) {
5149		scsi_cmd->result |= (DID_ERROR << 16);
5150		scmd_printk(KERN_ERR, scsi_cmd,
5151			    "Request Sense failed with IOASC: 0x%08X\n", ioasc);
5152	} else {
5153		memcpy(scsi_cmd->sense_buffer, ipr_cmd->sense_buffer,
5154		       SCSI_SENSE_BUFFERSIZE);
5155	}
5156
5157	if (res) {
5158		if (!ipr_is_naca_model(res))
5159			res->needs_sync_complete = 1;
5160		res->in_erp = 0;
5161	}
5162	scsi_dma_unmap(ipr_cmd->scsi_cmd);
5163	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
5164	scsi_cmd->scsi_done(scsi_cmd);
5165}
5166
5167/**
5168 * ipr_reinit_ipr_cmnd_for_erp - Re-initialize a cmnd block to be used for ERP
5169 * @ipr_cmd:	ipr command struct
5170 *
5171 * Return value:
5172 * 	none
5173 **/
5174static void ipr_reinit_ipr_cmnd_for_erp(struct ipr_cmnd *ipr_cmd)
5175{
5176	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
5177	struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
5178	dma_addr_t dma_addr = ipr_cmd->dma_addr;
5179
5180	memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt));
5181	ioarcb->data_transfer_length = 0;
5182	ioarcb->read_data_transfer_length = 0;
5183	ioarcb->ioadl_len = 0;
5184	ioarcb->read_ioadl_len = 0;
5185	ioasa->hdr.ioasc = 0;
5186	ioasa->hdr.residual_data_len = 0;
5187
5188	if (ipr_cmd->ioa_cfg->sis64)
5189		ioarcb->u.sis64_addr_data.data_ioadl_addr =
5190			cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64));
5191	else {
5192		ioarcb->write_ioadl_addr =
5193			cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl));
5194		ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
5195	}
5196}
5197
5198/**
5199 * ipr_erp_request_sense - Send request sense to a device
5200 * @ipr_cmd:	ipr command struct
5201 *
5202 * This function sends a request sense to a device as a result
5203 * of a check condition.
5204 *
5205 * Return value:
5206 * 	nothing
5207 **/
5208static void ipr_erp_request_sense(struct ipr_cmnd *ipr_cmd)
5209{
5210	struct ipr_cmd_pkt *cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
5211	u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
5212
5213	if (IPR_IOASC_SENSE_KEY(ioasc) > 0) {
5214		ipr_erp_done(ipr_cmd);
5215		return;
5216	}
5217
5218	ipr_reinit_ipr_cmnd_for_erp(ipr_cmd);
5219
5220	cmd_pkt->request_type = IPR_RQTYPE_SCSICDB;
5221	cmd_pkt->cdb[0] = REQUEST_SENSE;
5222	cmd_pkt->cdb[4] = SCSI_SENSE_BUFFERSIZE;
5223	cmd_pkt->flags_hi |= IPR_FLAGS_HI_SYNC_OVERRIDE;
5224	cmd_pkt->flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
5225	cmd_pkt->timeout = cpu_to_be16(IPR_REQUEST_SENSE_TIMEOUT / HZ);
5226
5227	ipr_init_ioadl(ipr_cmd, ipr_cmd->sense_buffer_dma,
5228		       SCSI_SENSE_BUFFERSIZE, IPR_IOADL_FLAGS_READ_LAST);
5229
5230	ipr_do_req(ipr_cmd, ipr_erp_done, ipr_timeout,
5231		   IPR_REQUEST_SENSE_TIMEOUT * 2);
5232}
5233
5234/**
5235 * ipr_erp_cancel_all - Send cancel all to a device
5236 * @ipr_cmd:	ipr command struct
5237 *
5238 * This function sends a cancel all to a device to clear the
5239 * queue. If we are running TCQ on the device, QERR is set to 1,
5240 * which means all outstanding ops have been dropped on the floor.
5241 * Cancel all will return them to us.
5242 *
5243 * Return value:
5244 * 	nothing
5245 **/
5246static void ipr_erp_cancel_all(struct ipr_cmnd *ipr_cmd)
5247{
5248	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
5249	struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
5250	struct ipr_cmd_pkt *cmd_pkt;
5251
5252	res->in_erp = 1;
5253
5254	ipr_reinit_ipr_cmnd_for_erp(ipr_cmd);
5255
5256	if (!scsi_get_tag_type(scsi_cmd->device)) {
5257		ipr_erp_request_sense(ipr_cmd);
5258		return;
5259	}
5260
5261	cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
5262	cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
5263	cmd_pkt->cdb[0] = IPR_CANCEL_ALL_REQUESTS;
5264
5265	ipr_do_req(ipr_cmd, ipr_erp_request_sense, ipr_timeout,
5266		   IPR_CANCEL_ALL_TIMEOUT);
5267}
5268
5269/**
5270 * ipr_dump_ioasa - Dump contents of IOASA
5271 * @ioa_cfg:	ioa config struct
5272 * @ipr_cmd:	ipr command struct
5273 * @res:		resource entry struct
5274 *
5275 * This function is invoked by the interrupt handler when ops
5276 * fail. It will log the IOASA if appropriate. Only called
5277 * for GPDD ops.
5278 *
5279 * Return value:
5280 * 	none
5281 **/
5282static void ipr_dump_ioasa(struct ipr_ioa_cfg *ioa_cfg,
5283			   struct ipr_cmnd *ipr_cmd, struct ipr_resource_entry *res)
5284{
5285	int i;
5286	u16 data_len;
5287	u32 ioasc, fd_ioasc;
5288	struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
5289	__be32 *ioasa_data = (__be32 *)ioasa;
5290	int error_index;
5291
5292	ioasc = be32_to_cpu(ioasa->hdr.ioasc) & IPR_IOASC_IOASC_MASK;
5293	fd_ioasc = be32_to_cpu(ioasa->hdr.fd_ioasc) & IPR_IOASC_IOASC_MASK;
5294
5295	if (0 == ioasc)
5296		return;
5297
5298	if (ioa_cfg->log_level < IPR_DEFAULT_LOG_LEVEL)
5299		return;
5300
5301	if (ioasc == IPR_IOASC_BUS_WAS_RESET && fd_ioasc)
5302		error_index = ipr_get_error(fd_ioasc);
5303	else
5304		error_index = ipr_get_error(ioasc);
5305
5306	if (ioa_cfg->log_level < IPR_MAX_LOG_LEVEL) {
5307		/* Don't log an error if the IOA already logged one */
5308		if (ioasa->hdr.ilid != 0)
5309			return;
5310
5311		if (!ipr_is_gscsi(res))
5312			return;
5313
5314		if (ipr_error_table[error_index].log_ioasa == 0)
5315			return;
5316	}
5317
5318	ipr_res_err(ioa_cfg, res, "%s\n", ipr_error_table[error_index].error);
5319
5320	data_len = be16_to_cpu(ioasa->hdr.ret_stat_len);
5321	if (ioa_cfg->sis64 && sizeof(struct ipr_ioasa64) < data_len)
5322		data_len = sizeof(struct ipr_ioasa64);
5323	else if (!ioa_cfg->sis64 && sizeof(struct ipr_ioasa) < data_len)
5324		data_len = sizeof(struct ipr_ioasa);
5325
5326	ipr_err("IOASA Dump:\n");
5327
5328	for (i = 0; i < data_len / 4; i += 4) {
5329		ipr_err("%08X: %08X %08X %08X %08X\n", i*4,
5330			be32_to_cpu(ioasa_data[i]),
5331			be32_to_cpu(ioasa_data[i+1]),
5332			be32_to_cpu(ioasa_data[i+2]),
5333			be32_to_cpu(ioasa_data[i+3]));
5334	}
5335}
5336
5337/**
5338 * ipr_gen_sense - Generate SCSI sense data from an IOASA
5339 * @ipr_cmd:	ipr command struct
5341 *
5342 * Return value:
5343 * 	none
5344 **/
5345static void ipr_gen_sense(struct ipr_cmnd *ipr_cmd)
5346{
5347	u32 failing_lba;
5348	u8 *sense_buf = ipr_cmd->scsi_cmd->sense_buffer;
5349	struct ipr_resource_entry *res = ipr_cmd->scsi_cmd->device->hostdata;
5350	struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
5351	u32 ioasc = be32_to_cpu(ioasa->hdr.ioasc);
5352
5353	memset(sense_buf, 0, SCSI_SENSE_BUFFERSIZE);
5354
5355	if (ioasc >= IPR_FIRST_DRIVER_IOASC)
5356		return;
5357
5358	ipr_cmd->scsi_cmd->result = SAM_STAT_CHECK_CONDITION;
5359
5360	if (ipr_is_vset_device(res) &&
5361	    ioasc == IPR_IOASC_MED_DO_NOT_REALLOC &&
5362	    ioasa->u.vset.failing_lba_hi != 0) {
5363		sense_buf[0] = 0x72;
5364		sense_buf[1] = IPR_IOASC_SENSE_KEY(ioasc);
5365		sense_buf[2] = IPR_IOASC_SENSE_CODE(ioasc);
5366		sense_buf[3] = IPR_IOASC_SENSE_QUAL(ioasc);
5367
5368		sense_buf[7] = 12;
5369		sense_buf[8] = 0;
5370		sense_buf[9] = 0x0A;
5371		sense_buf[10] = 0x80;
5372
5373		failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_hi);
5374
5375		sense_buf[12] = (failing_lba & 0xff000000) >> 24;
5376		sense_buf[13] = (failing_lba & 0x00ff0000) >> 16;
5377		sense_buf[14] = (failing_lba & 0x0000ff00) >> 8;
5378		sense_buf[15] = failing_lba & 0x000000ff;
5379
5380		failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_lo);
5381
5382		sense_buf[16] = (failing_lba & 0xff000000) >> 24;
5383		sense_buf[17] = (failing_lba & 0x00ff0000) >> 16;
5384		sense_buf[18] = (failing_lba & 0x0000ff00) >> 8;
5385		sense_buf[19] = failing_lba & 0x000000ff;
5386	} else {
5387		sense_buf[0] = 0x70;
5388		sense_buf[2] = IPR_IOASC_SENSE_KEY(ioasc);
5389		sense_buf[12] = IPR_IOASC_SENSE_CODE(ioasc);
5390		sense_buf[13] = IPR_IOASC_SENSE_QUAL(ioasc);
5391
5392		/* Illegal request */
5393		if ((IPR_IOASC_SENSE_KEY(ioasc) == 0x05) &&
5394		    (be32_to_cpu(ioasa->hdr.ioasc_specific) & IPR_FIELD_POINTER_VALID)) {
5395			sense_buf[7] = 10;	/* additional length */
5396
5397			/* IOARCB was in error */
5398			if (IPR_IOASC_SENSE_CODE(ioasc) == 0x24)
5399				sense_buf[15] = 0xC0;
5400			else	/* Parameter data was invalid */
5401				sense_buf[15] = 0x80;
5402
5403			sense_buf[16] =
5404			    ((IPR_FIELD_POINTER_MASK &
5405			      be32_to_cpu(ioasa->hdr.ioasc_specific)) >> 8) & 0xff;
5406			sense_buf[17] =
5407			    (IPR_FIELD_POINTER_MASK &
5408			     be32_to_cpu(ioasa->hdr.ioasc_specific)) & 0xff;
5409		} else {
5410			if (ioasc == IPR_IOASC_MED_DO_NOT_REALLOC) {
5411				if (ipr_is_vset_device(res))
5412					failing_lba = be32_to_cpu(ioasa->u.vset.failing_lba_lo);
5413				else
5414					failing_lba = be32_to_cpu(ioasa->u.dasd.failing_lba);
5415
5416				sense_buf[0] |= 0x80;	/* Or in the Valid bit */
5417				sense_buf[3] = (failing_lba & 0xff000000) >> 24;
5418				sense_buf[4] = (failing_lba & 0x00ff0000) >> 16;
5419				sense_buf[5] = (failing_lba & 0x0000ff00) >> 8;
5420				sense_buf[6] = failing_lba & 0x000000ff;
5421			}
5422
5423			sense_buf[7] = 6;	/* additional length */
5424		}
5425	}
5426}
5427
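/*
 * Layout reference for the sense data built above (derived from the code,
 * not from a spec excerpt):
 *
 *   Descriptor format (0x72), used for vset media errors whose failing LBA
 *   needs more than 32 bits:
 *     byte 0       0x72 response code
 *     bytes 1-3    sense key / ASC / ASCQ taken from the IOASC
 *     byte 7       additional length (12)
 *     bytes 8-11   information descriptor header (0x00, 0x0A, 0x80, 0x00)
 *     bytes 12-19  64-bit failing LBA, big-endian
 *
 *   Fixed format (0x70) is used otherwise, with the 32-bit failing LBA (if
 *   any) in bytes 3-6 and the sense-key-specific field pointer for illegal
 *   requests in bytes 15-17.
 */
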
5428/**
5429 * ipr_get_autosense - Copy autosense data to sense buffer
5430 * @ipr_cmd:	ipr command struct
5431 *
5432 * This function copies the autosense buffer to the buffer
5433 * in the scsi_cmd, if there is autosense available.
5434 *
5435 * Return value:
5436 *	1 if autosense was available / 0 if not
5437 **/
5438static int ipr_get_autosense(struct ipr_cmnd *ipr_cmd)
5439{
5440	struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
5441	struct ipr_ioasa64 *ioasa64 = &ipr_cmd->s.ioasa64;
5442
5443	if ((be32_to_cpu(ioasa->hdr.ioasc_specific) & IPR_AUTOSENSE_VALID) == 0)
5444		return 0;
5445
5446	if (ipr_cmd->ioa_cfg->sis64)
5447		memcpy(ipr_cmd->scsi_cmd->sense_buffer, ioasa64->auto_sense.data,
5448		       min_t(u16, be16_to_cpu(ioasa64->auto_sense.auto_sense_len),
5449			   SCSI_SENSE_BUFFERSIZE));
5450	else
5451		memcpy(ipr_cmd->scsi_cmd->sense_buffer, ioasa->auto_sense.data,
5452		       min_t(u16, be16_to_cpu(ioasa->auto_sense.auto_sense_len),
5453			   SCSI_SENSE_BUFFERSIZE));
5454	return 1;
5455}
5456
5457/**
5458 * ipr_erp_start - Process an error response for a SCSI op
5459 * @ioa_cfg:	ioa config struct
5460 * @ipr_cmd:	ipr command struct
5461 *
5462 * This function determines whether or not to initiate ERP
5463 * on the affected device.
5464 *
5465 * Return value:
5466 * 	nothing
5467 **/
5468static void ipr_erp_start(struct ipr_ioa_cfg *ioa_cfg,
5469			      struct ipr_cmnd *ipr_cmd)
5470{
5471	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
5472	struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
5473	u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
5474	u32 masked_ioasc = ioasc & IPR_IOASC_IOASC_MASK;
5475
5476	if (!res) {
5477		ipr_scsi_eh_done(ipr_cmd);
5478		return;
5479	}
5480
5481	if (!ipr_is_gscsi(res) && masked_ioasc != IPR_IOASC_HW_DEV_BUS_STATUS)
5482		ipr_gen_sense(ipr_cmd);
5483
5484	ipr_dump_ioasa(ioa_cfg, ipr_cmd, res);
5485
5486	switch (masked_ioasc) {
5487	case IPR_IOASC_ABORTED_CMD_TERM_BY_HOST:
5488		if (ipr_is_naca_model(res))
5489			scsi_cmd->result |= (DID_ABORT << 16);
5490		else
5491			scsi_cmd->result |= (DID_IMM_RETRY << 16);
5492		break;
5493	case IPR_IOASC_IR_RESOURCE_HANDLE:
5494	case IPR_IOASC_IR_NO_CMDS_TO_2ND_IOA:
5495		scsi_cmd->result |= (DID_NO_CONNECT << 16);
5496		break;
5497	case IPR_IOASC_HW_SEL_TIMEOUT:
5498		scsi_cmd->result |= (DID_NO_CONNECT << 16);
5499		if (!ipr_is_naca_model(res))
5500			res->needs_sync_complete = 1;
5501		break;
5502	case IPR_IOASC_SYNC_REQUIRED:
5503		if (!res->in_erp)
5504			res->needs_sync_complete = 1;
5505		scsi_cmd->result |= (DID_IMM_RETRY << 16);
5506		break;
5507	case IPR_IOASC_MED_DO_NOT_REALLOC: /* prevent retries */
5508	case IPR_IOASA_IR_DUAL_IOA_DISABLED:
5509		scsi_cmd->result |= (DID_PASSTHROUGH << 16);
5510		break;
5511	case IPR_IOASC_BUS_WAS_RESET:
5512	case IPR_IOASC_BUS_WAS_RESET_BY_OTHER:
5513		/*
5514		 * Report the bus reset and ask for a retry. The device
5515		 * will give CC/UA the next command.
5516		 */
5517		if (!res->resetting_device)
5518			scsi_report_bus_reset(ioa_cfg->host, scsi_cmd->device->channel);
5519		scsi_cmd->result |= (DID_ERROR << 16);
5520		if (!ipr_is_naca_model(res))
5521			res->needs_sync_complete = 1;
5522		break;
5523	case IPR_IOASC_HW_DEV_BUS_STATUS:
5524		scsi_cmd->result |= IPR_IOASC_SENSE_STATUS(ioasc);
5525		if (IPR_IOASC_SENSE_STATUS(ioasc) == SAM_STAT_CHECK_CONDITION) {
5526			if (!ipr_get_autosense(ipr_cmd)) {
5527				if (!ipr_is_naca_model(res)) {
5528					ipr_erp_cancel_all(ipr_cmd);
5529					return;
5530				}
5531			}
5532		}
5533		if (!ipr_is_naca_model(res))
5534			res->needs_sync_complete = 1;
5535		break;
5536	case IPR_IOASC_NR_INIT_CMD_REQUIRED:
5537		break;
5538	default:
5539		if (IPR_IOASC_SENSE_KEY(ioasc) > RECOVERED_ERROR)
5540			scsi_cmd->result |= (DID_ERROR << 16);
5541		if (!ipr_is_vset_device(res) && !ipr_is_naca_model(res))
5542			res->needs_sync_complete = 1;
5543		break;
5544	}
5545
5546	scsi_dma_unmap(ipr_cmd->scsi_cmd);
5547	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
5548	scsi_cmd->scsi_done(scsi_cmd);
5549}
5550
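/*
 * In brief: IOASCs that only warrant a retry map to DID_IMM_RETRY, dead
 * paths map to DID_NO_CONNECT, and bus resets or other hard failures map
 * to DID_ERROR so the SCSI error handler takes over.  needs_sync_complete
 * makes the next command to the device carry IPR_FLAGS_HI_SYNC_COMPLETE
 * (see ipr_queuecommand() below).
 */
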
5551/**
5552 * ipr_scsi_done - mid-layer done function
5553 * @ipr_cmd:	ipr command struct
5554 *
5555 * This function is invoked by the interrupt handler for
5556 * ops generated by the SCSI mid-layer
5557 *
5558 * Return value:
5559 * 	none
5560 **/
5561static void ipr_scsi_done(struct ipr_cmnd *ipr_cmd)
5562{
5563	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5564	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
5565	u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
5566
5567	scsi_set_resid(scsi_cmd, be32_to_cpu(ipr_cmd->s.ioasa.hdr.residual_data_len));
5568
5569	if (likely(IPR_IOASC_SENSE_KEY(ioasc) == 0)) {
5570		scsi_dma_unmap(ipr_cmd->scsi_cmd);
5571		list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
5572		scsi_cmd->scsi_done(scsi_cmd);
5573	} else
5574		ipr_erp_start(ioa_cfg, ipr_cmd);
5575}
5576
5577/**
5578 * ipr_queuecommand - Queue a mid-layer request
5579 * @scsi_cmd:	scsi command struct
5580 * @done:		done function
5581 *
5582 * This function queues a request generated by the mid-layer.
5583 *
5584 * Return value:
5585 *	0 on success
5586 *	SCSI_MLQUEUE_DEVICE_BUSY if device is busy
5587 *	SCSI_MLQUEUE_HOST_BUSY if host is busy
5588 **/
5589static int ipr_queuecommand(struct scsi_cmnd *scsi_cmd,
5590			    void (*done) (struct scsi_cmnd *))
5591{
5592	struct ipr_ioa_cfg *ioa_cfg;
5593	struct ipr_resource_entry *res;
5594	struct ipr_ioarcb *ioarcb;
5595	struct ipr_cmnd *ipr_cmd;
5596	int rc = 0;
5597
5598	scsi_cmd->scsi_done = done;
5599	ioa_cfg = (struct ipr_ioa_cfg *)scsi_cmd->device->host->hostdata;
5600	res = scsi_cmd->device->hostdata;
5601	scsi_cmd->result = (DID_OK << 16);
5602
5603	/*
5604	 * We are currently blocking all devices due to a host reset
5605	 * We have told the host to stop giving us new requests, but
5606	 * ERP ops don't count. FIXME
5607	 */
5608	if (unlikely(!ioa_cfg->allow_cmds && !ioa_cfg->ioa_is_dead))
5609		return SCSI_MLQUEUE_HOST_BUSY;
5610
5611	/*
5612	 * FIXME - Create scsi_set_host_offline interface
5613	 *  and the ioa_is_dead check can be removed
5614	 */
5615	if (unlikely(ioa_cfg->ioa_is_dead || !res)) {
5616		memset(scsi_cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
5617		scsi_cmd->result = (DID_NO_CONNECT << 16);
5618		scsi_cmd->scsi_done(scsi_cmd);
5619		return 0;
5620	}
5621
5622	if (ipr_is_gata(res) && res->sata_port)
5623		return ata_sas_queuecmd(scsi_cmd, done, res->sata_port->ap);
5624
5625	ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
5626	ioarcb = &ipr_cmd->ioarcb;
5627	list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);
5628
5629	memcpy(ioarcb->cmd_pkt.cdb, scsi_cmd->cmnd, scsi_cmd->cmd_len);
5630	ipr_cmd->scsi_cmd = scsi_cmd;
5631	ioarcb->res_handle = res->res_handle;
5632	ipr_cmd->done = ipr_scsi_done;
5633	ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_GET_RES_PHYS_LOC(res));
5634
5635	if (ipr_is_gscsi(res) || ipr_is_vset_device(res)) {
5636		if (scsi_cmd->underflow == 0)
5637			ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
5638
5639		if (res->needs_sync_complete) {
5640			ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_SYNC_COMPLETE;
5641			res->needs_sync_complete = 0;
5642		}
5643
5644		ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_LINK_DESC;
5645		ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_DELAY_AFTER_RST;
5646		ioarcb->cmd_pkt.flags_lo |= IPR_FLAGS_LO_ALIGNED_BFR;
5647		ioarcb->cmd_pkt.flags_lo |= ipr_get_task_attributes(scsi_cmd);
5648	}
5649
5650	if (scsi_cmd->cmnd[0] >= 0xC0 &&
5651	    (!ipr_is_gscsi(res) || scsi_cmd->cmnd[0] == IPR_QUERY_RSRC_STATE))
5652		ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
5653
5654	if (likely(rc == 0)) {
5655		if (ioa_cfg->sis64)
5656			rc = ipr_build_ioadl64(ioa_cfg, ipr_cmd);
5657		else
5658			rc = ipr_build_ioadl(ioa_cfg, ipr_cmd);
5659	}
5660
5661	if (likely(rc == 0)) {
5662		mb();
5663		ipr_send_command(ipr_cmd);
5664	} else {
5665		list_move_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
5666		return SCSI_MLQUEUE_HOST_BUSY;
5667	}
5668
5669	return 0;
5670}
5671
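/*
 * Fast-path summary: a free ipr_cmnd is taken from the adapter's free_q,
 * the SCSI CDB and resource handle are copied into its IOARCB, a 32- or
 * 64-bit IOADL scatter/gather list is built, and after a memory barrier
 * the command is handed to the adapter via ipr_send_command().  A failure
 * to build the IOADL returns SCSI_MLQUEUE_HOST_BUSY so the mid-layer
 * retries the command later.
 */
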
5672/**
5673 * ipr_ioctl - IOCTL handler
5674 * @sdev:	scsi device struct
5675 * @cmd:	IOCTL cmd
5676 * @arg:	IOCTL arg
5677 *
5678 * Return value:
5679 * 	0 on success / other on failure
5680 **/
5681static int ipr_ioctl(struct scsi_device *sdev, int cmd, void __user *arg)
5682{
5683	struct ipr_resource_entry *res;
5684
5685	res = (struct ipr_resource_entry *)sdev->hostdata;
5686	if (res && ipr_is_gata(res)) {
5687		if (cmd == HDIO_GET_IDENTITY)
5688			return -ENOTTY;
5689		return ata_sas_scsi_ioctl(res->sata_port->ap, sdev, cmd, arg);
5690	}
5691
5692	return -EINVAL;
5693}
5694
5695/**
5696 * ipr_ioa_info - Get information about the card/driver
5697 * @host:	scsi host struct
5698 *
5699 * Return value:
5700 * 	pointer to buffer with description string
5701 **/
5702static const char * ipr_ioa_info(struct Scsi_Host *host)
5703{
5704	static char buffer[512];
5705	struct ipr_ioa_cfg *ioa_cfg;
5706	unsigned long lock_flags = 0;
5707
5708	ioa_cfg = (struct ipr_ioa_cfg *) host->hostdata;
5709
5710	spin_lock_irqsave(host->host_lock, lock_flags);
5711	sprintf(buffer, "IBM %X Storage Adapter", ioa_cfg->type);
5712	spin_unlock_irqrestore(host->host_lock, lock_flags);
5713
5714	return buffer;
5715}
5716
5717static struct scsi_host_template driver_template = {
5718	.module = THIS_MODULE,
5719	.name = "IPR",
5720	.info = ipr_ioa_info,
5721	.ioctl = ipr_ioctl,
5722	.queuecommand = ipr_queuecommand,
5723	.eh_abort_handler = ipr_eh_abort,
5724	.eh_device_reset_handler = ipr_eh_dev_reset,
5725	.eh_host_reset_handler = ipr_eh_host_reset,
5726	.slave_alloc = ipr_slave_alloc,
5727	.slave_configure = ipr_slave_configure,
5728	.slave_destroy = ipr_slave_destroy,
5729	.target_alloc = ipr_target_alloc,
5730	.target_destroy = ipr_target_destroy,
5731	.change_queue_depth = ipr_change_queue_depth,
5732	.change_queue_type = ipr_change_queue_type,
5733	.bios_param = ipr_biosparam,
5734	.can_queue = IPR_MAX_COMMANDS,
5735	.this_id = -1,
5736	.sg_tablesize = IPR_MAX_SGLIST,
5737	.max_sectors = IPR_IOA_MAX_SECTORS,
5738	.cmd_per_lun = IPR_MAX_CMD_PER_LUN,
5739	.use_clustering = ENABLE_CLUSTERING,
5740	.shost_attrs = ipr_ioa_attrs,
5741	.sdev_attrs = ipr_dev_attrs,
5742	.proc_name = IPR_NAME
5743};
5744
5745/**
5746 * ipr_ata_phy_reset - libata phy_reset handler
5747 * @ap:		ata port to reset
5748 *
5749 **/
5750static void ipr_ata_phy_reset(struct ata_port *ap)
5751{
5752	unsigned long flags;
5753	struct ipr_sata_port *sata_port = ap->private_data;
5754	struct ipr_resource_entry *res = sata_port->res;
5755	struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
5756	int rc;
5757
5758	ENTER;
5759	spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
5760	while (ioa_cfg->in_reset_reload) {
5761		spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
5762		wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
5763		spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
5764	}
5765
5766	if (!ioa_cfg->allow_cmds)
5767		goto out_unlock;
5768
5769	rc = ipr_device_reset(ioa_cfg, res);
5770
5771	if (rc) {
5772		ap->link.device[0].class = ATA_DEV_NONE;
5773		goto out_unlock;
5774	}
5775
5776	ap->link.device[0].class = res->ata_class;
5777	if (ap->link.device[0].class == ATA_DEV_UNKNOWN)
5778		ap->link.device[0].class = ATA_DEV_NONE;
5779
5780out_unlock:
5781	spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
5782	LEAVE;
5783}
5784
5785/**
5786 * ipr_ata_post_internal - Cleanup after an internal command
5787 * @qc:	ATA queued command
5788 *
5789 * Return value:
5790 * 	none
5791 **/
5792static void ipr_ata_post_internal(struct ata_queued_cmd *qc)
5793{
5794	struct ipr_sata_port *sata_port = qc->ap->private_data;
5795	struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
5796	struct ipr_cmnd *ipr_cmd;
5797	unsigned long flags;
5798
5799	spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
5800	while (ioa_cfg->in_reset_reload) {
5801		spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
5802		wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
5803		spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
5804	}
5805
5806	list_for_each_entry(ipr_cmd, &ioa_cfg->pending_q, queue) {
5807		if (ipr_cmd->qc == qc) {
5808			ipr_device_reset(ioa_cfg, sata_port->res);
5809			break;
5810		}
5811	}
5812	spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
5813}
5814
5815/**
5816 * ipr_copy_sata_tf - Copy a SATA taskfile to an IOA data structure
5817 * @regs:	destination
5818 * @tf:	source ATA taskfile
5819 *
5820 * Return value:
5821 * 	none
5822 **/
5823static void ipr_copy_sata_tf(struct ipr_ioarcb_ata_regs *regs,
5824			     struct ata_taskfile *tf)
5825{
5826	regs->feature = tf->feature;
5827	regs->nsect = tf->nsect;
5828	regs->lbal = tf->lbal;
5829	regs->lbam = tf->lbam;
5830	regs->lbah = tf->lbah;
5831	regs->device = tf->device;
5832	regs->command = tf->command;
5833	regs->hob_feature = tf->hob_feature;
5834	regs->hob_nsect = tf->hob_nsect;
5835	regs->hob_lbal = tf->hob_lbal;
5836	regs->hob_lbam = tf->hob_lbam;
5837	regs->hob_lbah = tf->hob_lbah;
5838	regs->ctl = tf->ctl;
5839}
5840
5841/**
5842 * ipr_sata_done - done function for SATA commands
5843 * @ipr_cmd:	ipr command struct
5844 *
5845 * This function is invoked by the interrupt handler for
5846 * ops generated by the SCSI mid-layer to SATA devices
5847 *
5848 * Return value:
5849 * 	none
5850 **/
5851static void ipr_sata_done(struct ipr_cmnd *ipr_cmd)
5852{
5853	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
5854	struct ata_queued_cmd *qc = ipr_cmd->qc;
5855	struct ipr_sata_port *sata_port = qc->ap->private_data;
5856	struct ipr_resource_entry *res = sata_port->res;
5857	u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
5858
5859	if (ipr_cmd->ioa_cfg->sis64)
5860		memcpy(&sata_port->ioasa, &ipr_cmd->s.ioasa64.u.gata,
5861		       sizeof(struct ipr_ioasa_gata));
5862	else
5863		memcpy(&sata_port->ioasa, &ipr_cmd->s.ioasa.u.gata,
5864		       sizeof(struct ipr_ioasa_gata));
5865	ipr_dump_ioasa(ioa_cfg, ipr_cmd, res);
5866
5867	if (be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc_specific) & IPR_ATA_DEVICE_WAS_RESET)
5868		scsi_report_device_reset(ioa_cfg->host, res->bus, res->target);
5869
5870	if (IPR_IOASC_SENSE_KEY(ioasc) > RECOVERED_ERROR)
5871		qc->err_mask |= __ac_err_mask(sata_port->ioasa.status);
5872	else
5873		qc->err_mask |= ac_err_mask(sata_port->ioasa.status);
5874	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
5875	ata_qc_complete(qc);
5876}
5877
5878/**
5879 * ipr_build_ata_ioadl64 - Build an ATA scatter/gather list
5880 * @ipr_cmd:	ipr command struct
5881 * @qc:		ATA queued command
5882 *
5883 **/
5884static void ipr_build_ata_ioadl64(struct ipr_cmnd *ipr_cmd,
5885				  struct ata_queued_cmd *qc)
5886{
5887	u32 ioadl_flags = 0;
5888	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
5889	struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;
5890	struct ipr_ioadl64_desc *last_ioadl64 = NULL;
5891	int len = qc->nbytes;
5892	struct scatterlist *sg;
5893	unsigned int si;
5894	dma_addr_t dma_addr = ipr_cmd->dma_addr;
5895
5896	if (len == 0)
5897		return;
5898
5899	if (qc->dma_dir == DMA_TO_DEVICE) {
5900		ioadl_flags = IPR_IOADL_FLAGS_WRITE;
5901		ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
5902	} else if (qc->dma_dir == DMA_FROM_DEVICE)
5903		ioadl_flags = IPR_IOADL_FLAGS_READ;
5904
5905	ioarcb->data_transfer_length = cpu_to_be32(len);
5906	ioarcb->ioadl_len =
5907		cpu_to_be32(sizeof(struct ipr_ioadl64_desc) * ipr_cmd->dma_use_sg);
5908	ioarcb->u.sis64_addr_data.data_ioadl_addr =
5909		cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ata_ioadl));
5910
5911	for_each_sg(qc->sg, sg, qc->n_elem, si) {
5912		ioadl64->flags = cpu_to_be32(ioadl_flags);
5913		ioadl64->data_len = cpu_to_be32(sg_dma_len(sg));
5914		ioadl64->address = cpu_to_be64(sg_dma_address(sg));
5915
5916		last_ioadl64 = ioadl64;
5917		ioadl64++;
5918	}
5919
5920	if (likely(last_ioadl64))
5921		last_ioadl64->flags |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
5922}
5923
5924/**
5925 * ipr_build_ata_ioadl - Build an ATA scatter/gather list
5926 * @ipr_cmd:	ipr command struct
5927 * @qc:		ATA queued command
5928 *
5929 **/
5930static void ipr_build_ata_ioadl(struct ipr_cmnd *ipr_cmd,
5931				struct ata_queued_cmd *qc)
5932{
5933	u32 ioadl_flags = 0;
5934	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
5935	struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
5936	struct ipr_ioadl_desc *last_ioadl = NULL;
5937	int len = qc->nbytes;
5938	struct scatterlist *sg;
5939	unsigned int si;
5940
5941	if (len == 0)
5942		return;
5943
5944	if (qc->dma_dir == DMA_TO_DEVICE) {
5945		ioadl_flags = IPR_IOADL_FLAGS_WRITE;
5946		ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
5947		ioarcb->data_transfer_length = cpu_to_be32(len);
5948		ioarcb->ioadl_len =
5949			cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
5950	} else if (qc->dma_dir == DMA_FROM_DEVICE) {
5951		ioadl_flags = IPR_IOADL_FLAGS_READ;
5952		ioarcb->read_data_transfer_length = cpu_to_be32(len);
5953		ioarcb->read_ioadl_len =
5954			cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
5955	}
5956
5957	for_each_sg(qc->sg, sg, qc->n_elem, si) {
5958		ioadl->flags_and_data_len = cpu_to_be32(ioadl_flags | sg_dma_len(sg));
5959		ioadl->address = cpu_to_be32(sg_dma_address(sg));
5960
5961		last_ioadl = ioadl;
5962		ioadl++;
5963	}
5964
5965	if (likely(last_ioadl))
5966		last_ioadl->flags_and_data_len |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
5967}
5968
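/*
 * Illustrative example (hypothetical addresses and lengths): a two-element
 * DMA_FROM_DEVICE scatter list becomes, before byte swapping,
 *
 *   ioadl[0].flags_and_data_len = IPR_IOADL_FLAGS_READ | 0x1000
 *   ioadl[0].address            = 0x10000000
 *   ioadl[1].flags_and_data_len = IPR_IOADL_FLAGS_READ |
 *                                 IPR_IOADL_FLAGS_LAST | 0x0200
 *   ioadl[1].address            = 0x10002000
 *
 * The 64-bit variant above keeps the flags and length in separate fields
 * and uses 64-bit addresses, but marks the final descriptor the same way.
 */
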
5969/**
5970 * ipr_qc_issue - Issue a SATA qc to a device
5971 * @qc:	queued command
5972 *
5973 * Return value:
5974 * 	0 if success
5975 **/
5976static unsigned int ipr_qc_issue(struct ata_queued_cmd *qc)
5977{
5978	struct ata_port *ap = qc->ap;
5979	struct ipr_sata_port *sata_port = ap->private_data;
5980	struct ipr_resource_entry *res = sata_port->res;
5981	struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg;
5982	struct ipr_cmnd *ipr_cmd;
5983	struct ipr_ioarcb *ioarcb;
5984	struct ipr_ioarcb_ata_regs *regs;
5985
5986	if (unlikely(!ioa_cfg->allow_cmds || ioa_cfg->ioa_is_dead))
5987		return AC_ERR_SYSTEM;
5988
5989	ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
5990	ioarcb = &ipr_cmd->ioarcb;
5991
5992	if (ioa_cfg->sis64) {
5993		regs = &ipr_cmd->i.ata_ioadl.regs;
5994		ioarcb->add_cmd_parms_offset = cpu_to_be16(sizeof(*ioarcb));
5995	} else
5996		regs = &ioarcb->u.add_data.u.regs;
5997
5998	memset(regs, 0, sizeof(*regs));
5999	ioarcb->add_cmd_parms_len = cpu_to_be16(sizeof(*regs));
6000
6001	list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);
6002	ipr_cmd->qc = qc;
6003	ipr_cmd->done = ipr_sata_done;
6004	ipr_cmd->ioarcb.res_handle = res->res_handle;
6005	ioarcb->cmd_pkt.request_type = IPR_RQTYPE_ATA_PASSTHRU;
6006	ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_LINK_DESC;
6007	ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
6008	ipr_cmd->dma_use_sg = qc->n_elem;
6009
6010	if (ioa_cfg->sis64)
6011		ipr_build_ata_ioadl64(ipr_cmd, qc);
6012	else
6013		ipr_build_ata_ioadl(ipr_cmd, qc);
6014
6015	regs->flags |= IPR_ATA_FLAG_STATUS_ON_GOOD_COMPLETION;
6016	ipr_copy_sata_tf(regs, &qc->tf);
6017	memcpy(ioarcb->cmd_pkt.cdb, qc->cdb, IPR_MAX_CDB_LEN);
6018	ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_GET_RES_PHYS_LOC(res));
6019
6020	switch (qc->tf.protocol) {
6021	case ATA_PROT_NODATA:
6022	case ATA_PROT_PIO:
6023		break;
6024
6025	case ATA_PROT_DMA:
6026		regs->flags |= IPR_ATA_FLAG_XFER_TYPE_DMA;
6027		break;
6028
6029	case ATAPI_PROT_PIO:
6030	case ATAPI_PROT_NODATA:
6031		regs->flags |= IPR_ATA_FLAG_PACKET_CMD;
6032		break;
6033
6034	case ATAPI_PROT_DMA:
6035		regs->flags |= IPR_ATA_FLAG_PACKET_CMD;
6036		regs->flags |= IPR_ATA_FLAG_XFER_TYPE_DMA;
6037		break;
6038
6039	default:
6040		WARN_ON(1);
6041		return AC_ERR_INVALID;
6042	}
6043
6044	mb();
6045
6046	ipr_send_command(ipr_cmd);
6047
6048	return 0;
6049}
6050
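/*
 * Protocol to flag mapping used above, in brief: PIO and non-data commands
 * need no extra flags, DMA protocols add IPR_ATA_FLAG_XFER_TYPE_DMA, and
 * ATAPI protocols add IPR_ATA_FLAG_PACKET_CMD (plus the DMA flag for ATAPI
 * DMA).  Any other protocol is rejected with AC_ERR_INVALID.
 */
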
6051/**
6052 * ipr_qc_fill_rtf - Read result TF
6053 * @qc: ATA queued command
6054 *
6055 * Return value:
6056 * 	true
6057 **/
6058static bool ipr_qc_fill_rtf(struct ata_queued_cmd *qc)
6059{
6060	struct ipr_sata_port *sata_port = qc->ap->private_data;
6061	struct ipr_ioasa_gata *g = &sata_port->ioasa;
6062	struct ata_taskfile *tf = &qc->result_tf;
6063
6064	tf->feature = g->error;
6065	tf->nsect = g->nsect;
6066	tf->lbal = g->lbal;
6067	tf->lbam = g->lbam;
6068	tf->lbah = g->lbah;
6069	tf->device = g->device;
6070	tf->command = g->status;
6071	tf->hob_nsect = g->hob_nsect;
6072	tf->hob_lbal = g->hob_lbal;
6073	tf->hob_lbam = g->hob_lbam;
6074	tf->hob_lbah = g->hob_lbah;
6075	tf->ctl = g->alt_status;
6076
6077	return true;
6078}
6079
6080static struct ata_port_operations ipr_sata_ops = {
6081	.phy_reset = ipr_ata_phy_reset,
6082	.hardreset = ipr_sata_reset,
6083	.post_internal_cmd = ipr_ata_post_internal,
6084	.qc_prep = ata_noop_qc_prep,
6085	.qc_issue = ipr_qc_issue,
6086	.qc_fill_rtf = ipr_qc_fill_rtf,
6087	.port_start = ata_sas_port_start,
6088	.port_stop = ata_sas_port_stop
6089};
6090
6091static struct ata_port_info sata_port_info = {
6092	.flags	= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY | ATA_FLAG_SATA_RESET |
6093	ATA_FLAG_MMIO | ATA_FLAG_PIO_DMA,
6094	.pio_mask	= 0x10, /* pio4 */
6095	.mwdma_mask = 0x07,
6096	.udma_mask	= 0x7f, /* udma0-6 */
6097	.port_ops	= &ipr_sata_ops
6098};
6099
6100#ifdef CONFIG_PPC_PSERIES
6101static const u16 ipr_blocked_processors[] = {
6102	PV_NORTHSTAR,
6103	PV_PULSAR,
6104	PV_POWER4,
6105	PV_ICESTAR,
6106	PV_SSTAR,
6107	PV_POWER4p,
6108	PV_630,
6109	PV_630p
6110};
6111
6112/**
6113 * ipr_invalid_adapter - Determine if this adapter is supported on this hardware
6114 * @ioa_cfg:	ioa cfg struct
6115 *
6116 * Adapters that use Gemstone revision < 3.1 do not work reliably on
6117 * certain pSeries hardware. This function determines if the given
6118 * adapter is in one of these configurations or not.
6119 *
6120 * Return value:
6121 * 	1 if adapter is not supported / 0 if adapter is supported
6122 **/
6123static int ipr_invalid_adapter(struct ipr_ioa_cfg *ioa_cfg)
6124{
6125	int i;
6126
6127	if ((ioa_cfg->type == 0x5702) && (ioa_cfg->pdev->revision < 4)) {
6128		for (i = 0; i < ARRAY_SIZE(ipr_blocked_processors); i++) {
6129			if (__is_processor(ipr_blocked_processors[i]))
6130				return 1;
6131		}
6132	}
6133	return 0;
6134}
6135#else
6136#define ipr_invalid_adapter(ioa_cfg) 0
6137#endif
6138
6139/**
6140 * ipr_ioa_bringdown_done - IOA bring down completion.
6141 * @ipr_cmd:	ipr command struct
6142 *
6143 * This function processes the completion of an adapter bring down.
6144 * It wakes any reset sleepers.
6145 *
6146 * Return value:
6147 * 	IPR_RC_JOB_RETURN
6148 **/
6149static int ipr_ioa_bringdown_done(struct ipr_cmnd *ipr_cmd)
6150{
6151	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6152
6153	ENTER;
6154	ioa_cfg->in_reset_reload = 0;
6155	ioa_cfg->reset_retries = 0;
6156	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
6157	wake_up_all(&ioa_cfg->reset_wait_q);
6158
6159	spin_unlock_irq(ioa_cfg->host->host_lock);
6160	scsi_unblock_requests(ioa_cfg->host);
6161	spin_lock_irq(ioa_cfg->host->host_lock);
6162	LEAVE;
6163
6164	return IPR_RC_JOB_RETURN;
6165}
6166
6167/**
6168 * ipr_ioa_reset_done - IOA reset completion.
6169 * @ipr_cmd:	ipr command struct
6170 *
6171 * This function processes the completion of an adapter reset.
6172 * It schedules any necessary mid-layer add/removes and
6173 * wakes any reset sleepers.
6174 *
6175 * Return value:
6176 * 	IPR_RC_JOB_RETURN
6177 **/
6178static int ipr_ioa_reset_done(struct ipr_cmnd *ipr_cmd)
6179{
6180	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6181	struct ipr_resource_entry *res;
6182	struct ipr_hostrcb *hostrcb, *temp;
6183	int i = 0;
6184
6185	ENTER;
6186	ioa_cfg->in_reset_reload = 0;
6187	ioa_cfg->allow_cmds = 1;
6188	ioa_cfg->reset_cmd = NULL;
6189	ioa_cfg->doorbell |= IPR_RUNTIME_RESET;
6190
6191	list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
6192		if (ioa_cfg->allow_ml_add_del && (res->add_to_ml || res->del_from_ml)) {
6193			ipr_trace;
6194			break;
6195		}
6196	}
6197	schedule_work(&ioa_cfg->work_q);
6198
6199	list_for_each_entry_safe(hostrcb, temp, &ioa_cfg->hostrcb_free_q, queue) {
6200		list_del(&hostrcb->queue);
6201		if (i++ < IPR_NUM_LOG_HCAMS)
6202			ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_LOG_DATA, hostrcb);
6203		else
6204			ipr_send_hcam(ioa_cfg, IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE, hostrcb);
6205	}
6206
6207	scsi_report_bus_reset(ioa_cfg->host, IPR_VSET_BUS);
6208	dev_info(&ioa_cfg->pdev->dev, "IOA initialized.\n");
6209
6210	ioa_cfg->reset_retries = 0;
6211	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
6212	wake_up_all(&ioa_cfg->reset_wait_q);
6213
6214	spin_unlock(ioa_cfg->host->host_lock);
6215	scsi_unblock_requests(ioa_cfg->host);
6216	spin_lock(ioa_cfg->host->host_lock);
6217
6218	if (!ioa_cfg->allow_cmds)
6219		scsi_block_requests(ioa_cfg->host);
6220
6221	LEAVE;
6222	return IPR_RC_JOB_RETURN;
6223}
6224
6225/**
6226 * ipr_set_sup_dev_dflt - Initialize a Set Supported Device buffer
6227 * @supported_dev:	supported device struct
6228 * @vpids:			vendor product id struct
6229 *
6230 * Return value:
6231 * 	none
6232 **/
6233static void ipr_set_sup_dev_dflt(struct ipr_supported_device *supported_dev,
6234				 struct ipr_std_inq_vpids *vpids)
6235{
6236	memset(supported_dev, 0, sizeof(struct ipr_supported_device));
6237	memcpy(&supported_dev->vpids, vpids, sizeof(struct ipr_std_inq_vpids));
6238	supported_dev->num_records = 1;
6239	supported_dev->data_length =
6240		cpu_to_be16(sizeof(struct ipr_supported_device));
6241	supported_dev->reserved = 0;
6242}
6243
6244/**
6245 * ipr_set_supported_devs - Send Set Supported Devices for a device
6246 * @ipr_cmd:	ipr command struct
6247 *
6248 * This function sends a Set Supported Devices to the adapter
6249 *
6250 * Return value:
6251 * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
6252 **/
6253static int ipr_set_supported_devs(struct ipr_cmnd *ipr_cmd)
6254{
6255	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6256	struct ipr_supported_device *supp_dev = &ioa_cfg->vpd_cbs->supp_dev;
6257	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
6258	struct ipr_resource_entry *res = ipr_cmd->u.res;
6259
6260	ipr_cmd->job_step = ipr_ioa_reset_done;
6261
6262	list_for_each_entry_continue(res, &ioa_cfg->used_res_q, queue) {
6263		if (!ipr_is_scsi_disk(res))
6264			continue;
6265
6266		ipr_cmd->u.res = res;
6267		ipr_set_sup_dev_dflt(supp_dev, &res->std_inq_data.vpids);
6268
6269		ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
6270		ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
6271		ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
6272
6273		ioarcb->cmd_pkt.cdb[0] = IPR_SET_SUPPORTED_DEVICES;
6274		ioarcb->cmd_pkt.cdb[1] = IPR_SET_ALL_SUPPORTED_DEVICES;
6275		ioarcb->cmd_pkt.cdb[7] = (sizeof(struct ipr_supported_device) >> 8) & 0xff;
6276		ioarcb->cmd_pkt.cdb[8] = sizeof(struct ipr_supported_device) & 0xff;
6277
6278		ipr_init_ioadl(ipr_cmd,
6279			       ioa_cfg->vpd_cbs_dma +
6280				 offsetof(struct ipr_misc_cbs, supp_dev),
6281			       sizeof(struct ipr_supported_device),
6282			       IPR_IOADL_FLAGS_WRITE_LAST);
6283
6284		ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
6285			   IPR_SET_SUP_DEVICE_TIMEOUT);
6286
6287		if (!ioa_cfg->sis64)
6288			ipr_cmd->job_step = ipr_set_supported_devs;
6289		return IPR_RC_JOB_RETURN;
6290	}
6291
6292	return IPR_RC_JOB_CONTINUE;
6293}
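/*
 * Note: this step issues one Set Supported Devices command per SCSI disk.
 * On non-SIS-64 adapters job_step is pointed back at ipr_set_supported_devs
 * so the next completion resumes the walk (list_for_each_entry_continue
 * picks up from ipr_cmd->u.res); once every disk has been covered the job
 * falls through to ipr_ioa_reset_done.
 */
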
6294
6295/**
6296 * ipr_get_mode_page - Locate specified mode page
6297 * @mode_pages:	mode page buffer
6298 * @page_code:	page code to find
6299 * @len:		minimum required length for mode page
6300 *
6301 * Return value:
6302 * 	pointer to mode page / NULL on failure
6303 **/
6304static void *ipr_get_mode_page(struct ipr_mode_pages *mode_pages,
6305			       u32 page_code, u32 len)
6306{
6307	struct ipr_mode_page_hdr *mode_hdr;
6308	u32 page_length;
6309	u32 length;
6310
6311	if (!mode_pages || (mode_pages->hdr.length == 0))
6312		return NULL;
6313
6314	length = (mode_pages->hdr.length + 1) - 4 - mode_pages->hdr.block_desc_len;
6315	mode_hdr = (struct ipr_mode_page_hdr *)
6316		(mode_pages->data + mode_pages->hdr.block_desc_len);
6317
6318	while (length) {
6319		if (IPR_GET_MODE_PAGE_CODE(mode_hdr) == page_code) {
6320			if (mode_hdr->page_length >= (len - sizeof(struct ipr_mode_page_hdr)))
6321				return mode_hdr;
6322			break;
6323		} else {
6324			page_length = (sizeof(struct ipr_mode_page_hdr) +
6325				       mode_hdr->page_length);
6326			length -= page_length;
6327			mode_hdr = (struct ipr_mode_page_hdr *)
6328				((unsigned long)mode_hdr + page_length);
6329		}
6330	}
6331	return NULL;
6332}
6333
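/*
 * Worked example of the length arithmetic above (values are illustrative):
 * with a MODE SENSE(6) header, hdr.length counts everything after itself,
 * so hdr.length + 1 is the total returned data.  If hdr.length = 0x23 (35)
 * and hdr.block_desc_len = 8, the mode pages occupy 36 - 4 - 8 = 24 bytes,
 * and the walk steps through them using each page header's page_length.
 */
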
6334/**
6335 * ipr_check_term_power - Check for term power errors
6336 * @ioa_cfg:	ioa config struct
6337 * @mode_pages:	IOAFP mode pages buffer
6338 *
6339 * Check the IOAFP's mode page 28 for term power errors
6340 *
6341 * Return value:
6342 * 	nothing
6343 **/
6344static void ipr_check_term_power(struct ipr_ioa_cfg *ioa_cfg,
6345				 struct ipr_mode_pages *mode_pages)
6346{
6347	int i;
6348	int entry_length;
6349	struct ipr_dev_bus_entry *bus;
6350	struct ipr_mode_page28 *mode_page;
6351
6352	mode_page = ipr_get_mode_page(mode_pages, 0x28,
6353				      sizeof(struct ipr_mode_page28));
6354
6355	entry_length = mode_page->entry_length;
6356
6357	bus = mode_page->bus;
6358
6359	for (i = 0; i < mode_page->num_entries; i++) {
6360		if (bus->flags & IPR_SCSI_ATTR_NO_TERM_PWR) {
6361			dev_err(&ioa_cfg->pdev->dev,
6362				"Term power is absent on scsi bus %d\n",
6363				bus->res_addr.bus);
6364		}
6365
6366		bus = (struct ipr_dev_bus_entry *)((char *)bus + entry_length);
6367	}
6368}
6369
6370/**
6371 * ipr_scsi_bus_speed_limit - Limit the SCSI speed based on SES table
6372 * @ioa_cfg:	ioa config struct
6373 *
6374 * Looks through the config table for SES devices. If an SES device
6375 * has an entry in the SES table that specifies a maximum SCSI bus
6376 * speed, the speed of that bus is limited accordingly.
6377 *
6378 * Return value:
6379 * 	none
6380 **/
6381static void ipr_scsi_bus_speed_limit(struct ipr_ioa_cfg *ioa_cfg)
6382{
6383	u32 max_xfer_rate;
6384	int i;
6385
6386	for (i = 0; i < IPR_MAX_NUM_BUSES; i++) {
6387		max_xfer_rate = ipr_get_max_scsi_speed(ioa_cfg, i,
6388						       ioa_cfg->bus_attr[i].bus_width);
6389
6390		if (max_xfer_rate < ioa_cfg->bus_attr[i].max_xfer_rate)
6391			ioa_cfg->bus_attr[i].max_xfer_rate = max_xfer_rate;
6392	}
6393}
6394
6395/**
6396 * ipr_modify_ioafp_mode_page_28 - Modify IOAFP Mode Page 28
6397 * @ioa_cfg:	ioa config struct
6398 * @mode_pages:	mode page 28 buffer
6399 *
6400 * Updates mode page 28 based on driver configuration
6401 *
6402 * Return value:
6403 * 	none
6404 **/
6405static void ipr_modify_ioafp_mode_page_28(struct ipr_ioa_cfg *ioa_cfg,
6406					  	struct ipr_mode_pages *mode_pages)
6407{
6408	int i, entry_length;
6409	struct ipr_dev_bus_entry *bus;
6410	struct ipr_bus_attributes *bus_attr;
6411	struct ipr_mode_page28 *mode_page;
6412
6413	mode_page = ipr_get_mode_page(mode_pages, 0x28,
6414				      sizeof(struct ipr_mode_page28));
6415
6416	entry_length = mode_page->entry_length;
6417
6418	/* Loop for each device bus entry */
6419	for (i = 0, bus = mode_page->bus;
6420	     i < mode_page->num_entries;
6421	     i++, bus = (struct ipr_dev_bus_entry *)((u8 *)bus + entry_length)) {
6422		if (bus->res_addr.bus > IPR_MAX_NUM_BUSES) {
6423			dev_err(&ioa_cfg->pdev->dev,
6424				"Invalid resource address reported: 0x%08X\n",
6425				IPR_GET_PHYS_LOC(bus->res_addr));
6426			continue;
6427		}
6428
6429		bus_attr = &ioa_cfg->bus_attr[i];
6430		bus->extended_reset_delay = IPR_EXTENDED_RESET_DELAY;
6431		bus->bus_width = bus_attr->bus_width;
6432		bus->max_xfer_rate = cpu_to_be32(bus_attr->max_xfer_rate);
6433		bus->flags &= ~IPR_SCSI_ATTR_QAS_MASK;
6434		if (bus_attr->qas_enabled)
6435			bus->flags |= IPR_SCSI_ATTR_ENABLE_QAS;
6436		else
6437			bus->flags |= IPR_SCSI_ATTR_DISABLE_QAS;
6438	}
6439}
6440
6441/**
6442 * ipr_build_mode_select - Build a mode select command
6443 * @ipr_cmd:	ipr command struct
6444 * @res_handle:	resource handle to send command to
6445 * @parm:		Byte 1 of Mode Select command
6446 * @dma_addr:	DMA buffer address
6447 * @xfer_len:	data transfer length
6448 *
6449 * Return value:
6450 * 	none
6451 **/
6452static void ipr_build_mode_select(struct ipr_cmnd *ipr_cmd,
6453				  __be32 res_handle, u8 parm,
6454				  dma_addr_t dma_addr, u8 xfer_len)
6455{
6456	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
6457
6458	ioarcb->res_handle = res_handle;
6459	ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
6460	ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
6461	ioarcb->cmd_pkt.cdb[0] = MODE_SELECT;
6462	ioarcb->cmd_pkt.cdb[1] = parm;
6463	ioarcb->cmd_pkt.cdb[4] = xfer_len;
6464
6465	ipr_init_ioadl(ipr_cmd, dma_addr, xfer_len, IPR_IOADL_FLAGS_WRITE_LAST);
6466}
6467
6468/**
6469 * ipr_ioafp_mode_select_page28 - Issue Mode Select Page 28 to IOA
6470 * @ipr_cmd:	ipr command struct
6471 *
6472 * This function sets up the SCSI bus attributes and sends
6473 * a Mode Select for Page 28 to activate them.
6474 *
6475 * Return value:
6476 * 	IPR_RC_JOB_RETURN
6477 **/
6478static int ipr_ioafp_mode_select_page28(struct ipr_cmnd *ipr_cmd)
6479{
6480	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6481	struct ipr_mode_pages *mode_pages = &ioa_cfg->vpd_cbs->mode_pages;
6482	int length;
6483
6484	ENTER;
6485	ipr_scsi_bus_speed_limit(ioa_cfg);
6486	ipr_check_term_power(ioa_cfg, mode_pages);
6487	ipr_modify_ioafp_mode_page_28(ioa_cfg, mode_pages);
6488	length = mode_pages->hdr.length + 1;
6489	mode_pages->hdr.length = 0;
6490
6491	ipr_build_mode_select(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE), 0x11,
6492			      ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, mode_pages),
6493			      length);
6494
6495	ipr_cmd->job_step = ipr_set_supported_devs;
6496	ipr_cmd->u.res = list_entry(ioa_cfg->used_res_q.next,
6497				    struct ipr_resource_entry, queue);
6498	ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
6499
6500	LEAVE;
6501	return IPR_RC_JOB_RETURN;
6502}
6503
6504/**
6505 * ipr_build_mode_sense - Builds a mode sense command
6506 * @ipr_cmd:	ipr command struct
6507 * @res_handle:	resource handle to send command to
6508 * @parm:		Byte 2 of mode sense command
6509 * @dma_addr:	DMA address of mode sense buffer
6510 * @xfer_len:	Size of DMA buffer
6511 *
6512 * Return value:
6513 * 	none
6514 **/
6515static void ipr_build_mode_sense(struct ipr_cmnd *ipr_cmd,
6516				 __be32 res_handle,
6517				 u8 parm, dma_addr_t dma_addr, u8 xfer_len)
6518{
6519	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
6520
6521	ioarcb->res_handle = res_handle;
6522	ioarcb->cmd_pkt.cdb[0] = MODE_SENSE;
6523	ioarcb->cmd_pkt.cdb[2] = parm;
6524	ioarcb->cmd_pkt.cdb[4] = xfer_len;
6525	ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
6526
6527	ipr_init_ioadl(ipr_cmd, dma_addr, xfer_len, IPR_IOADL_FLAGS_READ_LAST);
6528}
6529
6530/**
6531 * ipr_reset_cmd_failed - Handle failure of IOA reset command
6532 * @ipr_cmd:	ipr command struct
6533 *
6534 * This function handles the failure of an IOA bringup command.
6535 *
6536 * Return value:
6537 * 	IPR_RC_JOB_RETURN
6538 **/
6539static int ipr_reset_cmd_failed(struct ipr_cmnd *ipr_cmd)
6540{
6541	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6542	u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
6543
6544	dev_err(&ioa_cfg->pdev->dev,
6545		"0x%02X failed with IOASC: 0x%08X\n",
6546		ipr_cmd->ioarcb.cmd_pkt.cdb[0], ioasc);
6547
6548	ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
6549	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
6550	return IPR_RC_JOB_RETURN;
6551}
6552
6553/**
6554 * ipr_reset_mode_sense_failed - Handle failure of IOAFP mode sense
6555 * @ipr_cmd:	ipr command struct
6556 *
6557 * This function handles the failure of a Mode Sense to the IOAFP.
6558 * Some adapters do not handle all mode pages.
6559 *
6560 * Return value:
6561 * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
6562 **/
6563static int ipr_reset_mode_sense_failed(struct ipr_cmnd *ipr_cmd)
6564{
6565	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6566	u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
6567
6568	if (ioasc == IPR_IOASC_IR_INVALID_REQ_TYPE_OR_PKT) {
6569		ipr_cmd->job_step = ipr_set_supported_devs;
6570		ipr_cmd->u.res = list_entry(ioa_cfg->used_res_q.next,
6571					    struct ipr_resource_entry, queue);
6572		return IPR_RC_JOB_CONTINUE;
6573	}
6574
6575	return ipr_reset_cmd_failed(ipr_cmd);
6576}
6577
6578/**
6579 * ipr_ioafp_mode_sense_page28 - Issue Mode Sense Page 28 to IOA
6580 * @ipr_cmd:	ipr command struct
6581 *
6582 * This function sends a Page 28 mode sense to the IOA to
6583 * retrieve SCSI bus attributes.
6584 *
6585 * Return value:
6586 * 	IPR_RC_JOB_RETURN
6587 **/
6588static int ipr_ioafp_mode_sense_page28(struct ipr_cmnd *ipr_cmd)
6589{
6590	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6591
6592	ENTER;
6593	ipr_build_mode_sense(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE),
6594			     0x28, ioa_cfg->vpd_cbs_dma +
6595			     offsetof(struct ipr_misc_cbs, mode_pages),
6596			     sizeof(struct ipr_mode_pages));
6597
6598	ipr_cmd->job_step = ipr_ioafp_mode_select_page28;
6599	ipr_cmd->job_step_failed = ipr_reset_mode_sense_failed;
6600
6601	ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
6602
6603	LEAVE;
6604	return IPR_RC_JOB_RETURN;
6605}
6606
6607/**
6608 * ipr_ioafp_mode_select_page24 - Issue Mode Select to IOA
6609 * @ipr_cmd:	ipr command struct
6610 *
6611 * This function enables dual IOA RAID support if possible.
6612 *
6613 * Return value:
6614 * 	IPR_RC_JOB_RETURN
6615 **/
6616static int ipr_ioafp_mode_select_page24(struct ipr_cmnd *ipr_cmd)
6617{
6618	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6619	struct ipr_mode_pages *mode_pages = &ioa_cfg->vpd_cbs->mode_pages;
6620	struct ipr_mode_page24 *mode_page;
6621	int length;
6622
6623	ENTER;
6624	mode_page = ipr_get_mode_page(mode_pages, 0x24,
6625				      sizeof(struct ipr_mode_page24));
6626
6627	if (mode_page)
6628		mode_page->flags |= IPR_ENABLE_DUAL_IOA_AF;
6629
6630	length = mode_pages->hdr.length + 1;
6631	mode_pages->hdr.length = 0;
6632
6633	ipr_build_mode_select(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE), 0x11,
6634			      ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, mode_pages),
6635			      length);
6636
6637	ipr_cmd->job_step = ipr_ioafp_mode_sense_page28;
6638	ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
6639
6640	LEAVE;
6641	return IPR_RC_JOB_RETURN;
6642}
6643
6644/**
6645 * ipr_reset_mode_sense_page24_failed - Handle failure of IOAFP mode sense
6646 * @ipr_cmd:	ipr command struct
6647 *
6648 * This function handles the failure of a Mode Sense to the IOAFP.
6649 * Some adapters do not handle all mode pages.
6650 *
6651 * Return value:
6652 * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
6653 **/
6654static int ipr_reset_mode_sense_page24_failed(struct ipr_cmnd *ipr_cmd)
6655{
6656	u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
6657
6658	if (ioasc == IPR_IOASC_IR_INVALID_REQ_TYPE_OR_PKT) {
6659		ipr_cmd->job_step = ipr_ioafp_mode_sense_page28;
6660		return IPR_RC_JOB_CONTINUE;
6661	}
6662
6663	return ipr_reset_cmd_failed(ipr_cmd);
6664}
6665
6666/**
6667 * ipr_ioafp_mode_sense_page24 - Issue Page 24 Mode Sense to IOA
6668 * @ipr_cmd:	ipr command struct
6669 *
6670 * This function sends a mode sense to the IOA to retrieve
6671 * the IOA Advanced Function Control mode page.
6672 *
6673 * Return value:
6674 * 	IPR_RC_JOB_RETURN
6675 **/
6676static int ipr_ioafp_mode_sense_page24(struct ipr_cmnd *ipr_cmd)
6677{
6678	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6679
6680	ENTER;
6681	ipr_build_mode_sense(ipr_cmd, cpu_to_be32(IPR_IOA_RES_HANDLE),
6682			     0x24, ioa_cfg->vpd_cbs_dma +
6683			     offsetof(struct ipr_misc_cbs, mode_pages),
6684			     sizeof(struct ipr_mode_pages));
6685
6686	ipr_cmd->job_step = ipr_ioafp_mode_select_page24;
6687	ipr_cmd->job_step_failed = ipr_reset_mode_sense_page24_failed;
6688
6689	ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
6690
6691	LEAVE;
6692	return IPR_RC_JOB_RETURN;
6693}
6694
6695/**
6696 * ipr_init_res_table - Initialize the resource table
6697 * @ipr_cmd:	ipr command struct
6698 *
6699 * This function looks through the existing resource table, comparing
6700 * it with the config table. It takes care of old and new devices and
6701 * schedules adding or removing them from the mid-layer as
6702 * appropriate.
6703 *
6704 * Return value:
6705 * 	IPR_RC_JOB_CONTINUE
6706 **/
6707static int ipr_init_res_table(struct ipr_cmnd *ipr_cmd)
6708{
6709	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6710	struct ipr_resource_entry *res, *temp;
6711	struct ipr_config_table_entry_wrapper cfgtew;
6712	int entries, found, flag, i;
6713	LIST_HEAD(old_res);
6714
6715	ENTER;
6716	if (ioa_cfg->sis64)
6717		flag = ioa_cfg->u.cfg_table64->hdr64.flags;
6718	else
6719		flag = ioa_cfg->u.cfg_table->hdr.flags;
6720
6721	if (flag & IPR_UCODE_DOWNLOAD_REQ)
6722		dev_err(&ioa_cfg->pdev->dev, "Microcode download required\n");
6723
6724	list_for_each_entry_safe(res, temp, &ioa_cfg->used_res_q, queue)
6725		list_move_tail(&res->queue, &old_res);
6726
6727	if (ioa_cfg->sis64)
6728		entries = be16_to_cpu(ioa_cfg->u.cfg_table64->hdr64.num_entries);
6729	else
6730		entries = ioa_cfg->u.cfg_table->hdr.num_entries;
6731
6732	for (i = 0; i < entries; i++) {
6733		if (ioa_cfg->sis64)
6734			cfgtew.u.cfgte64 = &ioa_cfg->u.cfg_table64->dev[i];
6735		else
6736			cfgtew.u.cfgte = &ioa_cfg->u.cfg_table->dev[i];
6737		found = 0;
6738
6739		list_for_each_entry_safe(res, temp, &old_res, queue) {
6740			if (ipr_is_same_device(res, &cfgtew)) {
6741				list_move_tail(&res->queue, &ioa_cfg->used_res_q);
6742				found = 1;
6743				break;
6744			}
6745		}
6746
6747		if (!found) {
6748			if (list_empty(&ioa_cfg->free_res_q)) {
6749				dev_err(&ioa_cfg->pdev->dev, "Too many devices attached\n");
6750				break;
6751			}
6752
6753			found = 1;
6754			res = list_entry(ioa_cfg->free_res_q.next,
6755					 struct ipr_resource_entry, queue);
6756			list_move_tail(&res->queue, &ioa_cfg->used_res_q);
6757			ipr_init_res_entry(res, &cfgtew);
6758			res->add_to_ml = 1;
6759		}
6760
6761		if (found)
6762			ipr_update_res_entry(res, &cfgtew);
6763	}
6764
6765	list_for_each_entry_safe(res, temp, &old_res, queue) {
6766		if (res->sdev) {
6767			res->del_from_ml = 1;
6768			res->res_handle = IPR_INVALID_RES_HANDLE;
6769			list_move_tail(&res->queue, &ioa_cfg->used_res_q);
6770		}
6771	}
6772
6773	list_for_each_entry_safe(res, temp, &old_res, queue) {
6774		ipr_clear_res_target(res);
6775		list_move_tail(&res->queue, &ioa_cfg->free_res_q);
6776	}
6777
6778	if (ioa_cfg->dual_raid && ipr_dual_ioa_raid)
6779		ipr_cmd->job_step = ipr_ioafp_mode_sense_page24;
6780	else
6781		ipr_cmd->job_step = ipr_ioafp_mode_sense_page28;
6782
6783	LEAVE;
6784	return IPR_RC_JOB_CONTINUE;
6785}
6786
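/*
 * Resource table rebuild, in brief: every known resource is first moved to
 * a local old_res list, then each config table entry is matched against it.
 * Matches move back to used_res_q and are refreshed; unmatched entries get
 * a free resource entry and are flagged add_to_ml.  Whatever is left on
 * old_res is either flagged del_from_ml (if it still has an sdev) or
 * returned to free_res_q.
 */
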
6787/**
6788 * ipr_ioafp_query_ioa_cfg - Send a Query IOA Config to the adapter.
6789 * @ipr_cmd:	ipr command struct
6790 *
6791 * This function sends a Query IOA Configuration command
6792 * to the adapter to retrieve the IOA configuration table.
6793 *
6794 * Return value:
6795 * 	IPR_RC_JOB_RETURN
6796 **/
6797static int ipr_ioafp_query_ioa_cfg(struct ipr_cmnd *ipr_cmd)
6798{
6799	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6800	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
6801	struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
6802	struct ipr_inquiry_cap *cap = &ioa_cfg->vpd_cbs->cap;
6803
6804	ENTER;
6805	if (cap->cap & IPR_CAP_DUAL_IOA_RAID)
6806		ioa_cfg->dual_raid = 1;
6807	dev_info(&ioa_cfg->pdev->dev, "Adapter firmware version: %02X%02X%02X%02X\n",
6808		 ucode_vpd->major_release, ucode_vpd->card_type,
6809		 ucode_vpd->minor_release[0], ucode_vpd->minor_release[1]);
6810	ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
6811	ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
6812
6813	ioarcb->cmd_pkt.cdb[0] = IPR_QUERY_IOA_CONFIG;
6814	ioarcb->cmd_pkt.cdb[6] = (ioa_cfg->cfg_table_size >> 16) & 0xff;
6815	ioarcb->cmd_pkt.cdb[7] = (ioa_cfg->cfg_table_size >> 8) & 0xff;
6816	ioarcb->cmd_pkt.cdb[8] = ioa_cfg->cfg_table_size & 0xff;
6817
6818	ipr_init_ioadl(ipr_cmd, ioa_cfg->cfg_table_dma, ioa_cfg->cfg_table_size,
6819		       IPR_IOADL_FLAGS_READ_LAST);
6820
6821	ipr_cmd->job_step = ipr_init_res_table;
6822
6823	ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
6824
6825	LEAVE;
6826	return IPR_RC_JOB_RETURN;
6827}
6828
6829/**
6830 * ipr_ioafp_inquiry - Send an Inquiry to the adapter.
6831 * @ipr_cmd:	ipr command struct
 * @flags:	inquiry flags (CDB byte 1, e.g. EVPD)
 * @page:	page code to request
 * @dma_addr:	DMA address of the inquiry response buffer
 * @xfer_len:	size of the inquiry response buffer
6832 *
6833 * This utility function sends an inquiry to the adapter.
6834 *
6835 * Return value:
6836 * 	none
6837 **/
6838static void ipr_ioafp_inquiry(struct ipr_cmnd *ipr_cmd, u8 flags, u8 page,
6839			      dma_addr_t dma_addr, u8 xfer_len)
6840{
6841	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
6842
6843	ENTER;
6844	ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
6845	ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
6846
6847	ioarcb->cmd_pkt.cdb[0] = INQUIRY;
6848	ioarcb->cmd_pkt.cdb[1] = flags;
6849	ioarcb->cmd_pkt.cdb[2] = page;
6850	ioarcb->cmd_pkt.cdb[4] = xfer_len;
6851
6852	ipr_init_ioadl(ipr_cmd, dma_addr, xfer_len, IPR_IOADL_FLAGS_READ_LAST);
6853
6854	ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
6855	LEAVE;
6856}
6857
6858/**
6859 * ipr_inquiry_page_supported - Is the given inquiry page supported
6860 * @page0:		inquiry page 0 buffer
6861 * @page:		page code.
6862 *
6863 * This function determines if the specified inquiry page is supported.
6864 *
6865 * Return value:
6866 *	1 if page is supported / 0 if not
6867 **/
6868static int ipr_inquiry_page_supported(struct ipr_inquiry_page0 *page0, u8 page)
6869{
6870	int i;
6871
6872	for (i = 0; i < min_t(u8, page0->len, IPR_INQUIRY_PAGE0_ENTRIES); i++)
6873		if (page0->page[i] == page)
6874			return 1;
6875
6876	return 0;
6877}
6878
6879/**
6880 * ipr_ioafp_cap_inquiry - Send a Page 0xD0 Inquiry to the adapter.
6881 * @ipr_cmd:	ipr command struct
6882 *
6883 * This function sends a Page 0xD0 inquiry to the adapter
6884 * to retrieve adapter capabilities.
6885 *
6886 * Return value:
6887 * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
6888 **/
6889static int ipr_ioafp_cap_inquiry(struct ipr_cmnd *ipr_cmd)
6890{
6891	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6892	struct ipr_inquiry_page0 *page0 = &ioa_cfg->vpd_cbs->page0_data;
6893	struct ipr_inquiry_cap *cap = &ioa_cfg->vpd_cbs->cap;
6894
6895	ENTER;
6896	ipr_cmd->job_step = ipr_ioafp_query_ioa_cfg;
6897	memset(cap, 0, sizeof(*cap));
6898
6899	if (ipr_inquiry_page_supported(page0, 0xD0)) {
6900		ipr_ioafp_inquiry(ipr_cmd, 1, 0xD0,
6901				  ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, cap),
6902				  sizeof(struct ipr_inquiry_cap));
6903		return IPR_RC_JOB_RETURN;
6904	}
6905
6906	LEAVE;
6907	return IPR_RC_JOB_CONTINUE;
6908}
6909
6910/**
6911 * ipr_ioafp_page3_inquiry - Send a Page 3 Inquiry to the adapter.
6912 * @ipr_cmd:	ipr command struct
6913 *
6914 * This function sends a Page 3 inquiry to the adapter
6915 * to retrieve software VPD information.
6916 *
6917 * Return value:
6918 * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
6919 **/
6920static int ipr_ioafp_page3_inquiry(struct ipr_cmnd *ipr_cmd)
6921{
6922	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6923
6924	ENTER;
6925
6926	ipr_cmd->job_step = ipr_ioafp_cap_inquiry;
6927
6928	ipr_ioafp_inquiry(ipr_cmd, 1, 3,
6929			  ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, page3_data),
6930			  sizeof(struct ipr_inquiry_page3));
6931
6932	LEAVE;
6933	return IPR_RC_JOB_RETURN;
6934}
6935
6936/**
6937 * ipr_ioafp_page0_inquiry - Send a Page 0 Inquiry to the adapter.
6938 * @ipr_cmd:	ipr command struct
6939 *
6940 * This function sends a Page 0 inquiry to the adapter
6941 * to retrieve supported inquiry pages.
6942 *
6943 * Return value:
6944 * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
6945 **/
6946static int ipr_ioafp_page0_inquiry(struct ipr_cmnd *ipr_cmd)
6947{
6948	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6949	char type[5];
6950
6951	ENTER;
6952
6953	/* Grab the type out of the VPD and store it away */
6954	memcpy(type, ioa_cfg->vpd_cbs->ioa_vpd.std_inq_data.vpids.product_id, 4);
6955	type[4] = '\0';
6956	ioa_cfg->type = simple_strtoul((char *)type, NULL, 16);
6957
6958	ipr_cmd->job_step = ipr_ioafp_page3_inquiry;
6959
6960	ipr_ioafp_inquiry(ipr_cmd, 1, 0,
6961			  ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, page0_data),
6962			  sizeof(struct ipr_inquiry_page0));
6963
6964	LEAVE;
6965	return IPR_RC_JOB_RETURN;
6966}
6967
6968/**
6969 * ipr_ioafp_std_inquiry - Send a Standard Inquiry to the adapter.
6970 * @ipr_cmd:	ipr command struct
6971 *
6972 * This function sends a standard inquiry to the adapter.
6973 *
6974 * Return value:
6975 * 	IPR_RC_JOB_RETURN
6976 **/
6977static int ipr_ioafp_std_inquiry(struct ipr_cmnd *ipr_cmd)
6978{
6979	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6980
6981	ENTER;
6982	ipr_cmd->job_step = ipr_ioafp_page0_inquiry;
6983
6984	ipr_ioafp_inquiry(ipr_cmd, 0, 0,
6985			  ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, ioa_vpd),
6986			  sizeof(struct ipr_ioa_vpd));
6987
6988	LEAVE;
6989	return IPR_RC_JOB_RETURN;
6990}
6991
6992/**
6993 * ipr_ioafp_identify_hrrq - Send Identify Host RRQ.
6994 * @ipr_cmd:	ipr command struct
6995 *
6996 * This function sends an Identify Host Request Response Queue
6997 * command to establish the HRRQ with the adapter.
6998 *
6999 * Return value:
7000 * 	IPR_RC_JOB_RETURN
7001 **/
7002static int ipr_ioafp_identify_hrrq(struct ipr_cmnd *ipr_cmd)
7003{
7004	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7005	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
7006
7007	ENTER;
7008	dev_info(&ioa_cfg->pdev->dev, "Starting IOA initialization sequence.\n");
7009
7010	ioarcb->cmd_pkt.cdb[0] = IPR_ID_HOST_RR_Q;
7011	ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
7012
7013	ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
7014	if (ioa_cfg->sis64)
7015		ioarcb->cmd_pkt.cdb[1] = 0x1;
7016	ioarcb->cmd_pkt.cdb[2] =
7017		((u64) ioa_cfg->host_rrq_dma >> 24) & 0xff;
7018	ioarcb->cmd_pkt.cdb[3] =
7019		((u64) ioa_cfg->host_rrq_dma >> 16) & 0xff;
7020	ioarcb->cmd_pkt.cdb[4] =
7021		((u64) ioa_cfg->host_rrq_dma >> 8) & 0xff;
7022	ioarcb->cmd_pkt.cdb[5] =
7023		((u64) ioa_cfg->host_rrq_dma) & 0xff;
7024	ioarcb->cmd_pkt.cdb[7] =
7025		((sizeof(u32) * IPR_NUM_CMD_BLKS) >> 8) & 0xff;
7026	ioarcb->cmd_pkt.cdb[8] =
7027		(sizeof(u32) * IPR_NUM_CMD_BLKS) & 0xff;
7028
7029	if (ioa_cfg->sis64) {
7030		ioarcb->cmd_pkt.cdb[10] =
7031			((u64) ioa_cfg->host_rrq_dma >> 56) & 0xff;
7032		ioarcb->cmd_pkt.cdb[11] =
7033			((u64) ioa_cfg->host_rrq_dma >> 48) & 0xff;
7034		ioarcb->cmd_pkt.cdb[12] =
7035			((u64) ioa_cfg->host_rrq_dma >> 40) & 0xff;
7036		ioarcb->cmd_pkt.cdb[13] =
7037			((u64) ioa_cfg->host_rrq_dma >> 32) & 0xff;
7038	}
7039
7040	ipr_cmd->job_step = ipr_ioafp_std_inquiry;
7041
7042	ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
7043
7044	LEAVE;
7045	return IPR_RC_JOB_RETURN;
7046}
7047
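/*
 * Identify Host RRQ CDB layout, as built above: byte 1 is 0x1 on SIS-64
 * adapters, bytes 2-5 carry the low 32 bits of host_rrq_dma (big-endian),
 * bytes 7-8 carry the queue size in bytes, and bytes 10-13 carry the upper
 * 32 bits of the address on SIS-64.
 *
 * From here the bring-up job chain runs one job_step at a time:
 *   identify_hrrq -> std_inquiry -> page0/page3/cap inquiries ->
 *   query_ioa_cfg -> init_res_table -> mode sense/select page 24 (dual IOA
 *   RAID, when supported) and page 28 -> set_supported_devs ->
 *   ipr_ioa_reset_done.
 */
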
7048/**
7049 * ipr_reset_timer_done - Adapter reset timer function
7050 * @ipr_cmd:	ipr command struct
7051 *
7052 * Description: This function is used in adapter reset processing
7053 * for timing events. If the reset_cmd pointer in the IOA
7054 * config struct is not this adapter's we are doing nested
7055 * resets and fail_all_ops will take care of freeing the
7056 * command block.
7057 *
7058 * Return value:
7059 * 	none
7060 **/
7061static void ipr_reset_timer_done(struct ipr_cmnd *ipr_cmd)
7062{
7063	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7064	unsigned long lock_flags = 0;
7065
7066	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
7067
7068	if (ioa_cfg->reset_cmd == ipr_cmd) {
7069		list_del(&ipr_cmd->queue);
7070		ipr_cmd->done(ipr_cmd);
7071	}
7072
7073	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
7074}
7075
7076/**
7077 * ipr_reset_start_timer - Start a timer for adapter reset job
7078 * @ipr_cmd:	ipr command struct
7079 * @timeout:	timeout value
7080 *
7081 * Description: This function is used in adapter reset processing
7082 * for timing events. If the reset_cmd pointer in the IOA
7083 * config struct is not this adapter's we are doing nested
7084 * resets and fail_all_ops will take care of freeing the
7085 * command block.
7086 *
7087 * Return value:
7088 * 	none
7089 **/
7090static void ipr_reset_start_timer(struct ipr_cmnd *ipr_cmd,
7091				  unsigned long timeout)
7092{
7093	list_add_tail(&ipr_cmd->queue, &ipr_cmd->ioa_cfg->pending_q);
7094	ipr_cmd->done = ipr_reset_ioa_job;
7095
7096	ipr_cmd->timer.data = (unsigned long) ipr_cmd;
7097	ipr_cmd->timer.expires = jiffies + timeout;
7098	ipr_cmd->timer.function = (void (*)(unsigned long))ipr_reset_timer_done;
7099	add_timer(&ipr_cmd->timer);
7100}
7101
7102/**
7103 * ipr_init_ioa_mem - Initialize ioa_cfg control block
7104 * @ioa_cfg:	ioa cfg struct
7105 *
7106 * Return value:
7107 * 	nothing
7108 **/
7109static void ipr_init_ioa_mem(struct ipr_ioa_cfg *ioa_cfg)
7110{
7111	memset(ioa_cfg->host_rrq, 0, sizeof(u32) * IPR_NUM_CMD_BLKS);
7112
7113	/* Initialize Host RRQ pointers */
7114	ioa_cfg->hrrq_start = ioa_cfg->host_rrq;
7115	ioa_cfg->hrrq_end = &ioa_cfg->host_rrq[IPR_NUM_CMD_BLKS - 1];
7116	ioa_cfg->hrrq_curr = ioa_cfg->hrrq_start;
7117	ioa_cfg->toggle_bit = 1;
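	/*
	 * toggle_bit tracks which polarity of the HRRQ toggle flag marks new
	 * response entries; the interrupt handler flips it each time the
	 * queue wraps.
	 */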
7118
7119	/* Zero out config table */
7120	memset(ioa_cfg->u.cfg_table, 0, ioa_cfg->cfg_table_size);
7121}
7122
7123/**
7124 * ipr_reset_next_stage - Process IPL stage change based on feedback register.
7125 * @ipr_cmd:	ipr command struct
7126 *
7127 * Return value:
7128 * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7129 **/
7130static int ipr_reset_next_stage(struct ipr_cmnd *ipr_cmd)
7131{
7132	unsigned long stage, stage_time;
7133	u32 feedback;
7134	volatile u32 int_reg;
7135	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7136	u64 maskval = 0;
7137
7138	feedback = readl(ioa_cfg->regs.init_feedback_reg);
7139	stage = feedback & IPR_IPL_INIT_STAGE_MASK;
7140	stage_time = feedback & IPR_IPL_INIT_STAGE_TIME_MASK;
7141
7142	ipr_dbg("IPL stage = 0x%lx, IPL stage time = %ld\n", stage, stage_time);
7143
7144	/* sanity check the stage_time value */
7145	if (stage_time == 0)
7146		stage_time = IPR_IPL_INIT_DEFAULT_STAGE_TIME;
7147	else if (stage_time < IPR_IPL_INIT_MIN_STAGE_TIME)
7148		stage_time = IPR_IPL_INIT_MIN_STAGE_TIME;
7149	else if (stage_time > IPR_LONG_OPERATIONAL_TIMEOUT)
7150		stage_time = IPR_LONG_OPERATIONAL_TIMEOUT;
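	/* stage_time is in seconds; unless overridden below it arms the stage-change timer */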
7151
7152	if (stage == IPR_IPL_INIT_STAGE_UNKNOWN) {
7153		writel(IPR_PCII_IPL_STAGE_CHANGE, ioa_cfg->regs.set_interrupt_mask_reg);
7154		int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
7155		stage_time = ioa_cfg->transop_timeout;
7156		ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
7157	} else if (stage == IPR_IPL_INIT_STAGE_TRANSOP) {
7158		ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
7159		maskval = IPR_PCII_IPL_STAGE_CHANGE;
7160		maskval = (maskval << 32) | IPR_PCII_IOA_TRANS_TO_OPER;
7161		writeq(maskval, ioa_cfg->regs.set_interrupt_mask_reg);
7162		int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
7163		return IPR_RC_JOB_CONTINUE;
7164	}
7165
7166	ipr_cmd->timer.data = (unsigned long) ipr_cmd;
7167	ipr_cmd->timer.expires = jiffies + stage_time * HZ;
7168	ipr_cmd->timer.function = (void (*)(unsigned long))ipr_oper_timeout;
7169	ipr_cmd->done = ipr_reset_ioa_job;
7170	add_timer(&ipr_cmd->timer);
7171	list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);
7172
7173	return IPR_RC_JOB_RETURN;
7174}
7175
7176/**
7177 * ipr_reset_enable_ioa - Enable the IOA following a reset.
7178 * @ipr_cmd:	ipr command struct
7179 *
7180 * This function reinitializes some control blocks and
7181 * enables destructive diagnostics on the adapter.
7182 *
7183 * Return value:
7184 * 	IPR_RC_JOB_RETURN
7185 **/
7186static int ipr_reset_enable_ioa(struct ipr_cmnd *ipr_cmd)
7187{
7188	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7189	volatile u32 int_reg;
7190
7191	ENTER;
7192	ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
7193	ipr_init_ioa_mem(ioa_cfg);
7194
7195	ioa_cfg->allow_interrupts = 1;
7196	int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
7197
7198	if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
7199		writel((IPR_PCII_ERROR_INTERRUPTS | IPR_PCII_HRRQ_UPDATED),
7200		       ioa_cfg->regs.clr_interrupt_mask_reg32);
7201		int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
7202		return IPR_RC_JOB_CONTINUE;
7203	}
7204
7205	/* Enable destructive diagnostics on IOA */
7206	writel(ioa_cfg->doorbell, ioa_cfg->regs.set_uproc_interrupt_reg32);
7207
7208	writel(IPR_PCII_OPER_INTERRUPTS, ioa_cfg->regs.clr_interrupt_mask_reg32);
7209	if (ioa_cfg->sis64)
7210		writel(IPR_PCII_IPL_STAGE_CHANGE, ioa_cfg->regs.clr_interrupt_mask_reg);
7211
7212	int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
7213
7214	dev_info(&ioa_cfg->pdev->dev, "Initializing IOA.\n");
7215
7216	if (ioa_cfg->sis64) {
7217		ipr_cmd->job_step = ipr_reset_next_stage;
7218		return IPR_RC_JOB_CONTINUE;
7219	}
7220
7221	ipr_cmd->timer.data = (unsigned long) ipr_cmd;
7222	ipr_cmd->timer.expires = jiffies + (ioa_cfg->transop_timeout * HZ);
7223	ipr_cmd->timer.function = (void (*)(unsigned long))ipr_oper_timeout;
7224	ipr_cmd->done = ipr_reset_ioa_job;
7225	add_timer(&ipr_cmd->timer);
7226	list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);
7227
7228	LEAVE;
7229	return IPR_RC_JOB_RETURN;
7230}
7231
7232/**
7233 * ipr_reset_wait_for_dump - Wait for a dump to time out.
7234 * @ipr_cmd:	ipr command struct
7235 *
7236 * This function is invoked when an adapter dump has run out
7237 * of processing time.
7238 *
7239 * Return value:
7240 * 	IPR_RC_JOB_CONTINUE
7241 **/
7242static int ipr_reset_wait_for_dump(struct ipr_cmnd *ipr_cmd)
7243{
7244	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7245
7246	if (ioa_cfg->sdt_state == GET_DUMP)
7247		ioa_cfg->sdt_state = ABORT_DUMP;
7248
7249	ipr_cmd->job_step = ipr_reset_alert;
7250
7251	return IPR_RC_JOB_CONTINUE;
7252}
7253
7254/**
7255 * ipr_unit_check_no_data - Log a unit check/no data error log
7256 * @ioa_cfg:		ioa config struct
7257 *
7258 * Logs an error indicating the adapter unit checked, but for some
7259 * reason, we were unable to fetch the unit check buffer.
7260 *
7261 * Return value:
7262 * 	nothing
7263 **/
7264static void ipr_unit_check_no_data(struct ipr_ioa_cfg *ioa_cfg)
7265{
7266	ioa_cfg->errors_logged++;
7267	dev_err(&ioa_cfg->pdev->dev, "IOA unit check with no data\n");
7268}
7269
7270/**
7271 * ipr_get_unit_check_buffer - Get the unit check buffer from the IOA
7272 * @ioa_cfg:		ioa config struct
7273 *
7274 * Fetches the unit check buffer from the adapter by clocking the data
7275 * through the mailbox register.
7276 *
7277 * Return value:
7278 * 	nothing
7279 **/
7280static void ipr_get_unit_check_buffer(struct ipr_ioa_cfg *ioa_cfg)
7281{
7282	unsigned long mailbox;
7283	struct ipr_hostrcb *hostrcb;
7284	struct ipr_uc_sdt sdt;
7285	int rc, length;
7286	u32 ioasc;
7287
7288	mailbox = readl(ioa_cfg->ioa_mailbox);
7289
7290	if (!ioa_cfg->sis64 && !ipr_sdt_is_fmt2(mailbox)) {
7291		ipr_unit_check_no_data(ioa_cfg);
7292		return;
7293	}
7294
7295	memset(&sdt, 0, sizeof(struct ipr_uc_sdt));
7296	rc = ipr_get_ldump_data_section(ioa_cfg, mailbox, (__be32 *) &sdt,
7297					(sizeof(struct ipr_uc_sdt)) / sizeof(__be32));
7298
7299	if (rc || !(sdt.entry[0].flags & IPR_SDT_VALID_ENTRY) ||
7300	    ((be32_to_cpu(sdt.hdr.state) != IPR_FMT3_SDT_READY_TO_USE) &&
7301	    (be32_to_cpu(sdt.hdr.state) != IPR_FMT2_SDT_READY_TO_USE))) {
7302		ipr_unit_check_no_data(ioa_cfg);
7303		return;
7304	}
7305
7306	/* Find length of the first sdt entry (UC buffer) */
7307	if (be32_to_cpu(sdt.hdr.state) == IPR_FMT3_SDT_READY_TO_USE)
7308		length = be32_to_cpu(sdt.entry[0].end_token);
7309	else
7310		length = (be32_to_cpu(sdt.entry[0].end_token) -
7311			  be32_to_cpu(sdt.entry[0].start_token)) &
7312			  IPR_FMT2_MBX_ADDR_MASK;
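	/*
	 * A format 3 SDT reports the unit check buffer length directly in
	 * end_token; format 2 stores start/end addresses, so the length is
	 * their masked difference.
	 */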
7313
7314	hostrcb = list_entry(ioa_cfg->hostrcb_free_q.next,
7315			     struct ipr_hostrcb, queue);
7316	list_del(&hostrcb->queue);
7317	memset(&hostrcb->hcam, 0, sizeof(hostrcb->hcam));
7318
7319	rc = ipr_get_ldump_data_section(ioa_cfg,
7320					be32_to_cpu(sdt.entry[0].start_token),
7321					(__be32 *)&hostrcb->hcam,
7322					min(length, (int)sizeof(hostrcb->hcam)) / sizeof(__be32));
7323
7324	if (!rc) {
7325		ipr_handle_log_data(ioa_cfg, hostrcb);
7326		ioasc = be32_to_cpu(hostrcb->hcam.u.error.fd_ioasc);
7327		if (ioasc == IPR_IOASC_NR_IOA_RESET_REQUIRED &&
7328		    ioa_cfg->sdt_state == GET_DUMP)
7329			ioa_cfg->sdt_state = WAIT_FOR_DUMP;
7330	} else
7331		ipr_unit_check_no_data(ioa_cfg);
7332
7333	list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
7334}
7335
7336/**
7337 * ipr_reset_restore_cfg_space - Restore PCI config space.
7338 * @ipr_cmd:	ipr command struct
7339 *
7340 * Description: This function restores the saved PCI config space of
7341 * the adapter, fails all outstanding ops back to the callers, and
7342 * fetches the dump/unit check if applicable to this reset.
7343 *
7344 * Return value:
7345 * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7346 **/
7347static int ipr_reset_restore_cfg_space(struct ipr_cmnd *ipr_cmd)
7348{
7349	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7350	int rc;
7351
7352	ENTER;
7353	ioa_cfg->pdev->state_saved = true;
7354	rc = pci_restore_state(ioa_cfg->pdev);
7355
7356	if (rc != PCIBIOS_SUCCESSFUL) {
7357		ipr_cmd->s.ioasa.hdr.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
7358		return IPR_RC_JOB_CONTINUE;
7359	}
7360
7361	if (ipr_set_pcix_cmd_reg(ioa_cfg)) {
7362		ipr_cmd->s.ioasa.hdr.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
7363		return IPR_RC_JOB_CONTINUE;
7364	}
7365
7366	ipr_fail_all_ops(ioa_cfg);
7367
7368	if (ioa_cfg->ioa_unit_checked) {
7369		ioa_cfg->ioa_unit_checked = 0;
7370		ipr_get_unit_check_buffer(ioa_cfg);
7371		ipr_cmd->job_step = ipr_reset_alert;
7372		ipr_reset_start_timer(ipr_cmd, 0);
7373		return IPR_RC_JOB_RETURN;
7374	}
7375
7376	if (ioa_cfg->in_ioa_bringdown) {
7377		ipr_cmd->job_step = ipr_ioa_bringdown_done;
7378	} else {
7379		ipr_cmd->job_step = ipr_reset_enable_ioa;
7380
7381		if (GET_DUMP == ioa_cfg->sdt_state) {
7382			ipr_reset_start_timer(ipr_cmd, IPR_DUMP_TIMEOUT);
7383			ipr_cmd->job_step = ipr_reset_wait_for_dump;
7384			schedule_work(&ioa_cfg->work_q);
7385			return IPR_RC_JOB_RETURN;
7386		}
7387	}
7388
7389	LEAVE;
7390	return IPR_RC_JOB_CONTINUE;
7391}
7392
7393/**
7394 * ipr_reset_bist_done - BIST has completed on the adapter.
7395 * @ipr_cmd:	ipr command struct
7396 *
7397 * Description: Unblock config space and resume the reset process.
7398 *
7399 * Return value:
7400 * 	IPR_RC_JOB_CONTINUE
7401 **/
7402static int ipr_reset_bist_done(struct ipr_cmnd *ipr_cmd)
7403{
7404	ENTER;
7405	pci_unblock_user_cfg_access(ipr_cmd->ioa_cfg->pdev);
7406	ipr_cmd->job_step = ipr_reset_restore_cfg_space;
7407	LEAVE;
7408	return IPR_RC_JOB_CONTINUE;
7409}
7410
7411/**
7412 * ipr_reset_start_bist - Run BIST on the adapter.
7413 * @ipr_cmd:	ipr command struct
7414 *
7415 * Description: This function runs BIST on the adapter, then delays 2 seconds.
7416 *
7417 * Return value:
7418 * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7419 **/
7420static int ipr_reset_start_bist(struct ipr_cmnd *ipr_cmd)
7421{
7422	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7423	int rc;
7424
7425	ENTER;
7426	pci_block_user_cfg_access(ioa_cfg->pdev);
7427	rc = pci_write_config_byte(ioa_cfg->pdev, PCI_BIST, PCI_BIST_START);
7428
7429	if (rc != PCIBIOS_SUCCESSFUL) {
7430		pci_unblock_user_cfg_access(ipr_cmd->ioa_cfg->pdev);
7431		ipr_cmd->s.ioasa.hdr.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
7432		rc = IPR_RC_JOB_CONTINUE;
7433	} else {
7434		ipr_cmd->job_step = ipr_reset_bist_done;
7435		ipr_reset_start_timer(ipr_cmd, IPR_WAIT_FOR_BIST_TIMEOUT);
7436		rc = IPR_RC_JOB_RETURN;
7437	}
7438
7439	LEAVE;
7440	return rc;
7441}
7442
7443/**
7444 * ipr_reset_slot_reset_done - Clear PCI reset to the adapter
7445 * @ipr_cmd:	ipr command struct
7446 *
7447 * Description: This clears PCI reset to the adapter and delays two seconds.
7448 *
7449 * Return value:
7450 * 	IPR_RC_JOB_RETURN
7451 **/
7452static int ipr_reset_slot_reset_done(struct ipr_cmnd *ipr_cmd)
7453{
7454	ENTER;
7455	pci_set_pcie_reset_state(ipr_cmd->ioa_cfg->pdev, pcie_deassert_reset);
7456	ipr_cmd->job_step = ipr_reset_bist_done;
7457	ipr_reset_start_timer(ipr_cmd, IPR_WAIT_FOR_BIST_TIMEOUT);
7458	LEAVE;
7459	return IPR_RC_JOB_RETURN;
7460}
7461
7462/**
7463 * ipr_reset_slot_reset - Reset the PCI slot of the adapter.
7464 * @ipr_cmd:	ipr command struct
7465 *
7466 * Description: This asserts PCI reset to the adapter.
7467 *
7468 * Return value:
7469 * 	IPR_RC_JOB_RETURN
7470 **/
7471static int ipr_reset_slot_reset(struct ipr_cmnd *ipr_cmd)
7472{
7473	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7474	struct pci_dev *pdev = ioa_cfg->pdev;
7475
7476	ENTER;
7477	pci_block_user_cfg_access(pdev);
7478	pci_set_pcie_reset_state(pdev, pcie_warm_reset);
7479	ipr_cmd->job_step = ipr_reset_slot_reset_done;
7480	ipr_reset_start_timer(ipr_cmd, IPR_PCI_RESET_TIMEOUT);
7481	LEAVE;
7482	return IPR_RC_JOB_RETURN;
7483}
7484
7485/**
7486 * ipr_reset_allowed - Query whether or not IOA can be reset
7487 * @ioa_cfg:	ioa config struct
7488 *
7489 * Return value:
7490 * 	0 if reset not allowed / non-zero if reset is allowed
7491 **/
7492static int ipr_reset_allowed(struct ipr_ioa_cfg *ioa_cfg)
7493{
7494	volatile u32 temp_reg;
7495
7496	temp_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
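	/*
	 * Reset is only permitted while the adapter is not flagging a
	 * critical operation (such as a flash write) in progress.
	 */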
7497	return ((temp_reg & IPR_PCII_CRITICAL_OPERATION) == 0);
7498}
7499
7500/**
7501 * ipr_reset_wait_to_start_bist - Wait for permission to reset IOA.
7502 * @ipr_cmd:	ipr command struct
7503 *
7504 * Description: This function waits for adapter permission to run BIST,
7505 * then runs BIST. If the adapter does not give permission after a
7506 * reasonable time, we will reset the adapter anyway. The impact of
7507 * resetting the adapter without warning the adapter is the risk of
7508 * losing the persistent error log on the adapter. If the adapter is
7509 * reset while it is writing to the flash on the adapter, the flash
7510 * segment will have bad ECC and be zeroed.
7511 *
7512 * Return value:
7513 * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7514 **/
7515static int ipr_reset_wait_to_start_bist(struct ipr_cmnd *ipr_cmd)
7516{
7517	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7518	int rc = IPR_RC_JOB_RETURN;
7519
7520	if (!ipr_reset_allowed(ioa_cfg) && ipr_cmd->u.time_left) {
7521		ipr_cmd->u.time_left -= IPR_CHECK_FOR_RESET_TIMEOUT;
7522		ipr_reset_start_timer(ipr_cmd, IPR_CHECK_FOR_RESET_TIMEOUT);
7523	} else {
7524		ipr_cmd->job_step = ioa_cfg->reset;
7525		rc = IPR_RC_JOB_CONTINUE;
7526	}
7527
7528	return rc;
7529}
7530
7531/**
7532 * ipr_reset_alert - Alert the adapter of a pending reset
7533 * @ipr_cmd:	ipr command struct
7534 *
7535 * Description: This function alerts the adapter that it will be reset.
7536 * If memory space is not currently enabled, proceed directly
7537 * to running BIST on the adapter. The timer must always be started
7538 * so we guarantee we do not run BIST from ipr_isr.
7539 *
7540 * Return value:
7541 * 	IPR_RC_JOB_RETURN
7542 **/
7543static int ipr_reset_alert(struct ipr_cmnd *ipr_cmd)
7544{
7545	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7546	u16 cmd_reg;
7547	int rc;
7548
7549	ENTER;
7550	rc = pci_read_config_word(ioa_cfg->pdev, PCI_COMMAND, &cmd_reg);
7551
7552	if ((rc == PCIBIOS_SUCCESSFUL) && (cmd_reg & PCI_COMMAND_MEMORY)) {
7553		ipr_mask_and_clear_interrupts(ioa_cfg, ~0);
7554		writel(IPR_UPROCI_RESET_ALERT, ioa_cfg->regs.set_uproc_interrupt_reg32);
7555		ipr_cmd->job_step = ipr_reset_wait_to_start_bist;
7556	} else {
7557		ipr_cmd->job_step = ioa_cfg->reset;
7558	}
7559
7560	ipr_cmd->u.time_left = IPR_WAIT_FOR_RESET_TIMEOUT;
7561	ipr_reset_start_timer(ipr_cmd, IPR_CHECK_FOR_RESET_TIMEOUT);
7562
7563	LEAVE;
7564	return IPR_RC_JOB_RETURN;
7565}
7566
7567/**
7568 * ipr_reset_ucode_download_done - Microcode download completion
7569 * @ipr_cmd:	ipr command struct
7570 *
7571 * Description: This function unmaps the microcode download buffer.
7572 *
7573 * Return value:
7574 * 	IPR_RC_JOB_CONTINUE
7575 **/
7576static int ipr_reset_ucode_download_done(struct ipr_cmnd *ipr_cmd)
7577{
7578	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7579	struct ipr_sglist *sglist = ioa_cfg->ucode_sglist;
7580
7581	pci_unmap_sg(ioa_cfg->pdev, sglist->scatterlist,
7582		     sglist->num_sg, DMA_TO_DEVICE);
7583
7584	ipr_cmd->job_step = ipr_reset_alert;
7585	return IPR_RC_JOB_CONTINUE;
7586}
7587
7588/**
7589 * ipr_reset_ucode_download - Download microcode to the adapter
7590 * @ipr_cmd:	ipr command struct
7591 *
7592 * Description: This function checks to see if there is microcode
7593 * to download to the adapter. If there is, a download is performed.
7594 *
7595 * Return value:
7596 * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7597 **/
7598static int ipr_reset_ucode_download(struct ipr_cmnd *ipr_cmd)
7599{
7600	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7601	struct ipr_sglist *sglist = ioa_cfg->ucode_sglist;
7602
7603	ENTER;
7604	ipr_cmd->job_step = ipr_reset_alert;
7605
7606	if (!sglist)
7607		return IPR_RC_JOB_CONTINUE;
7608
7609	ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
7610	ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
7611	ipr_cmd->ioarcb.cmd_pkt.cdb[0] = WRITE_BUFFER;
7612	ipr_cmd->ioarcb.cmd_pkt.cdb[1] = IPR_WR_BUF_DOWNLOAD_AND_SAVE;
7613	ipr_cmd->ioarcb.cmd_pkt.cdb[6] = (sglist->buffer_len & 0xff0000) >> 16;
7614	ipr_cmd->ioarcb.cmd_pkt.cdb[7] = (sglist->buffer_len & 0x00ff00) >> 8;
7615	ipr_cmd->ioarcb.cmd_pkt.cdb[8] = sglist->buffer_len & 0x0000ff;
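	/* CDB bytes 6-8 above hold the 24-bit microcode image length, MSB first */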
7616
7617	if (ioa_cfg->sis64)
7618		ipr_build_ucode_ioadl64(ipr_cmd, sglist);
7619	else
7620		ipr_build_ucode_ioadl(ipr_cmd, sglist);
7621	ipr_cmd->job_step = ipr_reset_ucode_download_done;
7622
7623	ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
7624		   IPR_WRITE_BUFFER_TIMEOUT);
7625
7626	LEAVE;
7627	return IPR_RC_JOB_RETURN;
7628}
7629
7630/**
7631 * ipr_reset_shutdown_ioa - Shutdown the adapter
7632 * @ipr_cmd:	ipr command struct
7633 *
7634 * Description: This function issues an adapter shutdown of the
7635 * specified type to the specified adapter as part of the
7636 * adapter reset job.
7637 *
7638 * Return value:
7639 * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
7640 **/
7641static int ipr_reset_shutdown_ioa(struct ipr_cmnd *ipr_cmd)
7642{
7643	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7644	enum ipr_shutdown_type shutdown_type = ipr_cmd->u.shutdown_type;
7645	unsigned long timeout;
7646	int rc = IPR_RC_JOB_CONTINUE;
7647
7648	ENTER;
7649	if (shutdown_type != IPR_SHUTDOWN_NONE && !ioa_cfg->ioa_is_dead) {
7650		ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
7651		ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
7652		ipr_cmd->ioarcb.cmd_pkt.cdb[0] = IPR_IOA_SHUTDOWN;
7653		ipr_cmd->ioarcb.cmd_pkt.cdb[1] = shutdown_type;
7654
7655		if (shutdown_type == IPR_SHUTDOWN_NORMAL)
7656			timeout = IPR_SHUTDOWN_TIMEOUT;
7657		else if (shutdown_type == IPR_SHUTDOWN_PREPARE_FOR_NORMAL)
7658			timeout = IPR_INTERNAL_TIMEOUT;
7659		else if (ioa_cfg->dual_raid && ipr_dual_ioa_raid)
7660			timeout = IPR_DUAL_IOA_ABBR_SHUTDOWN_TO;
7661		else
7662			timeout = IPR_ABBREV_SHUTDOWN_TIMEOUT;
7663
7664		ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, timeout);
7665
7666		rc = IPR_RC_JOB_RETURN;
7667		ipr_cmd->job_step = ipr_reset_ucode_download;
7668	} else
7669		ipr_cmd->job_step = ipr_reset_alert;
7670
7671	LEAVE;
7672	return rc;
7673}
7674
7675/**
7676 * ipr_reset_ioa_job - Adapter reset job
7677 * @ipr_cmd:	ipr command struct
7678 *
7679 * Description: This function is the job router for the adapter reset job.
7680 *
7681 * Return value:
7682 * 	none
7683 **/
7684static void ipr_reset_ioa_job(struct ipr_cmnd *ipr_cmd)
7685{
7686	u32 rc, ioasc;
7687	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
7688
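	/*
	 * Run the reset job steps back to back. A step returns
	 * IPR_RC_JOB_CONTINUE to proceed synchronously to the next step, or
	 * IPR_RC_JOB_RETURN once it has queued asynchronous work (a timer or
	 * an adapter command) that will re-enter this routine later.
	 */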
7689	do {
7690		ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
7691
7692		if (ioa_cfg->reset_cmd != ipr_cmd) {
7693			/*
7694			 * We are doing nested adapter resets and this is
7695			 * not the current reset job.
7696			 */
7697			list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
7698			return;
7699		}
7700
7701		if (IPR_IOASC_SENSE_KEY(ioasc)) {
7702			rc = ipr_cmd->job_step_failed(ipr_cmd);
7703			if (rc == IPR_RC_JOB_RETURN)
7704				return;
7705		}
7706
7707		ipr_reinit_ipr_cmnd(ipr_cmd);
7708		ipr_cmd->job_step_failed = ipr_reset_cmd_failed;
7709		rc = ipr_cmd->job_step(ipr_cmd);
7710	} while(rc == IPR_RC_JOB_CONTINUE);
7711}
7712
7713/**
7714 * _ipr_initiate_ioa_reset - Initiate an adapter reset
7715 * @ioa_cfg:		ioa config struct
7716 * @job_step:		first job step of reset job
7717 * @shutdown_type:	shutdown type
7718 *
7719 * Description: This function will initiate the reset of the given adapter
7720 * starting at the selected job step.
7721 * If the caller needs to wait on the completion of the reset,
7722 * the caller must sleep on the reset_wait_q.
7723 *
7724 * Return value:
7725 * 	none
7726 **/
7727static void _ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg,
7728				    int (*job_step) (struct ipr_cmnd *),
7729				    enum ipr_shutdown_type shutdown_type)
7730{
7731	struct ipr_cmnd *ipr_cmd;
7732
7733	ioa_cfg->in_reset_reload = 1;
7734	ioa_cfg->allow_cmds = 0;
7735	scsi_block_requests(ioa_cfg->host);
7736
7737	ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
7738	ioa_cfg->reset_cmd = ipr_cmd;
7739	ipr_cmd->job_step = job_step;
7740	ipr_cmd->u.shutdown_type = shutdown_type;
7741
7742	ipr_reset_ioa_job(ipr_cmd);
7743}
7744
7745/**
7746 * ipr_initiate_ioa_reset - Initiate an adapter reset
7747 * @ioa_cfg:		ioa config struct
7748 * @shutdown_type:	shutdown type
7749 *
7750 * Description: This function will initiate the reset of the given adapter.
7751 * If the caller needs to wait on the completion of the reset,
7752 * the caller must sleep on the reset_wait_q.
7753 *
7754 * Return value:
7755 * 	none
7756 **/
7757static void ipr_initiate_ioa_reset(struct ipr_ioa_cfg *ioa_cfg,
7758				   enum ipr_shutdown_type shutdown_type)
7759{
7760	if (ioa_cfg->ioa_is_dead)
7761		return;
7762
7763	if (ioa_cfg->in_reset_reload && ioa_cfg->sdt_state == GET_DUMP)
7764		ioa_cfg->sdt_state = ABORT_DUMP;
7765
7766	if (ioa_cfg->reset_retries++ >= IPR_NUM_RESET_RELOAD_RETRIES) {
7767		dev_err(&ioa_cfg->pdev->dev,
7768			"IOA taken offline - error recovery failed\n");
7769
7770		ioa_cfg->reset_retries = 0;
7771		ioa_cfg->ioa_is_dead = 1;
7772
7773		if (ioa_cfg->in_ioa_bringdown) {
7774			ioa_cfg->reset_cmd = NULL;
7775			ioa_cfg->in_reset_reload = 0;
7776			ipr_fail_all_ops(ioa_cfg);
7777			wake_up_all(&ioa_cfg->reset_wait_q);
7778
7779			spin_unlock_irq(ioa_cfg->host->host_lock);
7780			scsi_unblock_requests(ioa_cfg->host);
7781			spin_lock_irq(ioa_cfg->host->host_lock);
7782			return;
7783		} else {
7784			ioa_cfg->in_ioa_bringdown = 1;
7785			shutdown_type = IPR_SHUTDOWN_NONE;
7786		}
7787	}
7788
7789	_ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_shutdown_ioa,
7790				shutdown_type);
7791}
7792
7793/**
7794 * ipr_reset_freeze - Hold off all I/O activity
7795 * @ipr_cmd:	ipr command struct
7796 *
7797 * Description: If the PCI slot is frozen, hold off all I/O
7798 * activity; then, as soon as the slot is available again,
7799 * initiate an adapter reset.
7800 */
7801static int ipr_reset_freeze(struct ipr_cmnd *ipr_cmd)
7802{
7803	/* Disallow new interrupts, avoid loop */
7804	ipr_cmd->ioa_cfg->allow_interrupts = 0;
7805	list_add_tail(&ipr_cmd->queue, &ipr_cmd->ioa_cfg->pending_q);
7806	ipr_cmd->done = ipr_reset_ioa_job;
7807	return IPR_RC_JOB_RETURN;
7808}
7809
7810/**
7811 * ipr_pci_frozen - Called when slot has experienced a PCI bus error.
7812 * @pdev:	PCI device struct
7813 *
7814 * Description: This routine is called to tell us that the PCI bus
7815 * is down. Can't do anything here, except put the device driver
7816 * into a holding pattern, waiting for the PCI bus to come back.
7817 */
7818static void ipr_pci_frozen(struct pci_dev *pdev)
7819{
7820	unsigned long flags = 0;
7821	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
7822
7823	spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
7824	_ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_freeze, IPR_SHUTDOWN_NONE);
7825	spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
7826}
7827
7828/**
7829 * ipr_pci_slot_reset - Called when PCI slot has been reset.
7830 * @pdev:	PCI device struct
7831 *
7832 * Description: This routine is called by the pci error recovery
7833 * code after the PCI slot has been reset, just before we
7834 * should resume normal operations.
7835 */
7836static pci_ers_result_t ipr_pci_slot_reset(struct pci_dev *pdev)
7837{
7838	unsigned long flags = 0;
7839	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
7840
7841	spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
7842	if (ioa_cfg->needs_warm_reset)
7843		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
7844	else
7845		_ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_restore_cfg_space,
7846					IPR_SHUTDOWN_NONE);
7847	spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
7848	return PCI_ERS_RESULT_RECOVERED;
7849}
7850
7851/**
7852 * ipr_pci_perm_failure - Called when PCI slot is dead for good.
7853 * @pdev:	PCI device struct
7854 *
7855 * Description: This routine is called when the PCI bus has
7856 * permanently failed.
7857 */
7858static void ipr_pci_perm_failure(struct pci_dev *pdev)
7859{
7860	unsigned long flags = 0;
7861	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
7862
7863	spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
7864	if (ioa_cfg->sdt_state == WAIT_FOR_DUMP)
7865		ioa_cfg->sdt_state = ABORT_DUMP;
7866	ioa_cfg->reset_retries = IPR_NUM_RESET_RELOAD_RETRIES;
7867	ioa_cfg->in_ioa_bringdown = 1;
7868	ioa_cfg->allow_cmds = 0;
7869	ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
7870	spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
7871}
7872
7873/**
7874 * ipr_pci_error_detected - Called when a PCI error is detected.
7875 * @pdev:	PCI device struct
7876 * @state:	PCI channel state
7877 *
7878 * Description: Called when a PCI error is detected.
7879 *
7880 * Return value:
7881 * 	PCI_ERS_RESULT_NEED_RESET or PCI_ERS_RESULT_DISCONNECT
7882 */
7883static pci_ers_result_t ipr_pci_error_detected(struct pci_dev *pdev,
7884					       pci_channel_state_t state)
7885{
7886	switch (state) {
7887	case pci_channel_io_frozen:
7888		ipr_pci_frozen(pdev);
7889		return PCI_ERS_RESULT_NEED_RESET;
7890	case pci_channel_io_perm_failure:
7891		ipr_pci_perm_failure(pdev);
7892		return PCI_ERS_RESULT_DISCONNECT;
7894	default:
7895		break;
7896	}
7897	return PCI_ERS_RESULT_NEED_RESET;
7898}
7899
7900/**
7901 * ipr_probe_ioa_part2 - Initializes IOAs found in ipr_probe_ioa(..)
7902 * @ioa_cfg:	ioa cfg struct
7903 *
7904 * Description: This is the second phase of adapter initialization.
7905 * This function takes care of initializing the adapter to the point
7906 * where it can accept new commands.
7907 *
7908 * Return value:
7909 * 	0 on success / -EIO on failure
7910 **/
7911static int __devinit ipr_probe_ioa_part2(struct ipr_ioa_cfg *ioa_cfg)
7912{
7913	int rc = 0;
7914	unsigned long host_lock_flags = 0;
7915
7916	ENTER;
7917	spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
7918	dev_dbg(&ioa_cfg->pdev->dev, "ioa_cfg adx: 0x%p\n", ioa_cfg);
7919	if (ioa_cfg->needs_hard_reset) {
7920		ioa_cfg->needs_hard_reset = 0;
7921		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
7922	} else
7923		_ipr_initiate_ioa_reset(ioa_cfg, ipr_reset_enable_ioa,
7924					IPR_SHUTDOWN_NONE);
7925
7926	spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
7927	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
7928	spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
7929
7930	if (ioa_cfg->ioa_is_dead) {
7931		rc = -EIO;
7932	} else if (ipr_invalid_adapter(ioa_cfg)) {
7933		if (!ipr_testmode)
7934			rc = -EIO;
7935
7936		dev_err(&ioa_cfg->pdev->dev,
7937			"Adapter not supported in this hardware configuration.\n");
7938	}
7939
7940	spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
7941
7942	LEAVE;
7943	return rc;
7944}
7945
7946/**
7947 * ipr_free_cmd_blks - Frees command blocks allocated for an adapter
7948 * @ioa_cfg:	ioa config struct
7949 *
7950 * Return value:
7951 * 	none
7952 **/
7953static void ipr_free_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
7954{
7955	int i;
7956
7957	for (i = 0; i < IPR_NUM_CMD_BLKS; i++) {
7958		if (ioa_cfg->ipr_cmnd_list[i])
7959			pci_pool_free(ioa_cfg->ipr_cmd_pool,
7960				      ioa_cfg->ipr_cmnd_list[i],
7961				      ioa_cfg->ipr_cmnd_list_dma[i]);
7962
7963		ioa_cfg->ipr_cmnd_list[i] = NULL;
7964	}
7965
7966	if (ioa_cfg->ipr_cmd_pool)
7967		pci_pool_destroy (ioa_cfg->ipr_cmd_pool);
7968
7969	ioa_cfg->ipr_cmd_pool = NULL;
7970}
7971
7972/**
7973 * ipr_free_mem - Frees memory allocated for an adapter
7974 * @ioa_cfg:	ioa cfg struct
7975 *
7976 * Return value:
7977 * 	nothing
7978 **/
7979static void ipr_free_mem(struct ipr_ioa_cfg *ioa_cfg)
7980{
7981	int i;
7982
7983	kfree(ioa_cfg->res_entries);
7984	pci_free_consistent(ioa_cfg->pdev, sizeof(struct ipr_misc_cbs),
7985			    ioa_cfg->vpd_cbs, ioa_cfg->vpd_cbs_dma);
7986	ipr_free_cmd_blks(ioa_cfg);
7987	pci_free_consistent(ioa_cfg->pdev, sizeof(u32) * IPR_NUM_CMD_BLKS,
7988			    ioa_cfg->host_rrq, ioa_cfg->host_rrq_dma);
7989	pci_free_consistent(ioa_cfg->pdev, ioa_cfg->cfg_table_size,
7990			    ioa_cfg->u.cfg_table,
7991			    ioa_cfg->cfg_table_dma);
7992
7993	for (i = 0; i < IPR_NUM_HCAMS; i++) {
7994		pci_free_consistent(ioa_cfg->pdev,
7995				    sizeof(struct ipr_hostrcb),
7996				    ioa_cfg->hostrcb[i],
7997				    ioa_cfg->hostrcb_dma[i]);
7998	}
7999
8000	ipr_free_dump(ioa_cfg);
8001	kfree(ioa_cfg->trace);
8002}
8003
8004/**
8005 * ipr_free_all_resources - Free all allocated resources for an adapter.
8006 * @ioa_cfg:	ioa config struct
8007 *
8008 * This function frees all allocated resources for the
8009 * specified adapter.
8010 *
8011 * Return value:
8012 * 	none
8013 **/
8014static void ipr_free_all_resources(struct ipr_ioa_cfg *ioa_cfg)
8015{
8016	struct pci_dev *pdev = ioa_cfg->pdev;
8017
8018	ENTER;
8019	free_irq(pdev->irq, ioa_cfg);
8020	pci_disable_msi(pdev);
8021	iounmap(ioa_cfg->hdw_dma_regs);
8022	pci_release_regions(pdev);
8023	ipr_free_mem(ioa_cfg);
8024	scsi_host_put(ioa_cfg->host);
8025	pci_disable_device(pdev);
8026	LEAVE;
8027}
8028
8029/**
8030 * ipr_alloc_cmd_blks - Allocate command blocks for an adapter
8031 * @ioa_cfg:	ioa config struct
8032 *
8033 * Return value:
8034 * 	0 on success / -ENOMEM on allocation failure
8035 **/
8036static int __devinit ipr_alloc_cmd_blks(struct ipr_ioa_cfg *ioa_cfg)
8037{
8038	struct ipr_cmnd *ipr_cmd;
8039	struct ipr_ioarcb *ioarcb;
8040	dma_addr_t dma_addr;
8041	int i;
8042
8043	ioa_cfg->ipr_cmd_pool = pci_pool_create (IPR_NAME, ioa_cfg->pdev,
8044						 sizeof(struct ipr_cmnd), 16, 0);
8045
8046	if (!ioa_cfg->ipr_cmd_pool)
8047		return -ENOMEM;
8048
8049	for (i = 0; i < IPR_NUM_CMD_BLKS; i++) {
8050		ipr_cmd = pci_pool_alloc (ioa_cfg->ipr_cmd_pool, GFP_KERNEL, &dma_addr);
8051
8052		if (!ipr_cmd) {
8053			ipr_free_cmd_blks(ioa_cfg);
8054			return -ENOMEM;
8055		}
8056
8057		memset(ipr_cmd, 0, sizeof(*ipr_cmd));
8058		ioa_cfg->ipr_cmnd_list[i] = ipr_cmd;
8059		ioa_cfg->ipr_cmnd_list_dma[i] = dma_addr;
8060
8061		ioarcb = &ipr_cmd->ioarcb;
8062		ipr_cmd->dma_addr = dma_addr;
8063		if (ioa_cfg->sis64)
8064			ioarcb->a.ioarcb_host_pci_addr64 = cpu_to_be64(dma_addr);
8065		else
8066			ioarcb->a.ioarcb_host_pci_addr = cpu_to_be32(dma_addr);
8067
8068		ioarcb->host_response_handle = cpu_to_be32(i << 2);
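		/*
		 * The response handle encodes the command index shifted left
		 * two bits; the low bits of each HRRQ entry are used for
		 * flags (e.g. the toggle bit).
		 */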
8069		if (ioa_cfg->sis64) {
8070			ioarcb->u.sis64_addr_data.data_ioadl_addr =
8071				cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64));
8072			ioarcb->u.sis64_addr_data.ioasa_host_pci_addr =
8073				cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, s.ioasa64));
8074		} else {
8075			ioarcb->write_ioadl_addr =
8076				cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl));
8077			ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
8078			ioarcb->ioasa_host_pci_addr =
8079				cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, s.ioasa));
8080		}
8081		ioarcb->ioasa_len = cpu_to_be16(sizeof(struct ipr_ioasa));
8082		ipr_cmd->cmd_index = i;
8083		ipr_cmd->ioa_cfg = ioa_cfg;
8084		ipr_cmd->sense_buffer_dma = dma_addr +
8085			offsetof(struct ipr_cmnd, sense_buffer);
8086
8087		list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
8088	}
8089
8090	return 0;
8091}
8092
8093/**
8094 * ipr_alloc_mem - Allocate memory for an adapter
8095 * @ioa_cfg:	ioa config struct
8096 *
8097 * Return value:
8098 * 	0 on success / non-zero for error
8099 **/
8100static int __devinit ipr_alloc_mem(struct ipr_ioa_cfg *ioa_cfg)
8101{
8102	struct pci_dev *pdev = ioa_cfg->pdev;
8103	int i, rc = -ENOMEM;
8104
8105	ENTER;
8106	ioa_cfg->res_entries = kzalloc(sizeof(struct ipr_resource_entry) *
8107				       ioa_cfg->max_devs_supported, GFP_KERNEL);
8108
8109	if (!ioa_cfg->res_entries)
8110		goto out;
8111
8112	if (ioa_cfg->sis64) {
8113		ioa_cfg->target_ids = kzalloc(sizeof(unsigned long) *
8114					      BITS_TO_LONGS(ioa_cfg->max_devs_supported), GFP_KERNEL);
8115		ioa_cfg->array_ids = kzalloc(sizeof(unsigned long) *
8116					     BITS_TO_LONGS(ioa_cfg->max_devs_supported), GFP_KERNEL);
8117		ioa_cfg->vset_ids = kzalloc(sizeof(unsigned long) *
8118					    BITS_TO_LONGS(ioa_cfg->max_devs_supported), GFP_KERNEL);
8119	}
8120
8121	for (i = 0; i < ioa_cfg->max_devs_supported; i++) {
8122		list_add_tail(&ioa_cfg->res_entries[i].queue, &ioa_cfg->free_res_q);
8123		ioa_cfg->res_entries[i].ioa_cfg = ioa_cfg;
8124	}
8125
8126	ioa_cfg->vpd_cbs = pci_alloc_consistent(ioa_cfg->pdev,
8127						sizeof(struct ipr_misc_cbs),
8128						&ioa_cfg->vpd_cbs_dma);
8129
8130	if (!ioa_cfg->vpd_cbs)
8131		goto out_free_res_entries;
8132
8133	if (ipr_alloc_cmd_blks(ioa_cfg))
8134		goto out_free_vpd_cbs;
8135
8136	ioa_cfg->host_rrq = pci_alloc_consistent(ioa_cfg->pdev,
8137						 sizeof(u32) * IPR_NUM_CMD_BLKS,
8138						 &ioa_cfg->host_rrq_dma);
8139
8140	if (!ioa_cfg->host_rrq)
8141		goto out_ipr_free_cmd_blocks;
8142
8143	ioa_cfg->u.cfg_table = pci_alloc_consistent(ioa_cfg->pdev,
8144						    ioa_cfg->cfg_table_size,
8145						    &ioa_cfg->cfg_table_dma);
8146
8147	if (!ioa_cfg->u.cfg_table)
8148		goto out_free_host_rrq;
8149
8150	for (i = 0; i < IPR_NUM_HCAMS; i++) {
8151		ioa_cfg->hostrcb[i] = pci_alloc_consistent(ioa_cfg->pdev,
8152							   sizeof(struct ipr_hostrcb),
8153							   &ioa_cfg->hostrcb_dma[i]);
8154
8155		if (!ioa_cfg->hostrcb[i])
8156			goto out_free_hostrcb_dma;
8157
8158		ioa_cfg->hostrcb[i]->hostrcb_dma =
8159			ioa_cfg->hostrcb_dma[i] + offsetof(struct ipr_hostrcb, hcam);
8160		ioa_cfg->hostrcb[i]->ioa_cfg = ioa_cfg;
8161		list_add_tail(&ioa_cfg->hostrcb[i]->queue, &ioa_cfg->hostrcb_free_q);
8162	}
8163
8164	ioa_cfg->trace = kzalloc(sizeof(struct ipr_trace_entry) *
8165				 IPR_NUM_TRACE_ENTRIES, GFP_KERNEL);
8166
8167	if (!ioa_cfg->trace)
8168		goto out_free_hostrcb_dma;
8169
8170	rc = 0;
8171out:
8172	LEAVE;
8173	return rc;
8174
8175out_free_hostrcb_dma:
8176	while (i-- > 0) {
8177		pci_free_consistent(pdev, sizeof(struct ipr_hostrcb),
8178				    ioa_cfg->hostrcb[i],
8179				    ioa_cfg->hostrcb_dma[i]);
8180	}
8181	pci_free_consistent(pdev, ioa_cfg->cfg_table_size,
8182			    ioa_cfg->u.cfg_table,
8183			    ioa_cfg->cfg_table_dma);
8184out_free_host_rrq:
8185	pci_free_consistent(pdev, sizeof(u32) * IPR_NUM_CMD_BLKS,
8186			    ioa_cfg->host_rrq, ioa_cfg->host_rrq_dma);
8187out_ipr_free_cmd_blocks:
8188	ipr_free_cmd_blks(ioa_cfg);
8189out_free_vpd_cbs:
8190	pci_free_consistent(pdev, sizeof(struct ipr_misc_cbs),
8191			    ioa_cfg->vpd_cbs, ioa_cfg->vpd_cbs_dma);
8192out_free_res_entries:
8193	kfree(ioa_cfg->res_entries);
8194	goto out;
8195}
8196
8197/**
8198 * ipr_initialize_bus_attr - Initialize SCSI bus attributes to default values
8199 * @ioa_cfg:	ioa config struct
8200 *
8201 * Return value:
8202 * 	none
8203 **/
8204static void __devinit ipr_initialize_bus_attr(struct ipr_ioa_cfg *ioa_cfg)
8205{
8206	int i;
8207
8208	for (i = 0; i < IPR_MAX_NUM_BUSES; i++) {
8209		ioa_cfg->bus_attr[i].bus = i;
8210		ioa_cfg->bus_attr[i].qas_enabled = 0;
8211		ioa_cfg->bus_attr[i].bus_width = IPR_DEFAULT_BUS_WIDTH;
8212		if (ipr_max_speed < ARRAY_SIZE(ipr_max_bus_speeds))
8213			ioa_cfg->bus_attr[i].max_xfer_rate = ipr_max_bus_speeds[ipr_max_speed];
8214		else
8215			ioa_cfg->bus_attr[i].max_xfer_rate = IPR_U160_SCSI_RATE;
8216	}
8217}
8218
8219/**
8220 * ipr_init_ioa_cfg - Initialize IOA config struct
8221 * @ioa_cfg:	ioa config struct
8222 * @host:		scsi host struct
8223 * @pdev:		PCI dev struct
8224 *
8225 * Return value:
8226 * 	none
8227 **/
8228static void __devinit ipr_init_ioa_cfg(struct ipr_ioa_cfg *ioa_cfg,
8229				       struct Scsi_Host *host, struct pci_dev *pdev)
8230{
8231	const struct ipr_interrupt_offsets *p;
8232	struct ipr_interrupts *t;
8233	void __iomem *base;
8234
8235	ioa_cfg->host = host;
8236	ioa_cfg->pdev = pdev;
8237	ioa_cfg->log_level = ipr_log_level;
8238	ioa_cfg->doorbell = IPR_DOORBELL;
8239	sprintf(ioa_cfg->eye_catcher, IPR_EYECATCHER);
8240	sprintf(ioa_cfg->trace_start, IPR_TRACE_START_LABEL);
8241	sprintf(ioa_cfg->ipr_free_label, IPR_FREEQ_LABEL);
8242	sprintf(ioa_cfg->ipr_pending_label, IPR_PENDQ_LABEL);
8243	sprintf(ioa_cfg->cfg_table_start, IPR_CFG_TBL_START);
8244	sprintf(ioa_cfg->resource_table_label, IPR_RES_TABLE_LABEL);
8245	sprintf(ioa_cfg->ipr_hcam_label, IPR_HCAM_LABEL);
8246	sprintf(ioa_cfg->ipr_cmd_label, IPR_CMD_LABEL);
8247
8248	INIT_LIST_HEAD(&ioa_cfg->free_q);
8249	INIT_LIST_HEAD(&ioa_cfg->pending_q);
8250	INIT_LIST_HEAD(&ioa_cfg->hostrcb_free_q);
8251	INIT_LIST_HEAD(&ioa_cfg->hostrcb_pending_q);
8252	INIT_LIST_HEAD(&ioa_cfg->free_res_q);
8253	INIT_LIST_HEAD(&ioa_cfg->used_res_q);
8254	INIT_WORK(&ioa_cfg->work_q, ipr_worker_thread);
8255	init_waitqueue_head(&ioa_cfg->reset_wait_q);
8256	init_waitqueue_head(&ioa_cfg->msi_wait_q);
8257	ioa_cfg->sdt_state = INACTIVE;
8258
8259	ipr_initialize_bus_attr(ioa_cfg);
8260	ioa_cfg->max_devs_supported = ipr_max_devs;
8261
8262	if (ioa_cfg->sis64) {
8263		host->max_id = IPR_MAX_SIS64_TARGETS_PER_BUS;
8264		host->max_lun = IPR_MAX_SIS64_LUNS_PER_TARGET;
8265		if (ipr_max_devs > IPR_MAX_SIS64_DEVS)
8266			ioa_cfg->max_devs_supported = IPR_MAX_SIS64_DEVS;
8267	} else {
8268		host->max_id = IPR_MAX_NUM_TARGETS_PER_BUS;
8269		host->max_lun = IPR_MAX_NUM_LUNS_PER_TARGET;
8270		if (ipr_max_devs > IPR_MAX_PHYSICAL_DEVS)
8271			ioa_cfg->max_devs_supported = IPR_MAX_PHYSICAL_DEVS;
8272	}
8273	host->max_channel = IPR_MAX_BUS_TO_SCAN;
8274	host->unique_id = host->host_no;
8275	host->max_cmd_len = IPR_MAX_CDB_LEN;
8276	pci_set_drvdata(pdev, ioa_cfg);
8277
8278	p = &ioa_cfg->chip_cfg->regs;
8279	t = &ioa_cfg->regs;
8280	base = ioa_cfg->hdw_dma_regs;
8281
8282	t->set_interrupt_mask_reg = base + p->set_interrupt_mask_reg;
8283	t->clr_interrupt_mask_reg = base + p->clr_interrupt_mask_reg;
8284	t->clr_interrupt_mask_reg32 = base + p->clr_interrupt_mask_reg32;
8285	t->sense_interrupt_mask_reg = base + p->sense_interrupt_mask_reg;
8286	t->sense_interrupt_mask_reg32 = base + p->sense_interrupt_mask_reg32;
8287	t->clr_interrupt_reg = base + p->clr_interrupt_reg;
8288	t->clr_interrupt_reg32 = base + p->clr_interrupt_reg32;
8289	t->sense_interrupt_reg = base + p->sense_interrupt_reg;
8290	t->sense_interrupt_reg32 = base + p->sense_interrupt_reg32;
8291	t->ioarrin_reg = base + p->ioarrin_reg;
8292	t->sense_uproc_interrupt_reg = base + p->sense_uproc_interrupt_reg;
8293	t->sense_uproc_interrupt_reg32 = base + p->sense_uproc_interrupt_reg32;
8294	t->set_uproc_interrupt_reg = base + p->set_uproc_interrupt_reg;
8295	t->set_uproc_interrupt_reg32 = base + p->set_uproc_interrupt_reg32;
8296	t->clr_uproc_interrupt_reg = base + p->clr_uproc_interrupt_reg;
8297	t->clr_uproc_interrupt_reg32 = base + p->clr_uproc_interrupt_reg32;
8298
8299	if (ioa_cfg->sis64) {
8300		t->init_feedback_reg = base + p->init_feedback_reg;
8301		t->dump_addr_reg = base + p->dump_addr_reg;
8302		t->dump_data_reg = base + p->dump_data_reg;
8303	}
8304}
8305
8306/**
8307 * ipr_get_chip_info - Find adapter chip information
8308 * @dev_id:		PCI device id struct
8309 *
8310 * Return value:
8311 * 	ptr to chip information on success / NULL on failure
8312 **/
8313static const struct ipr_chip_t * __devinit
8314ipr_get_chip_info(const struct pci_device_id *dev_id)
8315{
8316	int i;
8317
8318	for (i = 0; i < ARRAY_SIZE(ipr_chip); i++)
8319		if (ipr_chip[i].vendor == dev_id->vendor &&
8320		    ipr_chip[i].device == dev_id->device)
8321			return &ipr_chip[i];
8322	return NULL;
8323}
8324
8325/**
8326 * ipr_test_intr - Handle the interrupt generated in ipr_test_msi().
8327 * @devp:		pointer to ioa config struct (registered as the IRQ cookie)
8328 *
8329 * Description: Simply set the msi_received flag to 1 indicating that
8330 * Message Signaled Interrupts are supported.
8331 *
8332 * Return value:
8333 * 	IRQ_HANDLED
8334 **/
8335static irqreturn_t __devinit ipr_test_intr(int irq, void *devp)
8336{
8337	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)devp;
8338	unsigned long lock_flags = 0;
8339	irqreturn_t rc = IRQ_HANDLED;
8340
8341	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
8342
8343	ioa_cfg->msi_received = 1;
8344	wake_up(&ioa_cfg->msi_wait_q);
8345
8346	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
8347	return rc;
8348}
8349
8350/**
8351 * ipr_test_msi - Test for Message Signaled Interrupt (MSI) support.
8352 * @pdev:		PCI device struct
8353 *
8354 * Description: The return value from pci_enable_msi() can not always be
8355 * trusted.  This routine sets up and initiates a test interrupt to determine
8356 * if the interrupt is received via the ipr_test_intr() service routine.
8357 * If the test fails, the driver will fall back to LSI.
8358 *
8359 * Return value:
8360 * 	0 on success / non-zero on failure
8361 **/
8362static int __devinit ipr_test_msi(struct ipr_ioa_cfg *ioa_cfg,
8363				  struct pci_dev *pdev)
8364{
8365	int rc;
8366	volatile u32 int_reg;
8367	unsigned long lock_flags = 0;
8368
8369	ENTER;
8370
8371	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
8372	init_waitqueue_head(&ioa_cfg->msi_wait_q);
8373	ioa_cfg->msi_received = 0;
8374	ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
8375	writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE, ioa_cfg->regs.clr_interrupt_mask_reg32);
8376	int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
8377	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
8378
8379	rc = request_irq(pdev->irq, ipr_test_intr, 0, IPR_NAME, ioa_cfg);
8380	if (rc) {
8381		dev_err(&pdev->dev, "Can not assign irq %d\n", pdev->irq);
8382		return rc;
8383	} else if (ipr_debug)
8384		dev_info(&pdev->dev, "IRQ assigned: %d\n", pdev->irq);
8385
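	/*
	 * Generate a test interrupt via the IO debug acknowledge bit and
	 * wait up to one second for ipr_test_intr() to set msi_received.
	 */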
8386	writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE, ioa_cfg->regs.sense_interrupt_reg32);
8387	int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
8388	wait_event_timeout(ioa_cfg->msi_wait_q, ioa_cfg->msi_received, HZ);
8389	ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
8390
8391	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
8392	if (!ioa_cfg->msi_received) {
8393		/* MSI test failed */
8394		dev_info(&pdev->dev, "MSI test failed.  Falling back to LSI.\n");
8395		rc = -EOPNOTSUPP;
8396	} else if (ipr_debug)
8397		dev_info(&pdev->dev, "MSI test succeeded.\n");
8398
8399	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
8400
8401	free_irq(pdev->irq, ioa_cfg);
8402
8403	LEAVE;
8404
8405	return rc;
8406}
8407
8408/**
8409 * ipr_probe_ioa - Allocates memory and does first stage of initialization
8410 * @pdev:		PCI device struct
8411 * @dev_id:		PCI device id struct
8412 *
8413 * Return value:
8414 * 	0 on success / non-zero on failure
8415 **/
8416static int __devinit ipr_probe_ioa(struct pci_dev *pdev,
8417				   const struct pci_device_id *dev_id)
8418{
8419	struct ipr_ioa_cfg *ioa_cfg;
8420	struct Scsi_Host *host;
8421	unsigned long ipr_regs_pci;
8422	void __iomem *ipr_regs;
8423	int rc = PCIBIOS_SUCCESSFUL;
8424	volatile u32 mask, uproc, interrupts;
8425
8426	ENTER;
8427
8428	if ((rc = pci_enable_device(pdev))) {
8429		dev_err(&pdev->dev, "Cannot enable adapter\n");
8430		goto out;
8431	}
8432
8433	dev_info(&pdev->dev, "Found IOA with IRQ: %d\n", pdev->irq);
8434
8435	host = scsi_host_alloc(&driver_template, sizeof(*ioa_cfg));
8436
8437	if (!host) {
8438		dev_err(&pdev->dev, "call to scsi_host_alloc failed!\n");
8439		rc = -ENOMEM;
8440		goto out_disable;
8441	}
8442
8443	ioa_cfg = (struct ipr_ioa_cfg *)host->hostdata;
8444	memset(ioa_cfg, 0, sizeof(struct ipr_ioa_cfg));
8445	ata_host_init(&ioa_cfg->ata_host, &pdev->dev,
8446		      sata_port_info.flags, &ipr_sata_ops);
8447
8448	ioa_cfg->ipr_chip = ipr_get_chip_info(dev_id);
8449
8450	if (!ioa_cfg->ipr_chip) {
8451		dev_err(&pdev->dev, "Unknown adapter chipset 0x%04X 0x%04X\n",
8452			dev_id->vendor, dev_id->device);
		rc = -EINVAL;
8453		goto out_scsi_host_put;
8454	}
8455
8456	/* set SIS 32 or SIS 64 */
8457	ioa_cfg->sis64 = ioa_cfg->ipr_chip->sis_type == IPR_SIS64 ? 1 : 0;
8458	ioa_cfg->chip_cfg = ioa_cfg->ipr_chip->cfg;
8459
8460	if (ipr_transop_timeout)
8461		ioa_cfg->transop_timeout = ipr_transop_timeout;
8462	else if (dev_id->driver_data & IPR_USE_LONG_TRANSOP_TIMEOUT)
8463		ioa_cfg->transop_timeout = IPR_LONG_OPERATIONAL_TIMEOUT;
8464	else
8465		ioa_cfg->transop_timeout = IPR_OPERATIONAL_TIMEOUT;
8466
8467	ioa_cfg->revid = pdev->revision;
8468
8469	ipr_regs_pci = pci_resource_start(pdev, 0);
8470
8471	rc = pci_request_regions(pdev, IPR_NAME);
8472	if (rc < 0) {
8473		dev_err(&pdev->dev,
8474			"Couldn't register memory range of registers\n");
8475		goto out_scsi_host_put;
8476	}
8477
8478	ipr_regs = pci_ioremap_bar(pdev, 0);
8479
8480	if (!ipr_regs) {
8481		dev_err(&pdev->dev,
8482			"Couldn't map memory range of registers\n");
8483		rc = -ENOMEM;
8484		goto out_release_regions;
8485	}
8486
8487	ioa_cfg->hdw_dma_regs = ipr_regs;
8488	ioa_cfg->hdw_dma_regs_pci = ipr_regs_pci;
8489	ioa_cfg->ioa_mailbox = ioa_cfg->chip_cfg->mailbox + ipr_regs;
8490
8491	ipr_init_ioa_cfg(ioa_cfg, host, pdev);
8492
8493	pci_set_master(pdev);
8494
8495	if (ioa_cfg->sis64) {
8496		rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
8497		if (rc < 0) {
8498			dev_dbg(&pdev->dev, "Failed to set 64 bit PCI DMA mask\n");
8499			rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
8500		}
8501
8502	} else
8503		rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
8504
8505	if (rc < 0) {
8506		dev_err(&pdev->dev, "Failed to set PCI DMA mask\n");
8507		goto cleanup_nomem;
8508	}
8509
8510	rc = pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE,
8511				   ioa_cfg->chip_cfg->cache_line_size);
8512
8513	if (rc != PCIBIOS_SUCCESSFUL) {
8514		dev_err(&pdev->dev, "Write of cache line size failed\n");
8515		rc = -EIO;
8516		goto cleanup_nomem;
8517	}
8518
8519	/* Enable MSI style interrupts if they are supported. */
8520	if (ioa_cfg->ipr_chip->intr_type == IPR_USE_MSI && !pci_enable_msi(pdev)) {
8521		rc = ipr_test_msi(ioa_cfg, pdev);
8522		if (rc == -EOPNOTSUPP)
8523			pci_disable_msi(pdev);
8524		else if (rc)
8525			goto out_msi_disable;
8526		else
8527			dev_info(&pdev->dev, "MSI enabled with IRQ: %d\n", pdev->irq);
8528	} else if (ipr_debug)
8529		dev_info(&pdev->dev, "Cannot enable MSI.\n");
8530
8531	/* Save away PCI config space for use following IOA reset */
8532	rc = pci_save_state(pdev);
8533
8534	if (rc != PCIBIOS_SUCCESSFUL) {
8535		dev_err(&pdev->dev, "Failed to save PCI config space\n");
8536		rc = -EIO;
8537		goto cleanup_nomem;
8538	}
8539
8540	if ((rc = ipr_save_pcix_cmd_reg(ioa_cfg)))
8541		goto cleanup_nomem;
8542
8543	if ((rc = ipr_set_pcix_cmd_reg(ioa_cfg)))
8544		goto cleanup_nomem;
8545
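	/*
	 * Size the config table: a header plus one entry per supported
	 * device. SIS64 adapters use the larger 64-bit table format.
	 */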
8546	if (ioa_cfg->sis64)
8547		ioa_cfg->cfg_table_size = (sizeof(struct ipr_config_table_hdr64)
8548				+ ((sizeof(struct ipr_config_table_entry64)
8549				* ioa_cfg->max_devs_supported)));
8550	else
8551		ioa_cfg->cfg_table_size = (sizeof(struct ipr_config_table_hdr)
8552				+ ((sizeof(struct ipr_config_table_entry)
8553				* ioa_cfg->max_devs_supported)));
8554
8555	rc = ipr_alloc_mem(ioa_cfg);
8556	if (rc < 0) {
8557		dev_err(&pdev->dev,
8558			"Couldn't allocate enough memory for device driver!\n");
8559		goto cleanup_nomem;
8560	}
8561
8562	/*
8563	 * If HRRQ updated interrupt is not masked, or reset alert is set,
8564	 * the card is in an unknown state and needs a hard reset
8565	 */
8566	mask = readl(ioa_cfg->regs.sense_interrupt_mask_reg32);
8567	interrupts = readl(ioa_cfg->regs.sense_interrupt_reg32);
8568	uproc = readl(ioa_cfg->regs.sense_uproc_interrupt_reg32);
8569	if ((mask & IPR_PCII_HRRQ_UPDATED) == 0 || (uproc & IPR_UPROCI_RESET_ALERT))
8570		ioa_cfg->needs_hard_reset = 1;
8571	if (interrupts & IPR_PCII_ERROR_INTERRUPTS)
8572		ioa_cfg->needs_hard_reset = 1;
8573	if (interrupts & IPR_PCII_IOA_UNIT_CHECKED)
8574		ioa_cfg->ioa_unit_checked = 1;
8575
8576	ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
8577	rc = request_irq(pdev->irq, ipr_isr,
8578			 ioa_cfg->msi_received ? 0 : IRQF_SHARED,
8579			 IPR_NAME, ioa_cfg);
8580
8581	if (rc) {
8582		dev_err(&pdev->dev, "Couldn't register IRQ %d! rc=%d\n",
8583			pdev->irq, rc);
8584		goto cleanup_nolog;
8585	}
8586
8587	if ((dev_id->driver_data & IPR_USE_PCI_WARM_RESET) ||
8588	    (dev_id->device == PCI_DEVICE_ID_IBM_OBSIDIAN_E && !ioa_cfg->revid)) {
8589		ioa_cfg->needs_warm_reset = 1;
8590		ioa_cfg->reset = ipr_reset_slot_reset;
8591	} else
8592		ioa_cfg->reset = ipr_reset_start_bist;
8593
8594	spin_lock(&ipr_driver_lock);
8595	list_add_tail(&ioa_cfg->queue, &ipr_ioa_head);
8596	spin_unlock(&ipr_driver_lock);
8597
8598	LEAVE;
8599out:
8600	return rc;
8601
8602cleanup_nolog:
8603	ipr_free_mem(ioa_cfg);
8604cleanup_nomem:
8605	iounmap(ipr_regs);
8606out_msi_disable:
8607	pci_disable_msi(pdev);
8608out_release_regions:
8609	pci_release_regions(pdev);
8610out_scsi_host_put:
8611	scsi_host_put(host);
8612out_disable:
8613	pci_disable_device(pdev);
8614	goto out;
8615}
8616
8617/**
8618 * ipr_scan_vsets - Scans for VSET devices
8619 * @ioa_cfg:	ioa config struct
8620 *
8621 * Description: Since the VSET resources do not follow SAM in that we can have
8622 * sparse LUNs with no LUN 0, we have to scan for these ourselves.
8623 *
8624 * Return value:
8625 * 	none
8626 **/
8627static void ipr_scan_vsets(struct ipr_ioa_cfg *ioa_cfg)
8628{
8629	int target, lun;
8630
8631	for (target = 0; target < IPR_MAX_NUM_TARGETS_PER_BUS; target++)
8632		for (lun = 0; lun < IPR_MAX_NUM_VSET_LUNS_PER_TARGET; lun++)
8633			scsi_add_device(ioa_cfg->host, IPR_VSET_BUS, target, lun);
8634}
8635
8636/**
8637 * ipr_initiate_ioa_bringdown - Bring down an adapter
8638 * @ioa_cfg:		ioa config struct
8639 * @shutdown_type:	shutdown type
8640 *
8641 * Description: This function will initiate bringing down the adapter.
8642 * This consists of issuing an IOA shutdown to the adapter
8643 * to flush the cache, and running BIST.
8644 * If the caller needs to wait on the completion of the reset,
8645 * the caller must sleep on the reset_wait_q.
8646 *
8647 * Return value:
8648 * 	none
8649 **/
8650static void ipr_initiate_ioa_bringdown(struct ipr_ioa_cfg *ioa_cfg,
8651				       enum ipr_shutdown_type shutdown_type)
8652{
8653	ENTER;
8654	if (ioa_cfg->sdt_state == WAIT_FOR_DUMP)
8655		ioa_cfg->sdt_state = ABORT_DUMP;
8656	ioa_cfg->reset_retries = 0;
8657	ioa_cfg->in_ioa_bringdown = 1;
8658	ipr_initiate_ioa_reset(ioa_cfg, shutdown_type);
8659	LEAVE;
8660}
8661
8662/**
8663 * __ipr_remove - Remove a single adapter
8664 * @pdev:	pci device struct
8665 *
8666 * Adapter hot plug remove entry point.
8667 *
8668 * Return value:
8669 * 	none
8670 **/
8671static void __ipr_remove(struct pci_dev *pdev)
8672{
8673	unsigned long host_lock_flags = 0;
8674	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
8675	ENTER;
8676
8677	spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
8678	while(ioa_cfg->in_reset_reload) {
8679		spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
8680		wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
8681		spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
8682	}
8683
8684	ipr_initiate_ioa_bringdown(ioa_cfg, IPR_SHUTDOWN_NORMAL);
8685
8686	spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
8687	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
8688	flush_scheduled_work();
8689	spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
8690
8691	spin_lock(&ipr_driver_lock);
8692	list_del(&ioa_cfg->queue);
8693	spin_unlock(&ipr_driver_lock);
8694
8695	if (ioa_cfg->sdt_state == ABORT_DUMP)
8696		ioa_cfg->sdt_state = WAIT_FOR_DUMP;
8697	spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
8698
8699	ipr_free_all_resources(ioa_cfg);
8700
8701	LEAVE;
8702}
8703
8704/**
8705 * ipr_remove - IOA hot plug remove entry point
8706 * @pdev:	pci device struct
8707 *
8708 * Adapter hot plug remove entry point.
8709 *
8710 * Return value:
8711 * 	none
8712 **/
8713static void __devexit ipr_remove(struct pci_dev *pdev)
8714{
8715	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
8716
8717	ENTER;
8718
8719	ipr_remove_trace_file(&ioa_cfg->host->shost_dev.kobj,
8720			      &ipr_trace_attr);
8721	ipr_remove_dump_file(&ioa_cfg->host->shost_dev.kobj,
8722			     &ipr_dump_attr);
8723	scsi_remove_host(ioa_cfg->host);
8724
8725	__ipr_remove(pdev);
8726
8727	LEAVE;
8728}
8729
8730/**
8731 * ipr_probe - Adapter hot plug add entry point
8732 *
8733 * Return value:
8734 * 	0 on success / non-zero on failure
8735 **/
8736static int __devinit ipr_probe(struct pci_dev *pdev,
8737			       const struct pci_device_id *dev_id)
8738{
8739	struct ipr_ioa_cfg *ioa_cfg;
8740	int rc;
8741
8742	rc = ipr_probe_ioa(pdev, dev_id);
8743
8744	if (rc)
8745		return rc;
8746
8747	ioa_cfg = pci_get_drvdata(pdev);
8748	rc = ipr_probe_ioa_part2(ioa_cfg);
8749
8750	if (rc) {
8751		__ipr_remove(pdev);
8752		return rc;
8753	}
8754
8755	rc = scsi_add_host(ioa_cfg->host, &pdev->dev);
8756
8757	if (rc) {
8758		__ipr_remove(pdev);
8759		return rc;
8760	}
8761
8762	rc = ipr_create_trace_file(&ioa_cfg->host->shost_dev.kobj,
8763				   &ipr_trace_attr);
8764
8765	if (rc) {
8766		scsi_remove_host(ioa_cfg->host);
8767		__ipr_remove(pdev);
8768		return rc;
8769	}
8770
8771	rc = ipr_create_dump_file(&ioa_cfg->host->shost_dev.kobj,
8772				   &ipr_dump_attr);
8773
8774	if (rc) {
8775		ipr_remove_trace_file(&ioa_cfg->host->shost_dev.kobj,
8776				      &ipr_trace_attr);
8777		scsi_remove_host(ioa_cfg->host);
8778		__ipr_remove(pdev);
8779		return rc;
8780	}
8781
8782	scsi_scan_host(ioa_cfg->host);
8783	ipr_scan_vsets(ioa_cfg);
8784	scsi_add_device(ioa_cfg->host, IPR_IOA_BUS, IPR_IOA_TARGET, IPR_IOA_LUN);
8785	ioa_cfg->allow_ml_add_del = 1;
8786	ioa_cfg->host->max_channel = IPR_VSET_BUS;
8787	schedule_work(&ioa_cfg->work_q);
8788	return 0;
8789}
8790
8791/**
8792 * ipr_shutdown - Shutdown handler.
8793 * @pdev:	pci device struct
8794 *
8795 * This function is invoked upon system shutdown/reboot. It will issue
8796 * an adapter shutdown to the adapter to flush the write cache.
8797 *
8798 * Return value:
8799 * 	none
8800 **/
8801static void ipr_shutdown(struct pci_dev *pdev)
8802{
8803	struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
8804	unsigned long lock_flags = 0;
8805
8806	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
8807	while(ioa_cfg->in_reset_reload) {
8808		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
8809		wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
8810		spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
8811	}
8812
8813	ipr_initiate_ioa_bringdown(ioa_cfg, IPR_SHUTDOWN_NORMAL);
8814	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
8815	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
8816}
8817
8818static struct pci_device_id ipr_pci_table[] __devinitdata = {
8819	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
8820		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_5702, 0, 0, 0 },
8821	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
8822		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_5703, 0, 0, 0 },
8823	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
8824		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_573D, 0, 0, 0 },
8825	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE,
8826		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_573E, 0, 0, 0 },
8827	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
8828		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571B, 0, 0, 0 },
8829	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
8830		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572E, 0, 0, 0 },
8831	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
8832		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571A, 0, 0, 0 },
8833	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE,
8834		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575B, 0, 0,
8835		IPR_USE_LONG_TRANSOP_TIMEOUT },
8836	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
8837	      PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572A, 0, 0, 0 },
8838	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
8839	      PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572B, 0, 0,
8840	      IPR_USE_LONG_TRANSOP_TIMEOUT },
8841	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN,
8842	      PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575C, 0, 0,
8843	      IPR_USE_LONG_TRANSOP_TIMEOUT },
8844	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
8845	      PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572A, 0, 0, 0 },
8846	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
8847	      PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572B, 0, 0,
8848	      IPR_USE_LONG_TRANSOP_TIMEOUT },
8849	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
8850	      PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575C, 0, 0,
8851	      IPR_USE_LONG_TRANSOP_TIMEOUT },
8852	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
8853	      PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_574E, 0, 0,
8854	      IPR_USE_LONG_TRANSOP_TIMEOUT },
8855	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
8856	      PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B3, 0, 0, 0 },
8857	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
8858	      PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B7, 0, 0,
8859	      IPR_USE_LONG_TRANSOP_TIMEOUT | IPR_USE_PCI_WARM_RESET },
8860	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE,
8861		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_2780, 0, 0, 0 },
8862	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
8863		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571E, 0, 0, 0 },
8864	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
8865		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_571F, 0, 0,
8866		IPR_USE_LONG_TRANSOP_TIMEOUT },
8867	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
8868		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572F, 0, 0,
8869		IPR_USE_LONG_TRANSOP_TIMEOUT },
8870	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
8871		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B5, 0, 0, 0 },
8872	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
8873		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_574D, 0, 0, 0 },
8874	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
8875		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B2, 0, 0, 0 },
8876	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_ASIC_E2,
8877		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B4, 0, 0, 0 },
8878	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_ASIC_E2,
8879		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B1, 0, 0, 0 },
8880	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_ASIC_E2,
8881		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C6, 0, 0, 0 },
8882	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_ASIC_E2,
8883		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575D, 0, 0, 0 },
8884	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_ASIC_E2,
8885		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57CE, 0, 0, 0 },
8886	{ }
8887};
8888MODULE_DEVICE_TABLE(pci, ipr_pci_table);
8889
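/* PCI error recovery callbacks invoked by the PCI core */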
8890static struct pci_error_handlers ipr_err_handler = {
8891	.error_detected = ipr_pci_error_detected,
8892	.slot_reset = ipr_pci_slot_reset,
8893};
8894
8895static struct pci_driver ipr_driver = {
8896	.name = IPR_NAME,
8897	.id_table = ipr_pci_table,
8898	.probe = ipr_probe,
8899	.remove = __devexit_p(ipr_remove),
8900	.shutdown = ipr_shutdown,
8901	.err_handler = &ipr_err_handler,
8902};
8903
8904/**
8905 * ipr_halt_done - Shutdown prepare completion
8906 * @ipr_cmd:	ipr command struct
 *
8907 * Return value:
8908 * 	none
8909 **/
8910static void ipr_halt_done(struct ipr_cmnd *ipr_cmd)
8911{
8912	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
8913
8914	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
8915}
8916
8917/**
8918 * ipr_halt - Issue shutdown prepare to all adapters
8919 * @nb:	notifier block
 * @event:	notifier event code
 * @buf:	notifier data (unused)
 *
8920 * Return value:
8921 * 	NOTIFY_OK on success / NOTIFY_DONE if the event is not a shutdown event
8922 **/
8923static int ipr_halt(struct notifier_block *nb, ulong event, void *buf)
8924{
8925	struct ipr_cmnd *ipr_cmd;
8926	struct ipr_ioa_cfg *ioa_cfg;
8927	unsigned long flags = 0;
8928
8929	if (event != SYS_RESTART && event != SYS_HALT && event != SYS_POWER_OFF)
8930		return NOTIFY_DONE;
8931
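	/* Walk every registered adapter that is accepting commands and
	 * send it a shutdown prepare before the system goes down.
	 */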
8932	spin_lock(&ipr_driver_lock);
8933
8934	list_for_each_entry(ioa_cfg, &ipr_ioa_head, queue) {
8935		spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
8936		if (!ioa_cfg->allow_cmds) {
8937			spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
8938			continue;
8939		}
8940
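		/* Issue a "shutdown prepare for normal" IOA command */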
8941		ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
8942		ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
8943		ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
8944		ipr_cmd->ioarcb.cmd_pkt.cdb[0] = IPR_IOA_SHUTDOWN;
8945		ipr_cmd->ioarcb.cmd_pkt.cdb[1] = IPR_SHUTDOWN_PREPARE_FOR_NORMAL;
8946
8947		ipr_do_req(ipr_cmd, ipr_halt_done, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
8948		spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
8949	}
8950	spin_unlock(&ipr_driver_lock);
8951
8952	return NOTIFY_OK;
8953}
8954
8955static struct notifier_block ipr_notifier = {
8956	.notifier_call = ipr_halt,
8957};
8958
8959/**
8960 * ipr_init - Module entry point
8961 *
8962 * Return value:
8963 * 	0 on success / negative value on failure
8964 **/
8965static int __init ipr_init(void)
8966{
8967	ipr_info("IBM Power RAID SCSI Device Driver version: %s %s\n",
8968		 IPR_DRIVER_VERSION, IPR_DRIVER_DATE);
8969
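	/* Register for reboot/halt/power-off notifications so ipr_halt()
	 * can send a shutdown prepare to each adapter, then register the
	 * PCI driver.
	 */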
8970	register_reboot_notifier(&ipr_notifier);
8971	return pci_register_driver(&ipr_driver);
8972}
8973
8974/**
8975 * ipr_exit - Module unload
8976 *
8977 * Module unload entry point.
8978 *
8979 * Return value:
8980 * 	none
8981 **/
8982static void __exit ipr_exit(void)
8983{
8984	unregister_reboot_notifier(&ipr_notifier);
8985	pci_unregister_driver(&ipr_driver);
8986}
8987
8988module_init(ipr_init);
8989module_exit(ipr_exit);
8990