1/*
2 * QLogic Fibre Channel HBA Driver
3 * Copyright (c)  2003-2014 QLogic Corporation
4 *
5 * See LICENSE.qla2xxx for copyright and licensing details.
6 */
7
8/*
9 * Table for showing the current message id in use for particular level
10 * Change this table for addition of log/debug messages.
11 * ----------------------------------------------------------------------
12 * |             Level            |   Last Value Used  |     Holes	|
13 * ----------------------------------------------------------------------
14 * | Module Init and Probe        |       0x017d       | 0x0144,0x0146	|
15 * |                              |                    | 0x015b-0x0160	|
16 * |                              |                    | 0x016e-0x0170	|
17 * | Mailbox commands             |       0x118d       | 0x1115-0x1116	|
18 * |                              |                    | 0x111a-0x111b  |
19 * | Device Discovery             |       0x2016       | 0x2020-0x2022, |
20 * |                              |                    | 0x2011-0x2012, |
21 * |                              |                    | 0x2099-0x20a4  |
22 * | Queue Command and IO tracing |       0x3059       | 0x300b         |
23 * |                              |                    | 0x3027-0x3028  |
24 * |                              |                    | 0x303d-0x3041  |
25 * |                              |                    | 0x302d,0x3033  |
26 * |                              |                    | 0x3036,0x3038  |
27 * |                              |                    | 0x303a		|
28 * | DPC Thread                   |       0x4023       | 0x4002,0x4013  |
29 * | Async Events                 |       0x5087       | 0x502b-0x502f  |
30 * |                              |                    | 0x5047		|
31 * |                              |                    | 0x5084,0x5075	|
32 * |                              |                    | 0x503d,0x5044  |
33 * |                              |                    | 0x507b,0x505f	|
34 * | Timer Routines               |       0x6012       |                |
35 * | User Space Interactions      |       0x70e2       | 0x7018,0x702e  |
36 * |				  |		       | 0x7020,0x7024  |
37 * |                              |                    | 0x7039,0x7045  |
38 * |                              |                    | 0x7073-0x7075  |
39 * |                              |                    | 0x70a5-0x70a6  |
40 * |                              |                    | 0x70a8,0x70ab  |
41 * |                              |                    | 0x70ad-0x70ae  |
42 * |                              |                    | 0x70d7-0x70db  |
43 * |                              |                    | 0x70de-0x70df  |
44 * | Task Management              |       0x803d       | 0x8000,0x800b  |
45 * |                              |                    | 0x8019         |
46 * |                              |                    | 0x8025,0x8026  |
47 * |                              |                    | 0x8031,0x8032  |
48 * |                              |                    | 0x8039,0x803c  |
49 * | AER/EEH                      |       0x9011       |		|
50 * | Virtual Port                 |       0xa007       |		|
51 * | ISP82XX Specific             |       0xb157       | 0xb002,0xb024  |
52 * |                              |                    | 0xb09e,0xb0ae  |
53 * |				  |		       | 0xb0c3,0xb0c6  |
54 * |                              |                    | 0xb0e0-0xb0ef  |
55 * |                              |                    | 0xb085,0xb0dc  |
56 * |                              |                    | 0xb107,0xb108  |
57 * |                              |                    | 0xb111,0xb11e  |
58 * |                              |                    | 0xb12c,0xb12d  |
59 * |                              |                    | 0xb13a,0xb142  |
60 * |                              |                    | 0xb13c-0xb140  |
61 * |                              |                    | 0xb149		|
62 * | MultiQ                       |       0xc00c       |		|
63 * | Misc                         |       0xd213       | 0xd011-0xd017	|
64 * |                              |                    | 0xd021,0xd024	|
65 * |                              |                    | 0xd025,0xd029	|
66 * |                              |                    | 0xd02a,0xd02e	|
67 * |                              |                    | 0xd031-0xd0ff	|
68 * |                              |                    | 0xd101-0xd1fe	|
69 * |                              |                    | 0xd214-0xd2fe	|
70 * | Target Mode		  |	  0xe079       |		|
71 * | Target Mode Management	  |	  0xf072       | 0xf002		|
72 * |                              |                    | 0xf046-0xf049  |
73 * | Target Mode Task Management  |	  0x1000b      |		|
74 * ----------------------------------------------------------------------
75 */
76
77#include "qla_def.h"
78
79#include <linux/delay.h>
80
81static uint32_t ql_dbg_offset = 0x800;
82
83static inline void
84qla2xxx_prep_dump(struct qla_hw_data *ha, struct qla2xxx_fw_dump *fw_dump)
85{
86	fw_dump->fw_major_version = htonl(ha->fw_major_version);
87	fw_dump->fw_minor_version = htonl(ha->fw_minor_version);
88	fw_dump->fw_subminor_version = htonl(ha->fw_subminor_version);
89	fw_dump->fw_attributes = htonl(ha->fw_attributes);
90
91	fw_dump->vendor = htonl(ha->pdev->vendor);
92	fw_dump->device = htonl(ha->pdev->device);
93	fw_dump->subsystem_vendor = htonl(ha->pdev->subsystem_vendor);
94	fw_dump->subsystem_device = htonl(ha->pdev->subsystem_device);
95}
96
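/*
 * Copy the base (queue 0) request and response rings into the dump
 * buffer at @ptr and return the address just past the copied data, so
 * the caller can keep appending to the dump.
 */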
97static inline void *
98qla2xxx_copy_queues(struct qla_hw_data *ha, void *ptr)
99{
100	struct req_que *req = ha->req_q_map[0];
101	struct rsp_que *rsp = ha->rsp_q_map[0];
102	/* Request queue. */
103	memcpy(ptr, req->ring, req->length *
104	    sizeof(request_t));
105
106	/* Response queue. */
107	ptr += req->length * sizeof(request_t);
108	memcpy(ptr, rsp->ring, rsp->length  *
109	    sizeof(response_t));
110
111	return ptr + (rsp->length * sizeof(response_t));
112}
113
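/*
 * Dump MPI RAM via the MBC_LOAD_DUMP_MPI_RAM mailbox command.  The GID
 * list buffer is borrowed as a DMA bounce buffer, so the RAM is read in
 * chunks of qla2x00_gid_list_size() bytes; on success *nxt points at
 * the first unused dword of @ram.
 */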
114int
115qla27xx_dump_mpi_ram(struct qla_hw_data *ha, uint32_t addr, uint32_t *ram,
116	uint32_t ram_dwords, void **nxt)
117{
118	int rval;
119	uint32_t cnt, stat, timer, dwords, idx;
120	uint16_t mb0, mb1;
121	struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
122	dma_addr_t dump_dma = ha->gid_list_dma;
123	uint32_t *dump = (uint32_t *)ha->gid_list;
124
125	rval = QLA_SUCCESS;
126	mb0 = 0;
127
128	WRT_REG_WORD(&reg->mailbox0, MBC_LOAD_DUMP_MPI_RAM);
129	clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
130
131	dwords = qla2x00_gid_list_size(ha) / 4;
132	for (cnt = 0; cnt < ram_dwords && rval == QLA_SUCCESS;
133	    cnt += dwords, addr += dwords) {
134		if (cnt + dwords > ram_dwords)
135			dwords = ram_dwords - cnt;
136
137		WRT_REG_WORD(&reg->mailbox1, LSW(addr));
138		WRT_REG_WORD(&reg->mailbox8, MSW(addr));
139
140		WRT_REG_WORD(&reg->mailbox2, MSW(dump_dma));
141		WRT_REG_WORD(&reg->mailbox3, LSW(dump_dma));
142		WRT_REG_WORD(&reg->mailbox6, MSW(MSD(dump_dma)));
143		WRT_REG_WORD(&reg->mailbox7, LSW(MSD(dump_dma)));
144
145		WRT_REG_WORD(&reg->mailbox4, MSW(dwords));
146		WRT_REG_WORD(&reg->mailbox5, LSW(dwords));
147
148		WRT_REG_WORD(&reg->mailbox9, 0);
149		WRT_REG_DWORD(&reg->hccr, HCCRX_SET_HOST_INT);
150
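		/*
		 * Mailbox completion is polled here instead of being handled
		 * by the ISR: wait for a mailbox status (0x1, 0x2, 0x10 or
		 * 0x11) in host_status for up to ~30 seconds (6,000,000
		 * iterations x 5 usec).
		 */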
151		ha->flags.mbox_int = 0;
152		for (timer = 6000000; timer; timer--) {
153			/* Check for pending interrupts. */
154			stat = RD_REG_DWORD(&reg->host_status);
155			if (stat & HSRX_RISC_INT) {
156				stat &= 0xff;
157
158				if (stat == 0x1 || stat == 0x2 ||
159				    stat == 0x10 || stat == 0x11) {
160					set_bit(MBX_INTERRUPT,
161					    &ha->mbx_cmd_flags);
162
163					mb0 = RD_REG_WORD(&reg->mailbox0);
164					mb1 = RD_REG_WORD(&reg->mailbox1);
165
166					WRT_REG_DWORD(&reg->hccr,
167					    HCCRX_CLR_RISC_INT);
168					RD_REG_DWORD(&reg->hccr);
169					break;
170				}
171
172				/* Clear this intr; it wasn't a mailbox intr */
173				WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
174				RD_REG_DWORD(&reg->hccr);
175			}
176			udelay(5);
177		}
178		ha->flags.mbox_int = 1;
179
180		if (test_and_clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags)) {
181			rval = mb0 & MBS_MASK;
182			for (idx = 0; idx < dwords; idx++)
183				ram[cnt + idx] = IS_QLA27XX(ha) ?
184				    le32_to_cpu(dump[idx]) : swab32(dump[idx]);
185		} else {
186			rval = QLA_FUNCTION_FAILED;
187		}
188	}
189
190	*nxt = rval == QLA_SUCCESS ? &ram[cnt] : NULL;
191	return rval;
192}
193
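/*
 * Dump RISC RAM with MBC_DUMP_RISC_RAM_EXTENDED, using the same chunked
 * mailbox handshake as qla27xx_dump_mpi_ram() above.
 */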
194int
195qla24xx_dump_ram(struct qla_hw_data *ha, uint32_t addr, uint32_t *ram,
196    uint32_t ram_dwords, void **nxt)
197{
198	int rval;
199	uint32_t cnt, stat, timer, dwords, idx;
200	uint16_t mb0;
201	struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
202	dma_addr_t dump_dma = ha->gid_list_dma;
203	uint32_t *dump = (uint32_t *)ha->gid_list;
204
205	rval = QLA_SUCCESS;
206	mb0 = 0;
207
208	WRT_REG_WORD(&reg->mailbox0, MBC_DUMP_RISC_RAM_EXTENDED);
209	clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
210
211	dwords = qla2x00_gid_list_size(ha) / 4;
212	for (cnt = 0; cnt < ram_dwords && rval == QLA_SUCCESS;
213	    cnt += dwords, addr += dwords) {
214		if (cnt + dwords > ram_dwords)
215			dwords = ram_dwords - cnt;
216
217		WRT_REG_WORD(&reg->mailbox1, LSW(addr));
218		WRT_REG_WORD(&reg->mailbox8, MSW(addr));
219
220		WRT_REG_WORD(&reg->mailbox2, MSW(dump_dma));
221		WRT_REG_WORD(&reg->mailbox3, LSW(dump_dma));
222		WRT_REG_WORD(&reg->mailbox6, MSW(MSD(dump_dma)));
223		WRT_REG_WORD(&reg->mailbox7, LSW(MSD(dump_dma)));
224
225		WRT_REG_WORD(&reg->mailbox4, MSW(dwords));
226		WRT_REG_WORD(&reg->mailbox5, LSW(dwords));
227		WRT_REG_DWORD(&reg->hccr, HCCRX_SET_HOST_INT);
228
229		ha->flags.mbox_int = 0;
230		for (timer = 6000000; timer; timer--) {
231			/* Check for pending interrupts. */
232			stat = RD_REG_DWORD(&reg->host_status);
233			if (stat & HSRX_RISC_INT) {
234				stat &= 0xff;
235
236				if (stat == 0x1 || stat == 0x2 ||
237				    stat == 0x10 || stat == 0x11) {
238					set_bit(MBX_INTERRUPT,
239					    &ha->mbx_cmd_flags);
240
241					mb0 = RD_REG_WORD(&reg->mailbox0);
242
243					WRT_REG_DWORD(&reg->hccr,
244					    HCCRX_CLR_RISC_INT);
245					RD_REG_DWORD(&reg->hccr);
246					break;
247				}
248
249				/* Clear this intr; it wasn't a mailbox intr */
250				WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
251				RD_REG_DWORD(&reg->hccr);
252			}
253			udelay(5);
254		}
255		ha->flags.mbox_int = 1;
256
257		if (test_and_clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags)) {
258			rval = mb0 & MBS_MASK;
259			for (idx = 0; idx < dwords; idx++)
260				ram[cnt + idx] = IS_QLA27XX(ha) ?
261				    le32_to_cpu(dump[idx]) : swab32(dump[idx]);
262		} else {
263			rval = QLA_FUNCTION_FAILED;
264		}
265	}
266
267	*nxt = rval == QLA_SUCCESS ? &ram[cnt] : NULL;
268	return rval;
269}
270
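/*
 * Capture firmware memory in two passes: code RAM starting at 0x20000,
 * then external memory starting at 0x100000.  A capability bit is set
 * in fw_dump_cap_flags as each pass completes.
 */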
271static int
272qla24xx_dump_memory(struct qla_hw_data *ha, uint32_t *code_ram,
273    uint32_t cram_size, void **nxt)
274{
275	int rval;
276
277	/* Code RAM. */
278	rval = qla24xx_dump_ram(ha, 0x20000, code_ram, cram_size / 4, nxt);
279	if (rval != QLA_SUCCESS)
280		return rval;
281
282	set_bit(RISC_SRAM_DUMP_CMPL, &ha->fw_dump_cap_flags);
283
284	/* External Memory. */
285	rval = qla24xx_dump_ram(ha, 0x100000, *nxt,
286	    ha->fw_memory_size - 0x100000 + 1, nxt);
287	if (rval == QLA_SUCCESS)
288		set_bit(RISC_EXT_MEM_DUMP_CMPL, &ha->fw_dump_cap_flags);
289
290	return rval;
291}
292
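/*
 * Read @count dwords from a windowed register bank: select the bank by
 * writing @iobase to iobase_addr, then read consecutive registers
 * starting at iobase_window, storing the values big-endian.  Returns
 * the advanced buffer pointer so adjacent windows can be chained.
 */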
293static uint32_t *
294qla24xx_read_window(struct device_reg_24xx __iomem *reg, uint32_t iobase,
295    uint32_t count, uint32_t *buf)
296{
297	uint32_t __iomem *dmp_reg;
298
299	WRT_REG_DWORD(&reg->iobase_addr, iobase);
300	dmp_reg = &reg->iobase_window;
301	while (count--)
302		*buf++ = htonl(RD_REG_DWORD(dmp_reg++));
303
304	return buf;
305}
306
307void
308qla24xx_pause_risc(struct device_reg_24xx __iomem *reg, struct qla_hw_data *ha)
309{
310	WRT_REG_DWORD(&reg->hccr, HCCRX_SET_RISC_PAUSE);
311
312	/* A 100 usec delay is sufficient for the hardware to pause the RISC */
313	udelay(100);
314	if (RD_REG_DWORD(&reg->host_status) & HSRX_RISC_PAUSED)
315		set_bit(RISC_PAUSE_CMPL, &ha->fw_dump_cap_flags);
316}
317
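/*
 * Reset the RISC for the dump: shut down DMA, assert the ISP soft
 * reset, release the RISC, then wait for mailbox0 to clear, which
 * indicates the firmware is ready again.  Each stage that completes
 * records a bit in fw_dump_cap_flags.
 */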
318int
319qla24xx_soft_reset(struct qla_hw_data *ha)
320{
321	int rval = QLA_SUCCESS;
322	uint32_t cnt;
323	uint16_t wd;
324	struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
325
326	/*
327	 * Reset the RISC.  The required delay is system dependent, so the
328	 * driver simply proceeds with the reset sequence after waiting
329	 * for the timeout period below.
330	 */
331	WRT_REG_DWORD(&reg->ctrl_status, CSRX_DMA_SHUTDOWN|MWB_4096_BYTES);
332	for (cnt = 0; cnt < 30000; cnt++) {
333		if ((RD_REG_DWORD(&reg->ctrl_status) & CSRX_DMA_ACTIVE) == 0)
334			break;
335
336		udelay(10);
337	}
338	if (!(RD_REG_DWORD(&reg->ctrl_status) & CSRX_DMA_ACTIVE))
339		set_bit(DMA_SHUTDOWN_CMPL, &ha->fw_dump_cap_flags);
340
341	WRT_REG_DWORD(&reg->ctrl_status,
342	    CSRX_ISP_SOFT_RESET|CSRX_DMA_SHUTDOWN|MWB_4096_BYTES);
343	pci_read_config_word(ha->pdev, PCI_COMMAND, &wd);
344
345	udelay(100);
346
347	/* Wait for soft-reset to complete. */
348	for (cnt = 0; cnt < 30000; cnt++) {
349		if ((RD_REG_DWORD(&reg->ctrl_status) &
350		    CSRX_ISP_SOFT_RESET) == 0)
351			break;
352
353		udelay(10);
354	}
355	if (!(RD_REG_DWORD(&reg->ctrl_status) & CSRX_ISP_SOFT_RESET))
356		set_bit(ISP_RESET_CMPL, &ha->fw_dump_cap_flags);
357
358	WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_RESET);
359	RD_REG_DWORD(&reg->hccr);             /* PCI Posting. */
360
361	for (cnt = 10000; RD_REG_WORD(&reg->mailbox0) != 0 &&
362	    rval == QLA_SUCCESS; cnt--) {
363		if (cnt)
364			udelay(10);
365		else
366			rval = QLA_FUNCTION_TIMEOUT;
367	}
368	if (rval == QLA_SUCCESS)
369		set_bit(RISC_RDY_AFT_RESET, &ha->fw_dump_cap_flags);
370
371	return rval;
372}
373
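/*
 * 16-bit variant of the RISC RAM dump used by the ISP2300-class dump
 * path: MBC_DUMP_RISC_RAM_EXTENDED transfers words into the borrowed
 * GID list buffer, which are then byte-swapped into @ram.
 */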
374static int
375qla2xxx_dump_ram(struct qla_hw_data *ha, uint32_t addr, uint16_t *ram,
376    uint32_t ram_words, void **nxt)
377{
378	int rval;
379	uint32_t cnt, stat, timer, words, idx;
380	uint16_t mb0;
381	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
382	dma_addr_t dump_dma = ha->gid_list_dma;
383	uint16_t *dump = (uint16_t *)ha->gid_list;
384
385	rval = QLA_SUCCESS;
386	mb0 = 0;
387
388	WRT_MAILBOX_REG(ha, reg, 0, MBC_DUMP_RISC_RAM_EXTENDED);
389	clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
390
391	words = qla2x00_gid_list_size(ha) / 2;
392	for (cnt = 0; cnt < ram_words && rval == QLA_SUCCESS;
393	    cnt += words, addr += words) {
394		if (cnt + words > ram_words)
395			words = ram_words - cnt;
396
397		WRT_MAILBOX_REG(ha, reg, 1, LSW(addr));
398		WRT_MAILBOX_REG(ha, reg, 8, MSW(addr));
399
400		WRT_MAILBOX_REG(ha, reg, 2, MSW(dump_dma));
401		WRT_MAILBOX_REG(ha, reg, 3, LSW(dump_dma));
402		WRT_MAILBOX_REG(ha, reg, 6, MSW(MSD(dump_dma)));
403		WRT_MAILBOX_REG(ha, reg, 7, LSW(MSD(dump_dma)));
404
405		WRT_MAILBOX_REG(ha, reg, 4, words);
406		WRT_REG_WORD(&reg->hccr, HCCR_SET_HOST_INT);
407
408		for (timer = 6000000; timer; timer--) {
409			/* Check for pending interrupts. */
410			stat = RD_REG_DWORD(&reg->u.isp2300.host_status);
411			if (stat & HSR_RISC_INT) {
412				stat &= 0xff;
413
414				if (stat == 0x1 || stat == 0x2) {
415					set_bit(MBX_INTERRUPT,
416					    &ha->mbx_cmd_flags);
417
418					mb0 = RD_MAILBOX_REG(ha, reg, 0);
419
420					/* Release mailbox registers. */
421					WRT_REG_WORD(&reg->semaphore, 0);
422					WRT_REG_WORD(&reg->hccr,
423					    HCCR_CLR_RISC_INT);
424					RD_REG_WORD(&reg->hccr);
425					break;
426				} else if (stat == 0x10 || stat == 0x11) {
427					set_bit(MBX_INTERRUPT,
428					    &ha->mbx_cmd_flags);
429
430					mb0 = RD_MAILBOX_REG(ha, reg, 0);
431
432					WRT_REG_WORD(&reg->hccr,
433					    HCCR_CLR_RISC_INT);
434					RD_REG_WORD(&reg->hccr);
435					break;
436				}
437
438				/* clear this intr; it wasn't a mailbox intr */
439				WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT);
440				RD_REG_WORD(&reg->hccr);
441			}
442			udelay(5);
443		}
444
445		if (test_and_clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags)) {
446			rval = mb0 & MBS_MASK;
447			for (idx = 0; idx < words; idx++)
448				ram[cnt + idx] = swab16(dump[idx]);
449		} else {
450			rval = QLA_FUNCTION_FAILED;
451		}
452	}
453
454	*nxt = rval == QLA_SUCCESS ? &ram[cnt] : NULL;
455	return rval;
456}
457
458static inline void
459qla2xxx_read_window(struct device_reg_2xxx __iomem *reg, uint32_t count,
460    uint16_t *buf)
461{
462	uint16_t __iomem *dmp_reg = &reg->u.isp2300.fb_cmd;
463
464	while (count--)
465		*buf++ = htons(RD_REG_WORD(dmp_reg++));
466}
467
468static inline void *
469qla24xx_copy_eft(struct qla_hw_data *ha, void *ptr)
470{
471	if (!ha->eft)
472		return ptr;
473
474	memcpy(ptr, ha->eft, ntohl(ha->fw_dump->eft_size));
475	return ptr + ntohl(ha->fw_dump->eft_size);
476}
477
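/*
 * Append a Fibre Channel Event (FCE) trace chain entry: a header with
 * the trace buffer's DMA address and size, the eight FCE enable
 * mailbox values, and then the trace data itself.
 */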
478static inline void *
479qla25xx_copy_fce(struct qla_hw_data *ha, void *ptr, uint32_t **last_chain)
480{
481	uint32_t cnt;
482	uint32_t *iter_reg;
483	struct qla2xxx_fce_chain *fcec = ptr;
484
485	if (!ha->fce)
486		return ptr;
487
488	*last_chain = &fcec->type;
489	fcec->type = __constant_htonl(DUMP_CHAIN_FCE);
490	fcec->chain_size = htonl(sizeof(struct qla2xxx_fce_chain) +
491	    fce_calc_size(ha->fce_bufs));
492	fcec->size = htonl(fce_calc_size(ha->fce_bufs));
493	fcec->addr_l = htonl(LSD(ha->fce_dma));
494	fcec->addr_h = htonl(MSD(ha->fce_dma));
495
496	iter_reg = fcec->eregs;
497	for (cnt = 0; cnt < 8; cnt++)
498		*iter_reg++ = htonl(ha->fce_mb[cnt]);
499
500	memcpy(iter_reg, ha->fce, ntohl(fcec->size));
501
502	return (char *)iter_reg + ntohl(fcec->size);
503}
504
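/*
 * Append the target-mode ATIO ring as a DUMP_CHAIN_QUEUE chain entry.
 * Only the base ATIO queue is captured, hence the single-pass loop.
 */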
505static inline void *
506qla2xxx_copy_atioqueues(struct qla_hw_data *ha, void *ptr,
507	uint32_t **last_chain)
508{
509	struct qla2xxx_mqueue_chain *q;
510	struct qla2xxx_mqueue_header *qh;
511	uint32_t num_queues;
512	int que;
513	struct {
514		int length;
515		void *ring;
516	} aq, *aqp;
517
518	if (!ha->tgt.atio_ring)
519		return ptr;
520
521	num_queues = 1;
522	aqp = &aq;
523	aqp->length = ha->tgt.atio_q_length;
524	aqp->ring = ha->tgt.atio_ring;
525
526	for (que = 0; que < num_queues; que++) {
527		/* aqp = ha->atio_q_map[que]; */
528		q = ptr;
529		*last_chain = &q->type;
530		q->type = __constant_htonl(DUMP_CHAIN_QUEUE);
531		q->chain_size = htonl(
532		    sizeof(struct qla2xxx_mqueue_chain) +
533		    sizeof(struct qla2xxx_mqueue_header) +
534		    (aqp->length * sizeof(request_t)));
535		ptr += sizeof(struct qla2xxx_mqueue_chain);
536
537		/* Add header. */
538		qh = ptr;
539		qh->queue = __constant_htonl(TYPE_ATIO_QUEUE);
540		qh->number = htonl(que);
541		qh->size = htonl(aqp->length * sizeof(request_t));
542		ptr += sizeof(struct qla2xxx_mqueue_header);
543
544		/* Add data. */
545		memcpy(ptr, aqp->ring, aqp->length * sizeof(request_t));
546
547		ptr += aqp->length * sizeof(request_t);
548	}
549
550	return ptr;
551}
552
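/*
 * Append every additional (multiqueue) request and response ring as a
 * DUMP_CHAIN_QUEUE chain entry.  Queue 0 is already captured by
 * qla2xxx_copy_queues(), so both loops start at index 1.
 */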
553static inline void *
554qla25xx_copy_mqueues(struct qla_hw_data *ha, void *ptr, uint32_t **last_chain)
555{
556	struct qla2xxx_mqueue_chain *q;
557	struct qla2xxx_mqueue_header *qh;
558	struct req_que *req;
559	struct rsp_que *rsp;
560	int que;
561
562	if (!ha->mqenable)
563		return ptr;
564
565	/* Request queues */
566	for (que = 1; que < ha->max_req_queues; que++) {
567		req = ha->req_q_map[que];
568		if (!req)
569			break;
570
571		/* Add chain. */
572		q = ptr;
573		*last_chain = &q->type;
574		q->type = __constant_htonl(DUMP_CHAIN_QUEUE);
575		q->chain_size = htonl(
576		    sizeof(struct qla2xxx_mqueue_chain) +
577		    sizeof(struct qla2xxx_mqueue_header) +
578		    (req->length * sizeof(request_t)));
579		ptr += sizeof(struct qla2xxx_mqueue_chain);
580
581		/* Add header. */
582		qh = ptr;
583		qh->queue = __constant_htonl(TYPE_REQUEST_QUEUE);
584		qh->number = htonl(que);
585		qh->size = htonl(req->length * sizeof(request_t));
586		ptr += sizeof(struct qla2xxx_mqueue_header);
587
588		/* Add data. */
589		memcpy(ptr, req->ring, req->length * sizeof(request_t));
590		ptr += req->length * sizeof(request_t);
591	}
592
593	/* Response queues */
594	for (que = 1; que < ha->max_rsp_queues; que++) {
595		rsp = ha->rsp_q_map[que];
596		if (!rsp)
597			break;
598
599		/* Add chain. */
600		q = ptr;
601		*last_chain = &q->type;
602		q->type = __constant_htonl(DUMP_CHAIN_QUEUE);
603		q->chain_size = htonl(
604		    sizeof(struct qla2xxx_mqueue_chain) +
605		    sizeof(struct qla2xxx_mqueue_header) +
606		    (rsp->length * sizeof(response_t)));
607		ptr += sizeof(struct qla2xxx_mqueue_chain);
608
609		/* Add header. */
610		qh = ptr;
611		qh->queue = __constant_htonl(TYPE_RESPONSE_QUEUE);
612		qh->number = htonl(que);
613		qh->size = htonl(rsp->length * sizeof(response_t));
614		ptr += sizeof(struct qla2xxx_mqueue_header);
615
616		/* Add data. */
617		memcpy(ptr, rsp->ring, rsp->length * sizeof(response_t));
618		ptr += rsp->length * sizeof(response_t);
619	}
620
621	return ptr;
622}
623
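/*
 * Snapshot the in/out pointer registers of each request/response queue
 * pair into a DUMP_CHAIN_MQ chain entry, four dwords per queue.
 */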
624static inline void *
625qla25xx_copy_mq(struct qla_hw_data *ha, void *ptr, uint32_t **last_chain)
626{
627	uint32_t cnt, que_idx;
628	uint8_t que_cnt;
629	struct qla2xxx_mq_chain *mq;
630	device_reg_t __iomem *reg;
631
632	if (!ha->mqenable || IS_QLA83XX(ha) || IS_QLA27XX(ha))
633		return ptr;
634
635	mq = ptr;
636	*last_chain = &mq->type;
637	mq->type = __constant_htonl(DUMP_CHAIN_MQ);
638	mq->chain_size = __constant_htonl(sizeof(struct qla2xxx_mq_chain));
639
640	que_cnt = ha->max_req_queues > ha->max_rsp_queues ?
641		ha->max_req_queues : ha->max_rsp_queues;
642	mq->count = htonl(que_cnt);
643	for (cnt = 0; cnt < que_cnt; cnt++) {
644		reg = ISP_QUE_REG(ha, cnt);
645		que_idx = cnt * 4;
646		mq->qregs[que_idx] =
647		    htonl(RD_REG_DWORD(&reg->isp25mq.req_q_in));
648		mq->qregs[que_idx+1] =
649		    htonl(RD_REG_DWORD(&reg->isp25mq.req_q_out));
650		mq->qregs[que_idx+2] =
651		    htonl(RD_REG_DWORD(&reg->isp25mq.rsp_q_in));
652		mq->qregs[que_idx+3] =
653		    htonl(RD_REG_DWORD(&reg->isp25mq.rsp_q_out));
654	}
655
656	return ptr + sizeof(struct qla2xxx_mq_chain);
657}
658
659void
660qla2xxx_dump_post_process(scsi_qla_host_t *vha, int rval)
661{
662	struct qla_hw_data *ha = vha->hw;
663
664	if (rval != QLA_SUCCESS) {
665		ql_log(ql_log_warn, vha, 0xd000,
666		    "Failed to dump firmware (%x), dump status flags (0x%lx).\n",
667		    rval, ha->fw_dump_cap_flags);
668		ha->fw_dumped = 0;
669	} else {
670		ql_log(ql_log_info, vha, 0xd001,
671		    "Firmware dump saved to temp buffer (%ld/%p), dump status flags (0x%lx).\n",
672		    vha->host_no, ha->fw_dump, ha->fw_dump_cap_flags);
673		ha->fw_dumped = 1;
674		qla2x00_post_uevent_work(vha, QLA_UEVENT_CODE_FW_DUMP);
675	}
676}
677
678/**
679 * qla2300_fw_dump() - Dumps binary data from the 2300 firmware.
680 * @vha: HA context
681 * @hardware_locked: set if the caller already holds the hardware_lock
682 */
683void
684qla2300_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
685{
686	int		rval;
687	uint32_t	cnt;
688	struct qla_hw_data *ha = vha->hw;
689	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
690	uint16_t __iomem *dmp_reg;
691	unsigned long	flags;
692	struct qla2300_fw_dump	*fw;
693	void		*nxt;
694	struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
695
696	flags = 0;
697
698	if (!hardware_locked)
699		spin_lock_irqsave(&ha->hardware_lock, flags);
700
701	if (!ha->fw_dump) {
702		ql_log(ql_log_warn, vha, 0xd002,
703		    "No buffer available for dump.\n");
704		goto qla2300_fw_dump_failed;
705	}
706
707	if (ha->fw_dumped) {
708		ql_log(ql_log_warn, vha, 0xd003,
709		    "Firmware has been previously dumped (%p) "
710		    "-- ignoring request.\n",
711		    ha->fw_dump);
712		goto qla2300_fw_dump_failed;
713	}
714	fw = &ha->fw_dump->isp.isp23;
715	qla2xxx_prep_dump(ha, ha->fw_dump);
716
717	rval = QLA_SUCCESS;
718	fw->hccr = htons(RD_REG_WORD(&reg->hccr));
719
720	/* Pause RISC. */
721	WRT_REG_WORD(&reg->hccr, HCCR_PAUSE_RISC);
722	if (IS_QLA2300(ha)) {
723		for (cnt = 30000;
724		    (RD_REG_WORD(&reg->hccr) & HCCR_RISC_PAUSE) == 0 &&
725			rval == QLA_SUCCESS; cnt--) {
726			if (cnt)
727				udelay(100);
728			else
729				rval = QLA_FUNCTION_TIMEOUT;
730		}
731	} else {
732		RD_REG_WORD(&reg->hccr);		/* PCI Posting. */
733		udelay(10);
734	}
735
736	if (rval == QLA_SUCCESS) {
737		dmp_reg = &reg->flash_address;
738		for (cnt = 0; cnt < sizeof(fw->pbiu_reg) / 2; cnt++)
739			fw->pbiu_reg[cnt] = htons(RD_REG_WORD(dmp_reg++));
740
741		dmp_reg = &reg->u.isp2300.req_q_in;
742		for (cnt = 0; cnt < sizeof(fw->risc_host_reg) / 2; cnt++)
743			fw->risc_host_reg[cnt] = htons(RD_REG_WORD(dmp_reg++));
744
745		dmp_reg = &reg->u.isp2300.mailbox0;
746		for (cnt = 0; cnt < sizeof(fw->mailbox_reg) / 2; cnt++)
747			fw->mailbox_reg[cnt] = htons(RD_REG_WORD(dmp_reg++));
748
749		WRT_REG_WORD(&reg->ctrl_status, 0x40);
750		qla2xxx_read_window(reg, 32, fw->resp_dma_reg);
751
752		WRT_REG_WORD(&reg->ctrl_status, 0x50);
753		qla2xxx_read_window(reg, 48, fw->dma_reg);
754
755		WRT_REG_WORD(&reg->ctrl_status, 0x00);
756		dmp_reg = &reg->risc_hw;
757		for (cnt = 0; cnt < sizeof(fw->risc_hdw_reg) / 2; cnt++)
758			fw->risc_hdw_reg[cnt] = htons(RD_REG_WORD(dmp_reg++));
759
760		WRT_REG_WORD(&reg->pcr, 0x2000);
761		qla2xxx_read_window(reg, 16, fw->risc_gp0_reg);
762
763		WRT_REG_WORD(&reg->pcr, 0x2200);
764		qla2xxx_read_window(reg, 16, fw->risc_gp1_reg);
765
766		WRT_REG_WORD(&reg->pcr, 0x2400);
767		qla2xxx_read_window(reg, 16, fw->risc_gp2_reg);
768
769		WRT_REG_WORD(&reg->pcr, 0x2600);
770		qla2xxx_read_window(reg, 16, fw->risc_gp3_reg);
771
772		WRT_REG_WORD(&reg->pcr, 0x2800);
773		qla2xxx_read_window(reg, 16, fw->risc_gp4_reg);
774
775		WRT_REG_WORD(&reg->pcr, 0x2A00);
776		qla2xxx_read_window(reg, 16, fw->risc_gp5_reg);
777
778		WRT_REG_WORD(&reg->pcr, 0x2C00);
779		qla2xxx_read_window(reg, 16, fw->risc_gp6_reg);
780
781		WRT_REG_WORD(&reg->pcr, 0x2E00);
782		qla2xxx_read_window(reg, 16, fw->risc_gp7_reg);
783
784		WRT_REG_WORD(&reg->ctrl_status, 0x10);
785		qla2xxx_read_window(reg, 64, fw->frame_buf_hdw_reg);
786
787		WRT_REG_WORD(&reg->ctrl_status, 0x20);
788		qla2xxx_read_window(reg, 64, fw->fpm_b0_reg);
789
790		WRT_REG_WORD(&reg->ctrl_status, 0x30);
791		qla2xxx_read_window(reg, 64, fw->fpm_b1_reg);
792
793		/* Reset RISC. */
794		WRT_REG_WORD(&reg->ctrl_status, CSR_ISP_SOFT_RESET);
795		for (cnt = 0; cnt < 30000; cnt++) {
796			if ((RD_REG_WORD(&reg->ctrl_status) &
797			    CSR_ISP_SOFT_RESET) == 0)
798				break;
799
800			udelay(10);
801		}
802	}
803
804	if (!IS_QLA2300(ha)) {
805		for (cnt = 30000; RD_MAILBOX_REG(ha, reg, 0) != 0 &&
806		    rval == QLA_SUCCESS; cnt--) {
807			if (cnt)
808				udelay(100);
809			else
810				rval = QLA_FUNCTION_TIMEOUT;
811		}
812	}
813
814	/* Get RISC SRAM. */
815	if (rval == QLA_SUCCESS)
816		rval = qla2xxx_dump_ram(ha, 0x800, fw->risc_ram,
817		    sizeof(fw->risc_ram) / 2, &nxt);
818
819	/* Get stack SRAM. */
820	if (rval == QLA_SUCCESS)
821		rval = qla2xxx_dump_ram(ha, 0x10000, fw->stack_ram,
822		    sizeof(fw->stack_ram) / 2, &nxt);
823
824	/* Get data SRAM. */
825	if (rval == QLA_SUCCESS)
826		rval = qla2xxx_dump_ram(ha, 0x11000, fw->data_ram,
827		    ha->fw_memory_size - 0x11000 + 1, &nxt);
828
829	if (rval == QLA_SUCCESS)
830		qla2xxx_copy_queues(ha, nxt);
831
832	qla2xxx_dump_post_process(base_vha, rval);
833
834qla2300_fw_dump_failed:
835	if (!hardware_locked)
836		spin_unlock_irqrestore(&ha->hardware_lock, flags);
837}
838
839/**
840 * qla2100_fw_dump() - Dumps binary data from the 2100/2200 firmware.
841 * @vha: HA context
842 * @hardware_locked: set if the caller already holds the hardware_lock
843 */
844void
845qla2100_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
846{
847	int		rval;
848	uint32_t	cnt, timer;
849	uint16_t	risc_address;
850	uint16_t	mb0, mb2;
851	struct qla_hw_data *ha = vha->hw;
852	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
853	uint16_t __iomem *dmp_reg;
854	unsigned long	flags;
855	struct qla2100_fw_dump	*fw;
856	struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
857
858	risc_address = 0;
859	mb0 = mb2 = 0;
860	flags = 0;
861
862	if (!hardware_locked)
863		spin_lock_irqsave(&ha->hardware_lock, flags);
864
865	if (!ha->fw_dump) {
866		ql_log(ql_log_warn, vha, 0xd004,
867		    "No buffer available for dump.\n");
868		goto qla2100_fw_dump_failed;
869	}
870
871	if (ha->fw_dumped) {
872		ql_log(ql_log_warn, vha, 0xd005,
873		    "Firmware has been previously dumped (%p) "
874		    "-- ignoring request.\n",
875		    ha->fw_dump);
876		goto qla2100_fw_dump_failed;
877	}
878	fw = &ha->fw_dump->isp.isp21;
879	qla2xxx_prep_dump(ha, ha->fw_dump);
880
881	rval = QLA_SUCCESS;
882	fw->hccr = htons(RD_REG_WORD(&reg->hccr));
883
884	/* Pause RISC. */
885	WRT_REG_WORD(&reg->hccr, HCCR_PAUSE_RISC);
886	for (cnt = 30000; (RD_REG_WORD(&reg->hccr) & HCCR_RISC_PAUSE) == 0 &&
887	    rval == QLA_SUCCESS; cnt--) {
888		if (cnt)
889			udelay(100);
890		else
891			rval = QLA_FUNCTION_TIMEOUT;
892	}
893	if (rval == QLA_SUCCESS) {
894		dmp_reg = &reg->flash_address;
895		for (cnt = 0; cnt < sizeof(fw->pbiu_reg) / 2; cnt++)
896			fw->pbiu_reg[cnt] = htons(RD_REG_WORD(dmp_reg++));
897
898		dmp_reg = &reg->u.isp2100.mailbox0;
899		for (cnt = 0; cnt < ha->mbx_count; cnt++) {
900			if (cnt == 8)
901				dmp_reg = &reg->u_end.isp2200.mailbox8;
902
903			fw->mailbox_reg[cnt] = htons(RD_REG_WORD(dmp_reg++));
904		}
905
906		dmp_reg = &reg->u.isp2100.unused_2[0];
907		for (cnt = 0; cnt < sizeof(fw->dma_reg) / 2; cnt++)
908			fw->dma_reg[cnt] = htons(RD_REG_WORD(dmp_reg++));
909
910		WRT_REG_WORD(&reg->ctrl_status, 0x00);
911		dmp_reg = &reg->risc_hw;
912		for (cnt = 0; cnt < sizeof(fw->risc_hdw_reg) / 2; cnt++)
913			fw->risc_hdw_reg[cnt] = htons(RD_REG_WORD(dmp_reg++));
914
915		WRT_REG_WORD(&reg->pcr, 0x2000);
916		qla2xxx_read_window(reg, 16, fw->risc_gp0_reg);
917
918		WRT_REG_WORD(&reg->pcr, 0x2100);
919		qla2xxx_read_window(reg, 16, fw->risc_gp1_reg);
920
921		WRT_REG_WORD(&reg->pcr, 0x2200);
922		qla2xxx_read_window(reg, 16, fw->risc_gp2_reg);
923
924		WRT_REG_WORD(&reg->pcr, 0x2300);
925		qla2xxx_read_window(reg, 16, fw->risc_gp3_reg);
926
927		WRT_REG_WORD(&reg->pcr, 0x2400);
928		qla2xxx_read_window(reg, 16, fw->risc_gp4_reg);
929
930		WRT_REG_WORD(&reg->pcr, 0x2500);
931		qla2xxx_read_window(reg, 16, fw->risc_gp5_reg);
932
933		WRT_REG_WORD(&reg->pcr, 0x2600);
934		qla2xxx_read_window(reg, 16, fw->risc_gp6_reg);
935
936		WRT_REG_WORD(&reg->pcr, 0x2700);
937		qla2xxx_read_window(reg, 16, fw->risc_gp7_reg);
938
939		WRT_REG_WORD(&reg->ctrl_status, 0x10);
940		qla2xxx_read_window(reg, 16, fw->frame_buf_hdw_reg);
941
942		WRT_REG_WORD(&reg->ctrl_status, 0x20);
943		qla2xxx_read_window(reg, 64, fw->fpm_b0_reg);
944
945		WRT_REG_WORD(&reg->ctrl_status, 0x30);
946		qla2xxx_read_window(reg, 64, fw->fpm_b1_reg);
947
948		/* Reset the ISP. */
949		WRT_REG_WORD(&reg->ctrl_status, CSR_ISP_SOFT_RESET);
950	}
951
952	for (cnt = 30000; RD_MAILBOX_REG(ha, reg, 0) != 0 &&
953	    rval == QLA_SUCCESS; cnt--) {
954		if (cnt)
955			udelay(100);
956		else
957			rval = QLA_FUNCTION_TIMEOUT;
958	}
959
960	/* Pause RISC. */
961	if (rval == QLA_SUCCESS && (IS_QLA2200(ha) || (IS_QLA2100(ha) &&
962	    (RD_REG_WORD(&reg->mctr) & (BIT_1 | BIT_0)) != 0))) {
963
964		WRT_REG_WORD(&reg->hccr, HCCR_PAUSE_RISC);
965		for (cnt = 30000;
966		    (RD_REG_WORD(&reg->hccr) & HCCR_RISC_PAUSE) == 0 &&
967		    rval == QLA_SUCCESS; cnt--) {
968			if (cnt)
969				udelay(100);
970			else
971				rval = QLA_FUNCTION_TIMEOUT;
972		}
973		if (rval == QLA_SUCCESS) {
974			/* Set memory configuration and timing. */
975			if (IS_QLA2100(ha))
976				WRT_REG_WORD(&reg->mctr, 0xf1);
977			else
978				WRT_REG_WORD(&reg->mctr, 0xf2);
979			RD_REG_WORD(&reg->mctr);	/* PCI Posting. */
980
981			/* Release RISC. */
982			WRT_REG_WORD(&reg->hccr, HCCR_RELEASE_RISC);
983		}
984	}
985
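	/*
	 * ISP2100/2200 RISC RAM is read one word at a time:
	 * MBC_READ_RAM_WORD takes the address in mailbox 1 and returns the
	 * data word in mailbox 2.
	 */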
986	if (rval == QLA_SUCCESS) {
987		/* Get RISC SRAM. */
988		risc_address = 0x1000;
989		WRT_MAILBOX_REG(ha, reg, 0, MBC_READ_RAM_WORD);
990		clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
991	}
992	for (cnt = 0; cnt < sizeof(fw->risc_ram) / 2 && rval == QLA_SUCCESS;
993	    cnt++, risc_address++) {
994		WRT_MAILBOX_REG(ha, reg, 1, risc_address);
995		WRT_REG_WORD(&reg->hccr, HCCR_SET_HOST_INT);
996
997		for (timer = 6000000; timer != 0; timer--) {
998			/* Check for pending interrupts. */
999			if (RD_REG_WORD(&reg->istatus) & ISR_RISC_INT) {
1000				if (RD_REG_WORD(&reg->semaphore) & BIT_0) {
1001					set_bit(MBX_INTERRUPT,
1002					    &ha->mbx_cmd_flags);
1003
1004					mb0 = RD_MAILBOX_REG(ha, reg, 0);
1005					mb2 = RD_MAILBOX_REG(ha, reg, 2);
1006
1007					WRT_REG_WORD(&reg->semaphore, 0);
1008					WRT_REG_WORD(&reg->hccr,
1009					    HCCR_CLR_RISC_INT);
1010					RD_REG_WORD(&reg->hccr);
1011					break;
1012				}
1013				WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT);
1014				RD_REG_WORD(&reg->hccr);
1015			}
1016			udelay(5);
1017		}
1018
1019		if (test_and_clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags)) {
1020			rval = mb0 & MBS_MASK;
1021			fw->risc_ram[cnt] = htons(mb2);
1022		} else {
1023			rval = QLA_FUNCTION_FAILED;
1024		}
1025	}
1026
1027	if (rval == QLA_SUCCESS)
1028		qla2xxx_copy_queues(ha, &fw->risc_ram[cnt]);
1029
1030	qla2xxx_dump_post_process(base_vha, rval);
1031
1032qla2100_fw_dump_failed:
1033	if (!hardware_locked)
1034		spin_unlock_irqrestore(&ha->hardware_lock, flags);
1035}
1036
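/*
 * ISP24xx firmware dump.  The dump buffer is laid out as the
 * qla24xx_fw_dump register header, code RAM (0x20000) and external
 * memory (0x100000), request/response queue 0, the extended firmware
 * trace (if enabled), and finally any chain entries at ha->chain_offset.
 */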
1037void
1038qla24xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
1039{
1040	int		rval;
1041	uint32_t	cnt;
1042	uint32_t	risc_address;
1043	struct qla_hw_data *ha = vha->hw;
1044	struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
1045	uint32_t __iomem *dmp_reg;
1046	uint32_t	*iter_reg;
1047	uint16_t __iomem *mbx_reg;
1048	unsigned long	flags;
1049	struct qla24xx_fw_dump *fw;
1050	uint32_t	ext_mem_cnt;
1051	void		*nxt;
1052	void		*nxt_chain;
1053	uint32_t	*last_chain = NULL;
1054	struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
1055
1056	if (IS_P3P_TYPE(ha))
1057		return;
1058
1059	risc_address = ext_mem_cnt = 0;
1060	flags = 0;
1061	ha->fw_dump_cap_flags = 0;
1062
1063	if (!hardware_locked)
1064		spin_lock_irqsave(&ha->hardware_lock, flags);
1065
1066	if (!ha->fw_dump) {
1067		ql_log(ql_log_warn, vha, 0xd006,
1068		    "No buffer available for dump.\n");
1069		goto qla24xx_fw_dump_failed;
1070	}
1071
1072	if (ha->fw_dumped) {
1073		ql_log(ql_log_warn, vha, 0xd007,
1074		    "Firmware has been previously dumped (%p) "
1075		    "-- ignoring request.\n",
1076		    ha->fw_dump);
1077		goto qla24xx_fw_dump_failed;
1078	}
1079	fw = &ha->fw_dump->isp.isp24;
1080	qla2xxx_prep_dump(ha, ha->fw_dump);
1081
1082	fw->host_status = htonl(RD_REG_DWORD(&reg->host_status));
1083
1084	/*
1085	 * Pause RISC.  No need to track a timeout, as resetting the chip
1086	 * is the right approach in case the pause times out.
1087	 */
1088	qla24xx_pause_risc(reg, ha);
1089
1090	/* Host interface registers. */
1091	dmp_reg = &reg->flash_addr;
1092	for (cnt = 0; cnt < sizeof(fw->host_reg) / 4; cnt++)
1093		fw->host_reg[cnt] = htonl(RD_REG_DWORD(dmp_reg++));
1094
1095	/* Disable interrupts. */
1096	WRT_REG_DWORD(&reg->ictrl, 0);
1097	RD_REG_DWORD(&reg->ictrl);
1098
1099	/* Shadow registers. */
1100	WRT_REG_DWORD(&reg->iobase_addr, 0x0F70);
1101	RD_REG_DWORD(&reg->iobase_addr);
1102	WRT_REG_DWORD(&reg->iobase_select, 0xB0000000);
1103	fw->shadow_reg[0] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
1104
1105	WRT_REG_DWORD(&reg->iobase_select, 0xB0100000);
1106	fw->shadow_reg[1] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
1107
1108	WRT_REG_DWORD(&reg->iobase_select, 0xB0200000);
1109	fw->shadow_reg[2] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
1110
1111	WRT_REG_DWORD(&reg->iobase_select, 0xB0300000);
1112	fw->shadow_reg[3] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
1113
1114	WRT_REG_DWORD(&reg->iobase_select, 0xB0400000);
1115	fw->shadow_reg[4] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
1116
1117	WRT_REG_DWORD(&reg->iobase_select, 0xB0500000);
1118	fw->shadow_reg[5] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
1119
1120	WRT_REG_DWORD(&reg->iobase_select, 0xB0600000);
1121	fw->shadow_reg[6] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
1122
1123	/* Mailbox registers. */
1124	mbx_reg = &reg->mailbox0;
1125	for (cnt = 0; cnt < sizeof(fw->mailbox_reg) / 2; cnt++)
1126		fw->mailbox_reg[cnt] = htons(RD_REG_WORD(mbx_reg++));
1127
1128	/* Transfer sequence registers. */
1129	iter_reg = fw->xseq_gp_reg;
1130	iter_reg = qla24xx_read_window(reg, 0xBF00, 16, iter_reg);
1131	iter_reg = qla24xx_read_window(reg, 0xBF10, 16, iter_reg);
1132	iter_reg = qla24xx_read_window(reg, 0xBF20, 16, iter_reg);
1133	iter_reg = qla24xx_read_window(reg, 0xBF30, 16, iter_reg);
1134	iter_reg = qla24xx_read_window(reg, 0xBF40, 16, iter_reg);
1135	iter_reg = qla24xx_read_window(reg, 0xBF50, 16, iter_reg);
1136	iter_reg = qla24xx_read_window(reg, 0xBF60, 16, iter_reg);
1137	qla24xx_read_window(reg, 0xBF70, 16, iter_reg);
1138
1139	qla24xx_read_window(reg, 0xBFE0, 16, fw->xseq_0_reg);
1140	qla24xx_read_window(reg, 0xBFF0, 16, fw->xseq_1_reg);
1141
1142	/* Receive sequence registers. */
1143	iter_reg = fw->rseq_gp_reg;
1144	iter_reg = qla24xx_read_window(reg, 0xFF00, 16, iter_reg);
1145	iter_reg = qla24xx_read_window(reg, 0xFF10, 16, iter_reg);
1146	iter_reg = qla24xx_read_window(reg, 0xFF20, 16, iter_reg);
1147	iter_reg = qla24xx_read_window(reg, 0xFF30, 16, iter_reg);
1148	iter_reg = qla24xx_read_window(reg, 0xFF40, 16, iter_reg);
1149	iter_reg = qla24xx_read_window(reg, 0xFF50, 16, iter_reg);
1150	iter_reg = qla24xx_read_window(reg, 0xFF60, 16, iter_reg);
1151	qla24xx_read_window(reg, 0xFF70, 16, iter_reg);
1152
1153	qla24xx_read_window(reg, 0xFFD0, 16, fw->rseq_0_reg);
1154	qla24xx_read_window(reg, 0xFFE0, 16, fw->rseq_1_reg);
1155	qla24xx_read_window(reg, 0xFFF0, 16, fw->rseq_2_reg);
1156
1157	/* Command DMA registers. */
1158	qla24xx_read_window(reg, 0x7100, 16, fw->cmd_dma_reg);
1159
1160	/* Queues. */
1161	iter_reg = fw->req0_dma_reg;
1162	iter_reg = qla24xx_read_window(reg, 0x7200, 8, iter_reg);
1163	dmp_reg = &reg->iobase_q;
1164	for (cnt = 0; cnt < 7; cnt++)
1165		*iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
1166
1167	iter_reg = fw->resp0_dma_reg;
1168	iter_reg = qla24xx_read_window(reg, 0x7300, 8, iter_reg);
1169	dmp_reg = &reg->iobase_q;
1170	for (cnt = 0; cnt < 7; cnt++)
1171		*iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
1172
1173	iter_reg = fw->req1_dma_reg;
1174	iter_reg = qla24xx_read_window(reg, 0x7400, 8, iter_reg);
1175	dmp_reg = &reg->iobase_q;
1176	for (cnt = 0; cnt < 7; cnt++)
1177		*iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
1178
1179	/* Transmit DMA registers. */
1180	iter_reg = fw->xmt0_dma_reg;
1181	iter_reg = qla24xx_read_window(reg, 0x7600, 16, iter_reg);
1182	qla24xx_read_window(reg, 0x7610, 16, iter_reg);
1183
1184	iter_reg = fw->xmt1_dma_reg;
1185	iter_reg = qla24xx_read_window(reg, 0x7620, 16, iter_reg);
1186	qla24xx_read_window(reg, 0x7630, 16, iter_reg);
1187
1188	iter_reg = fw->xmt2_dma_reg;
1189	iter_reg = qla24xx_read_window(reg, 0x7640, 16, iter_reg);
1190	qla24xx_read_window(reg, 0x7650, 16, iter_reg);
1191
1192	iter_reg = fw->xmt3_dma_reg;
1193	iter_reg = qla24xx_read_window(reg, 0x7660, 16, iter_reg);
1194	qla24xx_read_window(reg, 0x7670, 16, iter_reg);
1195
1196	iter_reg = fw->xmt4_dma_reg;
1197	iter_reg = qla24xx_read_window(reg, 0x7680, 16, iter_reg);
1198	qla24xx_read_window(reg, 0x7690, 16, iter_reg);
1199
1200	qla24xx_read_window(reg, 0x76A0, 16, fw->xmt_data_dma_reg);
1201
1202	/* Receive DMA registers. */
1203	iter_reg = fw->rcvt0_data_dma_reg;
1204	iter_reg = qla24xx_read_window(reg, 0x7700, 16, iter_reg);
1205	qla24xx_read_window(reg, 0x7710, 16, iter_reg);
1206
1207	iter_reg = fw->rcvt1_data_dma_reg;
1208	iter_reg = qla24xx_read_window(reg, 0x7720, 16, iter_reg);
1209	qla24xx_read_window(reg, 0x7730, 16, iter_reg);
1210
1211	/* RISC registers. */
1212	iter_reg = fw->risc_gp_reg;
1213	iter_reg = qla24xx_read_window(reg, 0x0F00, 16, iter_reg);
1214	iter_reg = qla24xx_read_window(reg, 0x0F10, 16, iter_reg);
1215	iter_reg = qla24xx_read_window(reg, 0x0F20, 16, iter_reg);
1216	iter_reg = qla24xx_read_window(reg, 0x0F30, 16, iter_reg);
1217	iter_reg = qla24xx_read_window(reg, 0x0F40, 16, iter_reg);
1218	iter_reg = qla24xx_read_window(reg, 0x0F50, 16, iter_reg);
1219	iter_reg = qla24xx_read_window(reg, 0x0F60, 16, iter_reg);
1220	qla24xx_read_window(reg, 0x0F70, 16, iter_reg);
1221
1222	/* Local memory controller registers. */
1223	iter_reg = fw->lmc_reg;
1224	iter_reg = qla24xx_read_window(reg, 0x3000, 16, iter_reg);
1225	iter_reg = qla24xx_read_window(reg, 0x3010, 16, iter_reg);
1226	iter_reg = qla24xx_read_window(reg, 0x3020, 16, iter_reg);
1227	iter_reg = qla24xx_read_window(reg, 0x3030, 16, iter_reg);
1228	iter_reg = qla24xx_read_window(reg, 0x3040, 16, iter_reg);
1229	iter_reg = qla24xx_read_window(reg, 0x3050, 16, iter_reg);
1230	qla24xx_read_window(reg, 0x3060, 16, iter_reg);
1231
1232	/* Fibre Protocol Module registers. */
1233	iter_reg = fw->fpm_hdw_reg;
1234	iter_reg = qla24xx_read_window(reg, 0x4000, 16, iter_reg);
1235	iter_reg = qla24xx_read_window(reg, 0x4010, 16, iter_reg);
1236	iter_reg = qla24xx_read_window(reg, 0x4020, 16, iter_reg);
1237	iter_reg = qla24xx_read_window(reg, 0x4030, 16, iter_reg);
1238	iter_reg = qla24xx_read_window(reg, 0x4040, 16, iter_reg);
1239	iter_reg = qla24xx_read_window(reg, 0x4050, 16, iter_reg);
1240	iter_reg = qla24xx_read_window(reg, 0x4060, 16, iter_reg);
1241	iter_reg = qla24xx_read_window(reg, 0x4070, 16, iter_reg);
1242	iter_reg = qla24xx_read_window(reg, 0x4080, 16, iter_reg);
1243	iter_reg = qla24xx_read_window(reg, 0x4090, 16, iter_reg);
1244	iter_reg = qla24xx_read_window(reg, 0x40A0, 16, iter_reg);
1245	qla24xx_read_window(reg, 0x40B0, 16, iter_reg);
1246
1247	/* Frame Buffer registers. */
1248	iter_reg = fw->fb_hdw_reg;
1249	iter_reg = qla24xx_read_window(reg, 0x6000, 16, iter_reg);
1250	iter_reg = qla24xx_read_window(reg, 0x6010, 16, iter_reg);
1251	iter_reg = qla24xx_read_window(reg, 0x6020, 16, iter_reg);
1252	iter_reg = qla24xx_read_window(reg, 0x6030, 16, iter_reg);
1253	iter_reg = qla24xx_read_window(reg, 0x6040, 16, iter_reg);
1254	iter_reg = qla24xx_read_window(reg, 0x6100, 16, iter_reg);
1255	iter_reg = qla24xx_read_window(reg, 0x6130, 16, iter_reg);
1256	iter_reg = qla24xx_read_window(reg, 0x6150, 16, iter_reg);
1257	iter_reg = qla24xx_read_window(reg, 0x6170, 16, iter_reg);
1258	iter_reg = qla24xx_read_window(reg, 0x6190, 16, iter_reg);
1259	qla24xx_read_window(reg, 0x61B0, 16, iter_reg);
1260
1261	rval = qla24xx_soft_reset(ha);
1262	if (rval != QLA_SUCCESS)
1263		goto qla24xx_fw_dump_failed_0;
1264
1265	rval = qla24xx_dump_memory(ha, fw->code_ram, sizeof(fw->code_ram),
1266	    &nxt);
1267	if (rval != QLA_SUCCESS)
1268		goto qla24xx_fw_dump_failed_0;
1269
1270	nxt = qla2xxx_copy_queues(ha, nxt);
1271
1272	qla24xx_copy_eft(ha, nxt);
1273
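	/*
	 * Chain entries live at ha->chain_offset, past the fixed portion of
	 * the dump.  The last entry is tagged with DUMP_CHAIN_LAST and the
	 * header version gains DUMP_CHAIN_VARIANT so tools know to walk the
	 * chain.
	 */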
1274	nxt_chain = (void *)ha->fw_dump + ha->chain_offset;
1275	nxt_chain = qla2xxx_copy_atioqueues(ha, nxt_chain, &last_chain);
1276	if (last_chain) {
1277		ha->fw_dump->version |= __constant_htonl(DUMP_CHAIN_VARIANT);
1278		*last_chain |= __constant_htonl(DUMP_CHAIN_LAST);
1279	}
1280
1281	/* Adjust valid length. */
1282	ha->fw_dump_len = (nxt_chain - (void *)ha->fw_dump);
1283
1284qla24xx_fw_dump_failed_0:
1285	qla2xxx_dump_post_process(base_vha, rval);
1286
1287qla24xx_fw_dump_failed:
1288	if (!hardware_locked)
1289		spin_unlock_irqrestore(&ha->hardware_lock, flags);
1290}
1291
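/*
 * ISP25xx firmware dump: same overall flow as qla24xx_fw_dump(), with
 * additional PCIe, auxiliary sequencer and multiqueue register windows,
 * plus FCE and multiqueue rings appended as chain entries.
 */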
1292void
1293qla25xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
1294{
1295	int		rval;
1296	uint32_t	cnt;
1297	uint32_t	risc_address;
1298	struct qla_hw_data *ha = vha->hw;
1299	struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
1300	uint32_t __iomem *dmp_reg;
1301	uint32_t	*iter_reg;
1302	uint16_t __iomem *mbx_reg;
1303	unsigned long	flags;
1304	struct qla25xx_fw_dump *fw;
1305	uint32_t	ext_mem_cnt;
1306	void		*nxt, *nxt_chain;
1307	uint32_t	*last_chain = NULL;
1308	struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
1309
1310	risc_address = ext_mem_cnt = 0;
1311	flags = 0;
1312	ha->fw_dump_cap_flags = 0;
1313
1314	if (!hardware_locked)
1315		spin_lock_irqsave(&ha->hardware_lock, flags);
1316
1317	if (!ha->fw_dump) {
1318		ql_log(ql_log_warn, vha, 0xd008,
1319		    "No buffer available for dump.\n");
1320		goto qla25xx_fw_dump_failed;
1321	}
1322
1323	if (ha->fw_dumped) {
1324		ql_log(ql_log_warn, vha, 0xd009,
1325		    "Firmware has been previously dumped (%p) "
1326		    "-- ignoring request.\n",
1327		    ha->fw_dump);
1328		goto qla25xx_fw_dump_failed;
1329	}
1330	fw = &ha->fw_dump->isp.isp25;
1331	qla2xxx_prep_dump(ha, ha->fw_dump);
1332	ha->fw_dump->version = __constant_htonl(2);
1333
1334	fw->host_status = htonl(RD_REG_DWORD(&reg->host_status));
1335
1336	/*
1337	 * Pause RISC.  No need to track a timeout, as resetting the chip
1338	 * is the right approach in case the pause times out.
1339	 */
1340	qla24xx_pause_risc(reg, ha);
1341
1342	/* Host/RISC registers. */
1343	iter_reg = fw->host_risc_reg;
1344	iter_reg = qla24xx_read_window(reg, 0x7000, 16, iter_reg);
1345	qla24xx_read_window(reg, 0x7010, 16, iter_reg);
1346
1347	/* PCIe registers. */
1348	WRT_REG_DWORD(&reg->iobase_addr, 0x7C00);
1349	RD_REG_DWORD(&reg->iobase_addr);
1350	WRT_REG_DWORD(&reg->iobase_window, 0x01);
1351	dmp_reg = &reg->iobase_c4;
1352	fw->pcie_regs[0] = htonl(RD_REG_DWORD(dmp_reg++));
1353	fw->pcie_regs[1] = htonl(RD_REG_DWORD(dmp_reg++));
1354	fw->pcie_regs[2] = htonl(RD_REG_DWORD(dmp_reg));
1355	fw->pcie_regs[3] = htonl(RD_REG_DWORD(&reg->iobase_window));
1356
1357	WRT_REG_DWORD(&reg->iobase_window, 0x00);
1358	RD_REG_DWORD(&reg->iobase_window);
1359
1360	/* Host interface registers. */
1361	dmp_reg = &reg->flash_addr;
1362	for (cnt = 0; cnt < sizeof(fw->host_reg) / 4; cnt++)
1363		fw->host_reg[cnt] = htonl(RD_REG_DWORD(dmp_reg++));
1364
1365	/* Disable interrupts. */
1366	WRT_REG_DWORD(&reg->ictrl, 0);
1367	RD_REG_DWORD(&reg->ictrl);
1368
1369	/* Shadow registers. */
1370	WRT_REG_DWORD(&reg->iobase_addr, 0x0F70);
1371	RD_REG_DWORD(&reg->iobase_addr);
1372	WRT_REG_DWORD(&reg->iobase_select, 0xB0000000);
1373	fw->shadow_reg[0] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
1374
1375	WRT_REG_DWORD(&reg->iobase_select, 0xB0100000);
1376	fw->shadow_reg[1] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
1377
1378	WRT_REG_DWORD(&reg->iobase_select, 0xB0200000);
1379	fw->shadow_reg[2] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
1380
1381	WRT_REG_DWORD(&reg->iobase_select, 0xB0300000);
1382	fw->shadow_reg[3] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
1383
1384	WRT_REG_DWORD(&reg->iobase_select, 0xB0400000);
1385	fw->shadow_reg[4] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
1386
1387	WRT_REG_DWORD(&reg->iobase_select, 0xB0500000);
1388	fw->shadow_reg[5] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
1389
1390	WRT_REG_DWORD(&reg->iobase_select, 0xB0600000);
1391	fw->shadow_reg[6] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
1392
1393	WRT_REG_DWORD(&reg->iobase_select, 0xB0700000);
1394	fw->shadow_reg[7] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
1395
1396	WRT_REG_DWORD(&reg->iobase_select, 0xB0800000);
1397	fw->shadow_reg[8] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
1398
1399	WRT_REG_DWORD(&reg->iobase_select, 0xB0900000);
1400	fw->shadow_reg[9] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
1401
1402	WRT_REG_DWORD(&reg->iobase_select, 0xB0A00000);
1403	fw->shadow_reg[10] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
1404
1405	/* RISC I/O register. */
1406	WRT_REG_DWORD(&reg->iobase_addr, 0x0010);
1407	fw->risc_io_reg = htonl(RD_REG_DWORD(&reg->iobase_window));
1408
1409	/* Mailbox registers. */
1410	mbx_reg = &reg->mailbox0;
1411	for (cnt = 0; cnt < sizeof(fw->mailbox_reg) / 2; cnt++)
1412		fw->mailbox_reg[cnt] = htons(RD_REG_WORD(mbx_reg++));
1413
1414	/* Transfer sequence registers. */
1415	iter_reg = fw->xseq_gp_reg;
1416	iter_reg = qla24xx_read_window(reg, 0xBF00, 16, iter_reg);
1417	iter_reg = qla24xx_read_window(reg, 0xBF10, 16, iter_reg);
1418	iter_reg = qla24xx_read_window(reg, 0xBF20, 16, iter_reg);
1419	iter_reg = qla24xx_read_window(reg, 0xBF30, 16, iter_reg);
1420	iter_reg = qla24xx_read_window(reg, 0xBF40, 16, iter_reg);
1421	iter_reg = qla24xx_read_window(reg, 0xBF50, 16, iter_reg);
1422	iter_reg = qla24xx_read_window(reg, 0xBF60, 16, iter_reg);
1423	qla24xx_read_window(reg, 0xBF70, 16, iter_reg);
1424
1425	iter_reg = fw->xseq_0_reg;
1426	iter_reg = qla24xx_read_window(reg, 0xBFC0, 16, iter_reg);
1427	iter_reg = qla24xx_read_window(reg, 0xBFD0, 16, iter_reg);
1428	qla24xx_read_window(reg, 0xBFE0, 16, iter_reg);
1429
1430	qla24xx_read_window(reg, 0xBFF0, 16, fw->xseq_1_reg);
1431
1432	/* Receive sequence registers. */
1433	iter_reg = fw->rseq_gp_reg;
1434	iter_reg = qla24xx_read_window(reg, 0xFF00, 16, iter_reg);
1435	iter_reg = qla24xx_read_window(reg, 0xFF10, 16, iter_reg);
1436	iter_reg = qla24xx_read_window(reg, 0xFF20, 16, iter_reg);
1437	iter_reg = qla24xx_read_window(reg, 0xFF30, 16, iter_reg);
1438	iter_reg = qla24xx_read_window(reg, 0xFF40, 16, iter_reg);
1439	iter_reg = qla24xx_read_window(reg, 0xFF50, 16, iter_reg);
1440	iter_reg = qla24xx_read_window(reg, 0xFF60, 16, iter_reg);
1441	qla24xx_read_window(reg, 0xFF70, 16, iter_reg);
1442
1443	iter_reg = fw->rseq_0_reg;
1444	iter_reg = qla24xx_read_window(reg, 0xFFC0, 16, iter_reg);
1445	qla24xx_read_window(reg, 0xFFD0, 16, iter_reg);
1446
1447	qla24xx_read_window(reg, 0xFFE0, 16, fw->rseq_1_reg);
1448	qla24xx_read_window(reg, 0xFFF0, 16, fw->rseq_2_reg);
1449
1450	/* Auxiliary sequence registers. */
1451	iter_reg = fw->aseq_gp_reg;
1452	iter_reg = qla24xx_read_window(reg, 0xB000, 16, iter_reg);
1453	iter_reg = qla24xx_read_window(reg, 0xB010, 16, iter_reg);
1454	iter_reg = qla24xx_read_window(reg, 0xB020, 16, iter_reg);
1455	iter_reg = qla24xx_read_window(reg, 0xB030, 16, iter_reg);
1456	iter_reg = qla24xx_read_window(reg, 0xB040, 16, iter_reg);
1457	iter_reg = qla24xx_read_window(reg, 0xB050, 16, iter_reg);
1458	iter_reg = qla24xx_read_window(reg, 0xB060, 16, iter_reg);
1459	qla24xx_read_window(reg, 0xB070, 16, iter_reg);
1460
1461	iter_reg = fw->aseq_0_reg;
1462	iter_reg = qla24xx_read_window(reg, 0xB0C0, 16, iter_reg);
1463	qla24xx_read_window(reg, 0xB0D0, 16, iter_reg);
1464
1465	qla24xx_read_window(reg, 0xB0E0, 16, fw->aseq_1_reg);
1466	qla24xx_read_window(reg, 0xB0F0, 16, fw->aseq_2_reg);
1467
1468	/* Command DMA registers. */
1469	qla24xx_read_window(reg, 0x7100, 16, fw->cmd_dma_reg);
1470
1471	/* Queues. */
1472	iter_reg = fw->req0_dma_reg;
1473	iter_reg = qla24xx_read_window(reg, 0x7200, 8, iter_reg);
1474	dmp_reg = &reg->iobase_q;
1475	for (cnt = 0; cnt < 7; cnt++)
1476		*iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
1477
1478	iter_reg = fw->resp0_dma_reg;
1479	iter_reg = qla24xx_read_window(reg, 0x7300, 8, iter_reg);
1480	dmp_reg = &reg->iobase_q;
1481	for (cnt = 0; cnt < 7; cnt++)
1482		*iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
1483
1484	iter_reg = fw->req1_dma_reg;
1485	iter_reg = qla24xx_read_window(reg, 0x7400, 8, iter_reg);
1486	dmp_reg = &reg->iobase_q;
1487	for (cnt = 0; cnt < 7; cnt++)
1488		*iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
1489
1490	/* Transmit DMA registers. */
1491	iter_reg = fw->xmt0_dma_reg;
1492	iter_reg = qla24xx_read_window(reg, 0x7600, 16, iter_reg);
1493	qla24xx_read_window(reg, 0x7610, 16, iter_reg);
1494
1495	iter_reg = fw->xmt1_dma_reg;
1496	iter_reg = qla24xx_read_window(reg, 0x7620, 16, iter_reg);
1497	qla24xx_read_window(reg, 0x7630, 16, iter_reg);
1498
1499	iter_reg = fw->xmt2_dma_reg;
1500	iter_reg = qla24xx_read_window(reg, 0x7640, 16, iter_reg);
1501	qla24xx_read_window(reg, 0x7650, 16, iter_reg);
1502
1503	iter_reg = fw->xmt3_dma_reg;
1504	iter_reg = qla24xx_read_window(reg, 0x7660, 16, iter_reg);
1505	qla24xx_read_window(reg, 0x7670, 16, iter_reg);
1506
1507	iter_reg = fw->xmt4_dma_reg;
1508	iter_reg = qla24xx_read_window(reg, 0x7680, 16, iter_reg);
1509	qla24xx_read_window(reg, 0x7690, 16, iter_reg);
1510
1511	qla24xx_read_window(reg, 0x76A0, 16, fw->xmt_data_dma_reg);
1512
1513	/* Receive DMA registers. */
1514	iter_reg = fw->rcvt0_data_dma_reg;
1515	iter_reg = qla24xx_read_window(reg, 0x7700, 16, iter_reg);
1516	qla24xx_read_window(reg, 0x7710, 16, iter_reg);
1517
1518	iter_reg = fw->rcvt1_data_dma_reg;
1519	iter_reg = qla24xx_read_window(reg, 0x7720, 16, iter_reg);
1520	qla24xx_read_window(reg, 0x7730, 16, iter_reg);
1521
1522	/* RISC registers. */
1523	iter_reg = fw->risc_gp_reg;
1524	iter_reg = qla24xx_read_window(reg, 0x0F00, 16, iter_reg);
1525	iter_reg = qla24xx_read_window(reg, 0x0F10, 16, iter_reg);
1526	iter_reg = qla24xx_read_window(reg, 0x0F20, 16, iter_reg);
1527	iter_reg = qla24xx_read_window(reg, 0x0F30, 16, iter_reg);
1528	iter_reg = qla24xx_read_window(reg, 0x0F40, 16, iter_reg);
1529	iter_reg = qla24xx_read_window(reg, 0x0F50, 16, iter_reg);
1530	iter_reg = qla24xx_read_window(reg, 0x0F60, 16, iter_reg);
1531	qla24xx_read_window(reg, 0x0F70, 16, iter_reg);
1532
1533	/* Local memory controller registers. */
1534	iter_reg = fw->lmc_reg;
1535	iter_reg = qla24xx_read_window(reg, 0x3000, 16, iter_reg);
1536	iter_reg = qla24xx_read_window(reg, 0x3010, 16, iter_reg);
1537	iter_reg = qla24xx_read_window(reg, 0x3020, 16, iter_reg);
1538	iter_reg = qla24xx_read_window(reg, 0x3030, 16, iter_reg);
1539	iter_reg = qla24xx_read_window(reg, 0x3040, 16, iter_reg);
1540	iter_reg = qla24xx_read_window(reg, 0x3050, 16, iter_reg);
1541	iter_reg = qla24xx_read_window(reg, 0x3060, 16, iter_reg);
1542	qla24xx_read_window(reg, 0x3070, 16, iter_reg);
1543
1544	/* Fibre Protocol Module registers. */
1545	iter_reg = fw->fpm_hdw_reg;
1546	iter_reg = qla24xx_read_window(reg, 0x4000, 16, iter_reg);
1547	iter_reg = qla24xx_read_window(reg, 0x4010, 16, iter_reg);
1548	iter_reg = qla24xx_read_window(reg, 0x4020, 16, iter_reg);
1549	iter_reg = qla24xx_read_window(reg, 0x4030, 16, iter_reg);
1550	iter_reg = qla24xx_read_window(reg, 0x4040, 16, iter_reg);
1551	iter_reg = qla24xx_read_window(reg, 0x4050, 16, iter_reg);
1552	iter_reg = qla24xx_read_window(reg, 0x4060, 16, iter_reg);
1553	iter_reg = qla24xx_read_window(reg, 0x4070, 16, iter_reg);
1554	iter_reg = qla24xx_read_window(reg, 0x4080, 16, iter_reg);
1555	iter_reg = qla24xx_read_window(reg, 0x4090, 16, iter_reg);
1556	iter_reg = qla24xx_read_window(reg, 0x40A0, 16, iter_reg);
1557	qla24xx_read_window(reg, 0x40B0, 16, iter_reg);
1558
1559	/* Frame Buffer registers. */
1560	iter_reg = fw->fb_hdw_reg;
1561	iter_reg = qla24xx_read_window(reg, 0x6000, 16, iter_reg);
1562	iter_reg = qla24xx_read_window(reg, 0x6010, 16, iter_reg);
1563	iter_reg = qla24xx_read_window(reg, 0x6020, 16, iter_reg);
1564	iter_reg = qla24xx_read_window(reg, 0x6030, 16, iter_reg);
1565	iter_reg = qla24xx_read_window(reg, 0x6040, 16, iter_reg);
1566	iter_reg = qla24xx_read_window(reg, 0x6100, 16, iter_reg);
1567	iter_reg = qla24xx_read_window(reg, 0x6130, 16, iter_reg);
1568	iter_reg = qla24xx_read_window(reg, 0x6150, 16, iter_reg);
1569	iter_reg = qla24xx_read_window(reg, 0x6170, 16, iter_reg);
1570	iter_reg = qla24xx_read_window(reg, 0x6190, 16, iter_reg);
1571	iter_reg = qla24xx_read_window(reg, 0x61B0, 16, iter_reg);
1572	qla24xx_read_window(reg, 0x6F00, 16, iter_reg);
1573
1574	/* Multi queue registers */
1575	nxt_chain = qla25xx_copy_mq(ha, (void *)ha->fw_dump + ha->chain_offset,
1576	    &last_chain);
1577
1578	rval = qla24xx_soft_reset(ha);
1579	if (rval != QLA_SUCCESS)
1580		goto qla25xx_fw_dump_failed_0;
1581
1582	rval = qla24xx_dump_memory(ha, fw->code_ram, sizeof(fw->code_ram),
1583	    &nxt);
1584	if (rval != QLA_SUCCESS)
1585		goto qla25xx_fw_dump_failed_0;
1586
1587	nxt = qla2xxx_copy_queues(ha, nxt);
1588
1589	qla24xx_copy_eft(ha, nxt);
1590
1591	/* Chain entries -- started with MQ. */
1592	nxt_chain = qla25xx_copy_fce(ha, nxt_chain, &last_chain);
1593	nxt_chain = qla25xx_copy_mqueues(ha, nxt_chain, &last_chain);
1594	nxt_chain = qla2xxx_copy_atioqueues(ha, nxt_chain, &last_chain);
1595	if (last_chain) {
1596		ha->fw_dump->version |= __constant_htonl(DUMP_CHAIN_VARIANT);
1597		*last_chain |= __constant_htonl(DUMP_CHAIN_LAST);
1598	}
1599
1600	/* Adjust valid length. */
1601	ha->fw_dump_len = (nxt_chain - (void *)ha->fw_dump);
1602
1603qla25xx_fw_dump_failed_0:
1604	qla2xxx_dump_post_process(base_vha, rval);
1605
1606qla25xx_fw_dump_failed:
1607	if (!hardware_locked)
1608		spin_unlock_irqrestore(&ha->hardware_lock, flags);
1609}
1610
1611void
1612qla81xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
1613{
1614	int		rval;
1615	uint32_t	cnt;
1616	uint32_t	risc_address;
1617	struct qla_hw_data *ha = vha->hw;
1618	struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
1619	uint32_t __iomem *dmp_reg;
1620	uint32_t	*iter_reg;
1621	uint16_t __iomem *mbx_reg;
1622	unsigned long	flags;
1623	struct qla81xx_fw_dump *fw;
1624	uint32_t	ext_mem_cnt;
1625	void		*nxt, *nxt_chain;
1626	uint32_t	*last_chain = NULL;
1627	struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
1628
1629	risc_address = ext_mem_cnt = 0;
1630	flags = 0;
1631	ha->fw_dump_cap_flags = 0;
1632
1633	if (!hardware_locked)
1634		spin_lock_irqsave(&ha->hardware_lock, flags);
1635
1636	if (!ha->fw_dump) {
1637		ql_log(ql_log_warn, vha, 0xd00a,
1638		    "No buffer available for dump.\n");
1639		goto qla81xx_fw_dump_failed;
1640	}
1641
1642	if (ha->fw_dumped) {
1643		ql_log(ql_log_warn, vha, 0xd00b,
1644		    "Firmware has been previously dumped (%p) "
1645		    "-- ignoring request.\n",
1646		    ha->fw_dump);
1647		goto qla81xx_fw_dump_failed;
1648	}
1649	fw = &ha->fw_dump->isp.isp81;
1650	qla2xxx_prep_dump(ha, ha->fw_dump);
1651
1652	fw->host_status = htonl(RD_REG_DWORD(&reg->host_status));
1653
1654	/*
1655	 * Pause RISC. No need to track timeout, as resetting the chip
1656	 * is the right approach in case of a pause timeout.
1657	 */
1658	qla24xx_pause_risc(reg, ha);
1659
1660	/* Host/RISC registers. */
1661	iter_reg = fw->host_risc_reg;
1662	iter_reg = qla24xx_read_window(reg, 0x7000, 16, iter_reg);
1663	qla24xx_read_window(reg, 0x7010, 16, iter_reg);
1664
1665	/* PCIe registers. */
1666	WRT_REG_DWORD(&reg->iobase_addr, 0x7C00);
1667	RD_REG_DWORD(&reg->iobase_addr);
1668	WRT_REG_DWORD(&reg->iobase_window, 0x01);
1669	dmp_reg = &reg->iobase_c4;
1670	fw->pcie_regs[0] = htonl(RD_REG_DWORD(dmp_reg++));
1671	fw->pcie_regs[1] = htonl(RD_REG_DWORD(dmp_reg++));
1672	fw->pcie_regs[2] = htonl(RD_REG_DWORD(dmp_reg));
1673	fw->pcie_regs[3] = htonl(RD_REG_DWORD(&reg->iobase_window));
1674
1675	WRT_REG_DWORD(&reg->iobase_window, 0x00);
1676	RD_REG_DWORD(&reg->iobase_window);
1677
1678	/* Host interface registers. */
1679	dmp_reg = &reg->flash_addr;
1680	for (cnt = 0; cnt < sizeof(fw->host_reg) / 4; cnt++)
1681		fw->host_reg[cnt] = htonl(RD_REG_DWORD(dmp_reg++));
1682
1683	/* Disable interrupts. */
1684	WRT_REG_DWORD(&reg->ictrl, 0);
1685	RD_REG_DWORD(&reg->ictrl);
1686
1687	/* Shadow registers. */
1688	WRT_REG_DWORD(&reg->iobase_addr, 0x0F70);
1689	RD_REG_DWORD(&reg->iobase_addr);
1690	WRT_REG_DWORD(&reg->iobase_select, 0xB0000000);
1691	fw->shadow_reg[0] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
1692
1693	WRT_REG_DWORD(&reg->iobase_select, 0xB0100000);
1694	fw->shadow_reg[1] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
1695
1696	WRT_REG_DWORD(&reg->iobase_select, 0xB0200000);
1697	fw->shadow_reg[2] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
1698
1699	WRT_REG_DWORD(&reg->iobase_select, 0xB0300000);
1700	fw->shadow_reg[3] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
1701
1702	WRT_REG_DWORD(&reg->iobase_select, 0xB0400000);
1703	fw->shadow_reg[4] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
1704
1705	WRT_REG_DWORD(&reg->iobase_select, 0xB0500000);
1706	fw->shadow_reg[5] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
1707
1708	WRT_REG_DWORD(&reg->iobase_select, 0xB0600000);
1709	fw->shadow_reg[6] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
1710
1711	WRT_REG_DWORD(&reg->iobase_select, 0xB0700000);
1712	fw->shadow_reg[7] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
1713
1714	WRT_REG_DWORD(&reg->iobase_select, 0xB0800000);
1715	fw->shadow_reg[8] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
1716
1717	WRT_REG_DWORD(&reg->iobase_select, 0xB0900000);
1718	fw->shadow_reg[9] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
1719
1720	WRT_REG_DWORD(&reg->iobase_select, 0xB0A00000);
1721	fw->shadow_reg[10] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
1722
1723	/* RISC I/O register. */
1724	WRT_REG_DWORD(&reg->iobase_addr, 0x0010);
1725	fw->risc_io_reg = htonl(RD_REG_DWORD(&reg->iobase_window));
1726
1727	/* Mailbox registers. */
1728	mbx_reg = &reg->mailbox0;
1729	for (cnt = 0; cnt < sizeof(fw->mailbox_reg) / 2; cnt++)
1730		fw->mailbox_reg[cnt] = htons(RD_REG_WORD(mbx_reg++));
1731
1732	/* Transfer sequence registers. */
1733	iter_reg = fw->xseq_gp_reg;
1734	iter_reg = qla24xx_read_window(reg, 0xBF00, 16, iter_reg);
1735	iter_reg = qla24xx_read_window(reg, 0xBF10, 16, iter_reg);
1736	iter_reg = qla24xx_read_window(reg, 0xBF20, 16, iter_reg);
1737	iter_reg = qla24xx_read_window(reg, 0xBF30, 16, iter_reg);
1738	iter_reg = qla24xx_read_window(reg, 0xBF40, 16, iter_reg);
1739	iter_reg = qla24xx_read_window(reg, 0xBF50, 16, iter_reg);
1740	iter_reg = qla24xx_read_window(reg, 0xBF60, 16, iter_reg);
1741	qla24xx_read_window(reg, 0xBF70, 16, iter_reg);
1742
1743	iter_reg = fw->xseq_0_reg;
1744	iter_reg = qla24xx_read_window(reg, 0xBFC0, 16, iter_reg);
1745	iter_reg = qla24xx_read_window(reg, 0xBFD0, 16, iter_reg);
1746	qla24xx_read_window(reg, 0xBFE0, 16, iter_reg);
1747
1748	qla24xx_read_window(reg, 0xBFF0, 16, fw->xseq_1_reg);
1749
1750	/* Receive sequence registers. */
1751	iter_reg = fw->rseq_gp_reg;
1752	iter_reg = qla24xx_read_window(reg, 0xFF00, 16, iter_reg);
1753	iter_reg = qla24xx_read_window(reg, 0xFF10, 16, iter_reg);
1754	iter_reg = qla24xx_read_window(reg, 0xFF20, 16, iter_reg);
1755	iter_reg = qla24xx_read_window(reg, 0xFF30, 16, iter_reg);
1756	iter_reg = qla24xx_read_window(reg, 0xFF40, 16, iter_reg);
1757	iter_reg = qla24xx_read_window(reg, 0xFF50, 16, iter_reg);
1758	iter_reg = qla24xx_read_window(reg, 0xFF60, 16, iter_reg);
1759	qla24xx_read_window(reg, 0xFF70, 16, iter_reg);
1760
1761	iter_reg = fw->rseq_0_reg;
1762	iter_reg = qla24xx_read_window(reg, 0xFFC0, 16, iter_reg);
1763	qla24xx_read_window(reg, 0xFFD0, 16, iter_reg);
1764
1765	qla24xx_read_window(reg, 0xFFE0, 16, fw->rseq_1_reg);
1766	qla24xx_read_window(reg, 0xFFF0, 16, fw->rseq_2_reg);
1767
1768	/* Auxiliary sequence registers. */
1769	iter_reg = fw->aseq_gp_reg;
1770	iter_reg = qla24xx_read_window(reg, 0xB000, 16, iter_reg);
1771	iter_reg = qla24xx_read_window(reg, 0xB010, 16, iter_reg);
1772	iter_reg = qla24xx_read_window(reg, 0xB020, 16, iter_reg);
1773	iter_reg = qla24xx_read_window(reg, 0xB030, 16, iter_reg);
1774	iter_reg = qla24xx_read_window(reg, 0xB040, 16, iter_reg);
1775	iter_reg = qla24xx_read_window(reg, 0xB050, 16, iter_reg);
1776	iter_reg = qla24xx_read_window(reg, 0xB060, 16, iter_reg);
1777	qla24xx_read_window(reg, 0xB070, 16, iter_reg);
1778
1779	iter_reg = fw->aseq_0_reg;
1780	iter_reg = qla24xx_read_window(reg, 0xB0C0, 16, iter_reg);
1781	qla24xx_read_window(reg, 0xB0D0, 16, iter_reg);
1782
1783	qla24xx_read_window(reg, 0xB0E0, 16, fw->aseq_1_reg);
1784	qla24xx_read_window(reg, 0xB0F0, 16, fw->aseq_2_reg);
1785
1786	/* Command DMA registers. */
1787	qla24xx_read_window(reg, 0x7100, 16, fw->cmd_dma_reg);
1788
1789	/* Queues. */
1790	iter_reg = fw->req0_dma_reg;
1791	iter_reg = qla24xx_read_window(reg, 0x7200, 8, iter_reg);
1792	dmp_reg = &reg->iobase_q;
1793	for (cnt = 0; cnt < 7; cnt++)
1794		*iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
1795
1796	iter_reg = fw->resp0_dma_reg;
1797	iter_reg = qla24xx_read_window(reg, 0x7300, 8, iter_reg);
1798	dmp_reg = &reg->iobase_q;
1799	for (cnt = 0; cnt < 7; cnt++)
1800		*iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
1801
1802	iter_reg = fw->req1_dma_reg;
1803	iter_reg = qla24xx_read_window(reg, 0x7400, 8, iter_reg);
1804	dmp_reg = &reg->iobase_q;
1805	for (cnt = 0; cnt < 7; cnt++)
1806		*iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
1807
1808	/* Transmit DMA registers. */
1809	iter_reg = fw->xmt0_dma_reg;
1810	iter_reg = qla24xx_read_window(reg, 0x7600, 16, iter_reg);
1811	qla24xx_read_window(reg, 0x7610, 16, iter_reg);
1812
1813	iter_reg = fw->xmt1_dma_reg;
1814	iter_reg = qla24xx_read_window(reg, 0x7620, 16, iter_reg);
1815	qla24xx_read_window(reg, 0x7630, 16, iter_reg);
1816
1817	iter_reg = fw->xmt2_dma_reg;
1818	iter_reg = qla24xx_read_window(reg, 0x7640, 16, iter_reg);
1819	qla24xx_read_window(reg, 0x7650, 16, iter_reg);
1820
1821	iter_reg = fw->xmt3_dma_reg;
1822	iter_reg = qla24xx_read_window(reg, 0x7660, 16, iter_reg);
1823	qla24xx_read_window(reg, 0x7670, 16, iter_reg);
1824
1825	iter_reg = fw->xmt4_dma_reg;
1826	iter_reg = qla24xx_read_window(reg, 0x7680, 16, iter_reg);
1827	qla24xx_read_window(reg, 0x7690, 16, iter_reg);
1828
1829	qla24xx_read_window(reg, 0x76A0, 16, fw->xmt_data_dma_reg);
1830
1831	/* Receive DMA registers. */
1832	iter_reg = fw->rcvt0_data_dma_reg;
1833	iter_reg = qla24xx_read_window(reg, 0x7700, 16, iter_reg);
1834	qla24xx_read_window(reg, 0x7710, 16, iter_reg);
1835
1836	iter_reg = fw->rcvt1_data_dma_reg;
1837	iter_reg = qla24xx_read_window(reg, 0x7720, 16, iter_reg);
1838	qla24xx_read_window(reg, 0x7730, 16, iter_reg);
1839
1840	/* RISC registers. */
1841	iter_reg = fw->risc_gp_reg;
1842	iter_reg = qla24xx_read_window(reg, 0x0F00, 16, iter_reg);
1843	iter_reg = qla24xx_read_window(reg, 0x0F10, 16, iter_reg);
1844	iter_reg = qla24xx_read_window(reg, 0x0F20, 16, iter_reg);
1845	iter_reg = qla24xx_read_window(reg, 0x0F30, 16, iter_reg);
1846	iter_reg = qla24xx_read_window(reg, 0x0F40, 16, iter_reg);
1847	iter_reg = qla24xx_read_window(reg, 0x0F50, 16, iter_reg);
1848	iter_reg = qla24xx_read_window(reg, 0x0F60, 16, iter_reg);
1849	qla24xx_read_window(reg, 0x0F70, 16, iter_reg);
1850
1851	/* Local memory controller registers. */
1852	iter_reg = fw->lmc_reg;
1853	iter_reg = qla24xx_read_window(reg, 0x3000, 16, iter_reg);
1854	iter_reg = qla24xx_read_window(reg, 0x3010, 16, iter_reg);
1855	iter_reg = qla24xx_read_window(reg, 0x3020, 16, iter_reg);
1856	iter_reg = qla24xx_read_window(reg, 0x3030, 16, iter_reg);
1857	iter_reg = qla24xx_read_window(reg, 0x3040, 16, iter_reg);
1858	iter_reg = qla24xx_read_window(reg, 0x3050, 16, iter_reg);
1859	iter_reg = qla24xx_read_window(reg, 0x3060, 16, iter_reg);
1860	qla24xx_read_window(reg, 0x3070, 16, iter_reg);
1861
1862	/* Fibre Protocol Module registers. */
1863	iter_reg = fw->fpm_hdw_reg;
1864	iter_reg = qla24xx_read_window(reg, 0x4000, 16, iter_reg);
1865	iter_reg = qla24xx_read_window(reg, 0x4010, 16, iter_reg);
1866	iter_reg = qla24xx_read_window(reg, 0x4020, 16, iter_reg);
1867	iter_reg = qla24xx_read_window(reg, 0x4030, 16, iter_reg);
1868	iter_reg = qla24xx_read_window(reg, 0x4040, 16, iter_reg);
1869	iter_reg = qla24xx_read_window(reg, 0x4050, 16, iter_reg);
1870	iter_reg = qla24xx_read_window(reg, 0x4060, 16, iter_reg);
1871	iter_reg = qla24xx_read_window(reg, 0x4070, 16, iter_reg);
1872	iter_reg = qla24xx_read_window(reg, 0x4080, 16, iter_reg);
1873	iter_reg = qla24xx_read_window(reg, 0x4090, 16, iter_reg);
1874	iter_reg = qla24xx_read_window(reg, 0x40A0, 16, iter_reg);
1875	iter_reg = qla24xx_read_window(reg, 0x40B0, 16, iter_reg);
1876	iter_reg = qla24xx_read_window(reg, 0x40C0, 16, iter_reg);
1877	qla24xx_read_window(reg, 0x40D0, 16, iter_reg);
1878
1879	/* Frame Buffer registers. */
1880	iter_reg = fw->fb_hdw_reg;
1881	iter_reg = qla24xx_read_window(reg, 0x6000, 16, iter_reg);
1882	iter_reg = qla24xx_read_window(reg, 0x6010, 16, iter_reg);
1883	iter_reg = qla24xx_read_window(reg, 0x6020, 16, iter_reg);
1884	iter_reg = qla24xx_read_window(reg, 0x6030, 16, iter_reg);
1885	iter_reg = qla24xx_read_window(reg, 0x6040, 16, iter_reg);
1886	iter_reg = qla24xx_read_window(reg, 0x6100, 16, iter_reg);
1887	iter_reg = qla24xx_read_window(reg, 0x6130, 16, iter_reg);
1888	iter_reg = qla24xx_read_window(reg, 0x6150, 16, iter_reg);
1889	iter_reg = qla24xx_read_window(reg, 0x6170, 16, iter_reg);
1890	iter_reg = qla24xx_read_window(reg, 0x6190, 16, iter_reg);
1891	iter_reg = qla24xx_read_window(reg, 0x61B0, 16, iter_reg);
1892	iter_reg = qla24xx_read_window(reg, 0x61C0, 16, iter_reg);
1893	qla24xx_read_window(reg, 0x6F00, 16, iter_reg);
1894
1895	/* Multi queue registers */
1896	nxt_chain = qla25xx_copy_mq(ha, (void *)ha->fw_dump + ha->chain_offset,
1897	    &last_chain);
1898
1899	rval = qla24xx_soft_reset(ha);
1900	if (rval != QLA_SUCCESS)
1901		goto qla81xx_fw_dump_failed_0;
1902
1903	rval = qla24xx_dump_memory(ha, fw->code_ram, sizeof(fw->code_ram),
1904	    &nxt);
1905	if (rval != QLA_SUCCESS)
1906		goto qla81xx_fw_dump_failed_0;
1907
1908	nxt = qla2xxx_copy_queues(ha, nxt);
1909
1910	qla24xx_copy_eft(ha, nxt);
1911
1912	/* Chain entries -- started with MQ. */
1913	nxt_chain = qla25xx_copy_fce(ha, nxt_chain, &last_chain);
1914	nxt_chain = qla25xx_copy_mqueues(ha, nxt_chain, &last_chain);
1915	nxt_chain = qla2xxx_copy_atioqueues(ha, nxt_chain, &last_chain);
1916	if (last_chain) {
1917		ha->fw_dump->version |= __constant_htonl(DUMP_CHAIN_VARIANT);
1918		*last_chain |= __constant_htonl(DUMP_CHAIN_LAST);
1919	}
1920
1921	/* Adjust valid length. */
1922	ha->fw_dump_len = (nxt_chain - (void *)ha->fw_dump);
1923
1924qla81xx_fw_dump_failed_0:
1925	qla2xxx_dump_post_process(base_vha, rval);
1926
1927qla81xx_fw_dump_failed:
1928	if (!hardware_locked)
1929		spin_unlock_irqrestore(&ha->hardware_lock, flags);
1930}
1931
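/*
 * qla83xx_fw_dump() - capture an ISP83xx firmware dump into the
 * pre-allocated ha->fw_dump buffer.  @hardware_locked indicates whether
 * the caller already holds ha->hardware_lock.
 */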
1932void
1933qla83xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
1934{
1935	int		rval;
1936	uint32_t	cnt, reg_data;
1937	uint32_t	risc_address;
1938	struct qla_hw_data *ha = vha->hw;
1939	struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
1940	uint32_t __iomem *dmp_reg;
1941	uint32_t	*iter_reg;
1942	uint16_t __iomem *mbx_reg;
1943	unsigned long	flags;
1944	struct qla83xx_fw_dump *fw;
1945	uint32_t	ext_mem_cnt;
1946	void		*nxt, *nxt_chain;
1947	uint32_t	*last_chain = NULL;
1948	struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
1949
1950	risc_address = ext_mem_cnt = 0;
1951	flags = 0;
1952	ha->fw_dump_cap_flags = 0;
1953
1954	if (!hardware_locked)
1955		spin_lock_irqsave(&ha->hardware_lock, flags);
1956
1957	if (!ha->fw_dump) {
1958		ql_log(ql_log_warn, vha, 0xd00c,
1959		    "No buffer available for dump.\n");
1960		goto qla83xx_fw_dump_failed;
1961	}
1962
1963	if (ha->fw_dumped) {
1964		ql_log(ql_log_warn, vha, 0xd00d,
1965		    "Firmware has been previously dumped (%p) -- ignoring "
1966		    "request.\n", ha->fw_dump);
1967		goto qla83xx_fw_dump_failed;
1968	}
1969	fw = &ha->fw_dump->isp.isp83;
1970	qla2xxx_prep_dump(ha, ha->fw_dump);
1971
1972	fw->host_status = htonl(RD_REG_DWORD(&reg->host_status));
1973
1974	/*
1975	 * Pause RISC. No need to track timeout, as resetting the chip
1976	 * is the right approach in case of a pause timeout.
1977	 */
1978	qla24xx_pause_risc(reg, ha);
1979
1980	WRT_REG_DWORD(&reg->iobase_addr, 0x6000);
1981	dmp_reg = &reg->iobase_window;
1982	reg_data = RD_REG_DWORD(dmp_reg);
1983	WRT_REG_DWORD(dmp_reg, 0);
1984
1985	dmp_reg = &reg->unused_4_1[0];
1986	reg_data = RD_REG_DWORD(dmp_reg);
1987	WRT_REG_DWORD(dmp_reg, 0);
1988
1989	WRT_REG_DWORD(&reg->iobase_addr, 0x6010);
1990	dmp_reg = &reg->unused_4_1[2];
1991	reg_data = RD_REG_DWORD(dmp_reg);
1992	WRT_REG_DWORD(dmp_reg, 0);
1993
1994	/* Select PCR and disable ECC checking and correction. */
1995	WRT_REG_DWORD(&reg->iobase_addr, 0x0F70);
1996	RD_REG_DWORD(&reg->iobase_addr);
1997	WRT_REG_DWORD(&reg->iobase_select, 0x60000000);	/* write to F0h = PCR */
1998
1999	/* Host/RISC registers. */
2000	iter_reg = fw->host_risc_reg;
2001	iter_reg = qla24xx_read_window(reg, 0x7000, 16, iter_reg);
2002	iter_reg = qla24xx_read_window(reg, 0x7010, 16, iter_reg);
2003	qla24xx_read_window(reg, 0x7040, 16, iter_reg);
2004
2005	/* PCIe registers. */
2006	WRT_REG_DWORD(&reg->iobase_addr, 0x7C00);
2007	RD_REG_DWORD(&reg->iobase_addr);
2008	WRT_REG_DWORD(&reg->iobase_window, 0x01);
2009	dmp_reg = &reg->iobase_c4;
2010	fw->pcie_regs[0] = htonl(RD_REG_DWORD(dmp_reg++));
2011	fw->pcie_regs[1] = htonl(RD_REG_DWORD(dmp_reg++));
2012	fw->pcie_regs[2] = htonl(RD_REG_DWORD(dmp_reg));
2013	fw->pcie_regs[3] = htonl(RD_REG_DWORD(&reg->iobase_window));
2014
2015	WRT_REG_DWORD(&reg->iobase_window, 0x00);
2016	RD_REG_DWORD(&reg->iobase_window);
2017
2018	/* Host interface registers. */
2019	dmp_reg = &reg->flash_addr;
2020	for (cnt = 0; cnt < sizeof(fw->host_reg) / 4; cnt++)
2021		fw->host_reg[cnt] = htonl(RD_REG_DWORD(dmp_reg++));
2022
2023	/* Disable interrupts. */
2024	WRT_REG_DWORD(&reg->ictrl, 0);
2025	RD_REG_DWORD(&reg->ictrl);
2026
2027	/* Shadow registers. */
2028	WRT_REG_DWORD(&reg->iobase_addr, 0x0F70);
2029	RD_REG_DWORD(&reg->iobase_addr);
2030	WRT_REG_DWORD(&reg->iobase_select, 0xB0000000);
2031	fw->shadow_reg[0] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
2032
2033	WRT_REG_DWORD(&reg->iobase_select, 0xB0100000);
2034	fw->shadow_reg[1] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
2035
2036	WRT_REG_DWORD(&reg->iobase_select, 0xB0200000);
2037	fw->shadow_reg[2] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
2038
2039	WRT_REG_DWORD(&reg->iobase_select, 0xB0300000);
2040	fw->shadow_reg[3] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
2041
2042	WRT_REG_DWORD(&reg->iobase_select, 0xB0400000);
2043	fw->shadow_reg[4] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
2044
2045	WRT_REG_DWORD(&reg->iobase_select, 0xB0500000);
2046	fw->shadow_reg[5] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
2047
2048	WRT_REG_DWORD(&reg->iobase_select, 0xB0600000);
2049	fw->shadow_reg[6] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
2050
2051	WRT_REG_DWORD(&reg->iobase_select, 0xB0700000);
2052	fw->shadow_reg[7] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
2053
2054	WRT_REG_DWORD(&reg->iobase_select, 0xB0800000);
2055	fw->shadow_reg[8] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
2056
2057	WRT_REG_DWORD(&reg->iobase_select, 0xB0900000);
2058	fw->shadow_reg[9] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
2059
2060	WRT_REG_DWORD(&reg->iobase_select, 0xB0A00000);
2061	fw->shadow_reg[10] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
2062
2063	/* RISC I/O register. */
2064	WRT_REG_DWORD(&reg->iobase_addr, 0x0010);
2065	fw->risc_io_reg = htonl(RD_REG_DWORD(&reg->iobase_window));
2066
2067	/* Mailbox registers. */
2068	mbx_reg = &reg->mailbox0;
2069	for (cnt = 0; cnt < sizeof(fw->mailbox_reg) / 2; cnt++)
2070		fw->mailbox_reg[cnt] = htons(RD_REG_WORD(mbx_reg++));
2071
2072	/* Transfer sequence registers. */
2073	iter_reg = fw->xseq_gp_reg;
2074	iter_reg = qla24xx_read_window(reg, 0xBE00, 16, iter_reg);
2075	iter_reg = qla24xx_read_window(reg, 0xBE10, 16, iter_reg);
2076	iter_reg = qla24xx_read_window(reg, 0xBE20, 16, iter_reg);
2077	iter_reg = qla24xx_read_window(reg, 0xBE30, 16, iter_reg);
2078	iter_reg = qla24xx_read_window(reg, 0xBE40, 16, iter_reg);
2079	iter_reg = qla24xx_read_window(reg, 0xBE50, 16, iter_reg);
2080	iter_reg = qla24xx_read_window(reg, 0xBE60, 16, iter_reg);
2081	iter_reg = qla24xx_read_window(reg, 0xBE70, 16, iter_reg);
2082	iter_reg = qla24xx_read_window(reg, 0xBF00, 16, iter_reg);
2083	iter_reg = qla24xx_read_window(reg, 0xBF10, 16, iter_reg);
2084	iter_reg = qla24xx_read_window(reg, 0xBF20, 16, iter_reg);
2085	iter_reg = qla24xx_read_window(reg, 0xBF30, 16, iter_reg);
2086	iter_reg = qla24xx_read_window(reg, 0xBF40, 16, iter_reg);
2087	iter_reg = qla24xx_read_window(reg, 0xBF50, 16, iter_reg);
2088	iter_reg = qla24xx_read_window(reg, 0xBF60, 16, iter_reg);
2089	qla24xx_read_window(reg, 0xBF70, 16, iter_reg);
2090
2091	iter_reg = fw->xseq_0_reg;
2092	iter_reg = qla24xx_read_window(reg, 0xBFC0, 16, iter_reg);
2093	iter_reg = qla24xx_read_window(reg, 0xBFD0, 16, iter_reg);
2094	qla24xx_read_window(reg, 0xBFE0, 16, iter_reg);
2095
2096	qla24xx_read_window(reg, 0xBFF0, 16, fw->xseq_1_reg);
2097
2098	qla24xx_read_window(reg, 0xBEF0, 16, fw->xseq_2_reg);
2099
2100	/* Receive sequence registers. */
2101	iter_reg = fw->rseq_gp_reg;
2102	iter_reg = qla24xx_read_window(reg, 0xFE00, 16, iter_reg);
2103	iter_reg = qla24xx_read_window(reg, 0xFE10, 16, iter_reg);
2104	iter_reg = qla24xx_read_window(reg, 0xFE20, 16, iter_reg);
2105	iter_reg = qla24xx_read_window(reg, 0xFE30, 16, iter_reg);
2106	iter_reg = qla24xx_read_window(reg, 0xFE40, 16, iter_reg);
2107	iter_reg = qla24xx_read_window(reg, 0xFE50, 16, iter_reg);
2108	iter_reg = qla24xx_read_window(reg, 0xFE60, 16, iter_reg);
2109	iter_reg = qla24xx_read_window(reg, 0xFE70, 16, iter_reg);
2110	iter_reg = qla24xx_read_window(reg, 0xFF00, 16, iter_reg);
2111	iter_reg = qla24xx_read_window(reg, 0xFF10, 16, iter_reg);
2112	iter_reg = qla24xx_read_window(reg, 0xFF20, 16, iter_reg);
2113	iter_reg = qla24xx_read_window(reg, 0xFF30, 16, iter_reg);
2114	iter_reg = qla24xx_read_window(reg, 0xFF40, 16, iter_reg);
2115	iter_reg = qla24xx_read_window(reg, 0xFF50, 16, iter_reg);
2116	iter_reg = qla24xx_read_window(reg, 0xFF60, 16, iter_reg);
2117	qla24xx_read_window(reg, 0xFF70, 16, iter_reg);
2118
2119	iter_reg = fw->rseq_0_reg;
2120	iter_reg = qla24xx_read_window(reg, 0xFFC0, 16, iter_reg);
2121	qla24xx_read_window(reg, 0xFFD0, 16, iter_reg);
2122
2123	qla24xx_read_window(reg, 0xFFE0, 16, fw->rseq_1_reg);
2124	qla24xx_read_window(reg, 0xFFF0, 16, fw->rseq_2_reg);
2125	qla24xx_read_window(reg, 0xFEF0, 16, fw->rseq_3_reg);
2126
2127	/* Auxiliary sequence registers. */
2128	iter_reg = fw->aseq_gp_reg;
2129	iter_reg = qla24xx_read_window(reg, 0xB000, 16, iter_reg);
2130	iter_reg = qla24xx_read_window(reg, 0xB010, 16, iter_reg);
2131	iter_reg = qla24xx_read_window(reg, 0xB020, 16, iter_reg);
2132	iter_reg = qla24xx_read_window(reg, 0xB030, 16, iter_reg);
2133	iter_reg = qla24xx_read_window(reg, 0xB040, 16, iter_reg);
2134	iter_reg = qla24xx_read_window(reg, 0xB050, 16, iter_reg);
2135	iter_reg = qla24xx_read_window(reg, 0xB060, 16, iter_reg);
2136	iter_reg = qla24xx_read_window(reg, 0xB070, 16, iter_reg);
2137	iter_reg = qla24xx_read_window(reg, 0xB100, 16, iter_reg);
2138	iter_reg = qla24xx_read_window(reg, 0xB110, 16, iter_reg);
2139	iter_reg = qla24xx_read_window(reg, 0xB120, 16, iter_reg);
2140	iter_reg = qla24xx_read_window(reg, 0xB130, 16, iter_reg);
2141	iter_reg = qla24xx_read_window(reg, 0xB140, 16, iter_reg);
2142	iter_reg = qla24xx_read_window(reg, 0xB150, 16, iter_reg);
2143	iter_reg = qla24xx_read_window(reg, 0xB160, 16, iter_reg);
2144	qla24xx_read_window(reg, 0xB170, 16, iter_reg);
2145
2146	iter_reg = fw->aseq_0_reg;
2147	iter_reg = qla24xx_read_window(reg, 0xB0C0, 16, iter_reg);
2148	qla24xx_read_window(reg, 0xB0D0, 16, iter_reg);
2149
2150	qla24xx_read_window(reg, 0xB0E0, 16, fw->aseq_1_reg);
2151	qla24xx_read_window(reg, 0xB0F0, 16, fw->aseq_2_reg);
2152	qla24xx_read_window(reg, 0xB1F0, 16, fw->aseq_3_reg);
2153
2154	/* Command DMA registers. */
2155	iter_reg = fw->cmd_dma_reg;
2156	iter_reg = qla24xx_read_window(reg, 0x7100, 16, iter_reg);
2157	iter_reg = qla24xx_read_window(reg, 0x7120, 16, iter_reg);
2158	iter_reg = qla24xx_read_window(reg, 0x7130, 16, iter_reg);
2159	qla24xx_read_window(reg, 0x71F0, 16, iter_reg);
2160
2161	/* Queues. */
2162	iter_reg = fw->req0_dma_reg;
2163	iter_reg = qla24xx_read_window(reg, 0x7200, 8, iter_reg);
2164	dmp_reg = &reg->iobase_q;
2165	for (cnt = 0; cnt < 7; cnt++)
2166		*iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
2167
2168	iter_reg = fw->resp0_dma_reg;
2169	iter_reg = qla24xx_read_window(reg, 0x7300, 8, iter_reg);
2170	dmp_reg = &reg->iobase_q;
2171	for (cnt = 0; cnt < 7; cnt++)
2172		*iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
2173
2174	iter_reg = fw->req1_dma_reg;
2175	iter_reg = qla24xx_read_window(reg, 0x7400, 8, iter_reg);
2176	dmp_reg = &reg->iobase_q;
2177	for (cnt = 0; cnt < 7; cnt++)
2178		*iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));
2179
2180	/* Transmit DMA registers. */
2181	iter_reg = fw->xmt0_dma_reg;
2182	iter_reg = qla24xx_read_window(reg, 0x7600, 16, iter_reg);
2183	qla24xx_read_window(reg, 0x7610, 16, iter_reg);
2184
2185	iter_reg = fw->xmt1_dma_reg;
2186	iter_reg = qla24xx_read_window(reg, 0x7620, 16, iter_reg);
2187	qla24xx_read_window(reg, 0x7630, 16, iter_reg);
2188
2189	iter_reg = fw->xmt2_dma_reg;
2190	iter_reg = qla24xx_read_window(reg, 0x7640, 16, iter_reg);
2191	qla24xx_read_window(reg, 0x7650, 16, iter_reg);
2192
2193	iter_reg = fw->xmt3_dma_reg;
2194	iter_reg = qla24xx_read_window(reg, 0x7660, 16, iter_reg);
2195	qla24xx_read_window(reg, 0x7670, 16, iter_reg);
2196
2197	iter_reg = fw->xmt4_dma_reg;
2198	iter_reg = qla24xx_read_window(reg, 0x7680, 16, iter_reg);
2199	qla24xx_read_window(reg, 0x7690, 16, iter_reg);
2200
2201	qla24xx_read_window(reg, 0x76A0, 16, fw->xmt_data_dma_reg);
2202
2203	/* Receive DMA registers. */
2204	iter_reg = fw->rcvt0_data_dma_reg;
2205	iter_reg = qla24xx_read_window(reg, 0x7700, 16, iter_reg);
2206	qla24xx_read_window(reg, 0x7710, 16, iter_reg);
2207
2208	iter_reg = fw->rcvt1_data_dma_reg;
2209	iter_reg = qla24xx_read_window(reg, 0x7720, 16, iter_reg);
2210	qla24xx_read_window(reg, 0x7730, 16, iter_reg);
2211
2212	/* RISC registers. */
2213	iter_reg = fw->risc_gp_reg;
2214	iter_reg = qla24xx_read_window(reg, 0x0F00, 16, iter_reg);
2215	iter_reg = qla24xx_read_window(reg, 0x0F10, 16, iter_reg);
2216	iter_reg = qla24xx_read_window(reg, 0x0F20, 16, iter_reg);
2217	iter_reg = qla24xx_read_window(reg, 0x0F30, 16, iter_reg);
2218	iter_reg = qla24xx_read_window(reg, 0x0F40, 16, iter_reg);
2219	iter_reg = qla24xx_read_window(reg, 0x0F50, 16, iter_reg);
2220	iter_reg = qla24xx_read_window(reg, 0x0F60, 16, iter_reg);
2221	qla24xx_read_window(reg, 0x0F70, 16, iter_reg);
2222
2223	/* Local memory controller registers. */
2224	iter_reg = fw->lmc_reg;
2225	iter_reg = qla24xx_read_window(reg, 0x3000, 16, iter_reg);
2226	iter_reg = qla24xx_read_window(reg, 0x3010, 16, iter_reg);
2227	iter_reg = qla24xx_read_window(reg, 0x3020, 16, iter_reg);
2228	iter_reg = qla24xx_read_window(reg, 0x3030, 16, iter_reg);
2229	iter_reg = qla24xx_read_window(reg, 0x3040, 16, iter_reg);
2230	iter_reg = qla24xx_read_window(reg, 0x3050, 16, iter_reg);
2231	iter_reg = qla24xx_read_window(reg, 0x3060, 16, iter_reg);
2232	qla24xx_read_window(reg, 0x3070, 16, iter_reg);
2233
2234	/* Fibre Protocol Module registers. */
2235	iter_reg = fw->fpm_hdw_reg;
2236	iter_reg = qla24xx_read_window(reg, 0x4000, 16, iter_reg);
2237	iter_reg = qla24xx_read_window(reg, 0x4010, 16, iter_reg);
2238	iter_reg = qla24xx_read_window(reg, 0x4020, 16, iter_reg);
2239	iter_reg = qla24xx_read_window(reg, 0x4030, 16, iter_reg);
2240	iter_reg = qla24xx_read_window(reg, 0x4040, 16, iter_reg);
2241	iter_reg = qla24xx_read_window(reg, 0x4050, 16, iter_reg);
2242	iter_reg = qla24xx_read_window(reg, 0x4060, 16, iter_reg);
2243	iter_reg = qla24xx_read_window(reg, 0x4070, 16, iter_reg);
2244	iter_reg = qla24xx_read_window(reg, 0x4080, 16, iter_reg);
2245	iter_reg = qla24xx_read_window(reg, 0x4090, 16, iter_reg);
2246	iter_reg = qla24xx_read_window(reg, 0x40A0, 16, iter_reg);
2247	iter_reg = qla24xx_read_window(reg, 0x40B0, 16, iter_reg);
2248	iter_reg = qla24xx_read_window(reg, 0x40C0, 16, iter_reg);
2249	iter_reg = qla24xx_read_window(reg, 0x40D0, 16, iter_reg);
2250	iter_reg = qla24xx_read_window(reg, 0x40E0, 16, iter_reg);
2251	qla24xx_read_window(reg, 0x40F0, 16, iter_reg);
2252
2253	/* RQ0 Array registers. */
2254	iter_reg = fw->rq0_array_reg;
2255	iter_reg = qla24xx_read_window(reg, 0x5C00, 16, iter_reg);
2256	iter_reg = qla24xx_read_window(reg, 0x5C10, 16, iter_reg);
2257	iter_reg = qla24xx_read_window(reg, 0x5C20, 16, iter_reg);
2258	iter_reg = qla24xx_read_window(reg, 0x5C30, 16, iter_reg);
2259	iter_reg = qla24xx_read_window(reg, 0x5C40, 16, iter_reg);
2260	iter_reg = qla24xx_read_window(reg, 0x5C50, 16, iter_reg);
2261	iter_reg = qla24xx_read_window(reg, 0x5C60, 16, iter_reg);
2262	iter_reg = qla24xx_read_window(reg, 0x5C70, 16, iter_reg);
2263	iter_reg = qla24xx_read_window(reg, 0x5C80, 16, iter_reg);
2264	iter_reg = qla24xx_read_window(reg, 0x5C90, 16, iter_reg);
2265	iter_reg = qla24xx_read_window(reg, 0x5CA0, 16, iter_reg);
2266	iter_reg = qla24xx_read_window(reg, 0x5CB0, 16, iter_reg);
2267	iter_reg = qla24xx_read_window(reg, 0x5CC0, 16, iter_reg);
2268	iter_reg = qla24xx_read_window(reg, 0x5CD0, 16, iter_reg);
2269	iter_reg = qla24xx_read_window(reg, 0x5CE0, 16, iter_reg);
2270	qla24xx_read_window(reg, 0x5CF0, 16, iter_reg);
2271
2272	/* RQ1 Array registers. */
2273	iter_reg = fw->rq1_array_reg;
2274	iter_reg = qla24xx_read_window(reg, 0x5D00, 16, iter_reg);
2275	iter_reg = qla24xx_read_window(reg, 0x5D10, 16, iter_reg);
2276	iter_reg = qla24xx_read_window(reg, 0x5D20, 16, iter_reg);
2277	iter_reg = qla24xx_read_window(reg, 0x5D30, 16, iter_reg);
2278	iter_reg = qla24xx_read_window(reg, 0x5D40, 16, iter_reg);
2279	iter_reg = qla24xx_read_window(reg, 0x5D50, 16, iter_reg);
2280	iter_reg = qla24xx_read_window(reg, 0x5D60, 16, iter_reg);
2281	iter_reg = qla24xx_read_window(reg, 0x5D70, 16, iter_reg);
2282	iter_reg = qla24xx_read_window(reg, 0x5D80, 16, iter_reg);
2283	iter_reg = qla24xx_read_window(reg, 0x5D90, 16, iter_reg);
2284	iter_reg = qla24xx_read_window(reg, 0x5DA0, 16, iter_reg);
2285	iter_reg = qla24xx_read_window(reg, 0x5DB0, 16, iter_reg);
2286	iter_reg = qla24xx_read_window(reg, 0x5DC0, 16, iter_reg);
2287	iter_reg = qla24xx_read_window(reg, 0x5DD0, 16, iter_reg);
2288	iter_reg = qla24xx_read_window(reg, 0x5DE0, 16, iter_reg);
2289	qla24xx_read_window(reg, 0x5DF0, 16, iter_reg);
2290
2291	/* RP0 Array registers. */
2292	iter_reg = fw->rp0_array_reg;
2293	iter_reg = qla24xx_read_window(reg, 0x5E00, 16, iter_reg);
2294	iter_reg = qla24xx_read_window(reg, 0x5E10, 16, iter_reg);
2295	iter_reg = qla24xx_read_window(reg, 0x5E20, 16, iter_reg);
2296	iter_reg = qla24xx_read_window(reg, 0x5E30, 16, iter_reg);
2297	iter_reg = qla24xx_read_window(reg, 0x5E40, 16, iter_reg);
2298	iter_reg = qla24xx_read_window(reg, 0x5E50, 16, iter_reg);
2299	iter_reg = qla24xx_read_window(reg, 0x5E60, 16, iter_reg);
2300	iter_reg = qla24xx_read_window(reg, 0x5E70, 16, iter_reg);
2301	iter_reg = qla24xx_read_window(reg, 0x5E80, 16, iter_reg);
2302	iter_reg = qla24xx_read_window(reg, 0x5E90, 16, iter_reg);
2303	iter_reg = qla24xx_read_window(reg, 0x5EA0, 16, iter_reg);
2304	iter_reg = qla24xx_read_window(reg, 0x5EB0, 16, iter_reg);
2305	iter_reg = qla24xx_read_window(reg, 0x5EC0, 16, iter_reg);
2306	iter_reg = qla24xx_read_window(reg, 0x5ED0, 16, iter_reg);
2307	iter_reg = qla24xx_read_window(reg, 0x5EE0, 16, iter_reg);
2308	qla24xx_read_window(reg, 0x5EF0, 16, iter_reg);
2309
2310	/* RP1 Array registers. */
2311	iter_reg = fw->rp1_array_reg;
2312	iter_reg = qla24xx_read_window(reg, 0x5F00, 16, iter_reg);
2313	iter_reg = qla24xx_read_window(reg, 0x5F10, 16, iter_reg);
2314	iter_reg = qla24xx_read_window(reg, 0x5F20, 16, iter_reg);
2315	iter_reg = qla24xx_read_window(reg, 0x5F30, 16, iter_reg);
2316	iter_reg = qla24xx_read_window(reg, 0x5F40, 16, iter_reg);
2317	iter_reg = qla24xx_read_window(reg, 0x5F50, 16, iter_reg);
2318	iter_reg = qla24xx_read_window(reg, 0x5F60, 16, iter_reg);
2319	iter_reg = qla24xx_read_window(reg, 0x5F70, 16, iter_reg);
2320	iter_reg = qla24xx_read_window(reg, 0x5F80, 16, iter_reg);
2321	iter_reg = qla24xx_read_window(reg, 0x5F90, 16, iter_reg);
2322	iter_reg = qla24xx_read_window(reg, 0x5FA0, 16, iter_reg);
2323	iter_reg = qla24xx_read_window(reg, 0x5FB0, 16, iter_reg);
2324	iter_reg = qla24xx_read_window(reg, 0x5FC0, 16, iter_reg);
2325	iter_reg = qla24xx_read_window(reg, 0x5FD0, 16, iter_reg);
2326	iter_reg = qla24xx_read_window(reg, 0x5FE0, 16, iter_reg);
2327	qla24xx_read_window(reg, 0x5FF0, 16, iter_reg);
2328
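	/* AT0 Array registers. */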
2329	iter_reg = fw->at0_array_reg;
2330	iter_reg = qla24xx_read_window(reg, 0x7080, 16, iter_reg);
2331	iter_reg = qla24xx_read_window(reg, 0x7090, 16, iter_reg);
2332	iter_reg = qla24xx_read_window(reg, 0x70A0, 16, iter_reg);
2333	iter_reg = qla24xx_read_window(reg, 0x70B0, 16, iter_reg);
2334	iter_reg = qla24xx_read_window(reg, 0x70C0, 16, iter_reg);
2335	iter_reg = qla24xx_read_window(reg, 0x70D0, 16, iter_reg);
2336	iter_reg = qla24xx_read_window(reg, 0x70E0, 16, iter_reg);
2337	qla24xx_read_window(reg, 0x70F0, 16, iter_reg);
2338
2339	/* I/O Queue Control registers. */
2340	qla24xx_read_window(reg, 0x7800, 16, fw->queue_control_reg);
2341
2342	/* Frame Buffer registers. */
2343	iter_reg = fw->fb_hdw_reg;
2344	iter_reg = qla24xx_read_window(reg, 0x6000, 16, iter_reg);
2345	iter_reg = qla24xx_read_window(reg, 0x6010, 16, iter_reg);
2346	iter_reg = qla24xx_read_window(reg, 0x6020, 16, iter_reg);
2347	iter_reg = qla24xx_read_window(reg, 0x6030, 16, iter_reg);
2348	iter_reg = qla24xx_read_window(reg, 0x6040, 16, iter_reg);
2349	iter_reg = qla24xx_read_window(reg, 0x6060, 16, iter_reg);
2350	iter_reg = qla24xx_read_window(reg, 0x6070, 16, iter_reg);
2351	iter_reg = qla24xx_read_window(reg, 0x6100, 16, iter_reg);
2352	iter_reg = qla24xx_read_window(reg, 0x6130, 16, iter_reg);
2353	iter_reg = qla24xx_read_window(reg, 0x6150, 16, iter_reg);
2354	iter_reg = qla24xx_read_window(reg, 0x6170, 16, iter_reg);
2355	iter_reg = qla24xx_read_window(reg, 0x6190, 16, iter_reg);
2356	iter_reg = qla24xx_read_window(reg, 0x61B0, 16, iter_reg);
2357	iter_reg = qla24xx_read_window(reg, 0x61C0, 16, iter_reg);
2358	iter_reg = qla24xx_read_window(reg, 0x6530, 16, iter_reg);
2359	iter_reg = qla24xx_read_window(reg, 0x6540, 16, iter_reg);
2360	iter_reg = qla24xx_read_window(reg, 0x6550, 16, iter_reg);
2361	iter_reg = qla24xx_read_window(reg, 0x6560, 16, iter_reg);
2362	iter_reg = qla24xx_read_window(reg, 0x6570, 16, iter_reg);
2363	iter_reg = qla24xx_read_window(reg, 0x6580, 16, iter_reg);
2364	iter_reg = qla24xx_read_window(reg, 0x6590, 16, iter_reg);
2365	iter_reg = qla24xx_read_window(reg, 0x65A0, 16, iter_reg);
2366	iter_reg = qla24xx_read_window(reg, 0x65B0, 16, iter_reg);
2367	iter_reg = qla24xx_read_window(reg, 0x65C0, 16, iter_reg);
2368	iter_reg = qla24xx_read_window(reg, 0x65D0, 16, iter_reg);
2369	iter_reg = qla24xx_read_window(reg, 0x65E0, 16, iter_reg);
2370	qla24xx_read_window(reg, 0x6F00, 16, iter_reg);
2371
2372	/* Multi queue registers */
2373	nxt_chain = qla25xx_copy_mq(ha, (void *)ha->fw_dump + ha->chain_offset,
2374	    &last_chain);
2375
2376	rval = qla24xx_soft_reset(ha);
2377	if (rval != QLA_SUCCESS) {
2378		ql_log(ql_log_warn, vha, 0xd00e,
2379		    "Soft reset failed, forcing continuation of the dump.\n");
2380		rval = QLA_SUCCESS;
2381
2382		ql_log(ql_log_warn, vha, 0xd00f, "Attempting hard RISC reset.\n");
2383
2384		WRT_REG_DWORD(&reg->hccr, HCCRX_SET_RISC_RESET);
2385		RD_REG_DWORD(&reg->hccr);
2386
2387		WRT_REG_DWORD(&reg->hccr, HCCRX_REL_RISC_PAUSE);
2388		RD_REG_DWORD(&reg->hccr);
2389
2390		WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_RESET);
2391		RD_REG_DWORD(&reg->hccr);
2392
2393		for (cnt = 30000; cnt && (RD_REG_WORD(&reg->mailbox0)); cnt--)
2394			udelay(5);
2395
2396		if (!cnt) {
2397			nxt = fw->code_ram;
2398			nxt += sizeof(fw->code_ram);
2399			nxt += (ha->fw_memory_size - 0x100000 + 1);
2400			goto copy_queue;
2401		} else {
2402			set_bit(RISC_RDY_AFT_RESET, &ha->fw_dump_cap_flags);
2403			ql_log(ql_log_warn, vha, 0xd010,
2404			    "Hard RISC reset succeeded; resuming dump.\n");
2405		}
2406	}
2407
2408	rval = qla24xx_dump_memory(ha, fw->code_ram, sizeof(fw->code_ram),
2409	    &nxt);
2410	if (rval != QLA_SUCCESS)
2411		goto qla83xx_fw_dump_failed_0;
2412
2413copy_queue:
2414	nxt = qla2xxx_copy_queues(ha, nxt);
2415
2416	qla24xx_copy_eft(ha, nxt);
2417
2418	/* Chain entries -- started with MQ. */
2419	nxt_chain = qla25xx_copy_fce(ha, nxt_chain, &last_chain);
2420	nxt_chain = qla25xx_copy_mqueues(ha, nxt_chain, &last_chain);
2421	nxt_chain = qla2xxx_copy_atioqueues(ha, nxt_chain, &last_chain);
2422	if (last_chain) {
2423		ha->fw_dump->version |= __constant_htonl(DUMP_CHAIN_VARIANT);
2424		*last_chain |= __constant_htonl(DUMP_CHAIN_LAST);
2425	}
2426
2427	/* Adjust valid length. */
2428	ha->fw_dump_len = (nxt_chain - (void *)ha->fw_dump);
2429
2430qla83xx_fw_dump_failed_0:
2431	qla2xxx_dump_post_process(base_vha, rval);
2432
2433qla83xx_fw_dump_failed:
2434	if (!hardware_locked)
2435		spin_unlock_irqrestore(&ha->hardware_lock, flags);
2436}
2437
2438/****************************************************************************/
2439/*                         Driver Debug Functions.                          */
2440/****************************************************************************/
2441
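/*
 * ql_mask_match() - check whether a debug level is currently enabled.
 *
 * Returns true when every bit of @level is set in the
 * ql2xextended_error_logging module parameter.  A parameter value of 1 is
 * treated as shorthand for the default debug mask (QL_DBG_DEFAULT1_MASK).
 */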
2442static inline int
2443ql_mask_match(uint32_t level)
2444{
2445	if (ql2xextended_error_logging == 1)
2446		ql2xextended_error_logging = QL_DBG_DEFAULT1_MASK;
2447	return (level & ql2xextended_error_logging) == level;
2448}
2449
2450/*
2451 * This function is for formatting and logging debug information.
2452 * It is to be used when vha is available. It formats the message
2453 * and logs it to the messages file.
2454 * parameters:
2455 * level: The level of the debug messages to be printed.
2456 *        If the ql2xextended_error_logging module parameter enables
2457 *        this level, the message will appear in the messages file.
2458 * vha:   Pointer to the scsi_qla_host_t.
2459 * id:    A unique message id within the level. It identifies the
2460 *        part of the code from where the message originated.
2461 * fmt:   The printf-style format string of the message to be displayed.
2462 */
2463void
2464ql_dbg(uint32_t level, scsi_qla_host_t *vha, int32_t id, const char *fmt, ...)
2465{
2466	va_list va;
2467	struct va_format vaf;
2468
2469	if (!ql_mask_match(level))
2470		return;
2471
2472	va_start(va, fmt);
2473
2474	vaf.fmt = fmt;
2475	vaf.va = &va;
2476
2477	if (vha != NULL) {
2478		const struct pci_dev *pdev = vha->hw->pdev;
2479		/* <module-name> <pci-name> <msg-id>:<host> Message */
2480		pr_warn("%s [%s]-%04x:%ld: %pV",
2481			QL_MSGHDR, dev_name(&(pdev->dev)), id + ql_dbg_offset,
2482			vha->host_no, &vaf);
2483	} else {
2484		pr_warn("%s [%s]-%04x: : %pV",
2485			QL_MSGHDR, "0000:00:00.0", id + ql_dbg_offset, &vaf);
2486	}
2487
2488	va_end(va);
2489
2490}
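
/*
 * Illustrative use of ql_dbg() -- the level, message id, format string and
 * variables below are placeholders only; real message ids must be taken
 * from (and recorded in) the table at the top of this file:
 *
 *	ql_dbg(ql_dbg_io, vha, 0x3000,
 *	    "Command completed, sp=%p.\n", sp);
 *
 * The message is emitted only when ql_mask_match() reports the level as
 * enabled.
 */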
2491
2492/*
2493 * This function is for formatting and logging debug information.
2494 * It is to be used when vha is not available and pci is available,
2495 * i.e., before host allocation. It formats the message and logs it
2496 * to the messages file.
2497 * parameters:
2498 * level: The level of the debug messages to be printed.
2499 *        If the ql2xextended_error_logging module parameter enables
2500 *        this level, the message will appear in the messages file.
2501 * pdev:  Pointer to the struct pci_dev.
2502 * id:    A unique message id within the level. It identifies the part
2503 *        of the code from where the message originated.
2504 * fmt:   The printf-style format string of the message to be displayed.
2505 */
2506void
2507ql_dbg_pci(uint32_t level, struct pci_dev *pdev, int32_t id,
2508	   const char *fmt, ...)
2509{
2510	va_list va;
2511	struct va_format vaf;
2512
2513	if (pdev == NULL)
2514		return;
2515	if (!ql_mask_match(level))
2516		return;
2517
2518	va_start(va, fmt);
2519
2520	vaf.fmt = fmt;
2521	vaf.va = &va;
2522
2523	/* <module-name> <dev-name>:<msg-id> Message */
2524	pr_warn("%s [%s]-%04x: : %pV",
2525		QL_MSGHDR, dev_name(&(pdev->dev)), id + ql_dbg_offset, &vaf);
2526
2527	va_end(va);
2528}
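
/*
 * Illustrative use of ql_dbg_pci() -- the message id below is a
 * placeholder; real ids come from the table at the top of this file:
 *
 *	ql_dbg_pci(ql_dbg_init, pdev, 0x0100,
 *	    "Probing adapter %s.\n", pci_name(pdev));
 */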
2529
2530/*
2531 * This function is for formatting and logging log messages.
2532 * It is to be used when vha is available. It formats the message
2533 * and logs it to the messages file. All the messages will be logged
2534 * irrespective of the value of ql2xextended_error_logging.
2535 * parameters:
2536 * level: The level of the log messages to be printed in the
2537 *        messages file.
2538 * vha:   Pointer to the scsi_qla_host_t
2539 * id:    A unique message id within the level. It identifies the
2540 *        part of the code from where the message originated.
2541 * fmt:   The printf-style format string of the message to be displayed.
2542 */
2543void
2544ql_log(uint32_t level, scsi_qla_host_t *vha, int32_t id, const char *fmt, ...)
2545{
2546	va_list va;
2547	struct va_format vaf;
2548	char pbuf[128];
2549
2550	if (level > ql_errlev)
2551		return;
2552
2553	if (vha != NULL) {
2554		const struct pci_dev *pdev = vha->hw->pdev;
2555		/* <module-name> <msg-id>:<host> Message */
2556		snprintf(pbuf, sizeof(pbuf), "%s [%s]-%04x:%ld: ",
2557			QL_MSGHDR, dev_name(&(pdev->dev)), id, vha->host_no);
2558	} else {
2559		snprintf(pbuf, sizeof(pbuf), "%s [%s]-%04x: : ",
2560			QL_MSGHDR, "0000:00:00.0", id);
2561	}
2562	pbuf[sizeof(pbuf) - 1] = 0;
2563
2564	va_start(va, fmt);
2565
2566	vaf.fmt = fmt;
2567	vaf.va = &va;
2568
2569	switch (level) {
2570	case ql_log_fatal: /* FATAL LOG */
2571		pr_crit("%s%pV", pbuf, &vaf);
2572		break;
2573	case ql_log_warn:
2574		pr_err("%s%pV", pbuf, &vaf);
2575		break;
2576	case ql_log_info:
2577		pr_warn("%s%pV", pbuf, &vaf);
2578		break;
2579	default:
2580		pr_info("%s%pV", pbuf, &vaf);
2581		break;
2582	}
2583
2584	va_end(va);
2585}
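
/*
 * Illustrative use of ql_log() -- the message id below is a placeholder;
 * real ids come from the table at the top of this file:
 *
 *	ql_log(ql_log_warn, vha, 0x5000,
 *	    "Link down -- waiting for link to come up.\n");
 *
 * Unlike ql_dbg(), the message is gated only by ql_errlev, not by
 * ql2xextended_error_logging.
 */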
2586
2587/*
2588 * This function is for formatting and logging log messages.
2589 * It is to be used when vha is not available and pci is available,
2590 * i.e., before host allocation. It formats the message and logs
2591 * it to the messages file. All the messages are logged irrespective
2592 * of the value of ql2xextended_error_logging.
2593 * parameters:
2594 * level: The level of the log messages to be printed in the
2595 *        messages file.
2596 * pdev:  Pointer to the struct pci_dev.
2597 * id:    A unique message id within the level. It identifies the
2598 *        part of the code from where the message originated.
2599 * fmt:   The printf-style format string of the message to be displayed.
2600 */
2601void
2602ql_log_pci(uint32_t level, struct pci_dev *pdev, int32_t id,
2603	   const char *fmt, ...)
2604{
2605	va_list va;
2606	struct va_format vaf;
2607	char pbuf[128];
2608
2609	if (pdev == NULL)
2610		return;
2611	if (level > ql_errlev)
2612		return;
2613
2614	/* <module-name> <dev-name>:<msg-id> Message */
2615	snprintf(pbuf, sizeof(pbuf), "%s [%s]-%04x: : ",
2616		 QL_MSGHDR, dev_name(&(pdev->dev)), id);
2617	pbuf[sizeof(pbuf) - 1] = 0;
2618
2619	va_start(va, fmt);
2620
2621	vaf.fmt = fmt;
2622	vaf.va = &va;
2623
2624	switch (level) {
2625	case ql_log_fatal: /* FATAL LOG */
2626		pr_crit("%s%pV", pbuf, &vaf);
2627		break;
2628	case ql_log_warn:
2629		pr_err("%s%pV", pbuf, &vaf);
2630		break;
2631	case ql_log_info:
2632		pr_warn("%s%pV", pbuf, &vaf);
2633		break;
2634	default:
2635		pr_info("%s%pV", pbuf, &vaf);
2636		break;
2637	}
2638
2639	va_end(va);
2640}
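
/*
 * Illustrative use of ql_log_pci() -- the message id below is a
 * placeholder; real ids come from the table at the top of this file:
 *
 *	ql_log_pci(ql_log_fatal, pdev, 0x0101,
 *	    "Failed to map PCI registers.\n");
 */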
2641
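/*
 * ql_dump_regs() - log the first six mailbox registers at the given debug
 * level.  The mailbox base is selected according to the adapter type
 * (ISP82xx, FWI-2 capable, or legacy ISPs).
 */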
2642void
2643ql_dump_regs(uint32_t level, scsi_qla_host_t *vha, int32_t id)
2644{
2645	int i;
2646	struct qla_hw_data *ha = vha->hw;
2647	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
2648	struct device_reg_24xx __iomem *reg24 = &ha->iobase->isp24;
2649	struct device_reg_82xx __iomem *reg82 = &ha->iobase->isp82;
2650	uint16_t __iomem *mbx_reg;
2651
2652	if (!ql_mask_match(level))
2653		return;
2654
2655	if (IS_P3P_TYPE(ha))
2656		mbx_reg = &reg82->mailbox_in[0];
2657	else if (IS_FWI2_CAPABLE(ha))
2658		mbx_reg = &reg24->mailbox0;
2659	else
2660		mbx_reg = MAILBOX_REG(ha, reg, 0);
2661
2662	ql_dbg(level, vha, id, "Mailbox registers:\n");
2663	for (i = 0; i < 6; i++)
2664		ql_dbg(level, vha, id,
2665		    "mbox[%d] 0x%04x\n", i, RD_REG_WORD(mbx_reg++));
2666}
2667
2668
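/*
 * ql_dump_buffer() - hex dump @size bytes of buffer @b at the given debug
 * level, 16 bytes per output line.
 */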
2669void
2670ql_dump_buffer(uint32_t level, scsi_qla_host_t *vha, int32_t id,
2671	uint8_t *b, uint32_t size)
2672{
2673	uint32_t cnt;
2674	uint8_t c;
2675
2676	if (!ql_mask_match(level))
2677		return;
2678
2679	ql_dbg(level, vha, id, " 0   1   2   3   4   5   6   7   8   "
2680	    "9  Ah  Bh  Ch  Dh  Eh  Fh\n");
2681	ql_dbg(level, vha, id, "----------------------------------"
2682	    "----------------------------\n");
2683
2684	ql_dbg(level, vha, id, " ");
2685	for (cnt = 0; cnt < size;) {
2686		c = *b++;
2687		printk("%02x", (uint32_t) c);
2688		cnt++;
2689		if (!(cnt % 16))
2690			printk("\n");
2691		else
2692			printk("  ");
2693	}
2694	if (cnt % 16)
2695		ql_dbg(level, vha, id, "\n");
2696}
2697