ql4_isr.c revision 7664a1fd76d2eb49b07443f5fc46c75f6a95c98b
/*
 * QLogic iSCSI HBA Driver
 * Copyright (c) 2003-2010 QLogic Corporation
 *
 * See LICENSE.qla4xxx for copyright and licensing details.
 */

#include "ql4_def.h"
#include "ql4_glbl.h"
#include "ql4_dbg.h"
#include "ql4_inline.h"

/**
 * qla4xxx_copy_sense - copy sense data into cmd sense buffer
 * @ha: Pointer to host adapter structure.
 * @sts_entry: Pointer to status entry structure.
 * @srb: Pointer to srb structure.
 **/
static void qla4xxx_copy_sense(struct scsi_qla_host *ha,
			       struct status_entry *sts_entry,
			       struct srb *srb)
{
	struct scsi_cmnd *cmd = srb->cmd;
	uint16_t sense_len;

	memset(cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
	sense_len = le16_to_cpu(sts_entry->senseDataByteCnt);
	if (sense_len == 0) {
		DEBUG2(ql4_printk(KERN_INFO, ha, "scsi%ld:%d:%d:%d: %s:"
				  " sense len 0\n", ha->host_no,
				  cmd->device->channel, cmd->device->id,
				  cmd->device->lun, __func__));
		ha->status_srb = NULL;
		return;
	}
	/* Save total available sense length,
	 * not to exceed cmd's sense buffer size */
	sense_len = min_t(uint16_t, sense_len, SCSI_SENSE_BUFFERSIZE);
	srb->req_sense_ptr = cmd->sense_buffer;
	srb->req_sense_len = sense_len;

	/* Copy sense from sts_entry pkt */
	sense_len = min_t(uint16_t, sense_len, IOCB_MAX_SENSEDATA_LEN);
	memcpy(cmd->sense_buffer, sts_entry->senseData, sense_len);

	DEBUG2(printk(KERN_INFO "scsi%ld:%d:%d:%d: %s: sense key = %x, "
		"ASL= %02x, ASC/ASCQ = %02x/%02x\n", ha->host_no,
		cmd->device->channel, cmd->device->id,
		cmd->device->lun, __func__,
		sts_entry->senseData[2] & 0x0f,
		sts_entry->senseData[7],
		sts_entry->senseData[12],
		sts_entry->senseData[13]));

	DEBUG5(qla4xxx_dump_buffer(cmd->sense_buffer, sense_len));
	srb->flags |= SRB_GOT_SENSE;

	/* Update srb, in case a sts_cont pkt follows */
	srb->req_sense_ptr += sense_len;
	srb->req_sense_len -= sense_len;
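	/*
	 * If sense data remains beyond what fit in this IOCB, park the
	 * srb so the status-continuation entries that follow can append
	 * the rest into cmd->sense_buffer.
	 */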
	if (srb->req_sense_len != 0)
		ha->status_srb = srb;
	else
		ha->status_srb = NULL;
}

/**
 * qla4xxx_status_cont_entry - Process a Status Continuation entry.
 * @ha: SCSI driver HA context
 * @sts_cont: Entry pointer
 *
 * Extended sense data.
 */
static void
qla4xxx_status_cont_entry(struct scsi_qla_host *ha,
			  struct status_cont_entry *sts_cont)
{
	struct srb *srb = ha->status_srb;
	struct scsi_cmnd *cmd;
	uint16_t sense_len;

	if (srb == NULL)
		return;

	cmd = srb->cmd;
	if (cmd == NULL) {
		DEBUG2(printk(KERN_INFO "scsi%ld: %s: Cmd already returned "
			"back to OS srb=%p srb->state:%d\n", ha->host_no,
			__func__, srb, srb->state));
		ha->status_srb = NULL;
		return;
	}

	/* Copy sense data. */
	sense_len = min_t(uint16_t, srb->req_sense_len,
			  IOCB_MAX_EXT_SENSEDATA_LEN);
	memcpy(srb->req_sense_ptr, sts_cont->ext_sense_data, sense_len);
	DEBUG5(qla4xxx_dump_buffer(srb->req_sense_ptr, sense_len));

	srb->req_sense_ptr += sense_len;
	srb->req_sense_len -= sense_len;

	/* Place command on done queue. */
	if (srb->req_sense_len == 0) {
		kref_put(&srb->srb_ref, qla4xxx_srb_compl);
		ha->status_srb = NULL;
	}
}

/**
 * qla4xxx_status_entry - processes status IOCBs
 * @ha: Pointer to host adapter structure.
 * @sts_entry: Pointer to status entry structure.
 **/
static void qla4xxx_status_entry(struct scsi_qla_host *ha,
				 struct status_entry *sts_entry)
{
	uint8_t scsi_status;
	struct scsi_cmnd *cmd;
	struct srb *srb;
	struct ddb_entry *ddb_entry;
	uint32_t residual;

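	/*
	 * The IOCB handle is the index this driver assigned when the
	 * command was queued; use it to look up and remove the owning
	 * srb from the active array.
	 */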
	srb = qla4xxx_del_from_active_array(ha, le32_to_cpu(sts_entry->handle));
	if (!srb) {
		ql4_printk(KERN_WARNING, ha, "%s invalid status entry: "
			   "handle=0x%x, srb=%p\n", __func__,
			   le32_to_cpu(sts_entry->handle), srb);
		if (is_qla8022(ha))
			set_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags);
		else
			set_bit(DPC_RESET_HA, &ha->dpc_flags);
		return;
	}

	cmd = srb->cmd;
	if (cmd == NULL) {
		DEBUG2(printk("scsi%ld: %s: Command already returned back to "
			      "OS pkt->handle=%d srb=%p srb->state:%d\n",
			      ha->host_no, __func__, sts_entry->handle,
			      srb, srb->state));
		ql4_printk(KERN_WARNING, ha, "Command is NULL:"
		    " already returned to OS (srb=%p)\n", srb);
		return;
	}

	ddb_entry = srb->ddb;
	if (ddb_entry == NULL) {
		cmd->result = DID_NO_CONNECT << 16;
		goto status_entry_exit;
	}

	residual = le32_to_cpu(sts_entry->residualByteCnt);

	/* Translate ISP error to a Linux SCSI error. */
	scsi_status = sts_entry->scsiStatus;
	switch (sts_entry->completionStatus) {
	case SCS_COMPLETE:

		if (sts_entry->iscsiFlags & ISCSI_FLAG_RESIDUAL_OVER) {
			cmd->result = DID_ERROR << 16;
			break;
		}

		if (sts_entry->iscsiFlags & ISCSI_FLAG_RESIDUAL_UNDER) {
			scsi_set_resid(cmd, residual);
			if (!scsi_status && ((scsi_bufflen(cmd) - residual) <
			    cmd->underflow)) {

				cmd->result = DID_ERROR << 16;

				DEBUG2(printk("scsi%ld:%d:%d:%d: %s: "
					"Mid-layer Data underrun, "
					"xferlen = 0x%x, "
					"residual = 0x%x\n", ha->host_no,
					cmd->device->channel,
					cmd->device->id,
					cmd->device->lun, __func__,
					scsi_bufflen(cmd), residual));
				break;
			}
		}

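		/*
		 * cmd->result packs the host byte in bits 16-23 and the
		 * SCSI status byte in bits 0-7, so DID_OK << 16 |
		 * scsi_status hands both the transport verdict and the
		 * target status to the midlayer.
		 */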
		cmd->result = DID_OK << 16 | scsi_status;

		if (scsi_status != SCSI_CHECK_CONDITION)
			break;

		/* Copy Sense Data into sense buffer. */
		qla4xxx_copy_sense(ha, sts_entry, srb);
		break;

	case SCS_INCOMPLETE:
		/* Always set the status to DID_ERROR, since
		 * all conditions result in that status anyway */
		cmd->result = DID_ERROR << 16;
		break;

	case SCS_RESET_OCCURRED:
		DEBUG2(printk("scsi%ld:%d:%d:%d: %s: Device RESET occurred\n",
			      ha->host_no, cmd->device->channel,
			      cmd->device->id, cmd->device->lun, __func__));

		cmd->result = DID_RESET << 16;
		break;

	case SCS_ABORTED:
		DEBUG2(printk("scsi%ld:%d:%d:%d: %s: Abort occurred\n",
			      ha->host_no, cmd->device->channel,
			      cmd->device->id, cmd->device->lun, __func__));

		cmd->result = DID_RESET << 16;
		break;

	case SCS_TIMEOUT:
		DEBUG2(printk(KERN_INFO "scsi%ld:%d:%d:%d: Timeout\n",
			      ha->host_no, cmd->device->channel,
			      cmd->device->id, cmd->device->lun));

		cmd->result = DID_TRANSPORT_DISRUPTED << 16;

		/*
		 * Mark device missing so that we won't continue to send
		 * I/O to this device.  We should get a ddb state change
		 * AEN soon.
		 */
		if (iscsi_is_session_online(ddb_entry->sess))
			qla4xxx_mark_device_missing(ddb_entry->sess);
		break;

	case SCS_DATA_UNDERRUN:
	case SCS_DATA_OVERRUN:
		if ((sts_entry->iscsiFlags & ISCSI_FLAG_RESIDUAL_OVER) ||
		    (sts_entry->completionStatus == SCS_DATA_OVERRUN)) {
			DEBUG2(printk("scsi%ld:%d:%d:%d: %s: Data overrun\n",
				      ha->host_no,
				      cmd->device->channel, cmd->device->id,
				      cmd->device->lun, __func__));

			cmd->result = DID_ERROR << 16;
			break;
		}

		scsi_set_resid(cmd, residual);

		if (sts_entry->iscsiFlags & ISCSI_FLAG_RESIDUAL_UNDER) {

			/* Both the firmware and target reported UNDERRUN:
			 *
			 * MID-LAYER UNDERFLOW case:
			 * Some kernels do not properly detect midlayer
			 * underflow, so we manually check it and return
			 * ERROR if the minimum required data was not
			 * received.
			 *
			 * ALL OTHER cases:
			 * Fall thru to check scsi_status
			 */
			if (!scsi_status && (scsi_bufflen(cmd) - residual) <
			    cmd->underflow) {
				DEBUG2(ql4_printk(KERN_INFO, ha,
						  "scsi%ld:%d:%d:%d: %s: Mid-layer Data underrun, xferlen = 0x%x, residual = 0x%x\n",
						  ha->host_no,
						  cmd->device->channel,
						  cmd->device->id,
						  cmd->device->lun, __func__,
						  scsi_bufflen(cmd),
						  residual));

				cmd->result = DID_ERROR << 16;
				break;
			}

		} else if (scsi_status != SAM_STAT_TASK_SET_FULL &&
			   scsi_status != SAM_STAT_BUSY) {

			/*
			 * The firmware reports UNDERRUN, but the target does
			 * not report it:
			 *
			 *   scsi_status     |    host_byte       device_byte
			 *                   |     (19:16)          (7:0)
			 *   =============   |    =========       ===========
			 *   TASK_SET_FULL   |    DID_OK          scsi_status
			 *   BUSY            |    DID_OK          scsi_status
			 *   ALL OTHERS      |    DID_ERROR       scsi_status
			 *
			 *   Note: If scsi_status is task set full or busy,
			 *   then this else if would fall thru to check the
			 *   scsi_status and return DID_OK.
			 */

			DEBUG2(ql4_printk(KERN_INFO, ha,
					  "scsi%ld:%d:%d:%d: %s: Dropped frame(s) detected (0x%x of 0x%x bytes).\n",
					  ha->host_no,
					  cmd->device->channel,
					  cmd->device->id,
					  cmd->device->lun, __func__,
					  residual,
					  scsi_bufflen(cmd)));

			cmd->result = DID_ERROR << 16 | scsi_status;
			goto check_scsi_status;
		}

		cmd->result = DID_OK << 16 | scsi_status;

check_scsi_status:
		if (scsi_status == SAM_STAT_CHECK_CONDITION)
			qla4xxx_copy_sense(ha, sts_entry, srb);

		break;

	case SCS_DEVICE_LOGGED_OUT:
	case SCS_DEVICE_UNAVAILABLE:
		DEBUG2(printk(KERN_INFO "scsi%ld:%d:%d:%d: SCS_DEVICE "
		    "state: 0x%x\n", ha->host_no,
		    cmd->device->channel, cmd->device->id,
		    cmd->device->lun, sts_entry->completionStatus));
		/*
		 * Mark device missing so that we won't continue to
		 * send I/O to this device.  We should get a ddb
		 * state change AEN soon.
		 */
		if (iscsi_is_session_online(ddb_entry->sess))
			qla4xxx_mark_device_missing(ddb_entry->sess);

		cmd->result = DID_TRANSPORT_DISRUPTED << 16;
		break;

	case SCS_QUEUE_FULL:
		/*
		 * SCSI Mid-Layer handles device queue full
		 */
		cmd->result = DID_OK << 16 | sts_entry->scsiStatus;
		DEBUG2(printk("scsi%ld:%d:%d:%d: %s: QUEUE FULL detected "
			      "compl=%02x, scsi=%02x, state=%02x, iFlags=%02x,"
			      " iResp=%02x\n", ha->host_no,
			      cmd->device->channel, cmd->device->id,
			      cmd->device->lun, __func__,
			      sts_entry->completionStatus,
			      sts_entry->scsiStatus, sts_entry->state_flags,
			      sts_entry->iscsiFlags,
			      sts_entry->iscsiResponse));
		break;

	default:
		cmd->result = DID_ERROR << 16;
		break;
	}

status_entry_exit:

	/* complete the request, if not waiting for status_continuation pkt */
	srb->cc_stat = sts_entry->completionStatus;
	if (ha->status_srb == NULL)
		kref_put(&srb->srb_ref, qla4xxx_srb_compl);
}

/**
 * qla4xxx_passthru_status_entry - processes passthru status IOCBs (0x3C)
 * @ha: Pointer to host adapter structure.
 * @sts_entry: Pointer to status entry structure.
 **/
static void qla4xxx_passthru_status_entry(struct scsi_qla_host *ha,
					  struct passthru_status *sts_entry)
{
	struct iscsi_task *task;
	struct ddb_entry *ddb_entry;
	struct ql4_task_data *task_data;
	struct iscsi_cls_conn *cls_conn;
	struct iscsi_conn *conn;
	itt_t itt;
	uint32_t fw_ddb_index;

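	/*
	 * For passthru IOCBs the handle field carries the iSCSI ITT of
	 * the originating task rather than an active-array index, so it
	 * is resolved through the session's ITT lookup below.
	 */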
	itt = sts_entry->handle;
	fw_ddb_index = le32_to_cpu(sts_entry->target);

	ddb_entry = qla4xxx_lookup_ddb_by_fw_index(ha, fw_ddb_index);

	if (ddb_entry == NULL) {
		ql4_printk(KERN_ERR, ha, "%s: Invalid target index = 0x%x\n",
			   __func__, fw_ddb_index);
		return;
	}

	cls_conn = ddb_entry->conn;
	conn = cls_conn->dd_data;
	spin_lock(&conn->session->lock);
	task = iscsi_itt_to_task(conn, itt);
	spin_unlock(&conn->session->lock);

	if (task == NULL) {
		ql4_printk(KERN_ERR, ha, "%s: Task is NULL\n", __func__);
		return;
	}

	task_data = task->dd_data;
	memcpy(&task_data->sts, sts_entry, sizeof(struct passthru_status));
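	/*
	 * Return the request-queue slots this command consumed and defer
	 * the rest of the completion to the task workqueue; the status
	 * was copied above so the work item can run outside the ISR.
	 */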
	ha->req_q_count += task_data->iocb_req_cnt;
	ha->iocb_cnt -= task_data->iocb_req_cnt;
	queue_work(ha->task_wq, &task_data->task_work);
}

static struct mrb *qla4xxx_del_mrb_from_active_array(struct scsi_qla_host *ha,
						     uint32_t index)
{
	struct mrb *mrb = NULL;

	/* validate handle and remove from active array */
	if (index >= MAX_MRB)
		return mrb;

	mrb = ha->active_mrb_array[index];
	ha->active_mrb_array[index] = NULL;
	if (!mrb)
		return mrb;

	/* update counters */
	ha->req_q_count += mrb->iocb_cnt;
	ha->iocb_cnt -= mrb->iocb_cnt;

	return mrb;
}

static void qla4xxx_mbox_status_entry(struct scsi_qla_host *ha,
				      struct mbox_status_iocb *mbox_sts_entry)
{
	struct mrb *mrb;
	uint32_t status;
	uint32_t data_size;

	mrb = qla4xxx_del_mrb_from_active_array(ha,
					le32_to_cpu(mbox_sts_entry->handle));

	if (mrb == NULL) {
		ql4_printk(KERN_WARNING, ha, "%s: mrb[%d] is null\n", __func__,
			   le32_to_cpu(mbox_sts_entry->handle));
		return;
	}

	switch (mrb->mbox_cmd) {
	case MBOX_CMD_PING:
		DEBUG2(ql4_printk(KERN_INFO, ha, "%s: mbox_cmd = 0x%x, "
				  "mbox_sts[0] = 0x%x, mbox_sts[6] = 0x%x\n",
				  __func__, mrb->mbox_cmd,
				  mbox_sts_entry->out_mbox[0],
				  mbox_sts_entry->out_mbox[6]));

		if (mbox_sts_entry->out_mbox[0] == MBOX_STS_COMMAND_COMPLETE)
			status = ISCSI_PING_SUCCESS;
		else
			status = mbox_sts_entry->out_mbox[6];

		data_size = sizeof(mbox_sts_entry->out_mbox);

		qla4xxx_post_ping_evt_work(ha, status, mrb->pid, data_size,
					(uint8_t *) mbox_sts_entry->out_mbox);
		break;

	default:
		DEBUG2(ql4_printk(KERN_WARNING, ha, "%s: invalid mbox_cmd = "
				  "0x%x\n", __func__, mrb->mbox_cmd));
	}

	kfree(mrb);
}

/**
 * qla4xxx_process_response_queue - process response queue completions
 * @ha: Pointer to host adapter structure.
 *
 * This routine processes response queue completions in interrupt context.
 * Hardware_lock locked upon entry
 **/
void qla4xxx_process_response_queue(struct scsi_qla_host *ha)
{
	uint32_t count = 0;
	struct srb *srb = NULL;
	struct status_entry *sts_entry;

	/* Process all responses from response queue */
	while ((ha->response_ptr->signature != RESPONSE_PROCESSED)) {
		sts_entry = (struct status_entry *) ha->response_ptr;
		count++;

		/* Advance pointers for next entry */
		if (ha->response_out == (RESPONSE_QUEUE_DEPTH - 1)) {
			ha->response_out = 0;
			ha->response_ptr = ha->response_ring;
		} else {
			ha->response_out++;
			ha->response_ptr++;
		}

		/* process entry */
		switch (sts_entry->hdr.entryType) {
		case ET_STATUS:
			/* Common status */
			qla4xxx_status_entry(ha, sts_entry);
			break;

		case ET_PASSTHRU_STATUS:
			if (sts_entry->hdr.systemDefined == SD_ISCSI_PDU)
				qla4xxx_passthru_status_entry(ha,
					(struct passthru_status *)sts_entry);
			else
				ql4_printk(KERN_ERR, ha,
					   "%s: Invalid status received\n",
					   __func__);

			break;

		case ET_STATUS_CONTINUATION:
			qla4xxx_status_cont_entry(ha,
				(struct status_cont_entry *) sts_entry);
			break;

		case ET_COMMAND:
			/* ISP device queue is full. Command not
			 * accepted by ISP.  Queue command for
			 * later */

			srb = qla4xxx_del_from_active_array(ha,
					    le32_to_cpu(sts_entry->handle));
			if (srb == NULL)
				goto exit_prq_invalid_handle;

			DEBUG2(printk("scsi%ld: %s: FW device queue full, "
				      "srb %p\n", ha->host_no, __func__, srb));

			/* Retry normally by sending it back with
			 * DID_BUS_BUSY */
			srb->cmd->result = DID_BUS_BUSY << 16;
			kref_put(&srb->srb_ref, qla4xxx_srb_compl);
			break;

		case ET_CONTINUE:
			/* Just throw away the continuation entries */
			DEBUG2(printk("scsi%ld: %s: Continuation entry - "
				      "ignoring\n", ha->host_no, __func__));
			break;

		case ET_MBOX_STATUS:
			DEBUG2(ql4_printk(KERN_INFO, ha,
					  "%s: mbox status IOCB\n", __func__));
			qla4xxx_mbox_status_entry(ha,
					(struct mbox_status_iocb *)sts_entry);
			break;

		default:
			/*
			 * Invalid entry in response queue, reset RISC
			 * firmware.
			 */
			DEBUG2(printk("scsi%ld: %s: Invalid entry %x in "
				      "response queue\n", ha->host_no,
				      __func__,
				      sts_entry->hdr.entryType));
			goto exit_prq_error;
		}
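		/*
		 * Mark the entry consumed before telling the ISP we are
		 * done; the write barrier orders the signature store ahead
		 * of the out-pointer update in complete_iocb().
		 */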
		((struct response *)sts_entry)->signature = RESPONSE_PROCESSED;
		wmb();
	}

	/*
	 * Tell ISP we're done with response(s). This also clears the interrupt.
	 */
	ha->isp_ops->complete_iocb(ha);

	return;

exit_prq_invalid_handle:
	DEBUG2(printk("scsi%ld: %s: Invalid handle(srb)=%p type=%x IOCS=%x\n",
		      ha->host_no, __func__, srb, sts_entry->hdr.entryType,
		      sts_entry->completionStatus));

exit_prq_error:
	ha->isp_ops->complete_iocb(ha);
	set_bit(DPC_RESET_HA, &ha->dpc_flags);
}

/**
 * qla4xxx_isr_decode_mailbox - decodes mailbox status
 * @ha: Pointer to host adapter structure.
 * @mbox_status: Mailbox status.
 *
 * This routine decodes the mailbox status during the ISR.
 * Hardware_lock locked upon entry. Runs in interrupt context.
 **/
static void qla4xxx_isr_decode_mailbox(struct scsi_qla_host *ha,
				       uint32_t mbox_status)
{
	int i;
	uint32_t mbox_sts[MBOX_AEN_REG_COUNT];

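	/*
	 * The top nibble of the status word selects its class:
	 * MBOX_COMPLETION_STATUS values complete a driver-issued mailbox
	 * command, while MBOX_ASYNC_EVENT_STATUS values are unsolicited
	 * AENs decoded in the branch below.
	 */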
	if ((mbox_status == MBOX_STS_BUSY) ||
	    (mbox_status == MBOX_STS_INTERMEDIATE_COMPLETION) ||
	    (mbox_status >> 12 == MBOX_COMPLETION_STATUS)) {
		ha->mbox_status[0] = mbox_status;

		if (test_bit(AF_MBOX_COMMAND, &ha->flags)) {
			/*
			 * Copy all mailbox registers to a temporary
			 * location and set mailbox command done flag
			 */
			for (i = 0; i < ha->mbox_status_count; i++)
				ha->mbox_status[i] = is_qla8022(ha)
				    ? readl(&ha->qla4_82xx_reg->mailbox_out[i])
				    : readl(&ha->reg->mailbox[i]);

			set_bit(AF_MBOX_COMMAND_DONE, &ha->flags);

			if (test_bit(AF_MBOX_COMMAND_NOPOLL, &ha->flags))
				complete(&ha->mbx_intr_comp);
		}
	} else if (mbox_status >> 12 == MBOX_ASYNC_EVENT_STATUS) {
		for (i = 0; i < MBOX_AEN_REG_COUNT; i++)
			mbox_sts[i] = is_qla8022(ha)
			    ? readl(&ha->qla4_82xx_reg->mailbox_out[i])
			    : readl(&ha->reg->mailbox[i]);

		/* Immediately process the AENs that don't require much work.
		 * Only queue the database_changed AENs */
		if (ha->aen_log.count < MAX_AEN_ENTRIES) {
			for (i = 0; i < MBOX_AEN_REG_COUNT; i++)
				ha->aen_log.entry[ha->aen_log.count].mbox_sts[i] =
				    mbox_sts[i];
			ha->aen_log.count++;
		}
		switch (mbox_status) {
		case MBOX_ASTS_SYSTEM_ERROR:
			/* Log Mailbox registers */
			ql4_printk(KERN_INFO, ha, "%s: System Err\n", __func__);
			qla4xxx_dump_registers(ha);

			if (ql4xdontresethba) {
				DEBUG2(printk("scsi%ld: %s: Don't Reset HBA\n",
				    ha->host_no, __func__));
			} else {
				set_bit(AF_GET_CRASH_RECORD, &ha->flags);
				set_bit(DPC_RESET_HA, &ha->dpc_flags);
			}
			break;

		case MBOX_ASTS_REQUEST_TRANSFER_ERROR:
		case MBOX_ASTS_RESPONSE_TRANSFER_ERROR:
		case MBOX_ASTS_NVRAM_INVALID:
		case MBOX_ASTS_IP_ADDRESS_CHANGED:
		case MBOX_ASTS_DHCP_LEASE_EXPIRED:
			DEBUG2(printk("scsi%ld: AEN %04x, ERROR Status, "
				      "Reset HA\n", ha->host_no, mbox_status));
			if (is_qla8022(ha))
				set_bit(DPC_RESET_HA_FW_CONTEXT,
					&ha->dpc_flags);
			else
				set_bit(DPC_RESET_HA, &ha->dpc_flags);
			break;

		case MBOX_ASTS_LINK_UP:
			set_bit(AF_LINK_UP, &ha->flags);
			if (test_bit(AF_INIT_DONE, &ha->flags))
				set_bit(DPC_LINK_CHANGED, &ha->dpc_flags);

			ql4_printk(KERN_INFO, ha, "%s: LINK UP\n", __func__);
			qla4xxx_post_aen_work(ha, ISCSI_EVENT_LINKUP,
					      sizeof(mbox_sts),
					      (uint8_t *) mbox_sts);
			break;

		case MBOX_ASTS_LINK_DOWN:
			clear_bit(AF_LINK_UP, &ha->flags);
			if (test_bit(AF_INIT_DONE, &ha->flags))
				set_bit(DPC_LINK_CHANGED, &ha->dpc_flags);

			ql4_printk(KERN_INFO, ha, "%s: LINK DOWN\n", __func__);
			qla4xxx_post_aen_work(ha, ISCSI_EVENT_LINKDOWN,
					      sizeof(mbox_sts),
					      (uint8_t *) mbox_sts);
			break;

		case MBOX_ASTS_HEARTBEAT:
			ha->seconds_since_last_heartbeat = 0;
			break;

		case MBOX_ASTS_DHCP_LEASE_ACQUIRED:
			DEBUG2(printk("scsi%ld: AEN %04x DHCP LEASE "
				      "ACQUIRED\n", ha->host_no, mbox_status));
			set_bit(DPC_GET_DHCP_IP_ADDR, &ha->dpc_flags);
			break;

		case MBOX_ASTS_PROTOCOL_STATISTIC_ALARM:
		case MBOX_ASTS_SCSI_COMMAND_PDU_REJECTED: /* Target mode only */
		case MBOX_ASTS_UNSOLICITED_PDU_RECEIVED:  /* Connection mode */
		case MBOX_ASTS_IPSEC_SYSTEM_FATAL_ERROR:
		case MBOX_ASTS_SUBNET_STATE_CHANGE:
		case MBOX_ASTS_DUPLICATE_IP:
			/* No action */
			DEBUG2(printk("scsi%ld: AEN %04x\n", ha->host_no,
				      mbox_status));
			break;

		case MBOX_ASTS_IP_ADDR_STATE_CHANGED:
			printk(KERN_INFO "scsi%ld: AEN %04x, mbox_sts[2]=%04x, "
			    "mbox_sts[3]=%04x\n", ha->host_no, mbox_sts[0],
			    mbox_sts[2], mbox_sts[3]);

			/* mbox_sts[2] = old ACB state
			 * mbox_sts[3] = new ACB state */
			if ((mbox_sts[3] == ACB_STATE_VALID) &&
			    ((mbox_sts[2] == ACB_STATE_TENTATIVE) ||
			    (mbox_sts[2] == ACB_STATE_ACQUIRING)))
				set_bit(DPC_GET_DHCP_IP_ADDR, &ha->dpc_flags);
			else if ((mbox_sts[3] == ACB_STATE_ACQUIRING) &&
				 (mbox_sts[2] == ACB_STATE_VALID)) {
				if (is_qla8022(ha))
					set_bit(DPC_RESET_HA_FW_CONTEXT,
						&ha->dpc_flags);
				else
					set_bit(DPC_RESET_HA, &ha->dpc_flags);
			} else if (mbox_sts[3] == ACB_STATE_UNCONFIGURED)
				complete(&ha->disable_acb_comp);
			break;

		case MBOX_ASTS_MAC_ADDRESS_CHANGED:
		case MBOX_ASTS_DNS:
			/* No action */
			DEBUG2(printk(KERN_INFO "scsi%ld: AEN %04x, "
				      "mbox_sts[1]=%04x, mbox_sts[2]=%04x\n",
				      ha->host_no, mbox_sts[0],
				      mbox_sts[1], mbox_sts[2]));
			break;

		case MBOX_ASTS_SELF_TEST_FAILED:
		case MBOX_ASTS_LOGIN_FAILED:
			/* No action */
			DEBUG2(printk("scsi%ld: AEN %04x, mbox_sts[1]=%04x, "
				      "mbox_sts[2]=%04x, mbox_sts[3]=%04x\n",
				      ha->host_no, mbox_sts[0], mbox_sts[1],
				      mbox_sts[2], mbox_sts[3]));
			break;

		case MBOX_ASTS_DATABASE_CHANGED:
			/* Queue AEN information and process it in the DPC
			 * routine */
			if (ha->aen_q_count > 0) {

				/* decrement available counter */
				ha->aen_q_count--;

				for (i = 0; i < MBOX_AEN_REG_COUNT; i++)
					ha->aen_q[ha->aen_in].mbox_sts[i] =
					    mbox_sts[i];

				/* print debug message */
				DEBUG2(printk("scsi%ld: AEN[%d] %04x queued "
					      "mb1:0x%x mb2:0x%x mb3:0x%x "
					      "mb4:0x%x mb5:0x%x\n",
					      ha->host_no, ha->aen_in,
					      mbox_sts[0], mbox_sts[1],
					      mbox_sts[2], mbox_sts[3],
					      mbox_sts[4], mbox_sts[5]));

				/* advance pointer */
				ha->aen_in++;
				if (ha->aen_in == MAX_AEN_ENTRIES)
					ha->aen_in = 0;

				/* The DPC routine will process the aen */
				set_bit(DPC_AEN, &ha->dpc_flags);
			} else {
				DEBUG2(printk("scsi%ld: %s: aen %04x, queue "
					      "overflowed!  AEN LOST!!\n",
					      ha->host_no, __func__,
					      mbox_sts[0]));

				DEBUG2(printk("scsi%ld: DUMP AEN QUEUE\n",
					      ha->host_no));

				/* Dump the queued entries, not the
				 * current (dropped) AEN */
				for (i = 0; i < MAX_AEN_ENTRIES; i++) {
					DEBUG2(printk("AEN[%d] %04x %04x %04x "
						      "%04x\n", i,
						      ha->aen_q[i].mbox_sts[0],
						      ha->aen_q[i].mbox_sts[1],
						      ha->aen_q[i].mbox_sts[2],
						      ha->aen_q[i].mbox_sts[3]));
				}
			}
			break;

		case MBOX_ASTS_TXSCVR_INSERTED:
			DEBUG2(printk(KERN_WARNING
			    "scsi%ld: AEN %04x Transceiver"
			    " inserted\n", ha->host_no, mbox_sts[0]));
			break;

		case MBOX_ASTS_TXSCVR_REMOVED:
			DEBUG2(printk(KERN_WARNING
			    "scsi%ld: AEN %04x Transceiver"
			    " removed\n", ha->host_no, mbox_sts[0]));
			break;

		default:
			DEBUG2(printk(KERN_WARNING
				      "scsi%ld: AEN %04x UNKNOWN\n",
				      ha->host_no, mbox_sts[0]));
			break;
		}
	} else {
		DEBUG2(printk("scsi%ld: Unknown mailbox status %08X\n",
			      ha->host_no, mbox_status));

		ha->mbox_status[0] = mbox_status;
	}
}

/**
 * qla4_82xx_interrupt_service_routine - isr
 * @ha: pointer to host adapter structure.
 * @intr_status: Interrupt status read from the host status register.
 *
 * This is the main interrupt service routine.
 * hardware_lock locked upon entry. Runs in interrupt context.
 **/
void qla4_82xx_interrupt_service_routine(struct scsi_qla_host *ha,
    uint32_t intr_status)
{
	/* Process response queue interrupt. */
	if (intr_status & HSRX_RISC_IOCB_INT)
		qla4xxx_process_response_queue(ha);

	/* Process mailbox/asynch event interrupt.*/
	if (intr_status & HSRX_RISC_MB_INT)
		qla4xxx_isr_decode_mailbox(ha,
		    readl(&ha->qla4_82xx_reg->mailbox_out[0]));

	/* clear the interrupt */
	writel(0, &ha->qla4_82xx_reg->host_int);
	readl(&ha->qla4_82xx_reg->host_int);
}

/**
 * qla4xxx_interrupt_service_routine - isr
 * @ha: pointer to host adapter structure.
 * @intr_status: Interrupt status read from the control/status register.
 *
 * This is the main interrupt service routine.
 * hardware_lock locked upon entry. Runs in interrupt context.
 **/
void qla4xxx_interrupt_service_routine(struct scsi_qla_host *ha,
				       uint32_t intr_status)
{
	/* Process response queue interrupt. */
	if (intr_status & CSR_SCSI_COMPLETION_INTR)
		qla4xxx_process_response_queue(ha);

	/* Process mailbox/asynch event interrupt.*/
	if (intr_status & CSR_SCSI_PROCESSOR_INTR) {
		qla4xxx_isr_decode_mailbox(ha,
					   readl(&ha->reg->mailbox[0]));

		/* Clear Mailbox Interrupt */
		writel(set_rmask(CSR_SCSI_PROCESSOR_INTR),
		       &ha->reg->ctrl_status);
		readl(&ha->reg->ctrl_status);
	}
}

/**
 * qla4_82xx_spurious_interrupt - processes spurious interrupt
 * @ha: pointer to host adapter structure.
 * @reqs_count: Number of requests serviced so far in this invocation.
 *
 **/
static void qla4_82xx_spurious_interrupt(struct scsi_qla_host *ha,
    uint8_t reqs_count)
{
	if (reqs_count)
		return;

	DEBUG2(ql4_printk(KERN_INFO, ha, "Spurious Interrupt\n"));
	if (is_qla8022(ha)) {
		writel(0, &ha->qla4_82xx_reg->host_int);
		if (test_bit(AF_INTx_ENABLED, &ha->flags))
			qla4_82xx_wr_32(ha, ha->nx_legacy_intr.tgt_mask_reg,
			    0xfbff);
	}
	ha->spurious_int_count++;
}

/**
 * qla4xxx_intr_handler - hardware interrupt handler.
 * @irq: Unused
 * @dev_id: Pointer to host adapter structure
 **/
irqreturn_t qla4xxx_intr_handler(int irq, void *dev_id)
{
	struct scsi_qla_host *ha;
	uint32_t intr_status;
	unsigned long flags = 0;
	uint8_t reqs_count = 0;

	ha = (struct scsi_qla_host *) dev_id;
	if (!ha) {
		DEBUG2(printk(KERN_INFO
			      "qla4xxx: Interrupt with NULL host ptr\n"));
		return IRQ_NONE;
	}

	spin_lock_irqsave(&ha->hardware_lock, flags);

	ha->isr_count++;
	/*
	 * Repeatedly service interrupts up to a maximum of
	 * MAX_REQS_SERVICED_PER_INTR
	 */
	while (1) {
		/*
		 * Read interrupt status: if the firmware's shadow
		 * response-queue in-pointer has moved past our out-pointer,
		 * completions are pending, so synthesize a completion
		 * interrupt without touching the control/status register.
		 */
		if (ha->isp_ops->rd_shdw_rsp_q_in(ha) !=
		    ha->response_out)
			intr_status = CSR_SCSI_COMPLETION_INTR;
		else
			intr_status = readl(&ha->reg->ctrl_status);

		if ((intr_status &
		    (CSR_SCSI_RESET_INTR|CSR_FATAL_ERROR|INTR_PENDING)) == 0) {
			if (reqs_count == 0)
				ha->spurious_int_count++;
			break;
		}

		if (intr_status & CSR_FATAL_ERROR) {
			DEBUG2(printk(KERN_INFO "scsi%ld: Fatal Error, "
				      "Status 0x%04x\n", ha->host_no,
				      readl(isp_port_error_status(ha))));

			/* Issue Soft Reset to clear this error condition.
			 * This will prevent the RISC from repeatedly
			 * interrupting the driver; thus, allowing the DPC to
			 * get scheduled to continue error recovery.
			 * NOTE: Disabling RISC interrupts does not work in
			 * this case, as CSR_FATAL_ERROR overrides
			 * CSR_SCSI_INTR_ENABLE */
			if ((readl(&ha->reg->ctrl_status) &
			     CSR_SCSI_RESET_INTR) == 0) {
				writel(set_rmask(CSR_SOFT_RESET),
				       &ha->reg->ctrl_status);
				readl(&ha->reg->ctrl_status);
			}

			writel(set_rmask(CSR_FATAL_ERROR),
			       &ha->reg->ctrl_status);
			readl(&ha->reg->ctrl_status);

			__qla4xxx_disable_intrs(ha);

			set_bit(DPC_RESET_HA, &ha->dpc_flags);

			break;
		} else if (intr_status & CSR_SCSI_RESET_INTR) {
			clear_bit(AF_ONLINE, &ha->flags);
			__qla4xxx_disable_intrs(ha);

			writel(set_rmask(CSR_SCSI_RESET_INTR),
			       &ha->reg->ctrl_status);
			readl(&ha->reg->ctrl_status);

			if (!test_bit(AF_HA_REMOVAL, &ha->flags))
				set_bit(DPC_RESET_HA_INTR, &ha->dpc_flags);

			break;
		} else if (intr_status & INTR_PENDING) {
			ha->isp_ops->interrupt_service_routine(ha, intr_status);
			ha->total_io_count++;
			if (++reqs_count == MAX_REQS_SERVICED_PER_INTR)
				break;
		}
	}

	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return IRQ_HANDLED;
}

/**
 * qla4_82xx_intr_handler - hardware interrupt handler.
 * @irq: Unused
 * @dev_id: Pointer to host adapter structure
 **/
irqreturn_t qla4_82xx_intr_handler(int irq, void *dev_id)
{
	struct scsi_qla_host *ha = dev_id;
	uint32_t intr_status;
	uint32_t status;
	unsigned long flags = 0;
	uint8_t reqs_count = 0;

	if (unlikely(pci_channel_offline(ha->pdev)))
		return IRQ_HANDLED;

	ha->isr_count++;
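	/*
	 * The legacy INTx line may be shared: bail out unless this
	 * function's bit is set in the global interrupt vector and the
	 * legacy interrupt is actually asserted.
	 */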
	status = qla4_82xx_rd_32(ha, ISR_INT_VECTOR);
	if (!(status & ha->nx_legacy_intr.int_vec_bit))
		return IRQ_NONE;

	status = qla4_82xx_rd_32(ha, ISR_INT_STATE_REG);
	if (!ISR_IS_LEGACY_INTR_TRIGGERED(status)) {
		DEBUG2(ql4_printk(KERN_INFO, ha,
		    "%s legacy Int not triggered\n", __func__));
		return IRQ_NONE;
	}

	/* clear the interrupt */
	qla4_82xx_wr_32(ha, ha->nx_legacy_intr.tgt_status_reg, 0xffffffff);

	/* read twice to ensure write is flushed */
	qla4_82xx_rd_32(ha, ISR_INT_VECTOR);
	qla4_82xx_rd_32(ha, ISR_INT_VECTOR);

	spin_lock_irqsave(&ha->hardware_lock, flags);
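	/*
	 * As in qla4xxx_intr_handler(), bound the work done per
	 * invocation by MAX_REQS_SERVICED_PER_INTR so one busy port
	 * cannot monopolize the CPU.
	 */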
	while (1) {
		if (!(readl(&ha->qla4_82xx_reg->host_int) &
		    ISRX_82XX_RISC_INT)) {
			qla4_82xx_spurious_interrupt(ha, reqs_count);
			break;
		}
		intr_status = readl(&ha->qla4_82xx_reg->host_status);
		if ((intr_status &
		    (HSRX_RISC_MB_INT | HSRX_RISC_IOCB_INT)) == 0) {
			qla4_82xx_spurious_interrupt(ha, reqs_count);
			break;
		}

		ha->isp_ops->interrupt_service_routine(ha, intr_status);

		/* Enable Interrupt */
		qla4_82xx_wr_32(ha, ha->nx_legacy_intr.tgt_mask_reg, 0xfbff);

		if (++reqs_count == MAX_REQS_SERVICED_PER_INTR)
			break;
	}

	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	return IRQ_HANDLED;
}

irqreturn_t
qla4_8xxx_msi_handler(int irq, void *dev_id)
{
	struct scsi_qla_host *ha;

	ha = (struct scsi_qla_host *) dev_id;
	if (!ha) {
		DEBUG2(printk(KERN_INFO
		    "qla4xxx: MSI: Interrupt with NULL host ptr\n"));
		return IRQ_NONE;
	}

	ha->isr_count++;
	/* clear the interrupt */
	qla4_82xx_wr_32(ha, ha->nx_legacy_intr.tgt_status_reg, 0xffffffff);

	/* read twice to ensure write is flushed */
	qla4_82xx_rd_32(ha, ISR_INT_VECTOR);
	qla4_82xx_rd_32(ha, ISR_INT_VECTOR);

	return qla4_8xxx_default_intr_handler(irq, dev_id);
}

/**
 * qla4_8xxx_default_intr_handler - hardware interrupt handler.
 * @irq: Unused
 * @dev_id: Pointer to host adapter structure
 *
 * This interrupt handler is called directly for MSI-X, and
 * called indirectly for MSI.
 **/
irqreturn_t
qla4_8xxx_default_intr_handler(int irq, void *dev_id)
{
	struct scsi_qla_host *ha = dev_id;
	unsigned long flags;
	uint32_t intr_status;
	uint8_t reqs_count = 0;

	spin_lock_irqsave(&ha->hardware_lock, flags);
	while (1) {
		if (!(readl(&ha->qla4_82xx_reg->host_int) &
		    ISRX_82XX_RISC_INT)) {
			qla4_82xx_spurious_interrupt(ha, reqs_count);
			break;
		}

		intr_status = readl(&ha->qla4_82xx_reg->host_status);
		if ((intr_status &
		    (HSRX_RISC_MB_INT | HSRX_RISC_IOCB_INT)) == 0) {
			qla4_82xx_spurious_interrupt(ha, reqs_count);
			break;
		}

		ha->isp_ops->interrupt_service_routine(ha, intr_status);

		if (++reqs_count == MAX_REQS_SERVICED_PER_INTR)
			break;
	}

	ha->isr_count++;
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	return IRQ_HANDLED;
}

irqreturn_t
qla4_8xxx_msix_rsp_q(int irq, void *dev_id)
{
	struct scsi_qla_host *ha = dev_id;
	unsigned long flags;

	spin_lock_irqsave(&ha->hardware_lock, flags);
	qla4xxx_process_response_queue(ha);
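	/* Ack the IOCB interrupt now that the response queue is drained. */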
	writel(0, &ha->qla4_82xx_reg->host_int);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	ha->isr_count++;
	return IRQ_HANDLED;
}

/**
 * qla4xxx_process_aen - processes AENs generated by firmware
 * @ha: pointer to host adapter structure.
 * @process_aen: type of AENs to process
 *
 * Processes specific types of Asynchronous Events generated by firmware.
 * The type of AENs to process is specified by process_aen and can be
 *	PROCESS_ALL_AENS	 0
 *	FLUSH_DDB_CHANGED_AENS	 1
 *	RELOGIN_DDB_CHANGED_AENS 2
 **/
void qla4xxx_process_aen(struct scsi_qla_host *ha, uint8_t process_aen)
{
	uint32_t mbox_sts[MBOX_AEN_REG_COUNT];
	struct aen *aen;
	int i;
	unsigned long flags;

	spin_lock_irqsave(&ha->hardware_lock, flags);
	while (ha->aen_out != ha->aen_in) {
		aen = &ha->aen_q[ha->aen_out];
		/* copy aen information to local structure */
		for (i = 0; i < MBOX_AEN_REG_COUNT; i++)
			mbox_sts[i] = aen->mbox_sts[i];

		ha->aen_q_count++;
		ha->aen_out++;

		if (ha->aen_out == MAX_AEN_ENTRIES)
			ha->aen_out = 0;

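		/*
		 * Drop the hardware lock while handling the entry so the
		 * (potentially lengthy) DDB-changed processing does not
		 * run with it held; it is retaken before the queue
		 * indices are examined again.
		 */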
		spin_unlock_irqrestore(&ha->hardware_lock, flags);

		DEBUG2(printk("qla4xxx(%ld): AEN[%d]=0x%08x, mbx1=0x%08x mbx2=0x%08x"
			" mbx3=0x%08x mbx4=0x%08x\n", ha->host_no,
			(ha->aen_out ? (ha->aen_out - 1) : (MAX_AEN_ENTRIES - 1)),
			mbox_sts[0], mbox_sts[1], mbox_sts[2],
			mbox_sts[3], mbox_sts[4]));

		switch (mbox_sts[0]) {
		case MBOX_ASTS_DATABASE_CHANGED:
			switch (process_aen) {
			case FLUSH_DDB_CHANGED_AENS:
				DEBUG2(printk("scsi%ld: AEN[%d] %04x, index "
					      "[%d] state=%04x FLUSHED!\n",
					      ha->host_no, ha->aen_out,
					      mbox_sts[0], mbox_sts[2],
					      mbox_sts[3]));
				break;
			case PROCESS_ALL_AENS:
			default:
				/* Specific device. */
				if (mbox_sts[1] == 1)
					qla4xxx_process_ddb_changed(ha,
						mbox_sts[2], mbox_sts[3],
						mbox_sts[4]);
				break;
			}
		}
		spin_lock_irqsave(&ha->hardware_lock, flags);
	}
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
}

int qla4xxx_request_irqs(struct scsi_qla_host *ha)
{
	int ret;

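	/*
	 * Only the ISP82xx (PCIe) parts support MSI-X/MSI; the older
	 * ISP4xxx chips are serviced via INTx only.
	 */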
	if (!is_qla8022(ha))
		goto try_intx;

	if (ql4xenablemsix == 2)
		goto try_msi;

	if (ql4xenablemsix != 1)
		goto try_intx;

	/* Trying MSI-X */
	ret = qla4_8xxx_enable_msix(ha);
	if (!ret) {
		DEBUG2(ql4_printk(KERN_INFO, ha,
		    "MSI-X: Enabled (0x%X).\n", ha->revision_id));
		goto irq_attached;
	}

	ql4_printk(KERN_WARNING, ha,
	    "MSI-X: Falling back to MSI mode -- %d.\n", ret);

try_msi:
	/* Trying MSI */
	ret = pci_enable_msi(ha->pdev);
	if (!ret) {
		ret = request_irq(ha->pdev->irq, qla4_8xxx_msi_handler,
			0, DRIVER_NAME, ha);
		if (!ret) {
			DEBUG2(ql4_printk(KERN_INFO, ha, "MSI: Enabled.\n"));
			set_bit(AF_MSI_ENABLED, &ha->flags);
			goto irq_attached;
		} else {
			ql4_printk(KERN_WARNING, ha,
			    "MSI: Failed to reserve interrupt %d; "
			    "already in use.\n", ha->pdev->irq);
			pci_disable_msi(ha->pdev);
		}
	}
	ql4_printk(KERN_WARNING, ha,
	    "MSI: Falling back to INTx mode -- %d.\n", ret);

try_intx:
	/* Trying INTx */
	ret = request_irq(ha->pdev->irq, ha->isp_ops->intr_handler,
	    IRQF_SHARED, DRIVER_NAME, ha);
	if (!ret) {
		DEBUG2(ql4_printk(KERN_INFO, ha, "INTx: Enabled.\n"));
		set_bit(AF_INTx_ENABLED, &ha->flags);
		goto irq_attached;

	} else {
		ql4_printk(KERN_WARNING, ha,
		    "INTx: Failed to reserve interrupt %d; "
		    "already in use.\n", ha->pdev->irq);
		return ret;
	}

irq_attached:
	set_bit(AF_IRQ_ATTACHED, &ha->flags);
	ha->host->irq = ha->pdev->irq;
	ql4_printk(KERN_INFO, ha, "%s: irq %d attached\n",
	    __func__, ha->pdev->irq);
	return ret;
}

void qla4xxx_free_irqs(struct scsi_qla_host *ha)
{
	if (test_bit(AF_MSIX_ENABLED, &ha->flags))
		qla4_8xxx_disable_msix(ha);
	else if (test_and_clear_bit(AF_MSI_ENABLED, &ha->flags)) {
		free_irq(ha->pdev->irq, ha);
		pci_disable_msi(ha->pdev);
	} else if (test_and_clear_bit(AF_INTx_ENABLED, &ha->flags))
		free_irq(ha->pdev->irq, ha);
}