ql4_isr.c revision 33338e31839fe45fa794bcc227d292dd7fab786c
/*
 * QLogic iSCSI HBA Driver
 * Copyright (c) 2003-2012 QLogic Corporation
 *
 * See LICENSE.qla4xxx for copyright and licensing details.
 */

#include "ql4_def.h"
#include "ql4_glbl.h"
#include "ql4_dbg.h"
#include "ql4_inline.h"

/**
 * qla4xxx_copy_sense - copy sense data into cmd sense buffer
 * @ha: Pointer to host adapter structure.
 * @sts_entry: Pointer to status entry structure.
 * @srb: Pointer to srb structure.
 **/
static void qla4xxx_copy_sense(struct scsi_qla_host *ha,
			       struct status_entry *sts_entry,
			       struct srb *srb)
{
	struct scsi_cmnd *cmd = srb->cmd;
	uint16_t sense_len;

	memset(cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
	sense_len = le16_to_cpu(sts_entry->senseDataByteCnt);
	if (sense_len == 0) {
		DEBUG2(ql4_printk(KERN_INFO, ha, "scsi%ld:%d:%d:%d: %s:"
				  " sense len 0\n", ha->host_no,
				  cmd->device->channel, cmd->device->id,
				  cmd->device->lun, __func__));
		ha->status_srb = NULL;
		return;
	}
	/* Save total available sense length,
	 * not to exceed cmd's sense buffer size */
	sense_len = min_t(uint16_t, sense_len, SCSI_SENSE_BUFFERSIZE);
	srb->req_sense_ptr = cmd->sense_buffer;
	srb->req_sense_len = sense_len;

	/* Copy sense from sts_entry pkt */
	sense_len = min_t(uint16_t, sense_len, IOCB_MAX_SENSEDATA_LEN);
	memcpy(cmd->sense_buffer, sts_entry->senseData, sense_len);

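	/*
	 * The firmware returns fixed-format sense data (SPC): byte 2
	 * carries the sense key in its low nibble, byte 7 the additional
	 * sense length, and bytes 12/13 the ASC/ASCQ pair -- these are
	 * the offsets decoded in the debug print below.
	 */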
	DEBUG2(printk(KERN_INFO "scsi%ld:%d:%d:%d: %s: sense key = %x, "
		"ASL= %02x, ASC/ASCQ = %02x/%02x\n", ha->host_no,
		cmd->device->channel, cmd->device->id,
		cmd->device->lun, __func__,
		sts_entry->senseData[2] & 0x0f,
		sts_entry->senseData[7],
		sts_entry->senseData[12],
		sts_entry->senseData[13]));

	DEBUG5(qla4xxx_dump_buffer(cmd->sense_buffer, sense_len));
	srb->flags |= SRB_GOT_SENSE;

	/* Update srb, in case a sts_cont pkt follows */
	srb->req_sense_ptr += sense_len;
	srb->req_sense_len -= sense_len;
	if (srb->req_sense_len != 0)
		ha->status_srb = srb;
	else
		ha->status_srb = NULL;
}

/**
 * qla4xxx_status_cont_entry - Process a Status Continuation entry.
 * @ha: SCSI driver HA context
 * @sts_cont: Entry pointer
 *
 * Extended sense data.
 */
static void
qla4xxx_status_cont_entry(struct scsi_qla_host *ha,
			  struct status_cont_entry *sts_cont)
{
	struct srb *srb = ha->status_srb;
	struct scsi_cmnd *cmd;
	uint16_t sense_len;

	if (srb == NULL)
		return;

	cmd = srb->cmd;
	if (cmd == NULL) {
		DEBUG2(printk(KERN_INFO "scsi%ld: %s: Cmd already returned "
			"back to OS srb=%p srb->state:%d\n", ha->host_no,
			__func__, srb, srb->state));
		ha->status_srb = NULL;
		return;
	}

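	/*
	 * A single status IOCB carries at most IOCB_MAX_SENSEDATA_LEN
	 * sense bytes and each continuation IOCB at most
	 * IOCB_MAX_EXT_SENSEDATA_LEN, so larger sense buffers arrive as a
	 * chain of entries; ha->status_srb carries the SRB between chunks.
	 */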
	/* Copy sense data. */
	sense_len = min_t(uint16_t, srb->req_sense_len,
			  IOCB_MAX_EXT_SENSEDATA_LEN);
	memcpy(srb->req_sense_ptr, sts_cont->ext_sense_data, sense_len);
	DEBUG5(qla4xxx_dump_buffer(srb->req_sense_ptr, sense_len));

	srb->req_sense_ptr += sense_len;
	srb->req_sense_len -= sense_len;

	/* Place command on done queue. */
	if (srb->req_sense_len == 0) {
		kref_put(&srb->srb_ref, qla4xxx_srb_compl);
		ha->status_srb = NULL;
	}
}

/**
 * qla4xxx_status_entry - processes status IOCBs
 * @ha: Pointer to host adapter structure.
 * @sts_entry: Pointer to status entry structure.
 **/
static void qla4xxx_status_entry(struct scsi_qla_host *ha,
				 struct status_entry *sts_entry)
{
	uint8_t scsi_status;
	struct scsi_cmnd *cmd;
	struct srb *srb;
	struct ddb_entry *ddb_entry;
	uint32_t residual;

	srb = qla4xxx_del_from_active_array(ha, le32_to_cpu(sts_entry->handle));
	if (!srb) {
		ql4_printk(KERN_WARNING, ha, "%s invalid status entry: "
			   "handle=0x%0x, srb=%p\n", __func__,
			   sts_entry->handle, srb);
		if (is_qla80XX(ha))
			set_bit(DPC_RESET_HA_FW_CONTEXT, &ha->dpc_flags);
		else
			set_bit(DPC_RESET_HA, &ha->dpc_flags);
		return;
	}

	cmd = srb->cmd;
	if (cmd == NULL) {
		DEBUG2(printk("scsi%ld: %s: Command already returned back to "
			      "OS pkt->handle=%d srb=%p srb->state:%d\n",
			      ha->host_no, __func__, sts_entry->handle,
			      srb, srb->state));
		ql4_printk(KERN_WARNING, ha, "Command is NULL:"
		    " already returned to OS (srb=%p)\n", srb);
		return;
	}

	ddb_entry = srb->ddb;
	if (ddb_entry == NULL) {
		cmd->result = DID_NO_CONNECT << 16;
		goto status_entry_exit;
	}

	residual = le32_to_cpu(sts_entry->residualByteCnt);

	/* Translate ISP error to a Linux SCSI error. */
	scsi_status = sts_entry->scsiStatus;
	switch (sts_entry->completionStatus) {
	case SCS_COMPLETE:

		if (sts_entry->iscsiFlags & ISCSI_FLAG_RESIDUAL_OVER) {
			cmd->result = DID_ERROR << 16;
			break;
		}

		if (sts_entry->iscsiFlags & ISCSI_FLAG_RESIDUAL_UNDER) {
			scsi_set_resid(cmd, residual);
			if (!scsi_status && ((scsi_bufflen(cmd) - residual) <
				cmd->underflow)) {

				cmd->result = DID_ERROR << 16;

				DEBUG2(printk("scsi%ld:%d:%d:%d: %s: "
					"Mid-layer Data underrun, "
					"xferlen = 0x%x, "
					"residual = 0x%x\n", ha->host_no,
					cmd->device->channel,
					cmd->device->id,
					cmd->device->lun, __func__,
					scsi_bufflen(cmd), residual));
				break;
			}
		}

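		/* Linux packs the host byte into bits 16-23 of cmd->result
		 * and keeps the SCSI status byte in bits 0-7, hence the
		 * "<< 16 |" compositions used throughout this switch. */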
		cmd->result = DID_OK << 16 | scsi_status;

		if (scsi_status != SCSI_CHECK_CONDITION)
			break;

		/* Copy Sense Data into sense buffer. */
		qla4xxx_copy_sense(ha, sts_entry, srb);
		break;

	case SCS_INCOMPLETE:
		/* Always set the status to DID_ERROR, since
		 * all conditions result in that status anyway */
		cmd->result = DID_ERROR << 16;
		break;

	case SCS_RESET_OCCURRED:
		DEBUG2(printk("scsi%ld:%d:%d:%d: %s: Device RESET occurred\n",
			      ha->host_no, cmd->device->channel,
			      cmd->device->id, cmd->device->lun, __func__));

		cmd->result = DID_RESET << 16;
		break;

	case SCS_ABORTED:
		DEBUG2(printk("scsi%ld:%d:%d:%d: %s: Abort occurred\n",
			      ha->host_no, cmd->device->channel,
			      cmd->device->id, cmd->device->lun, __func__));

		cmd->result = DID_RESET << 16;
		break;

	case SCS_TIMEOUT:
		DEBUG2(printk(KERN_INFO "scsi%ld:%d:%d:%d: Timeout\n",
			      ha->host_no, cmd->device->channel,
			      cmd->device->id, cmd->device->lun));

		cmd->result = DID_TRANSPORT_DISRUPTED << 16;

		/*
		 * Mark device missing so that we won't continue to send
		 * I/O to this device.  We should get a ddb state change
		 * AEN soon.
		 */
		if (iscsi_is_session_online(ddb_entry->sess))
			qla4xxx_mark_device_missing(ddb_entry->sess);
		break;

	case SCS_DATA_UNDERRUN:
	case SCS_DATA_OVERRUN:
		if ((sts_entry->iscsiFlags & ISCSI_FLAG_RESIDUAL_OVER) ||
		    (sts_entry->completionStatus == SCS_DATA_OVERRUN)) {
			DEBUG2(printk("scsi%ld:%d:%d:%d: %s: Data overrun\n",
				      ha->host_no,
				      cmd->device->channel, cmd->device->id,
				      cmd->device->lun, __func__));

			cmd->result = DID_ERROR << 16;
			break;
		}

		scsi_set_resid(cmd, residual);

		if (sts_entry->iscsiFlags & ISCSI_FLAG_RESIDUAL_UNDER) {

			/* Both the firmware and target reported UNDERRUN:
			 *
			 * MID-LAYER UNDERFLOW case:
			 * Some kernels do not properly detect midlayer
			 * underflow, so we manually check it and return
			 * ERROR if the minimum required data was not
			 * received.
			 *
			 * ALL OTHER cases:
			 * Fall thru to check scsi_status
			 */
			if (!scsi_status && (scsi_bufflen(cmd) - residual) <
			    cmd->underflow) {
				DEBUG2(ql4_printk(KERN_INFO, ha,
						  "scsi%ld:%d:%d:%d: %s: Mid-layer Data underrun, xferlen = 0x%x, residual = 0x%x\n",
						  ha->host_no,
						  cmd->device->channel,
						  cmd->device->id,
						  cmd->device->lun, __func__,
						  scsi_bufflen(cmd),
						  residual));

				cmd->result = DID_ERROR << 16;
				break;
			}

		} else if (scsi_status != SAM_STAT_TASK_SET_FULL &&
			   scsi_status != SAM_STAT_BUSY) {

			/*
			 * The firmware reports UNDERRUN, but the target does
			 * not report it:
			 *
			 *   scsi_status     |    host_byte       device_byte
			 *                   |     (19:16)          (7:0)
			 *   =============   |    =========       ===========
			 *   TASK_SET_FULL   |    DID_OK          scsi_status
			 *   BUSY            |    DID_OK          scsi_status
			 *   ALL OTHERS      |    DID_ERROR       scsi_status
			 *
			 *   Note: If scsi_status is task set full or busy,
			 *   then this else if would fall thru to check the
			 *   scsi_status and return DID_OK.
			 */

			DEBUG2(ql4_printk(KERN_INFO, ha,
					  "scsi%ld:%d:%d:%d: %s: Dropped frame(s) detected (0x%x of 0x%x bytes).\n",
					  ha->host_no,
					  cmd->device->channel,
					  cmd->device->id,
					  cmd->device->lun, __func__,
					  residual,
					  scsi_bufflen(cmd)));

			cmd->result = DID_ERROR << 16 | scsi_status;
			goto check_scsi_status;
		}

		cmd->result = DID_OK << 16 | scsi_status;

check_scsi_status:
		if (scsi_status == SAM_STAT_CHECK_CONDITION)
			qla4xxx_copy_sense(ha, sts_entry, srb);

		break;

	case SCS_DEVICE_LOGGED_OUT:
	case SCS_DEVICE_UNAVAILABLE:
		DEBUG2(printk(KERN_INFO "scsi%ld:%d:%d:%d: SCS_DEVICE "
		    "state: 0x%x\n", ha->host_no,
		    cmd->device->channel, cmd->device->id,
		    cmd->device->lun, sts_entry->completionStatus));
		/*
		 * Mark device missing so that we won't continue to
		 * send I/O to this device.  We should get a ddb
		 * state change AEN soon.
		 */
		if (iscsi_is_session_online(ddb_entry->sess))
			qla4xxx_mark_device_missing(ddb_entry->sess);

		cmd->result = DID_TRANSPORT_DISRUPTED << 16;
		break;

	case SCS_QUEUE_FULL:
		/*
		 * SCSI Mid-Layer handles device queue full
		 */
		cmd->result = DID_OK << 16 | sts_entry->scsiStatus;
		DEBUG2(printk("scsi%ld:%d:%d: %s: QUEUE FULL detected "
			      "compl=%02x, scsi=%02x, state=%02x, iFlags=%02x,"
			      " iResp=%02x\n", ha->host_no, cmd->device->id,
			      cmd->device->lun, __func__,
			      sts_entry->completionStatus,
			      sts_entry->scsiStatus, sts_entry->state_flags,
			      sts_entry->iscsiFlags,
			      sts_entry->iscsiResponse));
		break;

	default:
		cmd->result = DID_ERROR << 16;
		break;
	}

status_entry_exit:

	/* complete the request, if not waiting for status_continuation pkt */
	srb->cc_stat = sts_entry->completionStatus;
	if (ha->status_srb == NULL)
		kref_put(&srb->srb_ref, qla4xxx_srb_compl);
}

/**
 * qla4xxx_passthru_status_entry - processes passthru status IOCBs (0x3C)
 * @ha: Pointer to host adapter structure.
 * @sts_entry: Pointer to status entry structure.
 **/
static void qla4xxx_passthru_status_entry(struct scsi_qla_host *ha,
					  struct passthru_status *sts_entry)
{
	struct iscsi_task *task;
	struct ddb_entry *ddb_entry;
	struct ql4_task_data *task_data;
	struct iscsi_cls_conn *cls_conn;
	struct iscsi_conn *conn;
	itt_t itt;
	uint32_t fw_ddb_index;

	itt = sts_entry->handle;
	fw_ddb_index = le32_to_cpu(sts_entry->target);

	ddb_entry = qla4xxx_lookup_ddb_by_fw_index(ha, fw_ddb_index);

	if (ddb_entry == NULL) {
		ql4_printk(KERN_ERR, ha, "%s: Invalid target index = 0x%x\n",
			   __func__, sts_entry->target);
		return;
	}

	cls_conn = ddb_entry->conn;
	conn = cls_conn->dd_data;
	spin_lock(&conn->session->lock);
	task = iscsi_itt_to_task(conn, itt);
	spin_unlock(&conn->session->lock);

	if (task == NULL) {
		ql4_printk(KERN_ERR, ha, "%s: Task is NULL\n", __func__);
		return;
	}

	task_data = task->dd_data;
	memcpy(&task_data->sts, sts_entry, sizeof(struct passthru_status));
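	/* Reclaim the request-queue slots this passthru consumed, then
	 * defer the iSCSI completion to the per-adapter task workqueue. */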
	ha->req_q_count += task_data->iocb_req_cnt;
	ha->iocb_cnt -= task_data->iocb_req_cnt;
	queue_work(ha->task_wq, &task_data->task_work);
}

static struct mrb *qla4xxx_del_mrb_from_active_array(struct scsi_qla_host *ha,
						     uint32_t index)
{
	struct mrb *mrb = NULL;

	/* validate handle and remove from active array */
	if (index >= MAX_MRB)
		return mrb;

	mrb = ha->active_mrb_array[index];
	ha->active_mrb_array[index] = NULL;
	if (!mrb)
		return mrb;

	/* update counters */
	ha->req_q_count += mrb->iocb_cnt;
	ha->iocb_cnt -= mrb->iocb_cnt;

	return mrb;
}

static void qla4xxx_mbox_status_entry(struct scsi_qla_host *ha,
				      struct mbox_status_iocb *mbox_sts_entry)
{
	struct mrb *mrb;
	uint32_t status;
	uint32_t data_size;

	mrb = qla4xxx_del_mrb_from_active_array(ha,
					le32_to_cpu(mbox_sts_entry->handle));

	if (mrb == NULL) {
		ql4_printk(KERN_WARNING, ha, "%s: mrb[%d] is null\n", __func__,
			   mbox_sts_entry->handle);
		return;
	}

	switch (mrb->mbox_cmd) {
	case MBOX_CMD_PING:
		DEBUG2(ql4_printk(KERN_INFO, ha, "%s: mbox_cmd = 0x%x, "
				  "mbox_sts[0] = 0x%x, mbox_sts[6] = 0x%x\n",
				  __func__, mrb->mbox_cmd,
				  mbox_sts_entry->out_mbox[0],
				  mbox_sts_entry->out_mbox[6]));

		if (mbox_sts_entry->out_mbox[0] == MBOX_STS_COMMAND_COMPLETE)
			status = ISCSI_PING_SUCCESS;
		else
			status = mbox_sts_entry->out_mbox[6];

		data_size = sizeof(mbox_sts_entry->out_mbox);

		qla4xxx_post_ping_evt_work(ha, status, mrb->pid, data_size,
					(uint8_t *) mbox_sts_entry->out_mbox);
		break;

	default:
		DEBUG2(ql4_printk(KERN_WARNING, ha, "%s: invalid mbox_cmd = "
				  "0x%x\n", __func__, mrb->mbox_cmd));
	}

	kfree(mrb);
}

/**
 * qla4xxx_process_response_queue - process response queue completions
 * @ha: Pointer to host adapter structure.
 *
 * This routine processes response queue completions in interrupt context.
 * Hardware_lock locked upon entry
 **/
void qla4xxx_process_response_queue(struct scsi_qla_host *ha)
{
	uint32_t count = 0;
	struct srb *srb = NULL;
	struct status_entry *sts_entry;

	/* Process all responses from response queue */
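	/* Each consumed entry is stamped RESPONSE_PROCESSED at the bottom
	 * of the loop, so the ring can wrap safely; the wmb() there orders
	 * each stamp ahead of later writes such as the completion doorbell
	 * issued via complete_iocb(). */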
	while ((ha->response_ptr->signature != RESPONSE_PROCESSED)) {
		sts_entry = (struct status_entry *) ha->response_ptr;
		count++;

		/* Advance pointers for next entry */
		if (ha->response_out == (RESPONSE_QUEUE_DEPTH - 1)) {
			ha->response_out = 0;
			ha->response_ptr = ha->response_ring;
		} else {
			ha->response_out++;
			ha->response_ptr++;
		}

		/* process entry */
		switch (sts_entry->hdr.entryType) {
		case ET_STATUS:
			/* Common status */
			qla4xxx_status_entry(ha, sts_entry);
			break;

		case ET_PASSTHRU_STATUS:
			if (sts_entry->hdr.systemDefined == SD_ISCSI_PDU)
				qla4xxx_passthru_status_entry(ha,
					(struct passthru_status *)sts_entry);
			else
				ql4_printk(KERN_ERR, ha,
					   "%s: Invalid status received\n",
					   __func__);

			break;

		case ET_STATUS_CONTINUATION:
			qla4xxx_status_cont_entry(ha,
				(struct status_cont_entry *) sts_entry);
			break;

		case ET_COMMAND:
			/* ISP device queue is full. Command not
			 * accepted by ISP.  Queue command for
			 * later */

			srb = qla4xxx_del_from_active_array(ha,
						le32_to_cpu(sts_entry->handle));
			if (srb == NULL)
				goto exit_prq_invalid_handle;

			DEBUG2(printk("scsi%ld: %s: FW device queue full, "
				      "srb %p\n", ha->host_no, __func__, srb));

			/* Retry normally by sending it back with
			 * DID_BUS_BUSY */
			srb->cmd->result = DID_BUS_BUSY << 16;
			kref_put(&srb->srb_ref, qla4xxx_srb_compl);
			break;

		case ET_CONTINUE:
			/* Just throw away the continuation entries */
			DEBUG2(printk("scsi%ld: %s: Continuation entry - "
				      "ignoring\n", ha->host_no, __func__));
			break;

		case ET_MBOX_STATUS:
			DEBUG2(ql4_printk(KERN_INFO, ha,
					  "%s: mbox status IOCB\n", __func__));
			qla4xxx_mbox_status_entry(ha,
					(struct mbox_status_iocb *)sts_entry);
			break;

		default:
			/*
			 * Invalid entry in response queue, reset RISC
			 * firmware.
			 */
			DEBUG2(printk("scsi%ld: %s: Invalid entry %x in "
				      "response queue\n", ha->host_no,
				      __func__,
				      sts_entry->hdr.entryType));
			goto exit_prq_error;
		}
		((struct response *)sts_entry)->signature = RESPONSE_PROCESSED;
		wmb();
	}

	/*
	 * Tell ISP we're done with response(s). This also clears the interrupt.
	 */
	ha->isp_ops->complete_iocb(ha);

	return;

exit_prq_invalid_handle:
	DEBUG2(printk("scsi%ld: %s: Invalid handle(srb)=%p type=%x IOCS=%x\n",
		      ha->host_no, __func__, srb, sts_entry->hdr.entryType,
		      sts_entry->completionStatus));

exit_prq_error:
	ha->isp_ops->complete_iocb(ha);
	set_bit(DPC_RESET_HA, &ha->dpc_flags);
}

/**
 * qla4_83xx_loopback_in_progress - Is loopback in progress?
 * @ha: Pointer to host adapter structure.
 *
 * Returns: 1 = loopback in progress, 0 = loopback not in progress.
 **/
static int qla4_83xx_loopback_in_progress(struct scsi_qla_host *ha)
{
	int rval = 1;

	if (is_qla8032(ha)) {
		if ((ha->idc_info.info2 & ENABLE_INTERNAL_LOOPBACK) ||
		    (ha->idc_info.info2 & ENABLE_EXTERNAL_LOOPBACK)) {
			DEBUG2(ql4_printk(KERN_INFO, ha,
					  "%s: Loopback diagnostics in progress\n",
					  __func__));
			rval = 1;
		} else {
			DEBUG2(ql4_printk(KERN_INFO, ha,
					  "%s: Loopback diagnostics not in progress\n",
					  __func__));
			rval = 0;
		}
	}

	return rval;
}

/**
 * qla4xxx_isr_decode_mailbox - decodes mailbox status
 * @ha: Pointer to host adapter structure.
 * @mbox_status: Mailbox status.
 *
 * This routine decodes the mailbox status during the ISR.
 * Hardware_lock locked upon entry. runs in interrupt context.
 **/
static void qla4xxx_isr_decode_mailbox(struct scsi_qla_host *ha,
				       uint32_t mbox_status)
{
	int i;
	uint32_t mbox_sts[MBOX_AEN_REG_COUNT];
	__le32 __iomem *mailbox_out;

	if (is_qla8032(ha))
		mailbox_out = &ha->qla4_83xx_reg->mailbox_out[0];
	else if (is_qla8022(ha))
		mailbox_out = &ha->qla4_82xx_reg->mailbox_out[0];
	else
		mailbox_out = &ha->reg->mailbox[0];

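	/* The top nibble of the status word selects its class: command
	 * completion statuses are copied back to the mailbox waiter here,
	 * async event statuses (AENs) are decoded in the branch below, and
	 * anything else is only logged. */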
	if ((mbox_status == MBOX_STS_BUSY) ||
	    (mbox_status == MBOX_STS_INTERMEDIATE_COMPLETION) ||
	    (mbox_status >> 12 == MBOX_COMPLETION_STATUS)) {
		ha->mbox_status[0] = mbox_status;

		if (test_bit(AF_MBOX_COMMAND, &ha->flags)) {
			/*
			 * Copy all mailbox registers to a temporary
			 * location and set mailbox command done flag
			 */
			for (i = 0; i < ha->mbox_status_count; i++)
				ha->mbox_status[i] = readl(&mailbox_out[i]);

			set_bit(AF_MBOX_COMMAND_DONE, &ha->flags);

			if (test_bit(AF_MBOX_COMMAND_NOPOLL, &ha->flags))
				complete(&ha->mbx_intr_comp);
		}
	} else if (mbox_status >> 12 == MBOX_ASYNC_EVENT_STATUS) {
		for (i = 0; i < MBOX_AEN_REG_COUNT; i++)
			mbox_sts[i] = readl(&mailbox_out[i]);

		/* Immediately process the AENs that don't require much work.
		 * Only queue the database_changed AENs */
		if (ha->aen_log.count < MAX_AEN_ENTRIES) {
			for (i = 0; i < MBOX_AEN_REG_COUNT; i++)
				ha->aen_log.entry[ha->aen_log.count].mbox_sts[i] =
				    mbox_sts[i];
			ha->aen_log.count++;
		}
		switch (mbox_status) {
		case MBOX_ASTS_SYSTEM_ERROR:
			/* Log Mailbox registers */
			ql4_printk(KERN_INFO, ha, "%s: System Err\n", __func__);
			qla4xxx_dump_registers(ha);

			if ((is_qla8022(ha) && ql4xdontresethba) ||
			    (is_qla8032(ha) && qla4_83xx_idc_dontreset(ha))) {
				DEBUG2(printk("scsi%ld: %s:Don't Reset HBA\n",
				    ha->host_no, __func__));
			} else {
				set_bit(AF_GET_CRASH_RECORD, &ha->flags);
				set_bit(DPC_RESET_HA, &ha->dpc_flags);
			}
			break;

		case MBOX_ASTS_REQUEST_TRANSFER_ERROR:
		case MBOX_ASTS_RESPONSE_TRANSFER_ERROR:
		case MBOX_ASTS_NVRAM_INVALID:
		case MBOX_ASTS_IP_ADDRESS_CHANGED:
		case MBOX_ASTS_DHCP_LEASE_EXPIRED:
			DEBUG2(printk("scsi%ld: AEN %04x, ERROR Status, "
				      "Reset HA\n", ha->host_no, mbox_status));
			if (is_qla80XX(ha))
				set_bit(DPC_RESET_HA_FW_CONTEXT,
					&ha->dpc_flags);
			else
				set_bit(DPC_RESET_HA, &ha->dpc_flags);
			break;

		case MBOX_ASTS_LINK_UP:
			set_bit(AF_LINK_UP, &ha->flags);
			if (test_bit(AF_INIT_DONE, &ha->flags))
				set_bit(DPC_LINK_CHANGED, &ha->dpc_flags);

			ql4_printk(KERN_INFO, ha, "%s: LINK UP\n", __func__);
			qla4xxx_post_aen_work(ha, ISCSI_EVENT_LINKUP,
					      sizeof(mbox_sts),
					      (uint8_t *) mbox_sts);
			break;

		case MBOX_ASTS_LINK_DOWN:
			clear_bit(AF_LINK_UP, &ha->flags);
			if (test_bit(AF_INIT_DONE, &ha->flags)) {
				set_bit(DPC_LINK_CHANGED, &ha->dpc_flags);
				qla4xxx_wake_dpc(ha);
			}

			ql4_printk(KERN_INFO, ha, "%s: LINK DOWN\n", __func__);
			qla4xxx_post_aen_work(ha, ISCSI_EVENT_LINKDOWN,
					      sizeof(mbox_sts),
					      (uint8_t *) mbox_sts);
			break;

		case MBOX_ASTS_HEARTBEAT:
			ha->seconds_since_last_heartbeat = 0;
			break;

		case MBOX_ASTS_DHCP_LEASE_ACQUIRED:
			DEBUG2(printk("scsi%ld: AEN %04x DHCP LEASE "
				      "ACQUIRED\n", ha->host_no, mbox_status));
			set_bit(DPC_GET_DHCP_IP_ADDR, &ha->dpc_flags);
			break;

		case MBOX_ASTS_PROTOCOL_STATISTIC_ALARM:
		case MBOX_ASTS_SCSI_COMMAND_PDU_REJECTED: /* Target mode only */
		case MBOX_ASTS_UNSOLICITED_PDU_RECEIVED:  /* Connection mode */
		case MBOX_ASTS_IPSEC_SYSTEM_FATAL_ERROR:
		case MBOX_ASTS_SUBNET_STATE_CHANGE:
		case MBOX_ASTS_DUPLICATE_IP:
			/* No action */
			DEBUG2(printk("scsi%ld: AEN %04x\n", ha->host_no,
				      mbox_status));
			break;

		case MBOX_ASTS_IP_ADDR_STATE_CHANGED:
			printk("scsi%ld: AEN %04x, mbox_sts[2]=%04x, "
			    "mbox_sts[3]=%04x\n", ha->host_no, mbox_sts[0],
			    mbox_sts[2], mbox_sts[3]);

			/* mbox_sts[2] = Old ACB state
			 * mbox_sts[3] = New ACB state */
			if ((mbox_sts[3] == ACB_STATE_VALID) &&
			    ((mbox_sts[2] == ACB_STATE_TENTATIVE) ||
			     (mbox_sts[2] == ACB_STATE_ACQUIRING)))
				set_bit(DPC_GET_DHCP_IP_ADDR, &ha->dpc_flags);
			else if ((mbox_sts[3] == ACB_STATE_ACQUIRING) &&
				 (mbox_sts[2] == ACB_STATE_VALID)) {
				if (is_qla80XX(ha))
					set_bit(DPC_RESET_HA_FW_CONTEXT,
						&ha->dpc_flags);
				else
					set_bit(DPC_RESET_HA, &ha->dpc_flags);
			} else if (mbox_sts[3] == ACB_STATE_UNCONFIGURED)
				complete(&ha->disable_acb_comp);
			break;

		case MBOX_ASTS_MAC_ADDRESS_CHANGED:
		case MBOX_ASTS_DNS:
			/* No action */
			DEBUG2(printk(KERN_INFO "scsi%ld: AEN %04x, "
				      "mbox_sts[1]=%04x, mbox_sts[2]=%04x\n",
				      ha->host_no, mbox_sts[0],
				      mbox_sts[1], mbox_sts[2]));
			break;

		case MBOX_ASTS_SELF_TEST_FAILED:
		case MBOX_ASTS_LOGIN_FAILED:
			/* No action */
			DEBUG2(printk("scsi%ld: AEN %04x, mbox_sts[1]=%04x, "
				      "mbox_sts[2]=%04x, mbox_sts[3]=%04x\n",
				      ha->host_no, mbox_sts[0], mbox_sts[1],
				      mbox_sts[2], mbox_sts[3]));
			break;

		case MBOX_ASTS_DATABASE_CHANGED:
			/* Queue AEN information and process it in the DPC
			 * routine */
			if (ha->aen_q_count > 0) {

				/* decrement available counter */
				ha->aen_q_count--;

				for (i = 0; i < MBOX_AEN_REG_COUNT; i++)
					ha->aen_q[ha->aen_in].mbox_sts[i] =
					    mbox_sts[i];

				/* print debug message */
				DEBUG2(printk("scsi%ld: AEN[%d] %04x queued "
					      "mb1:0x%x mb2:0x%x mb3:0x%x "
					      "mb4:0x%x mb5:0x%x\n",
					      ha->host_no, ha->aen_in,
					      mbox_sts[0], mbox_sts[1],
					      mbox_sts[2], mbox_sts[3],
					      mbox_sts[4], mbox_sts[5]));

				/* advance pointer */
				ha->aen_in++;
				if (ha->aen_in == MAX_AEN_ENTRIES)
					ha->aen_in = 0;

				/* The DPC routine will process the aen */
				set_bit(DPC_AEN, &ha->dpc_flags);
			} else {
				DEBUG2(printk("scsi%ld: %s: aen %04x, queue "
					      "overflowed!  AEN LOST!!\n",
					      ha->host_no, __func__,
					      mbox_sts[0]));

				DEBUG2(printk("scsi%ld: DUMP AEN QUEUE\n",
					      ha->host_no));

				for (i = 0; i < MAX_AEN_ENTRIES; i++) {
					DEBUG2(printk("AEN[%d] %04x %04x %04x "
						      "%04x\n", i,
						      ha->aen_q[i].mbox_sts[0],
						      ha->aen_q[i].mbox_sts[1],
						      ha->aen_q[i].mbox_sts[2],
						      ha->aen_q[i].mbox_sts[3]));
				}
			}
			break;

		case MBOX_ASTS_TXSCVR_INSERTED:
			DEBUG2(printk(KERN_WARNING
			    "scsi%ld: AEN %04x Transceiver"
			    " inserted\n", ha->host_no, mbox_sts[0]));
			break;

		case MBOX_ASTS_TXSCVR_REMOVED:
			DEBUG2(printk(KERN_WARNING
			    "scsi%ld: AEN %04x Transceiver"
			    " removed\n", ha->host_no, mbox_sts[0]));
			break;

		case MBOX_ASTS_IDC_REQUEST_NOTIFICATION:
		{
			uint32_t opcode;

			if (is_qla8032(ha)) {
				DEBUG2(ql4_printk(KERN_INFO, ha,
						  "scsi%ld: AEN %04x, mbox_sts[1]=%08x, mbox_sts[2]=%08x, mbox_sts[3]=%08x, mbox_sts[4]=%08x\n",
						  ha->host_no, mbox_sts[0],
						  mbox_sts[1], mbox_sts[2],
						  mbox_sts[3], mbox_sts[4]));
				opcode = mbox_sts[1] >> 16;
				if ((opcode == MBOX_CMD_SET_PORT_CONFIG) ||
				    (opcode == MBOX_CMD_PORT_RESET)) {
					set_bit(DPC_POST_IDC_ACK,
						&ha->dpc_flags);
					ha->idc_info.request_desc = mbox_sts[1];
					ha->idc_info.info1 = mbox_sts[2];
					ha->idc_info.info2 = mbox_sts[3];
					ha->idc_info.info3 = mbox_sts[4];
					qla4xxx_wake_dpc(ha);
				}
			}
			break;
		}

		case MBOX_ASTS_IDC_COMPLETE:
			if (is_qla8032(ha)) {
				DEBUG2(ql4_printk(KERN_INFO, ha,
						  "scsi%ld: AEN %04x, mbox_sts[1]=%08x, mbox_sts[2]=%08x, mbox_sts[3]=%08x, mbox_sts[4]=%08x\n",
						  ha->host_no, mbox_sts[0],
						  mbox_sts[1], mbox_sts[2],
						  mbox_sts[3], mbox_sts[4]));
				DEBUG2(ql4_printk(KERN_INFO, ha,
						  "scsi%ld: AEN %04x IDC Complete notification\n",
						  ha->host_no, mbox_sts[0]));

				if (qla4_83xx_loopback_in_progress(ha))
					set_bit(AF_LOOPBACK, &ha->flags);
				else
					clear_bit(AF_LOOPBACK, &ha->flags);
			}
			break;

		default:
			DEBUG2(printk(KERN_WARNING
				      "scsi%ld: AEN %04x UNKNOWN\n",
				      ha->host_no, mbox_sts[0]));
			break;
		}
	} else {
		DEBUG2(printk("scsi%ld: Unknown mailbox status %08X\n",
			      ha->host_no, mbox_status));

		ha->mbox_status[0] = mbox_status;
	}
}

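/**
 * qla4_83xx_interrupt_service_routine - isr
 * @ha: pointer to host adapter structure.
 * @intr_status: risc_intr status; non-zero indicates a mailbox/AEN
 *	interrupt, zero indicates response-queue work.
 *
 * hardware_lock locked upon entry. runs in interrupt context.
 **/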
void qla4_83xx_interrupt_service_routine(struct scsi_qla_host *ha,
					 uint32_t intr_status)
{
	/* Process mailbox/asynch event interrupt.*/
	if (intr_status) {
		qla4xxx_isr_decode_mailbox(ha,
				readl(&ha->qla4_83xx_reg->mailbox_out[0]));
		/* clear the interrupt */
		writel(0, &ha->qla4_83xx_reg->risc_intr);
	} else {
		qla4xxx_process_response_queue(ha);
	}

	/* clear the interrupt */
	writel(0, &ha->qla4_83xx_reg->mb_int_mask);
}

/**
 * qla4_82xx_interrupt_service_routine - isr
 * @ha: pointer to host adapter structure.
 * @intr_status: Host interrupt status.
 *
 * This is the main interrupt service routine.
 * hardware_lock locked upon entry. runs in interrupt context.
 **/
void qla4_82xx_interrupt_service_routine(struct scsi_qla_host *ha,
    uint32_t intr_status)
{
	/* Process response queue interrupt. */
	if (intr_status & HSRX_RISC_IOCB_INT)
		qla4xxx_process_response_queue(ha);

	/* Process mailbox/asynch event interrupt.*/
	if (intr_status & HSRX_RISC_MB_INT)
		qla4xxx_isr_decode_mailbox(ha,
		    readl(&ha->qla4_82xx_reg->mailbox_out[0]));

	/* clear the interrupt */
	writel(0, &ha->qla4_82xx_reg->host_int);
	readl(&ha->qla4_82xx_reg->host_int);
}

/**
 * qla4xxx_interrupt_service_routine - isr
 * @ha: pointer to host adapter structure.
 * @intr_status: Control/status register interrupt status.
 *
 * This is the main interrupt service routine.
 * hardware_lock locked upon entry. runs in interrupt context.
 **/
void qla4xxx_interrupt_service_routine(struct scsi_qla_host *ha,
				       uint32_t intr_status)
{
	/* Process response queue interrupt. */
	if (intr_status & CSR_SCSI_COMPLETION_INTR)
		qla4xxx_process_response_queue(ha);

	/* Process mailbox/asynch event interrupt.*/
	if (intr_status & CSR_SCSI_PROCESSOR_INTR) {
		qla4xxx_isr_decode_mailbox(ha,
					   readl(&ha->reg->mailbox[0]));

		/* Clear Mailbox Interrupt */
		writel(set_rmask(CSR_SCSI_PROCESSOR_INTR),
		       &ha->reg->ctrl_status);
		readl(&ha->reg->ctrl_status);
	}
}

/**
 * qla4_82xx_spurious_interrupt - processes spurious interrupt
 * @ha: pointer to host adapter structure.
 * @reqs_count: Number of requests already serviced for this interrupt.
 **/
static void qla4_82xx_spurious_interrupt(struct scsi_qla_host *ha,
    uint8_t reqs_count)
{
	if (reqs_count)
		return;

	DEBUG2(ql4_printk(KERN_INFO, ha, "Spurious Interrupt\n"));
	if (is_qla8022(ha)) {
		writel(0, &ha->qla4_82xx_reg->host_int);
		if (test_bit(AF_INTx_ENABLED, &ha->flags))
			qla4_82xx_wr_32(ha, ha->nx_legacy_intr.tgt_mask_reg,
			    0xfbff);
	}
	ha->spurious_int_count++;
}

/**
 * qla4xxx_intr_handler - hardware interrupt handler.
 * @irq: Unused
 * @dev_id: Pointer to host adapter structure
 **/
irqreturn_t qla4xxx_intr_handler(int irq, void *dev_id)
{
	struct scsi_qla_host *ha;
	uint32_t intr_status;
	unsigned long flags = 0;
	uint8_t reqs_count = 0;

	ha = (struct scsi_qla_host *) dev_id;
	if (!ha) {
		DEBUG2(printk(KERN_INFO
			      "qla4xxx: Interrupt with NULL host ptr\n"));
		return IRQ_NONE;
	}

	spin_lock_irqsave(&ha->hardware_lock, flags);

	ha->isr_count++;
	/*
	 * Repeatedly service interrupts up to a maximum of
	 * MAX_REQS_SERVICED_PER_INTR
	 */
	while (1) {
		/*
		 * Read interrupt status
		 */
		if (ha->isp_ops->rd_shdw_rsp_q_in(ha) !=
		    ha->response_out)
			intr_status = CSR_SCSI_COMPLETION_INTR;
		else
			intr_status = readl(&ha->reg->ctrl_status);

		if ((intr_status &
		    (CSR_SCSI_RESET_INTR|CSR_FATAL_ERROR|INTR_PENDING)) == 0) {
			if (reqs_count == 0)
				ha->spurious_int_count++;
			break;
		}

		if (intr_status & CSR_FATAL_ERROR) {
			DEBUG2(printk(KERN_INFO "scsi%ld: Fatal Error, "
				      "Status 0x%04x\n", ha->host_no,
				      readl(isp_port_error_status(ha))));

			/* Issue Soft Reset to clear this error condition.
			 * This will prevent the RISC from repeatedly
			 * interrupting the driver; thus, allowing the DPC to
			 * get scheduled to continue error recovery.
			 * NOTE: Disabling RISC interrupts does not work in
			 * this case, as CSR_FATAL_ERROR overrides
			 * CSR_SCSI_INTR_ENABLE */
			if ((readl(&ha->reg->ctrl_status) &
			     CSR_SCSI_RESET_INTR) == 0) {
				writel(set_rmask(CSR_SOFT_RESET),
				       &ha->reg->ctrl_status);
				readl(&ha->reg->ctrl_status);
			}

			writel(set_rmask(CSR_FATAL_ERROR),
			       &ha->reg->ctrl_status);
			readl(&ha->reg->ctrl_status);

			__qla4xxx_disable_intrs(ha);

			set_bit(DPC_RESET_HA, &ha->dpc_flags);

			break;
		} else if (intr_status & CSR_SCSI_RESET_INTR) {
			clear_bit(AF_ONLINE, &ha->flags);
			__qla4xxx_disable_intrs(ha);

			writel(set_rmask(CSR_SCSI_RESET_INTR),
			       &ha->reg->ctrl_status);
			readl(&ha->reg->ctrl_status);

			if (!test_bit(AF_HA_REMOVAL, &ha->flags))
				set_bit(DPC_RESET_HA_INTR, &ha->dpc_flags);

			break;
		} else if (intr_status & INTR_PENDING) {
			ha->isp_ops->interrupt_service_routine(ha, intr_status);
			ha->total_io_count++;
			if (++reqs_count == MAX_REQS_SERVICED_PER_INTR)
				break;
		}
	}

	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return IRQ_HANDLED;
}

/**
 * qla4_82xx_intr_handler - hardware interrupt handler.
 * @irq: Unused
 * @dev_id: Pointer to host adapter structure
 **/
irqreturn_t qla4_82xx_intr_handler(int irq, void *dev_id)
{
	struct scsi_qla_host *ha = dev_id;
	uint32_t intr_status;
	uint32_t status;
	unsigned long flags = 0;
	uint8_t reqs_count = 0;

	if (unlikely(pci_channel_offline(ha->pdev)))
		return IRQ_HANDLED;

	ha->isr_count++;
	status = qla4_82xx_rd_32(ha, ISR_INT_VECTOR);
	if (!(status & ha->nx_legacy_intr.int_vec_bit))
		return IRQ_NONE;

	status = qla4_82xx_rd_32(ha, ISR_INT_STATE_REG);
	if (!ISR_IS_LEGACY_INTR_TRIGGERED(status)) {
		DEBUG7(ql4_printk(KERN_INFO, ha,
				  "%s legacy Int not triggered\n", __func__));
		return IRQ_NONE;
	}

	/* clear the interrupt */
	qla4_82xx_wr_32(ha, ha->nx_legacy_intr.tgt_status_reg, 0xffffffff);

	/* read twice to ensure write is flushed */
	qla4_82xx_rd_32(ha, ISR_INT_VECTOR);
	qla4_82xx_rd_32(ha, ISR_INT_VECTOR);

	spin_lock_irqsave(&ha->hardware_lock, flags);
	while (1) {
		if (!(readl(&ha->qla4_82xx_reg->host_int) &
		    ISRX_82XX_RISC_INT)) {
			qla4_82xx_spurious_interrupt(ha, reqs_count);
			break;
		}
		intr_status = readl(&ha->qla4_82xx_reg->host_status);
		if ((intr_status &
		    (HSRX_RISC_MB_INT | HSRX_RISC_IOCB_INT)) == 0) {
			qla4_82xx_spurious_interrupt(ha, reqs_count);
			break;
		}

		ha->isp_ops->interrupt_service_routine(ha, intr_status);

		/* Enable Interrupt */
		qla4_82xx_wr_32(ha, ha->nx_legacy_intr.tgt_mask_reg, 0xfbff);

		if (++reqs_count == MAX_REQS_SERVICED_PER_INTR)
			break;
	}

	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	return IRQ_HANDLED;
}

#define LEG_INT_PTR_B31		(1 << 31)
#define LEG_INT_PTR_B30		(1 << 30)
#define PF_BITS_MASK		(0xF << 16)

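/* Legacy Interrupt Pointer register layout (83xx), as used by the
 * handler below: bit 31 = interrupt valid, bit 30 = interrupt still
 * asserted (polled while de-asserting), and bits 19:16 = the PCIe
 * function that owns the interrupt. */
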
/**
 * qla4_83xx_intr_handler - hardware interrupt handler.
 * @irq: Unused
 * @dev_id: Pointer to host adapter structure
 **/
irqreturn_t qla4_83xx_intr_handler(int irq, void *dev_id)
{
	struct scsi_qla_host *ha = dev_id;
	uint32_t leg_int_ptr = 0;
	unsigned long flags = 0;

	ha->isr_count++;
	leg_int_ptr = readl(&ha->qla4_83xx_reg->leg_int_ptr);

	/* Legacy interrupt is valid if bit31 of leg_int_ptr is set */
	if (!(leg_int_ptr & LEG_INT_PTR_B31)) {
		DEBUG7(ql4_printk(KERN_ERR, ha,
				  "%s: Legacy Interrupt Bit 31 not set, spurious interrupt!\n",
				  __func__));
		return IRQ_NONE;
	}

	/* Validate the PCIe function ID set in leg_int_ptr bits [19..16] */
	if ((leg_int_ptr & PF_BITS_MASK) != ha->pf_bit) {
		DEBUG7(ql4_printk(KERN_ERR, ha,
				  "%s: Incorrect function ID 0x%x in legacy interrupt register, ha->pf_bit = 0x%x\n",
				  __func__, (leg_int_ptr & PF_BITS_MASK),
				  ha->pf_bit));
		return IRQ_NONE;
	}

	/* To de-assert legacy interrupt, write 0 to Legacy Interrupt Trigger
	 * Control register and poll till Legacy Interrupt Pointer register
	 * bit30 is 0.
	 */
	writel(0, &ha->qla4_83xx_reg->leg_int_trig);
	do {
		leg_int_ptr = readl(&ha->qla4_83xx_reg->leg_int_ptr);
		if ((leg_int_ptr & PF_BITS_MASK) != ha->pf_bit)
			break;
	} while (leg_int_ptr & LEG_INT_PTR_B30);

	spin_lock_irqsave(&ha->hardware_lock, flags);
	leg_int_ptr = readl(&ha->qla4_83xx_reg->risc_intr);
	ha->isp_ops->interrupt_service_routine(ha, leg_int_ptr);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return IRQ_HANDLED;
}

irqreturn_t
qla4_8xxx_msi_handler(int irq, void *dev_id)
{
	struct scsi_qla_host *ha;

	ha = (struct scsi_qla_host *) dev_id;
	if (!ha) {
		DEBUG2(printk(KERN_INFO
		    "qla4xxx: MSI: Interrupt with NULL host ptr\n"));
		return IRQ_NONE;
	}

	ha->isr_count++;
	/* clear the interrupt */
	qla4_82xx_wr_32(ha, ha->nx_legacy_intr.tgt_status_reg, 0xffffffff);

	/* read twice to ensure write is flushed */
	qla4_82xx_rd_32(ha, ISR_INT_VECTOR);
	qla4_82xx_rd_32(ha, ISR_INT_VECTOR);

	return qla4_8xxx_default_intr_handler(irq, dev_id);
}

static irqreturn_t qla4_83xx_mailbox_intr_handler(int irq, void *dev_id)
{
	struct scsi_qla_host *ha = dev_id;
	unsigned long flags;
	uint32_t ival = 0;

	spin_lock_irqsave(&ha->hardware_lock, flags);

	ival = readl(&ha->qla4_83xx_reg->risc_intr);
	if (ival == 0) {
		ql4_printk(KERN_INFO, ha,
			   "%s: It is a spurious mailbox interrupt!\n",
			   __func__);
		ival = readl(&ha->qla4_83xx_reg->mb_int_mask);
		ival &= ~INT_MASK_FW_MB;
		writel(ival, &ha->qla4_83xx_reg->mb_int_mask);
		goto exit;
	}

	qla4xxx_isr_decode_mailbox(ha,
				   readl(&ha->qla4_83xx_reg->mailbox_out[0]));
	writel(0, &ha->qla4_83xx_reg->risc_intr);
	ival = readl(&ha->qla4_83xx_reg->mb_int_mask);
	ival &= ~INT_MASK_FW_MB;
	writel(ival, &ha->qla4_83xx_reg->mb_int_mask);
	ha->isr_count++;
exit:
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	return IRQ_HANDLED;
}

/**
 * qla4_8xxx_default_intr_handler - hardware interrupt handler.
 * @irq: Unused
 * @dev_id: Pointer to host adapter structure
 *
 * This interrupt handler is called directly for MSI-X, and
 * called indirectly for MSI.
 **/
irqreturn_t
qla4_8xxx_default_intr_handler(int irq, void *dev_id)
{
	struct scsi_qla_host *ha = dev_id;
	unsigned long flags;
	uint32_t intr_status;
	uint8_t reqs_count = 0;

	if (is_qla8032(ha)) {
		qla4_83xx_mailbox_intr_handler(irq, dev_id);
	} else {
		spin_lock_irqsave(&ha->hardware_lock, flags);
		while (1) {
			if (!(readl(&ha->qla4_82xx_reg->host_int) &
			    ISRX_82XX_RISC_INT)) {
				qla4_82xx_spurious_interrupt(ha, reqs_count);
				break;
			}

			intr_status = readl(&ha->qla4_82xx_reg->host_status);
			if ((intr_status &
			    (HSRX_RISC_MB_INT | HSRX_RISC_IOCB_INT)) == 0) {
				qla4_82xx_spurious_interrupt(ha, reqs_count);
				break;
			}

			ha->isp_ops->interrupt_service_routine(ha, intr_status);

			if (++reqs_count == MAX_REQS_SERVICED_PER_INTR)
				break;
		}
		ha->isr_count++;
		spin_unlock_irqrestore(&ha->hardware_lock, flags);
	}
	return IRQ_HANDLED;
}

irqreturn_t
qla4_8xxx_msix_rsp_q(int irq, void *dev_id)
{
	struct scsi_qla_host *ha = dev_id;
	unsigned long flags;
	uint32_t ival = 0;

	spin_lock_irqsave(&ha->hardware_lock, flags);
	if (is_qla8032(ha)) {
		ival = readl(&ha->qla4_83xx_reg->iocb_int_mask);
		if (ival == 0) {
			ql4_printk(KERN_INFO, ha, "%s: It is a spurious iocb interrupt!\n",
				   __func__);
			goto exit_msix_rsp_q;
		}
		qla4xxx_process_response_queue(ha);
		writel(0, &ha->qla4_83xx_reg->iocb_int_mask);
	} else {
		qla4xxx_process_response_queue(ha);
		writel(0, &ha->qla4_82xx_reg->host_int);
	}
	ha->isr_count++;
exit_msix_rsp_q:
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	return IRQ_HANDLED;
}

/**
 * qla4xxx_process_aen - processes AENs generated by firmware
 * @ha: pointer to host adapter structure.
 * @process_aen: type of AENs to process
 *
 * Processes specific types of Asynchronous Events generated by firmware.
 * The type of AENs to process is specified by process_aen and can be
 *	PROCESS_ALL_AENS	 0
 *	FLUSH_DDB_CHANGED_AENS	 1
 *	RELOGIN_DDB_CHANGED_AENS 2
 **/
void qla4xxx_process_aen(struct scsi_qla_host *ha, uint8_t process_aen)
{
	uint32_t mbox_sts[MBOX_AEN_REG_COUNT];
	struct aen *aen;
	int i;
	unsigned long flags;

	spin_lock_irqsave(&ha->hardware_lock, flags);
	while (ha->aen_out != ha->aen_in) {
		aen = &ha->aen_q[ha->aen_out];
		/* copy aen information to local structure */
		for (i = 0; i < MBOX_AEN_REG_COUNT; i++)
			mbox_sts[i] = aen->mbox_sts[i];

		ha->aen_q_count++;
		ha->aen_out++;

		if (ha->aen_out == MAX_AEN_ENTRIES)
			ha->aen_out = 0;

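		/* Drop hardware_lock while the AEN is handled: the queue
		 * indices were already advanced above, so the ddb-changed
		 * handler below runs without it; the lock is retaken before
		 * the next queue check. */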
		spin_unlock_irqrestore(&ha->hardware_lock, flags);

		DEBUG2(printk("qla4xxx(%ld): AEN[%d]=0x%08x, mbx1=0x%08x mbx2=0x%08x"
			" mbx3=0x%08x mbx4=0x%08x\n", ha->host_no,
			(ha->aen_out ? (ha->aen_out - 1) : (MAX_AEN_ENTRIES - 1)),
			mbox_sts[0], mbox_sts[1], mbox_sts[2],
			mbox_sts[3], mbox_sts[4]));

		switch (mbox_sts[0]) {
		case MBOX_ASTS_DATABASE_CHANGED:
			switch (process_aen) {
			case FLUSH_DDB_CHANGED_AENS:
				DEBUG2(printk("scsi%ld: AEN[%d] %04x, index "
					      "[%d] state=%04x FLUSHED!\n",
					      ha->host_no, ha->aen_out,
					      mbox_sts[0], mbox_sts[2],
					      mbox_sts[3]));
				break;
			case PROCESS_ALL_AENS:
			default:
				/* Specific device. */
				if (mbox_sts[1] == 1)
					qla4xxx_process_ddb_changed(ha,
						mbox_sts[2], mbox_sts[3],
						mbox_sts[4]);
				break;
			}
		}
		spin_lock_irqsave(&ha->hardware_lock, flags);
	}
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
}

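/*
 * Interrupt attach policy, as implemented below: ISP40xx uses INTx only;
 * for ISP82xx/83xx the ql4xenablemsix module parameter selects MSI-X (1),
 * MSI (2), or INTx (any other value).  On failure the ladder falls back
 * MSI-X -> MSI -> INTx, except that ISP8324 skips MSI entirely and
 * ISP8022 is never allowed to fall back to INTx.
 */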
int qla4xxx_request_irqs(struct scsi_qla_host *ha)
{
	int ret;

	if (is_qla40XX(ha))
		goto try_intx;

	if (ql4xenablemsix == 2) {
		/* Note: MSI Interrupts not supported for ISP8324 */
		if (is_qla8032(ha)) {
			ql4_printk(KERN_INFO, ha, "%s: MSI Interrupts not supported for ISP8324, falling back to INTx mode\n",
				   __func__);
			goto try_intx;
		}
		goto try_msi;
	}

	if (ql4xenablemsix != 1)
		goto try_intx;

	/* Trying MSI-X */
	ret = qla4_8xxx_enable_msix(ha);
	if (!ret) {
		DEBUG2(ql4_printk(KERN_INFO, ha,
		    "MSI-X: Enabled (0x%X).\n", ha->revision_id));
		goto irq_attached;
	} else {
		if (is_qla8032(ha)) {
			ql4_printk(KERN_INFO, ha, "%s: ISP8324: MSI-X: falling back to INTx mode. ret = %d\n",
				   __func__, ret);
			goto try_intx;
		}
	}

	ql4_printk(KERN_WARNING, ha,
	    "MSI-X: Falling back to MSI mode -- %d.\n", ret);

try_msi:
	/* Trying MSI */
	ret = pci_enable_msi(ha->pdev);
	if (!ret) {
		ret = request_irq(ha->pdev->irq, qla4_8xxx_msi_handler,
			0, DRIVER_NAME, ha);
		if (!ret) {
			DEBUG2(ql4_printk(KERN_INFO, ha, "MSI: Enabled.\n"));
			set_bit(AF_MSI_ENABLED, &ha->flags);
			goto irq_attached;
		} else {
			ql4_printk(KERN_WARNING, ha,
			    "MSI: Failed to reserve interrupt %d, "
			    "already in use.\n", ha->pdev->irq);
			pci_disable_msi(ha->pdev);
		}
	}

	/*
	 * Prevent interrupts from falling back to INTx mode in cases where
	 * interrupts cannot get acquired through MSI-X or MSI mode.
	 */
	if (is_qla8022(ha)) {
		ql4_printk(KERN_WARNING, ha, "IRQ not attached -- %d.\n", ret);
		goto irq_not_attached;
	}
try_intx:
	/* Trying INTx */
	ret = request_irq(ha->pdev->irq, ha->isp_ops->intr_handler,
	    IRQF_SHARED, DRIVER_NAME, ha);
	if (!ret) {
		DEBUG2(ql4_printk(KERN_INFO, ha, "INTx: Enabled.\n"));
		set_bit(AF_INTx_ENABLED, &ha->flags);
		goto irq_attached;
	} else {
		ql4_printk(KERN_WARNING, ha,
		    "INTx: Failed to reserve interrupt %d, already in"
		    " use.\n", ha->pdev->irq);
		goto irq_not_attached;
	}

irq_attached:
	set_bit(AF_IRQ_ATTACHED, &ha->flags);
	ha->host->irq = ha->pdev->irq;
	ql4_printk(KERN_INFO, ha, "%s: irq %d attached\n",
	    __func__, ha->pdev->irq);
irq_not_attached:
	return ret;
}

void qla4xxx_free_irqs(struct scsi_qla_host *ha)
{
	if (test_and_clear_bit(AF_IRQ_ATTACHED, &ha->flags)) {
		if (test_bit(AF_MSIX_ENABLED, &ha->flags)) {
			qla4_8xxx_disable_msix(ha);
		} else if (test_and_clear_bit(AF_MSI_ENABLED, &ha->flags)) {
			free_irq(ha->pdev->irq, ha);
			pci_disable_msi(ha->pdev);
		} else if (test_and_clear_bit(AF_INTx_ENABLED, &ha->flags)) {
			free_irq(ha->pdev->irq, ha);
		}
	}
}
1486