mpt2sas_base.c revision cef7a12cd1e0647ce2b566a76bbf4cd132b9118d
1/*
2 * This is the Fusion MPT base driver providing common API layer interface
3 * for access to MPT (Message Passing Technology) firmware.
4 *
5 * This code is based on drivers/scsi/mpt2sas/mpt2_base.c
6 * Copyright (C) 2007-2009  LSI Corporation
7 *  (mailto:DL-MPTFusionLinux@lsi.com)
8 *
9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License
11 * as published by the Free Software Foundation; either version 2
12 * of the License, or (at your option) any later version.
13 *
14 * This program is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
17 * GNU General Public License for more details.
18 *
19 * NO WARRANTY
20 * THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
21 * CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
22 * LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
23 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
24 * solely responsible for determining the appropriateness of using and
25 * distributing the Program and assumes all risks associated with its
26 * exercise of rights under this Agreement, including but not limited to
27 * the risks and costs of program errors, damage to or loss of data,
28 * programs or equipment, and unavailability or interruption of operations.
29
30 * DISCLAIMER OF LIABILITY
31 * NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
32 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
33 * DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
34 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
35 * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
36 * USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
37 * HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
38
39 * You should have received a copy of the GNU General Public License
40 * along with this program; if not, write to the Free Software
41 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301,
42 * USA.
43 */
44
45#include <linux/version.h>
46#include <linux/kernel.h>
47#include <linux/module.h>
48#include <linux/errno.h>
49#include <linux/init.h>
50#include <linux/slab.h>
51#include <linux/types.h>
52#include <linux/pci.h>
53#include <linux/kdev_t.h>
54#include <linux/blkdev.h>
55#include <linux/delay.h>
56#include <linux/interrupt.h>
57#include <linux/dma-mapping.h>
58#include <linux/sort.h>
59#include <linux/io.h>
60
61#include "mpt2sas_base.h"
62
63static MPT_CALLBACK	mpt_callbacks[MPT_MAX_CALLBACKS];
64
65#define FAULT_POLLING_INTERVAL 1000 /* in milliseconds */
66#define MPT2SAS_MAX_REQUEST_QUEUE 600 /* maximum controller queue depth */
67
68static int max_queue_depth = -1;
69module_param(max_queue_depth, int, 0);
70MODULE_PARM_DESC(max_queue_depth, " max controller queue depth ");
71
72static int max_sgl_entries = -1;
73module_param(max_sgl_entries, int, 0);
74MODULE_PARM_DESC(max_sgl_entries, " max sg entries ");
75
76static int msix_disable = -1;
77module_param(msix_disable, int, 0);
78MODULE_PARM_DESC(msix_disable, " disable msix routed interrupts (default=0)");
79
80int mpt2sas_fwfault_debug;
81MODULE_PARM_DESC(mpt2sas_fwfault_debug, " enable detection of firmware fault "
82    "and halt firmware - (default=0)");
83
84/**
85 * _scsih_set_fwfault_debug - global setting of ioc->fwfault_debug.
86 *
87 */
88static int
89_scsih_set_fwfault_debug(const char *val, struct kernel_param *kp)
90{
91	int ret = param_set_int(val, kp);
92	struct MPT2SAS_ADAPTER *ioc;
93
94	if (ret)
95		return ret;
96
97	printk(KERN_INFO "setting fwfault_debug(%d)\n",
98				mpt2sas_fwfault_debug);
99	list_for_each_entry(ioc, &mpt2sas_ioc_list, list)
100		ioc->fwfault_debug = mpt2sas_fwfault_debug;
101	return 0;
102}
103module_param_call(mpt2sas_fwfault_debug, _scsih_set_fwfault_debug,
104    param_get_int, &mpt2sas_fwfault_debug, 0644);
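/*
 * Example (sketch): the parameter is writable at runtime through sysfs,
 * e.g. "echo 1 > /sys/module/mpt2sas/parameters/mpt2sas_fwfault_debug";
 * the callback above then propagates the new value to every ioc on
 * mpt2sas_ioc_list.
 */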
105
106/**
107 * _base_fault_reset_work - workq handling ioc fault conditions
108 * @work: input argument, used to derive ioc
109 * Context: sleep.
110 *
111 * Return nothing.
112 */
113static void
114_base_fault_reset_work(struct work_struct *work)
115{
116	struct MPT2SAS_ADAPTER *ioc =
117	    container_of(work, struct MPT2SAS_ADAPTER, fault_reset_work.work);
118	unsigned long	 flags;
119	u32 doorbell;
120	int rc;
121
122	spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
123	if (ioc->shost_recovery)
124		goto rearm_timer;
125	spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
126
127	doorbell = mpt2sas_base_get_iocstate(ioc, 0);
128	if ((doorbell & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) {
129		rc = mpt2sas_base_hard_reset_handler(ioc, CAN_SLEEP,
130		    FORCE_BIG_HAMMER);
131		printk(MPT2SAS_WARN_FMT "%s: hard reset: %s\n", ioc->name,
132		    __func__, (rc == 0) ? "success" : "failed");
133		doorbell = mpt2sas_base_get_iocstate(ioc, 0);
134		if ((doorbell & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT)
135			mpt2sas_base_fault_info(ioc, doorbell &
136			    MPI2_DOORBELL_DATA_MASK);
137	}
138
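	/* re-acquire the lock so the rearm path below always runs with
	 * ioc_reset_in_progress_lock held, whether reached through the
	 * early-exit goto above or by falling through after the reset */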
139	spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
140 rearm_timer:
141	if (ioc->fault_reset_work_q)
142		queue_delayed_work(ioc->fault_reset_work_q,
143		    &ioc->fault_reset_work,
144		    msecs_to_jiffies(FAULT_POLLING_INTERVAL));
145	spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
146}
147
148/**
149 * mpt2sas_base_start_watchdog - start the fault_reset_work_q
150 * @ioc: per adapter object
151 * Context: sleep.
152 *
153 * Return nothing.
154 */
155void
156mpt2sas_base_start_watchdog(struct MPT2SAS_ADAPTER *ioc)
157{
158	unsigned long	 flags;
159
160	if (ioc->fault_reset_work_q)
161		return;
162
163	/* initialize fault polling */
164	INIT_DELAYED_WORK(&ioc->fault_reset_work, _base_fault_reset_work);
165	snprintf(ioc->fault_reset_work_q_name,
166	    sizeof(ioc->fault_reset_work_q_name), "poll_%d_status", ioc->id);
167	ioc->fault_reset_work_q =
168		create_singlethread_workqueue(ioc->fault_reset_work_q_name);
169	if (!ioc->fault_reset_work_q) {
170		printk(MPT2SAS_ERR_FMT "%s: failed (line=%d)\n",
171		    ioc->name, __func__, __LINE__);
172		return;
173	}
174	spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
175	if (ioc->fault_reset_work_q)
176		queue_delayed_work(ioc->fault_reset_work_q,
177		    &ioc->fault_reset_work,
178		    msecs_to_jiffies(FAULT_POLLING_INTERVAL));
179	spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
180}
181
182/**
183 * mpt2sas_base_stop_watchdog - stop the fault_reset_work_q
184 * @ioc: per adapter object
185 * Context: sleep.
186 *
187 * Return nothing.
188 */
189void
190mpt2sas_base_stop_watchdog(struct MPT2SAS_ADAPTER *ioc)
191{
192	unsigned long	 flags;
193	struct workqueue_struct *wq;
194
195	spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
196	wq = ioc->fault_reset_work_q;
197	ioc->fault_reset_work_q = NULL;
198	spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
199	if (wq) {
200		if (!cancel_delayed_work(&ioc->fault_reset_work))
201			flush_workqueue(wq);
202		destroy_workqueue(wq);
203	}
204}
205
206/**
207 * mpt2sas_base_fault_info - verbose translation of firmware FAULT code
208 * @ioc: per adapter object
209 * @fault_code: fault code
210 *
211 * Return nothing.
212 */
213void
214mpt2sas_base_fault_info(struct MPT2SAS_ADAPTER *ioc , u16 fault_code)
215{
216	printk(MPT2SAS_ERR_FMT "fault_state(0x%04x)!\n",
217	    ioc->name, fault_code);
218}
219
220/**
221 * mpt2sas_halt_firmware - halts the mpt controller firmware
222 * @ioc: per adapter object
223 *
224 * For debugging timeout related issues.  Writing 0xC0FFEE00
225 * to the doorbell register will halt controller firmware. With
226 * both the driver and firmware stopped, the end user can
227 * obtain a ring buffer from the controller UART.
228 */
229void
230mpt2sas_halt_firmware(struct MPT2SAS_ADAPTER *ioc)
231{
232	u32 doorbell;
233
234	if (!ioc->fwfault_debug)
235		return;
236
237	dump_stack();
238
239	doorbell = readl(&ioc->chip->Doorbell);
240	if ((doorbell & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT)
241		mpt2sas_base_fault_info(ioc , doorbell);
242	else {
243		writel(0xC0FFEE00, &ioc->chip->Doorbell);
244		printk(MPT2SAS_ERR_FMT "Firmware is halted due to command "
245		    "timeout\n", ioc->name);
246	}
247
248	panic("panic in %s\n", __func__);
249}
250
251#ifdef CONFIG_SCSI_MPT2SAS_LOGGING
252/**
253 * _base_sas_ioc_info - verbose translation of the ioc status
254 * @ioc: per adapter object
255 * @mpi_reply: reply mf payload returned from firmware
256 * @request_hdr: request mf
257 *
258 * Return nothing.
259 */
260static void
261_base_sas_ioc_info(struct MPT2SAS_ADAPTER *ioc, MPI2DefaultReply_t *mpi_reply,
262     MPI2RequestHeader_t *request_hdr)
263{
264	u16 ioc_status = le16_to_cpu(mpi_reply->IOCStatus) &
265	    MPI2_IOCSTATUS_MASK;
266	char *desc = NULL;
267	u16 frame_sz;
268	char *func_str = NULL;
269
270	/* SCSI_IO, RAID_PASS are handled from _scsih_scsi_ioc_info */
271	if (request_hdr->Function == MPI2_FUNCTION_SCSI_IO_REQUEST ||
272	    request_hdr->Function == MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH ||
273	    request_hdr->Function == MPI2_FUNCTION_EVENT_NOTIFICATION)
274		return;
275
276	switch (ioc_status) {
277
278/****************************************************************************
279*  Common IOCStatus values for all replies
280****************************************************************************/
281
282	case MPI2_IOCSTATUS_INVALID_FUNCTION:
283		desc = "invalid function";
284		break;
285	case MPI2_IOCSTATUS_BUSY:
286		desc = "busy";
287		break;
288	case MPI2_IOCSTATUS_INVALID_SGL:
289		desc = "invalid sgl";
290		break;
291	case MPI2_IOCSTATUS_INTERNAL_ERROR:
292		desc = "internal error";
293		break;
294	case MPI2_IOCSTATUS_INVALID_VPID:
295		desc = "invalid vpid";
296		break;
297	case MPI2_IOCSTATUS_INSUFFICIENT_RESOURCES:
298		desc = "insufficient resources";
299		break;
300	case MPI2_IOCSTATUS_INVALID_FIELD:
301		desc = "invalid field";
302		break;
303	case MPI2_IOCSTATUS_INVALID_STATE:
304		desc = "invalid state";
305		break;
306	case MPI2_IOCSTATUS_OP_STATE_NOT_SUPPORTED:
307		desc = "op state not supported";
308		break;
309
310/****************************************************************************
311*  Config IOCStatus values
312****************************************************************************/
313
314	case MPI2_IOCSTATUS_CONFIG_INVALID_ACTION:
315		desc = "config invalid action";
316		break;
317	case MPI2_IOCSTATUS_CONFIG_INVALID_TYPE:
318		desc = "config invalid type";
319		break;
320	case MPI2_IOCSTATUS_CONFIG_INVALID_PAGE:
321		desc = "config invalid page";
322		break;
323	case MPI2_IOCSTATUS_CONFIG_INVALID_DATA:
324		desc = "config invalid data";
325		break;
326	case MPI2_IOCSTATUS_CONFIG_NO_DEFAULTS:
327		desc = "config no defaults";
328		break;
329	case MPI2_IOCSTATUS_CONFIG_CANT_COMMIT:
330		desc = "config cant commit";
331		break;
332
333/****************************************************************************
334*  SCSI IO Reply
335****************************************************************************/
336
337	case MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR:
338	case MPI2_IOCSTATUS_SCSI_INVALID_DEVHANDLE:
339	case MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
340	case MPI2_IOCSTATUS_SCSI_DATA_OVERRUN:
341	case MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN:
342	case MPI2_IOCSTATUS_SCSI_IO_DATA_ERROR:
343	case MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR:
344	case MPI2_IOCSTATUS_SCSI_TASK_TERMINATED:
345	case MPI2_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
346	case MPI2_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
347	case MPI2_IOCSTATUS_SCSI_IOC_TERMINATED:
348	case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED:
349		break;
350
351/****************************************************************************
352*  For use by SCSI Initiator and SCSI Target end-to-end data protection
353****************************************************************************/
354
355	case MPI2_IOCSTATUS_EEDP_GUARD_ERROR:
356		desc = "eedp guard error";
357		break;
358	case MPI2_IOCSTATUS_EEDP_REF_TAG_ERROR:
359		desc = "eedp ref tag error";
360		break;
361	case MPI2_IOCSTATUS_EEDP_APP_TAG_ERROR:
362		desc = "eedp app tag error";
363		break;
364
365/****************************************************************************
366*  SCSI Target values
367****************************************************************************/
368
369	case MPI2_IOCSTATUS_TARGET_INVALID_IO_INDEX:
370		desc = "target invalid io index";
371		break;
372	case MPI2_IOCSTATUS_TARGET_ABORTED:
373		desc = "target aborted";
374		break;
375	case MPI2_IOCSTATUS_TARGET_NO_CONN_RETRYABLE:
376		desc = "target no conn retryable";
377		break;
378	case MPI2_IOCSTATUS_TARGET_NO_CONNECTION:
379		desc = "target no connection";
380		break;
381	case MPI2_IOCSTATUS_TARGET_XFER_COUNT_MISMATCH:
382		desc = "target xfer count mismatch";
383		break;
384	case MPI2_IOCSTATUS_TARGET_DATA_OFFSET_ERROR:
385		desc = "target data offset error";
386		break;
387	case MPI2_IOCSTATUS_TARGET_TOO_MUCH_WRITE_DATA:
388		desc = "target too much write data";
389		break;
390	case MPI2_IOCSTATUS_TARGET_IU_TOO_SHORT:
391		desc = "target iu too short";
392		break;
393	case MPI2_IOCSTATUS_TARGET_ACK_NAK_TIMEOUT:
394		desc = "target ack nak timeout";
395		break;
396	case MPI2_IOCSTATUS_TARGET_NAK_RECEIVED:
397		desc = "target nak received";
398		break;
399
400/****************************************************************************
401*  Serial Attached SCSI values
402****************************************************************************/
403
404	case MPI2_IOCSTATUS_SAS_SMP_REQUEST_FAILED:
405		desc = "smp request failed";
406		break;
407	case MPI2_IOCSTATUS_SAS_SMP_DATA_OVERRUN:
408		desc = "smp data overrun";
409		break;
410
411/****************************************************************************
412*  Diagnostic Buffer Post / Diagnostic Release values
413****************************************************************************/
414
415	case MPI2_IOCSTATUS_DIAGNOSTIC_RELEASED:
416		desc = "diagnostic released";
417		break;
418	default:
419		break;
420	}
421
422	if (!desc)
423		return;
424
425	switch (request_hdr->Function) {
426	case MPI2_FUNCTION_CONFIG:
427		frame_sz = sizeof(Mpi2ConfigRequest_t) + ioc->sge_size;
428		func_str = "config_page";
429		break;
430	case MPI2_FUNCTION_SCSI_TASK_MGMT:
431		frame_sz = sizeof(Mpi2SCSITaskManagementRequest_t);
432		func_str = "task_mgmt";
433		break;
434	case MPI2_FUNCTION_SAS_IO_UNIT_CONTROL:
435		frame_sz = sizeof(Mpi2SasIoUnitControlRequest_t);
436		func_str = "sas_iounit_ctl";
437		break;
438	case MPI2_FUNCTION_SCSI_ENCLOSURE_PROCESSOR:
439		frame_sz = sizeof(Mpi2SepRequest_t);
440		func_str = "enclosure";
441		break;
442	case MPI2_FUNCTION_IOC_INIT:
443		frame_sz = sizeof(Mpi2IOCInitRequest_t);
444		func_str = "ioc_init";
445		break;
446	case MPI2_FUNCTION_PORT_ENABLE:
447		frame_sz = sizeof(Mpi2PortEnableRequest_t);
448		func_str = "port_enable";
449		break;
450	case MPI2_FUNCTION_SMP_PASSTHROUGH:
451		frame_sz = sizeof(Mpi2SmpPassthroughRequest_t) + ioc->sge_size;
452		func_str = "smp_passthru";
453		break;
454	default:
455		frame_sz = 32;
456		func_str = "unknown";
457		break;
458	}
459
460	printk(MPT2SAS_WARN_FMT "ioc_status: %s(0x%04x), request(0x%p),"
461	    " (%s)\n", ioc->name, desc, ioc_status, request_hdr, func_str);
462
463	_debug_dump_mf(request_hdr, frame_sz/4);
464}
465
466/**
467 * _base_display_event_data - verbose translation of firmware async events
468 * @ioc: per adapter object
469 * @mpi_reply: reply mf payload returned from firmware
470 *
471 * Return nothing.
472 */
473static void
474_base_display_event_data(struct MPT2SAS_ADAPTER *ioc,
475    Mpi2EventNotificationReply_t *mpi_reply)
476{
477	char *desc = NULL;
478	u16 event;
479
480	if (!(ioc->logging_level & MPT_DEBUG_EVENTS))
481		return;
482
483	event = le16_to_cpu(mpi_reply->Event);
484
485	switch (event) {
486	case MPI2_EVENT_LOG_DATA:
487		desc = "Log Data";
488		break;
489	case MPI2_EVENT_STATE_CHANGE:
490		desc = "Status Change";
491		break;
492	case MPI2_EVENT_HARD_RESET_RECEIVED:
493		desc = "Hard Reset Received";
494		break;
495	case MPI2_EVENT_EVENT_CHANGE:
496		desc = "Event Change";
497		break;
498	case MPI2_EVENT_TASK_SET_FULL:
499		desc = "Task Set Full";
500		break;
501	case MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE:
502		desc = "Device Status Change";
503		break;
504	case MPI2_EVENT_IR_OPERATION_STATUS:
505		desc = "IR Operation Status";
506		break;
507	case MPI2_EVENT_SAS_DISCOVERY:
508		desc =  "Discovery";
509		break;
510	case MPI2_EVENT_SAS_BROADCAST_PRIMITIVE:
511		desc = "SAS Broadcast Primitive";
512		break;
513	case MPI2_EVENT_SAS_INIT_DEVICE_STATUS_CHANGE:
514		desc = "SAS Init Device Status Change";
515		break;
516	case MPI2_EVENT_SAS_INIT_TABLE_OVERFLOW:
517		desc = "SAS Init Table Overflow";
518		break;
519	case MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST:
520		desc = "SAS Topology Change List";
521		break;
522	case MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE:
523		desc = "SAS Enclosure Device Status Change";
524		break;
525	case MPI2_EVENT_IR_VOLUME:
526		desc = "IR Volume";
527		break;
528	case MPI2_EVENT_IR_PHYSICAL_DISK:
529		desc = "IR Physical Disk";
530		break;
531	case MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST:
532		desc = "IR Configuration Change List";
533		break;
534	case MPI2_EVENT_LOG_ENTRY_ADDED:
535		desc = "Log Entry Added";
536		break;
537	}
538
539	if (!desc)
540		return;
541
542	printk(MPT2SAS_INFO_FMT "%s\n", ioc->name, desc);
543}
544#endif
545
546/**
547 * _base_sas_log_info - verbose translation of firmware log info
548 * @ioc: per adapter object
549 * @log_info: log info
550 *
551 * Return nothing.
552 */
553static void
554_base_sas_log_info(struct MPT2SAS_ADAPTER *ioc , u32 log_info)
555{
556	union loginfo_type {
557		u32	loginfo;
558		struct {
559			u32	subcode:16;
560			u32	code:8;
561			u32	originator:4;
562			u32	bus_type:4;
563		} dw;
564	};
565	union loginfo_type sas_loginfo;
566	char *originator_str = NULL;
567
568	sas_loginfo.loginfo = log_info;
569	if (sas_loginfo.dw.bus_type != 3 /*SAS*/)
570		return;
571
572	/* each nexus loss loginfo */
573	if (log_info == 0x31170000)
574		return;
575
576	/* eat the loginfos associated with task aborts */
577	if (ioc->ignore_loginfos && (log_info == 0x30050000 || log_info ==
578	    0x31140000 || log_info == 0x31130000))
579		return;
580
581	switch (sas_loginfo.dw.originator) {
582	case 0:
583		originator_str = "IOP";
584		break;
585	case 1:
586		originator_str = "PL";
587		break;
588	case 2:
589		originator_str = "IR";
590		break;
591	}
592
593	printk(MPT2SAS_WARN_FMT "log_info(0x%08x): originator(%s), "
594	    "code(0x%02x), sub_code(0x%04x)\n", ioc->name, log_info,
595	     originator_str, sas_loginfo.dw.code,
596	     sas_loginfo.dw.subcode);
597}
598
599/**
600 * _base_display_reply_info - verbose logging of a reply's ioc status and log info
601 * @ioc: per adapter object
602 * @smid: system request message index
603 * @msix_index: MSIX table index supplied by the OS
604 * @reply: reply message frame(lower 32bit addr)
605 *
606 * Return nothing.
607 */
608static void
609_base_display_reply_info(struct MPT2SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
610    u32 reply)
611{
612	MPI2DefaultReply_t *mpi_reply;
613	u16 ioc_status;
614
615	mpi_reply = mpt2sas_base_get_reply_virt_addr(ioc, reply);
616	ioc_status = le16_to_cpu(mpi_reply->IOCStatus);
617#ifdef CONFIG_SCSI_MPT2SAS_LOGGING
618	if ((ioc_status & MPI2_IOCSTATUS_MASK) &&
619	    (ioc->logging_level & MPT_DEBUG_REPLY)) {
620		_base_sas_ioc_info(ioc , mpi_reply,
621		   mpt2sas_base_get_msg_frame(ioc, smid));
622	}
623#endif
624	if (ioc_status & MPI2_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE)
625		_base_sas_log_info(ioc, le32_to_cpu(mpi_reply->IOCLogInfo));
626}
627
628/**
629 * mpt2sas_base_done - base internal command completion routine
630 * @ioc: per adapter object
631 * @smid: system request message index
632 * @msix_index: MSIX table index supplied by the OS
633 * @reply: reply message frame(lower 32bit addr)
634 *
635 * Return 1 meaning mf should be freed from _base_interrupt
636 *        0 means the mf is freed from this function.
637 */
638u8
639mpt2sas_base_done(struct MPT2SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
640    u32 reply)
641{
642	MPI2DefaultReply_t *mpi_reply;
643
644	mpi_reply = mpt2sas_base_get_reply_virt_addr(ioc, reply);
645	if (mpi_reply && mpi_reply->Function == MPI2_FUNCTION_EVENT_ACK)
646		return 1;
647
648	if (ioc->base_cmds.status == MPT2_CMD_NOT_USED)
649		return 1;
650
651	ioc->base_cmds.status |= MPT2_CMD_COMPLETE;
652	if (mpi_reply) {
653		ioc->base_cmds.status |= MPT2_CMD_REPLY_VALID;
654		memcpy(ioc->base_cmds.reply, mpi_reply, mpi_reply->MsgLength*4);
655	}
656	ioc->base_cmds.status &= ~MPT2_CMD_PENDING;
657	complete(&ioc->base_cmds.done);
658	return 1;
659}
660
661/**
662 * _base_async_event - main callback handler for firmware async events
663 * @ioc: per adapter object
664 * @msix_index: MSIX table index supplied by the OS
665 * @reply: reply message frame(lower 32bit addr)
666 *
667 * Return 1 meaning mf should be freed from _base_interrupt
668 *        0 means the mf is freed from this function.
669 */
670static u8
671_base_async_event(struct MPT2SAS_ADAPTER *ioc, u8 msix_index, u32 reply)
672{
673	Mpi2EventNotificationReply_t *mpi_reply;
674	Mpi2EventAckRequest_t *ack_request;
675	u16 smid;
676
677	mpi_reply = mpt2sas_base_get_reply_virt_addr(ioc, reply);
678	if (!mpi_reply)
679		return 1;
680	if (mpi_reply->Function != MPI2_FUNCTION_EVENT_NOTIFICATION)
681		return 1;
682#ifdef CONFIG_SCSI_MPT2SAS_LOGGING
683	_base_display_event_data(ioc, mpi_reply);
684#endif
685	if (!(mpi_reply->AckRequired & MPI2_EVENT_NOTIFICATION_ACK_REQUIRED))
686		goto out;
687	smid = mpt2sas_base_get_smid(ioc, ioc->base_cb_idx);
688	if (!smid) {
689		printk(MPT2SAS_ERR_FMT "%s: failed obtaining a smid\n",
690		    ioc->name, __func__);
691		goto out;
692	}
693
694	ack_request = mpt2sas_base_get_msg_frame(ioc, smid);
695	memset(ack_request, 0, sizeof(Mpi2EventAckRequest_t));
696	ack_request->Function = MPI2_FUNCTION_EVENT_ACK;
697	ack_request->Event = mpi_reply->Event;
698	ack_request->EventContext = mpi_reply->EventContext;
699	ack_request->VF_ID = 0;  /* TODO */
700	ack_request->VP_ID = 0;
701	mpt2sas_base_put_smid_default(ioc, smid);
702
703 out:
704
705	/* scsih callback handler */
706	mpt2sas_scsih_event_callback(ioc, msix_index, reply);
707
708	/* ctl callback handler */
709	mpt2sas_ctl_event_callback(ioc, msix_index, reply);
710
711	return 1;
712}
713
714/**
715 * _base_get_cb_idx - obtain the callback index
716 * @ioc: per adapter object
717 * @smid: system request message index
718 *
719 * Return callback index.
720 */
721static u8
722_base_get_cb_idx(struct MPT2SAS_ADAPTER *ioc, u16 smid)
723{
724	int i;
725	u8 cb_idx = 0xFF;
726
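	/* the smid space is partitioned: [1, hi_priority_smid) indexes the
	 * scsiio lookup table, [hi_priority_smid, internal_smid) the
	 * hi-priority table, and everything above the internal table */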
727	if (smid >= ioc->hi_priority_smid) {
728		if (smid < ioc->internal_smid) {
729			i = smid - ioc->hi_priority_smid;
730			cb_idx = ioc->hpr_lookup[i].cb_idx;
731		} else {
732			i = smid - ioc->internal_smid;
733			cb_idx = ioc->internal_lookup[i].cb_idx;
734		}
735	} else {
736		i = smid - 1;
737		cb_idx = ioc->scsi_lookup[i].cb_idx;
738	}
739	return cb_idx;
740}
741
742/**
743 * _base_mask_interrupts - disable interrupts
744 * @ioc: per adapter object
745 *
746 * Disabling ResetIRQ, Reply and Doorbell Interrupts
747 *
748 * Return nothing.
749 */
750static void
751_base_mask_interrupts(struct MPT2SAS_ADAPTER *ioc)
752{
753	u32 him_register;
754
755	ioc->mask_interrupts = 1;
756	him_register = readl(&ioc->chip->HostInterruptMask);
757	him_register |= MPI2_HIM_DIM + MPI2_HIM_RIM + MPI2_HIM_RESET_IRQ_MASK;
758	writel(him_register, &ioc->chip->HostInterruptMask);
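	/* read back to flush the posted write so interrupts are really
	 * masked before this routine returns */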
759	readl(&ioc->chip->HostInterruptMask);
760}
761
762/**
763 * _base_unmask_interrupts - enable interrupts
764 * @ioc: per adapter object
765 *
766 * Enabling only Reply Interrupts
767 *
768 * Return nothing.
769 */
770static void
771_base_unmask_interrupts(struct MPT2SAS_ADAPTER *ioc)
772{
773	u32 him_register;
774
775	him_register = readl(&ioc->chip->HostInterruptMask);
776	him_register &= ~MPI2_HIM_RIM;
777	writel(him_register, &ioc->chip->HostInterruptMask);
778	ioc->mask_interrupts = 0;
779}
780
781union reply_descriptor {
782	u64 word;
783	struct {
784		u32 low;
785		u32 high;
786	} u;
787};
788
789/**
790 * _base_interrupt - MPT adapter (IOC) specific interrupt handler.
791 * @irq: irq number (not used)
792 * @bus_id: bus identifier cookie == pointer to MPT2SAS_ADAPTER structure
794 *
795 * Return IRQ_HANDLED if processed, else IRQ_NONE.
796 */
797static irqreturn_t
798_base_interrupt(int irq, void *bus_id)
799{
800	union reply_descriptor rd;
801	u32 completed_cmds;
802	u8 request_desript_type;
803	u16 smid;
804	u8 cb_idx;
805	u32 reply;
806	u8 msix_index;
807	struct MPT2SAS_ADAPTER *ioc = bus_id;
808	Mpi2ReplyDescriptorsUnion_t *rpf;
809	u8 rc;
810
811	if (ioc->mask_interrupts)
812		return IRQ_NONE;
813
814	rpf = &ioc->reply_post_free[ioc->reply_post_host_index];
815	request_desript_type = rpf->Default.ReplyFlags
816	     & MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;
817	if (request_desript_type == MPI2_RPY_DESCRIPT_FLAGS_UNUSED)
818		return IRQ_NONE;
819
820	completed_cmds = 0;
821	do {
822		rd.word = rpf->Words;
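		/* a descriptor still set to all F's has not been written by
		 * the firmware yet, so there is nothing more to process */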
823		if (rd.u.low == UINT_MAX || rd.u.high == UINT_MAX)
824			goto out;
825		reply = 0;
826		cb_idx = 0xFF;
827		smid = le16_to_cpu(rpf->Default.DescriptorTypeDependent1);
828		msix_index = rpf->Default.MSIxIndex;
829		if (request_desript_type ==
830		    MPI2_RPY_DESCRIPT_FLAGS_ADDRESS_REPLY) {
831			reply = le32_to_cpu
832				(rpf->AddressReply.ReplyFrameAddress);
833		} else if (request_desript_type ==
834		    MPI2_RPY_DESCRIPT_FLAGS_TARGET_COMMAND_BUFFER)
835			goto next;
836		else if (request_desript_type ==
837		    MPI2_RPY_DESCRIPT_FLAGS_TARGETASSIST_SUCCESS)
838			goto next;
839		if (smid)
840			cb_idx = _base_get_cb_idx(ioc, smid);
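		/* the registered callback returns 1 when the smid should be
		 * freed here, 0 when the callback keeps ownership of it */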
841		if (smid && cb_idx != 0xFF) {
842			rc = mpt_callbacks[cb_idx](ioc, smid, msix_index,
843			    reply);
844			if (reply)
845				_base_display_reply_info(ioc, smid, msix_index,
846				    reply);
847			if (rc)
848				mpt2sas_base_free_smid(ioc, smid);
849		}
850		if (!smid)
851			_base_async_event(ioc, msix_index, reply);
852
853		/* reply free queue handling */
854		if (reply) {
855			ioc->reply_free_host_index =
856			    (ioc->reply_free_host_index ==
857			    (ioc->reply_free_queue_depth - 1)) ?
858			    0 : ioc->reply_free_host_index + 1;
859			ioc->reply_free[ioc->reply_free_host_index] =
860			    cpu_to_le32(reply);
861			wmb();
862			writel(ioc->reply_free_host_index,
863			    &ioc->chip->ReplyFreeHostIndex);
864		}
865
866 next:
867
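		/* return the descriptor to the unused state and advance our
		 * copy of the post queue index, wrapping at
		 * reply_post_queue_depth */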
868		rpf->Words = ULLONG_MAX;
869		ioc->reply_post_host_index = (ioc->reply_post_host_index ==
870		    (ioc->reply_post_queue_depth - 1)) ? 0 :
871		    ioc->reply_post_host_index + 1;
872		request_desript_type =
873		    ioc->reply_post_free[ioc->reply_post_host_index].Default.
874		    ReplyFlags & MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;
875		completed_cmds++;
876		if (request_desript_type == MPI2_RPY_DESCRIPT_FLAGS_UNUSED)
877			goto out;
878		if (!ioc->reply_post_host_index)
879			rpf = ioc->reply_post_free;
880		else
881			rpf++;
882	} while (1);
883
884 out:
885
886	if (!completed_cmds)
887		return IRQ_NONE;
888
889	wmb();
890	writel(ioc->reply_post_host_index, &ioc->chip->ReplyPostHostIndex);
891	return IRQ_HANDLED;
892}
893
894/**
895 * mpt2sas_base_release_callback_handler - clear interrupt callback handler
896 * @cb_idx: callback index
897 *
898 * Return nothing.
899 */
900void
901mpt2sas_base_release_callback_handler(u8 cb_idx)
902{
903	mpt_callbacks[cb_idx] = NULL;
904}
905
906/**
907 * mpt2sas_base_register_callback_handler - obtain index for the interrupt callback handler
908 * @cb_func: callback function
909 *
910 * Returns cb_idx.
911 */
912u8
913mpt2sas_base_register_callback_handler(MPT_CALLBACK cb_func)
914{
915	u8 cb_idx;
916
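	/* search from the top of the table down for a free slot; index 0 is
	 * only reached when the table is completely full */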
917	for (cb_idx = MPT_MAX_CALLBACKS-1; cb_idx; cb_idx--)
918		if (mpt_callbacks[cb_idx] == NULL)
919			break;
920
921	mpt_callbacks[cb_idx] = cb_func;
922	return cb_idx;
923}
924
925/**
926 * mpt2sas_base_initialize_callback_handler - initialize the interrupt callback handler
927 *
928 * Return nothing.
929 */
930void
931mpt2sas_base_initialize_callback_handler(void)
932{
933	u8 cb_idx;
934
935	for (cb_idx = 0; cb_idx < MPT_MAX_CALLBACKS; cb_idx++)
936		mpt2sas_base_release_callback_handler(cb_idx);
937}
938
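/*
 * Typical usage (sketch, names as used in mpt2sas_scsih.c): a sub-module
 * registers its completion routine once at load time and passes the
 * returned index whenever it obtains a smid, e.g.
 *
 *	scsi_io_cb_idx = mpt2sas_base_register_callback_handler(_scsih_io_done);
 *	...
 *	smid = mpt2sas_base_get_smid_scsiio(ioc, scsi_io_cb_idx, scmd);
 */
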
939/**
940 * mpt2sas_base_build_zero_len_sge - build zero length sg entry
941 * @ioc: per adapter object
942 * @paddr: virtual address for SGE
943 *
944 * Create a zero length scatter gather entry to ensure the IOC's hardware has
945 * something to use if the target device goes brain dead and tries
946 * to send data even when none is asked for.
947 *
948 * Return nothing.
949 */
950void
951mpt2sas_base_build_zero_len_sge(struct MPT2SAS_ADAPTER *ioc, void *paddr)
952{
953	u32 flags_length = (u32)((MPI2_SGE_FLAGS_LAST_ELEMENT |
954	    MPI2_SGE_FLAGS_END_OF_BUFFER | MPI2_SGE_FLAGS_END_OF_LIST |
955	    MPI2_SGE_FLAGS_SIMPLE_ELEMENT) <<
956	    MPI2_SGE_FLAGS_SHIFT);
957	ioc->base_add_sg_single(paddr, flags_length, -1);
958}
959
960/**
961 * _base_add_sg_single_32 - Place a simple 32 bit SGE at address pAddr.
962 * @paddr: virtual address for SGE
963 * @flags_length: SGE flags and data transfer length
964 * @dma_addr: Physical address
965 *
966 * Return nothing.
967 */
968static void
969_base_add_sg_single_32(void *paddr, u32 flags_length, dma_addr_t dma_addr)
970{
971	Mpi2SGESimple32_t *sgel = paddr;
972
973	flags_length |= (MPI2_SGE_FLAGS_32_BIT_ADDRESSING |
974	    MPI2_SGE_FLAGS_SYSTEM_ADDRESS) << MPI2_SGE_FLAGS_SHIFT;
975	sgel->FlagsLength = cpu_to_le32(flags_length);
976	sgel->Address = cpu_to_le32(dma_addr);
977}
978
979
980/**
981 * _base_add_sg_single_64 - Place a simple 64 bit SGE at address pAddr.
982 * @paddr: virtual address for SGE
983 * @flags_length: SGE flags and data transfer length
984 * @dma_addr: Physical address
985 *
986 * Return nothing.
987 */
988static void
989_base_add_sg_single_64(void *paddr, u32 flags_length, dma_addr_t dma_addr)
990{
991	Mpi2SGESimple64_t *sgel = paddr;
992
993	flags_length |= (MPI2_SGE_FLAGS_64_BIT_ADDRESSING |
994	    MPI2_SGE_FLAGS_SYSTEM_ADDRESS) << MPI2_SGE_FLAGS_SHIFT;
995	sgel->FlagsLength = cpu_to_le32(flags_length);
996	sgel->Address = cpu_to_le64(dma_addr);
997}
998
999#define convert_to_kb(x) ((x) << (PAGE_SHIFT - 10))
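/* pages are 2^PAGE_SHIFT bytes and a kilobyte is 2^10 bytes, hence the
 * shift by the difference */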
1000
1001/**
1002 * _base_config_dma_addressing - set dma addressing
1003 * @ioc: per adapter object
1004 * @pdev: PCI device struct
1005 *
1006 * Returns 0 for success, non-zero for failure.
1007 */
1008static int
1009_base_config_dma_addressing(struct MPT2SAS_ADAPTER *ioc, struct pci_dev *pdev)
1010{
1011	struct sysinfo s;
1012	char *desc = NULL;
1013
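	/* prefer 64 bit DMA when the platform actually requires addresses
	 * above 4GB; otherwise fall back to a 32 bit mask and 32 bit SGEs */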
1014	if (sizeof(dma_addr_t) > 4) {
1015		const uint64_t required_mask =
1016		    dma_get_required_mask(&pdev->dev);
1017		if ((required_mask > DMA_BIT_MASK(32)) && !pci_set_dma_mask(pdev,
1018		    DMA_BIT_MASK(64)) && !pci_set_consistent_dma_mask(pdev,
1019		    DMA_BIT_MASK(64))) {
1020			ioc->base_add_sg_single = &_base_add_sg_single_64;
1021			ioc->sge_size = sizeof(Mpi2SGESimple64_t);
1022			desc = "64";
1023			goto out;
1024		}
1025	}
1026
1027	if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32))
1028	    && !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32))) {
1029		ioc->base_add_sg_single = &_base_add_sg_single_32;
1030		ioc->sge_size = sizeof(Mpi2SGESimple32_t);
1031		desc = "32";
1032	} else
1033		return -ENODEV;
1034
1035 out:
1036	si_meminfo(&s);
1037	printk(MPT2SAS_INFO_FMT "%s BIT PCI BUS DMA ADDRESSING SUPPORTED, "
1038	    "total mem (%ld kB)\n", ioc->name, desc, convert_to_kb(s.totalram));
1039
1040	return 0;
1041}
1042
1043/**
1044 * _base_save_msix_table - backup msix vector table
1045 * @ioc: per adapter object
1046 *
1047 * This addresses an erratum where diag reset clears out the table
1048 */
1049static void
1050_base_save_msix_table(struct MPT2SAS_ADAPTER *ioc)
1051{
1052	int i;
1053
1054	if (!ioc->msix_enable || ioc->msix_table_backup == NULL)
1055		return;
1056
1057	for (i = 0; i < ioc->msix_vector_count; i++)
1058		ioc->msix_table_backup[i] = ioc->msix_table[i];
1059}
1060
1061/**
1062 * _base_restore_msix_table - this restores the msix vector table
1063 * @ioc: per adapter object
1064 *
1065 */
1066static void
1067_base_restore_msix_table(struct MPT2SAS_ADAPTER *ioc)
1068{
1069	int i;
1070
1071	if (!ioc->msix_enable || ioc->msix_table_backup == NULL)
1072		return;
1073
1074	for (i = 0; i < ioc->msix_vector_count; i++)
1075		ioc->msix_table[i] = ioc->msix_table_backup[i];
1076}
1077
1078/**
1079 * _base_check_enable_msix - checks whether the device is MSIX capable.
1080 * @ioc: per adapter object
1081 *
1082 * Check to see if the card is capable of MSIX, and set the number
1083 * of available msix vectors
1084 */
1085static int
1086_base_check_enable_msix(struct MPT2SAS_ADAPTER *ioc)
1087{
1088	int base;
1089	u16 message_control;
1090	u32 msix_table_offset;
1091
1092	base = pci_find_capability(ioc->pdev, PCI_CAP_ID_MSIX);
1093	if (!base) {
1094		dfailprintk(ioc, printk(MPT2SAS_INFO_FMT "msix not "
1095		    "supported\n", ioc->name));
1096		return -EINVAL;
1097	}
1098
1099	/* get msix vector count */
1100	pci_read_config_word(ioc->pdev, base + 2, &message_control);
1101	ioc->msix_vector_count = (message_control & 0x3FF) + 1;
1102
1103	/* get msix table  */
1104	pci_read_config_dword(ioc->pdev, base + 4, &msix_table_offset);
1105	msix_table_offset &= 0xFFFFFFF8;
1106	ioc->msix_table = (u32 *)((void *)ioc->chip + msix_table_offset);
1107
1108	dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "msix is supported, "
1109	    "vector_count(%d), table_offset(0x%08x), table(%p)\n", ioc->name,
1110	    ioc->msix_vector_count, msix_table_offset, ioc->msix_table));
1111	return 0;
1112}
1113
1114/**
1115 * _base_disable_msix - disables msix
1116 * @ioc: per adapter object
1117 *
1118 */
1119static void
1120_base_disable_msix(struct MPT2SAS_ADAPTER *ioc)
1121{
1122	if (ioc->msix_enable) {
1123		pci_disable_msix(ioc->pdev);
1124		kfree(ioc->msix_table_backup);
1125		ioc->msix_table_backup = NULL;
1126		ioc->msix_enable = 0;
1127	}
1128}
1129
1130/**
1131 * _base_enable_msix - enables msix, falling back to io_apic
1132 * @ioc: per adapter object
1133 *
1134 */
1135static int
1136_base_enable_msix(struct MPT2SAS_ADAPTER *ioc)
1137{
1138	struct msix_entry entries;
1139	int r;
1140	u8 try_msix = 0;
1141
1142	if (msix_disable == -1 || msix_disable == 0)
1143		try_msix = 1;
1144
1145	if (!try_msix)
1146		goto try_ioapic;
1147
1148	if (_base_check_enable_msix(ioc) != 0)
1149		goto try_ioapic;
1150
1151	ioc->msix_table_backup = kcalloc(ioc->msix_vector_count,
1152	    sizeof(u32), GFP_KERNEL);
1153	if (!ioc->msix_table_backup) {
1154		dfailprintk(ioc, printk(MPT2SAS_INFO_FMT "allocation for "
1155		    "msix_table_backup failed!!!\n", ioc->name));
1156		goto try_ioapic;
1157	}
1158
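	/* only a single MSI-X vector is requested; this driver routes all
	 * replies through one reply post queue */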
1159	memset(&entries, 0, sizeof(struct msix_entry));
1160	r = pci_enable_msix(ioc->pdev, &entries, 1);
1161	if (r) {
1162		dfailprintk(ioc, printk(MPT2SAS_INFO_FMT "pci_enable_msix "
1163		    "failed (r=%d) !!!\n", ioc->name, r));
1164		goto try_ioapic;
1165	}
1166
1167	r = request_irq(entries.vector, _base_interrupt, IRQF_SHARED,
1168	    ioc->name, ioc);
1169	if (r) {
1170		dfailprintk(ioc, printk(MPT2SAS_INFO_FMT "unable to allocate "
1171		    "interrupt %d !!!\n", ioc->name, entries.vector));
1172		pci_disable_msix(ioc->pdev);
1173		goto try_ioapic;
1174	}
1175
1176	ioc->pci_irq = entries.vector;
1177	ioc->msix_enable = 1;
1178	return 0;
1179
1180/* fall back to io_apic interrupt routing */
1181 try_ioapic:
1182
1183	r = request_irq(ioc->pdev->irq, _base_interrupt, IRQF_SHARED,
1184	    ioc->name, ioc);
1185	if (r) {
1186		printk(MPT2SAS_ERR_FMT "unable to allocate interrupt %d!\n",
1187		    ioc->name, ioc->pdev->irq);
1188		r = -EBUSY;
1189		goto out_fail;
1190	}
1191
1192	ioc->pci_irq = ioc->pdev->irq;
1193	return 0;
1194
1195 out_fail:
1196	return r;
1197}
1198
1199/**
1200 * mpt2sas_base_map_resources - map in controller resources (io/irq/memap)
1201 * @ioc: per adapter object
1202 *
1203 * Returns 0 for success, non-zero for failure.
1204 */
1205int
1206mpt2sas_base_map_resources(struct MPT2SAS_ADAPTER *ioc)
1207{
1208	struct pci_dev *pdev = ioc->pdev;
1209	u32 memap_sz;
1210	u32 pio_sz;
1211	int i, r = 0;
1212
1213	dinitprintk(ioc, printk(MPT2SAS_DEBUG_FMT "%s\n",
1214	    ioc->name, __func__));
1215
1216	ioc->bars = pci_select_bars(pdev, IORESOURCE_MEM);
1217	if (pci_enable_device_mem(pdev)) {
1218		printk(MPT2SAS_WARN_FMT "pci_enable_device_mem: "
1219		    "failed\n", ioc->name);
1220		return -ENODEV;
1221	}
1222
1223
1224	if (pci_request_selected_regions(pdev, ioc->bars,
1225	    MPT2SAS_DRIVER_NAME)) {
1226		printk(MPT2SAS_WARN_FMT "pci_request_selected_regions: "
1227		    "failed\n", ioc->name);
1228		r = -ENODEV;
1229		goto out_fail;
1230	}
1231
1232	pci_set_master(pdev);
1233
1234	if (_base_config_dma_addressing(ioc, pdev) != 0) {
1235		printk(MPT2SAS_WARN_FMT "no suitable DMA mask for %s\n",
1236		    ioc->name, pci_name(pdev));
1237		r = -ENODEV;
1238		goto out_fail;
1239	}
1240
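	/* walk the PCI BARs: remember the first I/O port BAR as pio_chip and
	 * ioremap the first memory BAR as the register window */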
1241	for (i = 0, memap_sz = 0, pio_sz = 0 ; i < DEVICE_COUNT_RESOURCE; i++) {
1242		if (pci_resource_flags(pdev, i) & PCI_BASE_ADDRESS_SPACE_IO) {
1243			if (pio_sz)
1244				continue;
1245			ioc->pio_chip = pci_resource_start(pdev, i);
1246			pio_sz = pci_resource_len(pdev, i);
1247		} else {
1248			if (memap_sz)
1249				continue;
1250			ioc->chip_phys = pci_resource_start(pdev, i);
1251			memap_sz = pci_resource_len(pdev, i);
1252			ioc->chip = ioremap(ioc->chip_phys, memap_sz);
1253			if (ioc->chip == NULL) {
1254				printk(MPT2SAS_ERR_FMT "unable to map adapter "
1255				    "memory!\n", ioc->name);
1256				r = -EINVAL;
1257				goto out_fail;
1258			}
1259		}
1260	}
1261
1262	_base_mask_interrupts(ioc);
1263	r = _base_enable_msix(ioc);
1264	if (r)
1265		goto out_fail;
1266
1267	printk(MPT2SAS_INFO_FMT "%s: IRQ %d\n",
1268	    ioc->name,  ((ioc->msix_enable) ? "PCI-MSI-X enabled" :
1269	    "IO-APIC enabled"), ioc->pci_irq);
1270	printk(MPT2SAS_INFO_FMT "iomem(0x%lx), mapped(0x%p), size(%d)\n",
1271	    ioc->name, ioc->chip_phys, ioc->chip, memap_sz);
1272	printk(MPT2SAS_INFO_FMT "ioport(0x%lx), size(%d)\n",
1273	    ioc->name, ioc->pio_chip, pio_sz);
1274
1275	return 0;
1276
1277 out_fail:
1278	if (ioc->chip_phys)
1279		iounmap(ioc->chip);
1280	ioc->chip_phys = 0;
1281	ioc->pci_irq = -1;
1282	pci_release_selected_regions(ioc->pdev, ioc->bars);
1283	pci_disable_device(pdev);
1284	return r;
1285}
1286
1287/**
1288 * mpt2sas_base_get_msg_frame - obtain request mf pointer
1289 * @ioc: per adapter object
1290 * @smid: system request message index(smid zero is invalid)
1291 *
1292 * Returns virt pointer to message frame.
1293 */
1294void *
1295mpt2sas_base_get_msg_frame(struct MPT2SAS_ADAPTER *ioc, u16 smid)
1296{
1297	return (void *)(ioc->request + (smid * ioc->request_sz));
1298}
1299
1300/**
1301 * mpt2sas_base_get_sense_buffer - obtain a sense buffer assigned to a mf request
1302 * @ioc: per adapter object
1303 * @smid: system request message index
1304 *
1305 * Returns virt pointer to sense buffer.
1306 */
1307void *
1308mpt2sas_base_get_sense_buffer(struct MPT2SAS_ADAPTER *ioc, u16 smid)
1309{
1310	return (void *)(ioc->sense + ((smid - 1) * SCSI_SENSE_BUFFERSIZE));
1311}
1312
1313/**
1314 * mpt2sas_base_get_sense_buffer_dma - obtain a sense buffer assigned to a mf request
1315 * @ioc: per adapter object
1316 * @smid: system request message index
1317 *
1318 * Returns phys pointer to sense buffer.
1319 */
1320dma_addr_t
1321mpt2sas_base_get_sense_buffer_dma(struct MPT2SAS_ADAPTER *ioc, u16 smid)
1322{
1323	return ioc->sense_dma + ((smid - 1) * SCSI_SENSE_BUFFERSIZE);
1324}
1325
1326/**
1327 * mpt2sas_base_get_reply_virt_addr - obtain reply frames virt address
1328 * @ioc: per adapter object
1329 * @phys_addr: lower 32 physical addr of the reply
1330 *
1331 * Converts 32bit lower physical addr into a virt address.
1332 */
1333void *
1334mpt2sas_base_get_reply_virt_addr(struct MPT2SAS_ADAPTER *ioc, u32 phys_addr)
1335{
1336	if (!phys_addr)
1337		return NULL;
1338	return ioc->reply + (phys_addr - (u32)ioc->reply_dma);
1339}
1340
1341/**
1342 * mpt2sas_base_get_smid - obtain a free smid from internal queue
1343 * @ioc: per adapter object
1344 * @cb_idx: callback index
1345 *
1346 * Returns smid (zero is invalid)
1347 */
1348u16
1349mpt2sas_base_get_smid(struct MPT2SAS_ADAPTER *ioc, u8 cb_idx)
1350{
1351	unsigned long flags;
1352	struct request_tracker *request;
1353	u16 smid;
1354
1355	spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
1356	if (list_empty(&ioc->internal_free_list)) {
1357		spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
1358		printk(MPT2SAS_ERR_FMT "%s: smid not available\n",
1359		    ioc->name, __func__);
1360		return 0;
1361	}
1362
1363	request = list_entry(ioc->internal_free_list.next,
1364	    struct request_tracker, tracker_list);
1365	request->cb_idx = cb_idx;
1366	smid = request->smid;
1367	list_del(&request->tracker_list);
1368	spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
1369	return smid;
1370}
1371
1372/**
1373 * mpt2sas_base_get_smid_scsiio - obtain a free smid from scsiio queue
1374 * @ioc: per adapter object
1375 * @cb_idx: callback index
1376 * @scmd: pointer to scsi command object
1377 *
1378 * Returns smid (zero is invalid)
1379 */
1380u16
1381mpt2sas_base_get_smid_scsiio(struct MPT2SAS_ADAPTER *ioc, u8 cb_idx,
1382    struct scsi_cmnd *scmd)
1383{
1384	unsigned long flags;
1385	struct request_tracker *request;
1386	u16 smid;
1387
1388	spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
1389	if (list_empty(&ioc->free_list)) {
1390		spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
1391		printk(MPT2SAS_ERR_FMT "%s: smid not available\n",
1392		    ioc->name, __func__);
1393		return 0;
1394	}
1395
1396	request = list_entry(ioc->free_list.next,
1397	    struct request_tracker, tracker_list);
1398	request->scmd = scmd;
1399	request->cb_idx = cb_idx;
1400	smid = request->smid;
1401	list_del(&request->tracker_list);
1402	spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
1403	return smid;
1404}
1405
1406/**
1407 * mpt2sas_base_get_smid_hpr - obtain a free smid from hi-priority queue
1408 * @ioc: per adapter object
1409 * @cb_idx: callback index
1410 *
1411 * Returns smid (zero is invalid)
1412 */
1413u16
1414mpt2sas_base_get_smid_hpr(struct MPT2SAS_ADAPTER *ioc, u8 cb_idx)
1415{
1416	unsigned long flags;
1417	struct request_tracker *request;
1418	u16 smid;
1419
1420	spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
1421	if (list_empty(&ioc->hpr_free_list)) {
1422		spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
1423		return 0;
1424	}
1425
1426	request = list_entry(ioc->hpr_free_list.next,
1427	    struct request_tracker, tracker_list);
1428	request->cb_idx = cb_idx;
1429	smid = request->smid;
1430	list_del(&request->tracker_list);
1431	spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
1432	return smid;
1433}
1434
1435
1436/**
1437 * mpt2sas_base_free_smid - put smid back on free_list
1438 * @ioc: per adapter object
1439 * @smid: system request message index
1440 *
1441 * Return nothing.
1442 */
1443void
1444mpt2sas_base_free_smid(struct MPT2SAS_ADAPTER *ioc, u16 smid)
1445{
1446	unsigned long flags;
1447	int i;
1448
1449	spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
1450	if (smid >= ioc->hi_priority_smid) {
1451		if (smid < ioc->internal_smid) {
1452			/* hi-priority */
1453			i = smid - ioc->hi_priority_smid;
1454			ioc->hpr_lookup[i].cb_idx = 0xFF;
1455			list_add_tail(&ioc->hpr_lookup[i].tracker_list,
1456			    &ioc->hpr_free_list);
1457		} else {
1458			/* internal queue */
1459			i = smid - ioc->internal_smid;
1460			ioc->internal_lookup[i].cb_idx = 0xFF;
1461			list_add_tail(&ioc->internal_lookup[i].tracker_list,
1462			    &ioc->internal_free_list);
1463		}
1464		spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
1465		return;
1466	}
1467
1468	/* scsiio queue */
1469	i = smid - 1;
1470	ioc->scsi_lookup[i].cb_idx = 0xFF;
1471	ioc->scsi_lookup[i].scmd = NULL;
1472	list_add_tail(&ioc->scsi_lookup[i].tracker_list,
1473	    &ioc->free_list);
1474	spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
1475
1476	/*
1477	 * See _wait_for_commands_to_complete() call with regards to this code.
1478	 */
1479	if (ioc->shost_recovery && ioc->pending_io_count) {
1480		if (ioc->pending_io_count == 1)
1481			wake_up(&ioc->reset_wq);
1482		ioc->pending_io_count--;
1483	}
1484}
1485
1486/**
1487 * _base_writeq - 64 bit write to MMIO
1488 * @ioc: per adapter object
1489 * @b: data payload
1490 * @addr: address in MMIO space
1491 * @writeq_lock: spin lock
1492 *
1493 * Glue for handling an atomic 64 bit word to MMIO. This special handling takes
1494 * care of a 32 bit environment where it's not guaranteed to send the entire word
1495 * in one transfer.
1496 */
1497#ifndef writeq
1498static inline void _base_writeq(__u64 b, volatile void __iomem *addr,
1499    spinlock_t *writeq_lock)
1500{
1501	unsigned long flags;
1502	__u64 data_out = cpu_to_le64(b);
1503
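	/* keep the two 32 bit writes back to back under the lock so another
	 * CPU cannot interleave its own low/high pair into the register */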
1504	spin_lock_irqsave(writeq_lock, flags);
1505	writel((u32)(data_out), addr);
1506	writel((u32)(data_out >> 32), (addr + 4));
1507	spin_unlock_irqrestore(writeq_lock, flags);
1508}
1509#else
1510static inline void _base_writeq(__u64 b, volatile void __iomem *addr,
1511    spinlock_t *writeq_lock)
1512{
1513	writeq(cpu_to_le64(b), addr);
1514}
1515#endif
1516
1517/**
1518 * mpt2sas_base_put_smid_scsi_io - send SCSI_IO request to firmware
1519 * @ioc: per adapter object
1520 * @smid: system request message index
1521 * @handle: device handle
1522 *
1523 * Return nothing.
1524 */
1525void
1526mpt2sas_base_put_smid_scsi_io(struct MPT2SAS_ADAPTER *ioc, u16 smid, u16 handle)
1527{
1528	Mpi2RequestDescriptorUnion_t descriptor;
1529	u64 *request = (u64 *)&descriptor;
1530
1531
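	/* the request descriptor is handed to the IOC as a single 64 bit
	 * write to the low post register (split into two locked 32 bit
	 * writes by _base_writeq on hosts without writeq) */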
1532	descriptor.SCSIIO.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO;
1533	descriptor.SCSIIO.MSIxIndex = 0; /* TODO */
1534	descriptor.SCSIIO.SMID = cpu_to_le16(smid);
1535	descriptor.SCSIIO.DevHandle = cpu_to_le16(handle);
1536	descriptor.SCSIIO.LMID = 0;
1537	_base_writeq(*request, &ioc->chip->RequestDescriptorPostLow,
1538	    &ioc->scsi_lookup_lock);
1539}
1540
1541
1542/**
1543 * mpt2sas_base_put_smid_hi_priority - send Task Management request to firmware
1544 * @ioc: per adapter object
1545 * @smid: system request message index
1546 *
1547 * Return nothing.
1548 */
1549void
1550mpt2sas_base_put_smid_hi_priority(struct MPT2SAS_ADAPTER *ioc, u16 smid)
1551{
1552	Mpi2RequestDescriptorUnion_t descriptor;
1553	u64 *request = (u64 *)&descriptor;
1554
1555	descriptor.HighPriority.RequestFlags =
1556	    MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
1557	descriptor.HighPriority.MSIxIndex = 0; /* TODO */
1558	descriptor.HighPriority.SMID = cpu_to_le16(smid);
1559	descriptor.HighPriority.LMID = 0;
1560	descriptor.HighPriority.Reserved1 = 0;
1561	_base_writeq(*request, &ioc->chip->RequestDescriptorPostLow,
1562	    &ioc->scsi_lookup_lock);
1563}
1564
1565/**
1566 * mpt2sas_base_put_smid_default - Default, primarily used for config pages
1567 * @ioc: per adapter object
1568 * @smid: system request message index
1569 *
1570 * Return nothing.
1571 */
1572void
1573mpt2sas_base_put_smid_default(struct MPT2SAS_ADAPTER *ioc, u16 smid)
1574{
1575	Mpi2RequestDescriptorUnion_t descriptor;
1576	u64 *request = (u64 *)&descriptor;
1577
1578	descriptor.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
1579	descriptor.Default.MSIxIndex = 0; /* TODO */
1580	descriptor.Default.SMID = cpu_to_le16(smid);
1581	descriptor.Default.LMID = 0;
1582	descriptor.Default.DescriptorTypeDependent = 0;
1583	_base_writeq(*request, &ioc->chip->RequestDescriptorPostLow,
1584	    &ioc->scsi_lookup_lock);
1585}
1586
1587/**
1588 * mpt2sas_base_put_smid_target_assist - send Target Assist/Status to firmware
1589 * @ioc: per adapter object
1590 * @smid: system request message index
1591 * @io_index: value used to track the IO
1592 *
1593 * Return nothing.
1594 */
1595void
1596mpt2sas_base_put_smid_target_assist(struct MPT2SAS_ADAPTER *ioc, u16 smid,
1597    u16 io_index)
1598{
1599	Mpi2RequestDescriptorUnion_t descriptor;
1600	u64 *request = (u64 *)&descriptor;
1601
1602	descriptor.SCSITarget.RequestFlags =
1603	    MPI2_REQ_DESCRIPT_FLAGS_SCSI_TARGET;
1604	descriptor.SCSITarget.MSIxIndex = 0; /* TODO */
1605	descriptor.SCSITarget.SMID = cpu_to_le16(smid);
1606	descriptor.SCSITarget.LMID = 0;
1607	descriptor.SCSITarget.IoIndex = cpu_to_le16(io_index);
1608	_base_writeq(*request, &ioc->chip->RequestDescriptorPostLow,
1609	    &ioc->scsi_lookup_lock);
1610}
1611
1612/**
1613 * _base_display_dell_branding - Display Dell branding string
1614 * @ioc: per adapter object
1615 *
1616 * Return nothing.
1617 */
1618static void
1619_base_display_dell_branding(struct MPT2SAS_ADAPTER *ioc)
1620{
1621	char dell_branding[MPT2SAS_DELL_BRANDING_SIZE];
1622
1623	if (ioc->pdev->subsystem_vendor != PCI_VENDOR_ID_DELL)
1624		return;
1625
1626	memset(dell_branding, 0, MPT2SAS_DELL_BRANDING_SIZE);
1627	switch (ioc->pdev->subsystem_device) {
1628	case MPT2SAS_DELL_6GBPS_SAS_HBA_SSDID:
1629		strncpy(dell_branding, MPT2SAS_DELL_6GBPS_SAS_HBA_BRANDING,
1630		    MPT2SAS_DELL_BRANDING_SIZE - 1);
1631		break;
1632	case MPT2SAS_DELL_PERC_H200_ADAPTER_SSDID:
1633		strncpy(dell_branding, MPT2SAS_DELL_PERC_H200_ADAPTER_BRANDING,
1634		    MPT2SAS_DELL_BRANDING_SIZE - 1);
1635		break;
1636	case MPT2SAS_DELL_PERC_H200_INTEGRATED_SSDID:
1637		strncpy(dell_branding,
1638		    MPT2SAS_DELL_PERC_H200_INTEGRATED_BRANDING,
1639		    MPT2SAS_DELL_BRANDING_SIZE - 1);
1640		break;
1641	case MPT2SAS_DELL_PERC_H200_MODULAR_SSDID:
1642		strncpy(dell_branding,
1643		    MPT2SAS_DELL_PERC_H200_MODULAR_BRANDING,
1644		    MPT2SAS_DELL_BRANDING_SIZE - 1);
1645		break;
1646	case MPT2SAS_DELL_PERC_H200_EMBEDDED_SSDID:
1647		strncpy(dell_branding,
1648		    MPT2SAS_DELL_PERC_H200_EMBEDDED_BRANDING,
1649		    MPT2SAS_DELL_BRANDING_SIZE - 1);
1650		break;
1651	case MPT2SAS_DELL_PERC_H200_SSDID:
1652		strncpy(dell_branding, MPT2SAS_DELL_PERC_H200_BRANDING,
1653		    MPT2SAS_DELL_BRANDING_SIZE - 1);
1654		break;
1655	case MPT2SAS_DELL_6GBPS_SAS_SSDID:
1656		strncpy(dell_branding, MPT2SAS_DELL_6GBPS_SAS_BRANDING,
1657		    MPT2SAS_DELL_BRANDING_SIZE - 1);
1658		break;
1659	default:
1660		sprintf(dell_branding, "0x%04X", ioc->pdev->subsystem_device);
1661		break;
1662	}
1663
1664	printk(MPT2SAS_INFO_FMT "%s: Vendor(0x%04X), Device(0x%04X),"
1665	    " SSVID(0x%04X), SSDID(0x%04X)\n", ioc->name, dell_branding,
1666	    ioc->pdev->vendor, ioc->pdev->device, ioc->pdev->subsystem_vendor,
1667	    ioc->pdev->subsystem_device);
1668}
1669
1670/**
1671 * _base_display_ioc_capabilities - Display IOC's capabilities.
1672 * @ioc: per adapter object
1673 *
1674 * Return nothing.
1675 */
1676static void
1677_base_display_ioc_capabilities(struct MPT2SAS_ADAPTER *ioc)
1678{
1679	int i = 0;
1680	char desc[16];
1681	u8 revision;
1682	u32 iounit_pg1_flags;
1683
1684	pci_read_config_byte(ioc->pdev, PCI_CLASS_REVISION, &revision);
1685	strncpy(desc, ioc->manu_pg0.ChipName, 16);
1686	printk(MPT2SAS_INFO_FMT "%s: FWVersion(%02d.%02d.%02d.%02d), "
1687	   "ChipRevision(0x%02x), BiosVersion(%02d.%02d.%02d.%02d)\n",
1688	    ioc->name, desc,
1689	   (ioc->facts.FWVersion.Word & 0xFF000000) >> 24,
1690	   (ioc->facts.FWVersion.Word & 0x00FF0000) >> 16,
1691	   (ioc->facts.FWVersion.Word & 0x0000FF00) >> 8,
1692	   ioc->facts.FWVersion.Word & 0x000000FF,
1693	   revision,
1694	   (ioc->bios_pg3.BiosVersion & 0xFF000000) >> 24,
1695	   (ioc->bios_pg3.BiosVersion & 0x00FF0000) >> 16,
1696	   (ioc->bios_pg3.BiosVersion & 0x0000FF00) >> 8,
1697	    ioc->bios_pg3.BiosVersion & 0x000000FF);
1698
1699	_base_display_dell_branding(ioc);
1700
1701	printk(MPT2SAS_INFO_FMT "Protocol=(", ioc->name);
1702
1703	if (ioc->facts.ProtocolFlags & MPI2_IOCFACTS_PROTOCOL_SCSI_INITIATOR) {
1704		printk("Initiator");
1705		i++;
1706	}
1707
1708	if (ioc->facts.ProtocolFlags & MPI2_IOCFACTS_PROTOCOL_SCSI_TARGET) {
1709		printk("%sTarget", i ? "," : "");
1710		i++;
1711	}
1712
1713	i = 0;
1714	printk("), ");
1715	printk("Capabilities=(");
1716
1717	if (ioc->facts.IOCCapabilities &
1718	    MPI2_IOCFACTS_CAPABILITY_INTEGRATED_RAID) {
1719		printk("Raid");
1720		i++;
1721	}
1722
1723	if (ioc->facts.IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_TLR) {
1724		printk("%sTLR", i ? "," : "");
1725		i++;
1726	}
1727
1728	if (ioc->facts.IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_MULTICAST) {
1729		printk("%sMulticast", i ? "," : "");
1730		i++;
1731	}
1732
1733	if (ioc->facts.IOCCapabilities &
1734	    MPI2_IOCFACTS_CAPABILITY_BIDIRECTIONAL_TARGET) {
1735		printk("%sBIDI Target", i ? "," : "");
1736		i++;
1737	}
1738
1739	if (ioc->facts.IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_EEDP) {
1740		printk("%sEEDP", i ? "," : "");
1741		i++;
1742	}
1743
1744	if (ioc->facts.IOCCapabilities &
1745	    MPI2_IOCFACTS_CAPABILITY_SNAPSHOT_BUFFER) {
1746		printk("%sSnapshot Buffer", i ? "," : "");
1747		i++;
1748	}
1749
1750	if (ioc->facts.IOCCapabilities &
1751	    MPI2_IOCFACTS_CAPABILITY_DIAG_TRACE_BUFFER) {
1752		printk("%sDiag Trace Buffer", i ? "," : "");
1753		i++;
1754	}
1755
1756	if (ioc->facts.IOCCapabilities &
1757	    MPI2_IOCFACTS_CAPABILITY_TASK_SET_FULL_HANDLING) {
1758		printk("%sTask Set Full", i ? "," : "");
1759		i++;
1760	}
1761
1762	iounit_pg1_flags = le32_to_cpu(ioc->iounit_pg1.Flags);
1763	if (!(iounit_pg1_flags & MPI2_IOUNITPAGE1_NATIVE_COMMAND_Q_DISABLE)) {
1764		printk("%sNCQ", i ? "," : "");
1765		i++;
1766	}
1767
1768	printk(")\n");
1769}
1770
1771/**
1772 * _base_static_config_pages - static start of day config pages
1773 * @ioc: per adapter object
1774 *
1775 * Return nothing.
1776 */
1777static void
1778_base_static_config_pages(struct MPT2SAS_ADAPTER *ioc)
1779{
1780	Mpi2ConfigReply_t mpi_reply;
1781	u32 iounit_pg1_flags;
1782
1783	mpt2sas_config_get_manufacturing_pg0(ioc, &mpi_reply, &ioc->manu_pg0);
1784	if (ioc->ir_firmware)
1785		mpt2sas_config_get_manufacturing_pg10(ioc, &mpi_reply,
1786		    &ioc->manu_pg10);
1787	mpt2sas_config_get_bios_pg2(ioc, &mpi_reply, &ioc->bios_pg2);
1788	mpt2sas_config_get_bios_pg3(ioc, &mpi_reply, &ioc->bios_pg3);
1789	mpt2sas_config_get_ioc_pg8(ioc, &mpi_reply, &ioc->ioc_pg8);
1790	mpt2sas_config_get_iounit_pg0(ioc, &mpi_reply, &ioc->iounit_pg0);
1791	mpt2sas_config_get_iounit_pg1(ioc, &mpi_reply, &ioc->iounit_pg1);
1792	_base_display_ioc_capabilities(ioc);
1793
1794	/*
1795	 * Enable task_set_full handling in iounit_pg1 when the
1796	 * facts capabilities indicate that it's supported.
1797	 */
1798	iounit_pg1_flags = le32_to_cpu(ioc->iounit_pg1.Flags);
1799	if ((ioc->facts.IOCCapabilities &
1800	    MPI2_IOCFACTS_CAPABILITY_TASK_SET_FULL_HANDLING))
1801		iounit_pg1_flags &=
1802		    ~MPI2_IOUNITPAGE1_DISABLE_TASK_SET_FULL_HANDLING;
1803	else
1804		iounit_pg1_flags |=
1805		    MPI2_IOUNITPAGE1_DISABLE_TASK_SET_FULL_HANDLING;
1806	ioc->iounit_pg1.Flags = cpu_to_le32(iounit_pg1_flags);
1807	mpt2sas_config_set_iounit_pg1(ioc, &mpi_reply, &ioc->iounit_pg1);
1808}
1809
1810/**
1811 * _base_release_memory_pools - release memory
1812 * @ioc: per adapter object
1813 *
1814 * Free memory allocated from _base_allocate_memory_pools.
1815 *
1816 * Return nothing.
1817 */
1818static void
1819_base_release_memory_pools(struct MPT2SAS_ADAPTER *ioc)
1820{
1821	dexitprintk(ioc, printk(MPT2SAS_DEBUG_FMT "%s\n", ioc->name,
1822	    __func__));
1823
1824	if (ioc->request) {
1825		pci_free_consistent(ioc->pdev, ioc->request_dma_sz,
1826		    ioc->request,  ioc->request_dma);
1827		dexitprintk(ioc, printk(MPT2SAS_INFO_FMT "request_pool(0x%p)"
1828		    ": free\n", ioc->name, ioc->request));
1829		ioc->request = NULL;
1830	}
1831
1832	if (ioc->sense) {
1833		pci_pool_free(ioc->sense_dma_pool, ioc->sense, ioc->sense_dma);
1834		if (ioc->sense_dma_pool)
1835			pci_pool_destroy(ioc->sense_dma_pool);
1836		dexitprintk(ioc, printk(MPT2SAS_INFO_FMT "sense_pool(0x%p)"
1837		    ": free\n", ioc->name, ioc->sense));
1838		ioc->sense = NULL;
1839	}
1840
1841	if (ioc->reply) {
1842		pci_pool_free(ioc->reply_dma_pool, ioc->reply, ioc->reply_dma);
1843		if (ioc->reply_dma_pool)
1844			pci_pool_destroy(ioc->reply_dma_pool);
1845		dexitprintk(ioc, printk(MPT2SAS_INFO_FMT "reply_pool(0x%p)"
1846		     ": free\n", ioc->name, ioc->reply));
1847		ioc->reply = NULL;
1848	}
1849
1850	if (ioc->reply_free) {
1851		pci_pool_free(ioc->reply_free_dma_pool, ioc->reply_free,
1852		    ioc->reply_free_dma);
1853		if (ioc->reply_free_dma_pool)
1854			pci_pool_destroy(ioc->reply_free_dma_pool);
1855		dexitprintk(ioc, printk(MPT2SAS_INFO_FMT "reply_free_pool"
1856		    "(0x%p): free\n", ioc->name, ioc->reply_free));
1857		ioc->reply_free = NULL;
1858	}
1859
1860	if (ioc->reply_post_free) {
1861		pci_pool_free(ioc->reply_post_free_dma_pool,
1862		    ioc->reply_post_free, ioc->reply_post_free_dma);
1863		if (ioc->reply_post_free_dma_pool)
1864			pci_pool_destroy(ioc->reply_post_free_dma_pool);
1865		dexitprintk(ioc, printk(MPT2SAS_INFO_FMT
1866		    "reply_post_free_pool(0x%p): free\n", ioc->name,
1867		    ioc->reply_post_free));
1868		ioc->reply_post_free = NULL;
1869	}
1870
1871	if (ioc->config_page) {
1872		dexitprintk(ioc, printk(MPT2SAS_INFO_FMT
1873		    "config_page(0x%p): free\n", ioc->name,
1874		    ioc->config_page));
1875		pci_free_consistent(ioc->pdev, ioc->config_page_sz,
1876		    ioc->config_page, ioc->config_page_dma);
1877	}
1878
1879	kfree(ioc->scsi_lookup);
1880	kfree(ioc->hpr_lookup);
1881	kfree(ioc->internal_lookup);
1882}
1883
1884
1885/**
1886 * _base_allocate_memory_pools - allocate start of day memory pools
1887 * @ioc: per adapter object
1888 * @sleep_flag: CAN_SLEEP or NO_SLEEP
1889 *
1890 * Returns 0 success, anything else error
1891 */
1892static int
1893_base_allocate_memory_pools(struct MPT2SAS_ADAPTER *ioc,  int sleep_flag)
1894{
1895	Mpi2IOCFactsReply_t *facts;
1896	u32 queue_size, queue_diff;
1897	u16 max_sge_elements;
1898	u16 num_of_reply_frames;
1899	u16 chains_needed_per_io;
1900	u32 sz, total_sz;
1901	u32 retry_sz;
1902	u16 max_request_credit;
1903
1904	dinitprintk(ioc, printk(MPT2SAS_DEBUG_FMT "%s\n", ioc->name,
1905	    __func__));
1906
1907	retry_sz = 0;
1908	facts = &ioc->facts;
1909
1910	/* command line tunables  for max sgl entries */
1911	if (max_sgl_entries != -1) {
1912		ioc->shost->sg_tablesize = (max_sgl_entries <
1913		    MPT2SAS_SG_DEPTH) ? max_sgl_entries :
1914		    MPT2SAS_SG_DEPTH;
1915	} else {
1916		ioc->shost->sg_tablesize = MPT2SAS_SG_DEPTH;
1917	}
1918
1919	/* command line tunables  for max controller queue depth */
1920	if (max_queue_depth != -1) {
1921		max_request_credit = (max_queue_depth < facts->RequestCredit)
1922		    ? max_queue_depth : facts->RequestCredit;
1923	} else {
1924		max_request_credit = (facts->RequestCredit >
1925		    MPT2SAS_MAX_REQUEST_QUEUE) ? MPT2SAS_MAX_REQUEST_QUEUE :
1926		    facts->RequestCredit;
1927	}
1928
1929	ioc->hba_queue_depth = max_request_credit;
1930	ioc->hi_priority_depth = facts->HighPriorityCredit;
1931	ioc->internal_depth = ioc->hi_priority_depth + 5;
1932
1933	/* request frame size */
1934	ioc->request_sz = facts->IOCRequestFrameSize * 4;
1935
1936	/* reply frame size */
1937	ioc->reply_sz = facts->ReplyFrameSize * 4;
1938
1939 retry_allocation:
1940	total_sz = 0;
1941	/* calculate number of sg elements left over in the 1st frame */
1942	max_sge_elements = ioc->request_sz - ((sizeof(Mpi2SCSIIORequest_t) -
1943	    sizeof(Mpi2SGEIOUnion_t)) + ioc->sge_size);
1944	ioc->max_sges_in_main_message = max_sge_elements/ioc->sge_size;
1945
1946	/* now do the same for a chain buffer */
1947	max_sge_elements = ioc->request_sz - ioc->sge_size;
1948	ioc->max_sges_in_chain_message = max_sge_elements/ioc->sge_size;
1949
1950	ioc->chain_offset_value_for_main_message =
1951	    ((sizeof(Mpi2SCSIIORequest_t) - sizeof(Mpi2SGEIOUnion_t)) +
1952	     (ioc->max_sges_in_chain_message * ioc->sge_size)) / 4;
1953
1954	/*
1955	 *  MPT2SAS_SG_DEPTH = CONFIG_FUSION_MAX_SGE
1956	 */
1957	chains_needed_per_io = ((ioc->shost->sg_tablesize -
1958	   ioc->max_sges_in_main_message)/ioc->max_sges_in_chain_message)
1959	    + 1;
1960	if (chains_needed_per_io > facts->MaxChainDepth) {
1961		chains_needed_per_io = facts->MaxChainDepth;
1962		ioc->shost->sg_tablesize = min_t(u16,
1963		ioc->max_sges_in_main_message + (ioc->max_sges_in_chain_message
1964		* chains_needed_per_io), ioc->shost->sg_tablesize);
1965	}
1966	ioc->chains_needed_per_io = chains_needed_per_io;
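	/*
	 * Worked example with purely hypothetical sizes: if the request
	 * frame were 128 bytes, the fixed SCSI IO header (less the SGL
	 * union) 96 bytes and sge_size 8 bytes, then
	 * max_sges_in_main_message = (128 - (96 + 8)) / 8 = 3,
	 * max_sges_in_chain_message = (128 - 8) / 8 = 15, and a 128-entry
	 * sg_tablesize would need ((128 - 3) / 15) + 1 = 9 chain buffers
	 * per IO (subject to the MaxChainDepth cap above).
	 */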
1967
1968	/* reply free queue sizing - taking into account for events */
1969	num_of_reply_frames = ioc->hba_queue_depth + 32;
1970
1971	/* number of reply frames can't be a multiple of 16 */
1972	/* decrease number of reply frames by 1 */
1973	if (!(num_of_reply_frames % 16))
1974		num_of_reply_frames--;
1975
1976	/* calculate number of reply free queue entries
1977	 *  (must be multiple of 16)
1978	 */
1979
1980	/* (we know reply_free_queue_depth is not a multiple of 16) */
1981	queue_size = num_of_reply_frames;
1982	queue_size += 16 - (queue_size % 16);
1983	ioc->reply_free_queue_depth = queue_size;
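	/*
	 * For example, with a hypothetical hba_queue_depth of 600,
	 * num_of_reply_frames starts at 632 (already not a multiple of 16,
	 * so it is not decremented) and queue_size is rounded up to 640,
	 * which becomes the reply_free_queue_depth.
	 */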
1984
1985	/* reply descriptor post queue sizing */
1986	/* this size should be the number of request frames + number of reply
1987	 * frames
1988	 */
1989
1990	queue_size = ioc->hba_queue_depth + num_of_reply_frames + 1;
1991	/* round up to 16 byte boundary */
1992	if (queue_size % 16)
1993		queue_size += 16 - (queue_size % 16);
1994
1995	/* check against IOC maximum reply post queue depth */
1996	if (queue_size > facts->MaxReplyDescriptorPostQueueDepth) {
1997		queue_diff = queue_size -
1998		    facts->MaxReplyDescriptorPostQueueDepth;
1999
2000		/* round queue_diff up to multiple of 16 */
2001		if (queue_diff % 16)
2002			queue_diff += 16 - (queue_diff % 16);
2003
2004		/* adjust hba_queue_depth, reply_free_queue_depth,
2005		 * and queue_size
2006		 */
2007		ioc->hba_queue_depth -= queue_diff;
2008		ioc->reply_free_queue_depth -= queue_diff;
2009		queue_size -= queue_diff;
2010	}
2011	ioc->reply_post_queue_depth = queue_size;
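	/*
	 * Continuing the hypothetical example: 600 + 632 + 1 = 1233 rounds
	 * up to 1248; if that exceeded MaxReplyDescriptorPostQueueDepth,
	 * the overflow (itself rounded up to a multiple of 16) would be
	 * trimmed from hba_queue_depth, reply_free_queue_depth and the
	 * reply post queue depth alike.
	 */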
2012
2013	dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "scatter gather: "
2014	    "sge_in_main_msg(%d), sge_per_chain(%d), sge_per_io(%d), "
2015	    "chains_per_io(%d)\n", ioc->name, ioc->max_sges_in_main_message,
2016	    ioc->max_sges_in_chain_message, ioc->shost->sg_tablesize,
2017	    ioc->chains_needed_per_io));
2018
2019	ioc->scsiio_depth = ioc->hba_queue_depth -
2020	    ioc->hi_priority_depth - ioc->internal_depth;
2021
2022	/* set the scsi host can_queue depth, reserving a couple of frames
2023	 * for internal commands that could be outstanding
2024	 */
2025	ioc->shost->can_queue = ioc->scsiio_depth - (2);
2026	dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "scsi host: "
2027	    "can_queue depth (%d)\n", ioc->name, ioc->shost->can_queue));
2028
2029	/* contiguous pool for request and chains, 16 byte align, one extra
2030	 * frame for smid=0
2031	 */
2032	ioc->chain_depth = ioc->chains_needed_per_io * ioc->scsiio_depth;
2033	sz = ((ioc->scsiio_depth + 1 + ioc->chain_depth) * ioc->request_sz);
2034
2035	/* hi-priority queue */
2036	sz += (ioc->hi_priority_depth * ioc->request_sz);
2037
2038	/* internal queue */
2039	sz += (ioc->internal_depth * ioc->request_sz);
2040
2041	ioc->request_dma_sz = sz;
2042	ioc->request = pci_alloc_consistent(ioc->pdev, sz, &ioc->request_dma);
2043	if (!ioc->request) {
2044		printk(MPT2SAS_ERR_FMT "request pool: pci_alloc_consistent "
2045		    "failed: hba_depth(%d), chains_per_io(%d), frame_sz(%d), "
2046		    "total(%d kB)\n", ioc->name, ioc->hba_queue_depth,
2047		    ioc->chains_needed_per_io, ioc->request_sz, sz/1024);
2048		if (ioc->scsiio_depth < MPT2SAS_SAS_QUEUE_DEPTH)
2049			goto out;
2050		retry_sz += 64;
2051		ioc->hba_queue_depth = max_request_credit - retry_sz;
2052		goto retry_allocation;
2053	}
2054
2055	if (retry_sz)
2056		printk(MPT2SAS_ERR_FMT "request pool: pci_alloc_consistent "
2057		    "succeeded: hba_depth(%d), chains_per_io(%d), frame_sz(%d), "
2058		    "total(%d kB)\n", ioc->name, ioc->hba_queue_depth,
2059		    ioc->chains_needed_per_io, ioc->request_sz, sz/1024);
2060
2061
2062	/* hi-priority queue */
2063	ioc->hi_priority = ioc->request + ((ioc->scsiio_depth + 1) *
2064	    ioc->request_sz);
2065	ioc->hi_priority_dma = ioc->request_dma + ((ioc->scsiio_depth + 1) *
2066	    ioc->request_sz);
2067
2068	/* internal queue */
2069	ioc->internal = ioc->hi_priority + (ioc->hi_priority_depth *
2070	    ioc->request_sz);
2071	ioc->internal_dma = ioc->hi_priority_dma + (ioc->hi_priority_depth *
2072	    ioc->request_sz);
2073
2074	ioc->chain = ioc->internal + (ioc->internal_depth *
2075	    ioc->request_sz);
2076	ioc->chain_dma = ioc->internal_dma + (ioc->internal_depth *
2077	    ioc->request_sz);
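	/*
	 * The single request_dma allocation is carved up as follows, lowest
	 * address first, each frame being request_sz bytes:
	 *
	 *   scsiio frames (scsiio_depth + 1, the extra one for smid = 0)
	 *   hi-priority frames (hi_priority_depth)
	 *   internal frames (internal_depth)
	 *   chain frames (chain_depth)
	 */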
2078
2079	dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "request pool(0x%p): "
2080	    "depth(%d), frame_size(%d), pool_size(%d kB)\n", ioc->name,
2081	    ioc->request, ioc->hba_queue_depth, ioc->request_sz,
2082	    (ioc->hba_queue_depth * ioc->request_sz)/1024));
2083	dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "chain pool(0x%p): depth"
2084	    "(%d), frame_size(%d), pool_size(%d kB)\n", ioc->name, ioc->chain,
2085	    ioc->chain_depth, ioc->request_sz, ((ioc->chain_depth *
2086	    ioc->request_sz))/1024));
2087	dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "request pool: dma(0x%llx)\n",
2088	    ioc->name, (unsigned long long) ioc->request_dma));
2089	total_sz += sz;
2090
2091	ioc->scsi_lookup = kcalloc(ioc->scsiio_depth,
2092	    sizeof(struct request_tracker), GFP_KERNEL);
2093	if (!ioc->scsi_lookup) {
2094		printk(MPT2SAS_ERR_FMT "scsi_lookup: kcalloc failed\n",
2095		    ioc->name);
2096		goto out;
2097	}
2098
2099	dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "scsiio(0x%p): "
2100	    "depth(%d)\n", ioc->name, ioc->request,
2101	    ioc->scsiio_depth));
2102
2103	/* initialize hi-priority queue smid's */
2104	ioc->hpr_lookup = kcalloc(ioc->hi_priority_depth,
2105	    sizeof(struct request_tracker), GFP_KERNEL);
2106	if (!ioc->hpr_lookup) {
2107		printk(MPT2SAS_ERR_FMT "hpr_lookup: kcalloc failed\n",
2108		    ioc->name);
2109		goto out;
2110	}
2111	ioc->hi_priority_smid = ioc->scsiio_depth + 1;
2112	dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "hi_priority(0x%p): "
2113	    "depth(%d), start smid(%d)\n", ioc->name, ioc->hi_priority,
2114	    ioc->hi_priority_depth, ioc->hi_priority_smid));
2115
2116	/* initialize internal queue smid's */
2117	ioc->internal_lookup = kcalloc(ioc->internal_depth,
2118	    sizeof(struct request_tracker), GFP_KERNEL);
2119	if (!ioc->internal_lookup) {
2120		printk(MPT2SAS_ERR_FMT "internal_lookup: kcalloc failed\n",
2121		    ioc->name);
2122		goto out;
2123	}
2124	ioc->internal_smid = ioc->hi_priority_smid + ioc->hi_priority_depth;
2125	dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "internal(0x%p): "
2126	    "depth(%d), start smid(%d)\n", ioc->name, ioc->internal,
2127	     ioc->internal_depth, ioc->internal_smid));
2128
2129	/* sense buffers, 4 byte align */
2130	sz = ioc->scsiio_depth * SCSI_SENSE_BUFFERSIZE;
2131	ioc->sense_dma_pool = pci_pool_create("sense pool", ioc->pdev, sz, 4,
2132	    0);
2133	if (!ioc->sense_dma_pool) {
2134		printk(MPT2SAS_ERR_FMT "sense pool: pci_pool_create failed\n",
2135		    ioc->name);
2136		goto out;
2137	}
2138	ioc->sense = pci_pool_alloc(ioc->sense_dma_pool , GFP_KERNEL,
2139	    &ioc->sense_dma);
2140	if (!ioc->sense) {
2141		printk(MPT2SAS_ERR_FMT "sense pool: pci_pool_alloc failed\n",
2142		    ioc->name);
2143		goto out;
2144	}
2145	dinitprintk(ioc, printk(MPT2SAS_INFO_FMT
2146	    "sense pool(0x%p): depth(%d), element_size(%d), pool_size"
2147	    "(%d kB)\n", ioc->name, ioc->sense, ioc->scsiio_depth,
2148	    SCSI_SENSE_BUFFERSIZE, sz/1024));
2149	dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "sense_dma(0x%llx)\n",
2150	    ioc->name, (unsigned long long)ioc->sense_dma));
2151	total_sz += sz;
2152
2153	/* reply pool, 4 byte align */
2154	sz = ioc->reply_free_queue_depth * ioc->reply_sz;
2155	ioc->reply_dma_pool = pci_pool_create("reply pool", ioc->pdev, sz, 4,
2156	    0);
2157	if (!ioc->reply_dma_pool) {
2158		printk(MPT2SAS_ERR_FMT "reply pool: pci_pool_create failed\n",
2159		    ioc->name);
2160		goto out;
2161	}
2162	ioc->reply = pci_pool_alloc(ioc->reply_dma_pool , GFP_KERNEL,
2163	    &ioc->reply_dma);
2164	if (!ioc->reply) {
2165		printk(MPT2SAS_ERR_FMT "reply pool: pci_pool_alloc failed\n",
2166		    ioc->name);
2167		goto out;
2168	}
2169	dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "reply pool(0x%p): depth"
2170	    "(%d), frame_size(%d), pool_size(%d kB)\n", ioc->name, ioc->reply,
2171	    ioc->reply_free_queue_depth, ioc->reply_sz, sz/1024));
2172	dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "reply_dma(0x%llx)\n",
2173	    ioc->name, (unsigned long long)ioc->reply_dma));
2174	total_sz += sz;
2175
2176	/* reply free queue, 16 byte align */
2177	sz = ioc->reply_free_queue_depth * 4;
2178	ioc->reply_free_dma_pool = pci_pool_create("reply_free pool",
2179	    ioc->pdev, sz, 16, 0);
2180	if (!ioc->reply_free_dma_pool) {
2181		printk(MPT2SAS_ERR_FMT "reply_free pool: pci_pool_create "
2182		    "failed\n", ioc->name);
2183		goto out;
2184	}
2185	ioc->reply_free = pci_pool_alloc(ioc->reply_free_dma_pool , GFP_KERNEL,
2186	    &ioc->reply_free_dma);
2187	if (!ioc->reply_free) {
2188		printk(MPT2SAS_ERR_FMT "reply_free pool: pci_pool_alloc "
2189		    "failed\n", ioc->name);
2190		goto out;
2191	}
2192	memset(ioc->reply_free, 0, sz);
2193	dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "reply_free pool(0x%p): "
2194	    "depth(%d), element_size(%d), pool_size(%d kB)\n", ioc->name,
2195	    ioc->reply_free, ioc->reply_free_queue_depth, 4, sz/1024));
2196	dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "reply_free_dma"
2197	    "(0x%llx)\n", ioc->name, (unsigned long long)ioc->reply_free_dma));
2198	total_sz += sz;
2199
2200	/* reply post queue, 16 byte align */
2201	sz = ioc->reply_post_queue_depth * sizeof(Mpi2DefaultReplyDescriptor_t);
2202	ioc->reply_post_free_dma_pool = pci_pool_create("reply_post_free pool",
2203	    ioc->pdev, sz, 16, 0);
2204	if (!ioc->reply_post_free_dma_pool) {
2205		printk(MPT2SAS_ERR_FMT "reply_post_free pool: pci_pool_create "
2206		    "failed\n", ioc->name);
2207		goto out;
2208	}
2209	ioc->reply_post_free = pci_pool_alloc(ioc->reply_post_free_dma_pool ,
2210	    GFP_KERNEL, &ioc->reply_post_free_dma);
2211	if (!ioc->reply_post_free) {
2212		printk(MPT2SAS_ERR_FMT "reply_post_free pool: pci_pool_alloc "
2213		    "failed\n", ioc->name);
2214		goto out;
2215	}
2216	memset(ioc->reply_post_free, 0, sz);
2217	dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "reply post free pool"
2218	    "(0x%p): depth(%d), element_size(%d), pool_size(%d kB)\n",
2219	    ioc->name, ioc->reply_post_free, ioc->reply_post_queue_depth, 8,
2220	    sz/1024));
2221	dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "reply_post_free_dma = "
2222	    "(0x%llx)\n", ioc->name, (unsigned long long)
2223	    ioc->reply_post_free_dma));
2224	total_sz += sz;
2225
2226	ioc->config_page_sz = 512;
2227	ioc->config_page = pci_alloc_consistent(ioc->pdev,
2228	    ioc->config_page_sz, &ioc->config_page_dma);
2229	if (!ioc->config_page) {
2230		printk(MPT2SAS_ERR_FMT "config page: pci_alloc_consistent "
2231		    "failed\n", ioc->name);
2232		goto out;
2233	}
2234	dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "config page(0x%p): size"
2235	    "(%d)\n", ioc->name, ioc->config_page, ioc->config_page_sz));
2236	dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "config_page_dma"
2237	    "(0x%llx)\n", ioc->name, (unsigned long long)ioc->config_page_dma));
2238	total_sz += ioc->config_page_sz;
2239
2240	printk(MPT2SAS_INFO_FMT "Allocated physical memory: size(%d kB)\n",
2241	    ioc->name, total_sz/1024);
2242	printk(MPT2SAS_INFO_FMT "Current Controller Queue Depth(%d), "
2243	    "Max Controller Queue Depth(%d)\n",
2244	    ioc->name, ioc->shost->can_queue, facts->RequestCredit);
2245	printk(MPT2SAS_INFO_FMT "Scatter Gather Elements per IO(%d)\n",
2246	    ioc->name, ioc->shost->sg_tablesize);
2247	return 0;
2248
2249 out:
2250	_base_release_memory_pools(ioc);
2251	return -ENOMEM;
2252}
2253
2254
2255/**
2256 * mpt2sas_base_get_iocstate - Get the current state of a MPT adapter.
2257 * @ioc: Pointer to MPT2SAS_ADAPTER structure
2258 * @cooked: Request raw or cooked IOC state
2259 *
2260 * Returns all IOC Doorbell register bits if cooked==0, else just the
2261 * Doorbell bits in MPI2_IOC_STATE_MASK.
2262 */
2263u32
2264mpt2sas_base_get_iocstate(struct MPT2SAS_ADAPTER *ioc, int cooked)
2265{
2266	u32 s, sc;
2267
2268	s = readl(&ioc->chip->Doorbell);
2269	sc = s & MPI2_IOC_STATE_MASK;
2270	return cooked ? sc : s;
2271}
2272
2273/**
2274 * _base_wait_on_iocstate - waiting on a particular ioc state
2275 * @ioc_state: controller state { READY, OPERATIONAL, or RESET }
2276 * @timeout: timeout in seconds
2277 * @sleep_flag: CAN_SLEEP or NO_SLEEP
2278 *
2279 * Returns 0 for success, non-zero for failure.
2280 */
2281static int
2282_base_wait_on_iocstate(struct MPT2SAS_ADAPTER *ioc, u32 ioc_state, int timeout,
2283    int sleep_flag)
2284{
2285	u32 count, cntdn;
2286	u32 current_state;
2287
2288	count = 0;
2289	cntdn = (sleep_flag == CAN_SLEEP) ? 1000*timeout : 2000*timeout;
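	/*
	 * Poll roughly every 1 ms (msleep) when sleeping is allowed, or
	 * every 500 us (udelay) otherwise, so cntdn approximates the
	 * caller's timeout in seconds.
	 */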
2290	do {
2291		current_state = mpt2sas_base_get_iocstate(ioc, 1);
2292		if (current_state == ioc_state)
2293			return 0;
2294		if (count && current_state == MPI2_IOC_STATE_FAULT)
2295			break;
2296		if (sleep_flag == CAN_SLEEP)
2297			msleep(1);
2298		else
2299			udelay(500);
2300		count++;
2301	} while (--cntdn);
2302
2303	return current_state;
2304}
2305
2306/**
2307 * _base_wait_for_doorbell_int - waiting for a controller interrupt
2308 * (generated by a write to the doorbell)
2309 * @ioc: per adapter object
2310 * @timeout: timeout in seconds
2311 * @sleep_flag: CAN_SLEEP or NO_SLEEP
2312 *
2313 * Returns 0 for success, non-zero for failure.
2314 *
2315 * Notes: MPI2_HIS_IOC2SYS_DB_STATUS - set to one when IOC writes to doorbell.
2316 */
2317static int
2318_base_wait_for_doorbell_int(struct MPT2SAS_ADAPTER *ioc, int timeout,
2319    int sleep_flag)
2320{
2321	u32 cntdn, count;
2322	u32 int_status;
2323
2324	count = 0;
2325	cntdn = (sleep_flag == CAN_SLEEP) ? 1000*timeout : 2000*timeout;
2326	do {
2327		int_status = readl(&ioc->chip->HostInterruptStatus);
2328		if (int_status & MPI2_HIS_IOC2SYS_DB_STATUS) {
2329			dhsprintk(ioc, printk(MPT2SAS_DEBUG_FMT "%s: "
2330			    "successful count(%d), timeout(%d)\n", ioc->name,
2331			    __func__, count, timeout));
2332			return 0;
2333		}
2334		if (sleep_flag == CAN_SLEEP)
2335			msleep(1);
2336		else
2337			udelay(500);
2338		count++;
2339	} while (--cntdn);
2340
2341	printk(MPT2SAS_ERR_FMT "%s: failed due to timeout count(%d), "
2342	    "int_status(%x)!\n", ioc->name, __func__, count, int_status);
2343	return -EFAULT;
2344}
2345
2346/**
2347 * _base_wait_for_doorbell_ack - waiting for controller to read the doorbell.
2348 * @ioc: per adapter object
2349 * @timeout: timeout in seconds
2350 * @sleep_flag: CAN_SLEEP or NO_SLEEP
2351 *
2352 * Returns 0 for success, non-zero for failure.
2353 *
2354 * Notes: MPI2_HIS_SYS2IOC_DB_STATUS - set to one when host writes to
2355 * doorbell.
2356 */
2357static int
2358_base_wait_for_doorbell_ack(struct MPT2SAS_ADAPTER *ioc, int timeout,
2359    int sleep_flag)
2360{
2361	u32 cntdn, count;
2362	u32 int_status;
2363	u32 doorbell;
2364
2365	count = 0;
2366	cntdn = (sleep_flag == CAN_SLEEP) ? 1000*timeout : 2000*timeout;
2367	do {
2368		int_status = readl(&ioc->chip->HostInterruptStatus);
2369		if (!(int_status & MPI2_HIS_SYS2IOC_DB_STATUS)) {
2370			dhsprintk(ioc, printk(MPT2SAS_DEBUG_FMT "%s: "
2371			    "successful count(%d), timeout(%d)\n", ioc->name,
2372			    __func__, count, timeout));
2373			return 0;
2374		} else if (int_status & MPI2_HIS_IOC2SYS_DB_STATUS) {
2375			doorbell = readl(&ioc->chip->Doorbell);
2376			if ((doorbell & MPI2_IOC_STATE_MASK) ==
2377			    MPI2_IOC_STATE_FAULT) {
2378				mpt2sas_base_fault_info(ioc , doorbell);
2379				return -EFAULT;
2380			}
2381		} else if (int_status == 0xFFFFFFFF)
2382			goto out;
2383
2384		if (sleep_flag == CAN_SLEEP)
2385			msleep(1);
2386		else
2387			udelay(500);
2388		count++;
2389	} while (--cntdn);
2390
2391 out:
2392	printk(MPT2SAS_ERR_FMT "%s: failed due to timeout count(%d), "
2393	    "int_status(%x)!\n", ioc->name, __func__, count, int_status);
2394	return -EFAULT;
2395}
2396
2397/**
2398 * _base_wait_for_doorbell_not_used - waiting for doorbell to not be in use
2399 * @ioc: per adapter object
2400 * @timeout: timeout in seconds
2401 * @sleep_flag: CAN_SLEEP or NO_SLEEP
2402 *
2403 * Returns 0 for success, non-zero for failure.
2404 *
2405 */
2406static int
2407_base_wait_for_doorbell_not_used(struct MPT2SAS_ADAPTER *ioc, int timeout,
2408    int sleep_flag)
2409{
2410	u32 cntdn, count;
2411	u32 doorbell_reg;
2412
2413	count = 0;
2414	cntdn = (sleep_flag == CAN_SLEEP) ? 1000*timeout : 2000*timeout;
2415	do {
2416		doorbell_reg = readl(&ioc->chip->Doorbell);
2417		if (!(doorbell_reg & MPI2_DOORBELL_USED)) {
2418			dhsprintk(ioc, printk(MPT2SAS_DEBUG_FMT "%s: "
2419			    "successful count(%d), timeout(%d)\n", ioc->name,
2420			    __func__, count, timeout));
2421			return 0;
2422		}
2423		if (sleep_flag == CAN_SLEEP)
2424			msleep(1);
2425		else
2426			udelay(500);
2427		count++;
2428	} while (--cntdn);
2429
2430	printk(MPT2SAS_ERR_FMT "%s: failed due to timeout count(%d), "
2431	    "doorbell_reg(%x)!\n", ioc->name, __func__, count, doorbell_reg);
2432	return -EFAULT;
2433}
2434
2435/**
2436 * _base_send_ioc_reset - send doorbell reset
2437 * @ioc: per adapter object
2438 * @reset_type: currently only supports: MPI2_FUNCTION_IOC_MESSAGE_UNIT_RESET
2439 * @timeout: timeout in seconds
2440 * @sleep_flag: CAN_SLEEP or NO_SLEEP
2441 *
2442 * Returns 0 for success, non-zero for failure.
2443 */
2444static int
2445_base_send_ioc_reset(struct MPT2SAS_ADAPTER *ioc, u8 reset_type, int timeout,
2446    int sleep_flag)
2447{
2448	u32 ioc_state;
2449	int r = 0;
2450
2451	if (reset_type != MPI2_FUNCTION_IOC_MESSAGE_UNIT_RESET) {
2452		printk(MPT2SAS_ERR_FMT "%s: unknown reset_type\n",
2453		    ioc->name, __func__);
2454		return -EFAULT;
2455	}
2456
2457	if (!(ioc->facts.IOCCapabilities &
2458	   MPI2_IOCFACTS_CAPABILITY_EVENT_REPLAY))
2459		return -EFAULT;
2460
2461	printk(MPT2SAS_INFO_FMT "sending message unit reset !!\n", ioc->name);
2462
2463	writel(reset_type << MPI2_DOORBELL_FUNCTION_SHIFT,
2464	    &ioc->chip->Doorbell);
2465	if ((_base_wait_for_doorbell_ack(ioc, 15, sleep_flag))) {
2466		r = -EFAULT;
2467		goto out;
2468	}
2469	ioc_state = _base_wait_on_iocstate(ioc, MPI2_IOC_STATE_READY,
2470	    timeout, sleep_flag);
2471	if (ioc_state) {
2472		printk(MPT2SAS_ERR_FMT "%s: failed going to ready state "
2473		    " (ioc_state=0x%x)\n", ioc->name, __func__, ioc_state);
2474		r = -EFAULT;
2475		goto out;
2476	}
2477 out:
2478	printk(MPT2SAS_INFO_FMT "message unit reset: %s\n",
2479	    ioc->name, ((r == 0) ? "SUCCESS" : "FAILED"));
2480	return r;
2481}
2482
2483/**
2484 * _base_handshake_req_reply_wait - send request through the doorbell interface
2485 * @ioc: per adapter object
2486 * @request_bytes: request length
2487 * @request: pointer having request payload
2488 * @reply_bytes: reply length
2489 * @reply: pointer to reply payload
2490 * @timeout: timeout in seconds
2491 * @sleep_flag: CAN_SLEEP or NO_SLEEP
2492 *
2493 * Returns 0 for success, non-zero for failure.
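 *
 * As implemented below, the handshake is: write the HANDSHAKE function
 * and dword count to the Doorbell, wait for the IOC to acknowledge,
 * stream the request one dword at a time, then read the reply back one
 * 16-bit word at a time, clearing HostInterruptStatus between words.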
2494 */
2495static int
2496_base_handshake_req_reply_wait(struct MPT2SAS_ADAPTER *ioc, int request_bytes,
2497    u32 *request, int reply_bytes, u16 *reply, int timeout, int sleep_flag)
2498{
2499	MPI2DefaultReply_t *default_reply = (MPI2DefaultReply_t *)reply;
2500	int i;
2501	u8 failed;
2502	u16 dummy;
2503	u32 *mfp;
2504
2505	/* make sure doorbell is not in use */
2506	if ((readl(&ioc->chip->Doorbell) & MPI2_DOORBELL_USED)) {
2507		printk(MPT2SAS_ERR_FMT "doorbell is in use "
2508		    " (line=%d)\n", ioc->name, __LINE__);
2509		return -EFAULT;
2510	}
2511
2512	/* clear pending doorbell interrupts from previous state changes */
2513	if (readl(&ioc->chip->HostInterruptStatus) &
2514	    MPI2_HIS_IOC2SYS_DB_STATUS)
2515		writel(0, &ioc->chip->HostInterruptStatus);
2516
2517	/* send message to ioc */
2518	writel(((MPI2_FUNCTION_HANDSHAKE<<MPI2_DOORBELL_FUNCTION_SHIFT) |
2519	    ((request_bytes/4)<<MPI2_DOORBELL_ADD_DWORDS_SHIFT)),
2520	    &ioc->chip->Doorbell);
2521
2522	if ((_base_wait_for_doorbell_int(ioc, 5, NO_SLEEP))) {
2523		printk(MPT2SAS_ERR_FMT "doorbell handshake "
2524		   "int failed (line=%d)\n", ioc->name, __LINE__);
2525		return -EFAULT;
2526	}
2527	writel(0, &ioc->chip->HostInterruptStatus);
2528
2529	if ((_base_wait_for_doorbell_ack(ioc, 5, sleep_flag))) {
2530		printk(MPT2SAS_ERR_FMT "doorbell handshake "
2531		    "ack failed (line=%d)\n", ioc->name, __LINE__);
2532		return -EFAULT;
2533	}
2534
2535	/* send message 32-bits at a time */
2536	for (i = 0, failed = 0; i < request_bytes/4 && !failed; i++) {
2537		writel(cpu_to_le32(request[i]), &ioc->chip->Doorbell);
2538		if ((_base_wait_for_doorbell_ack(ioc, 5, sleep_flag)))
2539			failed = 1;
2540	}
2541
2542	if (failed) {
2543		printk(MPT2SAS_ERR_FMT "doorbell handshake "
2544		    "sending request failed (line=%d)\n", ioc->name, __LINE__);
2545		return -EFAULT;
2546	}
2547
2548	/* now wait for the reply */
2549	if ((_base_wait_for_doorbell_int(ioc, timeout, sleep_flag))) {
2550		printk(MPT2SAS_ERR_FMT "doorbell handshake "
2551		   "int failed (line=%d)\n", ioc->name, __LINE__);
2552		return -EFAULT;
2553	}
2554
2555	/* read the first two 16-bit words; they include the total reply length */
2556	reply[0] = le16_to_cpu(readl(&ioc->chip->Doorbell)
2557	    & MPI2_DOORBELL_DATA_MASK);
2558	writel(0, &ioc->chip->HostInterruptStatus);
2559	if ((_base_wait_for_doorbell_int(ioc, 5, sleep_flag))) {
2560		printk(MPT2SAS_ERR_FMT "doorbell handshake "
2561		   "int failed (line=%d)\n", ioc->name, __LINE__);
2562		return -EFAULT;
2563	}
2564	reply[1] = le16_to_cpu(readl(&ioc->chip->Doorbell)
2565	    & MPI2_DOORBELL_DATA_MASK);
2566	writel(0, &ioc->chip->HostInterruptStatus);
2567
2568	for (i = 2; i < default_reply->MsgLength * 2; i++)  {
2569		if ((_base_wait_for_doorbell_int(ioc, 5, sleep_flag))) {
2570			printk(MPT2SAS_ERR_FMT "doorbell "
2571			    "handshake int failed (line=%d)\n", ioc->name,
2572			    __LINE__);
2573			return -EFAULT;
2574		}
2575		if (i >=  reply_bytes/2) /* overflow case */
2576			dummy = readl(&ioc->chip->Doorbell);
2577		else
2578			reply[i] = le16_to_cpu(readl(&ioc->chip->Doorbell)
2579			    & MPI2_DOORBELL_DATA_MASK);
2580		writel(0, &ioc->chip->HostInterruptStatus);
2581	}
2582
2583	_base_wait_for_doorbell_int(ioc, 5, sleep_flag);
2584	if (_base_wait_for_doorbell_not_used(ioc, 5, sleep_flag) != 0) {
2585		dhsprintk(ioc, printk(MPT2SAS_INFO_FMT "doorbell is in use "
2586		    " (line=%d)\n", ioc->name, __LINE__));
2587	}
2588	writel(0, &ioc->chip->HostInterruptStatus);
2589
2590	if (ioc->logging_level & MPT_DEBUG_INIT) {
2591		mfp = (u32 *)reply;
2592		printk(KERN_DEBUG "\toffset:data\n");
2593		for (i = 0; i < reply_bytes/4; i++)
2594			printk(KERN_DEBUG "\t[0x%02x]:%08x\n", i*4,
2595			    le32_to_cpu(mfp[i]));
2596	}
2597	return 0;
2598}
2599
2600/**
2601 * mpt2sas_base_sas_iounit_control - send sas iounit control to FW
2602 * @ioc: per adapter object
2603 * @mpi_reply: the reply payload from FW
2604 * @mpi_request: the request payload sent to FW
2605 *
2606 * The SAS IO Unit Control Request message allows the host to perform
2607 * low-level operations, such as resets on the PHYs of the IO Unit. It also
2608 * allows the host to obtain the IOC-assigned device handle for a device,
2609 * given other identifying information about the device, and to remove IOC
2610 * resources associated with the device.
2611 *
2612 * Returns 0 for success, non-zero for failure.
2613 */
2614int
2615mpt2sas_base_sas_iounit_control(struct MPT2SAS_ADAPTER *ioc,
2616    Mpi2SasIoUnitControlReply_t *mpi_reply,
2617    Mpi2SasIoUnitControlRequest_t *mpi_request)
2618{
2619	u16 smid;
2620	u32 ioc_state;
2621	unsigned long timeleft;
2622	u8 issue_reset = 0;
2623	int rc;
2624	void *request;
2625	u16 wait_state_count;
2626
2627	dinitprintk(ioc, printk(MPT2SAS_DEBUG_FMT "%s\n", ioc->name,
2628	    __func__));
2629
2630	mutex_lock(&ioc->base_cmds.mutex);
2631
2632	if (ioc->base_cmds.status != MPT2_CMD_NOT_USED) {
2633		printk(MPT2SAS_ERR_FMT "%s: base_cmd in use\n",
2634		    ioc->name, __func__);
2635		rc = -EAGAIN;
2636		goto out;
2637	}
2638
2639	wait_state_count = 0;
2640	ioc_state = mpt2sas_base_get_iocstate(ioc, 1);
2641	while (ioc_state != MPI2_IOC_STATE_OPERATIONAL) {
2642		if (wait_state_count++ == 10) {
2643			printk(MPT2SAS_ERR_FMT
2644			    "%s: failed due to ioc not operational\n",
2645			    ioc->name, __func__);
2646			rc = -EFAULT;
2647			goto out;
2648		}
2649		ssleep(1);
2650		ioc_state = mpt2sas_base_get_iocstate(ioc, 1);
2651		printk(MPT2SAS_INFO_FMT "%s: waiting for "
2652		    "operational state(count=%d)\n", ioc->name,
2653		    __func__, wait_state_count);
2654	}
2655
2656	smid = mpt2sas_base_get_smid(ioc, ioc->base_cb_idx);
2657	if (!smid) {
2658		printk(MPT2SAS_ERR_FMT "%s: failed obtaining a smid\n",
2659		    ioc->name, __func__);
2660		rc = -EAGAIN;
2661		goto out;
2662	}
2663
2664	rc = 0;
2665	ioc->base_cmds.status = MPT2_CMD_PENDING;
2666	request = mpt2sas_base_get_msg_frame(ioc, smid);
2667	ioc->base_cmds.smid = smid;
2668	memcpy(request, mpi_request, sizeof(Mpi2SasIoUnitControlRequest_t));
2669	if (mpi_request->Operation == MPI2_SAS_OP_PHY_HARD_RESET ||
2670	    mpi_request->Operation == MPI2_SAS_OP_PHY_LINK_RESET)
2671		ioc->ioc_link_reset_in_progress = 1;
2672	mpt2sas_base_put_smid_default(ioc, smid);
2673	init_completion(&ioc->base_cmds.done);
2674	timeleft = wait_for_completion_timeout(&ioc->base_cmds.done,
2675	    msecs_to_jiffies(10000));
2676	if ((mpi_request->Operation == MPI2_SAS_OP_PHY_HARD_RESET ||
2677	    mpi_request->Operation == MPI2_SAS_OP_PHY_LINK_RESET) &&
2678	    ioc->ioc_link_reset_in_progress)
2679		ioc->ioc_link_reset_in_progress = 0;
2680	if (!(ioc->base_cmds.status & MPT2_CMD_COMPLETE)) {
2681		printk(MPT2SAS_ERR_FMT "%s: timeout\n",
2682		    ioc->name, __func__);
2683		_debug_dump_mf(mpi_request,
2684		    sizeof(Mpi2SasIoUnitControlRequest_t)/4);
2685		if (!(ioc->base_cmds.status & MPT2_CMD_RESET))
2686			issue_reset = 1;
2687		goto issue_host_reset;
2688	}
2689	if (ioc->base_cmds.status & MPT2_CMD_REPLY_VALID)
2690		memcpy(mpi_reply, ioc->base_cmds.reply,
2691		    sizeof(Mpi2SasIoUnitControlReply_t));
2692	else
2693		memset(mpi_reply, 0, sizeof(Mpi2SasIoUnitControlReply_t));
2694	ioc->base_cmds.status = MPT2_CMD_NOT_USED;
2695	goto out;
2696
2697 issue_host_reset:
2698	if (issue_reset)
2699		mpt2sas_base_hard_reset_handler(ioc, CAN_SLEEP,
2700		    FORCE_BIG_HAMMER);
2701	ioc->base_cmds.status = MPT2_CMD_NOT_USED;
2702	rc = -EFAULT;
2703 out:
2704	mutex_unlock(&ioc->base_cmds.mutex);
2705	return rc;
2706}
2707
2708
2709/**
2710 * mpt2sas_base_scsi_enclosure_processor - sending request to sep device
2711 * @ioc: per adapter object
2712 * @mpi_reply: the reply payload from FW
2713 * @mpi_request: the request payload sent to FW
2714 *
2715 * The SCSI Enclosure Processor request message causes the IOC to
2716 * communicate with SES devices to control LED status signals.
2717 *
2718 * Returns 0 for success, non-zero for failure.
2719 */
2720int
2721mpt2sas_base_scsi_enclosure_processor(struct MPT2SAS_ADAPTER *ioc,
2722    Mpi2SepReply_t *mpi_reply, Mpi2SepRequest_t *mpi_request)
2723{
2724	u16 smid;
2725	u32 ioc_state;
2726	unsigned long timeleft;
2727	u8 issue_reset = 0;
2728	int rc;
2729	void *request;
2730	u16 wait_state_count;
2731
2732	dinitprintk(ioc, printk(MPT2SAS_DEBUG_FMT "%s\n", ioc->name,
2733	    __func__));
2734
2735	mutex_lock(&ioc->base_cmds.mutex);
2736
2737	if (ioc->base_cmds.status != MPT2_CMD_NOT_USED) {
2738		printk(MPT2SAS_ERR_FMT "%s: base_cmd in use\n",
2739		    ioc->name, __func__);
2740		rc = -EAGAIN;
2741		goto out;
2742	}
2743
2744	wait_state_count = 0;
2745	ioc_state = mpt2sas_base_get_iocstate(ioc, 1);
2746	while (ioc_state != MPI2_IOC_STATE_OPERATIONAL) {
2747		if (wait_state_count++ == 10) {
2748			printk(MPT2SAS_ERR_FMT
2749			    "%s: failed due to ioc not operational\n",
2750			    ioc->name, __func__);
2751			rc = -EFAULT;
2752			goto out;
2753		}
2754		ssleep(1);
2755		ioc_state = mpt2sas_base_get_iocstate(ioc, 1);
2756		printk(MPT2SAS_INFO_FMT "%s: waiting for "
2757		    "operational state(count=%d)\n", ioc->name,
2758		    __func__, wait_state_count);
2759	}
2760
2761	smid = mpt2sas_base_get_smid(ioc, ioc->base_cb_idx);
2762	if (!smid) {
2763		printk(MPT2SAS_ERR_FMT "%s: failed obtaining a smid\n",
2764		    ioc->name, __func__);
2765		rc = -EAGAIN;
2766		goto out;
2767	}
2768
2769	rc = 0;
2770	ioc->base_cmds.status = MPT2_CMD_PENDING;
2771	request = mpt2sas_base_get_msg_frame(ioc, smid);
2772	ioc->base_cmds.smid = smid;
2773	memcpy(request, mpi_request, sizeof(Mpi2SepRequest_t));
2774	mpt2sas_base_put_smid_default(ioc, smid);
2775	init_completion(&ioc->base_cmds.done);
2776	timeleft = wait_for_completion_timeout(&ioc->base_cmds.done,
2777	    msecs_to_jiffies(10000));
2778	if (!(ioc->base_cmds.status & MPT2_CMD_COMPLETE)) {
2779		printk(MPT2SAS_ERR_FMT "%s: timeout\n",
2780		    ioc->name, __func__);
2781		_debug_dump_mf(mpi_request,
2782		    sizeof(Mpi2SepRequest_t)/4);
2783		if (!(ioc->base_cmds.status & MPT2_CMD_RESET))
2784			issue_reset = 1;
2785		goto issue_host_reset;
2786	}
2787	if (ioc->base_cmds.status & MPT2_CMD_REPLY_VALID)
2788		memcpy(mpi_reply, ioc->base_cmds.reply,
2789		    sizeof(Mpi2SepReply_t));
2790	else
2791		memset(mpi_reply, 0, sizeof(Mpi2SepReply_t));
2792	ioc->base_cmds.status = MPT2_CMD_NOT_USED;
2793	goto out;
2794
2795 issue_host_reset:
2796	if (issue_reset)
2797		mpt2sas_base_hard_reset_handler(ioc, CAN_SLEEP,
2798		    FORCE_BIG_HAMMER);
2799	ioc->base_cmds.status = MPT2_CMD_NOT_USED;
2800	rc = -EFAULT;
2801 out:
2802	mutex_unlock(&ioc->base_cmds.mutex);
2803	return rc;
2804}
2805
2806/**
2807 * _base_get_port_facts - obtain port facts reply and save in ioc
2808 * @ioc: per adapter object
2809 * @sleep_flag: CAN_SLEEP or NO_SLEEP
2810 *
2811 * Returns 0 for success, non-zero for failure.
2812 */
2813static int
2814_base_get_port_facts(struct MPT2SAS_ADAPTER *ioc, int port, int sleep_flag)
2815{
2816	Mpi2PortFactsRequest_t mpi_request;
2817	Mpi2PortFactsReply_t mpi_reply, *pfacts;
2818	int mpi_reply_sz, mpi_request_sz, r;
2819
2820	dinitprintk(ioc, printk(MPT2SAS_DEBUG_FMT "%s\n", ioc->name,
2821	    __func__));
2822
2823	mpi_reply_sz = sizeof(Mpi2PortFactsReply_t);
2824	mpi_request_sz = sizeof(Mpi2PortFactsRequest_t);
2825	memset(&mpi_request, 0, mpi_request_sz);
2826	mpi_request.Function = MPI2_FUNCTION_PORT_FACTS;
2827	mpi_request.PortNumber = port;
2828	r = _base_handshake_req_reply_wait(ioc, mpi_request_sz,
2829	    (u32 *)&mpi_request, mpi_reply_sz, (u16 *)&mpi_reply, 5, CAN_SLEEP);
2830
2831	if (r != 0) {
2832		printk(MPT2SAS_ERR_FMT "%s: handshake failed (r=%d)\n",
2833		    ioc->name, __func__, r);
2834		return r;
2835	}
2836
2837	pfacts = &ioc->pfacts[port];
2838	memset(pfacts, 0, sizeof(Mpi2PortFactsReply_t));
2839	pfacts->PortNumber = mpi_reply.PortNumber;
2840	pfacts->VP_ID = mpi_reply.VP_ID;
2841	pfacts->VF_ID = mpi_reply.VF_ID;
2842	pfacts->MaxPostedCmdBuffers =
2843	    le16_to_cpu(mpi_reply.MaxPostedCmdBuffers);
2844
2845	return 0;
2846}
2847
2848/**
2849 * _base_get_ioc_facts - obtain ioc facts reply and save in ioc
2850 * @ioc: per adapter object
2851 * @sleep_flag: CAN_SLEEP or NO_SLEEP
2852 *
2853 * Returns 0 for success, non-zero for failure.
2854 */
2855static int
2856_base_get_ioc_facts(struct MPT2SAS_ADAPTER *ioc, int sleep_flag)
2857{
2858	Mpi2IOCFactsRequest_t mpi_request;
2859	Mpi2IOCFactsReply_t mpi_reply, *facts;
2860	int mpi_reply_sz, mpi_request_sz, r;
2861
2862	dinitprintk(ioc, printk(MPT2SAS_DEBUG_FMT "%s\n", ioc->name,
2863	    __func__));
2864
2865	mpi_reply_sz = sizeof(Mpi2IOCFactsReply_t);
2866	mpi_request_sz = sizeof(Mpi2IOCFactsRequest_t);
2867	memset(&mpi_request, 0, mpi_request_sz);
2868	mpi_request.Function = MPI2_FUNCTION_IOC_FACTS;
2869	r = _base_handshake_req_reply_wait(ioc, mpi_request_sz,
2870	    (u32 *)&mpi_request, mpi_reply_sz, (u16 *)&mpi_reply, 5, CAN_SLEEP);
2871
2872	if (r != 0) {
2873		printk(MPT2SAS_ERR_FMT "%s: handshake failed (r=%d)\n",
2874		    ioc->name, __func__, r);
2875		return r;
2876	}
2877
2878	facts = &ioc->facts;
2879	memset(facts, 0, sizeof(Mpi2IOCFactsReply_t));
2880	facts->MsgVersion = le16_to_cpu(mpi_reply.MsgVersion);
2881	facts->HeaderVersion = le16_to_cpu(mpi_reply.HeaderVersion);
2882	facts->VP_ID = mpi_reply.VP_ID;
2883	facts->VF_ID = mpi_reply.VF_ID;
2884	facts->IOCExceptions = le16_to_cpu(mpi_reply.IOCExceptions);
2885	facts->MaxChainDepth = mpi_reply.MaxChainDepth;
2886	facts->WhoInit = mpi_reply.WhoInit;
2887	facts->NumberOfPorts = mpi_reply.NumberOfPorts;
2888	facts->RequestCredit = le16_to_cpu(mpi_reply.RequestCredit);
2889	facts->MaxReplyDescriptorPostQueueDepth =
2890	    le16_to_cpu(mpi_reply.MaxReplyDescriptorPostQueueDepth);
2891	facts->ProductID = le16_to_cpu(mpi_reply.ProductID);
2892	facts->IOCCapabilities = le32_to_cpu(mpi_reply.IOCCapabilities);
2893	if ((facts->IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_INTEGRATED_RAID))
2894		ioc->ir_firmware = 1;
2895	facts->FWVersion.Word = le32_to_cpu(mpi_reply.FWVersion.Word);
2896	facts->IOCRequestFrameSize =
2897	    le16_to_cpu(mpi_reply.IOCRequestFrameSize);
2898	facts->MaxInitiators = le16_to_cpu(mpi_reply.MaxInitiators);
2899	facts->MaxTargets = le16_to_cpu(mpi_reply.MaxTargets);
2900	ioc->shost->max_id = -1;
2901	facts->MaxSasExpanders = le16_to_cpu(mpi_reply.MaxSasExpanders);
2902	facts->MaxEnclosures = le16_to_cpu(mpi_reply.MaxEnclosures);
2903	facts->ProtocolFlags = le16_to_cpu(mpi_reply.ProtocolFlags);
2904	facts->HighPriorityCredit =
2905	    le16_to_cpu(mpi_reply.HighPriorityCredit);
2906	facts->ReplyFrameSize = mpi_reply.ReplyFrameSize;
2907	facts->MaxDevHandle = le16_to_cpu(mpi_reply.MaxDevHandle);
2908
2909	dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "hba queue depth(%d), "
2910	    "max chains per io(%d)\n", ioc->name, facts->RequestCredit,
2911	    facts->MaxChainDepth));
2912	dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "request frame size(%d), "
2913	    "reply frame size(%d)\n", ioc->name,
2914	    facts->IOCRequestFrameSize * 4, facts->ReplyFrameSize * 4));
2915	return 0;
2916}
2917
2918/**
2919 * _base_send_ioc_init - send ioc_init to firmware
2920 * @ioc: per adapter object
2921 * @sleep_flag: CAN_SLEEP or NO_SLEEP
2922 *
2923 * Returns 0 for success, non-zero for failure.
2924 */
2925static int
2926_base_send_ioc_init(struct MPT2SAS_ADAPTER *ioc, int sleep_flag)
2927{
2928	Mpi2IOCInitRequest_t mpi_request;
2929	Mpi2IOCInitReply_t mpi_reply;
2930	int r;
2931
2932	dinitprintk(ioc, printk(MPT2SAS_DEBUG_FMT "%s\n", ioc->name,
2933	    __func__));
2934
2935	memset(&mpi_request, 0, sizeof(Mpi2IOCInitRequest_t));
2936	mpi_request.Function = MPI2_FUNCTION_IOC_INIT;
2937	mpi_request.WhoInit = MPI2_WHOINIT_HOST_DRIVER;
2938	mpi_request.VF_ID = 0; /* TODO */
2939	mpi_request.VP_ID = 0;
2940	mpi_request.MsgVersion = cpu_to_le16(MPI2_VERSION);
2941	mpi_request.HeaderVersion = cpu_to_le16(MPI2_HEADER_VERSION);
2942
2943	/* In MPI Revision I (0xA), the SystemReplyFrameSize (offset 0x18) was
2944	 * removed and made reserved.  Controllers with older firmware still need
2945	 * it set here; it was decided that the Reply and Request frame sizes are
2946	 * the same.
2947	 */
2948	if ((ioc->facts.HeaderVersion >> 8) < 0xA) {
2949		mpi_request.Reserved7 = cpu_to_le16(ioc->reply_sz);
2950/*		mpi_request.SystemReplyFrameSize =
2951 *		 cpu_to_le16(ioc->reply_sz);
2952 */
2953	}
2954
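	/* the IOC expects the request frame size in dwords, hence the
	 * divide by four (the inverse of the "* 4" applied to
	 * IOCRequestFrameSize when request_sz was computed) */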
2955	mpi_request.SystemRequestFrameSize = cpu_to_le16(ioc->request_sz/4);
2956	mpi_request.ReplyDescriptorPostQueueDepth =
2957	    cpu_to_le16(ioc->reply_post_queue_depth);
2958	mpi_request.ReplyFreeQueueDepth =
2959	    cpu_to_le16(ioc->reply_free_queue_depth);
2960
2961#if BITS_PER_LONG > 32
2962	mpi_request.SenseBufferAddressHigh =
2963	    cpu_to_le32(ioc->sense_dma >> 32);
2964	mpi_request.SystemReplyAddressHigh =
2965	    cpu_to_le32(ioc->reply_dma >> 32);
2966	mpi_request.SystemRequestFrameBaseAddress =
2967	    cpu_to_le64(ioc->request_dma);
2968	mpi_request.ReplyFreeQueueAddress =
2969	    cpu_to_le64(ioc->reply_free_dma);
2970	mpi_request.ReplyDescriptorPostQueueAddress =
2971	    cpu_to_le64(ioc->reply_post_free_dma);
2972#else
2973	mpi_request.SystemRequestFrameBaseAddress =
2974	    cpu_to_le32(ioc->request_dma);
2975	mpi_request.ReplyFreeQueueAddress =
2976	    cpu_to_le32(ioc->reply_free_dma);
2977	mpi_request.ReplyDescriptorPostQueueAddress =
2978	    cpu_to_le32(ioc->reply_post_free_dma);
2979#endif
2980
2981	if (ioc->logging_level & MPT_DEBUG_INIT) {
2982		u32 *mfp;
2983		int i;
2984
2985		mfp = (u32 *)&mpi_request;
2986		printk(KERN_DEBUG "\toffset:data\n");
2987		for (i = 0; i < sizeof(Mpi2IOCInitRequest_t)/4; i++)
2988			printk(KERN_DEBUG "\t[0x%02x]:%08x\n", i*4,
2989			    le32_to_cpu(mfp[i]));
2990	}
2991
2992	r = _base_handshake_req_reply_wait(ioc,
2993	    sizeof(Mpi2IOCInitRequest_t), (u32 *)&mpi_request,
2994	    sizeof(Mpi2IOCInitReply_t), (u16 *)&mpi_reply, 10,
2995	    sleep_flag);
2996
2997	if (r != 0) {
2998		printk(MPT2SAS_ERR_FMT "%s: handshake failed (r=%d)\n",
2999		    ioc->name, __func__, r);
3000		return r;
3001	}
3002
3003	if (mpi_reply.IOCStatus != MPI2_IOCSTATUS_SUCCESS ||
3004	    mpi_reply.IOCLogInfo) {
3005		printk(MPT2SAS_ERR_FMT "%s: failed\n", ioc->name, __func__);
3006		r = -EIO;
3007	}
3008
3009	return r;
3010}
3011
3012/**
3013 * _base_send_port_enable - send port_enable (discovery stuff) to firmware
3014 * @ioc: per adapter object
3015 * @sleep_flag: CAN_SLEEP or NO_SLEEP
3016 *
3017 * Returns 0 for success, non-zero for failure.
3018 */
3019static int
3020_base_send_port_enable(struct MPT2SAS_ADAPTER *ioc, int sleep_flag)
3021{
3022	Mpi2PortEnableRequest_t *mpi_request;
3023	u32 ioc_state;
3024	unsigned long timeleft;
3025	int r = 0;
3026	u16 smid;
3027
3028	printk(MPT2SAS_INFO_FMT "sending port enable !!\n", ioc->name);
3029
3030	if (ioc->base_cmds.status & MPT2_CMD_PENDING) {
3031		printk(MPT2SAS_ERR_FMT "%s: internal command already in use\n",
3032		    ioc->name, __func__);
3033		return -EAGAIN;
3034	}
3035
3036	smid = mpt2sas_base_get_smid(ioc, ioc->base_cb_idx);
3037	if (!smid) {
3038		printk(MPT2SAS_ERR_FMT "%s: failed obtaining a smid\n",
3039		    ioc->name, __func__);
3040		return -EAGAIN;
3041	}
3042
3043	ioc->base_cmds.status = MPT2_CMD_PENDING;
3044	mpi_request = mpt2sas_base_get_msg_frame(ioc, smid);
3045	ioc->base_cmds.smid = smid;
3046	memset(mpi_request, 0, sizeof(Mpi2PortEnableRequest_t));
3047	mpi_request->Function = MPI2_FUNCTION_PORT_ENABLE;
3048	mpi_request->VF_ID = 0; /* TODO */
3049	mpi_request->VP_ID = 0;
3050
3051	mpt2sas_base_put_smid_default(ioc, smid);
3052	init_completion(&ioc->base_cmds.done);
3053	timeleft = wait_for_completion_timeout(&ioc->base_cmds.done,
3054	    300*HZ);
3055	if (!(ioc->base_cmds.status & MPT2_CMD_COMPLETE)) {
3056		printk(MPT2SAS_ERR_FMT "%s: timeout\n",
3057		    ioc->name, __func__);
3058		_debug_dump_mf(mpi_request,
3059		    sizeof(Mpi2PortEnableRequest_t)/4);
3060		if (ioc->base_cmds.status & MPT2_CMD_RESET)
3061			r = -EFAULT;
3062		else
3063			r = -ETIME;
3064		goto out;
3065	} else
3066		dinitprintk(ioc, printk(MPT2SAS_DEBUG_FMT "%s: complete\n",
3067		    ioc->name, __func__));
3068
3069	ioc_state = _base_wait_on_iocstate(ioc, MPI2_IOC_STATE_OPERATIONAL,
3070	    60, sleep_flag);
3071	if (ioc_state) {
3072		printk(MPT2SAS_ERR_FMT "%s: failed going to operational state "
3073		    " (ioc_state=0x%x)\n", ioc->name, __func__, ioc_state);
3074		r = -EFAULT;
3075	}
3076 out:
3077	ioc->base_cmds.status = MPT2_CMD_NOT_USED;
3078	printk(MPT2SAS_INFO_FMT "port enable: %s\n",
3079	    ioc->name, ((r == 0) ? "SUCCESS" : "FAILED"));
3080	return r;
3081}
3082
3083/**
3084 * _base_unmask_events - turn on notification for this event
3085 * @ioc: per adapter object
3086 * @event: firmware event
3087 *
3088 * The mask is stored in ioc->event_masks.
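 *
 * Events are grouped 32 per mask word, four words in total; for
 * example, unmasking event 0x21 clears bit 1 of ioc->event_masks[1].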
3089 */
3090static void
3091_base_unmask_events(struct MPT2SAS_ADAPTER *ioc, u16 event)
3092{
3093	u32 desired_event;
3094
3095	if (event >= 128)
3096		return;
3097
3098	desired_event = (1 << (event % 32));
3099
3100	if (event < 32)
3101		ioc->event_masks[0] &= ~desired_event;
3102	else if (event < 64)
3103		ioc->event_masks[1] &= ~desired_event;
3104	else if (event < 96)
3105		ioc->event_masks[2] &= ~desired_event;
3106	else if (event < 128)
3107		ioc->event_masks[3] &= ~desired_event;
3108}
3109
3110/**
3111 * _base_event_notification - send event notification
3112 * @ioc: per adapter object
3113 * @sleep_flag: CAN_SLEEP or NO_SLEEP
3114 *
3115 * Returns 0 for success, non-zero for failure.
3116 */
3117static int
3118_base_event_notification(struct MPT2SAS_ADAPTER *ioc, int sleep_flag)
3119{
3120	Mpi2EventNotificationRequest_t *mpi_request;
3121	unsigned long timeleft;
3122	u16 smid;
3123	int r = 0;
3124	int i;
3125
3126	dinitprintk(ioc, printk(MPT2SAS_DEBUG_FMT "%s\n", ioc->name,
3127	    __func__));
3128
3129	if (ioc->base_cmds.status & MPT2_CMD_PENDING) {
3130		printk(MPT2SAS_ERR_FMT "%s: internal command already in use\n",
3131		    ioc->name, __func__);
3132		return -EAGAIN;
3133	}
3134
3135	smid = mpt2sas_base_get_smid(ioc, ioc->base_cb_idx);
3136	if (!smid) {
3137		printk(MPT2SAS_ERR_FMT "%s: failed obtaining a smid\n",
3138		    ioc->name, __func__);
3139		return -EAGAIN;
3140	}
3141	ioc->base_cmds.status = MPT2_CMD_PENDING;
3142	mpi_request = mpt2sas_base_get_msg_frame(ioc, smid);
3143	ioc->base_cmds.smid = smid;
3144	memset(mpi_request, 0, sizeof(Mpi2EventNotificationRequest_t));
3145	mpi_request->Function = MPI2_FUNCTION_EVENT_NOTIFICATION;
3146	mpi_request->VF_ID = 0; /* TODO */
3147	mpi_request->VP_ID = 0;
3148	for (i = 0; i < MPI2_EVENT_NOTIFY_EVENTMASK_WORDS; i++)
3149		mpi_request->EventMasks[i] =
3150		    cpu_to_le32(ioc->event_masks[i]);
3151	mpt2sas_base_put_smid_default(ioc, smid);
3152	init_completion(&ioc->base_cmds.done);
3153	timeleft = wait_for_completion_timeout(&ioc->base_cmds.done, 30*HZ);
3154	if (!(ioc->base_cmds.status & MPT2_CMD_COMPLETE)) {
3155		printk(MPT2SAS_ERR_FMT "%s: timeout\n",
3156		    ioc->name, __func__);
3157		_debug_dump_mf(mpi_request,
3158		    sizeof(Mpi2EventNotificationRequest_t)/4);
3159		if (ioc->base_cmds.status & MPT2_CMD_RESET)
3160			r = -EFAULT;
3161		else
3162			r = -ETIME;
3163	} else
3164		dinitprintk(ioc, printk(MPT2SAS_DEBUG_FMT "%s: complete\n",
3165		    ioc->name, __func__));
3166	ioc->base_cmds.status = MPT2_CMD_NOT_USED;
3167	return r;
3168}
3169
3170/**
3171 * mpt2sas_base_validate_event_type - validating event types
3172 * @ioc: per adapter object
3173 * @event_type: firmware event type mask words requested by the application
3174 *
3175 * This will turn on firmware event notification when an application
3176 * asks for that event. We don't mask events that are already enabled.
3177 */
3178void
3179mpt2sas_base_validate_event_type(struct MPT2SAS_ADAPTER *ioc, u32 *event_type)
3180{
3181	int i, j;
3182	u32 event_mask, desired_event;
3183	u8 send_update_to_fw;
3184
3185	for (i = 0, send_update_to_fw = 0; i <
3186	    MPI2_EVENT_NOTIFY_EVENTMASK_WORDS; i++) {
3187		event_mask = ~event_type[i];
3188		desired_event = 1;
3189		for (j = 0; j < 32; j++) {
3190			if (!(event_mask & desired_event) &&
3191			    (ioc->event_masks[i] & desired_event)) {
3192				ioc->event_masks[i] &= ~desired_event;
3193				send_update_to_fw = 1;
3194			}
3195			desired_event = (desired_event << 1);
3196		}
3197	}
3198
3199	if (!send_update_to_fw)
3200		return;
3201
3202	mutex_lock(&ioc->base_cmds.mutex);
3203	_base_event_notification(ioc, CAN_SLEEP);
3204	mutex_unlock(&ioc->base_cmds.mutex);
3205}
3206
3207/**
3208 * _base_diag_reset - the "big hammer" start of day reset
3209 * @ioc: per adapter object
3210 * @sleep_flag: CAN_SLEEP or NO_SLEEP
3211 *
3212 * Returns 0 for success, non-zero for failure.
3213 */
3214static int
3215_base_diag_reset(struct MPT2SAS_ADAPTER *ioc, int sleep_flag)
3216{
3217	u32 host_diagnostic;
3218	u32 ioc_state;
3219	u32 count;
3220	u32 hcb_size;
3221
3222	printk(MPT2SAS_INFO_FMT "sending diag reset !!\n", ioc->name);
3223
3224	_base_save_msix_table(ioc);
3225
3226	drsprintk(ioc, printk(MPT2SAS_DEBUG_FMT "clear interrupts\n",
3227	    ioc->name));
3228
3229	count = 0;
3230	do {
3231		/* Write magic sequence to WriteSequence register
3232		 * Loop until in diagnostic mode
3233		 */
3234		drsprintk(ioc, printk(MPT2SAS_DEBUG_FMT "write magic "
3235		    "sequence\n", ioc->name));
3236		writel(MPI2_WRSEQ_FLUSH_KEY_VALUE, &ioc->chip->WriteSequence);
3237		writel(MPI2_WRSEQ_1ST_KEY_VALUE, &ioc->chip->WriteSequence);
3238		writel(MPI2_WRSEQ_2ND_KEY_VALUE, &ioc->chip->WriteSequence);
3239		writel(MPI2_WRSEQ_3RD_KEY_VALUE, &ioc->chip->WriteSequence);
3240		writel(MPI2_WRSEQ_4TH_KEY_VALUE, &ioc->chip->WriteSequence);
3241		writel(MPI2_WRSEQ_5TH_KEY_VALUE, &ioc->chip->WriteSequence);
3242		writel(MPI2_WRSEQ_6TH_KEY_VALUE, &ioc->chip->WriteSequence);
3243
3244		/* wait 100 msec */
3245		if (sleep_flag == CAN_SLEEP)
3246			msleep(100);
3247		else
3248			mdelay(100);
3249
3250		if (count++ > 20)
3251			goto out;
3252
3253		host_diagnostic = readl(&ioc->chip->HostDiagnostic);
3254		drsprintk(ioc, printk(MPT2SAS_DEBUG_FMT "wrote magic "
3255		    "sequence: count(%d), host_diagnostic(0x%08x)\n",
3256		    ioc->name, count, host_diagnostic));
3257
3258	} while ((host_diagnostic & MPI2_DIAG_DIAG_WRITE_ENABLE) == 0);
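	/* DIAG_WRITE_ENABLE is now set, so the HostDiagnostic register
	 * accepts writes from the host */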
3259
3260	hcb_size = readl(&ioc->chip->HCBSize);
3261
3262	drsprintk(ioc, printk(MPT2SAS_DEBUG_FMT "diag reset: issued\n",
3263	    ioc->name));
3264	writel(host_diagnostic | MPI2_DIAG_RESET_ADAPTER,
3265	     &ioc->chip->HostDiagnostic);
3266
3267	/* don't access any registers for 50 milliseconds */
3268	msleep(50);
3269
3270	/* 300 second max wait */
3271	for (count = 0; count < 3000000 ; count++) {
3272
3273		host_diagnostic = readl(&ioc->chip->HostDiagnostic);
3274
3275		if (host_diagnostic == 0xFFFFFFFF)
3276			goto out;
3277		if (!(host_diagnostic & MPI2_DIAG_RESET_ADAPTER))
3278			break;
3279
3280		/* wait 1 msec */
3281		if (sleep_flag == CAN_SLEEP)
3282			msleep(1);
3283		else
3284			mdelay(1);
3285	}
3286
3287	if (host_diagnostic & MPI2_DIAG_HCB_MODE) {
3288
3289		drsprintk(ioc, printk(MPT2SAS_DEBUG_FMT "restart the adapter "
3290		    "assuming the HCB Address points to good F/W\n",
3291		    ioc->name));
3292		host_diagnostic &= ~MPI2_DIAG_BOOT_DEVICE_SELECT_MASK;
3293		host_diagnostic |= MPI2_DIAG_BOOT_DEVICE_SELECT_HCDW;
3294		writel(host_diagnostic, &ioc->chip->HostDiagnostic);
3295
3296		drsprintk(ioc, printk(MPT2SAS_DEBUG_FMT
3297		    "re-enable the HCDW\n", ioc->name));
3298		writel(hcb_size | MPI2_HCB_SIZE_HCB_ENABLE,
3299		    &ioc->chip->HCBSize);
3300	}
3301
3302	drsprintk(ioc, printk(MPT2SAS_DEBUG_FMT "restart the adapter\n",
3303	    ioc->name));
3304	writel(host_diagnostic & ~MPI2_DIAG_HOLD_IOC_RESET,
3305	    &ioc->chip->HostDiagnostic);
3306
3307	drsprintk(ioc, printk(MPT2SAS_DEBUG_FMT "disable writes to the "
3308	    "diagnostic register\n", ioc->name));
3309	writel(MPI2_WRSEQ_FLUSH_KEY_VALUE, &ioc->chip->WriteSequence);
3310
3311	drsprintk(ioc, printk(MPT2SAS_DEBUG_FMT "Wait for FW to go to the "
3312	    "READY state\n", ioc->name));
3313	ioc_state = _base_wait_on_iocstate(ioc, MPI2_IOC_STATE_READY, 20,
3314	    sleep_flag);
3315	if (ioc_state) {
3316		printk(MPT2SAS_ERR_FMT "%s: failed going to ready state "
3317		    " (ioc_state=0x%x)\n", ioc->name, __func__, ioc_state);
3318		goto out;
3319	}
3320
3321	_base_restore_msix_table(ioc);
3322	printk(MPT2SAS_INFO_FMT "diag reset: SUCCESS\n", ioc->name);
3323	return 0;
3324
3325 out:
3326	printk(MPT2SAS_ERR_FMT "diag reset: FAILED\n", ioc->name);
3327	return -EFAULT;
3328}
3329
3330/**
3331 * _base_make_ioc_ready - put controller in READY state
3332 * @ioc: per adapter object
3333 * @sleep_flag: CAN_SLEEP or NO_SLEEP
3334 * @type: FORCE_BIG_HAMMER or SOFT_RESET
3335 *
3336 * Returns 0 for success, non-zero for failure.
3337 */
3338static int
3339_base_make_ioc_ready(struct MPT2SAS_ADAPTER *ioc, int sleep_flag,
3340    enum reset_type type)
3341{
3342	u32 ioc_state;
3343
3344	dinitprintk(ioc, printk(MPT2SAS_DEBUG_FMT "%s\n", ioc->name,
3345	    __func__));
3346
3347	ioc_state = mpt2sas_base_get_iocstate(ioc, 0);
3348	dhsprintk(ioc, printk(MPT2SAS_DEBUG_FMT "%s: ioc_state(0x%08x)\n",
3349	    ioc->name, __func__, ioc_state));
3350
3351	if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_READY)
3352		return 0;
3353
3354	if (ioc_state & MPI2_DOORBELL_USED) {
3355		dhsprintk(ioc, printk(MPT2SAS_DEBUG_FMT "unexpected doorbell "
3356		    "active!\n", ioc->name));
3357		goto issue_diag_reset;
3358	}
3359
3360	if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) {
3361		mpt2sas_base_fault_info(ioc, ioc_state &
3362		    MPI2_DOORBELL_DATA_MASK);
3363		goto issue_diag_reset;
3364	}
3365
3366	if (type == FORCE_BIG_HAMMER)
3367		goto issue_diag_reset;
3368
3369	if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_OPERATIONAL)
3370		if (!(_base_send_ioc_reset(ioc,
3371		    MPI2_FUNCTION_IOC_MESSAGE_UNIT_RESET, 15, CAN_SLEEP)))
3372			return 0;
3373
3374 issue_diag_reset:
3375	return _base_diag_reset(ioc, CAN_SLEEP);
3376}
3377
3378/**
3379 * _base_make_ioc_operational - put controller in OPERATIONAL state
3380 * @ioc: per adapter object
3381 * @sleep_flag: CAN_SLEEP or NO_SLEEP
3382 *
3383 * Returns 0 for success, non-zero for failure.
3384 */
3385static int
3386_base_make_ioc_operational(struct MPT2SAS_ADAPTER *ioc, int sleep_flag)
3387{
3388	int r, i;
3389	unsigned long	flags;
3390	u32 reply_address;
3391	u16 smid;
3392	struct _tr_list *delayed_tr, *delayed_tr_next;
3393
3394	dinitprintk(ioc, printk(MPT2SAS_DEBUG_FMT "%s\n", ioc->name,
3395	    __func__));
3396
3397	/* clean the delayed target reset list */
3398	list_for_each_entry_safe(delayed_tr, delayed_tr_next,
3399	    &ioc->delayed_tr_list, list) {
3400		list_del(&delayed_tr->list);
3401		kfree(delayed_tr);
3402	}
3403
3404	/* initialize the scsi lookup free list */
3405	spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
3406	INIT_LIST_HEAD(&ioc->free_list);
3407	smid = 1;
3408	for (i = 0; i < ioc->scsiio_depth; i++, smid++) {
3409		ioc->scsi_lookup[i].cb_idx = 0xFF;
3410		ioc->scsi_lookup[i].smid = smid;
3411		ioc->scsi_lookup[i].scmd = NULL;
3412		list_add_tail(&ioc->scsi_lookup[i].tracker_list,
3413		    &ioc->free_list);
3414	}
3415
3416	/* hi-priority queue */
3417	INIT_LIST_HEAD(&ioc->hpr_free_list);
3418	smid = ioc->hi_priority_smid;
3419	for (i = 0; i < ioc->hi_priority_depth; i++, smid++) {
3420		ioc->hpr_lookup[i].cb_idx = 0xFF;
3421		ioc->hpr_lookup[i].smid = smid;
3422		list_add_tail(&ioc->hpr_lookup[i].tracker_list,
3423		    &ioc->hpr_free_list);
3424	}
3425
3426	/* internal queue */
3427	INIT_LIST_HEAD(&ioc->internal_free_list);
3428	smid = ioc->internal_smid;
3429	for (i = 0; i < ioc->internal_depth; i++, smid++) {
3430		ioc->internal_lookup[i].cb_idx = 0xFF;
3431		ioc->internal_lookup[i].smid = smid;
3432		list_add_tail(&ioc->internal_lookup[i].tracker_list,
3433		    &ioc->internal_free_list);
3434	}
3435	spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
3436
3437	/* initialize Reply Free Queue */
3438	for (i = 0, reply_address = (u32)ioc->reply_dma ;
3439	    i < ioc->reply_free_queue_depth ; i++, reply_address +=
3440	    ioc->reply_sz)
3441		ioc->reply_free[i] = cpu_to_le32(reply_address);
3442
3443	/* initialize Reply Post Free Queue */
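	/* (a reply post descriptor of all ones is treated as an unused
	 *  entry, i.e. one the IOC has not yet written) */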
3444	for (i = 0; i < ioc->reply_post_queue_depth; i++)
3445		ioc->reply_post_free[i].Words = ULLONG_MAX;
3446
3447	r = _base_send_ioc_init(ioc, sleep_flag);
3448	if (r)
3449		return r;
3450
3451	/* initialize the indexes */
3452	ioc->reply_free_host_index = ioc->reply_free_queue_depth - 1;
3453	ioc->reply_post_host_index = 0;
3454	writel(ioc->reply_free_host_index, &ioc->chip->ReplyFreeHostIndex);
3455	writel(0, &ioc->chip->ReplyPostHostIndex);
3456
3457	_base_unmask_interrupts(ioc);
3458	r = _base_event_notification(ioc, sleep_flag);
3459	if (r)
3460		return r;
3461
3462	if (sleep_flag == CAN_SLEEP)
3463		_base_static_config_pages(ioc);
3464
3465	r = _base_send_port_enable(ioc, sleep_flag);
3466	if (r)
3467		return r;
3468
3469	return r;
3470}
3471
3472/**
3473 * mpt2sas_base_free_resources - free controller resources (io/irq/memmap)
3474 * @ioc: per adapter object
3475 *
3476 * Return nothing.
3477 */
3478void
3479mpt2sas_base_free_resources(struct MPT2SAS_ADAPTER *ioc)
3480{
3481	struct pci_dev *pdev = ioc->pdev;
3482
3483	dexitprintk(ioc, printk(MPT2SAS_DEBUG_FMT "%s\n", ioc->name,
3484	    __func__));
3485
3486	_base_mask_interrupts(ioc);
3487	_base_make_ioc_ready(ioc, CAN_SLEEP, SOFT_RESET);
3488	if (ioc->pci_irq) {
3489		synchronize_irq(pdev->irq);
3490		free_irq(ioc->pci_irq, ioc);
3491	}
3492	_base_disable_msix(ioc);
3493	if (ioc->chip_phys)
3494		iounmap(ioc->chip);
3495	ioc->pci_irq = -1;
3496	ioc->chip_phys = 0;
3497	pci_release_selected_regions(ioc->pdev, ioc->bars);
3498	pci_disable_device(pdev);
3499	return;
3500}
3501
3502/**
3503 * mpt2sas_base_attach - attach controller instance
3504 * @ioc: per adapter object
3505 *
3506 * Returns 0 for success, non-zero for failure.
3507 */
3508int
3509mpt2sas_base_attach(struct MPT2SAS_ADAPTER *ioc)
3510{
3511	int r, i;
3512
3513	dinitprintk(ioc, printk(MPT2SAS_DEBUG_FMT "%s\n", ioc->name,
3514	    __func__));
3515
3516	r = mpt2sas_base_map_resources(ioc);
3517	if (r)
3518		return r;
3519
3520	pci_set_drvdata(ioc->pdev, ioc->shost);
3521	r = _base_make_ioc_ready(ioc, CAN_SLEEP, SOFT_RESET);
3522	if (r)
3523		goto out_free_resources;
3524
3525	r = _base_get_ioc_facts(ioc, CAN_SLEEP);
3526	if (r)
3527		goto out_free_resources;
3528
3529	ioc->pfacts = kcalloc(ioc->facts.NumberOfPorts,
3530	    sizeof(Mpi2PortFactsReply_t), GFP_KERNEL);
3531	if (!ioc->pfacts) {
		r = -ENOMEM;	/* don't return the stale 0 from get_ioc_facts */
3532		goto out_free_resources;
	}
3533
3534	for (i = 0 ; i < ioc->facts.NumberOfPorts; i++) {
3535		r = _base_get_port_facts(ioc, i, CAN_SLEEP);
3536		if (r)
3537			goto out_free_resources;
3538	}
3539
3540	r = _base_allocate_memory_pools(ioc, CAN_SLEEP);
3541	if (r)
3542		goto out_free_resources;
3543
3544	init_waitqueue_head(&ioc->reset_wq);
3545
3546	/* base internal command bits */
3547	mutex_init(&ioc->base_cmds.mutex);
3548	ioc->base_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
3549	ioc->base_cmds.status = MPT2_CMD_NOT_USED;
3550
3551	/* transport internal command bits */
3552	ioc->transport_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
3553	ioc->transport_cmds.status = MPT2_CMD_NOT_USED;
3554	mutex_init(&ioc->transport_cmds.mutex);
3555
3556	/* task management internal command bits */
3557	ioc->tm_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
3558	ioc->tm_cmds.status = MPT2_CMD_NOT_USED;
3559	mutex_init(&ioc->tm_cmds.mutex);
3560
3561	/* config page internal command bits */
3562	ioc->config_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
3563	ioc->config_cmds.status = MPT2_CMD_NOT_USED;
3564	mutex_init(&ioc->config_cmds.mutex);
3565
3566	/* ctl module internal command bits */
3567	ioc->ctl_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
3568	ioc->ctl_cmds.status = MPT2_CMD_NOT_USED;
3569	mutex_init(&ioc->ctl_cmds.mutex);
3570
3571	for (i = 0; i < MPI2_EVENT_NOTIFY_EVENTMASK_WORDS; i++)
3572		ioc->event_masks[i] = -1;
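	/*
	 * Editorial note: an all-ones mask word blocks every event in that
	 * word; the _base_unmask_events() calls below clear the bit for
	 * each event type this driver wants delivered.
	 */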
3573
3574	/* here we enable the events we care about */
3575	_base_unmask_events(ioc, MPI2_EVENT_SAS_DISCOVERY);
3576	_base_unmask_events(ioc, MPI2_EVENT_SAS_BROADCAST_PRIMITIVE);
3577	_base_unmask_events(ioc, MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST);
3578	_base_unmask_events(ioc, MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE);
3579	_base_unmask_events(ioc, MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE);
3580	_base_unmask_events(ioc, MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST);
3581	_base_unmask_events(ioc, MPI2_EVENT_IR_VOLUME);
3582	_base_unmask_events(ioc, MPI2_EVENT_IR_PHYSICAL_DISK);
3583	_base_unmask_events(ioc, MPI2_EVENT_IR_OPERATION_STATUS);
3584	_base_unmask_events(ioc, MPI2_EVENT_TASK_SET_FULL);
3585	_base_unmask_events(ioc, MPI2_EVENT_LOG_ENTRY_ADDED);
3586	r = _base_make_ioc_operational(ioc, CAN_SLEEP);
3587	if (r)
3588		goto out_free_resources;
3589
3590	mpt2sas_base_start_watchdog(ioc);
3591	return 0;
3592
3593 out_free_resources:
3594
3595	ioc->remove_host = 1;
3596	mpt2sas_base_free_resources(ioc);
3597	_base_release_memory_pools(ioc);
3598	pci_set_drvdata(ioc->pdev, NULL);
3599	kfree(ioc->tm_cmds.reply);
3600	kfree(ioc->transport_cmds.reply);
3601	kfree(ioc->config_cmds.reply);
3602	kfree(ioc->base_cmds.reply);
3603	kfree(ioc->ctl_cmds.reply);
3604	kfree(ioc->pfacts);
3605	ioc->ctl_cmds.reply = NULL;
3606	ioc->base_cmds.reply = NULL;
3607	ioc->tm_cmds.reply = NULL;
3608	ioc->transport_cmds.reply = NULL;
3609	ioc->config_cmds.reply = NULL;
3610	ioc->pfacts = NULL;
3611	return r;
3612}
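
#if 0
/*
 * Editorial example, not compiled into the driver: a rough sketch of how a
 * hypothetical caller (the PCI probe path in the scsih module plays this
 * role in practice) is expected to use mpt2sas_base_attach().  The function
 * name and error handling here are illustrative only.
 */
static int example_probe_use_of_base_attach(struct MPT2SAS_ADAPTER *ioc)
{
	int rc;

	/* maps BARs, allocates pools, brings the IOC operational and
	 * enables the ports; on failure it has already torn itself down */
	rc = mpt2sas_base_attach(ioc);
	if (rc) {
		printk(MPT2SAS_ERR_FMT "attach failed: rc=%d\n",
		    ioc->name, rc);
		return rc;
	}
	return 0;
}
#endif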
3613
3614
3615/**
3616 * mpt2sas_base_detach - remove controller instance
3617 * @ioc: per adapter object
3618 *
3619 * Return nothing.
3620 */
3621void
3622mpt2sas_base_detach(struct MPT2SAS_ADAPTER *ioc)
3623{
3624
3625	dexitprintk(ioc, printk(MPT2SAS_DEBUG_FMT "%s\n", ioc->name,
3626	    __func__));
3627
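	/*
	 * Editorial note: the watchdog is stopped first so the fault-polling
	 * work cannot trigger a reset while the resources and memory pools
	 * it would touch are being torn down below.
	 */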
3628	mpt2sas_base_stop_watchdog(ioc);
3629	mpt2sas_base_free_resources(ioc);
3630	_base_release_memory_pools(ioc);
3631	pci_set_drvdata(ioc->pdev, NULL);
3632	kfree(ioc->pfacts);
3633	kfree(ioc->ctl_cmds.reply);
3634	kfree(ioc->base_cmds.reply);
3635	kfree(ioc->tm_cmds.reply);
3636	kfree(ioc->transport_cmds.reply);
3637	kfree(ioc->config_cmds.reply);
3638}
3639
3640/**
3641 * _base_reset_handler - reset callback handler (for base)
3642 * @ioc: per adapter object
3643 * @reset_phase: phase
3644 *
3645 * The handler for doing any required cleanup or initialization.
3646 *
3647 * The reset phase can be MPT2_IOC_PRE_RESET, MPT2_IOC_AFTER_RESET,
3648 * MPT2_IOC_DONE_RESET, or MPT2_IOC_RUNNING (once the reset has succeeded).
3649 *
3650 * Return nothing.
3651 */
3652static void
3653_base_reset_handler(struct MPT2SAS_ADAPTER *ioc, int reset_phase)
3654{
3655	switch (reset_phase) {
3656	case MPT2_IOC_PRE_RESET:
3657		dtmprintk(ioc, printk(MPT2SAS_DEBUG_FMT "%s: "
3658		    "MPT2_IOC_PRE_RESET\n", ioc->name, __func__));
3659		break;
3660	case MPT2_IOC_AFTER_RESET:
3661		dtmprintk(ioc, printk(MPT2SAS_DEBUG_FMT "%s: "
3662		    "MPT2_IOC_AFTER_RESET\n", ioc->name, __func__));
3663		if (ioc->transport_cmds.status & MPT2_CMD_PENDING) {
3664			ioc->transport_cmds.status |= MPT2_CMD_RESET;
3665			mpt2sas_base_free_smid(ioc, ioc->transport_cmds.smid);
3666			complete(&ioc->transport_cmds.done);
3667		}
3668		if (ioc->base_cmds.status & MPT2_CMD_PENDING) {
3669			ioc->base_cmds.status |= MPT2_CMD_RESET;
3670			mpt2sas_base_free_smid(ioc, ioc->base_cmds.smid);
3671			complete(&ioc->base_cmds.done);
3672		}
3673		if (ioc->config_cmds.status & MPT2_CMD_PENDING) {
3674			ioc->config_cmds.status |= MPT2_CMD_RESET;
3675			mpt2sas_base_free_smid(ioc, ioc->config_cmds.smid);
3676			ioc->config_cmds.smid = USHORT_MAX;
3677			complete(&ioc->config_cmds.done);
3678		}
3679		break;
3680	case MPT2_IOC_DONE_RESET:
3681		dtmprintk(ioc, printk(MPT2SAS_DEBUG_FMT "%s: "
3682		    "MPT2_IOC_DONE_RESET\n", ioc->name, __func__));
3683		break;
3684	}
3685	mpt2sas_scsih_reset_handler(ioc, reset_phase);
3686	mpt2sas_ctl_reset_handler(ioc, reset_phase);
3687}
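
/*
 * Editorial note: the MPT2_CMD_RESET handling above pairs with the way the
 * internal commands are normally driven elsewhere in the driver -- roughly:
 *
 *	ioc->base_cmds.status = MPT2_CMD_PENDING;
 *	init_completion(&ioc->base_cmds.done);
 *	... fire the request ...
 *	wait_for_completion_timeout(&ioc->base_cmds.done, timeout * HZ);
 *	if (ioc->base_cmds.status & MPT2_CMD_RESET)
 *		handle the abort (the command was cut short by a host reset)
 *
 * The sketch is illustrative only; the actual wait sites live in the base,
 * transport, config and ctl modules.
 */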
3688
3689/**
3690 * _wait_for_commands_to_complete - wait for pending SCSI IO to complete
3691 * @ioc: Pointer to MPT2SAS_ADAPTER structure
3692 * @sleep_flag: CAN_SLEEP or NO_SLEEP
3693 *
3694 * This function waits up to 3 seconds for all pending SCSI IO commands
3695 * to complete prior to putting the controller into reset.
3696 */
3697static void
3698_wait_for_commands_to_complete(struct MPT2SAS_ADAPTER *ioc, int sleep_flag)
3699{
3700	u32 ioc_state;
3701	unsigned long flags;
3702	u16 i;
3703
3704	ioc->pending_io_count = 0;
3705	if (sleep_flag != CAN_SLEEP)
3706		return;
3707
3708	ioc_state = mpt2sas_base_get_iocstate(ioc, 0);
3709	if ((ioc_state & MPI2_IOC_STATE_MASK) != MPI2_IOC_STATE_OPERATIONAL)
3710		return;
3711
3712	/* pending command count */
3713	spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
3714	for (i = 0; i < ioc->scsiio_depth; i++)
3715		if (ioc->scsi_lookup[i].cb_idx != 0xFF)
3716			ioc->pending_io_count++;
3717	spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
3718
3719	if (!ioc->pending_io_count)
3720		return;
3721
3722	/* wait for pending commands to complete */
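	/* (editorial) pending_io_count is expected to be decremented, and
	 * reset_wq woken, by the completion path that returns SCSI IO smids
	 * while shost_recovery is set; the 3 second timeout bounds the wait
	 * in case some IO never completes. */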
3723	wait_event_timeout(ioc->reset_wq, ioc->pending_io_count == 0, 3 * HZ);
3724}
3725
3726/**
3727 * mpt2sas_base_hard_reset_handler - reset controller
3728 * @ioc: Pointer to MPT2SAS_ADAPTER structure
3729 * @sleep_flag: CAN_SLEEP or NO_SLEEP
3730 * @type: FORCE_BIG_HAMMER or SOFT_RESET
3731 *
3732 * Returns 0 for success, non-zero for failure.
3733 */
3734int
3735mpt2sas_base_hard_reset_handler(struct MPT2SAS_ADAPTER *ioc, int sleep_flag,
3736    enum reset_type type)
3737{
3738	int r;
3739	unsigned long flags;
3740
3741	dtmprintk(ioc, printk(MPT2SAS_DEBUG_FMT "%s: enter\n", ioc->name,
3742	    __func__));
3743
3744	if (mpt2sas_fwfault_debug)
3745		mpt2sas_halt_firmware(ioc);
3746
3747	spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
3748	if (ioc->shost_recovery) {
3749		spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
3750		printk(MPT2SAS_ERR_FMT "%s: busy\n",
3751		    ioc->name, __func__);
3752		return -EBUSY;
3753	}
3754	ioc->shost_recovery = 1;
3755	spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
3756
3757	_base_reset_handler(ioc, MPT2_IOC_PRE_RESET);
3758	_wait_for_commands_to_complete(ioc, sleep_flag);
3759	_base_mask_interrupts(ioc);
3760	r = _base_make_ioc_ready(ioc, sleep_flag, type);
3761	if (r)
3762		goto out;
3763	_base_reset_handler(ioc, MPT2_IOC_AFTER_RESET);
3764	r = _base_make_ioc_operational(ioc, sleep_flag);
3765	if (!r)
3766		_base_reset_handler(ioc, MPT2_IOC_DONE_RESET);
3767 out:
3768	dtmprintk(ioc, printk(MPT2SAS_DEBUG_FMT "%s: %s\n",
3769	    ioc->name, __func__, ((r == 0) ? "SUCCESS" : "FAILED")));
3770
3771	spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
3772	ioc->shost_recovery = 0;
3773	spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
3774
3775	if (!r)
3776		_base_reset_handler(ioc, MPT2_IOC_RUNNING);
3777	return r;
3778}
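
#if 0
/*
 * Editorial example, not compiled into the driver: how a caller running in
 * process context might request a full controller reset.  The wrapper name
 * is hypothetical; only mpt2sas_base_hard_reset_handler() and its arguments
 * come from this file.
 */
static int example_force_host_reset(struct MPT2SAS_ADAPTER *ioc)
{
	/* CAN_SLEEP: we may block; FORCE_BIG_HAMMER: full diagnostic reset */
	return mpt2sas_base_hard_reset_handler(ioc, CAN_SLEEP,
	    FORCE_BIG_HAMMER);
}
#endif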
3779