/* be_main.c revision 587a1f1659e8b330b8738ef4901832a2b63f0bed */
1/**
2 * Copyright (C) 2005 - 2011 Emulex
3 * All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License version 2
7 * as published by the Free Software Foundation.  The full GNU General
8 * Public License is included in this distribution in the file called COPYING.
9 *
10 * Written by: Jayamohan Kallickal (jayamohan.kallickal@emulex.com)
11 *
12 * Contact Information:
13 * linux-drivers@emulex.com
14 *
15 * Emulex
16 * 3333 Susan Street
17 * Costa Mesa, CA 92626
18 */
19
20#include <linux/reboot.h>
21#include <linux/delay.h>
22#include <linux/slab.h>
23#include <linux/interrupt.h>
24#include <linux/blkdev.h>
25#include <linux/pci.h>
26#include <linux/string.h>
27#include <linux/kernel.h>
28#include <linux/semaphore.h>
29#include <linux/iscsi_boot_sysfs.h>
30#include <linux/module.h>
31
32#include <scsi/libiscsi.h>
33#include <scsi/scsi_transport_iscsi.h>
34#include <scsi/scsi_transport.h>
35#include <scsi/scsi_cmnd.h>
36#include <scsi/scsi_device.h>
37#include <scsi/scsi_host.h>
38#include <scsi/scsi.h>
39#include "be_main.h"
40#include "be_iscsi.h"
41#include "be_mgmt.h"
42
/* Module tunables; only be_iopoll_budget, enable_msix and be_max_phys_size
 * are exposed as module parameters below.  gcrashmode and num_hba are
 * file-scope state (not visible as parameters in this chunk).
 */
static unsigned int be_iopoll_budget = 10;
static unsigned int be_max_phys_size = 64;
static unsigned int enable_msix = 1;
static unsigned int gcrashmode = 0;
static unsigned int num_hba = 0;

/* NOTE(review): MODULE_DEVICE_TABLE is emitted a second time right after
 * the PCI id table definition further down -- one of the two looks
 * redundant; confirm and drop one of them.
 */
MODULE_DEVICE_TABLE(pci, beiscsi_pci_id_table);
MODULE_DESCRIPTION(DRV_DESC " " BUILD_STR);
MODULE_AUTHOR("ServerEngines Corporation");
MODULE_LICENSE("GPL");
module_param(be_iopoll_budget, int, 0);
module_param(enable_msix, int, 0);
module_param(be_max_phys_size, uint, S_IRUGO);
MODULE_PARM_DESC(be_max_phys_size, "Maximum Size (In Kilobytes) of physically"
				   "contiguous memory that can be allocated."
				   "Range is 16 - 128");
59
/* Cap the scatter/gather segment size for this device's request queue
 * to 64 KB.  Always succeeds.
 */
static int beiscsi_slave_configure(struct scsi_device *sdev)
{
	blk_queue_max_segment_size(sdev->request_queue, 65536);
	return 0;
}
65
66static int beiscsi_eh_abort(struct scsi_cmnd *sc)
67{
68	struct iscsi_cls_session *cls_session;
69	struct iscsi_task *aborted_task = (struct iscsi_task *)sc->SCp.ptr;
70	struct beiscsi_io_task *aborted_io_task;
71	struct iscsi_conn *conn;
72	struct beiscsi_conn *beiscsi_conn;
73	struct beiscsi_hba *phba;
74	struct iscsi_session *session;
75	struct invalidate_command_table *inv_tbl;
76	struct be_dma_mem nonemb_cmd;
77	unsigned int cid, tag, num_invalidate;
78
79	cls_session = starget_to_session(scsi_target(sc->device));
80	session = cls_session->dd_data;
81
82	spin_lock_bh(&session->lock);
83	if (!aborted_task || !aborted_task->sc) {
84		/* we raced */
85		spin_unlock_bh(&session->lock);
86		return SUCCESS;
87	}
88
89	aborted_io_task = aborted_task->dd_data;
90	if (!aborted_io_task->scsi_cmnd) {
91		/* raced or invalid command */
92		spin_unlock_bh(&session->lock);
93		return SUCCESS;
94	}
95	spin_unlock_bh(&session->lock);
96	conn = aborted_task->conn;
97	beiscsi_conn = conn->dd_data;
98	phba = beiscsi_conn->phba;
99
100	/* invalidate iocb */
101	cid = beiscsi_conn->beiscsi_conn_cid;
102	inv_tbl = phba->inv_tbl;
103	memset(inv_tbl, 0x0, sizeof(*inv_tbl));
104	inv_tbl->cid = cid;
105	inv_tbl->icd = aborted_io_task->psgl_handle->sgl_index;
106	num_invalidate = 1;
107	nonemb_cmd.va = pci_alloc_consistent(phba->ctrl.pdev,
108				sizeof(struct invalidate_commands_params_in),
109				&nonemb_cmd.dma);
110	if (nonemb_cmd.va == NULL) {
111		SE_DEBUG(DBG_LVL_1,
112			 "Failed to allocate memory for"
113			 "mgmt_invalidate_icds\n");
114		return FAILED;
115	}
116	nonemb_cmd.size = sizeof(struct invalidate_commands_params_in);
117
118	tag = mgmt_invalidate_icds(phba, inv_tbl, num_invalidate,
119				   cid, &nonemb_cmd);
120	if (!tag) {
121		shost_printk(KERN_WARNING, phba->shost,
122			     "mgmt_invalidate_icds could not be"
123			     " submitted\n");
124		pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size,
125				    nonemb_cmd.va, nonemb_cmd.dma);
126
127		return FAILED;
128	} else {
129		wait_event_interruptible(phba->ctrl.mcc_wait[tag],
130					 phba->ctrl.mcc_numtag[tag]);
131		free_mcc_tag(&phba->ctrl, tag);
132	}
133	pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size,
134			    nonemb_cmd.va, nonemb_cmd.dma);
135	return iscsi_eh_abort(sc);
136}
137
138static int beiscsi_eh_device_reset(struct scsi_cmnd *sc)
139{
140	struct iscsi_task *abrt_task;
141	struct beiscsi_io_task *abrt_io_task;
142	struct iscsi_conn *conn;
143	struct beiscsi_conn *beiscsi_conn;
144	struct beiscsi_hba *phba;
145	struct iscsi_session *session;
146	struct iscsi_cls_session *cls_session;
147	struct invalidate_command_table *inv_tbl;
148	struct be_dma_mem nonemb_cmd;
149	unsigned int cid, tag, i, num_invalidate;
150	int rc = FAILED;
151
152	/* invalidate iocbs */
153	cls_session = starget_to_session(scsi_target(sc->device));
154	session = cls_session->dd_data;
155	spin_lock_bh(&session->lock);
156	if (!session->leadconn || session->state != ISCSI_STATE_LOGGED_IN)
157		goto unlock;
158
159	conn = session->leadconn;
160	beiscsi_conn = conn->dd_data;
161	phba = beiscsi_conn->phba;
162	cid = beiscsi_conn->beiscsi_conn_cid;
163	inv_tbl = phba->inv_tbl;
164	memset(inv_tbl, 0x0, sizeof(*inv_tbl) * BE2_CMDS_PER_CXN);
165	num_invalidate = 0;
166	for (i = 0; i < conn->session->cmds_max; i++) {
167		abrt_task = conn->session->cmds[i];
168		abrt_io_task = abrt_task->dd_data;
169		if (!abrt_task->sc || abrt_task->state == ISCSI_TASK_FREE)
170			continue;
171
172		if (abrt_task->sc->device->lun != abrt_task->sc->device->lun)
173			continue;
174
175		inv_tbl->cid = cid;
176		inv_tbl->icd = abrt_io_task->psgl_handle->sgl_index;
177		num_invalidate++;
178		inv_tbl++;
179	}
180	spin_unlock_bh(&session->lock);
181	inv_tbl = phba->inv_tbl;
182
183	nonemb_cmd.va = pci_alloc_consistent(phba->ctrl.pdev,
184				sizeof(struct invalidate_commands_params_in),
185				&nonemb_cmd.dma);
186	if (nonemb_cmd.va == NULL) {
187		SE_DEBUG(DBG_LVL_1,
188			 "Failed to allocate memory for"
189			 "mgmt_invalidate_icds\n");
190		return FAILED;
191	}
192	nonemb_cmd.size = sizeof(struct invalidate_commands_params_in);
193	memset(nonemb_cmd.va, 0, nonemb_cmd.size);
194	tag = mgmt_invalidate_icds(phba, inv_tbl, num_invalidate,
195				   cid, &nonemb_cmd);
196	if (!tag) {
197		shost_printk(KERN_WARNING, phba->shost,
198			     "mgmt_invalidate_icds could not be"
199			     " submitted\n");
200		pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size,
201				    nonemb_cmd.va, nonemb_cmd.dma);
202		return FAILED;
203	} else {
204		wait_event_interruptible(phba->ctrl.mcc_wait[tag],
205					 phba->ctrl.mcc_numtag[tag]);
206		free_mcc_tag(&phba->ctrl, tag);
207	}
208	pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size,
209			    nonemb_cmd.va, nonemb_cmd.dma);
210	return iscsi_eh_device_reset(sc);
211unlock:
212	spin_unlock_bh(&session->lock);
213	return rc;
214}
215
216static ssize_t beiscsi_show_boot_tgt_info(void *data, int type, char *buf)
217{
218	struct beiscsi_hba *phba = data;
219	struct mgmt_session_info *boot_sess = &phba->boot_sess;
220	struct mgmt_conn_info *boot_conn = &boot_sess->conn_list[0];
221	char *str = buf;
222	int rc;
223
224	switch (type) {
225	case ISCSI_BOOT_TGT_NAME:
226		rc = sprintf(buf, "%.*s\n",
227			    (int)strlen(boot_sess->target_name),
228			    (char *)&boot_sess->target_name);
229		break;
230	case ISCSI_BOOT_TGT_IP_ADDR:
231		if (boot_conn->dest_ipaddr.ip_type == 0x1)
232			rc = sprintf(buf, "%pI4\n",
233				(char *)&boot_conn->dest_ipaddr.ip_address);
234		else
235			rc = sprintf(str, "%pI6\n",
236				(char *)&boot_conn->dest_ipaddr.ip_address);
237		break;
238	case ISCSI_BOOT_TGT_PORT:
239		rc = sprintf(str, "%d\n", boot_conn->dest_port);
240		break;
241
242	case ISCSI_BOOT_TGT_CHAP_NAME:
243		rc = sprintf(str,  "%.*s\n",
244			     boot_conn->negotiated_login_options.auth_data.chap.
245			     target_chap_name_length,
246			     (char *)&boot_conn->negotiated_login_options.
247			     auth_data.chap.target_chap_name);
248		break;
249	case ISCSI_BOOT_TGT_CHAP_SECRET:
250		rc = sprintf(str,  "%.*s\n",
251			     boot_conn->negotiated_login_options.auth_data.chap.
252			     target_secret_length,
253			     (char *)&boot_conn->negotiated_login_options.
254			     auth_data.chap.target_secret);
255		break;
256	case ISCSI_BOOT_TGT_REV_CHAP_NAME:
257		rc = sprintf(str,  "%.*s\n",
258			     boot_conn->negotiated_login_options.auth_data.chap.
259			     intr_chap_name_length,
260			     (char *)&boot_conn->negotiated_login_options.
261			     auth_data.chap.intr_chap_name);
262		break;
263	case ISCSI_BOOT_TGT_REV_CHAP_SECRET:
264		rc = sprintf(str,  "%.*s\n",
265			     boot_conn->negotiated_login_options.auth_data.chap.
266			     intr_secret_length,
267			     (char *)&boot_conn->negotiated_login_options.
268			     auth_data.chap.intr_secret);
269		break;
270	case ISCSI_BOOT_TGT_FLAGS:
271		rc = sprintf(str, "2\n");
272		break;
273	case ISCSI_BOOT_TGT_NIC_ASSOC:
274		rc = sprintf(str, "0\n");
275		break;
276	default:
277		rc = -ENOSYS;
278		break;
279	}
280	return rc;
281}
282
283static ssize_t beiscsi_show_boot_ini_info(void *data, int type, char *buf)
284{
285	struct beiscsi_hba *phba = data;
286	char *str = buf;
287	int rc;
288
289	switch (type) {
290	case ISCSI_BOOT_INI_INITIATOR_NAME:
291		rc = sprintf(str, "%s\n", phba->boot_sess.initiator_iscsiname);
292		break;
293	default:
294		rc = -ENOSYS;
295		break;
296	}
297	return rc;
298}
299
300static ssize_t beiscsi_show_boot_eth_info(void *data, int type, char *buf)
301{
302	struct beiscsi_hba *phba = data;
303	char *str = buf;
304	int rc;
305
306	switch (type) {
307	case ISCSI_BOOT_ETH_FLAGS:
308		rc = sprintf(str, "2\n");
309		break;
310	case ISCSI_BOOT_ETH_INDEX:
311		rc = sprintf(str, "0\n");
312		break;
313	case ISCSI_BOOT_ETH_MAC:
314		rc  = beiscsi_get_macaddr(buf, phba);
315		if (rc < 0) {
316			SE_DEBUG(DBG_LVL_1, "beiscsi_get_macaddr Failed\n");
317			return rc;
318		}
319	break;
320	default:
321		rc = -ENOSYS;
322		break;
323	}
324	return rc;
325}
326
327
328static umode_t beiscsi_tgt_get_attr_visibility(void *data, int type)
329{
330	umode_t rc;
331
332	switch (type) {
333	case ISCSI_BOOT_TGT_NAME:
334	case ISCSI_BOOT_TGT_IP_ADDR:
335	case ISCSI_BOOT_TGT_PORT:
336	case ISCSI_BOOT_TGT_CHAP_NAME:
337	case ISCSI_BOOT_TGT_CHAP_SECRET:
338	case ISCSI_BOOT_TGT_REV_CHAP_NAME:
339	case ISCSI_BOOT_TGT_REV_CHAP_SECRET:
340	case ISCSI_BOOT_TGT_NIC_ASSOC:
341	case ISCSI_BOOT_TGT_FLAGS:
342		rc = S_IRUGO;
343		break;
344	default:
345		rc = 0;
346		break;
347	}
348	return rc;
349}
350
351static umode_t beiscsi_ini_get_attr_visibility(void *data, int type)
352{
353	umode_t rc;
354
355	switch (type) {
356	case ISCSI_BOOT_INI_INITIATOR_NAME:
357		rc = S_IRUGO;
358		break;
359	default:
360		rc = 0;
361		break;
362	}
363	return rc;
364}
365
366
367static umode_t beiscsi_eth_get_attr_visibility(void *data, int type)
368{
369	umode_t rc;
370
371	switch (type) {
372	case ISCSI_BOOT_ETH_FLAGS:
373	case ISCSI_BOOT_ETH_MAC:
374	case ISCSI_BOOT_ETH_INDEX:
375		rc = S_IRUGO;
376		break;
377	default:
378		rc = 0;
379		break;
380	}
381	return rc;
382}
383
/*------------------- PCI Driver operations and data ----------------- */
/* PCI functions this driver binds to; the BE_*/OC_* device ids are
 * defined in be_main.h.  NOTE(review): MODULE_DEVICE_TABLE for this
 * table is also emitted near the top of the file -- one is redundant.
 */
static DEFINE_PCI_DEVICE_TABLE(beiscsi_pci_id_table) = {
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID3) },
	{ 0 }
};
MODULE_DEVICE_TABLE(pci, beiscsi_pci_id_table);
394
/* SCSI mid-layer host template: generic iscsi_* entry points come from
 * libiscsi; the beiscsi_* handlers are defined earlier in this file.
 */
static struct scsi_host_template beiscsi_sht = {
	.module = THIS_MODULE,
	.name = "ServerEngines 10Gbe open-iscsi Initiator Driver",
	.proc_name = DRV_NAME,
	.queuecommand = iscsi_queuecommand,
	.change_queue_depth = iscsi_change_queue_depth,
	.slave_configure = beiscsi_slave_configure,
	.target_alloc = iscsi_target_alloc,
	.eh_abort_handler = beiscsi_eh_abort,
	.eh_device_reset_handler = beiscsi_eh_device_reset,
	.eh_target_reset_handler = iscsi_eh_session_reset,
	.sg_tablesize = BEISCSI_SGLIST_ELEMENTS,
	.can_queue = BE2_IO_DEPTH,
	.this_id = -1,
	.max_sectors = BEISCSI_MAX_SECTORS,
	.cmd_per_lun = BEISCSI_CMD_PER_LUN,
	.use_clustering = ENABLE_CLUSTERING,
};

/* Assigned to shost->transportt in beiscsi_hba_alloc().  NOTE(review):
 * presumably set by iscsi_register_transport() at module init, which is
 * outside this chunk -- confirm.
 */
static struct scsi_transport_template *beiscsi_scsi_transport;
415
/**
 * beiscsi_hba_alloc - allocate and register a Scsi_Host for one adapter
 * @pcidev: PCI function to attach the host to
 *
 * Allocates the iSCSI host with a beiscsi_hba private area, fills in the
 * host limits, takes a reference on @pcidev and links phba <-> pcidev
 * via drvdata, then adds the host to the midlayer.
 *
 * Returns the new phba, or NULL on allocation/registration failure
 * (in which case the pcidev reference is dropped again).
 */
static struct beiscsi_hba *beiscsi_hba_alloc(struct pci_dev *pcidev)
{
	struct beiscsi_hba *phba;
	struct Scsi_Host *shost;

	shost = iscsi_host_alloc(&beiscsi_sht, sizeof(*phba), 0);
	if (!shost) {
		dev_err(&pcidev->dev, "beiscsi_hba_alloc -"
			"iscsi_host_alloc failed\n");
		return NULL;
	}
	shost->dma_boundary = pcidev->dma_mask;
	shost->max_id = BE2_MAX_SESSIONS;
	shost->max_channel = 0;
	shost->max_cmd_len = BEISCSI_MAX_CMD_LEN;
	shost->max_lun = BEISCSI_NUM_MAX_LUN;
	shost->transportt = beiscsi_scsi_transport;
	phba = iscsi_host_priv(shost);
	memset(phba, 0, sizeof(*phba));
	phba->shost = shost;
	/* hold the pci_dev for the lifetime of the hba */
	phba->pcidev = pci_dev_get(pcidev);
	pci_set_drvdata(pcidev, phba);

	if (iscsi_host_add(shost, &phba->pcidev->dev))
		goto free_devices;

	return phba;

free_devices:
	pci_dev_put(phba->pcidev);
	iscsi_host_free(phba->shost);
	return NULL;
}
449
/* Unmap whichever of the CSR, doorbell and PCI-config BARs are mapped
 * and NULL the cached virtual addresses; safe to call on a partially
 * mapped hba (used by the beiscsi_map_pci_bars() error path).
 */
static void beiscsi_unmap_pci_function(struct beiscsi_hba *phba)
{
	if (phba->csr_va) {
		iounmap(phba->csr_va);
		phba->csr_va = NULL;
	}
	if (phba->db_va) {
		iounmap(phba->db_va);
		phba->db_va = NULL;
	}
	if (phba->pci_va) {
		iounmap(phba->pci_va);
		phba->pci_va = NULL;
	}
}
465
/**
 * beiscsi_map_pci_bars - ioremap the adapter's register BARs
 * @phba:   adapter being initialized
 * @pcidev: its PCI function
 *
 * Maps BAR 2 (CSR registers), BAR 4 (doorbells, first 128 KB) and the
 * PCI-config shadow BAR (BAR 1 on BE_GEN2 hardware, BAR 0 otherwise),
 * recording both virtual and bus addresses in phba/ctrl.
 *
 * Returns 0 on success or -ENOMEM, unwinding any partial mappings.
 */
static int beiscsi_map_pci_bars(struct beiscsi_hba *phba,
				struct pci_dev *pcidev)
{
	u8 __iomem *addr;
	int pcicfg_reg;

	/* BAR 2: control/status registers */
	addr = ioremap_nocache(pci_resource_start(pcidev, 2),
			       pci_resource_len(pcidev, 2));
	if (addr == NULL)
		return -ENOMEM;
	phba->ctrl.csr = addr;
	phba->csr_va = addr;
	phba->csr_pa.u.a64.address = pci_resource_start(pcidev, 2);

	/* BAR 4: doorbell area; only the first 128 KB are mapped */
	addr = ioremap_nocache(pci_resource_start(pcidev, 4), 128 * 1024);
	if (addr == NULL)
		goto pci_map_err;
	phba->ctrl.db = addr;
	phba->db_va = addr;
	phba->db_pa.u.a64.address =  pci_resource_start(pcidev, 4);

	/* PCI-config shadow lives in a different BAR per chip generation */
	if (phba->generation == BE_GEN2)
		pcicfg_reg = 1;
	else
		pcicfg_reg = 0;

	addr = ioremap_nocache(pci_resource_start(pcidev, pcicfg_reg),
			       pci_resource_len(pcidev, pcicfg_reg));

	if (addr == NULL)
		goto pci_map_err;
	phba->ctrl.pcicfg = addr;
	phba->pci_va = addr;
	phba->pci_pa.u.a64.address = pci_resource_start(pcidev, pcicfg_reg);
	return 0;

pci_map_err:
	beiscsi_unmap_pci_function(phba);
	return -ENOMEM;
}
506
/**
 * beiscsi_enable_pci - enable the PCI function and configure DMA masks
 * @pcidev: device to enable
 *
 * Enables the device, makes it a bus master and sets a 64-bit coherent
 * DMA mask, falling back to 32 bits if that fails.
 *
 * Returns 0 on success or a negative errno (device is disabled again
 * on DMA-mask failure).
 */
static int beiscsi_enable_pci(struct pci_dev *pcidev)
{
	int ret;

	ret = pci_enable_device(pcidev);
	if (ret) {
		dev_err(&pcidev->dev, "beiscsi_enable_pci - enable device "
			"failed. Returning -ENODEV\n");
		return ret;
	}

	pci_set_master(pcidev);
	/* NOTE(review): only the *coherent* DMA mask is set here; the
	 * streaming mask (pci_set_dma_mask) is never configured -- confirm
	 * whether relying on the arch default is intentional.
	 */
	if (pci_set_consistent_dma_mask(pcidev, DMA_BIT_MASK(64))) {
		ret = pci_set_consistent_dma_mask(pcidev, DMA_BIT_MASK(32));
		if (ret) {
			dev_err(&pcidev->dev, "Could not set PCI DMA Mask\n");
			pci_disable_device(pcidev);
			return ret;
		}
	}
	return 0;
}
529
/**
 * be_ctrl_init - set up the adapter control structure
 * @phba: adapter being initialized
 * @pdev: its PCI function
 *
 * Maps the register BARs, allocates the DMA-coherent mailbox (over-
 * allocated by 16 bytes so the used region can be 16-byte aligned) and
 * initializes the mailbox/MCC locks.
 *
 * Returns 0 on success or a negative errno; BARs are unmapped again if
 * the mailbox allocation fails.
 */
static int be_ctrl_init(struct beiscsi_hba *phba, struct pci_dev *pdev)
{
	struct be_ctrl_info *ctrl = &phba->ctrl;
	struct be_dma_mem *mbox_mem_alloc = &ctrl->mbox_mem_alloced;
	struct be_dma_mem *mbox_mem_align = &ctrl->mbox_mem;
	int status = 0;

	ctrl->pdev = pdev;
	status = beiscsi_map_pci_bars(phba, pdev);
	if (status)
		return status;
	/* +16 bytes of slack so va/dma can both be aligned to 16 below */
	mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
	mbox_mem_alloc->va = pci_alloc_consistent(pdev,
						  mbox_mem_alloc->size,
						  &mbox_mem_alloc->dma);
	if (!mbox_mem_alloc->va) {
		beiscsi_unmap_pci_function(phba);
		status = -ENOMEM;
		return status;
	}

	mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
	mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
	mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
	memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));
	spin_lock_init(&ctrl->mbox_lock);
	spin_lock_init(&phba->ctrl.mcc_lock);
	spin_lock_init(&phba->ctrl.mcc_cq_lock);

	return status;
}
561
562static void beiscsi_get_params(struct beiscsi_hba *phba)
563{
564	phba->params.ios_per_ctrl = (phba->fw_config.iscsi_icd_count
565				    - (phba->fw_config.iscsi_cid_count
566				    + BE2_TMFS
567				    + BE2_NOPOUT_REQ));
568	phba->params.cxns_per_ctrl = phba->fw_config.iscsi_cid_count;
569	phba->params.asyncpdus_per_ctrl = phba->fw_config.iscsi_cid_count * 2;
570	phba->params.icds_per_ctrl = phba->fw_config.iscsi_icd_count;
571	phba->params.num_sge_per_io = BE2_SGE;
572	phba->params.defpdu_hdr_sz = BE2_DEFPDU_HDR_SZ;
573	phba->params.defpdu_data_sz = BE2_DEFPDU_DATA_SZ;
574	phba->params.eq_timer = 64;
575	phba->params.num_eq_entries =
576	    (((BE2_CMDS_PER_CXN * 2 + phba->fw_config.iscsi_cid_count * 2
577				    + BE2_TMFS) / 512) + 1) * 512;
578	phba->params.num_eq_entries = (phba->params.num_eq_entries < 1024)
579				? 1024 : phba->params.num_eq_entries;
580	SE_DEBUG(DBG_LVL_8, "phba->params.num_eq_entries=%d\n",
581			     phba->params.num_eq_entries);
582	phba->params.num_cq_entries =
583	    (((BE2_CMDS_PER_CXN * 2 +  phba->fw_config.iscsi_cid_count * 2
584				    + BE2_TMFS) / 512) + 1) * 512;
585	phba->params.wrbs_per_cxn = 256;
586}
587
588static void hwi_ring_eq_db(struct beiscsi_hba *phba,
589			   unsigned int id, unsigned int clr_interrupt,
590			   unsigned int num_processed,
591			   unsigned char rearm, unsigned char event)
592{
593	u32 val = 0;
594	val |= id & DB_EQ_RING_ID_MASK;
595	if (rearm)
596		val |= 1 << DB_EQ_REARM_SHIFT;
597	if (clr_interrupt)
598		val |= 1 << DB_EQ_CLR_SHIFT;
599	if (event)
600		val |= 1 << DB_EQ_EVNT_SHIFT;
601	val |= num_processed << DB_EQ_NUM_POPPED_SHIFT;
602	iowrite32(val, phba->db_va + DB_EQ_OFFSET);
603}
604
605/**
606 * be_isr_mcc - The isr routine of the driver.
607 * @irq: Not used
608 * @dev_id: Pointer to host adapter structure
609 */
610static irqreturn_t be_isr_mcc(int irq, void *dev_id)
611{
612	struct beiscsi_hba *phba;
613	struct be_eq_entry *eqe = NULL;
614	struct be_queue_info *eq;
615	struct be_queue_info *mcc;
616	unsigned int num_eq_processed;
617	struct be_eq_obj *pbe_eq;
618	unsigned long flags;
619
620	pbe_eq = dev_id;
621	eq = &pbe_eq->q;
622	phba =  pbe_eq->phba;
623	mcc = &phba->ctrl.mcc_obj.cq;
624	eqe = queue_tail_node(eq);
625	if (!eqe)
626		SE_DEBUG(DBG_LVL_1, "eqe is NULL\n");
627
628	num_eq_processed = 0;
629
630	while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32]
631				& EQE_VALID_MASK) {
632		if (((eqe->dw[offsetof(struct amap_eq_entry,
633		     resource_id) / 32] &
634		     EQE_RESID_MASK) >> 16) == mcc->id) {
635			spin_lock_irqsave(&phba->isr_lock, flags);
636			phba->todo_mcc_cq = 1;
637			spin_unlock_irqrestore(&phba->isr_lock, flags);
638		}
639		AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0);
640		queue_tail_inc(eq);
641		eqe = queue_tail_node(eq);
642		num_eq_processed++;
643	}
644	if (phba->todo_mcc_cq)
645		queue_work(phba->wq, &phba->work_cqs);
646	if (num_eq_processed)
647		hwi_ring_eq_db(phba, eq->id, 1,	num_eq_processed, 1, 1);
648
649	return IRQ_HANDLED;
650}
651
652/**
653 * be_isr_msix - The isr routine of the driver.
654 * @irq: Not used
655 * @dev_id: Pointer to host adapter structure
656 */
657static irqreturn_t be_isr_msix(int irq, void *dev_id)
658{
659	struct beiscsi_hba *phba;
660	struct be_eq_entry *eqe = NULL;
661	struct be_queue_info *eq;
662	struct be_queue_info *cq;
663	unsigned int num_eq_processed;
664	struct be_eq_obj *pbe_eq;
665	unsigned long flags;
666
667	pbe_eq = dev_id;
668	eq = &pbe_eq->q;
669	cq = pbe_eq->cq;
670	eqe = queue_tail_node(eq);
671	if (!eqe)
672		SE_DEBUG(DBG_LVL_1, "eqe is NULL\n");
673
674	phba = pbe_eq->phba;
675	num_eq_processed = 0;
676	if (blk_iopoll_enabled) {
677		while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32]
678					& EQE_VALID_MASK) {
679			if (!blk_iopoll_sched_prep(&pbe_eq->iopoll))
680				blk_iopoll_sched(&pbe_eq->iopoll);
681
682			AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0);
683			queue_tail_inc(eq);
684			eqe = queue_tail_node(eq);
685			num_eq_processed++;
686		}
687		if (num_eq_processed)
688			hwi_ring_eq_db(phba, eq->id, 1,	num_eq_processed, 0, 1);
689
690		return IRQ_HANDLED;
691	} else {
692		while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32]
693						& EQE_VALID_MASK) {
694			spin_lock_irqsave(&phba->isr_lock, flags);
695			phba->todo_cq = 1;
696			spin_unlock_irqrestore(&phba->isr_lock, flags);
697			AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0);
698			queue_tail_inc(eq);
699			eqe = queue_tail_node(eq);
700			num_eq_processed++;
701		}
702		if (phba->todo_cq)
703			queue_work(phba->wq, &phba->work_cqs);
704
705		if (num_eq_processed)
706			hwi_ring_eq_db(phba, eq->id, 1, num_eq_processed, 1, 1);
707
708		return IRQ_HANDLED;
709	}
710}
711
712/**
713 * be_isr - The isr routine of the driver.
714 * @irq: Not used
715 * @dev_id: Pointer to host adapter structure
716 */
717static irqreturn_t be_isr(int irq, void *dev_id)
718{
719	struct beiscsi_hba *phba;
720	struct hwi_controller *phwi_ctrlr;
721	struct hwi_context_memory *phwi_context;
722	struct be_eq_entry *eqe = NULL;
723	struct be_queue_info *eq;
724	struct be_queue_info *cq;
725	struct be_queue_info *mcc;
726	unsigned long flags, index;
727	unsigned int num_mcceq_processed, num_ioeq_processed;
728	struct be_ctrl_info *ctrl;
729	struct be_eq_obj *pbe_eq;
730	int isr;
731
732	phba = dev_id;
733	ctrl = &phba->ctrl;
734	isr = ioread32(ctrl->csr + CEV_ISR0_OFFSET +
735		       (PCI_FUNC(ctrl->pdev->devfn) * CEV_ISR_SIZE));
736	if (!isr)
737		return IRQ_NONE;
738
739	phwi_ctrlr = phba->phwi_ctrlr;
740	phwi_context = phwi_ctrlr->phwi_ctxt;
741	pbe_eq = &phwi_context->be_eq[0];
742
743	eq = &phwi_context->be_eq[0].q;
744	mcc = &phba->ctrl.mcc_obj.cq;
745	index = 0;
746	eqe = queue_tail_node(eq);
747	if (!eqe)
748		SE_DEBUG(DBG_LVL_1, "eqe is NULL\n");
749
750	num_ioeq_processed = 0;
751	num_mcceq_processed = 0;
752	if (blk_iopoll_enabled) {
753		while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32]
754					& EQE_VALID_MASK) {
755			if (((eqe->dw[offsetof(struct amap_eq_entry,
756			     resource_id) / 32] &
757			     EQE_RESID_MASK) >> 16) == mcc->id) {
758				spin_lock_irqsave(&phba->isr_lock, flags);
759				phba->todo_mcc_cq = 1;
760				spin_unlock_irqrestore(&phba->isr_lock, flags);
761				num_mcceq_processed++;
762			} else {
763				if (!blk_iopoll_sched_prep(&pbe_eq->iopoll))
764					blk_iopoll_sched(&pbe_eq->iopoll);
765				num_ioeq_processed++;
766			}
767			AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0);
768			queue_tail_inc(eq);
769			eqe = queue_tail_node(eq);
770		}
771		if (num_ioeq_processed || num_mcceq_processed) {
772			if (phba->todo_mcc_cq)
773				queue_work(phba->wq, &phba->work_cqs);
774
775			if ((num_mcceq_processed) && (!num_ioeq_processed))
776				hwi_ring_eq_db(phba, eq->id, 0,
777					      (num_ioeq_processed +
778					       num_mcceq_processed) , 1, 1);
779			else
780				hwi_ring_eq_db(phba, eq->id, 0,
781					       (num_ioeq_processed +
782						num_mcceq_processed), 0, 1);
783
784			return IRQ_HANDLED;
785		} else
786			return IRQ_NONE;
787	} else {
788		cq = &phwi_context->be_cq[0];
789		while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32]
790						& EQE_VALID_MASK) {
791
792			if (((eqe->dw[offsetof(struct amap_eq_entry,
793			     resource_id) / 32] &
794			     EQE_RESID_MASK) >> 16) != cq->id) {
795				spin_lock_irqsave(&phba->isr_lock, flags);
796				phba->todo_mcc_cq = 1;
797				spin_unlock_irqrestore(&phba->isr_lock, flags);
798			} else {
799				spin_lock_irqsave(&phba->isr_lock, flags);
800				phba->todo_cq = 1;
801				spin_unlock_irqrestore(&phba->isr_lock, flags);
802			}
803			AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0);
804			queue_tail_inc(eq);
805			eqe = queue_tail_node(eq);
806			num_ioeq_processed++;
807		}
808		if (phba->todo_cq || phba->todo_mcc_cq)
809			queue_work(phba->wq, &phba->work_cqs);
810
811		if (num_ioeq_processed) {
812			hwi_ring_eq_db(phba, eq->id, 0,
813				       num_ioeq_processed, 1, 1);
814			return IRQ_HANDLED;
815		} else
816			return IRQ_NONE;
817	}
818}
819
/**
 * beiscsi_init_irqs - register interrupt handlers for the adapter
 * @phba: adapter being initialized
 *
 * With MSI-X enabled: one be_isr_msix per CPU (vectors 0..num_cpus-1)
 * plus one be_isr_mcc on the extra vector at index num_cpus; each gets
 * a kzalloc'd name buffer of BEISCSI_MSI_NAME bytes.  Without MSI-X a
 * single shared be_isr handles everything.
 *
 * Returns 0 on success or a negative errno, freeing any names/IRQs
 * already set up on the failure path.
 */
static int beiscsi_init_irqs(struct beiscsi_hba *phba)
{
	struct pci_dev *pcidev = phba->pcidev;
	struct hwi_controller *phwi_ctrlr;
	struct hwi_context_memory *phwi_context;
	int ret, msix_vec, i, j;

	phwi_ctrlr = phba->phwi_ctrlr;
	phwi_context = phwi_ctrlr->phwi_ctxt;

	if (phba->msix_enabled) {
		for (i = 0; i < phba->num_cpus; i++) {
			phba->msi_name[i] = kzalloc(BEISCSI_MSI_NAME,
						    GFP_KERNEL);
			if (!phba->msi_name[i]) {
				ret = -ENOMEM;
				goto free_msix_irqs;
			}

			sprintf(phba->msi_name[i], "beiscsi_%02x_%02x",
				phba->shost->host_no, i);
			msix_vec = phba->msix_entries[i].vector;
			ret = request_irq(msix_vec, be_isr_msix, 0,
					  phba->msi_name[i],
					  &phwi_context->be_eq[i]);
			if (ret) {
				shost_printk(KERN_ERR, phba->shost,
					     "beiscsi_init_irqs-Failed to"
					     "register msix for i = %d\n", i);
				kfree(phba->msi_name[i]);
				goto free_msix_irqs;
			}
		}
		/* i == num_cpus here: the dedicated MCC vector */
		phba->msi_name[i] = kzalloc(BEISCSI_MSI_NAME, GFP_KERNEL);
		if (!phba->msi_name[i]) {
			ret = -ENOMEM;
			goto free_msix_irqs;
		}
		sprintf(phba->msi_name[i], "beiscsi_mcc_%02x",
			phba->shost->host_no);
		msix_vec = phba->msix_entries[i].vector;
		ret = request_irq(msix_vec, be_isr_mcc, 0, phba->msi_name[i],
				  &phwi_context->be_eq[i]);
		if (ret) {
			shost_printk(KERN_ERR, phba->shost, "beiscsi_init_irqs-"
				     "Failed to register beiscsi_msix_mcc\n");
			kfree(phba->msi_name[i]);
			goto free_msix_irqs;
		}

	} else {
		ret = request_irq(pcidev->irq, be_isr, IRQF_SHARED,
				  "beiscsi", phba);
		if (ret) {
			shost_printk(KERN_ERR, phba->shost, "beiscsi_init_irqs-"
				     "Failed to register irq\\n");
			return ret;
		}
	}
	return 0;
free_msix_irqs:
	/* unwind vectors 0..i-1 that were fully set up */
	for (j = i - 1; j >= 0; j--) {
		kfree(phba->msi_name[j]);
		msix_vec = phba->msix_entries[j].vector;
		free_irq(msix_vec, &phwi_context->be_eq[j]);
	}
	return ret;
}
888
889static void hwi_ring_cq_db(struct beiscsi_hba *phba,
890			   unsigned int id, unsigned int num_processed,
891			   unsigned char rearm, unsigned char event)
892{
893	u32 val = 0;
894	val |= id & DB_CQ_RING_ID_MASK;
895	if (rearm)
896		val |= 1 << DB_CQ_REARM_SHIFT;
897	val |= num_processed << DB_CQ_NUM_POPPED_SHIFT;
898	iowrite32(val, phba->db_va + DB_CQ_OFFSET);
899}
900
/**
 * beiscsi_process_async_pdu - hand an unsolicited PDU to libiscsi
 * @beiscsi_conn: connection the PDU arrived on
 * @phba: owning adapter
 * @cid: connection id (unused here)
 * @ppdu: the received PDU header
 * @pdu_len: length of @ppdu (unused here)
 * @pbuffer: PDU data payload, if any
 * @buf_len: length of @pbuffer
 *
 * Fixes up per-opcode details (drops the NOP-IN payload, restores the
 * libiscsi ITT on login/text responses) and completes the PDU through
 * __iscsi_complete_pdu() under the session lock.
 *
 * Returns 0 on success, 1 for an unrecognized opcode.
 */
static unsigned int
beiscsi_process_async_pdu(struct beiscsi_conn *beiscsi_conn,
			  struct beiscsi_hba *phba,
			  unsigned short cid,
			  struct pdu_base *ppdu,
			  unsigned long pdu_len,
			  void *pbuffer, unsigned long buf_len)
{
	struct iscsi_conn *conn = beiscsi_conn->conn;
	struct iscsi_session *session = conn->session;
	struct iscsi_task *task;
	struct beiscsi_io_task *io_task;
	struct iscsi_hdr *login_hdr;

	switch (ppdu->dw[offsetof(struct amap_pdu_base, opcode) / 32] &
						PDUBASE_OPCODE_MASK) {
	case ISCSI_OP_NOOP_IN:
		/* NOP-IN carries no data for libiscsi */
		pbuffer = NULL;
		buf_len = 0;
		break;
	case ISCSI_OP_ASYNC_EVENT:
		break;
	case ISCSI_OP_REJECT:
		/* a Reject PDU always carries the 48-byte rejected BHS */
		WARN_ON(!pbuffer);
		WARN_ON(!(buf_len == 48));
		SE_DEBUG(DBG_LVL_1, "In ISCSI_OP_REJECT\n");
		break;
	case ISCSI_OP_LOGIN_RSP:
	case ISCSI_OP_TEXT_RSP:
		/* restore the ITT libiscsi assigned to the login task */
		task = conn->login_task;
		io_task = task->dd_data;
		login_hdr = (struct iscsi_hdr *)ppdu;
		login_hdr->itt = io_task->libiscsi_itt;
		break;
	default:
		shost_printk(KERN_WARNING, phba->shost,
			     "Unrecognized opcode 0x%x in async msg\n",
			     (ppdu->
			     dw[offsetof(struct amap_pdu_base, opcode) / 32]
						& PDUBASE_OPCODE_MASK));
		return 1;
	}

	spin_lock_bh(&session->lock);
	__iscsi_complete_pdu(conn, (struct iscsi_hdr *)ppdu, pbuffer, buf_len);
	spin_unlock_bh(&session->lock);
	return 0;
}
949
950static struct sgl_handle *alloc_io_sgl_handle(struct beiscsi_hba *phba)
951{
952	struct sgl_handle *psgl_handle;
953
954	if (phba->io_sgl_hndl_avbl) {
955		SE_DEBUG(DBG_LVL_8,
956			 "In alloc_io_sgl_handle,io_sgl_alloc_index=%d\n",
957			 phba->io_sgl_alloc_index);
958		psgl_handle = phba->io_sgl_hndl_base[phba->
959						io_sgl_alloc_index];
960		phba->io_sgl_hndl_base[phba->io_sgl_alloc_index] = NULL;
961		phba->io_sgl_hndl_avbl--;
962		if (phba->io_sgl_alloc_index == (phba->params.
963						 ios_per_ctrl - 1))
964			phba->io_sgl_alloc_index = 0;
965		else
966			phba->io_sgl_alloc_index++;
967	} else
968		psgl_handle = NULL;
969	return psgl_handle;
970}
971
972static void
973free_io_sgl_handle(struct beiscsi_hba *phba, struct sgl_handle *psgl_handle)
974{
975	SE_DEBUG(DBG_LVL_8, "In free_,io_sgl_free_index=%d\n",
976		 phba->io_sgl_free_index);
977	if (phba->io_sgl_hndl_base[phba->io_sgl_free_index]) {
978		/*
979		 * this can happen if clean_task is called on a task that
980		 * failed in xmit_task or alloc_pdu.
981		 */
982		 SE_DEBUG(DBG_LVL_8,
983			 "Double Free in IO SGL io_sgl_free_index=%d,"
984			 "value there=%p\n", phba->io_sgl_free_index,
985			 phba->io_sgl_hndl_base[phba->io_sgl_free_index]);
986		return;
987	}
988	phba->io_sgl_hndl_base[phba->io_sgl_free_index] = psgl_handle;
989	phba->io_sgl_hndl_avbl++;
990	if (phba->io_sgl_free_index == (phba->params.ios_per_ctrl - 1))
991		phba->io_sgl_free_index = 0;
992	else
993		phba->io_sgl_free_index++;
994}
995
996/**
997 * alloc_wrb_handle - To allocate a wrb handle
998 * @phba: The hba pointer
999 * @cid: The cid to use for allocation
1000 *
1001 * This happens under session_lock until submission to chip
1002 */
1003struct wrb_handle *alloc_wrb_handle(struct beiscsi_hba *phba, unsigned int cid)
1004{
1005	struct hwi_wrb_context *pwrb_context;
1006	struct hwi_controller *phwi_ctrlr;
1007	struct wrb_handle *pwrb_handle, *pwrb_handle_tmp;
1008
1009	phwi_ctrlr = phba->phwi_ctrlr;
1010	pwrb_context = &phwi_ctrlr->wrb_context[cid];
1011	if (pwrb_context->wrb_handles_available >= 2) {
1012		pwrb_handle = pwrb_context->pwrb_handle_base[
1013					    pwrb_context->alloc_index];
1014		pwrb_context->wrb_handles_available--;
1015		if (pwrb_context->alloc_index ==
1016						(phba->params.wrbs_per_cxn - 1))
1017			pwrb_context->alloc_index = 0;
1018		else
1019			pwrb_context->alloc_index++;
1020		pwrb_handle_tmp = pwrb_context->pwrb_handle_base[
1021						pwrb_context->alloc_index];
1022		pwrb_handle->nxt_wrb_index = pwrb_handle_tmp->wrb_index;
1023	} else
1024		pwrb_handle = NULL;
1025	return pwrb_handle;
1026}
1027
1028/**
1029 * free_wrb_handle - To free the wrb handle back to pool
1030 * @phba: The hba pointer
1031 * @pwrb_context: The context to free from
1032 * @pwrb_handle: The wrb_handle to free
1033 *
1034 * This happens under session_lock until submission to chip
1035 */
1036static void
1037free_wrb_handle(struct beiscsi_hba *phba, struct hwi_wrb_context *pwrb_context,
1038		struct wrb_handle *pwrb_handle)
1039{
1040	pwrb_context->pwrb_handle_base[pwrb_context->free_index] = pwrb_handle;
1041	pwrb_context->wrb_handles_available++;
1042	if (pwrb_context->free_index == (phba->params.wrbs_per_cxn - 1))
1043		pwrb_context->free_index = 0;
1044	else
1045		pwrb_context->free_index++;
1046
1047	SE_DEBUG(DBG_LVL_8,
1048		 "FREE WRB: pwrb_handle=%p free_index=0x%x"
1049		 "wrb_handles_available=%d\n",
1050		 pwrb_handle, pwrb_context->free_index,
1051		 pwrb_context->wrb_handles_available);
1052}
1053
1054static struct sgl_handle *alloc_mgmt_sgl_handle(struct beiscsi_hba *phba)
1055{
1056	struct sgl_handle *psgl_handle;
1057
1058	if (phba->eh_sgl_hndl_avbl) {
1059		psgl_handle = phba->eh_sgl_hndl_base[phba->eh_sgl_alloc_index];
1060		phba->eh_sgl_hndl_base[phba->eh_sgl_alloc_index] = NULL;
1061		SE_DEBUG(DBG_LVL_8, "mgmt_sgl_alloc_index=%d=0x%x\n",
1062			 phba->eh_sgl_alloc_index, phba->eh_sgl_alloc_index);
1063		phba->eh_sgl_hndl_avbl--;
1064		if (phba->eh_sgl_alloc_index ==
1065		    (phba->params.icds_per_ctrl - phba->params.ios_per_ctrl -
1066		     1))
1067			phba->eh_sgl_alloc_index = 0;
1068		else
1069			phba->eh_sgl_alloc_index++;
1070	} else
1071		psgl_handle = NULL;
1072	return psgl_handle;
1073}
1074
1075void
1076free_mgmt_sgl_handle(struct beiscsi_hba *phba, struct sgl_handle *psgl_handle)
1077{
1078
1079	SE_DEBUG(DBG_LVL_8, "In  free_mgmt_sgl_handle,eh_sgl_free_index=%d\n",
1080			     phba->eh_sgl_free_index);
1081	if (phba->eh_sgl_hndl_base[phba->eh_sgl_free_index]) {
1082		/*
1083		 * this can happen if clean_task is called on a task that
1084		 * failed in xmit_task or alloc_pdu.
1085		 */
1086		SE_DEBUG(DBG_LVL_8,
1087			 "Double Free in eh SGL ,eh_sgl_free_index=%d\n",
1088			 phba->eh_sgl_free_index);
1089		return;
1090	}
1091	phba->eh_sgl_hndl_base[phba->eh_sgl_free_index] = psgl_handle;
1092	phba->eh_sgl_hndl_avbl++;
1093	if (phba->eh_sgl_free_index ==
1094	    (phba->params.icds_per_ctrl - phba->params.ios_per_ctrl - 1))
1095		phba->eh_sgl_free_index = 0;
1096	else
1097		phba->eh_sgl_free_index++;
1098}
1099
1100static void
1101be_complete_io(struct beiscsi_conn *beiscsi_conn,
1102	       struct iscsi_task *task, struct sol_cqe *psol)
1103{
1104	struct beiscsi_io_task *io_task = task->dd_data;
1105	struct be_status_bhs *sts_bhs =
1106				(struct be_status_bhs *)io_task->cmd_bhs;
1107	struct iscsi_conn *conn = beiscsi_conn->conn;
1108	unsigned int sense_len;
1109	unsigned char *sense;
1110	u32 resid = 0, exp_cmdsn, max_cmdsn;
1111	u8 rsp, status, flags;
1112
1113	exp_cmdsn = (psol->
1114			dw[offsetof(struct amap_sol_cqe, i_exp_cmd_sn) / 32]
1115			& SOL_EXP_CMD_SN_MASK);
1116	max_cmdsn = ((psol->
1117			dw[offsetof(struct amap_sol_cqe, i_exp_cmd_sn) / 32]
1118			& SOL_EXP_CMD_SN_MASK) +
1119			((psol->dw[offsetof(struct amap_sol_cqe, i_cmd_wnd)
1120				/ 32] & SOL_CMD_WND_MASK) >> 24) - 1);
1121	rsp = ((psol->dw[offsetof(struct amap_sol_cqe, i_resp) / 32]
1122						& SOL_RESP_MASK) >> 16);
1123	status = ((psol->dw[offsetof(struct amap_sol_cqe, i_sts) / 32]
1124						& SOL_STS_MASK) >> 8);
1125	flags = ((psol->dw[offsetof(struct amap_sol_cqe, i_flags) / 32]
1126					& SOL_FLAGS_MASK) >> 24) | 0x80;
1127	if (!task->sc) {
1128		if (io_task->scsi_cmnd)
1129			scsi_dma_unmap(io_task->scsi_cmnd);
1130
1131		return;
1132	}
1133	task->sc->result = (DID_OK << 16) | status;
1134	if (rsp != ISCSI_STATUS_CMD_COMPLETED) {
1135		task->sc->result = DID_ERROR << 16;
1136		goto unmap;
1137	}
1138
1139	/* bidi not initially supported */
1140	if (flags & (ISCSI_FLAG_CMD_UNDERFLOW | ISCSI_FLAG_CMD_OVERFLOW)) {
1141		resid = (psol->dw[offsetof(struct amap_sol_cqe, i_res_cnt) /
1142				32] & SOL_RES_CNT_MASK);
1143
1144		if (!status && (flags & ISCSI_FLAG_CMD_OVERFLOW))
1145			task->sc->result = DID_ERROR << 16;
1146
1147		if (flags & ISCSI_FLAG_CMD_UNDERFLOW) {
1148			scsi_set_resid(task->sc, resid);
1149			if (!status && (scsi_bufflen(task->sc) - resid <
1150			    task->sc->underflow))
1151				task->sc->result = DID_ERROR << 16;
1152		}
1153	}
1154
1155	if (status == SAM_STAT_CHECK_CONDITION) {
1156		unsigned short *slen = (unsigned short *)sts_bhs->sense_info;
1157		sense = sts_bhs->sense_info + sizeof(unsigned short);
1158		sense_len =  cpu_to_be16(*slen);
1159		memcpy(task->sc->sense_buffer, sense,
1160		       min_t(u16, sense_len, SCSI_SENSE_BUFFERSIZE));
1161	}
1162
1163	if (io_task->cmd_bhs->iscsi_hdr.flags & ISCSI_FLAG_CMD_READ) {
1164		if (psol->dw[offsetof(struct amap_sol_cqe, i_res_cnt) / 32]
1165							& SOL_RES_CNT_MASK)
1166			 conn->rxdata_octets += (psol->
1167			     dw[offsetof(struct amap_sol_cqe, i_res_cnt) / 32]
1168			     & SOL_RES_CNT_MASK);
1169	}
1170unmap:
1171	scsi_dma_unmap(io_task->scsi_cmnd);
1172	iscsi_complete_scsi_task(task, exp_cmdsn, max_cmdsn);
1173}
1174
/**
 * be_complete_logout - build and deliver a Logout Response PDU
 * @beiscsi_conn: connection the completion arrived on
 * @task: the libiscsi logout task
 * @psol: solicited completion entry carrying the response fields
 *
 * Fills a struct iscsi_logout_rsp from the CQE and passes it to
 * libiscsi via __iscsi_complete_pdu() with no data segment.
 */
static void
be_complete_logout(struct beiscsi_conn *beiscsi_conn,
		   struct iscsi_task *task, struct sol_cqe *psol)
{
	struct iscsi_logout_rsp *hdr;
	struct beiscsi_io_task *io_task = task->dd_data;
	struct iscsi_conn *conn = beiscsi_conn->conn;

	hdr = (struct iscsi_logout_rsp *)task->hdr;
	hdr->opcode = ISCSI_OP_LOGOUT_RSP;
	hdr->t2wait = 5;	/* Time2Wait advertised to the initiator */
	hdr->t2retain = 0;	/* Time2Retain: nothing is retained */
	hdr->flags = ((psol->dw[offsetof(struct amap_sol_cqe, i_flags) / 32]
					& SOL_FLAGS_MASK) >> 24) | 0x80;
	hdr->response = (psol->dw[offsetof(struct amap_sol_cqe, i_resp) /
					32] & SOL_RESP_MASK);
	hdr->exp_cmdsn = cpu_to_be32(psol->
			dw[offsetof(struct amap_sol_cqe, i_exp_cmd_sn) / 32]
					& SOL_EXP_CMD_SN_MASK);
	/*
	 * MaxCmdSN = ExpCmdSN + command window - 1.
	 * NOTE(review): exp_cmdsn above uses cpu_to_be32() while this line
	 * uses be32_to_cpu(); the two swap identically on any fixed-endian
	 * host, but the mixed direction looks unintentional - confirm with
	 * sparse.
	 */
	hdr->max_cmdsn = be32_to_cpu((psol->
			 dw[offsetof(struct amap_sol_cqe, i_exp_cmd_sn) / 32]
					& SOL_EXP_CMD_SN_MASK) +
			((psol->dw[offsetof(struct amap_sol_cqe, i_cmd_wnd)
					/ 32] & SOL_CMD_WND_MASK) >> 24) - 1);
	/* Logout responses carry no data segment. */
	hdr->dlength[0] = 0;
	hdr->dlength[1] = 0;
	hdr->dlength[2] = 0;
	hdr->hlength = 0;
	hdr->itt = io_task->libiscsi_itt;
	__iscsi_complete_pdu(conn, (struct iscsi_hdr *)hdr, NULL, 0);
}
1206
/**
 * be_complete_tmf - build and deliver a Task Management Response PDU
 * @beiscsi_conn: connection the completion arrived on
 * @task: the libiscsi TMF task
 * @psol: solicited completion entry carrying the response fields
 *
 * Fills a struct iscsi_tm_rsp from the CQE (flags, response code and
 * the ExpCmdSN/MaxCmdSN window) and hands it to libiscsi.
 */
static void
be_complete_tmf(struct beiscsi_conn *beiscsi_conn,
		struct iscsi_task *task, struct sol_cqe *psol)
{
	struct iscsi_tm_rsp *hdr;
	struct iscsi_conn *conn = beiscsi_conn->conn;
	struct beiscsi_io_task *io_task = task->dd_data;

	hdr = (struct iscsi_tm_rsp *)task->hdr;
	hdr->opcode = ISCSI_OP_SCSI_TMFUNC_RSP;
	hdr->flags = ((psol->dw[offsetof(struct amap_sol_cqe, i_flags) / 32]
					& SOL_FLAGS_MASK) >> 24) | 0x80;
	hdr->response = (psol->dw[offsetof(struct amap_sol_cqe, i_resp) /
					32] & SOL_RESP_MASK);
	hdr->exp_cmdsn = cpu_to_be32(psol->dw[offsetof(struct amap_sol_cqe,
				    i_exp_cmd_sn) / 32] & SOL_EXP_CMD_SN_MASK);
	/* MaxCmdSN = ExpCmdSN + command window - 1 */
	hdr->max_cmdsn = be32_to_cpu((psol->dw[offsetof(struct amap_sol_cqe,
			i_exp_cmd_sn) / 32] & SOL_EXP_CMD_SN_MASK) +
			((psol->dw[offsetof(struct amap_sol_cqe, i_cmd_wnd)
			/ 32] & SOL_CMD_WND_MASK) >> 24) - 1);
	hdr->itt = io_task->libiscsi_itt;
	__iscsi_complete_pdu(conn, (struct iscsi_hdr *)hdr, NULL, 0);
}
1230
/**
 * hwi_complete_drvr_msgs - complete a driver-message (DMSG) CQE
 * @beiscsi_conn: connection the CQE belongs to
 * @phba: adapter instance
 * @psol: the solicited completion entry
 *
 * Looks up the WRB handle from the CID and wrb_index carried in the
 * CQE, then returns the task's management SGL handle (under
 * mgmt_sgl_lock) and its WRB handle (under the session lock) to their
 * respective pools.
 */
static void
hwi_complete_drvr_msgs(struct beiscsi_conn *beiscsi_conn,
		       struct beiscsi_hba *phba, struct sol_cqe *psol)
{
	struct hwi_wrb_context *pwrb_context;
	struct wrb_handle *pwrb_handle = NULL;
	struct hwi_controller *phwi_ctrlr;
	struct iscsi_task *task;
	struct beiscsi_io_task *io_task;
	struct iscsi_conn *conn = beiscsi_conn->conn;
	struct iscsi_session *session = conn->session;

	phwi_ctrlr = phba->phwi_ctrlr;
	/* CID in the CQE is offset by the FW's starting CID. */
	pwrb_context = &phwi_ctrlr->wrb_context[((psol->
				dw[offsetof(struct amap_sol_cqe, cid) / 32] &
				SOL_CID_MASK) >> 6) -
				phba->fw_config.iscsi_cid_start];
	pwrb_handle = pwrb_context->pwrb_handle_basestd[((psol->
				dw[offsetof(struct amap_sol_cqe, wrb_index) /
				32] & SOL_WRB_INDEX_MASK) >> 16)];
	task = pwrb_handle->pio_handle;

	io_task = task->dd_data;
	spin_lock(&phba->mgmt_sgl_lock);
	free_mgmt_sgl_handle(phba, io_task->psgl_handle);
	spin_unlock(&phba->mgmt_sgl_lock);
	spin_lock_bh(&session->lock);
	free_wrb_handle(phba, pwrb_context, pwrb_handle);
	spin_unlock_bh(&session->lock);
}
1261
/**
 * be_complete_nopin_resp - build and deliver a NOP-In PDU
 * @beiscsi_conn: connection the completion arrived on
 * @task: the libiscsi NOP-Out task being answered
 * @psol: solicited completion entry carrying the response fields
 *
 * Fills a struct iscsi_nopin from the CQE (flags plus the
 * ExpCmdSN/MaxCmdSN window) and hands it to libiscsi.
 */
static void
be_complete_nopin_resp(struct beiscsi_conn *beiscsi_conn,
		       struct iscsi_task *task, struct sol_cqe *psol)
{
	struct iscsi_nopin *hdr;
	struct iscsi_conn *conn = beiscsi_conn->conn;
	struct beiscsi_io_task *io_task = task->dd_data;

	hdr = (struct iscsi_nopin *)task->hdr;
	hdr->flags = ((psol->dw[offsetof(struct amap_sol_cqe, i_flags) / 32]
			& SOL_FLAGS_MASK) >> 24) | 0x80;
	hdr->exp_cmdsn = cpu_to_be32(psol->dw[offsetof(struct amap_sol_cqe,
				     i_exp_cmd_sn) / 32] & SOL_EXP_CMD_SN_MASK);
	/* MaxCmdSN = ExpCmdSN + command window - 1 */
	hdr->max_cmdsn = be32_to_cpu((psol->dw[offsetof(struct amap_sol_cqe,
			i_exp_cmd_sn) / 32] & SOL_EXP_CMD_SN_MASK) +
			((psol->dw[offsetof(struct amap_sol_cqe, i_cmd_wnd)
			/ 32] & SOL_CMD_WND_MASK) >> 24) - 1);
	hdr->opcode = ISCSI_OP_NOOP_IN;
	hdr->itt = io_task->libiscsi_itt;
	__iscsi_complete_pdu(conn, (struct iscsi_hdr *)hdr, NULL, 0);
}
1283
1284static void hwi_complete_cmd(struct beiscsi_conn *beiscsi_conn,
1285			     struct beiscsi_hba *phba, struct sol_cqe *psol)
1286{
1287	struct hwi_wrb_context *pwrb_context;
1288	struct wrb_handle *pwrb_handle;
1289	struct iscsi_wrb *pwrb = NULL;
1290	struct hwi_controller *phwi_ctrlr;
1291	struct iscsi_task *task;
1292	unsigned int type;
1293	struct iscsi_conn *conn = beiscsi_conn->conn;
1294	struct iscsi_session *session = conn->session;
1295
1296	phwi_ctrlr = phba->phwi_ctrlr;
1297	pwrb_context = &phwi_ctrlr->wrb_context[((psol->dw[offsetof
1298				(struct amap_sol_cqe, cid) / 32]
1299				& SOL_CID_MASK) >> 6) -
1300				phba->fw_config.iscsi_cid_start];
1301	pwrb_handle = pwrb_context->pwrb_handle_basestd[((psol->
1302				dw[offsetof(struct amap_sol_cqe, wrb_index) /
1303				32] & SOL_WRB_INDEX_MASK) >> 16)];
1304	task = pwrb_handle->pio_handle;
1305	pwrb = pwrb_handle->pwrb;
1306	type = (pwrb->dw[offsetof(struct amap_iscsi_wrb, type) / 32] &
1307				 WRB_TYPE_MASK) >> 28;
1308
1309	spin_lock_bh(&session->lock);
1310	switch (type) {
1311	case HWH_TYPE_IO:
1312	case HWH_TYPE_IO_RD:
1313		if ((task->hdr->opcode & ISCSI_OPCODE_MASK) ==
1314		     ISCSI_OP_NOOP_OUT)
1315			be_complete_nopin_resp(beiscsi_conn, task, psol);
1316		else
1317			be_complete_io(beiscsi_conn, task, psol);
1318		break;
1319
1320	case HWH_TYPE_LOGOUT:
1321		if ((task->hdr->opcode & ISCSI_OPCODE_MASK) == ISCSI_OP_LOGOUT)
1322			be_complete_logout(beiscsi_conn, task, psol);
1323		else
1324			be_complete_tmf(beiscsi_conn, task, psol);
1325
1326		break;
1327
1328	case HWH_TYPE_LOGIN:
1329		SE_DEBUG(DBG_LVL_1,
1330			 "\t\t No HWH_TYPE_LOGIN Expected in hwi_complete_cmd"
1331			 "- Solicited path\n");
1332		break;
1333
1334	case HWH_TYPE_NOP:
1335		be_complete_nopin_resp(beiscsi_conn, task, psol);
1336		break;
1337
1338	default:
1339		shost_printk(KERN_WARNING, phba->shost,
1340				"In hwi_complete_cmd, unknown type = %d"
1341				"wrb_index 0x%x CID 0x%x\n", type,
1342				((psol->dw[offsetof(struct amap_iscsi_wrb,
1343				type) / 32] & SOL_WRB_INDEX_MASK) >> 16),
1344				((psol->dw[offsetof(struct amap_sol_cqe,
1345				cid) / 32] & SOL_CID_MASK) >> 6));
1346		break;
1347	}
1348
1349	spin_unlock_bh(&session->lock);
1350}
1351
1352static struct list_head *hwi_get_async_busy_list(struct hwi_async_pdu_context
1353					  *pasync_ctx, unsigned int is_header,
1354					  unsigned int host_write_ptr)
1355{
1356	if (is_header)
1357		return &pasync_ctx->async_entry[host_write_ptr].
1358		    header_busy_list;
1359	else
1360		return &pasync_ctx->async_entry[host_write_ptr].data_busy_list;
1361}
1362
/**
 * hwi_get_async_handle - map a default-PDU CQE back to its buffer handle
 * @phba: adapter instance
 * @beiscsi_conn: connection the CQE belongs to
 * @pasync_ctx: async PDU context holding the header and data rings
 * @pdpdu_cqe: the default-PDU completion entry from hardware
 * @pcq_index: out parameter - the ring index carried in the CQE
 *
 * Recovers the bus address the firmware reported, determines from the
 * CQE code whether it refers to the header or the data ring, computes
 * the buffer index within that ring, and walks the slot's busy list
 * for the matching async_pdu_handle.  Returns NULL on an unexpected
 * CQE code.
 */
static struct async_pdu_handle *
hwi_get_async_handle(struct beiscsi_hba *phba,
		     struct beiscsi_conn *beiscsi_conn,
		     struct hwi_async_pdu_context *pasync_ctx,
		     struct i_t_dpdu_cqe *pdpdu_cqe, unsigned int *pcq_index)
{
	struct be_bus_address phys_addr;
	struct list_head *pbusy_list;
	struct async_pdu_handle *pasync_handle = NULL;
	int buffer_len = 0;
	unsigned char buffer_index = -1;
	unsigned char is_header = 0;

	/*
	 * db_addr_lo in the CQE includes the PDU data length (dpl);
	 * subtract it to recover the buffer's base address.
	 * NOTE(review): assumes no borrow across the 32-bit boundary -
	 * confirm against the hardware spec.
	 */
	phys_addr.u.a32.address_lo =
	    pdpdu_cqe->dw[offsetof(struct amap_i_t_dpdu_cqe, db_addr_lo) / 32] -
	    ((pdpdu_cqe->dw[offsetof(struct amap_i_t_dpdu_cqe, dpl) / 32]
						& PDUCQE_DPL_MASK) >> 16);
	phys_addr.u.a32.address_hi =
	    pdpdu_cqe->dw[offsetof(struct amap_i_t_dpdu_cqe, db_addr_hi) / 32];

	phys_addr.u.a64.address =
			*((unsigned long long *)(&phys_addr.u.a64.address));

	switch (pdpdu_cqe->dw[offsetof(struct amap_i_t_dpdu_cqe, code) / 32]
			& PDUCQE_CODE_MASK) {
	case UNSOL_HDR_NOTIFY:
		is_header = 1;

		pbusy_list = hwi_get_async_busy_list(pasync_ctx, 1,
			(pdpdu_cqe->dw[offsetof(struct amap_i_t_dpdu_cqe,
			index) / 32] & PDUCQE_INDEX_MASK));

		/* Offset from the header pool base selects the buffer. */
		buffer_len = (unsigned int)(phys_addr.u.a64.address -
				pasync_ctx->async_header.pa_base.u.a64.address);

		buffer_index = buffer_len /
				pasync_ctx->async_header.buffer_size;

		break;
	case UNSOL_DATA_NOTIFY:
		pbusy_list = hwi_get_async_busy_list(pasync_ctx, 0, (pdpdu_cqe->
					dw[offsetof(struct amap_i_t_dpdu_cqe,
					index) / 32] & PDUCQE_INDEX_MASK));
		/* Offset from the data pool base selects the buffer. */
		buffer_len = (unsigned long)(phys_addr.u.a64.address -
					pasync_ctx->async_data.pa_base.u.
					a64.address);
		buffer_index = buffer_len / pasync_ctx->async_data.buffer_size;
		break;
	default:
		pbusy_list = NULL;
		shost_printk(KERN_WARNING, phba->shost,
			"Unexpected code=%d\n",
			 pdpdu_cqe->dw[offsetof(struct amap_i_t_dpdu_cqe,
					code) / 32] & PDUCQE_CODE_MASK);
		return NULL;
	}

	/*
	 * NOTE(review): this bound check uses async_data.num_entries even
	 * when the CQE was for the header ring - likely should use the
	 * matching ring's entry count; confirm.
	 */
	WARN_ON(!(buffer_index <= pasync_ctx->async_data.num_entries));
	WARN_ON(list_empty(pbusy_list));
	list_for_each_entry(pasync_handle, pbusy_list, link) {
		WARN_ON(pasync_handle->consumed);
		if (pasync_handle->index == buffer_index)
			break;
	}

	/*
	 * NOTE(review): if no entry matched, list_for_each_entry leaves
	 * the cursor pointing at the list head's container, so this
	 * WARN_ON can never fire and the accesses below would be bogus.
	 */
	WARN_ON(!pasync_handle);

	/* CID in the connection is offset by the FW's starting CID. */
	pasync_handle->cri = (unsigned short)beiscsi_conn->beiscsi_conn_cid -
					     phba->fw_config.iscsi_cid_start;
	pasync_handle->is_header = is_header;
	pasync_handle->buffer_len = ((pdpdu_cqe->
			dw[offsetof(struct amap_i_t_dpdu_cqe, dpl) / 32]
			& PDUCQE_DPL_MASK) >> 16);

	*pcq_index = (pdpdu_cqe->dw[offsetof(struct amap_i_t_dpdu_cqe,
			index) / 32] & PDUCQE_INDEX_MASK);
	return pasync_handle;
}
1441
/**
 * hwi_update_async_writables - advance the ring read pointer to a CQE index
 * @pasync_ctx: async PDU context holding the header and data rings
 * @is_header: nonzero to operate on the header ring, else the data ring
 * @cq_index: ring index reported by the CQE to catch up to
 *
 * Walks the selected ring's endpoint read pointer forward to @cq_index,
 * marking the first busy handle of each traversed slot as consumed and
 * counting the slots as newly writable.  A zero count means the CQE
 * index equalled the current read pointer, which is treated as a
 * duplicate notification.  Always returns 0.
 */
static unsigned int
hwi_update_async_writables(struct hwi_async_pdu_context *pasync_ctx,
			   unsigned int is_header, unsigned int cq_index)
{
	struct list_head *pbusy_list;
	struct async_pdu_handle *pasync_handle;
	unsigned int num_entries, writables = 0;
	unsigned int *pep_read_ptr, *pwritables;


	/* Select the per-ring bookkeeping to operate on. */
	if (is_header) {
		pep_read_ptr = &pasync_ctx->async_header.ep_read_ptr;
		pwritables = &pasync_ctx->async_header.writables;
		num_entries = pasync_ctx->async_header.num_entries;
	} else {
		pep_read_ptr = &pasync_ctx->async_data.ep_read_ptr;
		pwritables = &pasync_ctx->async_data.writables;
		num_entries = pasync_ctx->async_data.num_entries;
	}

	while ((*pep_read_ptr) != cq_index) {
		/* Advance with wraparound at the ring size. */
		(*pep_read_ptr)++;
		*pep_read_ptr = (*pep_read_ptr) % num_entries;

		pbusy_list = hwi_get_async_busy_list(pasync_ctx, is_header,
						     *pep_read_ptr);
		if (writables == 0)
			WARN_ON(list_empty(pbusy_list));

		if (!list_empty(pbusy_list)) {
			pasync_handle = list_entry(pbusy_list->next,
						   struct async_pdu_handle,
						   link);
			WARN_ON(!pasync_handle);
			pasync_handle->consumed = 1;
		}

		writables++;
	}

	if (!writables) {
		SE_DEBUG(DBG_LVL_1,
			 "Duplicate notification received - index 0x%x!!\n",
			 cq_index);
		WARN_ON(1);
	}

	*pwritables = *pwritables + writables;
	return 0;
}
1492
1493static unsigned int hwi_free_async_msg(struct beiscsi_hba *phba,
1494				       unsigned int cri)
1495{
1496	struct hwi_controller *phwi_ctrlr;
1497	struct hwi_async_pdu_context *pasync_ctx;
1498	struct async_pdu_handle *pasync_handle, *tmp_handle;
1499	struct list_head *plist;
1500	unsigned int i = 0;
1501
1502	phwi_ctrlr = phba->phwi_ctrlr;
1503	pasync_ctx = HWI_GET_ASYNC_PDU_CTX(phwi_ctrlr);
1504
1505	plist  = &pasync_ctx->async_entry[cri].wait_queue.list;
1506
1507	list_for_each_entry_safe(pasync_handle, tmp_handle, plist, link) {
1508		list_del(&pasync_handle->link);
1509
1510		if (i == 0) {
1511			list_add_tail(&pasync_handle->link,
1512				      &pasync_ctx->async_header.free_list);
1513			pasync_ctx->async_header.free_entries++;
1514			i++;
1515		} else {
1516			list_add_tail(&pasync_handle->link,
1517				      &pasync_ctx->async_data.free_list);
1518			pasync_ctx->async_data.free_entries++;
1519			i++;
1520		}
1521	}
1522
1523	INIT_LIST_HEAD(&pasync_ctx->async_entry[cri].wait_queue.list);
1524	pasync_ctx->async_entry[cri].wait_queue.hdr_received = 0;
1525	pasync_ctx->async_entry[cri].wait_queue.bytes_received = 0;
1526	return 0;
1527}
1528
1529static struct phys_addr *
1530hwi_get_ring_address(struct hwi_async_pdu_context *pasync_ctx,
1531		     unsigned int is_header, unsigned int host_write_ptr)
1532{
1533	struct phys_addr *pasync_sge = NULL;
1534
1535	if (is_header)
1536		pasync_sge = pasync_ctx->async_header.ring_base;
1537	else
1538		pasync_sge = pasync_ctx->async_data.ring_base;
1539
1540	return pasync_sge + host_write_ptr;
1541}
1542
/**
 * hwi_post_async_buffers - repost free buffers to a default-PDU ring
 * @phba: adapter instance
 * @is_header: nonzero to repost the header ring, else the data ring
 *
 * Moves up to min(writables, free_entries) handles - rounded down to a
 * multiple of 8 - from the free list onto the per-slot busy lists,
 * writes their bus addresses into the ring SGEs, updates the ring's
 * bookkeeping and rings the RXULP doorbell with the number posted.
 */
static void hwi_post_async_buffers(struct beiscsi_hba *phba,
				   unsigned int is_header)
{
	struct hwi_controller *phwi_ctrlr;
	struct hwi_async_pdu_context *pasync_ctx;
	struct async_pdu_handle *pasync_handle;
	struct list_head *pfree_link, *pbusy_list;
	struct phys_addr *pasync_sge;
	unsigned int ring_id, num_entries;
	unsigned int host_write_num;
	unsigned int writables;
	unsigned int i = 0;
	u32 doorbell = 0;

	phwi_ctrlr = phba->phwi_ctrlr;
	pasync_ctx = HWI_GET_ASYNC_PDU_CTX(phwi_ctrlr);

	/* Pick the ring-specific state: header or data. */
	if (is_header) {
		num_entries = pasync_ctx->async_header.num_entries;
		writables = min(pasync_ctx->async_header.writables,
				pasync_ctx->async_header.free_entries);
		pfree_link = pasync_ctx->async_header.free_list.next;
		host_write_num = pasync_ctx->async_header.host_write_ptr;
		ring_id = phwi_ctrlr->default_pdu_hdr.id;
	} else {
		num_entries = pasync_ctx->async_data.num_entries;
		writables = min(pasync_ctx->async_data.writables,
				pasync_ctx->async_data.free_entries);
		pfree_link = pasync_ctx->async_data.free_list.next;
		host_write_num = pasync_ctx->async_data.host_write_ptr;
		ring_id = phwi_ctrlr->default_pdu_data.id;
	}

	/* Post in batches of 8; skip entirely if fewer are available. */
	writables = (writables / 8) * 8;
	if (writables) {
		for (i = 0; i < writables; i++) {
			pbusy_list =
			    hwi_get_async_busy_list(pasync_ctx, is_header,
						    host_write_num);
			pasync_handle =
			    list_entry(pfree_link, struct async_pdu_handle,
								link);
			WARN_ON(!pasync_handle);
			pasync_handle->consumed = 0;

			pfree_link = pfree_link->next;

			pasync_sge = hwi_get_ring_address(pasync_ctx,
						is_header, host_write_num);

			/*
			 * NOTE(review): hi is assigned address_lo and lo is
			 * assigned address_hi - looks swapped, but may match
			 * the hardware SGE layout; confirm against the spec.
			 */
			pasync_sge->hi = pasync_handle->pa.u.a32.address_lo;
			pasync_sge->lo = pasync_handle->pa.u.a32.address_hi;

			list_move(&pasync_handle->link, pbusy_list);

			/* Advance the write pointer with wraparound. */
			host_write_num++;
			host_write_num = host_write_num % num_entries;
		}

		if (is_header) {
			pasync_ctx->async_header.host_write_ptr =
							host_write_num;
			pasync_ctx->async_header.free_entries -= writables;
			pasync_ctx->async_header.writables -= writables;
			pasync_ctx->async_header.busy_entries += writables;
		} else {
			pasync_ctx->async_data.host_write_ptr = host_write_num;
			pasync_ctx->async_data.free_entries -= writables;
			pasync_ctx->async_data.writables -= writables;
			pasync_ctx->async_data.busy_entries += writables;
		}

		/* Tell the hardware how many buffers were posted. */
		doorbell |= ring_id & DB_DEF_PDU_RING_ID_MASK;
		doorbell |= 1 << DB_DEF_PDU_REARM_SHIFT;
		doorbell |= 0 << DB_DEF_PDU_EVENT_SHIFT;
		doorbell |= (writables & DB_DEF_PDU_CQPROC_MASK)
					<< DB_DEF_PDU_CQPROC_SHIFT;

		iowrite32(doorbell, phba->db_va + DB_RXULP0_OFFSET);
	}
}
1624
/**
 * hwi_flush_default_pdu_buffer - drop a bad default-PDU data buffer
 * @phba: adapter instance
 * @beiscsi_conn: connection the CQE belongs to
 * @pdpdu_cqe: completion entry for the buffer being discarded
 *
 * Called on a digest error: resolves the CQE to its handle, updates
 * the ring's writable accounting if the handle was not yet consumed,
 * frees all buffers queued for that connection and reposts buffers to
 * the ring.
 */
static void hwi_flush_default_pdu_buffer(struct beiscsi_hba *phba,
					 struct beiscsi_conn *beiscsi_conn,
					 struct i_t_dpdu_cqe *pdpdu_cqe)
{
	struct hwi_controller *phwi_ctrlr;
	struct hwi_async_pdu_context *pasync_ctx;
	struct async_pdu_handle *pasync_handle = NULL;
	unsigned int cq_index = -1;

	phwi_ctrlr = phba->phwi_ctrlr;
	pasync_ctx = HWI_GET_ASYNC_PDU_CTX(phwi_ctrlr);

	pasync_handle = hwi_get_async_handle(phba, beiscsi_conn, pasync_ctx,
					     pdpdu_cqe, &cq_index);
	/*
	 * NOTE(review): hwi_get_async_handle() can return NULL on an
	 * unexpected CQE code; this BUG_ON would then dereference NULL -
	 * confirm the code path cannot reach here in that case.
	 */
	BUG_ON(pasync_handle->is_header != 0);
	if (pasync_handle->consumed == 0)
		hwi_update_async_writables(pasync_ctx, pasync_handle->is_header,
					   cq_index);

	hwi_free_async_msg(phba, pasync_handle->cri);
	hwi_post_async_buffers(phba, pasync_handle->is_header);
}
1647
1648static unsigned int
1649hwi_fwd_async_msg(struct beiscsi_conn *beiscsi_conn,
1650		  struct beiscsi_hba *phba,
1651		  struct hwi_async_pdu_context *pasync_ctx, unsigned short cri)
1652{
1653	struct list_head *plist;
1654	struct async_pdu_handle *pasync_handle;
1655	void *phdr = NULL;
1656	unsigned int hdr_len = 0, buf_len = 0;
1657	unsigned int status, index = 0, offset = 0;
1658	void *pfirst_buffer = NULL;
1659	unsigned int num_buf = 0;
1660
1661	plist = &pasync_ctx->async_entry[cri].wait_queue.list;
1662
1663	list_for_each_entry(pasync_handle, plist, link) {
1664		if (index == 0) {
1665			phdr = pasync_handle->pbuffer;
1666			hdr_len = pasync_handle->buffer_len;
1667		} else {
1668			buf_len = pasync_handle->buffer_len;
1669			if (!num_buf) {
1670				pfirst_buffer = pasync_handle->pbuffer;
1671				num_buf++;
1672			}
1673			memcpy(pfirst_buffer + offset,
1674			       pasync_handle->pbuffer, buf_len);
1675			offset = buf_len;
1676		}
1677		index++;
1678	}
1679
1680	status = beiscsi_process_async_pdu(beiscsi_conn, phba,
1681					   (beiscsi_conn->beiscsi_conn_cid -
1682					    phba->fw_config.iscsi_cid_start),
1683					    phdr, hdr_len, pfirst_buffer,
1684					    buf_len);
1685
1686	if (status == 0)
1687		hwi_free_async_msg(phba, cri);
1688	return 0;
1689}
1690
/**
 * hwi_gather_async_pdu - accumulate one unsolicited PDU fragment
 * @beiscsi_conn: connection the fragment arrived on
 * @phba: adapter instance
 * @pasync_handle: the header or data buffer handle just completed
 *
 * Queues the handle on the connection's wait queue.  A header buffer
 * records how many data bytes the PDU needs (parsed from the PDU base
 * header); a data buffer adds its length to the running count.  Once
 * the expected byte count has been received, the assembled PDU is
 * forwarded via hwi_fwd_async_msg().  Returns the forwarding status,
 * or 0 if the PDU is still incomplete.
 */
static unsigned int
hwi_gather_async_pdu(struct beiscsi_conn *beiscsi_conn,
		     struct beiscsi_hba *phba,
		     struct async_pdu_handle *pasync_handle)
{
	struct hwi_async_pdu_context *pasync_ctx;
	struct hwi_controller *phwi_ctrlr;
	unsigned int bytes_needed = 0, status = 0;
	unsigned short cri = pasync_handle->cri;
	struct pdu_base *ppdu;

	phwi_ctrlr = phba->phwi_ctrlr;
	pasync_ctx = HWI_GET_ASYNC_PDU_CTX(phwi_ctrlr);

	list_del(&pasync_handle->link);
	if (pasync_handle->is_header) {
		pasync_ctx->async_header.busy_entries--;
		/* A second header before the first PDU finished is fatal. */
		if (pasync_ctx->async_entry[cri].wait_queue.hdr_received) {
			hwi_free_async_msg(phba, cri);
			BUG();
		}

		pasync_ctx->async_entry[cri].wait_queue.bytes_received = 0;
		pasync_ctx->async_entry[cri].wait_queue.hdr_received = 1;
		pasync_ctx->async_entry[cri].wait_queue.hdr_len =
				(unsigned short)pasync_handle->buffer_len;
		list_add_tail(&pasync_handle->link,
			      &pasync_ctx->async_entry[cri].wait_queue.list);

		/* Data segment length = (hi byte << 8) | lo 16 bits. */
		ppdu = pasync_handle->pbuffer;
		bytes_needed = ((((ppdu->dw[offsetof(struct amap_pdu_base,
			data_len_hi) / 32] & PDUBASE_DATALENHI_MASK) << 8) &
			0xFFFF0000) | ((be16_to_cpu((ppdu->
			dw[offsetof(struct amap_pdu_base, data_len_lo) / 32]
			& PDUBASE_DATALENLO_MASK) >> 16)) & 0x0000FFFF));

		/* status is still 0 here; this branch is always taken. */
		if (status == 0) {
			pasync_ctx->async_entry[cri].wait_queue.bytes_needed =
			    bytes_needed;

			/* A zero-length PDU is complete immediately. */
			if (bytes_needed == 0)
				status = hwi_fwd_async_msg(beiscsi_conn, phba,
							   pasync_ctx, cri);
		}
	} else {
		pasync_ctx->async_data.busy_entries--;
		/* Data without a header is dropped (handle stays delisted). */
		if (pasync_ctx->async_entry[cri].wait_queue.hdr_received) {
			list_add_tail(&pasync_handle->link,
				      &pasync_ctx->async_entry[cri].wait_queue.
				      list);
			pasync_ctx->async_entry[cri].wait_queue.
				bytes_received +=
				(unsigned short)pasync_handle->buffer_len;

			if (pasync_ctx->async_entry[cri].wait_queue.
			    bytes_received >=
			    pasync_ctx->async_entry[cri].wait_queue.
			    bytes_needed)
				status = hwi_fwd_async_msg(beiscsi_conn, phba,
							   pasync_ctx, cri);
		}
	}
	return status;
}
1755
/**
 * hwi_process_default_pdu_ring - handle an unsolicited header/data CQE
 * @beiscsi_conn: connection the CQE belongs to
 * @phba: adapter instance
 * @pdpdu_cqe: the default-PDU completion entry
 *
 * Resolves the CQE to its buffer handle, updates the ring's writable
 * accounting if the handle was not yet consumed, feeds the fragment to
 * the PDU gather logic and reposts free buffers to the ring.
 */
static void hwi_process_default_pdu_ring(struct beiscsi_conn *beiscsi_conn,
					 struct beiscsi_hba *phba,
					 struct i_t_dpdu_cqe *pdpdu_cqe)
{
	struct hwi_controller *phwi_ctrlr;
	struct hwi_async_pdu_context *pasync_ctx;
	struct async_pdu_handle *pasync_handle = NULL;
	unsigned int cq_index = -1;

	phwi_ctrlr = phba->phwi_ctrlr;
	pasync_ctx = HWI_GET_ASYNC_PDU_CTX(phwi_ctrlr);
	pasync_handle = hwi_get_async_handle(phba, beiscsi_conn, pasync_ctx,
					     pdpdu_cqe, &cq_index);

	/*
	 * NOTE(review): hwi_get_async_handle() can return NULL on an
	 * unexpected CQE code; the dereferences below assume it did not.
	 */
	if (pasync_handle->consumed == 0)
		hwi_update_async_writables(pasync_ctx, pasync_handle->is_header,
					   cq_index);
	hwi_gather_async_pdu(beiscsi_conn, phba, pasync_handle);
	hwi_post_async_buffers(phba, pasync_handle->is_header);
}
1776
/**
 * beiscsi_process_mcc_isr - drain the management (MCC) completion queue
 * @phba: adapter instance
 *
 * Walks the MCC CQ while entries are valid.  Async entries carrying a
 * link-state event are forwarded to the link-state handler; completed
 * MCC commands are processed and the MCC queue's used count dropped.
 * The doorbell is rung every 32 entries without rearm, and once at the
 * end with rearm.
 */
static void  beiscsi_process_mcc_isr(struct beiscsi_hba *phba)
{
	struct be_queue_info *mcc_cq;
	struct  be_mcc_compl *mcc_compl;
	unsigned int num_processed = 0;

	mcc_cq = &phba->ctrl.mcc_obj.cq;
	mcc_compl = queue_tail_node(mcc_cq);
	mcc_compl->flags = le32_to_cpu(mcc_compl->flags);
	while (mcc_compl->flags & CQE_FLAGS_VALID_MASK) {

		/* Ring the doorbell periodically so the queue drains. */
		if (num_processed >= 32) {
			hwi_ring_cq_db(phba, mcc_cq->id,
					num_processed, 0, 0);
			num_processed = 0;
		}
		if (mcc_compl->flags & CQE_FLAGS_ASYNC_MASK) {
			/* Interpret flags as an async trailer */
			if (is_link_state_evt(mcc_compl->flags))
				/* Interpret compl as a async link evt */
				beiscsi_async_link_state_process(phba,
				(struct be_async_event_link_state *) mcc_compl);
			else
				SE_DEBUG(DBG_LVL_1,
					" Unsupported Async Event, flags"
					" = 0x%08x\n", mcc_compl->flags);
		} else if (mcc_compl->flags & CQE_FLAGS_COMPLETED_MASK) {
			be_mcc_compl_process_isr(&phba->ctrl, mcc_compl);
			atomic_dec(&phba->ctrl.mcc_obj.q.used);
		}

		/* Mark consumed and advance to the next entry. */
		mcc_compl->flags = 0;
		queue_tail_inc(mcc_cq);
		mcc_compl = queue_tail_node(mcc_cq);
		mcc_compl->flags = le32_to_cpu(mcc_compl->flags);
		num_processed++;
	}

	if (num_processed > 0)
		hwi_ring_cq_db(phba, mcc_cq->id, num_processed, 1, 0);

}
1819
/**
 * beiscsi_process_cq - reap completions from one completion queue
 * @pbe_eq: event queue object whose CQ is to be drained
 *
 * Walks the CQ until an invalid entry is reached.  Solicited
 * completions are dispatched via hwi_complete_cmd(), unsolicited
 * header/data notifications go to the default-PDU ring handler, and
 * the various error codes either log a message or fail the connection.
 * The doorbell is rung every 32 entries without rearm and once at the
 * end with rearm.
 *
 * Returns the total number of CQEs processed.
 */
static unsigned int beiscsi_process_cq(struct be_eq_obj *pbe_eq)
{
	struct be_queue_info *cq;
	struct sol_cqe *sol;
	struct dmsg_cqe *dmsg;
	unsigned int num_processed = 0;
	unsigned int tot_nump = 0;
	struct beiscsi_conn *beiscsi_conn;
	struct beiscsi_endpoint *beiscsi_ep;
	struct iscsi_endpoint *ep;
	struct beiscsi_hba *phba;

	cq = pbe_eq->cq;
	sol = queue_tail_node(cq);
	phba = pbe_eq->phba;

	while (sol->dw[offsetof(struct amap_sol_cqe, valid) / 32] &
	       CQE_VALID_MASK) {
		be_dws_le_to_cpu(sol, sizeof(struct sol_cqe));

		/* CID in the CQE is offset by the FW's starting CID. */
		ep = phba->ep_array[(u32) ((sol->
				   dw[offsetof(struct amap_sol_cqe, cid) / 32] &
				   SOL_CID_MASK) >> 6) -
				   phba->fw_config.iscsi_cid_start];

		beiscsi_ep = ep->dd_data;
		beiscsi_conn = beiscsi_ep->conn;

		/* Ring the doorbell periodically so the queue drains. */
		if (num_processed >= 32) {
			hwi_ring_cq_db(phba, cq->id,
					num_processed, 0, 0);
			tot_nump += num_processed;
			num_processed = 0;
		}

		switch ((u32) sol->dw[offsetof(struct amap_sol_cqe, code) /
			32] & CQE_CODE_MASK) {
		case SOL_CMD_COMPLETE:
			hwi_complete_cmd(beiscsi_conn, phba, sol);
			break;
		case DRIVERMSG_NOTIFY:
			SE_DEBUG(DBG_LVL_8, "Received DRIVERMSG_NOTIFY\n");
			dmsg = (struct dmsg_cqe *)sol;
			hwi_complete_drvr_msgs(beiscsi_conn, phba, sol);
			break;
		case UNSOL_HDR_NOTIFY:
			SE_DEBUG(DBG_LVL_8, "Received UNSOL_HDR_ NOTIFY\n");
			hwi_process_default_pdu_ring(beiscsi_conn, phba,
					     (struct i_t_dpdu_cqe *)sol);
			break;
		case UNSOL_DATA_NOTIFY:
			SE_DEBUG(DBG_LVL_8, "Received UNSOL_DATA_NOTIFY\n");
			hwi_process_default_pdu_ring(beiscsi_conn, phba,
					     (struct i_t_dpdu_cqe *)sol);
			break;
		case CXN_INVALIDATE_INDEX_NOTIFY:
		case CMD_INVALIDATED_NOTIFY:
		case CXN_INVALIDATE_NOTIFY:
			/* Expected during cmd/cxn invalidation; ignore. */
			SE_DEBUG(DBG_LVL_1,
				 "Ignoring CQ Error notification for cmd/cxn"
				 "invalidate\n");
			break;
		case SOL_CMD_KILLED_DATA_DIGEST_ERR:
		case CMD_KILLED_INVALID_STATSN_RCVD:
		case CMD_KILLED_INVALID_R2T_RCVD:
		case CMD_CXN_KILLED_LUN_INVALID:
		case CMD_CXN_KILLED_ICD_INVALID:
		case CMD_CXN_KILLED_ITT_INVALID:
		case CMD_CXN_KILLED_SEQ_OUTOFORDER:
		case CMD_CXN_KILLED_INVALID_DATASN_RCVD:
			/* Command-level errors: log only. */
			SE_DEBUG(DBG_LVL_1,
				 "CQ Error notification for cmd.. "
				 "code %d cid 0x%x\n",
				 sol->dw[offsetof(struct amap_sol_cqe, code) /
				 32] & CQE_CODE_MASK,
				 (sol->dw[offsetof(struct amap_sol_cqe, cid) /
				 32] & SOL_CID_MASK));
			break;
		case UNSOL_DATA_DIGEST_ERROR_NOTIFY:
			SE_DEBUG(DBG_LVL_1,
				 "Digest error on def pdu ring, dropping..\n");
			hwi_flush_default_pdu_buffer(phba, beiscsi_conn,
					     (struct i_t_dpdu_cqe *) sol);
			break;
		case CXN_KILLED_PDU_SIZE_EXCEEDS_DSL:
		case CXN_KILLED_BURST_LEN_MISMATCH:
		case CXN_KILLED_AHS_RCVD:
		case CXN_KILLED_HDR_DIGEST_ERR:
		case CXN_KILLED_UNKNOWN_HDR:
		case CXN_KILLED_STALE_ITT_TTT_RCVD:
		case CXN_KILLED_INVALID_ITT_TTT_RCVD:
		case CXN_KILLED_TIMED_OUT:
		case CXN_KILLED_FIN_RCVD:
		case CXN_KILLED_BAD_UNSOL_PDU_RCVD:
		case CXN_KILLED_BAD_WRB_INDEX_ERROR:
		case CXN_KILLED_OVER_RUN_RESIDUAL:
		case CXN_KILLED_UNDER_RUN_RESIDUAL:
		case CXN_KILLED_CMND_DATA_NOT_ON_SAME_CONN:
			/* Connection-fatal errors: fail the connection. */
			SE_DEBUG(DBG_LVL_1, "CQ Error %d, reset CID "
				 "0x%x...\n",
				 sol->dw[offsetof(struct amap_sol_cqe, code) /
				 32] & CQE_CODE_MASK,
				 (sol->dw[offsetof(struct amap_sol_cqe, cid) /
				 32] & CQE_CID_MASK));
			iscsi_conn_failure(beiscsi_conn->conn,
					   ISCSI_ERR_CONN_FAILED);
			break;
		case CXN_KILLED_RST_SENT:
		case CXN_KILLED_RST_RCVD:
			SE_DEBUG(DBG_LVL_1, "CQ Error %d, reset"
				"received/sent on CID 0x%x...\n",
				 sol->dw[offsetof(struct amap_sol_cqe, code) /
				 32] & CQE_CODE_MASK,
				 (sol->dw[offsetof(struct amap_sol_cqe, cid) /
				 32] & CQE_CID_MASK));
			iscsi_conn_failure(beiscsi_conn->conn,
					   ISCSI_ERR_CONN_FAILED);
			break;
		default:
			SE_DEBUG(DBG_LVL_1, "CQ Error Invalid code= %d "
				 "received on CID 0x%x...\n",
				 sol->dw[offsetof(struct amap_sol_cqe, code) /
				 32] & CQE_CODE_MASK,
				 (sol->dw[offsetof(struct amap_sol_cqe, cid) /
				 32] & CQE_CID_MASK));
			break;
		}

		/* Mark the entry consumed and advance to the next one. */
		AMAP_SET_BITS(struct amap_sol_cqe, valid, sol, 0);
		queue_tail_inc(cq);
		sol = queue_tail_node(cq);
		num_processed++;
	}

	if (num_processed > 0) {
		tot_nump += num_processed;
		hwi_ring_cq_db(phba, cq->id, num_processed, 1, 0);
	}
	return tot_nump;
}
1960
/**
 * beiscsi_process_all_cqs - workqueue handler that drains pending CQs
 * @work: embedded work_struct inside the beiscsi_hba (work_cqs)
 *
 * Scheduled from interrupt context when MCC and/or iSCSI CQ work is
 * pending.  Clears the todo flags under isr_lock and then processes the
 * corresponding completion queues outside the lock.
 */
void beiscsi_process_all_cqs(struct work_struct *work)
{
	unsigned long flags;
	struct hwi_controller *phwi_ctrlr;
	struct hwi_context_memory *phwi_context;
	struct be_eq_obj *pbe_eq;
	struct beiscsi_hba *phba =
	    container_of(work, struct beiscsi_hba, work_cqs);

	phwi_ctrlr = phba->phwi_ctrlr;
	phwi_context = phwi_ctrlr->phwi_ctxt;
	/* With MSI-X the last EQ (index num_cpus) is the MCC/slow-path EQ;
	 * in INTx mode everything runs off EQ 0. */
	if (phba->msix_enabled)
		pbe_eq = &phwi_context->be_eq[phba->num_cpus];
	else
		pbe_eq = &phwi_context->be_eq[0];

	/* NOTE(review): todo_mcc_cq is tested outside isr_lock and only the
	 * clear is locked; a flag set between the test and the clear is
	 * still handled because the CQ is drained right after — confirm
	 * this matches the ISR's set-side ordering. */
	if (phba->todo_mcc_cq) {
		spin_lock_irqsave(&phba->isr_lock, flags);
		phba->todo_mcc_cq = 0;
		spin_unlock_irqrestore(&phba->isr_lock, flags);
		beiscsi_process_mcc_isr(phba);
	}

	if (phba->todo_cq) {
		spin_lock_irqsave(&phba->isr_lock, flags);
		phba->todo_cq = 0;
		spin_unlock_irqrestore(&phba->isr_lock, flags);
		beiscsi_process_cq(pbe_eq);
	}
}
1991
1992static int be_iopoll(struct blk_iopoll *iop, int budget)
1993{
1994	static unsigned int ret;
1995	struct beiscsi_hba *phba;
1996	struct be_eq_obj *pbe_eq;
1997
1998	pbe_eq = container_of(iop, struct be_eq_obj, iopoll);
1999	ret = beiscsi_process_cq(pbe_eq);
2000	if (ret < budget) {
2001		phba = pbe_eq->phba;
2002		blk_iopoll_complete(iop);
2003		SE_DEBUG(DBG_LVL_8, "rearm pbe_eq->q.id =%d\n", pbe_eq->q.id);
2004		hwi_ring_eq_db(phba, pbe_eq->q.id, 0, 0, 1, 1);
2005	}
2006	return ret;
2007}
2008
/**
 * hwi_write_sgl - program the WRB inline SGEs and the SGL fragment page
 * @pwrb:    work request block to fill in
 * @sg:      head of the DMA-mapped scatterlist
 * @num_sg:  number of mapped scatterlist entries
 * @io_task: per-task state holding the BHS buffer and SGL fragment page
 *
 * The WRB itself can describe at most two SGEs inline (sge0/sge1); the
 * full scatterlist is additionally written into the task's SGL fragment
 * page, whose first entry always describes the BHS.
 */
static void
hwi_write_sgl(struct iscsi_wrb *pwrb, struct scatterlist *sg,
	      unsigned int num_sg, struct beiscsi_io_task *io_task)
{
	struct iscsi_sge *psgl;
	unsigned int sg_len, index;
	unsigned int sge_len = 0;
	unsigned long long addr;
	struct scatterlist *l_sg;
	unsigned int offset;

	/* Point the WRB at the task's basic header segment buffer. */
	AMAP_SET_BITS(struct amap_iscsi_wrb, iscsi_bhs_addr_lo, pwrb,
				      io_task->bhs_pa.u.a32.address_lo);
	AMAP_SET_BITS(struct amap_iscsi_wrb, iscsi_bhs_addr_hi, pwrb,
				      io_task->bhs_pa.u.a32.address_hi);

	/* Fill the (at most) two inline SGEs in the WRB. */
	l_sg = sg;
	for (index = 0; (index < num_sg) && (index < 2); index++,
							 sg = sg_next(sg)) {
		if (index == 0) {
			sg_len = sg_dma_len(sg);
			addr = (u64) sg_dma_address(sg);
			AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_addr_lo, pwrb,
						((u32)(addr & 0xFFFFFFFF)));
			AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_addr_hi, pwrb,
							((u32)(addr >> 32)));
			AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_len, pwrb,
							sg_len);
			sge_len = sg_len;
		} else {
			/* sge1 carries the running offset of sge0's bytes. */
			AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_r2t_offset,
							pwrb, sge_len);
			sg_len = sg_dma_len(sg);
			addr = (u64) sg_dma_address(sg);
			AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_addr_lo, pwrb,
						((u32)(addr & 0xFFFFFFFF)));
			AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_addr_hi, pwrb,
							((u32)(addr >> 32)));
			AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_len, pwrb,
							sg_len);
		}
	}
	/* First SGL-page entry describes the BHS (len excludes 2 pad bytes). */
	psgl = (struct iscsi_sge *)io_task->psgl_handle->pfrag;
	memset(psgl, 0, sizeof(*psgl) * BE2_SGE);

	AMAP_SET_BITS(struct amap_iscsi_sge, len, psgl, io_task->bhs_len - 2);

	AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, psgl,
			io_task->bhs_pa.u.a32.address_hi);
	AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, psgl,
			io_task->bhs_pa.u.a32.address_lo);

	/* Mark which inline SGE (if any) is the last one. */
	if (num_sg == 1) {
		AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_last, pwrb,
								1);
		AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_last, pwrb,
								0);
	} else if (num_sg == 2) {
		AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_last, pwrb,
								0);
		AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_last, pwrb,
								1);
	} else {
		AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_last, pwrb,
								0);
		AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_last, pwrb,
								0);
	}
	/* Restart from the scatterlist head and write the full SGL; the
	 * second SGL-page slot is skipped (psgl advanced twice). */
	sg = l_sg;
	psgl++;
	psgl++;
	offset = 0;
	for (index = 0; index < num_sg; index++, sg = sg_next(sg), psgl++) {
		sg_len = sg_dma_len(sg);
		addr = (u64) sg_dma_address(sg);
		AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, psgl,
						(addr & 0xFFFFFFFF));
		AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, psgl,
						(addr >> 32));
		AMAP_SET_BITS(struct amap_iscsi_sge, len, psgl, sg_len);
		AMAP_SET_BITS(struct amap_iscsi_sge, sge_offset, psgl, offset);
		AMAP_SET_BITS(struct amap_iscsi_sge, last_sge, psgl, 0);
		offset += sg_len;
	}
	/* Flag the final SGE just written. */
	psgl--;
	AMAP_SET_BITS(struct amap_iscsi_sge, last_sge, psgl, 1);
}
2096
2097static void hwi_write_buffer(struct iscsi_wrb *pwrb, struct iscsi_task *task)
2098{
2099	struct iscsi_sge *psgl;
2100	unsigned long long addr;
2101	struct beiscsi_io_task *io_task = task->dd_data;
2102	struct beiscsi_conn *beiscsi_conn = io_task->conn;
2103	struct beiscsi_hba *phba = beiscsi_conn->phba;
2104
2105	io_task->bhs_len = sizeof(struct be_nonio_bhs) - 2;
2106	AMAP_SET_BITS(struct amap_iscsi_wrb, iscsi_bhs_addr_lo, pwrb,
2107				io_task->bhs_pa.u.a32.address_lo);
2108	AMAP_SET_BITS(struct amap_iscsi_wrb, iscsi_bhs_addr_hi, pwrb,
2109				io_task->bhs_pa.u.a32.address_hi);
2110
2111	if (task->data) {
2112		if (task->data_count) {
2113			AMAP_SET_BITS(struct amap_iscsi_wrb, dsp, pwrb, 1);
2114			addr = (u64) pci_map_single(phba->pcidev,
2115						    task->data,
2116						    task->data_count, 1);
2117		} else {
2118			AMAP_SET_BITS(struct amap_iscsi_wrb, dsp, pwrb, 0);
2119			addr = 0;
2120		}
2121		AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_addr_lo, pwrb,
2122						((u32)(addr & 0xFFFFFFFF)));
2123		AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_addr_hi, pwrb,
2124						((u32)(addr >> 32)));
2125		AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_len, pwrb,
2126						task->data_count);
2127
2128		AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_last, pwrb, 1);
2129	} else {
2130		AMAP_SET_BITS(struct amap_iscsi_wrb, dsp, pwrb, 0);
2131		addr = 0;
2132	}
2133
2134	psgl = (struct iscsi_sge *)io_task->psgl_handle->pfrag;
2135
2136	AMAP_SET_BITS(struct amap_iscsi_sge, len, psgl, io_task->bhs_len);
2137
2138	AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, psgl,
2139		      io_task->bhs_pa.u.a32.address_hi);
2140	AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, psgl,
2141		      io_task->bhs_pa.u.a32.address_lo);
2142	if (task->data) {
2143		psgl++;
2144		AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, psgl, 0);
2145		AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, psgl, 0);
2146		AMAP_SET_BITS(struct amap_iscsi_sge, len, psgl, 0);
2147		AMAP_SET_BITS(struct amap_iscsi_sge, sge_offset, psgl, 0);
2148		AMAP_SET_BITS(struct amap_iscsi_sge, rsvd0, psgl, 0);
2149		AMAP_SET_BITS(struct amap_iscsi_sge, last_sge, psgl, 0);
2150
2151		psgl++;
2152		if (task->data) {
2153			AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, psgl,
2154						((u32)(addr & 0xFFFFFFFF)));
2155			AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, psgl,
2156						((u32)(addr >> 32)));
2157		}
2158		AMAP_SET_BITS(struct amap_iscsi_sge, len, psgl, 0x106);
2159	}
2160	AMAP_SET_BITS(struct amap_iscsi_sge, last_sge, psgl, 1);
2161}
2162
2163static void beiscsi_find_mem_req(struct beiscsi_hba *phba)
2164{
2165	unsigned int num_cq_pages, num_async_pdu_buf_pages;
2166	unsigned int num_async_pdu_data_pages, wrb_sz_per_cxn;
2167	unsigned int num_async_pdu_buf_sgl_pages, num_async_pdu_data_sgl_pages;
2168
2169	num_cq_pages = PAGES_REQUIRED(phba->params.num_cq_entries * \
2170				      sizeof(struct sol_cqe));
2171	num_async_pdu_buf_pages =
2172			PAGES_REQUIRED(phba->params.asyncpdus_per_ctrl * \
2173				       phba->params.defpdu_hdr_sz);
2174	num_async_pdu_buf_sgl_pages =
2175			PAGES_REQUIRED(phba->params.asyncpdus_per_ctrl * \
2176				       sizeof(struct phys_addr));
2177	num_async_pdu_data_pages =
2178			PAGES_REQUIRED(phba->params.asyncpdus_per_ctrl * \
2179				       phba->params.defpdu_data_sz);
2180	num_async_pdu_data_sgl_pages =
2181			PAGES_REQUIRED(phba->params.asyncpdus_per_ctrl * \
2182				       sizeof(struct phys_addr));
2183
2184	phba->params.hwi_ws_sz = sizeof(struct hwi_controller);
2185
2186	phba->mem_req[ISCSI_MEM_GLOBAL_HEADER] = 2 *
2187						 BE_ISCSI_PDU_HEADER_SIZE;
2188	phba->mem_req[HWI_MEM_ADDN_CONTEXT] =
2189					    sizeof(struct hwi_context_memory);
2190
2191
2192	phba->mem_req[HWI_MEM_WRB] = sizeof(struct iscsi_wrb)
2193	    * (phba->params.wrbs_per_cxn)
2194	    * phba->params.cxns_per_ctrl;
2195	wrb_sz_per_cxn =  sizeof(struct wrb_handle) *
2196				 (phba->params.wrbs_per_cxn);
2197	phba->mem_req[HWI_MEM_WRBH] = roundup_pow_of_two((wrb_sz_per_cxn) *
2198				phba->params.cxns_per_ctrl);
2199
2200	phba->mem_req[HWI_MEM_SGLH] = sizeof(struct sgl_handle) *
2201		phba->params.icds_per_ctrl;
2202	phba->mem_req[HWI_MEM_SGE] = sizeof(struct iscsi_sge) *
2203		phba->params.num_sge_per_io * phba->params.icds_per_ctrl;
2204
2205	phba->mem_req[HWI_MEM_ASYNC_HEADER_BUF] =
2206		num_async_pdu_buf_pages * PAGE_SIZE;
2207	phba->mem_req[HWI_MEM_ASYNC_DATA_BUF] =
2208		num_async_pdu_data_pages * PAGE_SIZE;
2209	phba->mem_req[HWI_MEM_ASYNC_HEADER_RING] =
2210		num_async_pdu_buf_sgl_pages * PAGE_SIZE;
2211	phba->mem_req[HWI_MEM_ASYNC_DATA_RING] =
2212		num_async_pdu_data_sgl_pages * PAGE_SIZE;
2213	phba->mem_req[HWI_MEM_ASYNC_HEADER_HANDLE] =
2214		phba->params.asyncpdus_per_ctrl *
2215		sizeof(struct async_pdu_handle);
2216	phba->mem_req[HWI_MEM_ASYNC_DATA_HANDLE] =
2217		phba->params.asyncpdus_per_ctrl *
2218		sizeof(struct async_pdu_handle);
2219	phba->mem_req[HWI_MEM_ASYNC_PDU_CONTEXT] =
2220		sizeof(struct hwi_async_pdu_context) +
2221		(phba->params.cxns_per_ctrl * sizeof(struct hwi_async_entry));
2222}
2223
2224static int beiscsi_alloc_mem(struct beiscsi_hba *phba)
2225{
2226	struct be_mem_descriptor *mem_descr;
2227	dma_addr_t bus_add;
2228	struct mem_array *mem_arr, *mem_arr_orig;
2229	unsigned int i, j, alloc_size, curr_alloc_size;
2230
2231	phba->phwi_ctrlr = kmalloc(phba->params.hwi_ws_sz, GFP_KERNEL);
2232	if (!phba->phwi_ctrlr)
2233		return -ENOMEM;
2234
2235	phba->init_mem = kcalloc(SE_MEM_MAX, sizeof(*mem_descr),
2236				 GFP_KERNEL);
2237	if (!phba->init_mem) {
2238		kfree(phba->phwi_ctrlr);
2239		return -ENOMEM;
2240	}
2241
2242	mem_arr_orig = kmalloc(sizeof(*mem_arr_orig) * BEISCSI_MAX_FRAGS_INIT,
2243			       GFP_KERNEL);
2244	if (!mem_arr_orig) {
2245		kfree(phba->init_mem);
2246		kfree(phba->phwi_ctrlr);
2247		return -ENOMEM;
2248	}
2249
2250	mem_descr = phba->init_mem;
2251	for (i = 0; i < SE_MEM_MAX; i++) {
2252		j = 0;
2253		mem_arr = mem_arr_orig;
2254		alloc_size = phba->mem_req[i];
2255		memset(mem_arr, 0, sizeof(struct mem_array) *
2256		       BEISCSI_MAX_FRAGS_INIT);
2257		curr_alloc_size = min(be_max_phys_size * 1024, alloc_size);
2258		do {
2259			mem_arr->virtual_address = pci_alloc_consistent(
2260							phba->pcidev,
2261							curr_alloc_size,
2262							&bus_add);
2263			if (!mem_arr->virtual_address) {
2264				if (curr_alloc_size <= BE_MIN_MEM_SIZE)
2265					goto free_mem;
2266				if (curr_alloc_size -
2267					rounddown_pow_of_two(curr_alloc_size))
2268					curr_alloc_size = rounddown_pow_of_two
2269							     (curr_alloc_size);
2270				else
2271					curr_alloc_size = curr_alloc_size / 2;
2272			} else {
2273				mem_arr->bus_address.u.
2274				    a64.address = (__u64) bus_add;
2275				mem_arr->size = curr_alloc_size;
2276				alloc_size -= curr_alloc_size;
2277				curr_alloc_size = min(be_max_phys_size *
2278						      1024, alloc_size);
2279				j++;
2280				mem_arr++;
2281			}
2282		} while (alloc_size);
2283		mem_descr->num_elements = j;
2284		mem_descr->size_in_bytes = phba->mem_req[i];
2285		mem_descr->mem_array = kmalloc(sizeof(*mem_arr) * j,
2286					       GFP_KERNEL);
2287		if (!mem_descr->mem_array)
2288			goto free_mem;
2289
2290		memcpy(mem_descr->mem_array, mem_arr_orig,
2291		       sizeof(struct mem_array) * j);
2292		mem_descr++;
2293	}
2294	kfree(mem_arr_orig);
2295	return 0;
2296free_mem:
2297	mem_descr->num_elements = j;
2298	while ((i) || (j)) {
2299		for (j = mem_descr->num_elements; j > 0; j--) {
2300			pci_free_consistent(phba->pcidev,
2301					    mem_descr->mem_array[j - 1].size,
2302					    mem_descr->mem_array[j - 1].
2303					    virtual_address,
2304					    (unsigned long)mem_descr->
2305					    mem_array[j - 1].
2306					    bus_address.u.a64.address);
2307		}
2308		if (i) {
2309			i--;
2310			kfree(mem_descr->mem_array);
2311			mem_descr--;
2312		}
2313	}
2314	kfree(mem_arr_orig);
2315	kfree(phba->init_mem);
2316	kfree(phba->phwi_ctrlr);
2317	return -ENOMEM;
2318}
2319
/* Size all HWI memory regions, then allocate them.  Returns 0 or -ENOMEM. */
static int beiscsi_get_memory(struct beiscsi_hba *phba)
{
	int status;

	beiscsi_find_mem_req(phba);
	status = beiscsi_alloc_mem(phba);
	return status;
}
2325
2326static void iscsi_init_global_templates(struct beiscsi_hba *phba)
2327{
2328	struct pdu_data_out *pdata_out;
2329	struct pdu_nop_out *pnop_out;
2330	struct be_mem_descriptor *mem_descr;
2331
2332	mem_descr = phba->init_mem;
2333	mem_descr += ISCSI_MEM_GLOBAL_HEADER;
2334	pdata_out =
2335	    (struct pdu_data_out *)mem_descr->mem_array[0].virtual_address;
2336	memset(pdata_out, 0, BE_ISCSI_PDU_HEADER_SIZE);
2337
2338	AMAP_SET_BITS(struct amap_pdu_data_out, opcode, pdata_out,
2339		      IIOC_SCSI_DATA);
2340
2341	pnop_out =
2342	    (struct pdu_nop_out *)((unsigned char *)mem_descr->mem_array[0].
2343				   virtual_address + BE_ISCSI_PDU_HEADER_SIZE);
2344
2345	memset(pnop_out, 0, BE_ISCSI_PDU_HEADER_SIZE);
2346	AMAP_SET_BITS(struct amap_pdu_nop_out, ttt, pnop_out, 0xFFFFFFFF);
2347	AMAP_SET_BITS(struct amap_pdu_nop_out, f_bit, pnop_out, 1);
2348	AMAP_SET_BITS(struct amap_pdu_nop_out, i_bit, pnop_out, 0);
2349}
2350
/**
 * beiscsi_init_wrb_handle - wire up WRB handles and WRB rings per context
 * @phba: adapter instance
 *
 * Distributes the (possibly fragmented) HWI_MEM_WRBH handle memory and
 * HWI_MEM_WRB ring memory across the per-connection wrb_contexts.  Each
 * fragment holds wrb handles/rings for a whole number of connections;
 * when one fragment is exhausted the next mem_array element is used.
 *
 * NOTE(review): the two kzalloc() results below are not checked for NULL,
 * and the else-branch never resets wrb_handles_available to 0 before
 * incrementing it (phwi_ctrlr comes from kmalloc) — confirm against the
 * upstream driver before relying on those counters.
 */
static void beiscsi_init_wrb_handle(struct beiscsi_hba *phba)
{
	struct be_mem_descriptor *mem_descr_wrbh, *mem_descr_wrb;
	struct wrb_handle *pwrb_handle;
	struct hwi_controller *phwi_ctrlr;
	struct hwi_wrb_context *pwrb_context;
	struct iscsi_wrb *pwrb;
	unsigned int num_cxn_wrbh;
	unsigned int num_cxn_wrb, j, idx, index;

	mem_descr_wrbh = phba->init_mem;
	mem_descr_wrbh += HWI_MEM_WRBH;

	mem_descr_wrb = phba->init_mem;
	mem_descr_wrb += HWI_MEM_WRB;

	/* num_cxn_wrbh = how many connections the current handle fragment
	 * can still serve. */
	idx = 0;
	pwrb_handle = mem_descr_wrbh->mem_array[idx].virtual_address;
	num_cxn_wrbh = ((mem_descr_wrbh->mem_array[idx].size) /
			((sizeof(struct wrb_handle)) *
			 phba->params.wrbs_per_cxn));
	phwi_ctrlr = phba->phwi_ctrlr;

	/* Only even wrb_context slots are used (index += 2). */
	for (index = 0; index < phba->params.cxns_per_ctrl * 2; index += 2) {
		pwrb_context = &phwi_ctrlr->wrb_context[index];
		pwrb_context->pwrb_handle_base =
				kzalloc(sizeof(struct wrb_handle *) *
					phba->params.wrbs_per_cxn, GFP_KERNEL);
		pwrb_context->pwrb_handle_basestd =
				kzalloc(sizeof(struct wrb_handle *) *
					phba->params.wrbs_per_cxn, GFP_KERNEL);
		if (num_cxn_wrbh) {
			/* Carve this connection's handles out of the
			 * current fragment. */
			pwrb_context->alloc_index = 0;
			pwrb_context->wrb_handles_available = 0;
			for (j = 0; j < phba->params.wrbs_per_cxn; j++) {
				pwrb_context->pwrb_handle_base[j] = pwrb_handle;
				pwrb_context->pwrb_handle_basestd[j] =
								pwrb_handle;
				pwrb_context->wrb_handles_available++;
				pwrb_handle->wrb_index = j;
				pwrb_handle++;
			}
			pwrb_context->free_index = 0;
			num_cxn_wrbh--;
		} else {
			/* Current fragment exhausted: switch to the next
			 * mem_array element and carve from there. */
			idx++;
			pwrb_handle =
			    mem_descr_wrbh->mem_array[idx].virtual_address;
			num_cxn_wrbh =
			    ((mem_descr_wrbh->mem_array[idx].size) /
			     ((sizeof(struct wrb_handle)) *
			      phba->params.wrbs_per_cxn));
			pwrb_context->alloc_index = 0;
			for (j = 0; j < phba->params.wrbs_per_cxn; j++) {
				pwrb_context->pwrb_handle_base[j] = pwrb_handle;
				pwrb_context->pwrb_handle_basestd[j] =
				    pwrb_handle;
				pwrb_context->wrb_handles_available++;
				pwrb_handle->wrb_index = j;
				pwrb_handle++;
			}
			pwrb_context->free_index = 0;
			num_cxn_wrbh--;
		}
	}
	/* Second pass: attach the actual WRB ring entries to the handles,
	 * walking the HWI_MEM_WRB fragments the same way. */
	idx = 0;
	pwrb = mem_descr_wrb->mem_array[idx].virtual_address;
	num_cxn_wrb = (mem_descr_wrb->mem_array[idx].size) /
		      ((sizeof(struct iscsi_wrb) *
			phba->params.wrbs_per_cxn));
	for (index = 0; index < phba->params.cxns_per_ctrl * 2; index += 2) {
		pwrb_context = &phwi_ctrlr->wrb_context[index];
		if (num_cxn_wrb) {
			for (j = 0; j < phba->params.wrbs_per_cxn; j++) {
				pwrb_handle = pwrb_context->pwrb_handle_base[j];
				pwrb_handle->pwrb = pwrb;
				pwrb++;
			}
			num_cxn_wrb--;
		} else {
			idx++;
			pwrb = mem_descr_wrb->mem_array[idx].virtual_address;
			num_cxn_wrb = (mem_descr_wrb->mem_array[idx].size) /
				      ((sizeof(struct iscsi_wrb) *
					phba->params.wrbs_per_cxn));
			for (j = 0; j < phba->params.wrbs_per_cxn; j++) {
				pwrb_handle = pwrb_context->pwrb_handle_base[j];
				pwrb_handle->pwrb = pwrb;
				pwrb++;
			}
			num_cxn_wrb--;
		}
	}
}
2445
/**
 * hwi_init_async_pdu_ctx - build the async (unsolicited) PDU context
 * @phba: adapter instance
 *
 * Lays the hwi_async_pdu_context over the HWI_MEM_ASYNC_PDU_CONTEXT
 * region, then wires its header and data sides to their respective
 * buffer, ring and handle regions, and finally threads every handle onto
 * the corresponding free list with its buffer VA/PA filled in.
 */
static void hwi_init_async_pdu_ctx(struct beiscsi_hba *phba)
{
	struct hwi_controller *phwi_ctrlr;
	struct hba_parameters *p = &phba->params;
	struct hwi_async_pdu_context *pasync_ctx;
	struct async_pdu_handle *pasync_header_h, *pasync_data_h;
	unsigned int index;
	struct be_mem_descriptor *mem_descr;

	/* The context structure itself lives in its own region. */
	mem_descr = (struct be_mem_descriptor *)phba->init_mem;
	mem_descr += HWI_MEM_ASYNC_PDU_CONTEXT;

	phwi_ctrlr = phba->phwi_ctrlr;
	phwi_ctrlr->phwi_ctxt->pasync_ctx = (struct hwi_async_pdu_context *)
				mem_descr->mem_array[0].virtual_address;
	pasync_ctx = phwi_ctrlr->phwi_ctxt->pasync_ctx;
	memset(pasync_ctx, 0, sizeof(*pasync_ctx));

	pasync_ctx->async_header.num_entries = p->asyncpdus_per_ctrl;
	pasync_ctx->async_header.buffer_size = p->defpdu_hdr_sz;
	pasync_ctx->async_data.buffer_size = p->defpdu_data_sz;
	pasync_ctx->async_data.num_entries = p->asyncpdus_per_ctrl;

	/* Header-side buffer pool. */
	mem_descr = (struct be_mem_descriptor *)phba->init_mem;
	mem_descr += HWI_MEM_ASYNC_HEADER_BUF;
	if (mem_descr->mem_array[0].virtual_address) {
		SE_DEBUG(DBG_LVL_8,
			 "hwi_init_async_pdu_ctx HWI_MEM_ASYNC_HEADER_BUF"
			 "va=%p\n", mem_descr->mem_array[0].virtual_address);
	} else
		shost_printk(KERN_WARNING, phba->shost,
			     "No Virtual address\n");

	pasync_ctx->async_header.va_base =
			mem_descr->mem_array[0].virtual_address;

	pasync_ctx->async_header.pa_base.u.a64.address =
			mem_descr->mem_array[0].bus_address.u.a64.address;

	/* Header-side ring of physical addresses posted to hardware. */
	mem_descr = (struct be_mem_descriptor *)phba->init_mem;
	mem_descr += HWI_MEM_ASYNC_HEADER_RING;
	if (mem_descr->mem_array[0].virtual_address) {
		SE_DEBUG(DBG_LVL_8,
			 "hwi_init_async_pdu_ctx HWI_MEM_ASYNC_HEADER_RING"
			 "va=%p\n", mem_descr->mem_array[0].virtual_address);
	} else
		shost_printk(KERN_WARNING, phba->shost,
			    "No Virtual address\n");
	pasync_ctx->async_header.ring_base =
			mem_descr->mem_array[0].virtual_address;

	/* Header-side handle array. */
	mem_descr = (struct be_mem_descriptor *)phba->init_mem;
	mem_descr += HWI_MEM_ASYNC_HEADER_HANDLE;
	if (mem_descr->mem_array[0].virtual_address) {
		SE_DEBUG(DBG_LVL_8,
			 "hwi_init_async_pdu_ctx HWI_MEM_ASYNC_HEADER_HANDLE"
			 "va=%p\n", mem_descr->mem_array[0].virtual_address);
	} else
		shost_printk(KERN_WARNING, phba->shost,
			    "No Virtual address\n");

	pasync_ctx->async_header.handle_base =
			mem_descr->mem_array[0].virtual_address;
	pasync_ctx->async_header.writables = 0;
	INIT_LIST_HEAD(&pasync_ctx->async_header.free_list);

	/* Data-side buffer pool. */
	mem_descr = (struct be_mem_descriptor *)phba->init_mem;
	mem_descr += HWI_MEM_ASYNC_DATA_BUF;
	if (mem_descr->mem_array[0].virtual_address) {
		SE_DEBUG(DBG_LVL_8,
			 "hwi_init_async_pdu_ctx HWI_MEM_ASYNC_DATA_BUF"
			 "va=%p\n", mem_descr->mem_array[0].virtual_address);
	} else
		shost_printk(KERN_WARNING, phba->shost,
			    "No Virtual address\n");
	pasync_ctx->async_data.va_base =
			mem_descr->mem_array[0].virtual_address;
	pasync_ctx->async_data.pa_base.u.a64.address =
			mem_descr->mem_array[0].bus_address.u.a64.address;

	/* Data-side ring. */
	mem_descr = (struct be_mem_descriptor *)phba->init_mem;
	mem_descr += HWI_MEM_ASYNC_DATA_RING;
	if (mem_descr->mem_array[0].virtual_address) {
		SE_DEBUG(DBG_LVL_8,
			 "hwi_init_async_pdu_ctx HWI_MEM_ASYNC_DATA_RING"
			 "va=%p\n", mem_descr->mem_array[0].virtual_address);
	} else
		shost_printk(KERN_WARNING, phba->shost,
			     "No Virtual address\n");

	pasync_ctx->async_data.ring_base =
			mem_descr->mem_array[0].virtual_address;

	/* Data-side handle array. */
	mem_descr = (struct be_mem_descriptor *)phba->init_mem;
	mem_descr += HWI_MEM_ASYNC_DATA_HANDLE;
	if (!mem_descr->mem_array[0].virtual_address)
		shost_printk(KERN_WARNING, phba->shost,
			    "No Virtual address\n");

	pasync_ctx->async_data.handle_base =
			mem_descr->mem_array[0].virtual_address;
	pasync_ctx->async_data.writables = 0;
	INIT_LIST_HEAD(&pasync_ctx->async_data.free_list);

	pasync_header_h =
		(struct async_pdu_handle *)pasync_ctx->async_header.handle_base;
	pasync_data_h =
		(struct async_pdu_handle *)pasync_ctx->async_data.handle_base;

	/* Thread every header/data handle onto its free list, pointing it
	 * at its slice of the buffer pool (VA and PA).
	 * NOTE(review): index is stored into a char — truncates if
	 * asyncpdus_per_ctrl can exceed the char range; confirm limits. */
	for (index = 0; index < p->asyncpdus_per_ctrl; index++) {
		pasync_header_h->cri = -1;
		pasync_header_h->index = (char)index;
		INIT_LIST_HEAD(&pasync_header_h->link);
		pasync_header_h->pbuffer =
			(void *)((unsigned long)
			(pasync_ctx->async_header.va_base) +
			(p->defpdu_hdr_sz * index));

		pasync_header_h->pa.u.a64.address =
			pasync_ctx->async_header.pa_base.u.a64.address +
			(p->defpdu_hdr_sz * index);

		list_add_tail(&pasync_header_h->link,
				&pasync_ctx->async_header.free_list);
		pasync_header_h++;
		pasync_ctx->async_header.free_entries++;
		pasync_ctx->async_header.writables++;

		INIT_LIST_HEAD(&pasync_ctx->async_entry[index].wait_queue.list);
		INIT_LIST_HEAD(&pasync_ctx->async_entry[index].
			       header_busy_list);
		pasync_data_h->cri = -1;
		pasync_data_h->index = (char)index;
		INIT_LIST_HEAD(&pasync_data_h->link);
		pasync_data_h->pbuffer =
			(void *)((unsigned long)
			(pasync_ctx->async_data.va_base) +
			(p->defpdu_data_sz * index));

		pasync_data_h->pa.u.a64.address =
		    pasync_ctx->async_data.pa_base.u.a64.address +
		    (p->defpdu_data_sz * index);

		list_add_tail(&pasync_data_h->link,
			      &pasync_ctx->async_data.free_list);
		pasync_data_h++;
		pasync_ctx->async_data.free_entries++;
		pasync_ctx->async_data.writables++;

		INIT_LIST_HEAD(&pasync_ctx->async_entry[index].data_busy_list);
	}

	pasync_ctx->async_header.host_write_ptr = 0;
	pasync_ctx->async_header.ep_read_ptr = -1;
	pasync_ctx->async_data.host_write_ptr = 0;
	pasync_ctx->async_data.ep_read_ptr = -1;
}
2603
2604static int
2605be_sgl_create_contiguous(void *virtual_address,
2606			 u64 physical_address, u32 length,
2607			 struct be_dma_mem *sgl)
2608{
2609	WARN_ON(!virtual_address);
2610	WARN_ON(!physical_address);
2611	WARN_ON(!length > 0);
2612	WARN_ON(!sgl);
2613
2614	sgl->va = virtual_address;
2615	sgl->dma = (unsigned long)physical_address;
2616	sgl->size = length;
2617
2618	return 0;
2619}
2620
2621static void be_sgl_destroy_contiguous(struct be_dma_mem *sgl)
2622{
2623	memset(sgl, 0, sizeof(*sgl));
2624}
2625
2626static void
2627hwi_build_be_sgl_arr(struct beiscsi_hba *phba,
2628		     struct mem_array *pmem, struct be_dma_mem *sgl)
2629{
2630	if (sgl->va)
2631		be_sgl_destroy_contiguous(sgl);
2632
2633	be_sgl_create_contiguous(pmem->virtual_address,
2634				 pmem->bus_address.u.a64.address,
2635				 pmem->size, sgl);
2636}
2637
2638static void
2639hwi_build_be_sgl_by_offset(struct beiscsi_hba *phba,
2640			   struct mem_array *pmem, struct be_dma_mem *sgl)
2641{
2642	if (sgl->va)
2643		be_sgl_destroy_contiguous(sgl);
2644
2645	be_sgl_create_contiguous((unsigned char *)pmem->virtual_address,
2646				 pmem->bus_address.u.a64.address,
2647				 pmem->size, sgl);
2648}
2649
2650static int be_fill_queue(struct be_queue_info *q,
2651		u16 len, u16 entry_size, void *vaddress)
2652{
2653	struct be_dma_mem *mem = &q->dma_mem;
2654
2655	memset(q, 0, sizeof(*q));
2656	q->len = len;
2657	q->entry_size = entry_size;
2658	mem->size = len * entry_size;
2659	mem->va = vaddress;
2660	if (!mem->va)
2661		return -ENOMEM;
2662	memset(mem->va, 0, mem->size);
2663	return 0;
2664}
2665
2666static int beiscsi_create_eqs(struct beiscsi_hba *phba,
2667			     struct hwi_context_memory *phwi_context)
2668{
2669	unsigned int i, num_eq_pages;
2670	int ret, eq_for_mcc;
2671	struct be_queue_info *eq;
2672	struct be_dma_mem *mem;
2673	void *eq_vaddress;
2674	dma_addr_t paddr;
2675
2676	num_eq_pages = PAGES_REQUIRED(phba->params.num_eq_entries * \
2677				      sizeof(struct be_eq_entry));
2678
2679	if (phba->msix_enabled)
2680		eq_for_mcc = 1;
2681	else
2682		eq_for_mcc = 0;
2683	for (i = 0; i < (phba->num_cpus + eq_for_mcc); i++) {
2684		eq = &phwi_context->be_eq[i].q;
2685		mem = &eq->dma_mem;
2686		phwi_context->be_eq[i].phba = phba;
2687		eq_vaddress = pci_alloc_consistent(phba->pcidev,
2688						     num_eq_pages * PAGE_SIZE,
2689						     &paddr);
2690		if (!eq_vaddress)
2691			goto create_eq_error;
2692
2693		mem->va = eq_vaddress;
2694		ret = be_fill_queue(eq, phba->params.num_eq_entries,
2695				    sizeof(struct be_eq_entry), eq_vaddress);
2696		if (ret) {
2697			shost_printk(KERN_ERR, phba->shost,
2698				     "be_fill_queue Failed for EQ\n");
2699			goto create_eq_error;
2700		}
2701
2702		mem->dma = paddr;
2703		ret = beiscsi_cmd_eq_create(&phba->ctrl, eq,
2704					    phwi_context->cur_eqd);
2705		if (ret) {
2706			shost_printk(KERN_ERR, phba->shost,
2707				     "beiscsi_cmd_eq_create"
2708				     "Failedfor EQ\n");
2709			goto create_eq_error;
2710		}
2711		SE_DEBUG(DBG_LVL_8, "eqid = %d\n", phwi_context->be_eq[i].q.id);
2712	}
2713	return 0;
2714create_eq_error:
2715	for (i = 0; i < (phba->num_cpus + 1); i++) {
2716		eq = &phwi_context->be_eq[i].q;
2717		mem = &eq->dma_mem;
2718		if (mem->va)
2719			pci_free_consistent(phba->pcidev, num_eq_pages
2720					    * PAGE_SIZE,
2721					    mem->va, mem->dma);
2722	}
2723	return ret;
2724}
2725
2726static int beiscsi_create_cqs(struct beiscsi_hba *phba,
2727			     struct hwi_context_memory *phwi_context)
2728{
2729	unsigned int i, num_cq_pages;
2730	int ret;
2731	struct be_queue_info *cq, *eq;
2732	struct be_dma_mem *mem;
2733	struct be_eq_obj *pbe_eq;
2734	void *cq_vaddress;
2735	dma_addr_t paddr;
2736
2737	num_cq_pages = PAGES_REQUIRED(phba->params.num_cq_entries * \
2738				      sizeof(struct sol_cqe));
2739
2740	for (i = 0; i < phba->num_cpus; i++) {
2741		cq = &phwi_context->be_cq[i];
2742		eq = &phwi_context->be_eq[i].q;
2743		pbe_eq = &phwi_context->be_eq[i];
2744		pbe_eq->cq = cq;
2745		pbe_eq->phba = phba;
2746		mem = &cq->dma_mem;
2747		cq_vaddress = pci_alloc_consistent(phba->pcidev,
2748						     num_cq_pages * PAGE_SIZE,
2749						     &paddr);
2750		if (!cq_vaddress)
2751			goto create_cq_error;
2752		ret = be_fill_queue(cq, phba->params.num_cq_entries,
2753				    sizeof(struct sol_cqe), cq_vaddress);
2754		if (ret) {
2755			shost_printk(KERN_ERR, phba->shost,
2756				     "be_fill_queue Failed for ISCSI CQ\n");
2757			goto create_cq_error;
2758		}
2759
2760		mem->dma = paddr;
2761		ret = beiscsi_cmd_cq_create(&phba->ctrl, cq, eq, false,
2762					    false, 0);
2763		if (ret) {
2764			shost_printk(KERN_ERR, phba->shost,
2765				     "beiscsi_cmd_eq_create"
2766				     "Failed for ISCSI CQ\n");
2767			goto create_cq_error;
2768		}
2769		SE_DEBUG(DBG_LVL_8, "iscsi cq_id is %d for eq_id %d\n",
2770						 cq->id, eq->id);
2771		SE_DEBUG(DBG_LVL_8, "ISCSI CQ CREATED\n");
2772	}
2773	return 0;
2774
2775create_cq_error:
2776	for (i = 0; i < phba->num_cpus; i++) {
2777		cq = &phwi_context->be_cq[i];
2778		mem = &cq->dma_mem;
2779		if (mem->va)
2780			pci_free_consistent(phba->pcidev, num_cq_pages
2781					    * PAGE_SIZE,
2782					    mem->va, mem->dma);
2783	}
2784	return ret;
2785
2786}
2787
2788static int
2789beiscsi_create_def_hdr(struct beiscsi_hba *phba,
2790		       struct hwi_context_memory *phwi_context,
2791		       struct hwi_controller *phwi_ctrlr,
2792		       unsigned int def_pdu_ring_sz)
2793{
2794	unsigned int idx;
2795	int ret;
2796	struct be_queue_info *dq, *cq;
2797	struct be_dma_mem *mem;
2798	struct be_mem_descriptor *mem_descr;
2799	void *dq_vaddress;
2800
2801	idx = 0;
2802	dq = &phwi_context->be_def_hdrq;
2803	cq = &phwi_context->be_cq[0];
2804	mem = &dq->dma_mem;
2805	mem_descr = phba->init_mem;
2806	mem_descr += HWI_MEM_ASYNC_HEADER_RING;
2807	dq_vaddress = mem_descr->mem_array[idx].virtual_address;
2808	ret = be_fill_queue(dq, mem_descr->mem_array[0].size /
2809			    sizeof(struct phys_addr),
2810			    sizeof(struct phys_addr), dq_vaddress);
2811	if (ret) {
2812		shost_printk(KERN_ERR, phba->shost,
2813			     "be_fill_queue Failed for DEF PDU HDR\n");
2814		return ret;
2815	}
2816	mem->dma = (unsigned long)mem_descr->mem_array[idx].
2817				  bus_address.u.a64.address;
2818	ret = be_cmd_create_default_pdu_queue(&phba->ctrl, cq, dq,
2819					      def_pdu_ring_sz,
2820					      phba->params.defpdu_hdr_sz);
2821	if (ret) {
2822		shost_printk(KERN_ERR, phba->shost,
2823			     "be_cmd_create_default_pdu_queue Failed DEFHDR\n");
2824		return ret;
2825	}
2826	phwi_ctrlr->default_pdu_hdr.id = phwi_context->be_def_hdrq.id;
2827	SE_DEBUG(DBG_LVL_8, "iscsi def pdu id is %d\n",
2828		 phwi_context->be_def_hdrq.id);
2829	hwi_post_async_buffers(phba, 1);
2830	return 0;
2831}
2832
2833static int
2834beiscsi_create_def_data(struct beiscsi_hba *phba,
2835			struct hwi_context_memory *phwi_context,
2836			struct hwi_controller *phwi_ctrlr,
2837			unsigned int def_pdu_ring_sz)
2838{
2839	unsigned int idx;
2840	int ret;
2841	struct be_queue_info *dataq, *cq;
2842	struct be_dma_mem *mem;
2843	struct be_mem_descriptor *mem_descr;
2844	void *dq_vaddress;
2845
2846	idx = 0;
2847	dataq = &phwi_context->be_def_dataq;
2848	cq = &phwi_context->be_cq[0];
2849	mem = &dataq->dma_mem;
2850	mem_descr = phba->init_mem;
2851	mem_descr += HWI_MEM_ASYNC_DATA_RING;
2852	dq_vaddress = mem_descr->mem_array[idx].virtual_address;
2853	ret = be_fill_queue(dataq, mem_descr->mem_array[0].size /
2854			    sizeof(struct phys_addr),
2855			    sizeof(struct phys_addr), dq_vaddress);
2856	if (ret) {
2857		shost_printk(KERN_ERR, phba->shost,
2858			     "be_fill_queue Failed for DEF PDU DATA\n");
2859		return ret;
2860	}
2861	mem->dma = (unsigned long)mem_descr->mem_array[idx].
2862				  bus_address.u.a64.address;
2863	ret = be_cmd_create_default_pdu_queue(&phba->ctrl, cq, dataq,
2864					      def_pdu_ring_sz,
2865					      phba->params.defpdu_data_sz);
2866	if (ret) {
2867		shost_printk(KERN_ERR, phba->shost,
2868			     "be_cmd_create_default_pdu_queue Failed"
2869			     " for DEF PDU DATA\n");
2870		return ret;
2871	}
2872	phwi_ctrlr->default_pdu_data.id = phwi_context->be_def_dataq.id;
2873	SE_DEBUG(DBG_LVL_8, "iscsi def data id is %d\n",
2874		 phwi_context->be_def_dataq.id);
2875	hwi_post_async_buffers(phba, 0);
2876	SE_DEBUG(DBG_LVL_8, "DEFAULT PDU DATA RING CREATED\n");
2877	return 0;
2878}
2879
2880static int
2881beiscsi_post_pages(struct beiscsi_hba *phba)
2882{
2883	struct be_mem_descriptor *mem_descr;
2884	struct mem_array *pm_arr;
2885	unsigned int page_offset, i;
2886	struct be_dma_mem sgl;
2887	int status;
2888
2889	mem_descr = phba->init_mem;
2890	mem_descr += HWI_MEM_SGE;
2891	pm_arr = mem_descr->mem_array;
2892
2893	page_offset = (sizeof(struct iscsi_sge) * phba->params.num_sge_per_io *
2894			phba->fw_config.iscsi_icd_start) / PAGE_SIZE;
2895	for (i = 0; i < mem_descr->num_elements; i++) {
2896		hwi_build_be_sgl_arr(phba, pm_arr, &sgl);
2897		status = be_cmd_iscsi_post_sgl_pages(&phba->ctrl, &sgl,
2898						page_offset,
2899						(pm_arr->size / PAGE_SIZE));
2900		page_offset += pm_arr->size / PAGE_SIZE;
2901		if (status != 0) {
2902			shost_printk(KERN_ERR, phba->shost,
2903				     "post sgl failed.\n");
2904			return status;
2905		}
2906		pm_arr++;
2907	}
2908	SE_DEBUG(DBG_LVL_8, "POSTED PAGES\n");
2909	return 0;
2910}
2911
2912static void be_queue_free(struct beiscsi_hba *phba, struct be_queue_info *q)
2913{
2914	struct be_dma_mem *mem = &q->dma_mem;
2915	if (mem->va)
2916		pci_free_consistent(phba->pcidev, mem->size,
2917			mem->va, mem->dma);
2918}
2919
2920static int be_queue_alloc(struct beiscsi_hba *phba, struct be_queue_info *q,
2921		u16 len, u16 entry_size)
2922{
2923	struct be_dma_mem *mem = &q->dma_mem;
2924
2925	memset(q, 0, sizeof(*q));
2926	q->len = len;
2927	q->entry_size = entry_size;
2928	mem->size = len * entry_size;
2929	mem->va = pci_alloc_consistent(phba->pcidev, mem->size, &mem->dma);
2930	if (!mem->va)
2931		return -ENOMEM;
2932	memset(mem->va, 0, mem->size);
2933	return 0;
2934}
2935
2936static int
2937beiscsi_create_wrb_rings(struct beiscsi_hba *phba,
2938			 struct hwi_context_memory *phwi_context,
2939			 struct hwi_controller *phwi_ctrlr)
2940{
2941	unsigned int wrb_mem_index, offset, size, num_wrb_rings;
2942	u64 pa_addr_lo;
2943	unsigned int idx, num, i;
2944	struct mem_array *pwrb_arr;
2945	void *wrb_vaddr;
2946	struct be_dma_mem sgl;
2947	struct be_mem_descriptor *mem_descr;
2948	int status;
2949
2950	idx = 0;
2951	mem_descr = phba->init_mem;
2952	mem_descr += HWI_MEM_WRB;
2953	pwrb_arr = kmalloc(sizeof(*pwrb_arr) * phba->params.cxns_per_ctrl,
2954			   GFP_KERNEL);
2955	if (!pwrb_arr) {
2956		shost_printk(KERN_ERR, phba->shost,
2957			     "Memory alloc failed in create wrb ring.\n");
2958		return -ENOMEM;
2959	}
2960	wrb_vaddr = mem_descr->mem_array[idx].virtual_address;
2961	pa_addr_lo = mem_descr->mem_array[idx].bus_address.u.a64.address;
2962	num_wrb_rings = mem_descr->mem_array[idx].size /
2963		(phba->params.wrbs_per_cxn * sizeof(struct iscsi_wrb));
2964
2965	for (num = 0; num < phba->params.cxns_per_ctrl; num++) {
2966		if (num_wrb_rings) {
2967			pwrb_arr[num].virtual_address = wrb_vaddr;
2968			pwrb_arr[num].bus_address.u.a64.address	= pa_addr_lo;
2969			pwrb_arr[num].size = phba->params.wrbs_per_cxn *
2970					    sizeof(struct iscsi_wrb);
2971			wrb_vaddr += pwrb_arr[num].size;
2972			pa_addr_lo += pwrb_arr[num].size;
2973			num_wrb_rings--;
2974		} else {
2975			idx++;
2976			wrb_vaddr = mem_descr->mem_array[idx].virtual_address;
2977			pa_addr_lo = mem_descr->mem_array[idx].\
2978					bus_address.u.a64.address;
2979			num_wrb_rings = mem_descr->mem_array[idx].size /
2980					(phba->params.wrbs_per_cxn *
2981					sizeof(struct iscsi_wrb));
2982			pwrb_arr[num].virtual_address = wrb_vaddr;
2983			pwrb_arr[num].bus_address.u.a64.address\
2984						= pa_addr_lo;
2985			pwrb_arr[num].size = phba->params.wrbs_per_cxn *
2986						 sizeof(struct iscsi_wrb);
2987			wrb_vaddr += pwrb_arr[num].size;
2988			pa_addr_lo   += pwrb_arr[num].size;
2989			num_wrb_rings--;
2990		}
2991	}
2992	for (i = 0; i < phba->params.cxns_per_ctrl; i++) {
2993		wrb_mem_index = 0;
2994		offset = 0;
2995		size = 0;
2996
2997		hwi_build_be_sgl_by_offset(phba, &pwrb_arr[i], &sgl);
2998		status = be_cmd_wrbq_create(&phba->ctrl, &sgl,
2999					    &phwi_context->be_wrbq[i]);
3000		if (status != 0) {
3001			shost_printk(KERN_ERR, phba->shost,
3002				     "wrbq create failed.");
3003			kfree(pwrb_arr);
3004			return status;
3005		}
3006		phwi_ctrlr->wrb_context[i * 2].cid = phwi_context->be_wrbq[i].
3007								   id;
3008	}
3009	kfree(pwrb_arr);
3010	return 0;
3011}
3012
3013static void free_wrb_handles(struct beiscsi_hba *phba)
3014{
3015	unsigned int index;
3016	struct hwi_controller *phwi_ctrlr;
3017	struct hwi_wrb_context *pwrb_context;
3018
3019	phwi_ctrlr = phba->phwi_ctrlr;
3020	for (index = 0; index < phba->params.cxns_per_ctrl * 2; index += 2) {
3021		pwrb_context = &phwi_ctrlr->wrb_context[index];
3022		kfree(pwrb_context->pwrb_handle_base);
3023		kfree(pwrb_context->pwrb_handle_basestd);
3024	}
3025}
3026
3027static void be_mcc_queues_destroy(struct beiscsi_hba *phba)
3028{
3029	struct be_queue_info *q;
3030	struct be_ctrl_info *ctrl = &phba->ctrl;
3031
3032	q = &phba->ctrl.mcc_obj.q;
3033	if (q->created)
3034		beiscsi_cmd_q_destroy(ctrl, q, QTYPE_MCCQ);
3035	be_queue_free(phba, q);
3036
3037	q = &phba->ctrl.mcc_obj.cq;
3038	if (q->created)
3039		beiscsi_cmd_q_destroy(ctrl, q, QTYPE_CQ);
3040	be_queue_free(phba, q);
3041}
3042
/*
 * hwi_cleanup - destroy every hardware queue created by hwi_init_port().
 *
 * Teardown order mirrors creation in reverse-ish dependency order:
 * per-connection WRB queues, default PDU header/data queues, posted SGL
 * pages, completion queues, event queues and finally the MCC queue pair.
 */
static void hwi_cleanup(struct beiscsi_hba *phba)
{
	struct be_queue_info *q;
	struct be_ctrl_info *ctrl = &phba->ctrl;
	struct hwi_controller *phwi_ctrlr;
	struct hwi_context_memory *phwi_context;
	int i, eq_num;

	phwi_ctrlr = phba->phwi_ctrlr;
	phwi_context = phwi_ctrlr->phwi_ctxt;
	/* One WRB queue was created per connection. */
	for (i = 0; i < phba->params.cxns_per_ctrl; i++) {
		q = &phwi_context->be_wrbq[i];
		if (q->created)
			beiscsi_cmd_q_destroy(ctrl, q, QTYPE_WRBQ);
	}
	free_wrb_handles(phba);

	q = &phwi_context->be_def_hdrq;
	if (q->created)
		beiscsi_cmd_q_destroy(ctrl, q, QTYPE_DPDUQ);

	q = &phwi_context->be_def_dataq;
	if (q->created)
		beiscsi_cmd_q_destroy(ctrl, q, QTYPE_DPDUQ);

	/* NULL queue: tells the firmware to remove the posted SGL pages. */
	beiscsi_cmd_q_destroy(ctrl, NULL, QTYPE_SGL);

	for (i = 0; i < (phba->num_cpus); i++) {
		q = &phwi_context->be_cq[i];
		if (q->created)
			beiscsi_cmd_q_destroy(ctrl, q, QTYPE_CQ);
	}
	/* With MSI-X one extra EQ exists (used for MCC completions). */
	if (phba->msix_enabled)
		eq_num = 1;
	else
		eq_num = 0;
	for (i = 0; i < (phba->num_cpus + eq_num); i++) {
		q = &phwi_context->be_eq[i].q;
		if (q->created)
			beiscsi_cmd_q_destroy(ctrl, q, QTYPE_EQ);
	}
	be_mcc_queues_destroy(phba);
}
3086
3087static int be_mcc_queues_create(struct beiscsi_hba *phba,
3088				struct hwi_context_memory *phwi_context)
3089{
3090	struct be_queue_info *q, *cq;
3091	struct be_ctrl_info *ctrl = &phba->ctrl;
3092
3093	/* Alloc MCC compl queue */
3094	cq = &phba->ctrl.mcc_obj.cq;
3095	if (be_queue_alloc(phba, cq, MCC_CQ_LEN,
3096			sizeof(struct be_mcc_compl)))
3097		goto err;
3098	/* Ask BE to create MCC compl queue; */
3099	if (phba->msix_enabled) {
3100		if (beiscsi_cmd_cq_create(ctrl, cq, &phwi_context->be_eq
3101					 [phba->num_cpus].q, false, true, 0))
3102		goto mcc_cq_free;
3103	} else {
3104		if (beiscsi_cmd_cq_create(ctrl, cq, &phwi_context->be_eq[0].q,
3105					  false, true, 0))
3106		goto mcc_cq_free;
3107	}
3108
3109	/* Alloc MCC queue */
3110	q = &phba->ctrl.mcc_obj.q;
3111	if (be_queue_alloc(phba, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
3112		goto mcc_cq_destroy;
3113
3114	/* Ask BE to create MCC queue */
3115	if (beiscsi_cmd_mccq_create(phba, q, cq))
3116		goto mcc_q_free;
3117
3118	return 0;
3119
3120mcc_q_free:
3121	be_queue_free(phba, q);
3122mcc_cq_destroy:
3123	beiscsi_cmd_q_destroy(ctrl, cq, QTYPE_CQ);
3124mcc_cq_free:
3125	be_queue_free(phba, cq);
3126err:
3127	return -ENOMEM;
3128}
3129
3130static int find_num_cpus(void)
3131{
3132	int  num_cpus = 0;
3133
3134	num_cpus = num_online_cpus();
3135	if (num_cpus >= MAX_CPUS)
3136		num_cpus = MAX_CPUS - 1;
3137
3138	SE_DEBUG(DBG_LVL_8, "num_cpus = %d\n", num_cpus);
3139	return num_cpus;
3140}
3141
/*
 * hwi_init_port - create all hardware queues and bring the port up.
 *
 * Creation order: firmware init, EQs, MCC queue pair, firmware-version
 * check, CQs, default PDU header/data rings, SGL page post, and finally
 * the per-connection WRB rings.  On any failure everything created so
 * far is destroyed via hwi_cleanup().
 *
 * NOTE(review): the status of the failing step is discarded and -ENOMEM
 * is returned for every failure mode — confirm callers never need the
 * original error code before changing this.
 */
static int hwi_init_port(struct beiscsi_hba *phba)
{
	struct hwi_controller *phwi_ctrlr;
	struct hwi_context_memory *phwi_context;
	unsigned int def_pdu_ring_sz;
	struct be_ctrl_info *ctrl = &phba->ctrl;
	int status;

	/* One phys_addr entry per async PDU buffer in the default rings. */
	def_pdu_ring_sz =
		phba->params.asyncpdus_per_ctrl * sizeof(struct phys_addr);
	phwi_ctrlr = phba->phwi_ctrlr;
	phwi_context = phwi_ctrlr->phwi_ctxt;
	/* EQ delay bounds: adaptive range disabled, fixed delay of 64. */
	phwi_context->max_eqd = 0;
	phwi_context->min_eqd = 0;
	phwi_context->cur_eqd = 64;
	be_cmd_fw_initialize(&phba->ctrl);

	status = beiscsi_create_eqs(phba, phwi_context);
	if (status != 0) {
		shost_printk(KERN_ERR, phba->shost, "EQ not created\n");
		goto error;
	}

	status = be_mcc_queues_create(phba, phwi_context);
	if (status != 0)
		goto error;

	status = mgmt_check_supported_fw(ctrl, phba);
	if (status != 0) {
		shost_printk(KERN_ERR, phba->shost,
			     "Unsupported fw version\n");
		goto error;
	}

	status = beiscsi_create_cqs(phba, phwi_context);
	if (status != 0) {
		shost_printk(KERN_ERR, phba->shost, "CQ not created\n");
		goto error;
	}

	status = beiscsi_create_def_hdr(phba, phwi_context, phwi_ctrlr,
					def_pdu_ring_sz);
	if (status != 0) {
		shost_printk(KERN_ERR, phba->shost,
			     "Default Header not created\n");
		goto error;
	}

	status = beiscsi_create_def_data(phba, phwi_context,
					 phwi_ctrlr, def_pdu_ring_sz);
	if (status != 0) {
		shost_printk(KERN_ERR, phba->shost,
			     "Default Data not created\n");
		goto error;
	}

	status = beiscsi_post_pages(phba);
	if (status != 0) {
		shost_printk(KERN_ERR, phba->shost, "Post SGL Pages Failed\n");
		goto error;
	}

	status = beiscsi_create_wrb_rings(phba,	phwi_context, phwi_ctrlr);
	if (status != 0) {
		shost_printk(KERN_ERR, phba->shost,
			     "WRB Rings not created\n");
		goto error;
	}

	SE_DEBUG(DBG_LVL_8, "hwi_init_port success\n");
	return 0;

error:
	shost_printk(KERN_ERR, phba->shost, "hwi_init_port failed");
	hwi_cleanup(phba);
	return -ENOMEM;
}
3219
3220static int hwi_init_controller(struct beiscsi_hba *phba)
3221{
3222	struct hwi_controller *phwi_ctrlr;
3223
3224	phwi_ctrlr = phba->phwi_ctrlr;
3225	if (1 == phba->init_mem[HWI_MEM_ADDN_CONTEXT].num_elements) {
3226		phwi_ctrlr->phwi_ctxt = (struct hwi_context_memory *)phba->
3227		    init_mem[HWI_MEM_ADDN_CONTEXT].mem_array[0].virtual_address;
3228		SE_DEBUG(DBG_LVL_8, " phwi_ctrlr->phwi_ctxt=%p\n",
3229			 phwi_ctrlr->phwi_ctxt);
3230	} else {
3231		shost_printk(KERN_ERR, phba->shost,
3232			     "HWI_MEM_ADDN_CONTEXT is more than one element."
3233			     "Failing to load\n");
3234		return -ENOMEM;
3235	}
3236
3237	iscsi_init_global_templates(phba);
3238	beiscsi_init_wrb_handle(phba);
3239	hwi_init_async_pdu_ctx(phba);
3240	if (hwi_init_port(phba) != 0) {
3241		shost_printk(KERN_ERR, phba->shost,
3242			     "hwi_init_controller failed\n");
3243		return -ENOMEM;
3244	}
3245	return 0;
3246}
3247
3248static void beiscsi_free_mem(struct beiscsi_hba *phba)
3249{
3250	struct be_mem_descriptor *mem_descr;
3251	int i, j;
3252
3253	mem_descr = phba->init_mem;
3254	i = 0;
3255	j = 0;
3256	for (i = 0; i < SE_MEM_MAX; i++) {
3257		for (j = mem_descr->num_elements; j > 0; j--) {
3258			pci_free_consistent(phba->pcidev,
3259			  mem_descr->mem_array[j - 1].size,
3260			  mem_descr->mem_array[j - 1].virtual_address,
3261			  (unsigned long)mem_descr->mem_array[j - 1].
3262			  bus_address.u.a64.address);
3263		}
3264		kfree(mem_descr->mem_array);
3265		mem_descr++;
3266	}
3267	kfree(phba->init_mem);
3268	kfree(phba->phwi_ctrlr);
3269}
3270
3271static int beiscsi_init_controller(struct beiscsi_hba *phba)
3272{
3273	int ret = -ENOMEM;
3274
3275	ret = beiscsi_get_memory(phba);
3276	if (ret < 0) {
3277		shost_printk(KERN_ERR, phba->shost, "beiscsi_dev_probe -"
3278			     "Failed in beiscsi_alloc_memory\n");
3279		return ret;
3280	}
3281
3282	ret = hwi_init_controller(phba);
3283	if (ret)
3284		goto free_init;
3285	SE_DEBUG(DBG_LVL_8, "Return success from beiscsi_init_controller");
3286	return 0;
3287
3288free_init:
3289	beiscsi_free_mem(phba);
3290	return -ENOMEM;
3291}
3292
/*
 * beiscsi_init_sgl_handle - build the I/O and extra (eh) SGL handle pools.
 *
 * Allocates pointer arrays for ios_per_ctrl I/O handles and
 * (icds_per_ctrl - ios_per_ctrl) extra handles, carves the handle
 * structures out of the HWI_MEM_SGLH region (I/O handles first, then
 * extra handles), and wires each handle to its SGE fragment from the
 * HWI_MEM_SGE region.  Returns 0 on success or -ENOMEM.
 */
static int beiscsi_init_sgl_handle(struct beiscsi_hba *phba)
{
	struct be_mem_descriptor *mem_descr_sglh, *mem_descr_sg;
	struct sgl_handle *psgl_handle;
	struct iscsi_sge *pfrag;
	unsigned int arr_index, i, idx;

	phba->io_sgl_hndl_avbl = 0;
	phba->eh_sgl_hndl_avbl = 0;

	mem_descr_sglh = phba->init_mem;
	mem_descr_sglh += HWI_MEM_SGLH;
	/* The handle region must be one contiguous chunk. */
	if (1 == mem_descr_sglh->num_elements) {
		phba->io_sgl_hndl_base = kzalloc(sizeof(struct sgl_handle *) *
						 phba->params.ios_per_ctrl,
						 GFP_KERNEL);
		if (!phba->io_sgl_hndl_base) {
			shost_printk(KERN_ERR, phba->shost,
				     "Mem Alloc Failed. Failing to load\n");
			return -ENOMEM;
		}
		phba->eh_sgl_hndl_base = kzalloc(sizeof(struct sgl_handle *) *
						 (phba->params.icds_per_ctrl -
						 phba->params.ios_per_ctrl),
						 GFP_KERNEL);
		if (!phba->eh_sgl_hndl_base) {
			kfree(phba->io_sgl_hndl_base);
			shost_printk(KERN_ERR, phba->shost,
				     "Mem Alloc Failed. Failing to load\n");
			return -ENOMEM;
		}
	} else {
		shost_printk(KERN_ERR, phba->shost,
			     "HWI_MEM_SGLH is more than one element."
			     "Failing to load\n");
		return -ENOMEM;
	}

	/* Distribute handle structs: first ios_per_ctrl go to the I/O
	 * pool, the remainder to the extra (eh) pool. */
	arr_index = 0;
	idx = 0;
	while (idx < mem_descr_sglh->num_elements) {
		psgl_handle = mem_descr_sglh->mem_array[idx].virtual_address;

		for (i = 0; i < (mem_descr_sglh->mem_array[idx].size /
		      sizeof(struct sgl_handle)); i++) {
			if (arr_index < phba->params.ios_per_ctrl) {
				phba->io_sgl_hndl_base[arr_index] = psgl_handle;
				phba->io_sgl_hndl_avbl++;
				arr_index++;
			} else {
				phba->eh_sgl_hndl_base[arr_index -
					phba->params.ios_per_ctrl] =
								psgl_handle;
				arr_index++;
				phba->eh_sgl_hndl_avbl++;
			}
			psgl_handle++;
		}
		idx++;
	}
	SE_DEBUG(DBG_LVL_8,
		 "phba->io_sgl_hndl_avbl=%d"
		 "phba->eh_sgl_hndl_avbl=%d\n",
		 phba->io_sgl_hndl_avbl,
		 phba->eh_sgl_hndl_avbl);
	mem_descr_sg = phba->init_mem;
	mem_descr_sg += HWI_MEM_SGE;
	SE_DEBUG(DBG_LVL_8, "\n mem_descr_sg->num_elements=%d\n",
		 mem_descr_sg->num_elements);
	/* Second pass: attach each handle's SGE fragment array and assign
	 * its global sgl_index (offset by the firmware's ICD start). */
	arr_index = 0;
	idx = 0;
	while (idx < mem_descr_sg->num_elements) {
		pfrag = mem_descr_sg->mem_array[idx].virtual_address;

		for (i = 0;
		     i < (mem_descr_sg->mem_array[idx].size) /
		     (sizeof(struct iscsi_sge) * phba->params.num_sge_per_io);
		     i++) {
			if (arr_index < phba->params.ios_per_ctrl)
				psgl_handle = phba->io_sgl_hndl_base[arr_index];
			else
				psgl_handle = phba->eh_sgl_hndl_base[arr_index -
						phba->params.ios_per_ctrl];
			psgl_handle->pfrag = pfrag;
			AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, pfrag, 0);
			AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, pfrag, 0);
			pfrag += phba->params.num_sge_per_io;
			psgl_handle->sgl_index =
				phba->fw_config.iscsi_icd_start + arr_index++;
		}
		idx++;
	}
	/* Both pools start fully free with alloc/free cursors at 0. */
	phba->io_sgl_free_index = 0;
	phba->io_sgl_alloc_index = 0;
	phba->eh_sgl_free_index = 0;
	phba->eh_sgl_alloc_index = 0;
	return 0;
}
3391
/*
 * hba_setup_cid_tbls - build the free-CID table and endpoint lookup array.
 *
 * CIDs are handed out from cid_array; ep_array maps a CID back to its
 * iscsi_endpoint and is sized 2x because the firmware assigns CIDs with
 * a stride of 2 (see the "new_cid += 2" fill below).
 *
 * NOTE(review): cid_array is allocated with sizeof(void *) per element
 * but stores integer CIDs — presumably an over-allocation rather than a
 * corruption, but confirm against the declared type of phba->cid_array.
 */
static int hba_setup_cid_tbls(struct beiscsi_hba *phba)
{
	int i, new_cid;

	phba->cid_array = kzalloc(sizeof(void *) * phba->params.cxns_per_ctrl,
				  GFP_KERNEL);
	if (!phba->cid_array) {
		shost_printk(KERN_ERR, phba->shost,
			     "Failed to allocate memory in "
			     "hba_setup_cid_tbls\n");
		return -ENOMEM;
	}
	phba->ep_array = kzalloc(sizeof(struct iscsi_endpoint *) *
				 phba->params.cxns_per_ctrl * 2, GFP_KERNEL);
	if (!phba->ep_array) {
		shost_printk(KERN_ERR, phba->shost,
			     "Failed to allocate memory in "
			     "hba_setup_cid_tbls\n");
		kfree(phba->cid_array);
		return -ENOMEM;
	}
	/* Populate the free list with every even-offset CID from the
	 * firmware's starting CID. */
	new_cid = phba->fw_config.iscsi_cid_start;
	for (i = 0; i < phba->params.cxns_per_ctrl; i++) {
		phba->cid_array[i] = new_cid;
		new_cid += 2;
	}
	phba->avlbl_cids = phba->params.cxns_per_ctrl;
	return 0;
}
3421
/*
 * hwi_enable_intr - enable host interrupts and arm the event queues.
 *
 * Sets the host-interrupt bit in the PCI-config membar register if it
 * is not already set, then rings the doorbell of each event queue with
 * rearm + event enable so the adapter starts delivering events.
 */
static void hwi_enable_intr(struct beiscsi_hba *phba)
{
	struct be_ctrl_info *ctrl = &phba->ctrl;
	struct hwi_controller *phwi_ctrlr;
	struct hwi_context_memory *phwi_context;
	struct be_queue_info *eq;
	u8 __iomem *addr;
	u32 reg, i;
	u32 enabled;

	phwi_ctrlr = phba->phwi_ctrlr;
	phwi_context = phwi_ctrlr->phwi_ctxt;

	addr = (u8 __iomem *) ((u8 __iomem *) ctrl->pcicfg +
			PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET);
	reg = ioread32(addr);

	/* Read-modify-write: only touch the register if the bit is clear. */
	enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
	if (!enabled) {
		reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
		SE_DEBUG(DBG_LVL_8, "reg =x%08x addr=%p\n", reg, addr);
		iowrite32(reg, addr);
	}

	if (!phba->msix_enabled) {
		/* INTx/MSI: a single EQ at index 0. */
		eq = &phwi_context->be_eq[0].q;
		SE_DEBUG(DBG_LVL_8, "eq->id=%d\n", eq->id);
		hwi_ring_eq_db(phba, eq->id, 0, 0, 1, 1);
	} else {
		/* MSI-X: one EQ per CPU plus the extra MCC EQ (<=). */
		for (i = 0; i <= phba->num_cpus; i++) {
			eq = &phwi_context->be_eq[i].q;
			SE_DEBUG(DBG_LVL_8, "eq->id=%d\n", eq->id);
			hwi_ring_eq_db(phba, eq->id, 0, 0, 1, 1);
		}
	}
}
3458
3459static void hwi_disable_intr(struct beiscsi_hba *phba)
3460{
3461	struct be_ctrl_info *ctrl = &phba->ctrl;
3462
3463	u8 __iomem *addr = ctrl->pcicfg + PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET;
3464	u32 reg = ioread32(addr);
3465
3466	u32 enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
3467	if (enabled) {
3468		reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
3469		iowrite32(reg, addr);
3470	} else
3471		shost_printk(KERN_WARNING, phba->shost,
3472			     "In hwi_disable_intr, Already Disabled\n");
3473}
3474
3475static int beiscsi_get_boot_info(struct beiscsi_hba *phba)
3476{
3477	struct be_cmd_resp_get_boot_target *boot_resp;
3478	struct be_cmd_resp_get_session *session_resp;
3479	struct be_mcc_wrb *wrb;
3480	struct be_dma_mem nonemb_cmd;
3481	unsigned int tag, wrb_num;
3482	unsigned short status, extd_status;
3483	struct be_queue_info *mccq = &phba->ctrl.mcc_obj.q;
3484	int ret = -ENOMEM;
3485
3486	tag = beiscsi_get_boot_target(phba);
3487	if (!tag) {
3488		SE_DEBUG(DBG_LVL_1, "be_cmd_get_mac_addr Failed\n");
3489		return -EAGAIN;
3490	} else
3491		wait_event_interruptible(phba->ctrl.mcc_wait[tag],
3492					 phba->ctrl.mcc_numtag[tag]);
3493
3494	wrb_num = (phba->ctrl.mcc_numtag[tag] & 0x00FF0000) >> 16;
3495	extd_status = (phba->ctrl.mcc_numtag[tag] & 0x0000FF00) >> 8;
3496	status = phba->ctrl.mcc_numtag[tag] & 0x000000FF;
3497	if (status || extd_status) {
3498		SE_DEBUG(DBG_LVL_1, "be_cmd_get_mac_addr Failed"
3499				    " status = %d extd_status = %d\n",
3500				    status, extd_status);
3501		free_mcc_tag(&phba->ctrl, tag);
3502		return -EBUSY;
3503	}
3504	wrb = queue_get_wrb(mccq, wrb_num);
3505	free_mcc_tag(&phba->ctrl, tag);
3506	boot_resp = embedded_payload(wrb);
3507
3508	if (boot_resp->boot_session_handle < 0) {
3509		shost_printk(KERN_INFO, phba->shost, "No Boot Session.\n");
3510		return -ENXIO;
3511	}
3512
3513	nonemb_cmd.va = pci_alloc_consistent(phba->ctrl.pdev,
3514				sizeof(*session_resp),
3515				&nonemb_cmd.dma);
3516	if (nonemb_cmd.va == NULL) {
3517		SE_DEBUG(DBG_LVL_1,
3518			 "Failed to allocate memory for"
3519			 "beiscsi_get_session_info\n");
3520		return -ENOMEM;
3521	}
3522
3523	memset(nonemb_cmd.va, 0, sizeof(*session_resp));
3524	tag = beiscsi_get_session_info(phba,
3525		boot_resp->boot_session_handle, &nonemb_cmd);
3526	if (!tag) {
3527		SE_DEBUG(DBG_LVL_1, "beiscsi_get_session_info"
3528			" Failed\n");
3529		goto boot_freemem;
3530	} else
3531		wait_event_interruptible(phba->ctrl.mcc_wait[tag],
3532					 phba->ctrl.mcc_numtag[tag]);
3533
3534	wrb_num = (phba->ctrl.mcc_numtag[tag] & 0x00FF0000) >> 16;
3535	extd_status = (phba->ctrl.mcc_numtag[tag] & 0x0000FF00) >> 8;
3536	status = phba->ctrl.mcc_numtag[tag] & 0x000000FF;
3537	if (status || extd_status) {
3538		SE_DEBUG(DBG_LVL_1, "beiscsi_get_session_info Failed"
3539				    " status = %d extd_status = %d\n",
3540				    status, extd_status);
3541		free_mcc_tag(&phba->ctrl, tag);
3542		goto boot_freemem;
3543	}
3544	wrb = queue_get_wrb(mccq, wrb_num);
3545	free_mcc_tag(&phba->ctrl, tag);
3546	session_resp = nonemb_cmd.va ;
3547
3548	memcpy(&phba->boot_sess, &session_resp->session_info,
3549	       sizeof(struct mgmt_session_info));
3550	ret = 0;
3551
3552boot_freemem:
3553	pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size,
3554		    nonemb_cmd.va, nonemb_cmd.dma);
3555	return ret;
3556}
3557
/*
 * Release callback for the iscsi_boot sysfs kobjects: drop the host
 * reference taken in beiscsi_setup_boot_info().
 */
static void beiscsi_boot_release(void *data)
{
	struct beiscsi_hba *phba = data;

	scsi_host_put(phba->shost);
}
3564
/*
 * beiscsi_setup_boot_info - expose iSCSI boot info through sysfs.
 *
 * Queries the firmware for boot info, then creates the boot kset plus
 * target/initiator/ethernet kobjects.  Each kobject takes its own host
 * reference (released by beiscsi_boot_release via the kset teardown).
 * Returns 0 on success (including the "no boot info" case, which is
 * treated as non-fatal) or -ENOMEM on failure.
 */
static int beiscsi_setup_boot_info(struct beiscsi_hba *phba)
{
	struct iscsi_boot_kobj *boot_kobj;

	/* get boot info using mgmt cmd */
	if (beiscsi_get_boot_info(phba))
		/* Try to see if we can carry on without this */
		return 0;

	phba->boot_kset = iscsi_boot_create_host_kset(phba->shost->host_no);
	if (!phba->boot_kset)
		return -ENOMEM;

	/* get a ref because the show function will ref the phba */
	if (!scsi_host_get(phba->shost))
		goto free_kset;
	boot_kobj = iscsi_boot_create_target(phba->boot_kset, 0, phba,
					     beiscsi_show_boot_tgt_info,
					     beiscsi_tgt_get_attr_visibility,
					     beiscsi_boot_release);
	if (!boot_kobj)
		goto put_shost;

	if (!scsi_host_get(phba->shost))
		goto free_kset;
	boot_kobj = iscsi_boot_create_initiator(phba->boot_kset, 0, phba,
						beiscsi_show_boot_ini_info,
						beiscsi_ini_get_attr_visibility,
						beiscsi_boot_release);
	if (!boot_kobj)
		goto put_shost;

	if (!scsi_host_get(phba->shost))
		goto free_kset;
	boot_kobj = iscsi_boot_create_ethernet(phba->boot_kset, 0, phba,
					       beiscsi_show_boot_eth_info,
					       beiscsi_eth_get_attr_visibility,
					       beiscsi_boot_release);
	if (!boot_kobj)
		goto put_shost;
	return 0;

put_shost:
	/* Drop the reference taken for the kobject that failed to create. */
	scsi_host_put(phba->shost);
free_kset:
	/* Destroying the kset releases any kobjects created so far. */
	iscsi_boot_destroy_kset(phba->boot_kset);
	return -ENOMEM;
}
3613
3614static int beiscsi_init_port(struct beiscsi_hba *phba)
3615{
3616	int ret;
3617
3618	ret = beiscsi_init_controller(phba);
3619	if (ret < 0) {
3620		shost_printk(KERN_ERR, phba->shost,
3621			     "beiscsi_dev_probe - Failed in"
3622			     "beiscsi_init_controller\n");
3623		return ret;
3624	}
3625	ret = beiscsi_init_sgl_handle(phba);
3626	if (ret < 0) {
3627		shost_printk(KERN_ERR, phba->shost,
3628			     "beiscsi_dev_probe - Failed in"
3629			     "beiscsi_init_sgl_handle\n");
3630		goto do_cleanup_ctrlr;
3631	}
3632
3633	if (hba_setup_cid_tbls(phba)) {
3634		shost_printk(KERN_ERR, phba->shost,
3635			     "Failed in hba_setup_cid_tbls\n");
3636		kfree(phba->io_sgl_hndl_base);
3637		kfree(phba->eh_sgl_hndl_base);
3638		goto do_cleanup_ctrlr;
3639	}
3640
3641	return ret;
3642
3643do_cleanup_ctrlr:
3644	hwi_cleanup(phba);
3645	return ret;
3646}
3647
/*
 * hwi_purge_eq - drain stale entries from every event queue.
 *
 * Walks each EQ (including the extra MCC EQ when MSI-X is enabled),
 * consumes all valid entries, and rings the doorbell once per EQ with
 * the number of entries processed so the hardware's credit accounting
 * stays in sync.  Used during port teardown.
 */
static void hwi_purge_eq(struct beiscsi_hba *phba)
{
	struct hwi_controller *phwi_ctrlr;
	struct hwi_context_memory *phwi_context;
	struct be_queue_info *eq;
	struct be_eq_entry *eqe = NULL;
	int i, eq_msix;
	unsigned int num_processed;

	phwi_ctrlr = phba->phwi_ctrlr;
	phwi_context = phwi_ctrlr->phwi_ctxt;
	/* MSI-X has one extra EQ beyond the per-CPU ones. */
	if (phba->msix_enabled)
		eq_msix = 1;
	else
		eq_msix = 0;

	for (i = 0; i < (phba->num_cpus + eq_msix); i++) {
		eq = &phwi_context->be_eq[i].q;
		eqe = queue_tail_node(eq);
		num_processed = 0;
		/* Consume entries until the valid bit goes clear. */
		while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32]
					& EQE_VALID_MASK) {
			AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0);
			queue_tail_inc(eq);
			eqe = queue_tail_node(eq);
			num_processed++;
		}

		if (num_processed)
			hwi_ring_eq_db(phba, eq->id, 1,	num_processed, 1, 1);
	}
}
3680
3681static void beiscsi_clean_port(struct beiscsi_hba *phba)
3682{
3683	int mgmt_status;
3684
3685	mgmt_status = mgmt_epfw_cleanup(phba, CMD_CONNECTION_CHUTE_0);
3686	if (mgmt_status)
3687		shost_printk(KERN_WARNING, phba->shost,
3688			     "mgmt_epfw_cleanup FAILED\n");
3689
3690	hwi_purge_eq(phba);
3691	hwi_cleanup(phba);
3692	kfree(phba->io_sgl_hndl_base);
3693	kfree(phba->eh_sgl_hndl_base);
3694	kfree(phba->cid_array);
3695	kfree(phba->ep_array);
3696}
3697
/*
 * beiscsi_offload_connection - push negotiated session parameters to HW.
 *
 * Builds a "target context update" WRB from the offload parameters
 * (burst lengths, ERL, digest/R2T/immediate-data flags, exp_statsn),
 * points it at the global pad buffer, byte-swaps it for the adapter and
 * rings the TX doorbell to post it.
 */
void
beiscsi_offload_connection(struct beiscsi_conn *beiscsi_conn,
			   struct beiscsi_offload_params *params)
{
	struct wrb_handle *pwrb_handle;
	struct iscsi_target_context_update_wrb *pwrb = NULL;
	struct be_mem_descriptor *mem_descr;
	struct beiscsi_hba *phba = beiscsi_conn->phba;
	u32 doorbell = 0;

	/*
	 * We can always use 0 here because it is reserved by libiscsi for
	 * login/startup related tasks.
	 */
	pwrb_handle = alloc_wrb_handle(phba, (beiscsi_conn->beiscsi_conn_cid -
				       phba->fw_config.iscsi_cid_start));
	pwrb = (struct iscsi_target_context_update_wrb *)pwrb_handle->pwrb;
	memset(pwrb, 0, sizeof(*pwrb));
	/* Copy the negotiated burst-length parameters into the WRB. */
	AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb,
		      max_burst_length, pwrb, params->dw[offsetof
		      (struct amap_beiscsi_offload_params,
		      max_burst_length) / 32]);
	AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb,
		      max_send_data_segment_length, pwrb,
		      params->dw[offsetof(struct amap_beiscsi_offload_params,
		      max_send_data_segment_length) / 32]);
	AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb,
		      first_burst_length,
		      pwrb,
		      params->dw[offsetof(struct amap_beiscsi_offload_params,
		      first_burst_length) / 32]);

	/* Single-bit flags are masked out of their dword and shifted to
	 * bit 0 before being written into the WRB fields. */
	AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, erl, pwrb,
		      (params->dw[offsetof(struct amap_beiscsi_offload_params,
		      erl) / 32] & OFFLD_PARAMS_ERL));
	AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, dde, pwrb,
		      (params->dw[offsetof(struct amap_beiscsi_offload_params,
		      dde) / 32] & OFFLD_PARAMS_DDE) >> 2);
	AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, hde, pwrb,
		      (params->dw[offsetof(struct amap_beiscsi_offload_params,
		      hde) / 32] & OFFLD_PARAMS_HDE) >> 3);
	AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, ir2t, pwrb,
		      (params->dw[offsetof(struct amap_beiscsi_offload_params,
		      ir2t) / 32] & OFFLD_PARAMS_IR2T) >> 4);
	AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, imd, pwrb,
		      (params->dw[offsetof(struct amap_beiscsi_offload_params,
		       imd) / 32] & OFFLD_PARAMS_IMD) >> 5);
	AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, stat_sn,
		      pwrb,
		      (params->dw[offsetof(struct amap_beiscsi_offload_params,
		      exp_statsn) / 32] + 1));
	AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, type, pwrb,
		      0x7);
	AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, wrb_idx,
		      pwrb, pwrb_handle->wrb_index);
	AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, ptr2nextwrb,
		      pwrb, pwrb_handle->nxt_wrb_index);
	AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb,
			session_state, pwrb, 0);
	AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, compltonack,
		      pwrb, 1);
	AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, notpredblq,
		      pwrb, 0);
	AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, mode, pwrb,
		      0);

	/* Point the WRB at the shared global pad buffer. */
	mem_descr = phba->init_mem;
	mem_descr += ISCSI_MEM_GLOBAL_HEADER;

	AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb,
			pad_buffer_addr_hi, pwrb,
		      mem_descr->mem_array[0].bus_address.u.a32.address_hi);
	AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb,
			pad_buffer_addr_lo, pwrb,
		      mem_descr->mem_array[0].bus_address.u.a32.address_lo);

	be_dws_le_to_cpu(pwrb, sizeof(struct iscsi_target_context_update_wrb));

	/* Ring the TX doorbell: CID + WRB index + one entry posted. */
	doorbell |= beiscsi_conn->beiscsi_conn_cid & DB_WRB_POST_CID_MASK;
	doorbell |= (pwrb_handle->wrb_index & DB_DEF_PDU_WRB_INDEX_MASK)
			     << DB_DEF_PDU_WRB_INDEX_SHIFT;
	doorbell |= 1 << DB_DEF_PDU_NUM_POSTED_SHIFT;

	iowrite32(doorbell, phba->db_va + DB_TXULP0_OFFSET);
}
3783
/*
 * beiscsi_parse_pdu - recover the libiscsi task index (and session age)
 * from an itt.  The index is the itt cast back to an int; age is only
 * filled in when the caller asks for it.
 */
static void beiscsi_parse_pdu(struct iscsi_conn *conn, itt_t itt,
			      int *index, int *age)
{
	*index = (int)itt;
	if (age)
		*age = conn->session->age;
}
3791
3792/**
3793 * beiscsi_alloc_pdu - allocates pdu and related resources
3794 * @task: libiscsi task
3795 * @opcode: opcode of pdu for task
3796 *
3797 * This is called with the session lock held. It will allocate
3798 * the wrb and sgl if needed for the command. And it will prep
3799 * the pdu's itt. beiscsi_parse_pdu will later translate
3800 * the pdu itt to the libiscsi task itt.
3801 */
3802static int beiscsi_alloc_pdu(struct iscsi_task *task, uint8_t opcode)
3803{
3804	struct beiscsi_io_task *io_task = task->dd_data;
3805	struct iscsi_conn *conn = task->conn;
3806	struct beiscsi_conn *beiscsi_conn = conn->dd_data;
3807	struct beiscsi_hba *phba = beiscsi_conn->phba;
3808	struct hwi_wrb_context *pwrb_context;
3809	struct hwi_controller *phwi_ctrlr;
3810	itt_t itt;
3811	struct beiscsi_session *beiscsi_sess = beiscsi_conn->beiscsi_sess;
3812	dma_addr_t paddr;
3813
3814	io_task->cmd_bhs = pci_pool_alloc(beiscsi_sess->bhs_pool,
3815					  GFP_ATOMIC, &paddr);
3816	if (!io_task->cmd_bhs)
3817		return -ENOMEM;
3818	io_task->bhs_pa.u.a64.address = paddr;
3819	io_task->libiscsi_itt = (itt_t)task->itt;
3820	io_task->conn = beiscsi_conn;
3821
3822	task->hdr = (struct iscsi_hdr *)&io_task->cmd_bhs->iscsi_hdr;
3823	task->hdr_max = sizeof(struct be_cmd_bhs);
3824	io_task->psgl_handle = NULL;
3825	io_task->psgl_handle = NULL;
3826
3827	if (task->sc) {
3828		spin_lock(&phba->io_sgl_lock);
3829		io_task->psgl_handle = alloc_io_sgl_handle(phba);
3830		spin_unlock(&phba->io_sgl_lock);
3831		if (!io_task->psgl_handle)
3832			goto free_hndls;
3833		io_task->pwrb_handle = alloc_wrb_handle(phba,
3834					beiscsi_conn->beiscsi_conn_cid -
3835					phba->fw_config.iscsi_cid_start);
3836		if (!io_task->pwrb_handle)
3837			goto free_io_hndls;
3838	} else {
3839		io_task->scsi_cmnd = NULL;
3840		if ((opcode & ISCSI_OPCODE_MASK) == ISCSI_OP_LOGIN) {
3841			if (!beiscsi_conn->login_in_progress) {
3842				spin_lock(&phba->mgmt_sgl_lock);
3843				io_task->psgl_handle = (struct sgl_handle *)
3844						alloc_mgmt_sgl_handle(phba);
3845				spin_unlock(&phba->mgmt_sgl_lock);
3846				if (!io_task->psgl_handle)
3847					goto free_hndls;
3848
3849				beiscsi_conn->login_in_progress = 1;
3850				beiscsi_conn->plogin_sgl_handle =
3851							io_task->psgl_handle;
3852				io_task->pwrb_handle =
3853					alloc_wrb_handle(phba,
3854					beiscsi_conn->beiscsi_conn_cid -
3855					phba->fw_config.iscsi_cid_start);
3856				if (!io_task->pwrb_handle)
3857					goto free_io_hndls;
3858				beiscsi_conn->plogin_wrb_handle =
3859							io_task->pwrb_handle;
3860
3861			} else {
3862				io_task->psgl_handle =
3863						beiscsi_conn->plogin_sgl_handle;
3864				io_task->pwrb_handle =
3865						beiscsi_conn->plogin_wrb_handle;
3866			}
3867		} else {
3868			spin_lock(&phba->mgmt_sgl_lock);
3869			io_task->psgl_handle = alloc_mgmt_sgl_handle(phba);
3870			spin_unlock(&phba->mgmt_sgl_lock);
3871			if (!io_task->psgl_handle)
3872				goto free_hndls;
3873			io_task->pwrb_handle =
3874					alloc_wrb_handle(phba,
3875					beiscsi_conn->beiscsi_conn_cid -
3876					phba->fw_config.iscsi_cid_start);
3877			if (!io_task->pwrb_handle)
3878				goto free_mgmt_hndls;
3879
3880		}
3881	}
3882	itt = (itt_t) cpu_to_be32(((unsigned int)io_task->pwrb_handle->
3883				 wrb_index << 16) | (unsigned int)
3884				(io_task->psgl_handle->sgl_index));
3885	io_task->pwrb_handle->pio_handle = task;
3886
3887	io_task->cmd_bhs->iscsi_hdr.itt = itt;
3888	return 0;
3889
3890free_io_hndls:
3891	spin_lock(&phba->io_sgl_lock);
3892	free_io_sgl_handle(phba, io_task->psgl_handle);
3893	spin_unlock(&phba->io_sgl_lock);
3894	goto free_hndls;
3895free_mgmt_hndls:
3896	spin_lock(&phba->mgmt_sgl_lock);
3897	free_mgmt_sgl_handle(phba, io_task->psgl_handle);
3898	spin_unlock(&phba->mgmt_sgl_lock);
3899free_hndls:
3900	phwi_ctrlr = phba->phwi_ctrlr;
3901	pwrb_context = &phwi_ctrlr->wrb_context[
3902			beiscsi_conn->beiscsi_conn_cid -
3903			phba->fw_config.iscsi_cid_start];
3904	if (io_task->pwrb_handle)
3905		free_wrb_handle(phba, pwrb_context, io_task->pwrb_handle);
3906	io_task->pwrb_handle = NULL;
3907	pci_pool_free(beiscsi_sess->bhs_pool, io_task->cmd_bhs,
3908		      io_task->bhs_pa.u.a64.address);
3909	SE_DEBUG(DBG_LVL_1, "Alloc of SGL_ICD Failed\n");
3910	return -ENOMEM;
3911}
3912
3913static void beiscsi_cleanup_task(struct iscsi_task *task)
3914{
3915	struct beiscsi_io_task *io_task = task->dd_data;
3916	struct iscsi_conn *conn = task->conn;
3917	struct beiscsi_conn *beiscsi_conn = conn->dd_data;
3918	struct beiscsi_hba *phba = beiscsi_conn->phba;
3919	struct beiscsi_session *beiscsi_sess = beiscsi_conn->beiscsi_sess;
3920	struct hwi_wrb_context *pwrb_context;
3921	struct hwi_controller *phwi_ctrlr;
3922
3923	phwi_ctrlr = phba->phwi_ctrlr;
3924	pwrb_context = &phwi_ctrlr->wrb_context[beiscsi_conn->beiscsi_conn_cid
3925			- phba->fw_config.iscsi_cid_start];
3926	if (io_task->pwrb_handle) {
3927		free_wrb_handle(phba, pwrb_context, io_task->pwrb_handle);
3928		io_task->pwrb_handle = NULL;
3929	}
3930
3931	if (io_task->cmd_bhs) {
3932		pci_pool_free(beiscsi_sess->bhs_pool, io_task->cmd_bhs,
3933			      io_task->bhs_pa.u.a64.address);
3934	}
3935
3936	if (task->sc) {
3937		if (io_task->psgl_handle) {
3938			spin_lock(&phba->io_sgl_lock);
3939			free_io_sgl_handle(phba, io_task->psgl_handle);
3940			spin_unlock(&phba->io_sgl_lock);
3941			io_task->psgl_handle = NULL;
3942		}
3943	} else {
3944		if (task->hdr &&
3945		   ((task->hdr->opcode & ISCSI_OPCODE_MASK) == ISCSI_OP_LOGIN))
3946			return;
3947		if (io_task->psgl_handle) {
3948			spin_lock(&phba->mgmt_sgl_lock);
3949			free_mgmt_sgl_handle(phba, io_task->psgl_handle);
3950			spin_unlock(&phba->mgmt_sgl_lock);
3951			io_task->psgl_handle = NULL;
3952		}
3953	}
3954}
3955
/*
 * beiscsi_iotask - build a WRB for a SCSI data command and post it.
 * @task:     iSCSI task carrying the SCSI command
 * @sg:       mapped scatter/gather list for the data buffer
 * @num_sg:   number of entries in @sg
 * @xferlen:  total data transfer length in bytes
 * @writedir: non-zero for DMA_TO_DEVICE (write), zero for read
 *
 * Fills the pre-allocated WRB with the command parameters, programs the
 * SGL, converts the WRB to little-endian, and rings the TX doorbell.
 * Always returns 0.
 */
static int beiscsi_iotask(struct iscsi_task *task, struct scatterlist *sg,
			  unsigned int num_sg, unsigned int xferlen,
			  unsigned int writedir)
{

	struct beiscsi_io_task *io_task = task->dd_data;
	struct iscsi_conn *conn = task->conn;
	struct beiscsi_conn *beiscsi_conn = conn->dd_data;
	struct beiscsi_hba *phba = beiscsi_conn->phba;
	struct iscsi_wrb *pwrb = NULL;
	unsigned int doorbell = 0;

	pwrb = io_task->pwrb_handle->pwrb;
	io_task->cmd_bhs->iscsi_hdr.exp_statsn = 0;
	io_task->bhs_len = sizeof(struct be_cmd_bhs);

	if (writedir) {
		/*
		 * For writes, pre-build the Data-Out PDU header the hardware
		 * will use for unsolicited data (48 bytes = BHS size;
		 * presumably sizeof the data PDU header -- confirm).
		 */
		memset(&io_task->cmd_bhs->iscsi_data_pdu, 0, 48);
		AMAP_SET_BITS(struct amap_pdu_data_out, itt,
			      &io_task->cmd_bhs->iscsi_data_pdu,
			      (unsigned int)io_task->cmd_bhs->iscsi_hdr.itt);
		AMAP_SET_BITS(struct amap_pdu_data_out, opcode,
			      &io_task->cmd_bhs->iscsi_data_pdu,
			      ISCSI_OPCODE_SCSI_DATA_OUT);
		AMAP_SET_BITS(struct amap_pdu_data_out, final_bit,
			      &io_task->cmd_bhs->iscsi_data_pdu, 1);
		AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb,
			      INI_WR_CMD);
		AMAP_SET_BITS(struct amap_iscsi_wrb, dsp, pwrb, 1);
	} else {
		AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb,
			      INI_RD_CMD);
		AMAP_SET_BITS(struct amap_iscsi_wrb, dsp, pwrb, 0);
	}
	/* Copy the LUN from the command BHS into the Data-Out template */
	memcpy(&io_task->cmd_bhs->iscsi_data_pdu.
	       dw[offsetof(struct amap_pdu_data_out, lun) / 32],
	       &io_task->cmd_bhs->iscsi_hdr.lun, sizeof(struct scsi_lun));

	AMAP_SET_BITS(struct amap_iscsi_wrb, lun, pwrb,
		      cpu_to_be16(*(unsigned short *)&io_task->cmd_bhs->iscsi_hdr.lun));
	AMAP_SET_BITS(struct amap_iscsi_wrb, r2t_exp_dtl, pwrb, xferlen);
	AMAP_SET_BITS(struct amap_iscsi_wrb, wrb_idx, pwrb,
		      io_task->pwrb_handle->wrb_index);
	AMAP_SET_BITS(struct amap_iscsi_wrb, cmdsn_itt, pwrb,
		      be32_to_cpu(task->cmdsn));
	AMAP_SET_BITS(struct amap_iscsi_wrb, sgl_icd_idx, pwrb,
		      io_task->psgl_handle->sgl_index);

	hwi_write_sgl(pwrb, sg, num_sg, io_task);

	AMAP_SET_BITS(struct amap_iscsi_wrb, ptr2nextwrb, pwrb,
		      io_task->pwrb_handle->nxt_wrb_index);
	/* WRB must be little-endian before the adapter reads it */
	be_dws_le_to_cpu(pwrb, sizeof(struct iscsi_wrb));

	/* Ring the TX doorbell: CID + WRB index + one WRB posted */
	doorbell |= beiscsi_conn->beiscsi_conn_cid & DB_WRB_POST_CID_MASK;
	doorbell |= (io_task->pwrb_handle->wrb_index &
		     DB_DEF_PDU_WRB_INDEX_MASK) << DB_DEF_PDU_WRB_INDEX_SHIFT;
	doorbell |= 1 << DB_DEF_PDU_NUM_POSTED_SHIFT;

	iowrite32(doorbell, phba->db_va + DB_TXULP0_OFFSET);
	return 0;
}
4018
/*
 * beiscsi_mtask - build a WRB for a management (non-SCSI-data) task.
 * @task: iSCSI task whose header opcode selects the WRB type
 *
 * Handles LOGIN, NOOP_OUT, TEXT, TMF and LOGOUT PDUs.  Fills the WRB,
 * copies the PDU into the task buffer via hwi_write_buffer(), converts
 * to little-endian, and rings the TX doorbell.
 *
 * Returns 0 on success, -EINVAL for an unsupported opcode.
 */
static int beiscsi_mtask(struct iscsi_task *task)
{
	struct beiscsi_io_task *io_task = task->dd_data;
	struct iscsi_conn *conn = task->conn;
	struct beiscsi_conn *beiscsi_conn = conn->dd_data;
	struct beiscsi_hba *phba = beiscsi_conn->phba;
	struct iscsi_wrb *pwrb = NULL;
	unsigned int doorbell = 0;
	unsigned int cid;

	cid = beiscsi_conn->beiscsi_conn_cid;
	pwrb = io_task->pwrb_handle->pwrb;
	memset(pwrb, 0, sizeof(*pwrb));
	AMAP_SET_BITS(struct amap_iscsi_wrb, cmdsn_itt, pwrb,
		      be32_to_cpu(task->cmdsn));
	AMAP_SET_BITS(struct amap_iscsi_wrb, wrb_idx, pwrb,
		      io_task->pwrb_handle->wrb_index);
	AMAP_SET_BITS(struct amap_iscsi_wrb, sgl_icd_idx, pwrb,
		      io_task->psgl_handle->sgl_index);

	switch (task->hdr->opcode & ISCSI_OPCODE_MASK) {
	case ISCSI_OP_LOGIN:
		AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb,
			      TGT_DM_CMD);
		AMAP_SET_BITS(struct amap_iscsi_wrb, dmsg, pwrb, 0);
		/* Login always goes out with cmdsn_itt of 1 */
		AMAP_SET_BITS(struct amap_iscsi_wrb, cmdsn_itt, pwrb, 1);
		hwi_write_buffer(pwrb, task);
		break;
	case ISCSI_OP_NOOP_OUT:
		/*
		 * A non-reserved TTT means this NOP is a reply to a
		 * target NOP-In and is sent as a discrete message.
		 */
		if (task->hdr->ttt != ISCSI_RESERVED_TAG) {
			AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb,
				      TGT_DM_CMD);
			AMAP_SET_BITS(struct amap_iscsi_wrb, cmdsn_itt,
				      pwrb, 0);
			AMAP_SET_BITS(struct amap_iscsi_wrb, dmsg, pwrb, 1);
		} else {
			AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb,
				      INI_RD_CMD);
			AMAP_SET_BITS(struct amap_iscsi_wrb, dmsg, pwrb, 0);
		}
		hwi_write_buffer(pwrb, task);
		break;
	case ISCSI_OP_TEXT:
		AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb,
			      TGT_DM_CMD);
		AMAP_SET_BITS(struct amap_iscsi_wrb, dmsg, pwrb, 0);
		hwi_write_buffer(pwrb, task);
		break;
	case ISCSI_OP_SCSI_TMFUNC:
		AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb,
			      INI_TMF_CMD);
		AMAP_SET_BITS(struct amap_iscsi_wrb, dmsg, pwrb, 0);
		hwi_write_buffer(pwrb, task);
		break;
	case ISCSI_OP_LOGOUT:
		AMAP_SET_BITS(struct amap_iscsi_wrb, dmsg, pwrb, 0);
		AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb,
			      HWH_TYPE_LOGOUT);
		hwi_write_buffer(pwrb, task);
		break;

	default:
		SE_DEBUG(DBG_LVL_1, "opcode =%d Not supported\n",
			 task->hdr->opcode & ISCSI_OPCODE_MASK);
		return -EINVAL;
	}

	AMAP_SET_BITS(struct amap_iscsi_wrb, r2t_exp_dtl, pwrb,
		      task->data_count);
	AMAP_SET_BITS(struct amap_iscsi_wrb, ptr2nextwrb, pwrb,
		      io_task->pwrb_handle->nxt_wrb_index);
	/* WRB must be little-endian before the adapter reads it */
	be_dws_le_to_cpu(pwrb, sizeof(struct iscsi_wrb));

	/* Ring the TX doorbell: CID + WRB index + one WRB posted */
	doorbell |= cid & DB_WRB_POST_CID_MASK;
	doorbell |= (io_task->pwrb_handle->wrb_index &
		     DB_DEF_PDU_WRB_INDEX_MASK) << DB_DEF_PDU_WRB_INDEX_SHIFT;
	doorbell |= 1 << DB_DEF_PDU_NUM_POSTED_SHIFT;
	iowrite32(doorbell, phba->db_va + DB_TXULP0_OFFSET);
	return 0;
}
4099
4100static int beiscsi_task_xmit(struct iscsi_task *task)
4101{
4102	struct beiscsi_io_task *io_task = task->dd_data;
4103	struct scsi_cmnd *sc = task->sc;
4104	struct scatterlist *sg;
4105	int num_sg;
4106	unsigned int  writedir = 0, xferlen = 0;
4107
4108	if (!sc)
4109		return beiscsi_mtask(task);
4110
4111	io_task->scsi_cmnd = sc;
4112	num_sg = scsi_dma_map(sc);
4113	if (num_sg < 0) {
4114		SE_DEBUG(DBG_LVL_1, " scsi_dma_map Failed\n")
4115		return num_sg;
4116	}
4117	xferlen = scsi_bufflen(sc);
4118	sg = scsi_sglist(sc);
4119	if (sc->sc_data_direction == DMA_TO_DEVICE) {
4120		writedir = 1;
4121		SE_DEBUG(DBG_LVL_4, "task->imm_count=0x%08x\n",
4122			 task->imm_count);
4123	} else
4124		writedir = 0;
4125	return beiscsi_iotask(task, sg, num_sg, xferlen, writedir);
4126}
4127
/*
 * beiscsi_quiesce - quiesce and tear down the adapter.
 * @phba: HBA being removed or shut down
 *
 * Disables interrupts, releases IRQs (per-EQ MSI-X vectors or the legacy
 * INTx line), stops the workqueue and iopoll instances, cleans the port,
 * clears the crashdump semaphore bit, and unmaps/frees PCI resources.
 * The teardown order mirrors the setup order in beiscsi_dev_probe().
 */
static void beiscsi_quiesce(struct beiscsi_hba *phba)
{
	struct hwi_controller *phwi_ctrlr;
	struct hwi_context_memory *phwi_context;
	struct be_eq_obj *pbe_eq;
	unsigned int i, msix_vec;
	u8 *real_offset = 0;
	u32 value = 0;

	phwi_ctrlr = phba->phwi_ctrlr;
	phwi_context = phwi_ctrlr->phwi_ctxt;
	hwi_disable_intr(phba);
	if (phba->msix_enabled) {
		/* num_cpus + 1 vectors were requested in beiscsi_msix_enable */
		for (i = 0; i <= phba->num_cpus; i++) {
			msix_vec = phba->msix_entries[i].vector;
			free_irq(msix_vec, &phwi_context->be_eq[i]);
			kfree(phba->msi_name[i]);
		}
	} else
		if (phba->pcidev->irq)
			free_irq(phba->pcidev->irq, phba);
	pci_disable_msix(phba->pcidev);
	destroy_workqueue(phba->wq);
	if (blk_iopoll_enabled)
		for (i = 0; i < phba->num_cpus; i++) {
			pbe_eq = &phwi_context->be_eq[i];
			blk_iopoll_disable(&pbe_eq->iopoll);
		}

	beiscsi_clean_port(phba);
	beiscsi_free_mem(phba);
	real_offset = (u8 *)phba->csr_va + MPU_EP_SEMAPHORE;

	value = readl((void *)real_offset);

	/* Clear the "driver loaded" semaphore bit set during probe */
	if (value & 0x00010000) {
		value &= 0xfffeffff;
		writel(value, (void *)real_offset);
	}
	beiscsi_unmap_pci_function(phba);
	pci_free_consistent(phba->pcidev,
			    phba->ctrl.mbox_mem_alloced.size,
			    phba->ctrl.mbox_mem_alloced.va,
			    phba->ctrl.mbox_mem_alloced.dma);
}
4173
4174static void beiscsi_remove(struct pci_dev *pcidev)
4175{
4176
4177	struct beiscsi_hba *phba = NULL;
4178
4179	phba = pci_get_drvdata(pcidev);
4180	if (!phba) {
4181		dev_err(&pcidev->dev, "beiscsi_remove called with no phba\n");
4182		return;
4183	}
4184
4185	beiscsi_quiesce(phba);
4186	iscsi_boot_destroy_kset(phba->boot_kset);
4187	iscsi_host_remove(phba->shost);
4188	pci_dev_put(phba->pcidev);
4189	iscsi_host_free(phba->shost);
4190	pci_disable_device(pcidev);
4191}
4192
4193static void beiscsi_shutdown(struct pci_dev *pcidev)
4194{
4195
4196	struct beiscsi_hba *phba = NULL;
4197
4198	phba = (struct beiscsi_hba *)pci_get_drvdata(pcidev);
4199	if (!phba) {
4200		dev_err(&pcidev->dev, "beiscsi_shutdown called with no phba\n");
4201		return;
4202	}
4203
4204	beiscsi_quiesce(phba);
4205	pci_disable_device(pcidev);
4206}
4207
4208static void beiscsi_msix_enable(struct beiscsi_hba *phba)
4209{
4210	int i, status;
4211
4212	for (i = 0; i <= phba->num_cpus; i++)
4213		phba->msix_entries[i].entry = i;
4214
4215	status = pci_enable_msix(phba->pcidev, phba->msix_entries,
4216				 (phba->num_cpus + 1));
4217	if (!status)
4218		phba->msix_enabled = true;
4219
4220	return;
4221}
4222
/*
 * beiscsi_dev_probe - PCI probe callback: bring up one be2iscsi adapter.
 * @pcidev: PCI device being probed
 * @id:     matching entry from beiscsi_pci_id_table
 *
 * Sequence: enable PCI, allocate the HBA/SCSI host, detect the ASIC
 * generation, enable MSI-X, initialise the mailbox control structures,
 * handle the crashdump-mode semaphore, read firmware config, initialise
 * the port, set up MCC tags, the workqueue, iopoll instances and IRQs,
 * and finally enable interrupts and export boot info.
 *
 * Returns 0 on success, negative errno on failure.  The error labels
 * unwind in the reverse order of setup.
 */
static int __devinit beiscsi_dev_probe(struct pci_dev *pcidev,
				const struct pci_device_id *id)
{
	struct beiscsi_hba *phba = NULL;
	struct hwi_controller *phwi_ctrlr;
	struct hwi_context_memory *phwi_context;
	struct be_eq_obj *pbe_eq;
	int ret, num_cpus, i;
	u8 *real_offset = 0;
	u32 value = 0;

	ret = beiscsi_enable_pci(pcidev);
	if (ret < 0) {
		dev_err(&pcidev->dev, "beiscsi_dev_probe-"
			" Failed to enable pci device\n");
		return ret;
	}

	phba = beiscsi_hba_alloc(pcidev);
	if (!phba) {
		dev_err(&pcidev->dev, "beiscsi_dev_probe-"
			" Failed in beiscsi_hba_alloc\n");
		goto disable_pci;
	}

	/* Map the PCI device ID to the ASIC generation */
	switch (pcidev->device) {
	case BE_DEVICE_ID1:
	case OC_DEVICE_ID1:
	case OC_DEVICE_ID2:
		phba->generation = BE_GEN2;
		break;
	case BE_DEVICE_ID2:
	case OC_DEVICE_ID3:
		phba->generation = BE_GEN3;
		break;
	default:
		phba->generation = 0;
	}

	/* One event queue per CPU when MSI-X is in use, otherwise one */
	if (enable_msix)
		num_cpus = find_num_cpus();
	else
		num_cpus = 1;
	phba->num_cpus = num_cpus;
	SE_DEBUG(DBG_LVL_8, "num_cpus = %d\n", phba->num_cpus);

	if (enable_msix)
		beiscsi_msix_enable(phba);
	ret = be_ctrl_init(phba, pcidev);
	if (ret) {
		shost_printk(KERN_ERR, phba->shost, "beiscsi_dev_probe-"
				"Failed in be_ctrl_init\n");
		goto hba_free;
	}

	/*
	 * First HBA only: check the MPU semaphore.  If the "driver loaded"
	 * bit is already set we are coming up after a crash (kdump) and
	 * must reset the function; otherwise claim the semaphore bit.
	 */
	if (!num_hba) {
		real_offset = (u8 *)phba->csr_va + MPU_EP_SEMAPHORE;
		value = readl((void *)real_offset);
		if (value & 0x00010000) {
			gcrashmode++;
			shost_printk(KERN_ERR, phba->shost,
				"Loading Driver in crashdump mode\n");
			ret = beiscsi_cmd_reset_function(phba);
			if (ret) {
				shost_printk(KERN_ERR, phba->shost,
					"Reset Failed. Aborting Crashdump\n");
				goto hba_free;
			}
			ret = be_chk_reset_complete(phba);
			if (ret) {
				shost_printk(KERN_ERR, phba->shost,
					"Failed to get out of reset."
					"Aborting Crashdump\n");
				goto hba_free;
			}
		} else {
			value |= 0x00010000;
			writel(value, (void *)real_offset);
			num_hba++;
		}
	}

	spin_lock_init(&phba->io_sgl_lock);
	spin_lock_init(&phba->mgmt_sgl_lock);
	spin_lock_init(&phba->isr_lock);
	ret = mgmt_get_fw_config(&phba->ctrl, phba);
	if (ret != 0) {
		shost_printk(KERN_ERR, phba->shost,
			     "Error getting fw config\n");
		goto free_port;
	}
	phba->shost->max_id = phba->fw_config.iscsi_cid_count;
	beiscsi_get_params(phba);
	phba->shost->can_queue = phba->params.ios_per_ctrl;
	ret = beiscsi_init_port(phba);
	if (ret < 0) {
		shost_printk(KERN_ERR, phba->shost, "beiscsi_dev_probe-"
			     "Failed in beiscsi_init_port\n");
		goto free_port;
	}

	/* MCC tags are 1-based; index 0 is never used */
	for (i = 0; i < MAX_MCC_CMD ; i++) {
		init_waitqueue_head(&phba->ctrl.mcc_wait[i + 1]);
		phba->ctrl.mcc_tag[i] = i + 1;
		phba->ctrl.mcc_numtag[i + 1] = 0;
		phba->ctrl.mcc_tag_available++;
	}

	phba->ctrl.mcc_alloc_index = phba->ctrl.mcc_free_index = 0;

	snprintf(phba->wq_name, sizeof(phba->wq_name), "beiscsi_q_irq%u",
		 phba->shost->host_no);
	phba->wq = alloc_workqueue(phba->wq_name, WQ_MEM_RECLAIM, 1);
	if (!phba->wq) {
		shost_printk(KERN_ERR, phba->shost, "beiscsi_dev_probe-"
				"Failed to allocate work queue\n");
		goto free_twq;
	}

	INIT_WORK(&phba->work_cqs, beiscsi_process_all_cqs);

	phwi_ctrlr = phba->phwi_ctrlr;
	phwi_context = phwi_ctrlr->phwi_ctxt;
	if (blk_iopoll_enabled) {
		for (i = 0; i < phba->num_cpus; i++) {
			pbe_eq = &phwi_context->be_eq[i];
			blk_iopoll_init(&pbe_eq->iopoll, be_iopoll_budget,
					be_iopoll);
			blk_iopoll_enable(&pbe_eq->iopoll);
		}
	}
	ret = beiscsi_init_irqs(phba);
	if (ret < 0) {
		shost_printk(KERN_ERR, phba->shost, "beiscsi_dev_probe-"
			     "Failed to beiscsi_init_irqs\n");
		goto free_blkenbld;
	}
	hwi_enable_intr(phba);

	if (beiscsi_setup_boot_info(phba))
		/*
		 * log error but continue, because we may not be using
		 * iscsi boot.
		 */
		shost_printk(KERN_ERR, phba->shost, "Could not set up "
			     "iSCSI boot info.");

	SE_DEBUG(DBG_LVL_8, "\n\n\n SUCCESS - DRIVER LOADED\n\n\n");
	return 0;

free_blkenbld:
	destroy_workqueue(phba->wq);
	if (blk_iopoll_enabled)
		for (i = 0; i < phba->num_cpus; i++) {
			pbe_eq = &phwi_context->be_eq[i];
			blk_iopoll_disable(&pbe_eq->iopoll);
		}
free_twq:
	beiscsi_clean_port(phba);
	beiscsi_free_mem(phba);
free_port:
	/* Release the "driver loaded" semaphore bit if we claimed it */
	real_offset = (u8 *)phba->csr_va + MPU_EP_SEMAPHORE;

	value = readl((void *)real_offset);

	if (value & 0x00010000) {
		value &= 0xfffeffff;
		writel(value, (void *)real_offset);
	}

	pci_free_consistent(phba->pcidev,
			    phba->ctrl.mbox_mem_alloced.size,
			    phba->ctrl.mbox_mem_alloced.va,
			   phba->ctrl.mbox_mem_alloced.dma);
	beiscsi_unmap_pci_function(phba);
hba_free:
	if (phba->msix_enabled)
		pci_disable_msix(phba->pcidev);
	iscsi_host_remove(phba->shost);
	pci_dev_put(phba->pcidev);
	iscsi_host_free(phba->shost);
disable_pci:
	pci_disable_device(pcidev);
	return ret;
}
4408
/*
 * iSCSI transport template registered with the iscsi transport class.
 * Session/connection lifecycle and parameter handling are implemented
 * locally where the hardware offload requires it; the rest is delegated
 * to the generic libiscsi helpers.
 */
struct iscsi_transport beiscsi_iscsi_transport = {
	.owner = THIS_MODULE,
	.name = DRV_NAME,
	.caps = CAP_RECOVERY_L0 | CAP_HDRDGST | CAP_TEXT_NEGO |
		CAP_MULTI_R2T | CAP_DATADGST | CAP_DATA_PATH_OFFLOAD,
	.create_session = beiscsi_session_create,
	.destroy_session = beiscsi_session_destroy,
	.create_conn = beiscsi_conn_create,
	.bind_conn = beiscsi_conn_bind,
	.destroy_conn = iscsi_conn_teardown,
	.attr_is_visible = be2iscsi_attr_is_visible,
	.set_param = beiscsi_set_param,
	.get_conn_param = iscsi_conn_get_param,
	.get_session_param = iscsi_session_get_param,
	.get_host_param = beiscsi_get_host_param,
	.start_conn = beiscsi_conn_start,
	.stop_conn = iscsi_conn_stop,
	.send_pdu = iscsi_conn_send_pdu,
	.xmit_task = beiscsi_task_xmit,
	.cleanup_task = beiscsi_cleanup_task,
	.alloc_pdu = beiscsi_alloc_pdu,
	.parse_pdu_itt = beiscsi_parse_pdu,
	.get_stats = beiscsi_conn_get_stats,
	.get_ep_param = beiscsi_ep_get_param,
	.ep_connect = beiscsi_ep_connect,
	.ep_poll = beiscsi_ep_poll,
	.ep_disconnect = beiscsi_ep_disconnect,
	.session_recovery_timedout = iscsi_session_recovery_timedout,
};
4438
/* PCI driver hooks; device IDs come from beiscsi_pci_id_table (be_main.h) */
static struct pci_driver beiscsi_pci_driver = {
	.name = DRV_NAME,
	.probe = beiscsi_dev_probe,
	.remove = beiscsi_remove,
	.shutdown = beiscsi_shutdown,
	.id_table = beiscsi_pci_id_table
};
4446
4447
4448static int __init beiscsi_module_init(void)
4449{
4450	int ret;
4451
4452	beiscsi_scsi_transport =
4453			iscsi_register_transport(&beiscsi_iscsi_transport);
4454	if (!beiscsi_scsi_transport) {
4455		SE_DEBUG(DBG_LVL_1,
4456			 "beiscsi_module_init - Unable to  register beiscsi"
4457			 "transport.\n");
4458		return -ENOMEM;
4459	}
4460	SE_DEBUG(DBG_LVL_8, "In beiscsi_module_init, tt=%p\n",
4461		 &beiscsi_iscsi_transport);
4462
4463	ret = pci_register_driver(&beiscsi_pci_driver);
4464	if (ret) {
4465		SE_DEBUG(DBG_LVL_1,
4466			 "beiscsi_module_init - Unable to  register"
4467			 "beiscsi pci driver.\n");
4468		goto unregister_iscsi_transport;
4469	}
4470	return 0;
4471
4472unregister_iscsi_transport:
4473	iscsi_unregister_transport(&beiscsi_iscsi_transport);
4474	return ret;
4475}
4476
/*
 * beiscsi_module_exit - module unload: unregister from the PCI core
 * first (which triggers beiscsi_remove for each bound device), then
 * drop the iSCSI transport registration.
 */
static void __exit beiscsi_module_exit(void)
{
	pci_unregister_driver(&beiscsi_pci_driver);
	iscsi_unregister_transport(&beiscsi_iscsi_transport);
}

module_init(beiscsi_module_init);
module_exit(beiscsi_module_exit);
4485