be_main.c revision 0ecb0b45f22df911c564070b64af21db36934f0f
/**
 * Copyright (C) 2005 - 2009 ServerEngines
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation.  The full GNU General
 * Public License is included in this distribution in the file called COPYING.
 *
 * Written by: Jayamohan Kallickal (jayamohank@serverengines.com)
 *
 * Contact Information:
 * linux-drivers@serverengines.com
 *
 * ServerEngines
 * 209 N. Fair Oaks Ave
 * Sunnyvale, CA 94085
 *
 */
#include <linux/reboot.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/blkdev.h>
#include <linux/pci.h>
#include <linux/string.h>
#include <linux/kernel.h>
#include <linux/semaphore.h>

#include <scsi/libiscsi.h>
#include <scsi/scsi_transport_iscsi.h>
#include <scsi/scsi_transport.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi.h>
#include "be_main.h"
#include "be_iscsi.h"
#include "be_mgmt.h"

static unsigned int be_iopoll_budget = 10;
static unsigned int be_max_phys_size = 64;
static unsigned int enable_msix = 1;
static unsigned int ring_mode;

MODULE_DESCRIPTION(DRV_DESC " " BUILD_STR);
MODULE_AUTHOR("ServerEngines Corporation");
MODULE_LICENSE("GPL");
module_param(be_iopoll_budget, uint, 0);
module_param(enable_msix, uint, 0);
module_param(be_max_phys_size, uint, S_IRUGO);
MODULE_PARM_DESC(be_max_phys_size, "Maximum Size (In Kilobytes) of physically"
				   " contiguous memory that can be allocated."
				   " Range is 16 - 128");

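/**
 * beiscsi_slave_configure - limit segment size for a new scsi_device
 * @sdev: The scsi_device being configured
 *
 * Caps the maximum DMA segment size of the device's request queue
 * at 64KB.
 */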
static int beiscsi_slave_configure(struct scsi_device *sdev)
{
	blk_queue_max_segment_size(sdev->request_queue, 65536);
	return 0;
}

/*------------------- PCI Driver operations and data ----------------- */
static DEFINE_PCI_DEVICE_TABLE(beiscsi_pci_id_table) = {
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID3) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID4) },
	{ 0 }
};
MODULE_DEVICE_TABLE(pci, beiscsi_pci_id_table);

static struct scsi_host_template beiscsi_sht = {
	.module = THIS_MODULE,
	.name = "ServerEngines 10Gbe open-iscsi Initiator Driver",
	.proc_name = DRV_NAME,
	.queuecommand = iscsi_queuecommand,
	.eh_abort_handler = iscsi_eh_abort,
	.change_queue_depth = iscsi_change_queue_depth,
	.slave_configure = beiscsi_slave_configure,
	.target_alloc = iscsi_target_alloc,
	.eh_device_reset_handler = iscsi_eh_device_reset,
	.eh_target_reset_handler = iscsi_eh_target_reset,
	.sg_tablesize = BEISCSI_SGLIST_ELEMENTS,
	.can_queue = BE2_IO_DEPTH,
	.this_id = -1,
	.max_sectors = BEISCSI_MAX_SECTORS,
	.cmd_per_lun = BEISCSI_CMD_PER_LUN,
	.use_clustering = ENABLE_CLUSTERING,
};

static struct scsi_transport_template *beiscsi_scsi_transport;

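/**
 * beiscsi_hba_alloc - allocate the Scsi_Host and per-HBA private data
 * @pcidev: PCI device for this adapter
 *
 * Allocates an iSCSI host with the beiscsi_hba as its private data,
 * initializes the host limits and adds it to the midlayer. Returns the
 * new beiscsi_hba on success, NULL on failure.
 */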
static struct beiscsi_hba *beiscsi_hba_alloc(struct pci_dev *pcidev)
{
	struct beiscsi_hba *phba;
	struct Scsi_Host *shost;

	shost = iscsi_host_alloc(&beiscsi_sht, sizeof(*phba), 0);
	if (!shost) {
		dev_err(&pcidev->dev, "beiscsi_hba_alloc - "
			"iscsi_host_alloc failed\n");
		return NULL;
	}
	shost->dma_boundary = pcidev->dma_mask;
	shost->max_id = BE2_MAX_SESSIONS;
	shost->max_channel = 0;
	shost->max_cmd_len = BEISCSI_MAX_CMD_LEN;
	shost->max_lun = BEISCSI_NUM_MAX_LUN;
	shost->transportt = beiscsi_scsi_transport;
	phba = iscsi_host_priv(shost);
	memset(phba, 0, sizeof(*phba));
	phba->shost = shost;
	phba->pcidev = pci_dev_get(pcidev);
	pci_set_drvdata(pcidev, phba);

	if (iscsi_host_add(shost, &phba->pcidev->dev))
		goto free_devices;
	return phba;

free_devices:
	pci_dev_put(phba->pcidev);
	iscsi_host_free(phba->shost);
	return NULL;
}

static void beiscsi_unmap_pci_function(struct beiscsi_hba *phba)
{
	if (phba->csr_va) {
		iounmap(phba->csr_va);
		phba->csr_va = NULL;
	}
	if (phba->db_va) {
		iounmap(phba->db_va);
		phba->db_va = NULL;
	}
	if (phba->pci_va) {
		iounmap(phba->pci_va);
		phba->pci_va = NULL;
	}
}

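/**
 * beiscsi_map_pci_bars - ioremap the CSR, doorbell and config BARs
 * @phba: The hba being set up
 * @pcidev: PCI device for this adapter
 *
 * Maps BAR 2 (CSR), BAR 4 (doorbells) and BAR 1 (PCI config) and
 * records both the virtual and bus addresses. On any failure the
 * mappings made so far are torn down and -ENOMEM is returned.
 */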
static int beiscsi_map_pci_bars(struct beiscsi_hba *phba,
				struct pci_dev *pcidev)
{
	u8 __iomem *addr;

	addr = ioremap_nocache(pci_resource_start(pcidev, 2),
			       pci_resource_len(pcidev, 2));
	if (addr == NULL)
		return -ENOMEM;
	phba->ctrl.csr = addr;
	phba->csr_va = addr;
	phba->csr_pa.u.a64.address = pci_resource_start(pcidev, 2);

	addr = ioremap_nocache(pci_resource_start(pcidev, 4), 128 * 1024);
	if (addr == NULL)
		goto pci_map_err;
	phba->ctrl.db = addr;
	phba->db_va = addr;
	phba->db_pa.u.a64.address = pci_resource_start(pcidev, 4);

	addr = ioremap_nocache(pci_resource_start(pcidev, 1),
			       pci_resource_len(pcidev, 1));
	if (addr == NULL)
		goto pci_map_err;
	phba->ctrl.pcicfg = addr;
	phba->pci_va = addr;
	phba->pci_pa.u.a64.address = pci_resource_start(pcidev, 1);
	return 0;

pci_map_err:
	beiscsi_unmap_pci_function(phba);
	return -ENOMEM;
}

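/**
 * beiscsi_enable_pci - enable the device and set up DMA masking
 * @pcidev: PCI device for this adapter
 *
 * Enables the PCI device, makes it the bus master and tries a 64-bit
 * consistent DMA mask first, falling back to 32-bit.
 */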
static int beiscsi_enable_pci(struct pci_dev *pcidev)
{
	int ret;

	ret = pci_enable_device(pcidev);
	if (ret) {
		dev_err(&pcidev->dev, "beiscsi_enable_pci - "
			"enable device failed\n");
		return ret;
	}

	pci_set_master(pcidev);
	if (pci_set_consistent_dma_mask(pcidev, DMA_BIT_MASK(64))) {
		ret = pci_set_consistent_dma_mask(pcidev, DMA_BIT_MASK(32));
		if (ret) {
			dev_err(&pcidev->dev, "Could not set PCI DMA Mask\n");
			pci_disable_device(pcidev);
			return ret;
		}
	}
	return 0;
}

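/**
 * be_ctrl_init - set up the adapter control structure
 * @phba: The hba being set up
 * @pdev: PCI device for this adapter
 *
 * Maps the PCI BARs, allocates DMA-coherent memory for the mailbox
 * (aligned to 16 bytes as the hardware requires) and initializes the
 * mailbox and MCC locks.
 */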
static int be_ctrl_init(struct beiscsi_hba *phba, struct pci_dev *pdev)
{
	struct be_ctrl_info *ctrl = &phba->ctrl;
	struct be_dma_mem *mbox_mem_alloc = &ctrl->mbox_mem_alloced;
	struct be_dma_mem *mbox_mem_align = &ctrl->mbox_mem;
	int status = 0;

	ctrl->pdev = pdev;
	status = beiscsi_map_pci_bars(phba, pdev);
	if (status)
		return status;
	mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
	mbox_mem_alloc->va = pci_alloc_consistent(pdev,
						  mbox_mem_alloc->size,
						  &mbox_mem_alloc->dma);
	if (!mbox_mem_alloc->va) {
		beiscsi_unmap_pci_function(phba);
		status = -ENOMEM;
		return status;
	}

	mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
	mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
	mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
	memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));
	spin_lock_init(&ctrl->mbox_lock);
	spin_lock_init(&ctrl->mcc_lock);
	spin_lock_init(&ctrl->mcc_cq_lock);

	return status;
}

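/**
 * beiscsi_get_params - derive driver limits from the firmware config
 * @phba: The hba to fill in
 *
 * Computes the per-controller I/O, connection and queue-entry limits
 * from the counts reported by firmware. The EQ is sized in multiples
 * of 512 entries, with a floor of 1024.
 */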
static void beiscsi_get_params(struct beiscsi_hba *phba)
{
	phba->params.ios_per_ctrl = (phba->fw_config.iscsi_icd_count
				    - (phba->fw_config.iscsi_cid_count
				    + BE2_TMFS
				    + BE2_NOPOUT_REQ));
	phba->params.cxns_per_ctrl = phba->fw_config.iscsi_cid_count;
	phba->params.asyncpdus_per_ctrl = phba->fw_config.iscsi_cid_count;
	phba->params.icds_per_ctrl = phba->fw_config.iscsi_icd_count;
	phba->params.num_sge_per_io = BE2_SGE;
	phba->params.defpdu_hdr_sz = BE2_DEFPDU_HDR_SZ;
	phba->params.defpdu_data_sz = BE2_DEFPDU_DATA_SZ;
	phba->params.eq_timer = 64;
	phba->params.num_eq_entries =
	    (((BE2_CMDS_PER_CXN * 2 + phba->fw_config.iscsi_cid_count * 2
				    + BE2_TMFS) / 512) + 1) * 512;
	phba->params.num_eq_entries = (phba->params.num_eq_entries < 1024)
				? 1024 : phba->params.num_eq_entries;
	SE_DEBUG(DBG_LVL_8, "phba->params.num_eq_entries=%d\n",
		 phba->params.num_eq_entries);
	phba->params.num_cq_entries =
	    (((BE2_CMDS_PER_CXN * 2 + phba->fw_config.iscsi_cid_count * 2
				    + BE2_TMFS) / 512) + 1) * 512;
	phba->params.wrbs_per_cxn = 256;
}

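/**
 * hwi_ring_eq_db - ring an event queue doorbell
 * @phba: The hba with the doorbell area mapped
 * @id: The event queue id
 * @clr_interrupt: Whether to clear the interrupt
 * @num_processed: Number of EQ entries consumed ("popped")
 * @rearm: Whether to re-arm the EQ
 * @event: Whether to count events
 */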
static void hwi_ring_eq_db(struct beiscsi_hba *phba,
			   unsigned int id, unsigned int clr_interrupt,
			   unsigned int num_processed,
			   unsigned char rearm, unsigned char event)
{
	u32 val = 0;
	val |= id & DB_EQ_RING_ID_MASK;
	if (rearm)
		val |= 1 << DB_EQ_REARM_SHIFT;
	if (clr_interrupt)
		val |= 1 << DB_EQ_CLR_SHIFT;
	if (event)
		val |= 1 << DB_EQ_EVNT_SHIFT;
	val |= num_processed << DB_EQ_NUM_POPPED_SHIFT;
	iowrite32(val, phba->db_va + DB_EQ_OFFSET);
}

/**
 * be_isr_mcc - interrupt handler for the MCC event queue (MSI-X)
 * @irq: Not used
 * @dev_id: Pointer to the be_eq_obj for the MCC EQ
 */
static irqreturn_t be_isr_mcc(int irq, void *dev_id)
{
	struct beiscsi_hba *phba;
	struct be_eq_entry *eqe = NULL;
	struct be_queue_info *eq;
	struct be_queue_info *mcc;
	unsigned int num_eq_processed;
	struct be_eq_obj *pbe_eq;
	unsigned long flags;

	pbe_eq = dev_id;
	eq = &pbe_eq->q;
	phba = pbe_eq->phba;
	mcc = &phba->ctrl.mcc_obj.cq;
	eqe = queue_tail_node(eq);
	if (!eqe)
		SE_DEBUG(DBG_LVL_1, "eqe is NULL\n");

	num_eq_processed = 0;

	while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32]
				& EQE_VALID_MASK) {
		if (((eqe->dw[offsetof(struct amap_eq_entry,
		     resource_id) / 32] &
		     EQE_RESID_MASK) >> 16) == mcc->id) {
			spin_lock_irqsave(&phba->isr_lock, flags);
			phba->todo_mcc_cq = 1;
			spin_unlock_irqrestore(&phba->isr_lock, flags);
		}
		AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0);
		queue_tail_inc(eq);
		eqe = queue_tail_node(eq);
		num_eq_processed++;
	}
	if (phba->todo_mcc_cq)
		queue_work(phba->wq, &phba->work_cqs);
	if (num_eq_processed)
		hwi_ring_eq_db(phba, eq->id, 1, num_eq_processed, 1, 1);

	return IRQ_HANDLED;
}


/**
 * be_isr_msix - interrupt handler for a per-CPU I/O event queue (MSI-X)
 * @irq: Not used
 * @dev_id: Pointer to the be_eq_obj for this EQ
 */
static irqreturn_t be_isr_msix(int irq, void *dev_id)
{
	struct beiscsi_hba *phba;
	struct be_eq_entry *eqe = NULL;
	struct be_queue_info *eq;
	struct be_queue_info *cq;
	unsigned int num_eq_processed;
	struct be_eq_obj *pbe_eq;
	unsigned long flags;

	pbe_eq = dev_id;
	eq = &pbe_eq->q;
	cq = pbe_eq->cq;
	eqe = queue_tail_node(eq);
	if (!eqe)
		SE_DEBUG(DBG_LVL_1, "eqe is NULL\n");

	phba = pbe_eq->phba;
	num_eq_processed = 0;
	if (blk_iopoll_enabled) {
		while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32]
					& EQE_VALID_MASK) {
			if (!blk_iopoll_sched_prep(&pbe_eq->iopoll))
				blk_iopoll_sched(&pbe_eq->iopoll);

			AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0);
			queue_tail_inc(eq);
			eqe = queue_tail_node(eq);
			num_eq_processed++;
		}
		if (num_eq_processed)
			hwi_ring_eq_db(phba, eq->id, 1, num_eq_processed, 0, 1);

		return IRQ_HANDLED;
	} else {
		while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32]
						& EQE_VALID_MASK) {
			spin_lock_irqsave(&phba->isr_lock, flags);
			phba->todo_cq = 1;
			spin_unlock_irqrestore(&phba->isr_lock, flags);
			AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0);
			queue_tail_inc(eq);
			eqe = queue_tail_node(eq);
			num_eq_processed++;
		}
		if (phba->todo_cq)
			queue_work(phba->wq, &phba->work_cqs);

		if (num_eq_processed)
			hwi_ring_eq_db(phba, eq->id, 1, num_eq_processed, 1, 1);

		return IRQ_HANDLED;
	}
}


/**
 * be_isr - interrupt handler for INTx / single-vector operation
 * @irq: Not used
 * @dev_id: Pointer to host adapter structure
 */
static irqreturn_t be_isr(int irq, void *dev_id)
{
	struct beiscsi_hba *phba;
	struct hwi_controller *phwi_ctrlr;
	struct hwi_context_memory *phwi_context;
	struct be_eq_entry *eqe = NULL;
	struct be_queue_info *eq;
	struct be_queue_info *cq;
	struct be_queue_info *mcc;
	unsigned long flags, index;
	unsigned int num_mcceq_processed, num_ioeq_processed;
	struct be_ctrl_info *ctrl;
	struct be_eq_obj *pbe_eq;
	int isr;

	phba = dev_id;
	ctrl = &phba->ctrl;
	isr = ioread32(ctrl->csr + CEV_ISR0_OFFSET +
		       (PCI_FUNC(ctrl->pdev->devfn) * CEV_ISR_SIZE));
	if (!isr)
		return IRQ_NONE;

	phwi_ctrlr = phba->phwi_ctrlr;
	phwi_context = phwi_ctrlr->phwi_ctxt;
	pbe_eq = &phwi_context->be_eq[0];

	eq = &phwi_context->be_eq[0].q;
	mcc = &phba->ctrl.mcc_obj.cq;
	index = 0;
	eqe = queue_tail_node(eq);
	if (!eqe)
		SE_DEBUG(DBG_LVL_1, "eqe is NULL\n");

	num_ioeq_processed = 0;
	num_mcceq_processed = 0;
	if (blk_iopoll_enabled) {
		while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32]
					& EQE_VALID_MASK) {
			if (((eqe->dw[offsetof(struct amap_eq_entry,
			     resource_id) / 32] &
			     EQE_RESID_MASK) >> 16) == mcc->id) {
				spin_lock_irqsave(&phba->isr_lock, flags);
				phba->todo_mcc_cq = 1;
				spin_unlock_irqrestore(&phba->isr_lock, flags);
				num_mcceq_processed++;
			} else {
				if (!blk_iopoll_sched_prep(&pbe_eq->iopoll))
					blk_iopoll_sched(&pbe_eq->iopoll);
				num_ioeq_processed++;
			}
			AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0);
			queue_tail_inc(eq);
			eqe = queue_tail_node(eq);
		}
		if (num_ioeq_processed || num_mcceq_processed) {
			if (phba->todo_mcc_cq)
				queue_work(phba->wq, &phba->work_cqs);

			if ((num_mcceq_processed) && (!num_ioeq_processed))
				hwi_ring_eq_db(phba, eq->id, 0,
					       (num_ioeq_processed +
						num_mcceq_processed), 1, 1);
			else
				hwi_ring_eq_db(phba, eq->id, 0,
					       (num_ioeq_processed +
						num_mcceq_processed), 0, 1);

			return IRQ_HANDLED;
		} else
			return IRQ_NONE;
	} else {
		cq = &phwi_context->be_cq[0];
		while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32]
						& EQE_VALID_MASK) {

			if (((eqe->dw[offsetof(struct amap_eq_entry,
			     resource_id) / 32] &
			     EQE_RESID_MASK) >> 16) != cq->id) {
				spin_lock_irqsave(&phba->isr_lock, flags);
				phba->todo_mcc_cq = 1;
				spin_unlock_irqrestore(&phba->isr_lock, flags);
			} else {
				spin_lock_irqsave(&phba->isr_lock, flags);
				phba->todo_cq = 1;
				spin_unlock_irqrestore(&phba->isr_lock, flags);
			}
			AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0);
			queue_tail_inc(eq);
			eqe = queue_tail_node(eq);
			num_ioeq_processed++;
		}
		if (phba->todo_cq || phba->todo_mcc_cq)
			queue_work(phba->wq, &phba->work_cqs);

		if (num_ioeq_processed) {
			hwi_ring_eq_db(phba, eq->id, 0,
				       num_ioeq_processed, 1, 1);
			return IRQ_HANDLED;
		} else
			return IRQ_NONE;
	}
}

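/**
 * beiscsi_init_irqs - register the interrupt handlers
 * @phba: The hba for which interrupts are being set up
 *
 * In MSI-X mode one vector is requested per CPU for I/O event queues
 * plus one for the MCC queue; otherwise a single shared INTx handler
 * is registered.
 */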
static int beiscsi_init_irqs(struct beiscsi_hba *phba)
{
	struct pci_dev *pcidev = phba->pcidev;
	struct hwi_controller *phwi_ctrlr;
	struct hwi_context_memory *phwi_context;
	int ret, msix_vec, i = 0;
	char desc[32];

	phwi_ctrlr = phba->phwi_ctrlr;
	phwi_context = phwi_ctrlr->phwi_ctxt;

	if (phba->msix_enabled) {
		for (i = 0; i < phba->num_cpus; i++) {
			sprintf(desc, "beiscsi_msix_%04x", i);
			msix_vec = phba->msix_entries[i].vector;
			ret = request_irq(msix_vec, be_isr_msix, 0, desc,
					  &phwi_context->be_eq[i]);
			if (ret) {
				shost_printk(KERN_ERR, phba->shost,
					     "beiscsi_init_irqs - "
					     "Failed to register msix irq\n");
				return ret;
			}
		}
		msix_vec = phba->msix_entries[i].vector;
		ret = request_irq(msix_vec, be_isr_mcc, 0, "beiscsi_msix_mcc",
				  &phwi_context->be_eq[i]);
		if (ret) {
			shost_printk(KERN_ERR, phba->shost,
				     "beiscsi_init_irqs - "
				     "Failed to register mcc irq\n");
			return ret;
		}
	} else {
		ret = request_irq(pcidev->irq, be_isr, IRQF_SHARED,
				  "beiscsi", phba);
		if (ret) {
			shost_printk(KERN_ERR, phba->shost, "beiscsi_init_irqs - "
				     "Failed to register irq\n");
			return ret;
		}
	}
	return 0;
}

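/**
 * hwi_ring_cq_db - ring a completion queue doorbell
 * @phba: The hba with the doorbell area mapped
 * @id: The completion queue id
 * @num_processed: Number of CQ entries consumed
 * @rearm: Whether to re-arm the CQ
 * @event: Unused
 */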
static void hwi_ring_cq_db(struct beiscsi_hba *phba,
			   unsigned int id, unsigned int num_processed,
			   unsigned char rearm, unsigned char event)
{
	u32 val = 0;
	val |= id & DB_CQ_RING_ID_MASK;
	if (rearm)
		val |= 1 << DB_CQ_REARM_SHIFT;
	val |= num_processed << DB_CQ_NUM_POPPED_SHIFT;
	iowrite32(val, phba->db_va + DB_CQ_OFFSET);
}

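/**
 * beiscsi_process_async_pdu - hand an unsolicited PDU to libiscsi
 * @beiscsi_conn: The connection the PDU arrived on
 * @phba: The hba that received it
 * @cid: Connection id
 * @ppdu: The PDU header
 * @pdu_len: Length of the PDU header
 * @pbuffer: The PDU data, if any
 * @buf_len: Length of the PDU data
 *
 * Fixes up the ITT for login/text responses so libiscsi recognizes
 * the task, then completes the PDU under the session lock. Returns 0
 * on success, 1 for an unrecognized opcode.
 */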
static unsigned int
beiscsi_process_async_pdu(struct beiscsi_conn *beiscsi_conn,
			  struct beiscsi_hba *phba,
			  unsigned short cid,
			  struct pdu_base *ppdu,
			  unsigned long pdu_len,
			  void *pbuffer, unsigned long buf_len)
{
	struct iscsi_conn *conn = beiscsi_conn->conn;
	struct iscsi_session *session = conn->session;
	struct iscsi_task *task;
	struct beiscsi_io_task *io_task;
	struct iscsi_hdr *login_hdr;

	switch (ppdu->dw[offsetof(struct amap_pdu_base, opcode) / 32] &
						PDUBASE_OPCODE_MASK) {
	case ISCSI_OP_NOOP_IN:
		pbuffer = NULL;
		buf_len = 0;
		break;
	case ISCSI_OP_ASYNC_EVENT:
		break;
	case ISCSI_OP_REJECT:
		WARN_ON(!pbuffer);
		WARN_ON(buf_len != 48);
		SE_DEBUG(DBG_LVL_1, "In ISCSI_OP_REJECT\n");
		break;
	case ISCSI_OP_LOGIN_RSP:
	case ISCSI_OP_TEXT_RSP:
		task = conn->login_task;
		io_task = task->dd_data;
		login_hdr = (struct iscsi_hdr *)ppdu;
		login_hdr->itt = io_task->libiscsi_itt;
		break;
	default:
		shost_printk(KERN_WARNING, phba->shost,
			     "Unrecognized opcode 0x%x in async msg\n",
			     (ppdu->
			     dw[offsetof(struct amap_pdu_base, opcode) / 32]
						& PDUBASE_OPCODE_MASK));
		return 1;
	}

	spin_lock_bh(&session->lock);
	__iscsi_complete_pdu(conn, (struct iscsi_hdr *)ppdu, pbuffer, buf_len);
	spin_unlock_bh(&session->lock);
	return 0;
}

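/**
 * alloc_io_sgl_handle - take an SGL handle from the I/O pool
 * @phba: The hba owning the pool
 *
 * Returns the next free handle and advances the circular allocation
 * index, or NULL when the pool is exhausted.
 */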
static struct sgl_handle *alloc_io_sgl_handle(struct beiscsi_hba *phba)
{
	struct sgl_handle *psgl_handle;

	if (phba->io_sgl_hndl_avbl) {
		SE_DEBUG(DBG_LVL_8,
			 "In alloc_io_sgl_handle, io_sgl_alloc_index=%d\n",
			 phba->io_sgl_alloc_index);
		psgl_handle = phba->io_sgl_hndl_base[phba->
						io_sgl_alloc_index];
		phba->io_sgl_hndl_base[phba->io_sgl_alloc_index] = NULL;
		phba->io_sgl_hndl_avbl--;
		if (phba->io_sgl_alloc_index == (phba->params.
						 ios_per_ctrl - 1))
			phba->io_sgl_alloc_index = 0;
		else
			phba->io_sgl_alloc_index++;
	} else
		psgl_handle = NULL;
	return psgl_handle;
}

static void
free_io_sgl_handle(struct beiscsi_hba *phba, struct sgl_handle *psgl_handle)
{
	SE_DEBUG(DBG_LVL_8, "In free_io_sgl_handle, io_sgl_free_index=%d\n",
		 phba->io_sgl_free_index);
	if (phba->io_sgl_hndl_base[phba->io_sgl_free_index]) {
		/*
		 * this can happen if clean_task is called on a task that
		 * failed in xmit_task or alloc_pdu.
		 */
		SE_DEBUG(DBG_LVL_8,
			 "Double Free in IO SGL io_sgl_free_index=%d, "
			 "value there=%p\n", phba->io_sgl_free_index,
			 phba->io_sgl_hndl_base[phba->io_sgl_free_index]);
		return;
	}
	phba->io_sgl_hndl_base[phba->io_sgl_free_index] = psgl_handle;
	phba->io_sgl_hndl_avbl++;
	if (phba->io_sgl_free_index == (phba->params.ios_per_ctrl - 1))
		phba->io_sgl_free_index = 0;
	else
		phba->io_sgl_free_index++;
}

/**
 * alloc_wrb_handle - To allocate a wrb handle
 * @phba: The hba pointer
 * @cid: The cid to use for allocation
 *
 * This happens under session_lock until submission to chip
 */
struct wrb_handle *alloc_wrb_handle(struct beiscsi_hba *phba, unsigned int cid)
{
	struct hwi_wrb_context *pwrb_context;
	struct hwi_controller *phwi_ctrlr;
	struct wrb_handle *pwrb_handle, *pwrb_handle_tmp;

	phwi_ctrlr = phba->phwi_ctrlr;
	pwrb_context = &phwi_ctrlr->wrb_context[cid];
	if (pwrb_context->wrb_handles_available >= 2) {
		pwrb_handle = pwrb_context->pwrb_handle_base[
					    pwrb_context->alloc_index];
		pwrb_context->wrb_handles_available--;
		if (pwrb_context->alloc_index ==
						(phba->params.wrbs_per_cxn - 1))
			pwrb_context->alloc_index = 0;
		else
			pwrb_context->alloc_index++;

		pwrb_handle_tmp = pwrb_context->pwrb_handle_base[
						pwrb_context->alloc_index];
		pwrb_handle->nxt_wrb_index = pwrb_handle_tmp->wrb_index;
	} else
		pwrb_handle = NULL;
	return pwrb_handle;
}

/**
 * free_wrb_handle - To free the wrb handle back to pool
 * @phba: The hba pointer
 * @pwrb_context: The context to free from
 * @pwrb_handle: The wrb_handle to free
 *
 * This happens under session_lock until submission to chip
 */
static void
free_wrb_handle(struct beiscsi_hba *phba, struct hwi_wrb_context *pwrb_context,
		struct wrb_handle *pwrb_handle)
{
	if (!ring_mode)
		pwrb_context->pwrb_handle_base[pwrb_context->free_index] =
					       pwrb_handle;
	pwrb_context->wrb_handles_available++;
	if (pwrb_context->free_index == (phba->params.wrbs_per_cxn - 1))
		pwrb_context->free_index = 0;
	else
		pwrb_context->free_index++;

	SE_DEBUG(DBG_LVL_8,
		 "FREE WRB: pwrb_handle=%p free_index=0x%x "
		 "wrb_handles_available=%d\n",
		 pwrb_handle, pwrb_context->free_index,
		 pwrb_context->wrb_handles_available);
}

static struct sgl_handle *alloc_mgmt_sgl_handle(struct beiscsi_hba *phba)
{
	struct sgl_handle *psgl_handle;

	if (phba->eh_sgl_hndl_avbl) {
		psgl_handle = phba->eh_sgl_hndl_base[phba->eh_sgl_alloc_index];
		phba->eh_sgl_hndl_base[phba->eh_sgl_alloc_index] = NULL;
		SE_DEBUG(DBG_LVL_8, "mgmt_sgl_alloc_index=%d (0x%x)\n",
			 phba->eh_sgl_alloc_index, phba->eh_sgl_alloc_index);
		phba->eh_sgl_hndl_avbl--;
		if (phba->eh_sgl_alloc_index ==
		    (phba->params.icds_per_ctrl - phba->params.ios_per_ctrl -
		     1))
			phba->eh_sgl_alloc_index = 0;
		else
			phba->eh_sgl_alloc_index++;
	} else
		psgl_handle = NULL;
	return psgl_handle;
}

void
free_mgmt_sgl_handle(struct beiscsi_hba *phba, struct sgl_handle *psgl_handle)
{
	SE_DEBUG(DBG_LVL_8, "In free_mgmt_sgl_handle, eh_sgl_free_index=%d\n",
		 phba->eh_sgl_free_index);
	if (phba->eh_sgl_hndl_base[phba->eh_sgl_free_index]) {
		/*
		 * this can happen if clean_task is called on a task that
		 * failed in xmit_task or alloc_pdu.
		 */
		SE_DEBUG(DBG_LVL_8,
			 "Double Free in eh SGL, eh_sgl_free_index=%d\n",
			 phba->eh_sgl_free_index);
		return;
	}
	phba->eh_sgl_hndl_base[phba->eh_sgl_free_index] = psgl_handle;
	phba->eh_sgl_hndl_avbl++;
	if (phba->eh_sgl_free_index ==
	    (phba->params.icds_per_ctrl - phba->params.ios_per_ctrl - 1))
		phba->eh_sgl_free_index = 0;
	else
		phba->eh_sgl_free_index++;
}

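/**
 * be_complete_io - complete a SCSI command from a solicited CQE
 * @beiscsi_conn: The connection the command was issued on
 * @task: The iscsi_task being completed
 * @psol: The solicited completion entry from hardware
 *
 * Extracts response, status, flags and CmdSN window from the CQE,
 * handles residual under/overflow and CHECK CONDITION sense data, then
 * unmaps the buffers and completes the task.
 */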
static void
be_complete_io(struct beiscsi_conn *beiscsi_conn,
	       struct iscsi_task *task, struct sol_cqe *psol)
{
	struct beiscsi_io_task *io_task = task->dd_data;
	struct be_status_bhs *sts_bhs =
				(struct be_status_bhs *)io_task->cmd_bhs;
	struct iscsi_conn *conn = beiscsi_conn->conn;
	unsigned int sense_len;
	unsigned char *sense;
	u32 resid = 0, exp_cmdsn, max_cmdsn;
	u8 rsp, status, flags;

	exp_cmdsn = (psol->
			dw[offsetof(struct amap_sol_cqe, i_exp_cmd_sn) / 32]
			& SOL_EXP_CMD_SN_MASK);
	max_cmdsn = ((psol->
			dw[offsetof(struct amap_sol_cqe, i_exp_cmd_sn) / 32]
			& SOL_EXP_CMD_SN_MASK) +
			((psol->dw[offsetof(struct amap_sol_cqe, i_cmd_wnd)
				/ 32] & SOL_CMD_WND_MASK) >> 24) - 1);
	rsp = ((psol->dw[offsetof(struct amap_sol_cqe, i_resp) / 32]
						& SOL_RESP_MASK) >> 16);
	status = ((psol->dw[offsetof(struct amap_sol_cqe, i_sts) / 32]
						& SOL_STS_MASK) >> 8);
	flags = ((psol->dw[offsetof(struct amap_sol_cqe, i_flags) / 32]
					& SOL_FLAGS_MASK) >> 24) | 0x80;

	task->sc->result = (DID_OK << 16) | status;
	if (rsp != ISCSI_STATUS_CMD_COMPLETED) {
		task->sc->result = DID_ERROR << 16;
		goto unmap;
	}

	/* bidi not initially supported */
	if (flags & (ISCSI_FLAG_CMD_UNDERFLOW | ISCSI_FLAG_CMD_OVERFLOW)) {
		resid = (psol->dw[offsetof(struct amap_sol_cqe, i_res_cnt) /
				32] & SOL_RES_CNT_MASK);

		if (!status && (flags & ISCSI_FLAG_CMD_OVERFLOW))
			task->sc->result = DID_ERROR << 16;

		if (flags & ISCSI_FLAG_CMD_UNDERFLOW) {
			scsi_set_resid(task->sc, resid);
			if (!status && (scsi_bufflen(task->sc) - resid <
			    task->sc->underflow))
				task->sc->result = DID_ERROR << 16;
		}
	}

	if (status == SAM_STAT_CHECK_CONDITION) {
		unsigned short *slen = (unsigned short *)sts_bhs->sense_info;
		sense = sts_bhs->sense_info + sizeof(unsigned short);
		sense_len = be16_to_cpu(*slen);
		memcpy(task->sc->sense_buffer, sense,
		       min_t(u16, sense_len, SCSI_SENSE_BUFFERSIZE));
	}
	if (io_task->cmd_bhs->iscsi_hdr.flags & ISCSI_FLAG_CMD_READ) {
		if (psol->dw[offsetof(struct amap_sol_cqe, i_res_cnt) / 32]
							& SOL_RES_CNT_MASK)
			conn->rxdata_octets += (psol->
			    dw[offsetof(struct amap_sol_cqe, i_res_cnt) / 32]
			    & SOL_RES_CNT_MASK);
	}
unmap:
	scsi_dma_unmap(io_task->scsi_cmnd);
	iscsi_complete_scsi_task(task, exp_cmdsn, max_cmdsn);
}

static void
be_complete_logout(struct beiscsi_conn *beiscsi_conn,
		   struct iscsi_task *task, struct sol_cqe *psol)
{
	struct iscsi_logout_rsp *hdr;
	struct beiscsi_io_task *io_task = task->dd_data;
	struct iscsi_conn *conn = beiscsi_conn->conn;

	hdr = (struct iscsi_logout_rsp *)task->hdr;
	hdr->opcode = ISCSI_OP_LOGOUT_RSP;
	hdr->t2wait = 5;
	hdr->t2retain = 0;
	hdr->flags = ((psol->dw[offsetof(struct amap_sol_cqe, i_flags) / 32]
					& SOL_FLAGS_MASK) >> 24) | 0x80;
	hdr->response = (psol->dw[offsetof(struct amap_sol_cqe, i_resp) /
					32] & SOL_RESP_MASK);
	hdr->exp_cmdsn = cpu_to_be32(psol->
			dw[offsetof(struct amap_sol_cqe, i_exp_cmd_sn) / 32]
					& SOL_EXP_CMD_SN_MASK);
	hdr->max_cmdsn = be32_to_cpu((psol->
			 dw[offsetof(struct amap_sol_cqe, i_exp_cmd_sn) / 32]
					& SOL_EXP_CMD_SN_MASK) +
			((psol->dw[offsetof(struct amap_sol_cqe, i_cmd_wnd)
					/ 32] & SOL_CMD_WND_MASK) >> 24) - 1);
	hdr->dlength[0] = 0;
	hdr->dlength[1] = 0;
	hdr->dlength[2] = 0;
	hdr->hlength = 0;
	hdr->itt = io_task->libiscsi_itt;
	__iscsi_complete_pdu(conn, (struct iscsi_hdr *)hdr, NULL, 0);
}

static void
be_complete_tmf(struct beiscsi_conn *beiscsi_conn,
		struct iscsi_task *task, struct sol_cqe *psol)
{
	struct iscsi_tm_rsp *hdr;
	struct iscsi_conn *conn = beiscsi_conn->conn;
	struct beiscsi_io_task *io_task = task->dd_data;

	hdr = (struct iscsi_tm_rsp *)task->hdr;
	hdr->opcode = ISCSI_OP_SCSI_TMFUNC_RSP;
	hdr->flags = ((psol->dw[offsetof(struct amap_sol_cqe, i_flags) / 32]
					& SOL_FLAGS_MASK) >> 24) | 0x80;
	hdr->response = (psol->dw[offsetof(struct amap_sol_cqe, i_resp) /
					32] & SOL_RESP_MASK);
	hdr->exp_cmdsn = cpu_to_be32(psol->dw[offsetof(struct amap_sol_cqe,
				    i_exp_cmd_sn) / 32] & SOL_EXP_CMD_SN_MASK);
	hdr->max_cmdsn = be32_to_cpu((psol->dw[offsetof(struct amap_sol_cqe,
			i_exp_cmd_sn) / 32] & SOL_EXP_CMD_SN_MASK) +
			((psol->dw[offsetof(struct amap_sol_cqe, i_cmd_wnd)
			/ 32] & SOL_CMD_WND_MASK) >> 24) - 1);
	hdr->itt = io_task->libiscsi_itt;
	__iscsi_complete_pdu(conn, (struct iscsi_hdr *)hdr, NULL, 0);
}

static void
hwi_complete_drvr_msgs(struct beiscsi_conn *beiscsi_conn,
		       struct beiscsi_hba *phba, struct sol_cqe *psol)
{
	struct hwi_wrb_context *pwrb_context;
	struct wrb_handle *pwrb_handle = NULL;
	struct sgl_handle *psgl_handle = NULL;
	struct hwi_controller *phwi_ctrlr;
	struct iscsi_task *task;
	struct beiscsi_io_task *io_task;
	struct iscsi_conn *conn = beiscsi_conn->conn;
	struct iscsi_session *session = conn->session;

	phwi_ctrlr = phba->phwi_ctrlr;
	if (ring_mode) {
		psgl_handle = phba->sgl_hndl_array[((psol->
			      dw[offsetof(struct amap_sol_cqe_ring, icd_index) /
				32] & SOL_ICD_INDEX_MASK) >> 6)];
		pwrb_context = &phwi_ctrlr->wrb_context[psgl_handle->cid];
		task = psgl_handle->task;
		pwrb_handle = NULL;
	} else {
		pwrb_context = &phwi_ctrlr->wrb_context[((psol->
				dw[offsetof(struct amap_sol_cqe, cid) / 32] &
				SOL_CID_MASK) >> 6) -
				phba->fw_config.iscsi_cid_start];
		pwrb_handle = pwrb_context->pwrb_handle_basestd[((psol->
				dw[offsetof(struct amap_sol_cqe, wrb_index) /
				32] & SOL_WRB_INDEX_MASK) >> 16)];
		task = pwrb_handle->pio_handle;
	}

	io_task = task->dd_data;
	spin_lock(&phba->mgmt_sgl_lock);
	free_mgmt_sgl_handle(phba, io_task->psgl_handle);
	spin_unlock(&phba->mgmt_sgl_lock);
	spin_lock_bh(&session->lock);
	free_wrb_handle(phba, pwrb_context, pwrb_handle);
	spin_unlock_bh(&session->lock);
}

static void
be_complete_nopin_resp(struct beiscsi_conn *beiscsi_conn,
		       struct iscsi_task *task, struct sol_cqe *psol)
{
	struct iscsi_nopin *hdr;
	struct iscsi_conn *conn = beiscsi_conn->conn;
	struct beiscsi_io_task *io_task = task->dd_data;

	hdr = (struct iscsi_nopin *)task->hdr;
	hdr->flags = ((psol->dw[offsetof(struct amap_sol_cqe, i_flags) / 32]
			& SOL_FLAGS_MASK) >> 24) | 0x80;
	hdr->exp_cmdsn = cpu_to_be32(psol->dw[offsetof(struct amap_sol_cqe,
				     i_exp_cmd_sn) / 32] & SOL_EXP_CMD_SN_MASK);
	hdr->max_cmdsn = be32_to_cpu((psol->dw[offsetof(struct amap_sol_cqe,
			i_exp_cmd_sn) / 32] & SOL_EXP_CMD_SN_MASK) +
			((psol->dw[offsetof(struct amap_sol_cqe, i_cmd_wnd)
			/ 32] & SOL_CMD_WND_MASK) >> 24) - 1);
	hdr->opcode = ISCSI_OP_NOOP_IN;
	hdr->itt = io_task->libiscsi_itt;
	__iscsi_complete_pdu(conn, (struct iscsi_hdr *)hdr, NULL, 0);
}

static void hwi_complete_cmd(struct beiscsi_conn *beiscsi_conn,
			     struct beiscsi_hba *phba, struct sol_cqe *psol)
{
	struct hwi_wrb_context *pwrb_context;
	struct wrb_handle *pwrb_handle;
	struct iscsi_wrb *pwrb = NULL;
	struct hwi_controller *phwi_ctrlr;
	struct iscsi_task *task;
	struct sgl_handle *psgl_handle = NULL;
	unsigned int type;
	struct iscsi_conn *conn = beiscsi_conn->conn;
	struct iscsi_session *session = conn->session;

	phwi_ctrlr = phba->phwi_ctrlr;
	if (ring_mode) {
		psgl_handle = phba->sgl_hndl_array[((psol->
			      dw[offsetof(struct amap_sol_cqe_ring, icd_index) /
			      32] & SOL_ICD_INDEX_MASK) >> 6)];
		task = psgl_handle->task;
		type = psgl_handle->type;
	} else {
		pwrb_context = &phwi_ctrlr->
				wrb_context[((psol->dw[offsetof
				(struct amap_sol_cqe, cid) / 32]
				& SOL_CID_MASK) >> 6) -
				phba->fw_config.iscsi_cid_start];
		pwrb_handle = pwrb_context->pwrb_handle_basestd[((psol->
				dw[offsetof(struct amap_sol_cqe, wrb_index) /
				32] & SOL_WRB_INDEX_MASK) >> 16)];
		task = pwrb_handle->pio_handle;
		pwrb = pwrb_handle->pwrb;
		type = (pwrb->dw[offsetof(struct amap_iscsi_wrb, type) / 32] &
			WRB_TYPE_MASK) >> 28;
	}
	spin_lock_bh(&session->lock);
	switch (type) {
	case HWH_TYPE_IO:
	case HWH_TYPE_IO_RD:
		if ((task->hdr->opcode & ISCSI_OPCODE_MASK) ==
		    ISCSI_OP_NOOP_OUT) {
			be_complete_nopin_resp(beiscsi_conn, task, psol);
		} else
			be_complete_io(beiscsi_conn, task, psol);
		break;

	case HWH_TYPE_LOGOUT:
		be_complete_logout(beiscsi_conn, task, psol);
		break;

	case HWH_TYPE_LOGIN:
		SE_DEBUG(DBG_LVL_1,
			 "\t\t No HWH_TYPE_LOGIN Expected in hwi_complete_cmd"
			 " - Solicited path\n");
		break;

	case HWH_TYPE_TMF:
		be_complete_tmf(beiscsi_conn, task, psol);
		break;

	case HWH_TYPE_NOP:
		be_complete_nopin_resp(beiscsi_conn, task, psol);
		break;

	default:
		if (ring_mode)
			shost_printk(KERN_WARNING, phba->shost,
				"In hwi_complete_cmd, unknown type = %d "
				"icd_index 0x%x CID 0x%x\n", type,
				((psol->dw[offsetof(struct amap_sol_cqe_ring,
				icd_index) / 32] & SOL_ICD_INDEX_MASK) >> 6),
				psgl_handle->cid);
		else
			shost_printk(KERN_WARNING, phba->shost,
				"In hwi_complete_cmd, unknown type = %d "
				"wrb_index 0x%x CID 0x%x\n", type,
				((psol->dw[offsetof(struct amap_iscsi_wrb,
				type) / 32] & SOL_WRB_INDEX_MASK) >> 16),
				((psol->dw[offsetof(struct amap_sol_cqe,
				cid) / 32] & SOL_CID_MASK) >> 6));
		break;
	}

	spin_unlock_bh(&session->lock);
}

static struct list_head *hwi_get_async_busy_list(struct hwi_async_pdu_context
					  *pasync_ctx, unsigned int is_header,
					  unsigned int host_write_ptr)
{
	if (is_header)
		return &pasync_ctx->async_entry[host_write_ptr].
		    header_busy_list;
	else
		return &pasync_ctx->async_entry[host_write_ptr].data_busy_list;
}

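/**
 * hwi_get_async_handle - look up the handle for an unsolicited buffer
 * @phba: The hba that received the PDU
 * @beiscsi_conn: The connection it belongs to
 * @pasync_ctx: The default PDU context
 * @pdpdu_cqe: The default PDU completion entry
 * @pcq_index: Out parameter for the CQ index in the entry
 *
 * Recovers the buffer index from the bus address in the CQE and finds
 * the matching handle on the header or data busy list. Returns NULL
 * if the completion code is not recognized.
 */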
static struct async_pdu_handle *
hwi_get_async_handle(struct beiscsi_hba *phba,
		     struct beiscsi_conn *beiscsi_conn,
		     struct hwi_async_pdu_context *pasync_ctx,
		     struct i_t_dpdu_cqe *pdpdu_cqe, unsigned int *pcq_index)
{
	struct be_bus_address phys_addr;
	struct list_head *pbusy_list;
	struct async_pdu_handle *pasync_handle = NULL;
	int buffer_len = 0;
	unsigned char buffer_index = -1;
	unsigned char is_header = 0;

	phys_addr.u.a32.address_lo =
	    pdpdu_cqe->dw[offsetof(struct amap_i_t_dpdu_cqe, db_addr_lo) / 32] -
	    ((pdpdu_cqe->dw[offsetof(struct amap_i_t_dpdu_cqe, dpl) / 32]
						& PDUCQE_DPL_MASK) >> 16);
	phys_addr.u.a32.address_hi =
	    pdpdu_cqe->dw[offsetof(struct amap_i_t_dpdu_cqe, db_addr_hi) / 32];

	phys_addr.u.a64.address =
			*((unsigned long long *)(&phys_addr.u.a64.address));

	switch (pdpdu_cqe->dw[offsetof(struct amap_i_t_dpdu_cqe, code) / 32]
			& PDUCQE_CODE_MASK) {
	case UNSOL_HDR_NOTIFY:
		is_header = 1;

		pbusy_list = hwi_get_async_busy_list(pasync_ctx, 1,
			(pdpdu_cqe->dw[offsetof(struct amap_i_t_dpdu_cqe,
			index) / 32] & PDUCQE_INDEX_MASK));

		buffer_len = (unsigned int)(phys_addr.u.a64.address -
				pasync_ctx->async_header.pa_base.u.a64.address);

		buffer_index = buffer_len /
				pasync_ctx->async_header.buffer_size;

		break;
	case UNSOL_DATA_NOTIFY:
		pbusy_list = hwi_get_async_busy_list(pasync_ctx, 0, (pdpdu_cqe->
					dw[offsetof(struct amap_i_t_dpdu_cqe,
					index) / 32] & PDUCQE_INDEX_MASK));
		buffer_len = (unsigned long)(phys_addr.u.a64.address -
					pasync_ctx->async_data.pa_base.u.
					a64.address);
		buffer_index = buffer_len / pasync_ctx->async_data.buffer_size;
		break;
	default:
		pbusy_list = NULL;
		shost_printk(KERN_WARNING, phba->shost,
			"Unexpected code=%d\n",
			 pdpdu_cqe->dw[offsetof(struct amap_i_t_dpdu_cqe,
					code) / 32] & PDUCQE_CODE_MASK);
		return NULL;
	}

	WARN_ON(buffer_index > pasync_ctx->async_data.num_entries);
	WARN_ON(list_empty(pbusy_list));
	list_for_each_entry(pasync_handle, pbusy_list, link) {
		WARN_ON(pasync_handle->consumed);
		if (pasync_handle->index == buffer_index)
			break;
	}

	WARN_ON(!pasync_handle);

	pasync_handle->cri = (unsigned short)beiscsi_conn->beiscsi_conn_cid -
					     phba->fw_config.iscsi_cid_start;
	pasync_handle->is_header = is_header;
	pasync_handle->buffer_len = ((pdpdu_cqe->
			dw[offsetof(struct amap_i_t_dpdu_cqe, dpl) / 32]
			& PDUCQE_DPL_MASK) >> 16);

	*pcq_index = (pdpdu_cqe->dw[offsetof(struct amap_i_t_dpdu_cqe,
			index) / 32] & PDUCQE_INDEX_MASK);
	return pasync_handle;
}

1099
1100static unsigned int
1101hwi_update_async_writables(struct hwi_async_pdu_context *pasync_ctx,
1102			   unsigned int is_header, unsigned int cq_index)
1103{
1104	struct list_head *pbusy_list;
1105	struct async_pdu_handle *pasync_handle;
1106	unsigned int num_entries, writables = 0;
1107	unsigned int *pep_read_ptr, *pwritables;
1108
1109
1110	if (is_header) {
1111		pep_read_ptr = &pasync_ctx->async_header.ep_read_ptr;
1112		pwritables = &pasync_ctx->async_header.writables;
1113		num_entries = pasync_ctx->async_header.num_entries;
1114	} else {
1115		pep_read_ptr = &pasync_ctx->async_data.ep_read_ptr;
1116		pwritables = &pasync_ctx->async_data.writables;
1117		num_entries = pasync_ctx->async_data.num_entries;
1118	}
1119
1120	while ((*pep_read_ptr) != cq_index) {
1121		(*pep_read_ptr)++;
1122		*pep_read_ptr = (*pep_read_ptr) % num_entries;
1123
1124		pbusy_list = hwi_get_async_busy_list(pasync_ctx, is_header,
1125						     *pep_read_ptr);
1126		if (writables == 0)
1127			WARN_ON(list_empty(pbusy_list));
1128
1129		if (!list_empty(pbusy_list)) {
1130			pasync_handle = list_entry(pbusy_list->next,
1131						   struct async_pdu_handle,
1132						   link);
1133			WARN_ON(!pasync_handle);
1134			pasync_handle->consumed = 1;
1135		}
1136
1137		writables++;
1138	}
1139
1140	if (!writables) {
1141		SE_DEBUG(DBG_LVL_1,
1142			 "Duplicate notification received - index 0x%x!!\n",
1143			 cq_index);
1144		WARN_ON(1);
1145	}
1146
1147	*pwritables = *pwritables + writables;
1148	return 0;
1149}
1150
1151static unsigned int hwi_free_async_msg(struct beiscsi_hba *phba,
1152				       unsigned int cri)
1153{
1154	struct hwi_controller *phwi_ctrlr;
1155	struct hwi_async_pdu_context *pasync_ctx;
1156	struct async_pdu_handle *pasync_handle, *tmp_handle;
1157	struct list_head *plist;
1158	unsigned int i = 0;
1159
1160	phwi_ctrlr = phba->phwi_ctrlr;
1161	pasync_ctx = HWI_GET_ASYNC_PDU_CTX(phwi_ctrlr);
1162
1163	plist  = &pasync_ctx->async_entry[cri].wait_queue.list;
1164
1165	list_for_each_entry_safe(pasync_handle, tmp_handle, plist, link) {
1166		list_del(&pasync_handle->link);
1167
1168		if (i == 0) {
1169			list_add_tail(&pasync_handle->link,
1170				      &pasync_ctx->async_header.free_list);
1171			pasync_ctx->async_header.free_entries++;
1172			i++;
1173		} else {
1174			list_add_tail(&pasync_handle->link,
1175				      &pasync_ctx->async_data.free_list);
1176			pasync_ctx->async_data.free_entries++;
1177			i++;
1178		}
1179	}
1180
1181	INIT_LIST_HEAD(&pasync_ctx->async_entry[cri].wait_queue.list);
1182	pasync_ctx->async_entry[cri].wait_queue.hdr_received = 0;
1183	pasync_ctx->async_entry[cri].wait_queue.bytes_received = 0;
1184	return 0;
1185}
1186
static struct phys_addr *
hwi_get_ring_address(struct hwi_async_pdu_context *pasync_ctx,
		     unsigned int is_header, unsigned int host_write_ptr)
{
	struct phys_addr *pasync_sge = NULL;

	if (is_header)
		pasync_sge = pasync_ctx->async_header.ring_base;
	else
		pasync_sge = pasync_ctx->async_data.ring_base;

	return pasync_sge + host_write_ptr;
}

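/**
 * hwi_post_async_buffers - replenish a default PDU ring
 * @phba: The hba owning the ring
 * @is_header: Nonzero for the header ring, zero for the data ring
 *
 * Moves free handles back onto the busy list, writes their addresses
 * into the ring (in multiples of 8 entries) and rings the RxULP
 * doorbell to make them available to hardware.
 */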
static void hwi_post_async_buffers(struct beiscsi_hba *phba,
				   unsigned int is_header)
{
	struct hwi_controller *phwi_ctrlr;
	struct hwi_async_pdu_context *pasync_ctx;
	struct async_pdu_handle *pasync_handle;
	struct list_head *pfree_link, *pbusy_list;
	struct phys_addr *pasync_sge;
	unsigned int ring_id, num_entries;
	unsigned int host_write_num;
	unsigned int writables;
	unsigned int i = 0;
	u32 doorbell = 0;

	phwi_ctrlr = phba->phwi_ctrlr;
	pasync_ctx = HWI_GET_ASYNC_PDU_CTX(phwi_ctrlr);

	if (is_header) {
		num_entries = pasync_ctx->async_header.num_entries;
		writables = min(pasync_ctx->async_header.writables,
				pasync_ctx->async_header.free_entries);
		pfree_link = pasync_ctx->async_header.free_list.next;
		host_write_num = pasync_ctx->async_header.host_write_ptr;
		ring_id = phwi_ctrlr->default_pdu_hdr.id;
	} else {
		num_entries = pasync_ctx->async_data.num_entries;
		writables = min(pasync_ctx->async_data.writables,
				pasync_ctx->async_data.free_entries);
		pfree_link = pasync_ctx->async_data.free_list.next;
		host_write_num = pasync_ctx->async_data.host_write_ptr;
		ring_id = phwi_ctrlr->default_pdu_data.id;
	}

	writables = (writables / 8) * 8;
	if (writables) {
		for (i = 0; i < writables; i++) {
			pbusy_list =
			    hwi_get_async_busy_list(pasync_ctx, is_header,
						    host_write_num);
			pasync_handle =
			    list_entry(pfree_link, struct async_pdu_handle,
								link);
			WARN_ON(!pasync_handle);
			pasync_handle->consumed = 0;

			pfree_link = pfree_link->next;

			pasync_sge = hwi_get_ring_address(pasync_ctx,
						is_header, host_write_num);

			pasync_sge->hi = pasync_handle->pa.u.a32.address_lo;
			pasync_sge->lo = pasync_handle->pa.u.a32.address_hi;

			list_move(&pasync_handle->link, pbusy_list);

			host_write_num++;
			host_write_num = host_write_num % num_entries;
		}

		if (is_header) {
			pasync_ctx->async_header.host_write_ptr =
							host_write_num;
			pasync_ctx->async_header.free_entries -= writables;
			pasync_ctx->async_header.writables -= writables;
			pasync_ctx->async_header.busy_entries += writables;
		} else {
			pasync_ctx->async_data.host_write_ptr = host_write_num;
			pasync_ctx->async_data.free_entries -= writables;
			pasync_ctx->async_data.writables -= writables;
			pasync_ctx->async_data.busy_entries += writables;
		}

		doorbell |= ring_id & DB_DEF_PDU_RING_ID_MASK;
		doorbell |= 1 << DB_DEF_PDU_REARM_SHIFT;
		doorbell |= 0 << DB_DEF_PDU_EVENT_SHIFT;
		doorbell |= (writables & DB_DEF_PDU_CQPROC_MASK)
					<< DB_DEF_PDU_CQPROC_SHIFT;

		iowrite32(doorbell, phba->db_va + DB_RXULP0_OFFSET);
	}
}

static void hwi_flush_default_pdu_buffer(struct beiscsi_hba *phba,
					 struct beiscsi_conn *beiscsi_conn,
					 struct i_t_dpdu_cqe *pdpdu_cqe)
{
	struct hwi_controller *phwi_ctrlr;
	struct hwi_async_pdu_context *pasync_ctx;
	struct async_pdu_handle *pasync_handle = NULL;
	unsigned int cq_index = -1;

	phwi_ctrlr = phba->phwi_ctrlr;
	pasync_ctx = HWI_GET_ASYNC_PDU_CTX(phwi_ctrlr);

	pasync_handle = hwi_get_async_handle(phba, beiscsi_conn, pasync_ctx,
					     pdpdu_cqe, &cq_index);
	BUG_ON(pasync_handle->is_header != 0);
	if (pasync_handle->consumed == 0)
		hwi_update_async_writables(pasync_ctx, pasync_handle->is_header,
					   cq_index);

	hwi_free_async_msg(phba, pasync_handle->cri);
	hwi_post_async_buffers(phba, pasync_handle->is_header);
}

static unsigned int
hwi_fwd_async_msg(struct beiscsi_conn *beiscsi_conn,
		  struct beiscsi_hba *phba,
		  struct hwi_async_pdu_context *pasync_ctx, unsigned short cri)
{
	struct list_head *plist;
	struct async_pdu_handle *pasync_handle;
	void *phdr = NULL;
	unsigned int hdr_len = 0, buf_len = 0;
	unsigned int status, index = 0, offset = 0;
	void *pfirst_buffer = NULL;
	unsigned int num_buf = 0;

	plist = &pasync_ctx->async_entry[cri].wait_queue.list;

	list_for_each_entry(pasync_handle, plist, link) {
		if (index == 0) {
			phdr = pasync_handle->pbuffer;
			hdr_len = pasync_handle->buffer_len;
		} else {
			buf_len = pasync_handle->buffer_len;
			if (!num_buf) {
				pfirst_buffer = pasync_handle->pbuffer;
				num_buf++;
			} else {
				memcpy(pfirst_buffer + offset,
				       pasync_handle->pbuffer, buf_len);
			}
			offset += buf_len;
		}
		index++;
	}

	status = beiscsi_process_async_pdu(beiscsi_conn, phba,
					   (beiscsi_conn->beiscsi_conn_cid -
					    phba->fw_config.iscsi_cid_start),
					    phdr, hdr_len, pfirst_buffer,
					    offset);

	if (status == 0)
		hwi_free_async_msg(phba, cri);
	return 0;
}

static unsigned int
hwi_gather_async_pdu(struct beiscsi_conn *beiscsi_conn,
		     struct beiscsi_hba *phba,
		     struct async_pdu_handle *pasync_handle)
{
	struct hwi_async_pdu_context *pasync_ctx;
	struct hwi_controller *phwi_ctrlr;
	unsigned int bytes_needed = 0, status = 0;
	unsigned short cri = pasync_handle->cri;
	struct pdu_base *ppdu;

	phwi_ctrlr = phba->phwi_ctrlr;
	pasync_ctx = HWI_GET_ASYNC_PDU_CTX(phwi_ctrlr);

	list_del(&pasync_handle->link);
	if (pasync_handle->is_header) {
		pasync_ctx->async_header.busy_entries--;
		if (pasync_ctx->async_entry[cri].wait_queue.hdr_received) {
			hwi_free_async_msg(phba, cri);
			BUG();
		}

		pasync_ctx->async_entry[cri].wait_queue.bytes_received = 0;
		pasync_ctx->async_entry[cri].wait_queue.hdr_received = 1;
		pasync_ctx->async_entry[cri].wait_queue.hdr_len =
				(unsigned short)pasync_handle->buffer_len;
		list_add_tail(&pasync_handle->link,
			      &pasync_ctx->async_entry[cri].wait_queue.list);

		ppdu = pasync_handle->pbuffer;
		bytes_needed = ((((ppdu->dw[offsetof(struct amap_pdu_base,
			data_len_hi) / 32] & PDUBASE_DATALENHI_MASK) << 8) &
			0xFFFF0000) | ((be16_to_cpu((ppdu->
			dw[offsetof(struct amap_pdu_base, data_len_lo) / 32]
			& PDUBASE_DATALENLO_MASK) >> 16)) & 0x0000FFFF));

		if (status == 0) {
			pasync_ctx->async_entry[cri].wait_queue.bytes_needed =
			    bytes_needed;

			if (bytes_needed == 0)
				status = hwi_fwd_async_msg(beiscsi_conn, phba,
							   pasync_ctx, cri);
		}
	} else {
		pasync_ctx->async_data.busy_entries--;
		if (pasync_ctx->async_entry[cri].wait_queue.hdr_received) {
			list_add_tail(&pasync_handle->link,
				      &pasync_ctx->async_entry[cri].wait_queue.
				      list);
			pasync_ctx->async_entry[cri].wait_queue.
				bytes_received +=
				(unsigned short)pasync_handle->buffer_len;

			if (pasync_ctx->async_entry[cri].wait_queue.
			    bytes_received >=
			    pasync_ctx->async_entry[cri].wait_queue.
			    bytes_needed)
				status = hwi_fwd_async_msg(beiscsi_conn, phba,
							   pasync_ctx, cri);
		}
	}
	return status;
}

static void hwi_process_default_pdu_ring(struct beiscsi_conn *beiscsi_conn,
					 struct beiscsi_hba *phba,
					 struct i_t_dpdu_cqe *pdpdu_cqe)
{
	struct hwi_controller *phwi_ctrlr;
	struct hwi_async_pdu_context *pasync_ctx;
	struct async_pdu_handle *pasync_handle = NULL;
	unsigned int cq_index = -1;

	phwi_ctrlr = phba->phwi_ctrlr;
	pasync_ctx = HWI_GET_ASYNC_PDU_CTX(phwi_ctrlr);
	pasync_handle = hwi_get_async_handle(phba, beiscsi_conn, pasync_ctx,
					     pdpdu_cqe, &cq_index);

	if (pasync_handle->consumed == 0)
		hwi_update_async_writables(pasync_ctx, pasync_handle->is_header,
					   cq_index);
	hwi_gather_async_pdu(beiscsi_conn, phba, pasync_handle);
	hwi_post_async_buffers(phba, pasync_handle->is_header);
}

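/**
 * beiscsi_process_cq - drain a completion queue
 * @pbe_eq: The event queue object whose CQ is to be processed
 *
 * Walks valid CQ entries, dispatching solicited completions,
 * driver-message completions, unsolicited PDUs and error notifications.
 * The CQ doorbell is rung every 32 entries without re-arming and once
 * at the end with re-arm. Returns the total number of entries handled.
 */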
static unsigned int beiscsi_process_cq(struct be_eq_obj *pbe_eq)
{
	struct be_queue_info *cq;
	struct sol_cqe *sol;
	struct dmsg_cqe *dmsg;
	unsigned int num_processed = 0;
	unsigned int tot_nump = 0;
	struct beiscsi_conn *beiscsi_conn;
	struct sgl_handle *psgl_handle = NULL;
	struct beiscsi_endpoint *beiscsi_ep;
	struct iscsi_endpoint *ep;
	struct beiscsi_hba *phba;

	cq = pbe_eq->cq;
	sol = queue_tail_node(cq);
	phba = pbe_eq->phba;

	while (sol->dw[offsetof(struct amap_sol_cqe, valid) / 32] &
	       CQE_VALID_MASK) {
		be_dws_le_to_cpu(sol, sizeof(struct sol_cqe));

		if (ring_mode) {
			psgl_handle = phba->sgl_hndl_array[((sol->
				      dw[offsetof(struct amap_sol_cqe_ring,
				      icd_index) / 32] & SOL_ICD_INDEX_MASK)
				      >> 6)];
			ep = phba->ep_array[psgl_handle->cid];
		} else {
			ep = phba->ep_array[(u32) ((sol->
				   dw[offsetof(struct amap_sol_cqe, cid) / 32] &
				   SOL_CID_MASK) >> 6) -
				   phba->fw_config.iscsi_cid_start];
		}
		beiscsi_ep = ep->dd_data;
		beiscsi_conn = beiscsi_ep->conn;
		if (num_processed >= 32) {
			hwi_ring_cq_db(phba, cq->id,
					num_processed, 0, 0);
			tot_nump += num_processed;
			num_processed = 0;
		}

		switch ((u32) sol->dw[offsetof(struct amap_sol_cqe, code) /
			32] & CQE_CODE_MASK) {
		case SOL_CMD_COMPLETE:
			hwi_complete_cmd(beiscsi_conn, phba, sol);
			break;
		case DRIVERMSG_NOTIFY:
			SE_DEBUG(DBG_LVL_8, "Received DRIVERMSG_NOTIFY\n");
			dmsg = (struct dmsg_cqe *)sol;
			hwi_complete_drvr_msgs(beiscsi_conn, phba, sol);
			break;
		case UNSOL_HDR_NOTIFY:
			SE_DEBUG(DBG_LVL_8, "Received UNSOL_HDR_NOTIFY\n");
			hwi_process_default_pdu_ring(beiscsi_conn, phba,
					     (struct i_t_dpdu_cqe *)sol);
			break;
		case UNSOL_DATA_NOTIFY:
			SE_DEBUG(DBG_LVL_8, "Received UNSOL_DATA_NOTIFY\n");
			hwi_process_default_pdu_ring(beiscsi_conn, phba,
					     (struct i_t_dpdu_cqe *)sol);
			break;
		case CXN_INVALIDATE_INDEX_NOTIFY:
		case CMD_INVALIDATED_NOTIFY:
		case CXN_INVALIDATE_NOTIFY:
			SE_DEBUG(DBG_LVL_1,
				 "Ignoring CQ Error notification for cmd/cxn "
				 "invalidate\n");
			break;
		case SOL_CMD_KILLED_DATA_DIGEST_ERR:
		case CMD_KILLED_INVALID_STATSN_RCVD:
		case CMD_KILLED_INVALID_R2T_RCVD:
		case CMD_CXN_KILLED_LUN_INVALID:
		case CMD_CXN_KILLED_ICD_INVALID:
		case CMD_CXN_KILLED_ITT_INVALID:
		case CMD_CXN_KILLED_SEQ_OUTOFORDER:
		case CMD_CXN_KILLED_INVALID_DATASN_RCVD:
			if (ring_mode) {
				SE_DEBUG(DBG_LVL_1,
				 "CQ Error notification for cmd.. "
				 "code %d cid 0x%x\n",
				 sol->dw[offsetof(struct amap_sol_cqe, code) /
				 32] & CQE_CODE_MASK, psgl_handle->cid);
			} else {
				SE_DEBUG(DBG_LVL_1,
				 "CQ Error notification for cmd.. "
				 "code %d cid 0x%x\n",
				 sol->dw[offsetof(struct amap_sol_cqe, code) /
				 32] & CQE_CODE_MASK,
				 (sol->dw[offsetof(struct amap_sol_cqe, cid) /
				 32] & SOL_CID_MASK));
			}
			break;
		case UNSOL_DATA_DIGEST_ERROR_NOTIFY:
			SE_DEBUG(DBG_LVL_1,
				 "Digest error on def pdu ring, dropping..\n");
			hwi_flush_default_pdu_buffer(phba, beiscsi_conn,
					     (struct i_t_dpdu_cqe *) sol);
			break;
		case CXN_KILLED_PDU_SIZE_EXCEEDS_DSL:
		case CXN_KILLED_BURST_LEN_MISMATCH:
		case CXN_KILLED_AHS_RCVD:
		case CXN_KILLED_HDR_DIGEST_ERR:
		case CXN_KILLED_UNKNOWN_HDR:
		case CXN_KILLED_STALE_ITT_TTT_RCVD:
		case CXN_KILLED_INVALID_ITT_TTT_RCVD:
		case CXN_KILLED_TIMED_OUT:
		case CXN_KILLED_FIN_RCVD:
		case CXN_KILLED_BAD_UNSOL_PDU_RCVD:
		case CXN_KILLED_BAD_WRB_INDEX_ERROR:
		case CXN_KILLED_OVER_RUN_RESIDUAL:
		case CXN_KILLED_UNDER_RUN_RESIDUAL:
		case CXN_KILLED_CMND_DATA_NOT_ON_SAME_CONN:
			if (ring_mode) {
				SE_DEBUG(DBG_LVL_1, "CQ Error %d, reset CID "
				 "0x%x...\n",
				 sol->dw[offsetof(struct amap_sol_cqe, code) /
				 32] & CQE_CODE_MASK, psgl_handle->cid);
			} else {
				SE_DEBUG(DBG_LVL_1, "CQ Error %d, reset CID "
				 "0x%x...\n",
				 sol->dw[offsetof(struct amap_sol_cqe, code) /
				 32] & CQE_CODE_MASK,
				 (sol->dw[offsetof(struct amap_sol_cqe, cid) /
				 32] & CQE_CID_MASK));
			}
			iscsi_conn_failure(beiscsi_conn->conn,
					   ISCSI_ERR_CONN_FAILED);
			break;
		case CXN_KILLED_RST_SENT:
		case CXN_KILLED_RST_RCVD:
			if (ring_mode) {
				SE_DEBUG(DBG_LVL_1, "CQ Error %d, reset "
				"received/sent on CID 0x%x...\n",
				 sol->dw[offsetof(struct amap_sol_cqe, code) /
				 32] & CQE_CODE_MASK, psgl_handle->cid);
			} else {
				SE_DEBUG(DBG_LVL_1, "CQ Error %d, reset "
				"received/sent on CID 0x%x...\n",
				 sol->dw[offsetof(struct amap_sol_cqe, code) /
				 32] & CQE_CODE_MASK,
				 (sol->dw[offsetof(struct amap_sol_cqe, cid) /
				 32] & CQE_CID_MASK));
			}
			iscsi_conn_failure(beiscsi_conn->conn,
					   ISCSI_ERR_CONN_FAILED);
			break;
		default:
			SE_DEBUG(DBG_LVL_1, "CQ Error Invalid code=%d "
				 "received on CID 0x%x...\n",
				 sol->dw[offsetof(struct amap_sol_cqe, code) /
				 32] & CQE_CODE_MASK,
				 (sol->dw[offsetof(struct amap_sol_cqe, cid) /
				 32] & CQE_CID_MASK));
			break;
		}

		AMAP_SET_BITS(struct amap_sol_cqe, valid, sol, 0);
		queue_tail_inc(cq);
		sol = queue_tail_node(cq);
		num_processed++;
	}

	if (num_processed > 0) {
		tot_nump += num_processed;
		hwi_ring_cq_db(phba, cq->id, num_processed, 1, 0);
	}
	return tot_nump;
}

static void beiscsi_process_all_cqs(struct work_struct *work)
{
	unsigned long flags;
	struct hwi_controller *phwi_ctrlr;
	struct hwi_context_memory *phwi_context;
	struct be_eq_obj *pbe_eq;
	struct beiscsi_hba *phba =
	    container_of(work, struct beiscsi_hba, work_cqs);

	phwi_ctrlr = phba->phwi_ctrlr;
	phwi_context = phwi_ctrlr->phwi_ctxt;
	if (phba->msix_enabled)
		pbe_eq = &phwi_context->be_eq[phba->num_cpus];
	else
		pbe_eq = &phwi_context->be_eq[0];

	if (phba->todo_mcc_cq) {
		spin_lock_irqsave(&phba->isr_lock, flags);
		phba->todo_mcc_cq = 0;
		spin_unlock_irqrestore(&phba->isr_lock, flags);
	}

	if (phba->todo_cq) {
		spin_lock_irqsave(&phba->isr_lock, flags);
		phba->todo_cq = 0;
		spin_unlock_irqrestore(&phba->isr_lock, flags);
		beiscsi_process_cq(pbe_eq);
	}
}

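/**
 * be_iopoll - blk_iopoll callback for CQ processing
 * @iop: The blk_iopoll instance embedded in the be_eq_obj
 * @budget: Maximum number of completions to process
 *
 * Processes the CQ and, if fewer entries than the budget were
 * consumed, completes polling and re-arms the event queue.
 */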
static int be_iopoll(struct blk_iopoll *iop, int budget)
{
	unsigned int ret;
	struct beiscsi_hba *phba;
	struct be_eq_obj *pbe_eq;

	pbe_eq = container_of(iop, struct be_eq_obj, iopoll);
	ret = beiscsi_process_cq(pbe_eq);
	if (ret < budget) {
		phba = pbe_eq->phba;
		blk_iopoll_complete(iop);
		SE_DEBUG(DBG_LVL_8, "rearm pbe_eq->q.id =%d\n", pbe_eq->q.id);
		hwi_ring_eq_db(phba, pbe_eq->q.id, 0, 0, 1, 1);
	}
	return ret;
}

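/**
 * hwi_write_sgl - fill the WRB and SGL for a scatter-gather I/O
 * @pwrb: The work request block to fill
 * @sg: The mapped scatterlist
 * @num_sg: Number of scatterlist entries
 * @io_task: The io_task carrying the BHS and SGL fragment
 *
 * The first two segments are written inline into the WRB; the full
 * list is then written into the SGL fragment, with the BHS as the
 * leading entry and last_sge set on the final element.
 */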
1653static void
1654hwi_write_sgl(struct iscsi_wrb *pwrb, struct scatterlist *sg,
1655	      unsigned int num_sg, struct beiscsi_io_task *io_task)
1656{
1657	struct iscsi_sge *psgl;
1658	unsigned short sg_len, index;
1659	unsigned int sge_len = 0;
1660	unsigned long long addr;
1661	struct scatterlist *l_sg;
1662	unsigned int offset;
1663
1664	AMAP_SET_BITS(struct amap_iscsi_wrb, iscsi_bhs_addr_lo, pwrb,
1665				      io_task->bhs_pa.u.a32.address_lo);
1666	AMAP_SET_BITS(struct amap_iscsi_wrb, iscsi_bhs_addr_hi, pwrb,
1667				      io_task->bhs_pa.u.a32.address_hi);
1668
1669	l_sg = sg;
1670	for (index = 0; (index < num_sg) && (index < 2); index++, sg_next(sg)) {
1671		if (index == 0) {
1672			sg_len = sg_dma_len(sg);
1673			addr = (u64) sg_dma_address(sg);
1674			AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_addr_lo, pwrb,
1675							(addr & 0xFFFFFFFF));
1676			AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_addr_hi, pwrb,
1677							(addr >> 32));
1678			AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_len, pwrb,
1679							sg_len);
1680			sge_len = sg_len;
1681			AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_last, pwrb,
1682							1);
1683		} else {
1684			AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_last, pwrb,
1685							0);
1686			AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_r2t_offset,
1687							pwrb, sge_len);
1688			sg_len = sg_dma_len(sg);
1689			addr = (u64) sg_dma_address(sg);
1690			AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_addr_lo, pwrb,
1691							(addr & 0xFFFFFFFF));
1692			AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_addr_hi, pwrb,
1693							(addr >> 32));
1694			AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_len, pwrb,
1695							sg_len);
1696		}
1697	}
1698	psgl = (struct iscsi_sge *)io_task->psgl_handle->pfrag;
1699	memset(psgl, 0, sizeof(*psgl) * BE2_SGE);
1700
1701	AMAP_SET_BITS(struct amap_iscsi_sge, len, psgl, io_task->bhs_len - 2);
1702
1703	AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, psgl,
1704			io_task->bhs_pa.u.a32.address_hi);
1705	AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, psgl,
1706			io_task->bhs_pa.u.a32.address_lo);
1707
1708	if (num_sg == 2)
1709		AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_last, pwrb, 1);
1710	sg = l_sg;
1711	psgl++;
1712	psgl++;
1713	offset = 0;
1714	for (index = 0; index < num_sg; index++, sg = sg_next(sg), psgl++) {
1715		sg_len = sg_dma_len(sg);
1716		addr = (u64) sg_dma_address(sg);
1717		AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, psgl,
1718						(addr & 0xFFFFFFFF));
1719		AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, psgl,
1720						(addr >> 32));
1721		AMAP_SET_BITS(struct amap_iscsi_sge, len, psgl, sg_len);
1722		AMAP_SET_BITS(struct amap_iscsi_sge, sge_offset, psgl, offset);
1723		AMAP_SET_BITS(struct amap_iscsi_sge, last_sge, psgl, 0);
1724		offset += sg_len;
1725	}
1726	psgl--;
1727	AMAP_SET_BITS(struct amap_iscsi_sge, last_sge, psgl, 1);
1728}
1729
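/**
 * hwi_write_buffer - set up a WRB for a non-I/O (control) PDU
 * @pwrb: WRB to fill
 * @task: libiscsi task, optionally carrying immediate data
 *
 * Points the WRB at the task's BHS. When task->data holds data it is
 * mapped for DMA and described by the inline sge0; the task's SGL
 * fragment is set up to match, ending with last_sge set.
 */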
1730static void hwi_write_buffer(struct iscsi_wrb *pwrb, struct iscsi_task *task)
1731{
1732	struct iscsi_sge *psgl;
1733	unsigned long long addr;
1734	struct beiscsi_io_task *io_task = task->dd_data;
1735	struct beiscsi_conn *beiscsi_conn = io_task->conn;
1736	struct beiscsi_hba *phba = beiscsi_conn->phba;
1737
1738	io_task->bhs_len = sizeof(struct be_nonio_bhs) - 2;
1739	AMAP_SET_BITS(struct amap_iscsi_wrb, iscsi_bhs_addr_lo, pwrb,
1740				io_task->bhs_pa.u.a32.address_lo);
1741	AMAP_SET_BITS(struct amap_iscsi_wrb, iscsi_bhs_addr_hi, pwrb,
1742				io_task->bhs_pa.u.a32.address_hi);
1743
1744	if (task->data) {
1745		if (task->data_count) {
1746			AMAP_SET_BITS(struct amap_iscsi_wrb, dsp, pwrb, 1);
1747			addr = (u64) pci_map_single(phba->pcidev,
1748						    task->data,
1749						    task->data_count, PCI_DMA_TODEVICE);
1750		} else {
1751			AMAP_SET_BITS(struct amap_iscsi_wrb, dsp, pwrb, 0);
1752			addr = 0;
1753		}
1754		AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_addr_lo, pwrb,
1755						(addr & 0xFFFFFFFF));
1756		AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_addr_hi, pwrb,
1757						(addr >> 32));
1758		AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_len, pwrb,
1759						task->data_count);
1760
1761		AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_last, pwrb, 1);
1762	} else {
1763		AMAP_SET_BITS(struct amap_iscsi_wrb, dsp, pwrb, 0);
1764		addr = 0;
1765	}
1766
1767	psgl = (struct iscsi_sge *)io_task->psgl_handle->pfrag;
1768
1769	AMAP_SET_BITS(struct amap_iscsi_sge, len, psgl, io_task->bhs_len);
1770
1771	AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, psgl,
1772		      io_task->bhs_pa.u.a32.address_hi);
1773	AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, psgl,
1774		      io_task->bhs_pa.u.a32.address_lo);
1775	if (task->data) {
1776		psgl++;
1777		AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, psgl, 0);
1778		AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, psgl, 0);
1779		AMAP_SET_BITS(struct amap_iscsi_sge, len, psgl, 0);
1780		AMAP_SET_BITS(struct amap_iscsi_sge, sge_offset, psgl, 0);
1781		AMAP_SET_BITS(struct amap_iscsi_sge, rsvd0, psgl, 0);
1782		AMAP_SET_BITS(struct amap_iscsi_sge, last_sge, psgl, 0);
1783
1784		psgl++;
1785		/* task->data was already checked above */
1786		AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, psgl,
1787					(addr & 0xFFFFFFFF));
1788		AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, psgl,
1789					(addr >> 32));
1791		AMAP_SET_BITS(struct amap_iscsi_sge, len, psgl, 0x106);
1792	}
1793	AMAP_SET_BITS(struct amap_iscsi_sge, last_sge, psgl, 1);
1794}
1795
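/**
 * beiscsi_find_mem_req - size every init_mem region up front
 * @phba: adapter instance
 *
 * Computes the byte count for each SE_MEM_MAX region from
 * phba->params: global header templates, the HWI context, WRBs and
 * their handles, SGL handles and SGEs, and the async (default PDU)
 * buffers, rings and handles. The results land in phba->mem_req[].
 */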
1796static void beiscsi_find_mem_req(struct beiscsi_hba *phba)
1797{
1798	unsigned int num_cq_pages, num_async_pdu_buf_pages;
1799	unsigned int num_async_pdu_data_pages, wrb_sz_per_cxn;
1800	unsigned int num_async_pdu_buf_sgl_pages, num_async_pdu_data_sgl_pages;
1801
1802	num_cq_pages = PAGES_REQUIRED(phba->params.num_cq_entries * \
1803				      sizeof(struct sol_cqe));
1804	num_async_pdu_buf_pages =
1805			PAGES_REQUIRED(phba->params.asyncpdus_per_ctrl * \
1806				       phba->params.defpdu_hdr_sz);
1807	num_async_pdu_buf_sgl_pages =
1808			PAGES_REQUIRED(phba->params.asyncpdus_per_ctrl * \
1809				       sizeof(struct phys_addr));
1810	num_async_pdu_data_pages =
1811			PAGES_REQUIRED(phba->params.asyncpdus_per_ctrl * \
1812				       phba->params.defpdu_data_sz);
1813	num_async_pdu_data_sgl_pages =
1814			PAGES_REQUIRED(phba->params.asyncpdus_per_ctrl * \
1815				       sizeof(struct phys_addr));
1816
1817	phba->params.hwi_ws_sz = sizeof(struct hwi_controller);
1818
1819	phba->mem_req[ISCSI_MEM_GLOBAL_HEADER] = 2 *
1820						 BE_ISCSI_PDU_HEADER_SIZE;
1821	phba->mem_req[HWI_MEM_ADDN_CONTEXT] =
1822					    sizeof(struct hwi_context_memory);
1823
1824
1825	phba->mem_req[HWI_MEM_WRB] = sizeof(struct iscsi_wrb)
1826	    * (phba->params.wrbs_per_cxn)
1827	    * phba->params.cxns_per_ctrl;
1828	wrb_sz_per_cxn =  sizeof(struct wrb_handle) *
1829				 (phba->params.wrbs_per_cxn);
1830	phba->mem_req[HWI_MEM_WRBH] = roundup_pow_of_two((wrb_sz_per_cxn) *
1831				phba->params.cxns_per_ctrl);
1832
1833	phba->mem_req[HWI_MEM_SGLH] = sizeof(struct sgl_handle) *
1834		phba->params.icds_per_ctrl;
1835	phba->mem_req[HWI_MEM_SGE] = sizeof(struct iscsi_sge) *
1836		phba->params.num_sge_per_io * phba->params.icds_per_ctrl;
1837
1838	phba->mem_req[HWI_MEM_ASYNC_HEADER_BUF] =
1839		num_async_pdu_buf_pages * PAGE_SIZE;
1840	phba->mem_req[HWI_MEM_ASYNC_DATA_BUF] =
1841		num_async_pdu_data_pages * PAGE_SIZE;
1842	phba->mem_req[HWI_MEM_ASYNC_HEADER_RING] =
1843		num_async_pdu_buf_sgl_pages * PAGE_SIZE;
1844	phba->mem_req[HWI_MEM_ASYNC_DATA_RING] =
1845		num_async_pdu_data_sgl_pages * PAGE_SIZE;
1846	phba->mem_req[HWI_MEM_ASYNC_HEADER_HANDLE] =
1847		phba->params.asyncpdus_per_ctrl *
1848		sizeof(struct async_pdu_handle);
1849	phba->mem_req[HWI_MEM_ASYNC_DATA_HANDLE] =
1850		phba->params.asyncpdus_per_ctrl *
1851		sizeof(struct async_pdu_handle);
1852	phba->mem_req[HWI_MEM_ASYNC_PDU_CONTEXT] =
1853		sizeof(struct hwi_async_pdu_context) +
1854		(phba->params.cxns_per_ctrl * sizeof(struct hwi_async_entry));
1855}
1856
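/**
 * beiscsi_alloc_mem - allocate the regions sized by beiscsi_find_mem_req
 * @phba: adapter instance
 *
 * Each region may be satisfied by up to BEISCSI_MAX_FRAGS_INIT
 * DMA-coherent fragments of at most be_max_phys_size KB. A failed
 * allocation is retried at the next lower power of two until
 * BE_MIN_MEM_SIZE is reached; a final failure unwinds everything
 * allocated so far.
 */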
1857static int beiscsi_alloc_mem(struct beiscsi_hba *phba)
1858{
1859	struct be_mem_descriptor *mem_descr;
1860	dma_addr_t bus_add;
1861	struct mem_array *mem_arr, *mem_arr_orig;
1862	unsigned int i, j, alloc_size, curr_alloc_size;
1863
1864	phba->phwi_ctrlr = kmalloc(phba->params.hwi_ws_sz, GFP_KERNEL);
1865	if (!phba->phwi_ctrlr)
1866		return -ENOMEM;
1867
1868	phba->init_mem = kcalloc(SE_MEM_MAX, sizeof(*mem_descr),
1869				 GFP_KERNEL);
1870	if (!phba->init_mem) {
1871		kfree(phba->phwi_ctrlr);
1872		return -ENOMEM;
1873	}
1874
1875	mem_arr_orig = kmalloc(sizeof(*mem_arr_orig) * BEISCSI_MAX_FRAGS_INIT,
1876			       GFP_KERNEL);
1877	if (!mem_arr_orig) {
1878		kfree(phba->init_mem);
1879		kfree(phba->phwi_ctrlr);
1880		return -ENOMEM;
1881	}
1882
1883	mem_descr = phba->init_mem;
1884	for (i = 0; i < SE_MEM_MAX; i++) {
1885		j = 0;
1886		mem_arr = mem_arr_orig;
1887		alloc_size = phba->mem_req[i];
1888		memset(mem_arr, 0, sizeof(struct mem_array) *
1889		       BEISCSI_MAX_FRAGS_INIT);
1890		curr_alloc_size = min(be_max_phys_size * 1024, alloc_size);
1891		do {
1892			mem_arr->virtual_address = pci_alloc_consistent(
1893							phba->pcidev,
1894							curr_alloc_size,
1895							&bus_add);
1896			if (!mem_arr->virtual_address) {
1897				if (curr_alloc_size <= BE_MIN_MEM_SIZE)
1898					goto free_mem;
1899				if (curr_alloc_size -
1900					rounddown_pow_of_two(curr_alloc_size))
1901					curr_alloc_size = rounddown_pow_of_two
1902							     (curr_alloc_size);
1903				else
1904					curr_alloc_size = curr_alloc_size / 2;
1905			} else {
1906				mem_arr->bus_address.u.
1907				    a64.address = (__u64) bus_add;
1908				mem_arr->size = curr_alloc_size;
1909				alloc_size -= curr_alloc_size;
1910				curr_alloc_size = min(be_max_phys_size *
1911						      1024, alloc_size);
1912				j++;
1913				mem_arr++;
1914			}
1915		} while (alloc_size);
1916		mem_descr->num_elements = j;
1917		mem_descr->size_in_bytes = phba->mem_req[i];
1918		mem_descr->mem_array = kmalloc(sizeof(*mem_arr) * j,
1919					       GFP_KERNEL);
1920		if (!mem_descr->mem_array)
1921			goto free_mem;
1922
1923		memcpy(mem_descr->mem_array, mem_arr_orig,
1924		       sizeof(struct mem_array) * j);
1925		mem_descr++;
1926	}
1927	kfree(mem_arr_orig);
1928	return 0;
1929free_mem:
1930	mem_descr->num_elements = j;
1931	while ((i) || (j)) {
1932		for (j = mem_descr->num_elements; j > 0; j--) {
1933			pci_free_consistent(phba->pcidev,
1934					    mem_descr->mem_array[j - 1].size,
1935					    mem_descr->mem_array[j - 1].
1936					    virtual_address,
1937					    mem_descr->mem_array[j - 1].
1938					    bus_address.u.a64.address);
1939		}
1940		if (i) {
1941			i--;
1942			kfree(mem_descr->mem_array);
1943			mem_descr--;
1944		}
1945	}
1946	kfree(mem_arr_orig);
1947	kfree(phba->init_mem);
1948	kfree(phba->phwi_ctrlr);
1949	return -ENOMEM;
1950}
1951
1952static int beiscsi_get_memory(struct beiscsi_hba *phba)
1953{
1954	beiscsi_find_mem_req(phba);
1955	return beiscsi_alloc_mem(phba);
1956}
1957
1958static void iscsi_init_global_templates(struct beiscsi_hba *phba)
1959{
1960	struct pdu_data_out *pdata_out;
1961	struct pdu_nop_out *pnop_out;
1962	struct be_mem_descriptor *mem_descr;
1963
1964	mem_descr = phba->init_mem;
1965	mem_descr += ISCSI_MEM_GLOBAL_HEADER;
1966	pdata_out =
1967	    (struct pdu_data_out *)mem_descr->mem_array[0].virtual_address;
1968	memset(pdata_out, 0, BE_ISCSI_PDU_HEADER_SIZE);
1969
1970	AMAP_SET_BITS(struct amap_pdu_data_out, opcode, pdata_out,
1971		      IIOC_SCSI_DATA);
1972
1973	pnop_out =
1974	    (struct pdu_nop_out *)((unsigned char *)mem_descr->mem_array[0].
1975				   virtual_address + BE_ISCSI_PDU_HEADER_SIZE);
1976
1977	memset(pnop_out, 0, BE_ISCSI_PDU_HEADER_SIZE);
1978	AMAP_SET_BITS(struct amap_pdu_nop_out, ttt, pnop_out, 0xFFFFFFFF);
1979	AMAP_SET_BITS(struct amap_pdu_nop_out, f_bit, pnop_out, 1);
1980	AMAP_SET_BITS(struct amap_pdu_nop_out, i_bit, pnop_out, 0);
1981}
1982
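/**
 * beiscsi_init_wrb_handle - build per-connection WRB handle tables
 * @phba: adapter instance
 *
 * Carves HWI_MEM_WRBH into wrbs_per_cxn handles per connection and
 * HWI_MEM_WRB into the matching iscsi_wrb entries, advancing to the
 * next mem_array fragment when one runs out. wrb_context entries sit
 * at even indices, mirroring the CIDs that hba_setup_cid_tbls() hands
 * out in steps of two.
 */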
1983static void beiscsi_init_wrb_handle(struct beiscsi_hba *phba)
1984{
1985	struct be_mem_descriptor *mem_descr_wrbh, *mem_descr_wrb;
1986	struct wrb_handle *pwrb_handle;
1987	struct hwi_controller *phwi_ctrlr;
1988	struct hwi_wrb_context *pwrb_context;
1989	struct iscsi_wrb *pwrb;
1990	unsigned int num_cxn_wrbh;
1991	unsigned int num_cxn_wrb, j, idx, index;
1992
1993	mem_descr_wrbh = phba->init_mem;
1994	mem_descr_wrbh += HWI_MEM_WRBH;
1995
1996	mem_descr_wrb = phba->init_mem;
1997	mem_descr_wrb += HWI_MEM_WRB;
1998
1999	idx = 0;
2000	pwrb_handle = mem_descr_wrbh->mem_array[idx].virtual_address;
2001	num_cxn_wrbh = ((mem_descr_wrbh->mem_array[idx].size) /
2002			((sizeof(struct wrb_handle)) *
2003			 phba->params.wrbs_per_cxn));
2004	phwi_ctrlr = phba->phwi_ctrlr;
2005
2006	for (index = 0; index < phba->params.cxns_per_ctrl * 2; index += 2) {
2007		pwrb_context = &phwi_ctrlr->wrb_context[index];
2008		pwrb_context->pwrb_handle_base =
2009				kzalloc(sizeof(struct wrb_handle *) *
2010					phba->params.wrbs_per_cxn, GFP_KERNEL);
2011		pwrb_context->pwrb_handle_basestd =
2012				kzalloc(sizeof(struct wrb_handle *) *
2013					phba->params.wrbs_per_cxn, GFP_KERNEL);
2014		if (num_cxn_wrbh) {
2015			pwrb_context->alloc_index = 0;
2016			pwrb_context->wrb_handles_available = 0;
2017			for (j = 0; j < phba->params.wrbs_per_cxn; j++) {
2018				pwrb_context->pwrb_handle_base[j] = pwrb_handle;
2019				pwrb_context->pwrb_handle_basestd[j] =
2020								pwrb_handle;
2021				pwrb_context->wrb_handles_available++;
2022				pwrb_handle->wrb_index = j;
2023				pwrb_handle++;
2024			}
2025			pwrb_context->free_index = 0;
2026			num_cxn_wrbh--;
2027		} else {
2028			idx++;
2029			pwrb_handle =
2030			    mem_descr_wrbh->mem_array[idx].virtual_address;
2031			num_cxn_wrbh =
2032			    ((mem_descr_wrbh->mem_array[idx].size) /
2033			     ((sizeof(struct wrb_handle)) *
2034			      phba->params.wrbs_per_cxn));
2035			pwrb_context->alloc_index = 0;
2036			for (j = 0; j < phba->params.wrbs_per_cxn; j++) {
2037				pwrb_context->pwrb_handle_base[j] = pwrb_handle;
2038				pwrb_context->pwrb_handle_basestd[j] =
2039				    pwrb_handle;
2040				pwrb_context->wrb_handles_available++;
2041				pwrb_handle->wrb_index = j;
2042				pwrb_handle++;
2043			}
2044			pwrb_context->free_index = 0;
2045			num_cxn_wrbh--;
2046		}
2047	}
2048	idx = 0;
2049	pwrb = mem_descr_wrb->mem_array[idx].virtual_address;
2050	num_cxn_wrb =
2051	    ((mem_descr_wrb->mem_array[idx].size) /
2052	     (sizeof(struct iscsi_wrb) * phba->params.wrbs_per_cxn));
2053
2054	for (index = 0; index < phba->params.cxns_per_ctrl * 2; index += 2) {
2055		pwrb_context = &phwi_ctrlr->wrb_context[index];
2056		if (num_cxn_wrb) {
2057			for (j = 0; j < phba->params.wrbs_per_cxn; j++) {
2058				pwrb_handle = pwrb_context->pwrb_handle_base[j];
2059				pwrb_handle->pwrb = pwrb;
2060				pwrb++;
2061			}
2062			num_cxn_wrb--;
2063		} else {
2064			idx++;
2065			pwrb = mem_descr_wrb->mem_array[idx].virtual_address;
2066			num_cxn_wrb = ((mem_descr_wrb->mem_array[idx].size) /
2067					(sizeof(struct iscsi_wrb) *
2068					phba->params.wrbs_per_cxn));
2069			for (j = 0; j < phba->params.wrbs_per_cxn; j++) {
2070				pwrb_handle = pwrb_context->pwrb_handle_base[j];
2071				pwrb_handle->pwrb = pwrb;
2072				pwrb++;
2073			}
2074			num_cxn_wrb--;
2075		}
2076	}
2077}
2078
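/**
 * hwi_init_async_pdu_ctx - wire up the default-PDU (async) context
 * @phba: adapter instance
 *
 * Points the async context at its header/data buffers, rings and
 * handle arrays inside init_mem, then walks all asyncpdus_per_ctrl
 * entries, precomputing each handle's virtual and bus address and
 * threading it onto the header or data free list.
 */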
2079static void hwi_init_async_pdu_ctx(struct beiscsi_hba *phba)
2080{
2081	struct hwi_controller *phwi_ctrlr;
2082	struct hba_parameters *p = &phba->params;
2083	struct hwi_async_pdu_context *pasync_ctx;
2084	struct async_pdu_handle *pasync_header_h, *pasync_data_h;
2085	unsigned int index;
2086	struct be_mem_descriptor *mem_descr;
2087
2088	mem_descr = (struct be_mem_descriptor *)phba->init_mem;
2089	mem_descr += HWI_MEM_ASYNC_PDU_CONTEXT;
2090
2091	phwi_ctrlr = phba->phwi_ctrlr;
2092	phwi_ctrlr->phwi_ctxt->pasync_ctx = (struct hwi_async_pdu_context *)
2093				mem_descr->mem_array[0].virtual_address;
2094	pasync_ctx = phwi_ctrlr->phwi_ctxt->pasync_ctx;
2095	memset(pasync_ctx, 0, sizeof(*pasync_ctx));
2096
2097	pasync_ctx->async_header.num_entries = p->asyncpdus_per_ctrl;
2098	pasync_ctx->async_header.buffer_size = p->defpdu_hdr_sz;
2099	pasync_ctx->async_data.buffer_size = p->defpdu_data_sz;
2100	pasync_ctx->async_data.num_entries = p->asyncpdus_per_ctrl;
2101
2102	mem_descr = (struct be_mem_descriptor *)phba->init_mem;
2103	mem_descr += HWI_MEM_ASYNC_HEADER_BUF;
2104	if (mem_descr->mem_array[0].virtual_address) {
2105		SE_DEBUG(DBG_LVL_8,
2106			 "hwi_init_async_pdu_ctx HWI_MEM_ASYNC_HEADER_BUF"
2107			 " va=%p\n", mem_descr->mem_array[0].virtual_address);
2108	} else
2109		shost_printk(KERN_WARNING, phba->shost,
2110			     "No Virtual address \n");
2111
2112	pasync_ctx->async_header.va_base =
2113			mem_descr->mem_array[0].virtual_address;
2114
2115	pasync_ctx->async_header.pa_base.u.a64.address =
2116			mem_descr->mem_array[0].bus_address.u.a64.address;
2117
2118	mem_descr = (struct be_mem_descriptor *)phba->init_mem;
2119	mem_descr += HWI_MEM_ASYNC_HEADER_RING;
2120	if (mem_descr->mem_array[0].virtual_address) {
2121		SE_DEBUG(DBG_LVL_8,
2122			 "hwi_init_async_pdu_ctx HWI_MEM_ASYNC_HEADER_RING"
2123			 " va=%p\n", mem_descr->mem_array[0].virtual_address);
2124	} else
2125		shost_printk(KERN_WARNING, phba->shost,
2126			    "No Virtual address \n");
2127	pasync_ctx->async_header.ring_base =
2128			mem_descr->mem_array[0].virtual_address;
2129
2130	mem_descr = (struct be_mem_descriptor *)phba->init_mem;
2131	mem_descr += HWI_MEM_ASYNC_HEADER_HANDLE;
2132	if (mem_descr->mem_array[0].virtual_address) {
2133		SE_DEBUG(DBG_LVL_8,
2134			 "hwi_init_async_pdu_ctx HWI_MEM_ASYNC_HEADER_HANDLE"
2135			 " va=%p\n", mem_descr->mem_array[0].virtual_address);
2136	} else
2137		shost_printk(KERN_WARNING, phba->shost,
2138			    "No Virtual address \n");
2139
2140	pasync_ctx->async_header.handle_base =
2141			mem_descr->mem_array[0].virtual_address;
2142	pasync_ctx->async_header.writables = 0;
2143	INIT_LIST_HEAD(&pasync_ctx->async_header.free_list);
2144
2145	mem_descr = (struct be_mem_descriptor *)phba->init_mem;
2146	mem_descr += HWI_MEM_ASYNC_DATA_BUF;
2147	if (mem_descr->mem_array[0].virtual_address) {
2148		SE_DEBUG(DBG_LVL_8,
2149			 "hwi_init_async_pdu_ctx HWI_MEM_ASYNC_DATA_BUF"
2150			 " va=%p\n", mem_descr->mem_array[0].virtual_address);
2151	} else
2152		shost_printk(KERN_WARNING, phba->shost,
2153			    "No Virtual address \n");
2154	pasync_ctx->async_data.va_base =
2155			mem_descr->mem_array[0].virtual_address;
2156	pasync_ctx->async_data.pa_base.u.a64.address =
2157			mem_descr->mem_array[0].bus_address.u.a64.address;
2158
2159	mem_descr = (struct be_mem_descriptor *)phba->init_mem;
2160	mem_descr += HWI_MEM_ASYNC_DATA_RING;
2161	if (mem_descr->mem_array[0].virtual_address) {
2162		SE_DEBUG(DBG_LVL_8,
2163			 "hwi_init_async_pdu_ctx HWI_MEM_ASYNC_DATA_RING"
2164			 " va=%p\n", mem_descr->mem_array[0].virtual_address);
2165	} else
2166		shost_printk(KERN_WARNING, phba->shost,
2167			     "No Virtual address \n");
2168
2169	pasync_ctx->async_data.ring_base =
2170			mem_descr->mem_array[0].virtual_address;
2171
2172	mem_descr = (struct be_mem_descriptor *)phba->init_mem;
2173	mem_descr += HWI_MEM_ASYNC_DATA_HANDLE;
2174	if (!mem_descr->mem_array[0].virtual_address)
2175		shost_printk(KERN_WARNING, phba->shost,
2176			    "No Virtual address \n");
2177
2178	pasync_ctx->async_data.handle_base =
2179			mem_descr->mem_array[0].virtual_address;
2180	pasync_ctx->async_data.writables = 0;
2181	INIT_LIST_HEAD(&pasync_ctx->async_data.free_list);
2182
2183	pasync_header_h =
2184		(struct async_pdu_handle *)pasync_ctx->async_header.handle_base;
2185	pasync_data_h =
2186		(struct async_pdu_handle *)pasync_ctx->async_data.handle_base;
2187
2188	for (index = 0; index < p->asyncpdus_per_ctrl; index++) {
2189		pasync_header_h->cri = -1;
2190		pasync_header_h->index = (char)index;
2191		INIT_LIST_HEAD(&pasync_header_h->link);
2192		pasync_header_h->pbuffer =
2193			(void *)((unsigned long)
2194			(pasync_ctx->async_header.va_base) +
2195			(p->defpdu_hdr_sz * index));
2196
2197		pasync_header_h->pa.u.a64.address =
2198			pasync_ctx->async_header.pa_base.u.a64.address +
2199			(p->defpdu_hdr_sz * index);
2200
2201		list_add_tail(&pasync_header_h->link,
2202				&pasync_ctx->async_header.free_list);
2203		pasync_header_h++;
2204		pasync_ctx->async_header.free_entries++;
2205		pasync_ctx->async_header.writables++;
2206
2207		INIT_LIST_HEAD(&pasync_ctx->async_entry[index].wait_queue.list);
2208		INIT_LIST_HEAD(&pasync_ctx->async_entry[index].
2209			       header_busy_list);
2210		pasync_data_h->cri = -1;
2211		pasync_data_h->index = (char)index;
2212		INIT_LIST_HEAD(&pasync_data_h->link);
2213		pasync_data_h->pbuffer =
2214			(void *)((unsigned long)
2215			(pasync_ctx->async_data.va_base) +
2216			(p->defpdu_data_sz * index));
2217
2218		pasync_data_h->pa.u.a64.address =
2219		    pasync_ctx->async_data.pa_base.u.a64.address +
2220		    (p->defpdu_data_sz * index);
2221
2222		list_add_tail(&pasync_data_h->link,
2223			      &pasync_ctx->async_data.free_list);
2224		pasync_data_h++;
2225		pasync_ctx->async_data.free_entries++;
2226		pasync_ctx->async_data.writables++;
2227
2228		INIT_LIST_HEAD(&pasync_ctx->async_entry[index].data_busy_list);
2229	}
2230
2231	pasync_ctx->async_header.host_write_ptr = 0;
2232	pasync_ctx->async_header.ep_read_ptr = -1;
2233	pasync_ctx->async_data.host_write_ptr = 0;
2234	pasync_ctx->async_data.ep_read_ptr = -1;
2235}
2236
2237static int
2238be_sgl_create_contiguous(void *virtual_address,
2239			 u64 physical_address, u32 length,
2240			 struct be_dma_mem *sgl)
2241{
2242	WARN_ON(!virtual_address);
2243	WARN_ON(!physical_address);
2244	WARN_ON(!length);
2245	WARN_ON(!sgl);
2246
2247	sgl->va = virtual_address;
2248	sgl->dma = physical_address;
2249	sgl->size = length;
2250
2251	return 0;
2252}
2253
2254static void be_sgl_destroy_contiguous(struct be_dma_mem *sgl)
2255{
2256	memset(sgl, 0, sizeof(*sgl));
2257}
2258
2259static void
2260hwi_build_be_sgl_arr(struct beiscsi_hba *phba,
2261		     struct mem_array *pmem, struct be_dma_mem *sgl)
2262{
2263	if (sgl->va)
2264		be_sgl_destroy_contiguous(sgl);
2265
2266	be_sgl_create_contiguous(pmem->virtual_address,
2267				 pmem->bus_address.u.a64.address,
2268				 pmem->size, sgl);
2269}
2270
2271static void
2272hwi_build_be_sgl_by_offset(struct beiscsi_hba *phba,
2273			   struct mem_array *pmem, struct be_dma_mem *sgl)
2274{
2275	if (sgl->va)
2276		be_sgl_destroy_contiguous(sgl);
2277
2278	be_sgl_create_contiguous((unsigned char *)pmem->virtual_address,
2279				 pmem->bus_address.u.a64.address,
2280				 pmem->size, sgl);
2281}
2282
2283static int be_fill_queue(struct be_queue_info *q,
2284		u16 len, u16 entry_size, void *vaddress)
2285{
2286	struct be_dma_mem *mem = &q->dma_mem;
2287
2288	memset(q, 0, sizeof(*q));
2289	q->len = len;
2290	q->entry_size = entry_size;
2291	mem->size = len * entry_size;
2292	mem->va = vaddress;
2293	if (!mem->va)
2294		return -ENOMEM;
2295	memset(mem->va, 0, mem->size);
2296	return 0;
2297}
2298
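/**
 * beiscsi_create_eqs - allocate and create the event queues
 * @phba: adapter instance
 * @phwi_context: HWI context holding the be_eq array
 *
 * Creates one EQ per CPU in use plus, when MSIx is enabled, one more
 * for the MCC. Ring memory is DMA-coherent; on any failure the EQ
 * memory allocated so far is released.
 */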
2299static int beiscsi_create_eqs(struct beiscsi_hba *phba,
2300			     struct hwi_context_memory *phwi_context)
2301{
2302	unsigned int i, num_eq_pages;
2303	int ret, eq_for_mcc;
2304	struct be_queue_info *eq;
2305	struct be_dma_mem *mem;
2306	void *eq_vaddress;
2307	dma_addr_t paddr;
2308
2309	num_eq_pages = PAGES_REQUIRED(phba->params.num_eq_entries * \
2310				      sizeof(struct be_eq_entry));
2311
2312	if (phba->msix_enabled)
2313		eq_for_mcc = 1;
2314	else
2315		eq_for_mcc = 0;
2316	for (i = 0; i < (phba->num_cpus + eq_for_mcc); i++) {
2317		eq = &phwi_context->be_eq[i].q;
2318		mem = &eq->dma_mem;
2319		phwi_context->be_eq[i].phba = phba;
2320		eq_vaddress = pci_alloc_consistent(phba->pcidev,
2321						     num_eq_pages * PAGE_SIZE,
2322						     &paddr);
2323		if (!eq_vaddress) {
2324			ret = -ENOMEM;
			goto create_eq_error;
		}
2325
2326		mem->va = eq_vaddress;
2327		ret = be_fill_queue(eq, phba->params.num_eq_entries,
2328				    sizeof(struct be_eq_entry), eq_vaddress);
2329		if (ret) {
2330			shost_printk(KERN_ERR, phba->shost,
2331				     "be_fill_queue Failed for EQ \n");
2332			goto create_eq_error;
2333		}
2334
2335		mem->dma = paddr;
2336		ret = beiscsi_cmd_eq_create(&phba->ctrl, eq,
2337					    phwi_context->cur_eqd);
2338		if (ret) {
2339			shost_printk(KERN_ERR, phba->shost,
2340				     "beiscsi_cmd_eq_create"
2341				     " failed for EQ\n");
2342			goto create_eq_error;
2343		}
2344		SE_DEBUG(DBG_LVL_8, "eqid = %d\n", phwi_context->be_eq[i].q.id);
2345	}
2346	return 0;
2347create_eq_error:
2348	for (i = 0; i < (phba->num_cpus + eq_for_mcc); i++) {
2349		eq = &phwi_context->be_eq[i].q;
2350		mem = &eq->dma_mem;
2351		if (mem->va)
2352			pci_free_consistent(phba->pcidev, num_eq_pages
2353					    * PAGE_SIZE,
2354					    mem->va, mem->dma);
2355	}
2356	return ret;
2357}
2358
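/**
 * beiscsi_create_cqs - allocate and create the I/O completion queues
 * @phba: adapter instance
 * @phwi_context: HWI context holding the be_cq and be_eq arrays
 *
 * Creates one solicited-CQE completion queue per CPU in use and binds
 * each one to its matching EQ; on failure the CQ memory allocated so
 * far is released.
 */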
2359static int beiscsi_create_cqs(struct beiscsi_hba *phba,
2360			     struct hwi_context_memory *phwi_context)
2361{
2362	unsigned int i, num_cq_pages;
2363	int ret;
2364	struct be_queue_info *cq, *eq;
2365	struct be_dma_mem *mem;
2366	struct be_eq_obj *pbe_eq;
2367	void *cq_vaddress;
2368	dma_addr_t paddr;
2369
2370	num_cq_pages = PAGES_REQUIRED(phba->params.num_cq_entries * \
2371				      sizeof(struct sol_cqe));
2372
2373	for (i = 0; i < phba->num_cpus; i++) {
2374		cq = &phwi_context->be_cq[i];
2375		eq = &phwi_context->be_eq[i].q;
2376		pbe_eq = &phwi_context->be_eq[i];
2377		pbe_eq->cq = cq;
2378		pbe_eq->phba = phba;
2379		mem = &cq->dma_mem;
2380		cq_vaddress = pci_alloc_consistent(phba->pcidev,
2381						     num_cq_pages * PAGE_SIZE,
2382						     &paddr);
2383		if (!cq_vaddress) {
2384			ret = -ENOMEM;
			goto create_cq_error;
		}
2385		ret = be_fill_queue(cq, phba->params.num_cq_entries,
2386				    sizeof(struct sol_cqe), cq_vaddress);
2387		if (ret) {
2388			shost_printk(KERN_ERR, phba->shost,
2389				     "be_fill_queue Failed for ISCSI CQ \n");
2390			goto create_cq_error;
2391		}
2392
2393		mem->dma = paddr;
2394		ret = beiscsi_cmd_cq_create(&phba->ctrl, cq, eq, false,
2395					    false, 0);
2396		if (ret) {
2397			shost_printk(KERN_ERR, phba->shost,
2398				     "beiscsi_cmd_cq_create"
2399				     " failed for ISCSI CQ\n");
2400			goto create_cq_error;
2401		}
2402		SE_DEBUG(DBG_LVL_8, "iscsi cq_id is %d for eq_id %d\n",
2403						 cq->id, eq->id);
2404		SE_DEBUG(DBG_LVL_8, "ISCSI CQ CREATED\n");
2405	}
2406	return 0;
2407
2408create_cq_error:
2409	for (i = 0; i < phba->num_cpus; i++) {
2410		cq = &phwi_context->be_cq[i];
2411		mem = &cq->dma_mem;
2412		if (mem->va)
2413			pci_free_consistent(phba->pcidev, num_cq_pages
2414					    * PAGE_SIZE,
2415					    mem->va, mem->dma);
2416	}
2417	return ret;
2418
2419}
2420
2421static int
2422beiscsi_create_def_hdr(struct beiscsi_hba *phba,
2423		       struct hwi_context_memory *phwi_context,
2424		       struct hwi_controller *phwi_ctrlr,
2425		       unsigned int def_pdu_ring_sz)
2426{
2427	unsigned int idx;
2428	int ret;
2429	struct be_queue_info *dq, *cq;
2430	struct be_dma_mem *mem;
2431	struct be_mem_descriptor *mem_descr;
2432	void *dq_vaddress;
2433
2434	idx = 0;
2435	dq = &phwi_context->be_def_hdrq;
2436	cq = &phwi_context->be_cq[0];
2437	mem = &dq->dma_mem;
2438	mem_descr = phba->init_mem;
2439	mem_descr += HWI_MEM_ASYNC_HEADER_RING;
2440	dq_vaddress = mem_descr->mem_array[idx].virtual_address;
2441	ret = be_fill_queue(dq, mem_descr->mem_array[0].size /
2442			    sizeof(struct phys_addr),
2443			    sizeof(struct phys_addr), dq_vaddress);
2444	if (ret) {
2445		shost_printk(KERN_ERR, phba->shost,
2446			     "be_fill_queue Failed for DEF PDU HDR\n");
2447		return ret;
2448	}
2449	mem->dma = mem_descr->mem_array[idx].bus_address.u.a64.address;
2450	ret = be_cmd_create_default_pdu_queue(&phba->ctrl, cq, dq,
2451					      def_pdu_ring_sz,
2452					      phba->params.defpdu_hdr_sz);
2453	if (ret) {
2454		shost_printk(KERN_ERR, phba->shost,
2455			     "be_cmd_create_default_pdu_queue Failed DEFHDR\n");
2456		return ret;
2457	}
2458	phwi_ctrlr->default_pdu_hdr.id = phwi_context->be_def_hdrq.id;
2459	SE_DEBUG(DBG_LVL_8, "iscsi def pdu id is %d\n",
2460		 phwi_context->be_def_hdrq.id);
2461	hwi_post_async_buffers(phba, 1);
2462	return 0;
2463}
2464
2465static int
2466beiscsi_create_def_data(struct beiscsi_hba *phba,
2467			struct hwi_context_memory *phwi_context,
2468			struct hwi_controller *phwi_ctrlr,
2469			unsigned int def_pdu_ring_sz)
2470{
2471	unsigned int idx;
2472	int ret;
2473	struct be_queue_info *dataq, *cq;
2474	struct be_dma_mem *mem;
2475	struct be_mem_descriptor *mem_descr;
2476	void *dq_vaddress;
2477
2478	idx = 0;
2479	dataq = &phwi_context->be_def_dataq;
2480	cq = &phwi_context->be_cq[0];
2481	mem = &dataq->dma_mem;
2482	mem_descr = phba->init_mem;
2483	mem_descr += HWI_MEM_ASYNC_DATA_RING;
2484	dq_vaddress = mem_descr->mem_array[idx].virtual_address;
2485	ret = be_fill_queue(dataq, mem_descr->mem_array[0].size /
2486			    sizeof(struct phys_addr),
2487			    sizeof(struct phys_addr), dq_vaddress);
2488	if (ret) {
2489		shost_printk(KERN_ERR, phba->shost,
2490			     "be_fill_queue Failed for DEF PDU DATA\n");
2491		return ret;
2492	}
2493	mem->dma = mem_descr->mem_array[idx].bus_address.u.a64.address;
2494	ret = be_cmd_create_default_pdu_queue(&phba->ctrl, cq, dataq,
2495					      def_pdu_ring_sz,
2496					      phba->params.defpdu_data_sz);
2497	if (ret) {
2498		shost_printk(KERN_ERR, phba->shost,
2499			     "be_cmd_create_default_pdu_queue Failed"
2500			     " for DEF PDU DATA\n");
2501		return ret;
2502	}
2503	phwi_ctrlr->default_pdu_data.id = phwi_context->be_def_dataq.id;
2504	SE_DEBUG(DBG_LVL_8, "iscsi def data id is %d\n",
2505		 phwi_context->be_def_dataq.id);
2506	hwi_post_async_buffers(phba, 0);
2507	SE_DEBUG(DBG_LVL_8, "DEFAULT PDU DATA RING CREATED \n");
2508	return 0;
2509}
2510
2511static int
2512beiscsi_post_pages(struct beiscsi_hba *phba)
2513{
2514	struct be_mem_descriptor *mem_descr;
2515	struct mem_array *pm_arr;
2516	unsigned int page_offset, i;
2517	struct be_dma_mem sgl;
2518	int status;
2519
2520	mem_descr = phba->init_mem;
2521	mem_descr += HWI_MEM_SGE;
2522	pm_arr = mem_descr->mem_array;
2523
2524	page_offset = (sizeof(struct iscsi_sge) * phba->params.num_sge_per_io *
2525			phba->fw_config.iscsi_icd_start) / PAGE_SIZE;
2526	for (i = 0; i < mem_descr->num_elements; i++) {
2527		hwi_build_be_sgl_arr(phba, pm_arr, &sgl);
2528		status = be_cmd_iscsi_post_sgl_pages(&phba->ctrl, &sgl,
2529						page_offset,
2530						(pm_arr->size / PAGE_SIZE));
2531		page_offset += pm_arr->size / PAGE_SIZE;
2532		if (status != 0) {
2533			shost_printk(KERN_ERR, phba->shost,
2534				     "post sgl failed.\n");
2535			return status;
2536		}
2537		pm_arr++;
2538	}
2539	SE_DEBUG(DBG_LVL_8, "POSTED PAGES \n");
2540	return 0;
2541}
2542
2543static void be_queue_free(struct beiscsi_hba *phba, struct be_queue_info *q)
2544{
2545	struct be_dma_mem *mem = &q->dma_mem;
2546	if (mem->va)
2547		pci_free_consistent(phba->pcidev, mem->size,
2548			mem->va, mem->dma);
2549}
2550
2551static int be_queue_alloc(struct beiscsi_hba *phba, struct be_queue_info *q,
2552		u16 len, u16 entry_size)
2553{
2554	struct be_dma_mem *mem = &q->dma_mem;
2555
2556	memset(q, 0, sizeof(*q));
2557	q->len = len;
2558	q->entry_size = entry_size;
2559	mem->size = len * entry_size;
2560	mem->va = pci_alloc_consistent(phba->pcidev, mem->size, &mem->dma);
2561	if (!mem->va)
2562		return -ENOMEM;
2563	memset(mem->va, 0, mem->size);
2564	return 0;
2565}
2566
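/**
 * beiscsi_create_wrb_rings - create one WRB queue per connection
 * @phba: adapter instance
 * @phwi_context: HWI context holding the be_wrbq array
 * @phwi_ctrlr: controller whose wrb_context[] records the CIDs
 *
 * Splits HWI_MEM_WRB into cxns_per_ctrl rings of wrbs_per_cxn WRBs,
 * moving to the next mem_array fragment when the current one is used
 * up, then issues a WRBQ-create command per ring and stores the queue
 * id returned by the firmware as that connection's CID.
 */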
2567static int
2568beiscsi_create_wrb_rings(struct beiscsi_hba *phba,
2569			 struct hwi_context_memory *phwi_context,
2570			 struct hwi_controller *phwi_ctrlr)
2571{
2572	unsigned int wrb_mem_index, offset, size, num_wrb_rings;
2573	u64 pa_addr_lo;
2574	unsigned int idx, num, i;
2575	struct mem_array *pwrb_arr;
2576	void *wrb_vaddr;
2577	struct be_dma_mem sgl;
2578	struct be_mem_descriptor *mem_descr;
2579	int status;
2580
2581	idx = 0;
2582	mem_descr = phba->init_mem;
2583	mem_descr += HWI_MEM_WRB;
2584	pwrb_arr = kmalloc(sizeof(*pwrb_arr) * phba->params.cxns_per_ctrl,
2585			   GFP_KERNEL);
2586	if (!pwrb_arr) {
2587		shost_printk(KERN_ERR, phba->shost,
2588			     "Memory alloc failed in create wrb ring.\n");
2589		return -ENOMEM;
2590	}
2591	wrb_vaddr = mem_descr->mem_array[idx].virtual_address;
2592	pa_addr_lo = mem_descr->mem_array[idx].bus_address.u.a64.address;
2593	num_wrb_rings = mem_descr->mem_array[idx].size /
2594		(phba->params.wrbs_per_cxn * sizeof(struct iscsi_wrb));
2595
2596	for (num = 0; num < phba->params.cxns_per_ctrl; num++) {
2597		if (num_wrb_rings) {
2598			pwrb_arr[num].virtual_address = wrb_vaddr;
2599			pwrb_arr[num].bus_address.u.a64.address	= pa_addr_lo;
2600			pwrb_arr[num].size = phba->params.wrbs_per_cxn *
2601					    sizeof(struct iscsi_wrb);
2602			wrb_vaddr += pwrb_arr[num].size;
2603			pa_addr_lo += pwrb_arr[num].size;
2604			num_wrb_rings--;
2605		} else {
2606			idx++;
2607			wrb_vaddr = mem_descr->mem_array[idx].virtual_address;
2608			pa_addr_lo = mem_descr->mem_array[idx].\
2609					bus_address.u.a64.address;
2610			num_wrb_rings = mem_descr->mem_array[idx].size /
2611					(phba->params.wrbs_per_cxn *
2612					sizeof(struct iscsi_wrb));
2613			pwrb_arr[num].virtual_address = wrb_vaddr;
2614			pwrb_arr[num].bus_address.u.a64.address\
2615						= pa_addr_lo;
2616			pwrb_arr[num].size = phba->params.wrbs_per_cxn *
2617						 sizeof(struct iscsi_wrb);
2618			wrb_vaddr += pwrb_arr[num].size;
2619			pa_addr_lo   += pwrb_arr[num].size;
2620			num_wrb_rings--;
2621		}
2622	}
2623	for (i = 0; i < phba->params.cxns_per_ctrl; i++) {
2624		wrb_mem_index = 0;
2625		offset = 0;
2626		size = 0;
2627
2628		hwi_build_be_sgl_by_offset(phba, &pwrb_arr[i], &sgl);
2629		status = be_cmd_wrbq_create(&phba->ctrl, &sgl,
2630					    &phwi_context->be_wrbq[i]);
2631		if (status != 0) {
2632			shost_printk(KERN_ERR, phba->shost,
2633				     "wrbq create failed.\n");
			kfree(pwrb_arr);
2634			return status;
2635		}
2636		phwi_ctrlr->wrb_context[i * 2].cid =
2637					phwi_context->be_wrbq[i].id;
2638	}
2639	kfree(pwrb_arr);
2640	return 0;
2641}
2642
2643static void free_wrb_handles(struct beiscsi_hba *phba)
2644{
2645	unsigned int index;
2646	struct hwi_controller *phwi_ctrlr;
2647	struct hwi_wrb_context *pwrb_context;
2648
2649	phwi_ctrlr = phba->phwi_ctrlr;
2650	for (index = 0; index < phba->params.cxns_per_ctrl * 2; index += 2) {
2651		pwrb_context = &phwi_ctrlr->wrb_context[index];
2652		kfree(pwrb_context->pwrb_handle_base);
2653		kfree(pwrb_context->pwrb_handle_basestd);
2654	}
2655}
2656
2657static void be_mcc_queues_destroy(struct beiscsi_hba *phba)
2658{
2659	struct be_queue_info *q;
2660	struct be_ctrl_info *ctrl = &phba->ctrl;
2661
2662	q = &phba->ctrl.mcc_obj.q;
2663	if (q->created)
2664		beiscsi_cmd_q_destroy(ctrl, q, QTYPE_MCCQ);
2665	be_queue_free(phba, q);
2666
2667	q = &phba->ctrl.mcc_obj.cq;
2668	if (q->created)
2669		beiscsi_cmd_q_destroy(ctrl, q, QTYPE_CQ);
2670	be_queue_free(phba, q);
2671}
2672
2673static void hwi_cleanup(struct beiscsi_hba *phba)
2674{
2675	struct be_queue_info *q;
2676	struct be_ctrl_info *ctrl = &phba->ctrl;
2677	struct hwi_controller *phwi_ctrlr;
2678	struct hwi_context_memory *phwi_context;
2679	int i, eq_num;
2680
2681	phwi_ctrlr = phba->phwi_ctrlr;
2682	phwi_context = phwi_ctrlr->phwi_ctxt;
2683	for (i = 0; i < phba->params.cxns_per_ctrl; i++) {
2684		q = &phwi_context->be_wrbq[i];
2685		if (q->created)
2686			beiscsi_cmd_q_destroy(ctrl, q, QTYPE_WRBQ);
2687	}
2688	free_wrb_handles(phba);
2689
2690	q = &phwi_context->be_def_hdrq;
2691	if (q->created)
2692		beiscsi_cmd_q_destroy(ctrl, q, QTYPE_DPDUQ);
2693
2694	q = &phwi_context->be_def_dataq;
2695	if (q->created)
2696		beiscsi_cmd_q_destroy(ctrl, q, QTYPE_DPDUQ);
2697
2698	beiscsi_cmd_q_destroy(ctrl, NULL, QTYPE_SGL);
2699
2700	for (i = 0; i < (phba->num_cpus); i++) {
2701		q = &phwi_context->be_cq[i];
2702		if (q->created)
2703			beiscsi_cmd_q_destroy(ctrl, q, QTYPE_CQ);
2704	}
2705	if (phba->msix_enabled)
2706		eq_num = 1;
2707	else
2708		eq_num = 0;
2709	for (i = 0; i < (phba->num_cpus + eq_num); i++) {
2710		q = &phwi_context->be_eq[i].q;
2711		if (q->created)
2712			beiscsi_cmd_q_destroy(ctrl, q, QTYPE_EQ);
2713	}
2714	be_mcc_queues_destroy(phba);
2715}
2716
2717static int be_mcc_queues_create(struct beiscsi_hba *phba,
2718				struct hwi_context_memory *phwi_context)
2719{
2720	struct be_queue_info *q, *cq;
2721	struct be_ctrl_info *ctrl = &phba->ctrl;
2722
2723	/* Alloc MCC compl queue */
2724	cq = &phba->ctrl.mcc_obj.cq;
2725	if (be_queue_alloc(phba, cq, MCC_CQ_LEN,
2726			sizeof(struct be_mcc_compl)))
2727		goto err;
2728	/* Ask BE to create MCC compl queue; */
2729	if (phba->msix_enabled) {
2730		if (beiscsi_cmd_cq_create(ctrl, cq, &phwi_context->be_eq
2731					 [phba->num_cpus].q, false, true, 0))
2732			goto mcc_cq_free;
2733	} else {
2734		if (beiscsi_cmd_cq_create(ctrl, cq, &phwi_context->be_eq[0].q,
2735					  false, true, 0))
2736			goto mcc_cq_free;
2737	}
2738
2739	/* Alloc MCC queue */
2740	q = &phba->ctrl.mcc_obj.q;
2741	if (be_queue_alloc(phba, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
2742		goto mcc_cq_destroy;
2743
2744	/* Ask BE to create MCC queue */
2745	if (beiscsi_cmd_mccq_create(phba, q, cq))
2746		goto mcc_q_free;
2747
2748	return 0;
2749
2750mcc_q_free:
2751	be_queue_free(phba, q);
2752mcc_cq_destroy:
2753	beiscsi_cmd_q_destroy(ctrl, cq, QTYPE_CQ);
2754mcc_cq_free:
2755	be_queue_free(phba, cq);
2756err:
2757	return -ENOMEM;
2758}
2759
2760static int find_num_cpus(void)
2761{
2762	int  num_cpus = 0;
2763
2764	num_cpus = num_online_cpus();
2765	if (num_cpus >= MAX_CPUS)
2766		num_cpus = MAX_CPUS - 1;
2767
2768	SE_DEBUG(DBG_LVL_8, "num_cpus = %d \n", num_cpus);
2769	return num_cpus;
2770}
2771
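/**
 * hwi_init_port - bring up every hardware queue for the port
 * @phba: adapter instance
 *
 * After firmware initialization, creates the EQs, MCC queues and CQs,
 * checks the firmware version, creates the default PDU header and data
 * rings, posts the SGL pages and finally creates the WRB rings. Any
 * failure tears down whatever was created via hwi_cleanup().
 */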
2772static int hwi_init_port(struct beiscsi_hba *phba)
2773{
2774	struct hwi_controller *phwi_ctrlr;
2775	struct hwi_context_memory *phwi_context;
2776	unsigned int def_pdu_ring_sz;
2777	struct be_ctrl_info *ctrl = &phba->ctrl;
2778	int status;
2779
2780	def_pdu_ring_sz =
2781		phba->params.asyncpdus_per_ctrl * sizeof(struct phys_addr);
2782	phwi_ctrlr = phba->phwi_ctrlr;
2783	phwi_context = phwi_ctrlr->phwi_ctxt;
2784	phwi_context->max_eqd = 0;
2785	phwi_context->min_eqd = 0;
2786	phwi_context->cur_eqd = 64;
2787	be_cmd_fw_initialize(&phba->ctrl);
2788
2789	status = beiscsi_create_eqs(phba, phwi_context);
2790	if (status != 0) {
2791		shost_printk(KERN_ERR, phba->shost, "EQ not created \n");
2792		goto error;
2793	}
2794
2795	status = be_mcc_queues_create(phba, phwi_context);
2796	if (status != 0)
2797		goto error;
2798
2799	status = mgmt_check_supported_fw(ctrl, phba);
2800	if (status != 0) {
2801		shost_printk(KERN_ERR, phba->shost,
2802			     "Unsupported fw version \n");
2803		goto error;
2804	}
2805
2806	if (phba->fw_config.iscsi_features == 0x1)
2807		ring_mode = 1;
2808	else
2809		ring_mode = 0;
2810
2811	status = beiscsi_create_cqs(phba, phwi_context);
2812	if (status != 0) {
2813		shost_printk(KERN_ERR, phba->shost, "CQ not created\n");
2814		goto error;
2815	}
2816
2817	status = beiscsi_create_def_hdr(phba, phwi_context, phwi_ctrlr,
2818					def_pdu_ring_sz);
2819	if (status != 0) {
2820		shost_printk(KERN_ERR, phba->shost,
2821			     "Default Header not created\n");
2822		goto error;
2823	}
2824
2825	status = beiscsi_create_def_data(phba, phwi_context,
2826					 phwi_ctrlr, def_pdu_ring_sz);
2827	if (status != 0) {
2828		shost_printk(KERN_ERR, phba->shost,
2829			     "Default Data not created\n");
2830		goto error;
2831	}
2832
2833	status = beiscsi_post_pages(phba);
2834	if (status != 0) {
2835		shost_printk(KERN_ERR, phba->shost, "Post SGL Pages Failed\n");
2836		goto error;
2837	}
2838
2839	status = beiscsi_create_wrb_rings(phba,	phwi_context, phwi_ctrlr);
2840	if (status != 0) {
2841		shost_printk(KERN_ERR, phba->shost,
2842			     "WRB Rings not created\n");
2843		goto error;
2844	}
2845
2846	SE_DEBUG(DBG_LVL_8, "hwi_init_port success\n");
2847	return 0;
2848
2849error:
2850	shost_printk(KERN_ERR, phba->shost, "hwi_init_port failed\n");
2851	hwi_cleanup(phba);
2852	return -ENOMEM;
2853}
2854
2855static int hwi_init_controller(struct beiscsi_hba *phba)
2856{
2857	struct hwi_controller *phwi_ctrlr;
2858
2859	phwi_ctrlr = phba->phwi_ctrlr;
2860	if (1 == phba->init_mem[HWI_MEM_ADDN_CONTEXT].num_elements) {
2861		phwi_ctrlr->phwi_ctxt = (struct hwi_context_memory *)phba->
2862		    init_mem[HWI_MEM_ADDN_CONTEXT].mem_array[0].virtual_address;
2863		SE_DEBUG(DBG_LVL_8, " phwi_ctrlr->phwi_ctxt=%p \n",
2864			 phwi_ctrlr->phwi_ctxt);
2865	} else {
2866		shost_printk(KERN_ERR, phba->shost,
2867			     "HWI_MEM_ADDN_CONTEXT is more than one element. "
2868			     "Failing to load\n");
2869		return -ENOMEM;
2870	}
2871
2872	iscsi_init_global_templates(phba);
2873	beiscsi_init_wrb_handle(phba);
2874	hwi_init_async_pdu_ctx(phba);
2875	if (hwi_init_port(phba) != 0) {
2876		shost_printk(KERN_ERR, phba->shost,
2877			     "hwi_init_controller failed\n");
2878		return -ENOMEM;
2879	}
2880	return 0;
2881}
2882
2883static void beiscsi_free_mem(struct beiscsi_hba *phba)
2884{
2885	struct be_mem_descriptor *mem_descr;
2886	int i, j;
2887
2888	mem_descr = phba->init_mem;
2889	i = 0;
2890	j = 0;
2891	for (i = 0; i < SE_MEM_MAX; i++) {
2892		for (j = mem_descr->num_elements; j > 0; j--) {
2893			pci_free_consistent(phba->pcidev,
2894			  mem_descr->mem_array[j - 1].size,
2895			  mem_descr->mem_array[j - 1].virtual_address,
2896			  mem_descr->mem_array[j - 1].bus_address.
2897				u.a64.address);
2898		}
2899		kfree(mem_descr->mem_array);
2900		mem_descr++;
2901	}
2902	kfree(phba->init_mem);
2903	kfree(phba->phwi_ctrlr);
2904}
2905
2906static int beiscsi_init_controller(struct beiscsi_hba *phba)
2907{
2908	int ret = -ENOMEM;
2909
2910	ret = beiscsi_get_memory(phba);
2911	if (ret < 0) {
2912		shost_printk(KERN_ERR, phba->shost, "beiscsi_dev_probe -"
2913			     " Failed in beiscsi_get_memory\n");
2914		return ret;
2915	}
2916
2917	ret = hwi_init_controller(phba);
2918	if (ret)
2919		goto free_init;
2920	SE_DEBUG(DBG_LVL_8, "Return success from beiscsi_init_controller\n");
2921	return 0;
2922
2923free_init:
2924	beiscsi_free_mem(phba);
2925	return -ENOMEM;
2926}
2927
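/**
 * beiscsi_init_sgl_handle - split SGL handles into I/O and eh pools
 * @phba: adapter instance
 *
 * The first ios_per_ctrl handles in HWI_MEM_SGLH back the I/O pool;
 * the rest serve eh/mgmt tasks. Each handle is then bound to its
 * num_sge_per_io-sized fragment in HWI_MEM_SGE and given an sgl_index
 * offset by the firmware's iscsi_icd_start.
 */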
2928static int beiscsi_init_sgl_handle(struct beiscsi_hba *phba)
2929{
2930	struct be_mem_descriptor *mem_descr_sglh, *mem_descr_sg;
2931	struct sgl_handle *psgl_handle;
2932	struct iscsi_sge *pfrag;
2933	unsigned int arr_index, i, idx;
2934
2935	phba->io_sgl_hndl_avbl = 0;
2936	phba->eh_sgl_hndl_avbl = 0;
2937
2938	if (ring_mode) {
2939		phba->sgl_hndl_array = kzalloc(sizeof(struct sgl_handle *) *
2940					      phba->params.icds_per_ctrl,
2941						 GFP_KERNEL);
2942		if (!phba->sgl_hndl_array) {
2943			shost_printk(KERN_ERR, phba->shost,
2944			     "Mem Alloc Failed. Failing to load\n");
2945			return -ENOMEM;
2946		}
2947	}
2948
2949	mem_descr_sglh = phba->init_mem;
2950	mem_descr_sglh += HWI_MEM_SGLH;
2951	if (1 == mem_descr_sglh->num_elements) {
2952		phba->io_sgl_hndl_base = kzalloc(sizeof(struct sgl_handle *) *
2953						 phba->params.ios_per_ctrl,
2954						 GFP_KERNEL);
2955		if (!phba->io_sgl_hndl_base) {
2956			if (ring_mode)
2957				kfree(phba->sgl_hndl_array);
2958			shost_printk(KERN_ERR, phba->shost,
2959				     "Mem Alloc Failed. Failing to load\n");
2960			return -ENOMEM;
2961		}
2962		phba->eh_sgl_hndl_base = kzalloc(sizeof(struct sgl_handle *) *
2963						 (phba->params.icds_per_ctrl -
2964						 phba->params.ios_per_ctrl),
2965						 GFP_KERNEL);
2966		if (!phba->eh_sgl_hndl_base) {
2967			kfree(phba->io_sgl_hndl_base);
2968			shost_printk(KERN_ERR, phba->shost,
2969				     "Mem Alloc Failed. Failing to load\n");
2970			return -ENOMEM;
2971		}
2972	} else {
2973		shost_printk(KERN_ERR, phba->shost,
2974			     "HWI_MEM_SGLH is more than one element. "
2975			     "Failing to load\n");
2976		return -ENOMEM;
2977	}
2978
2979	arr_index = 0;
2980	idx = 0;
2981	while (idx < mem_descr_sglh->num_elements) {
2982		psgl_handle = mem_descr_sglh->mem_array[idx].virtual_address;
2983
2984		for (i = 0; i < (mem_descr_sglh->mem_array[idx].size /
2985		      sizeof(struct sgl_handle)); i++) {
2986			if (arr_index < phba->params.ios_per_ctrl) {
2987				phba->io_sgl_hndl_base[arr_index] = psgl_handle;
2988				phba->io_sgl_hndl_avbl++;
2989				arr_index++;
2990			} else {
2991				phba->eh_sgl_hndl_base[arr_index -
2992					phba->params.ios_per_ctrl] =
2993								psgl_handle;
2994				arr_index++;
2995				phba->eh_sgl_hndl_avbl++;
2996			}
2997			psgl_handle++;
2998		}
2999		idx++;
3000	}
3001	SE_DEBUG(DBG_LVL_8,
3002		 "phba->io_sgl_hndl_avbl=%d "
3003		 "phba->eh_sgl_hndl_avbl=%d\n",
3004		 phba->io_sgl_hndl_avbl,
3005		 phba->eh_sgl_hndl_avbl);
3006	mem_descr_sg = phba->init_mem;
3007	mem_descr_sg += HWI_MEM_SGE;
3008	SE_DEBUG(DBG_LVL_8, "\n mem_descr_sg->num_elements=%d \n",
3009		 mem_descr_sg->num_elements);
3010	arr_index = 0;
3011	idx = 0;
3012	while (idx < mem_descr_sg->num_elements) {
3013		pfrag = mem_descr_sg->mem_array[idx].virtual_address;
3014
3015		for (i = 0;
3016		     i < (mem_descr_sg->mem_array[idx].size) /
3017		     (sizeof(struct iscsi_sge) * phba->params.num_sge_per_io);
3018		     i++) {
3019			if (arr_index < phba->params.ios_per_ctrl)
3020				psgl_handle = phba->io_sgl_hndl_base[arr_index];
3021			else
3022				psgl_handle = phba->eh_sgl_hndl_base[arr_index -
3023						phba->params.ios_per_ctrl];
3024			psgl_handle->pfrag = pfrag;
3025			AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, pfrag, 0);
3026			AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, pfrag, 0);
3027			pfrag += phba->params.num_sge_per_io;
3028			psgl_handle->sgl_index =
3029				phba->fw_config.iscsi_icd_start + arr_index++;
3030		}
3031		idx++;
3032	}
3033	phba->io_sgl_free_index = 0;
3034	phba->io_sgl_alloc_index = 0;
3035	phba->eh_sgl_free_index = 0;
3036	phba->eh_sgl_alloc_index = 0;
3037	return 0;
3038}
3039
3040static int hba_setup_cid_tbls(struct beiscsi_hba *phba)
3041{
3042	int i, new_cid;
3043
3044	phba->cid_array = kzalloc(sizeof(void *) * phba->params.cxns_per_ctrl,
3045				  GFP_KERNEL);
3046	if (!phba->cid_array) {
3047		shost_printk(KERN_ERR, phba->shost,
3048			     "Failed to allocate memory in "
3049			     "hba_setup_cid_tbls\n");
3050		return -ENOMEM;
3051	}
3052	phba->ep_array = kzalloc(sizeof(struct iscsi_endpoint *) *
3053				 phba->params.cxns_per_ctrl * 2, GFP_KERNEL);
3054	if (!phba->ep_array) {
3055		shost_printk(KERN_ERR, phba->shost,
3056			     "Failed to allocate memory in "
3057			     "hba_setup_cid_tbls \n");
3058		kfree(phba->cid_array);
3059		return -ENOMEM;
3060	}
3061	new_cid = phba->fw_config.iscsi_cid_start;
3062	for (i = 0; i < phba->params.cxns_per_ctrl; i++) {
3063		phba->cid_array[i] = new_cid;
3064		new_cid += 2;
3065	}
3066	phba->avlbl_cids = phba->params.cxns_per_ctrl;
3067	return 0;
3068}
3069
3070static unsigned char hwi_enable_intr(struct beiscsi_hba *phba)
3071{
3072	struct be_ctrl_info *ctrl = &phba->ctrl;
3073	struct hwi_controller *phwi_ctrlr;
3074	struct hwi_context_memory *phwi_context;
3075	struct be_queue_info *eq;
3076	u8 __iomem *addr;
3077	u32 reg, i;
3078	u32 enabled;
3079
3080	phwi_ctrlr = phba->phwi_ctrlr;
3081	phwi_context = phwi_ctrlr->phwi_ctxt;
3082
3083	addr = (u8 __iomem *) ((u8 __iomem *) ctrl->pcicfg +
3084			PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET);
3085	reg = ioread32(addr);
3086	SE_DEBUG(DBG_LVL_8, "reg =0x%08x\n", reg);
3087
3088	enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
3089	if (!enabled) {
3090		reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
3091		SE_DEBUG(DBG_LVL_8, "reg =0x%08x addr=%p\n", reg, addr);
3092		iowrite32(reg, addr);
3093		for (i = 0; i <= phba->num_cpus; i++) {
3094			eq = &phwi_context->be_eq[i].q;
3095			SE_DEBUG(DBG_LVL_8, "eq->id=%d \n", eq->id);
3096			hwi_ring_eq_db(phba, eq->id, 0, 0, 1, 1);
3097		}
3098	} else
3099		shost_printk(KERN_WARNING, phba->shost,
3100			     "In hwi_enable_intr, Not Enabled \n");
3101	return true;
3102}
3103
3104static void hwi_disable_intr(struct beiscsi_hba *phba)
3105{
3106	struct be_ctrl_info *ctrl = &phba->ctrl;
3107
3108	u8 __iomem *addr = ctrl->pcicfg + PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET;
3109	u32 reg = ioread32(addr);
3110
3111	u32 enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
3112	if (enabled) {
3113		reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
3114		iowrite32(reg, addr);
3115	} else
3116		shost_printk(KERN_WARNING, phba->shost,
3117			     "In hwi_disable_intr, Already Disabled \n");
3118}
3119
3120static int beiscsi_init_port(struct beiscsi_hba *phba)
3121{
3122	int ret;
3123
3124	ret = beiscsi_init_controller(phba);
3125	if (ret < 0) {
3126		shost_printk(KERN_ERR, phba->shost,
3127			     "beiscsi_dev_probe - Failed in"
3128			     " beiscsi_init_controller\n");
3129		return ret;
3130	}
3131	ret = beiscsi_init_sgl_handle(phba);
3132	if (ret < 0) {
3133		shost_printk(KERN_ERR, phba->shost,
3134			     "beiscsi_dev_probe - Failed in"
3135			     " beiscsi_init_sgl_handle\n");
3136		goto do_cleanup_ctrlr;
3137	}
3138
3139	if (hba_setup_cid_tbls(phba)) {
3140		shost_printk(KERN_ERR, phba->shost,
3141			     "Failed in hba_setup_cid_tbls\n");
3142		if (ring_mode)
3143			kfree(phba->sgl_hndl_array);
3144		kfree(phba->io_sgl_hndl_base);
3145		kfree(phba->eh_sgl_hndl_base);
3146		goto do_cleanup_ctrlr;
3147	}
3148
3149	return ret;
3150
3151do_cleanup_ctrlr:
3152	hwi_cleanup(phba);
3153	return ret;
3154}
3155
3156static void hwi_purge_eq(struct beiscsi_hba *phba)
3157{
3158	struct hwi_controller *phwi_ctrlr;
3159	struct hwi_context_memory *phwi_context;
3160	struct be_queue_info *eq;
3161	struct be_eq_entry *eqe = NULL;
3162	int i, eq_msix;
3163
3164	phwi_ctrlr = phba->phwi_ctrlr;
3165	phwi_context = phwi_ctrlr->phwi_ctxt;
3166	if (phba->msix_enabled)
3167		eq_msix = 1;
3168	else
3169		eq_msix = 0;
3170
3171	for (i = 0; i < (phba->num_cpus + eq_msix); i++) {
3172		eq = &phwi_context->be_eq[i].q;
3173		eqe = queue_tail_node(eq);
3174
3175		while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32]
3176					& EQE_VALID_MASK) {
3177			AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0);
3178			queue_tail_inc(eq);
3179			eqe = queue_tail_node(eq);
3180		}
3181	}
3182}
3183
3184static void beiscsi_clean_port(struct beiscsi_hba *phba)
3185{
3186	unsigned char mgmt_status;
3187
3188	mgmt_status = mgmt_epfw_cleanup(phba, CMD_CONNECTION_CHUTE_0);
3189	if (mgmt_status)
3190		shost_printk(KERN_WARNING, phba->shost,
3191			     "mgmt_epfw_cleanup FAILED \n");
3192	hwi_cleanup(phba);
3193	hwi_purge_eq(phba);
3194	if (ring_mode)
3195		kfree(phba->sgl_hndl_array);
3196	kfree(phba->io_sgl_hndl_base);
3197	kfree(phba->eh_sgl_hndl_base);
3198	kfree(phba->cid_array);
3199	kfree(phba->ep_array);
3200}
3201
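/**
 * beiscsi_offload_connection - push negotiated parameters to hardware
 * @beiscsi_conn: connection that is moving to offload mode
 * @params: login-negotiated parameters in AMAP layout
 *
 * Builds a target-context-update WRB carrying the burst lengths, ERL,
 * digest, R2T and immediate-data flags plus the initial stat_sn
 * (exp_statsn + 1), converts it to little endian and posts it through
 * the TXULP0 doorbell.
 */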
3202void
3203beiscsi_offload_connection(struct beiscsi_conn *beiscsi_conn,
3204			   struct beiscsi_offload_params *params)
3205{
3206	struct wrb_handle *pwrb_handle;
3207	struct iscsi_target_context_update_wrb *pwrb = NULL;
3208	struct be_mem_descriptor *mem_descr;
3209	struct beiscsi_hba *phba = beiscsi_conn->phba;
3210	u32 doorbell = 0;
3211
3212	/*
3213	 * We can always use 0 here because it is reserved by libiscsi for
3214	 * login/startup related tasks.
3215	 */
3216	pwrb_handle = alloc_wrb_handle(phba, (beiscsi_conn->beiscsi_conn_cid -
3217				       phba->fw_config.iscsi_cid_start));
3218	pwrb = (struct iscsi_target_context_update_wrb *)pwrb_handle->pwrb;
3219	memset(pwrb, 0, sizeof(*pwrb));
3220	AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb,
3221		      max_burst_length, pwrb, params->dw[offsetof
3222		      (struct amap_beiscsi_offload_params,
3223		      max_burst_length) / 32]);
3224	AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb,
3225		      max_send_data_segment_length, pwrb,
3226		      params->dw[offsetof(struct amap_beiscsi_offload_params,
3227		      max_send_data_segment_length) / 32]);
3228	AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb,
3229		      first_burst_length,
3230		      pwrb,
3231		      params->dw[offsetof(struct amap_beiscsi_offload_params,
3232		      first_burst_length) / 32]);
3233
3234	AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, erl, pwrb,
3235		      (params->dw[offsetof(struct amap_beiscsi_offload_params,
3236		      erl) / 32] & OFFLD_PARAMS_ERL));
3237	AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, dde, pwrb,
3238		      (params->dw[offsetof(struct amap_beiscsi_offload_params,
3239		      dde) / 32] & OFFLD_PARAMS_DDE) >> 2);
3240	AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, hde, pwrb,
3241		      (params->dw[offsetof(struct amap_beiscsi_offload_params,
3242		      hde) / 32] & OFFLD_PARAMS_HDE) >> 3);
3243	AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, ir2t, pwrb,
3244		      (params->dw[offsetof(struct amap_beiscsi_offload_params,
3245		      ir2t) / 32] & OFFLD_PARAMS_IR2T) >> 4);
3246	AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, imd, pwrb,
3247		      (params->dw[offsetof(struct amap_beiscsi_offload_params,
3248		       imd) / 32] & OFFLD_PARAMS_IMD) >> 5);
3249	AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, stat_sn,
3250		      pwrb,
3251		      (params->dw[offsetof(struct amap_beiscsi_offload_params,
3252		      exp_statsn) / 32] + 1));
3253	AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, type, pwrb,
3254		      0x7);
3255	AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, wrb_idx,
3256		      pwrb, pwrb_handle->wrb_index);
3257	AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, ptr2nextwrb,
3258		      pwrb, pwrb_handle->nxt_wrb_index);
3259	AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb,
3260			session_state, pwrb, 0);
3261	AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, compltonack,
3262		      pwrb, 1);
3263	AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, notpredblq,
3264		      pwrb, 0);
3265	AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, mode, pwrb,
3266		      0);
3267
3268	mem_descr = phba->init_mem;
3269	mem_descr += ISCSI_MEM_GLOBAL_HEADER;
3270
3271	AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb,
3272			pad_buffer_addr_hi, pwrb,
3273		      mem_descr->mem_array[0].bus_address.u.a32.address_hi);
3274	AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb,
3275			pad_buffer_addr_lo, pwrb,
3276		      mem_descr->mem_array[0].bus_address.u.a32.address_lo);
3277
3278	be_dws_le_to_cpu(pwrb, sizeof(struct iscsi_target_context_update_wrb));
3279
3280	doorbell |= beiscsi_conn->beiscsi_conn_cid & DB_WRB_POST_CID_MASK;
3281	if (!ring_mode)
3282		doorbell |= (pwrb_handle->wrb_index & DB_DEF_PDU_WRB_INDEX_MASK)
3283			     << DB_DEF_PDU_WRB_INDEX_SHIFT;
3284	doorbell |= 1 << DB_DEF_PDU_NUM_POSTED_SHIFT;
3285
3286	iowrite32(doorbell, phba->db_va + DB_TXULP0_OFFSET);
3287}
3288
3289static void beiscsi_parse_pdu(struct iscsi_conn *conn, itt_t itt,
3290			      int *index, int *age)
3291{
3292	*index = (int)itt;
3293	if (age)
3294		*age = conn->session->age;
3295}
3296
3297/**
3298 * beiscsi_alloc_pdu - allocates pdu and related resources
3299 * @task: libiscsi task
3300 * @opcode: opcode of pdu for task
3301 *
3302 * This is called with the session lock held. It allocates the WRB
3303 * and SGL if the command needs them and preps the pdu's itt;
3304 * beiscsi_parse_pdu() will later translate the pdu itt back to the
3305 * libiscsi task itt.
3306 */
3307static int beiscsi_alloc_pdu(struct iscsi_task *task, uint8_t opcode)
3308{
3309	struct beiscsi_io_task *io_task = task->dd_data;
3310	struct iscsi_conn *conn = task->conn;
3311	struct beiscsi_conn *beiscsi_conn = conn->dd_data;
3312	struct beiscsi_hba *phba = beiscsi_conn->phba;
3313	struct hwi_wrb_context *pwrb_context;
3314	struct hwi_controller *phwi_ctrlr;
3315	itt_t itt;
3316	struct beiscsi_session *beiscsi_sess = beiscsi_conn->beiscsi_sess;
3317	dma_addr_t paddr;
3318
3319	io_task->cmd_bhs = pci_pool_alloc(beiscsi_sess->bhs_pool,
3320					  GFP_KERNEL, &paddr);
3321	if (!io_task->cmd_bhs)
3322		return -ENOMEM;
3323	io_task->bhs_pa.u.a64.address = paddr;
3324	io_task->libiscsi_itt = (itt_t)task->itt;
3325	io_task->pwrb_handle = alloc_wrb_handle(phba,
3326						beiscsi_conn->beiscsi_conn_cid -
3327						phba->fw_config.iscsi_cid_start
3328						);
3329	io_task->conn = beiscsi_conn;
3330
3331	task->hdr = (struct iscsi_hdr *)&io_task->cmd_bhs->iscsi_hdr;
3332	task->hdr_max = sizeof(struct be_cmd_bhs);
3333
3334	if (task->sc) {
3335		spin_lock(&phba->io_sgl_lock);
3336		io_task->psgl_handle = alloc_io_sgl_handle(phba);
3337		spin_unlock(&phba->io_sgl_lock);
3338		if (!io_task->psgl_handle)
3339			goto free_hndls;
3340	} else {
3341		io_task->scsi_cmnd = NULL;
3342		if ((opcode & ISCSI_OPCODE_MASK) == ISCSI_OP_LOGIN) {
3343			if (!beiscsi_conn->login_in_progress) {
3344				spin_lock(&phba->mgmt_sgl_lock);
3345				io_task->psgl_handle = (struct sgl_handle *)
3346						alloc_mgmt_sgl_handle(phba);
3347				spin_unlock(&phba->mgmt_sgl_lock);
3348				if (!io_task->psgl_handle)
3349					goto free_hndls;
3350
3351				beiscsi_conn->login_in_progress = 1;
3352				beiscsi_conn->plogin_sgl_handle =
3353							io_task->psgl_handle;
3354			} else {
3355				io_task->psgl_handle =
3356						beiscsi_conn->plogin_sgl_handle;
3357			}
3358		} else {
3359			spin_lock(&phba->mgmt_sgl_lock);
3360			io_task->psgl_handle = alloc_mgmt_sgl_handle(phba);
3361			spin_unlock(&phba->mgmt_sgl_lock);
3362			if (!io_task->psgl_handle)
3363				goto free_hndls;
3364		}
3365	}
3366	itt = (itt_t) cpu_to_be32(((unsigned int)io_task->pwrb_handle->
3367				 wrb_index << 16) | (unsigned int)
3368				(io_task->psgl_handle->sgl_index));
3369	if (ring_mode) {
3370		phba->sgl_hndl_array[io_task->psgl_handle->sgl_index -
3371				     phba->fw_config.iscsi_icd_start] =
3372				     io_task->psgl_handle;
3373		io_task->psgl_handle->task = task;
3374		io_task->psgl_handle->cid = beiscsi_conn->beiscsi_conn_cid  -
3375					    phba->fw_config.iscsi_cid_start;
3376	} else
3377		io_task->pwrb_handle->pio_handle = task;
3378
3379	io_task->cmd_bhs->iscsi_hdr.itt = itt;
3380	return 0;
3381
3382free_hndls:
3383	phwi_ctrlr = phba->phwi_ctrlr;
3384	pwrb_context = &phwi_ctrlr->wrb_context[
3385			beiscsi_conn->beiscsi_conn_cid -
3386			phba->fw_config.iscsi_cid_start];
3387	free_wrb_handle(phba, pwrb_context, io_task->pwrb_handle);
3388	io_task->pwrb_handle = NULL;
3389	pci_pool_free(beiscsi_sess->bhs_pool, io_task->cmd_bhs,
3390		      io_task->bhs_pa.u.a64.address);
3391	SE_DEBUG(DBG_LVL_1, "Alloc of SGL_ICD Failed \n");
3392	return -ENOMEM;
3393}
3394
static void beiscsi_cleanup_task(struct iscsi_task *task)
{
	struct beiscsi_io_task *io_task = task->dd_data;
	struct iscsi_conn *conn = task->conn;
	struct beiscsi_conn *beiscsi_conn = conn->dd_data;
	struct beiscsi_hba *phba = beiscsi_conn->phba;
	struct beiscsi_session *beiscsi_sess = beiscsi_conn->beiscsi_sess;
	struct hwi_wrb_context *pwrb_context;
	struct hwi_controller *phwi_ctrlr;

	phwi_ctrlr = phba->phwi_ctrlr;
	pwrb_context = &phwi_ctrlr->wrb_context[beiscsi_conn->beiscsi_conn_cid
			- phba->fw_config.iscsi_cid_start];
	if (io_task->pwrb_handle) {
		free_wrb_handle(phba, pwrb_context, io_task->pwrb_handle);
		io_task->pwrb_handle = NULL;
	}

	if (io_task->cmd_bhs) {
		pci_pool_free(beiscsi_sess->bhs_pool, io_task->cmd_bhs,
			      io_task->bhs_pa.u.a64.address);
		io_task->cmd_bhs = NULL;
	}

	if (task->sc) {
		if (io_task->psgl_handle) {
			spin_lock(&phba->io_sgl_lock);
			free_io_sgl_handle(phba, io_task->psgl_handle);
			spin_unlock(&phba->io_sgl_lock);
			io_task->psgl_handle = NULL;
		}
	} else {
		if ((task->hdr->opcode & ISCSI_OPCODE_MASK) == ISCSI_OP_LOGIN)
			return;
		if (io_task->psgl_handle) {
			spin_lock(&phba->mgmt_sgl_lock);
			free_mgmt_sgl_handle(phba, io_task->psgl_handle);
			spin_unlock(&phba->mgmt_sgl_lock);
			io_task->psgl_handle = NULL;
		}
	}
}

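/**
 * beiscsi_iotask - post a SCSI data-path command to the adapter
 * @task: iscsi_task carrying the SCSI command
 * @sg: scatter-gather list for the data buffer
 * @num_sg: number of scatter-gather entries
 * @xferlen: total data transfer length in bytes
 * @writedir: non-zero for a write (Data-Out) command
 *
 * Fills in the WRB describing the command and rings the TX doorbell.
 */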
static int beiscsi_iotask(struct iscsi_task *task, struct scatterlist *sg,
			  unsigned int num_sg, unsigned int xferlen,
			  unsigned int writedir)
{
	struct beiscsi_io_task *io_task = task->dd_data;
	struct iscsi_conn *conn = task->conn;
	struct beiscsi_conn *beiscsi_conn = conn->dd_data;
	struct beiscsi_hba *phba = beiscsi_conn->phba;
	struct iscsi_wrb *pwrb = NULL;
	unsigned int doorbell = 0;

	pwrb = io_task->pwrb_handle->pwrb;
	io_task->cmd_bhs->iscsi_hdr.exp_statsn = 0;
	io_task->bhs_len = sizeof(struct be_cmd_bhs);

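	/* For writes, prebuild the 48-byte SCSI Data-Out PDU header. */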
	if (writedir) {
		memset(&io_task->cmd_bhs->iscsi_data_pdu, 0, 48);
		AMAP_SET_BITS(struct amap_pdu_data_out, itt,
			      &io_task->cmd_bhs->iscsi_data_pdu,
			      (unsigned int)io_task->cmd_bhs->iscsi_hdr.itt);
		AMAP_SET_BITS(struct amap_pdu_data_out, opcode,
			      &io_task->cmd_bhs->iscsi_data_pdu,
			      ISCSI_OPCODE_SCSI_DATA_OUT);
		AMAP_SET_BITS(struct amap_pdu_data_out, final_bit,
			      &io_task->cmd_bhs->iscsi_data_pdu, 1);
		if (ring_mode)
			io_task->psgl_handle->type = INI_WR_CMD;
		else
			AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb,
				      INI_WR_CMD);
		AMAP_SET_BITS(struct amap_iscsi_wrb, dsp, pwrb, 1);
	} else {
		if (ring_mode)
			io_task->psgl_handle->type = INI_RD_CMD;
		else
			AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb,
				      INI_RD_CMD);
		AMAP_SET_BITS(struct amap_iscsi_wrb, dsp, pwrb, 0);
	}
	memcpy(&io_task->cmd_bhs->iscsi_data_pdu.
	       dw[offsetof(struct amap_pdu_data_out, lun) / 32],
	       io_task->cmd_bhs->iscsi_hdr.lun, sizeof(struct scsi_lun));

	AMAP_SET_BITS(struct amap_iscsi_wrb, lun, pwrb,
		      cpu_to_be16((unsigned short)io_task->cmd_bhs->iscsi_hdr.
				  lun[0]));
	AMAP_SET_BITS(struct amap_iscsi_wrb, r2t_exp_dtl, pwrb, xferlen);
	AMAP_SET_BITS(struct amap_iscsi_wrb, wrb_idx, pwrb,
		      io_task->pwrb_handle->wrb_index);
	AMAP_SET_BITS(struct amap_iscsi_wrb, cmdsn_itt, pwrb,
		      be32_to_cpu(task->cmdsn));
	AMAP_SET_BITS(struct amap_iscsi_wrb, sgl_icd_idx, pwrb,
		      io_task->psgl_handle->sgl_index);

	hwi_write_sgl(pwrb, sg, num_sg, io_task);

	AMAP_SET_BITS(struct amap_iscsi_wrb, ptr2nextwrb, pwrb,
		      io_task->pwrb_handle->nxt_wrb_index);
	be_dws_le_to_cpu(pwrb, sizeof(struct iscsi_wrb));
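
	/*
	 * Doorbell format: connection ID in the low bits, plus (in
	 * default-PDU mode only) the WRB index, and the count of WRBs
	 * posted.
	 */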
	doorbell |= beiscsi_conn->beiscsi_conn_cid & DB_WRB_POST_CID_MASK;
	if (!ring_mode)
		doorbell |= (io_task->pwrb_handle->wrb_index &
		     DB_DEF_PDU_WRB_INDEX_MASK) << DB_DEF_PDU_WRB_INDEX_SHIFT;
	doorbell |= 1 << DB_DEF_PDU_NUM_POSTED_SHIFT;

	iowrite32(doorbell, phba->db_va + DB_TXULP0_OFFSET);
	return 0;
}

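/**
 * beiscsi_mtask - post a management task to the adapter
 * @task: iscsi_task carrying the management PDU (login, nop-out,
 *	  text, TMF or logout)
 *
 * Builds a WRB for the PDU type and rings the TX doorbell.  Returns
 * -EINVAL for unsupported opcodes.
 */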
static int beiscsi_mtask(struct iscsi_task *task)
{
	struct beiscsi_io_task *aborted_io_task, *io_task = task->dd_data;
	struct iscsi_conn *conn = task->conn;
	struct beiscsi_conn *beiscsi_conn = conn->dd_data;
	struct beiscsi_hba *phba = beiscsi_conn->phba;
	struct iscsi_wrb *pwrb = NULL;
	struct hwi_controller *phwi_ctrlr;
	struct hwi_wrb_context *pwrb_context;
	struct wrb_handle *pwrb_handle;
	unsigned int doorbell = 0;
	unsigned int i, cid;
	struct iscsi_task *aborted_task;

	cid = beiscsi_conn->beiscsi_conn_cid;
	pwrb = io_task->pwrb_handle->pwrb;
	AMAP_SET_BITS(struct amap_iscsi_wrb, cmdsn_itt, pwrb,
		      be32_to_cpu(task->cmdsn));
	AMAP_SET_BITS(struct amap_iscsi_wrb, wrb_idx, pwrb,
		      io_task->pwrb_handle->wrb_index);
	AMAP_SET_BITS(struct amap_iscsi_wrb, sgl_icd_idx, pwrb,
		      io_task->psgl_handle->sgl_index);

	switch (task->hdr->opcode & ISCSI_OPCODE_MASK) {
	case ISCSI_OP_LOGIN:
		if (ring_mode)
			io_task->psgl_handle->type = TGT_DM_CMD;
		else
			AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb,
				      TGT_DM_CMD);
		AMAP_SET_BITS(struct amap_iscsi_wrb, dmsg, pwrb, 0);
		AMAP_SET_BITS(struct amap_iscsi_wrb, cmdsn_itt, pwrb, 1);
		hwi_write_buffer(pwrb, task);
		break;
	case ISCSI_OP_NOOP_OUT:
		if (ring_mode)
			io_task->psgl_handle->type = INI_RD_CMD;
		else
			AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb,
				      INI_RD_CMD);
		if (task->hdr->ttt == ISCSI_RESERVED_TAG)
			AMAP_SET_BITS(struct amap_iscsi_wrb, dmsg, pwrb, 0);
		else
			AMAP_SET_BITS(struct amap_iscsi_wrb, dmsg, pwrb, 1);

		hwi_write_buffer(pwrb, task);
		break;
	case ISCSI_OP_TEXT:
		if (ring_mode)
			io_task->psgl_handle->type = INI_WR_CMD;
		else
			AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb,
				      INI_WR_CMD);
		AMAP_SET_BITS(struct amap_iscsi_wrb, dmsg, pwrb, 0);
		AMAP_SET_BITS(struct amap_iscsi_wrb, dsp, pwrb, 1);
		hwi_write_buffer(pwrb, task);
		break;
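	/*
	 * For a TMF, locate the task being aborted through the referenced
	 * task tag (the WRB index sits in the upper 16 bits of the RTT)
	 * and ask the firmware to invalidate that command's ICD.
	 */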
	case ISCSI_OP_SCSI_TMFUNC:
		i = ((struct iscsi_tm *)task->hdr)->rtt;
		phwi_ctrlr = phba->phwi_ctrlr;
		pwrb_context = &phwi_ctrlr->wrb_context[cid -
					    phba->fw_config.iscsi_cid_start];
		pwrb_handle = pwrb_context->pwrb_handle_basestd[be32_to_cpu(i)
								>> 16];
		aborted_task = pwrb_handle->pio_handle;
		if (!aborted_task)
			return 0;

		aborted_io_task = aborted_task->dd_data;
		if (!aborted_io_task->scsi_cmnd)
			return 0;

		mgmt_invalidate_icds(phba,
				     aborted_io_task->psgl_handle->sgl_index,
				     cid);
		if (ring_mode)
			io_task->psgl_handle->type = INI_TMF_CMD;
		else
			AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb,
				      INI_TMF_CMD);
		AMAP_SET_BITS(struct amap_iscsi_wrb, dmsg, pwrb, 0);
		hwi_write_buffer(pwrb, task);
		break;
	case ISCSI_OP_LOGOUT:
		AMAP_SET_BITS(struct amap_iscsi_wrb, dmsg, pwrb, 0);
		if (ring_mode)
			io_task->psgl_handle->type = HWH_TYPE_LOGOUT;
		else
			AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb,
				      HWH_TYPE_LOGOUT);
		hwi_write_buffer(pwrb, task);
		break;
	default:
		SE_DEBUG(DBG_LVL_1, "opcode=%d not supported\n",
			 task->hdr->opcode & ISCSI_OPCODE_MASK);
		return -EINVAL;
	}

	AMAP_SET_BITS(struct amap_iscsi_wrb, r2t_exp_dtl, pwrb,
		      be32_to_cpu(task->data_count));
	AMAP_SET_BITS(struct amap_iscsi_wrb, ptr2nextwrb, pwrb,
		      io_task->pwrb_handle->nxt_wrb_index);
	be_dws_le_to_cpu(pwrb, sizeof(struct iscsi_wrb));

	doorbell |= cid & DB_WRB_POST_CID_MASK;
	if (!ring_mode)
		doorbell |= (io_task->pwrb_handle->wrb_index &
		     DB_DEF_PDU_WRB_INDEX_MASK) << DB_DEF_PDU_WRB_INDEX_SHIFT;
	doorbell |= 1 << DB_DEF_PDU_NUM_POSTED_SHIFT;
	iowrite32(doorbell, phba->db_va + DB_TXULP0_OFFSET);
	return 0;
}

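/**
 * beiscsi_task_xmit - libiscsi xmit_task entry point
 * @task: iscsi_task to transmit
 *
 * Management PDUs are handed to beiscsi_mtask(); SCSI commands are
 * DMA-mapped and posted through beiscsi_iotask().
 */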
static int beiscsi_task_xmit(struct iscsi_task *task)
{
	struct iscsi_conn *conn = task->conn;
	struct beiscsi_io_task *io_task = task->dd_data;
	struct scsi_cmnd *sc = task->sc;
	struct beiscsi_conn *beiscsi_conn = conn->dd_data;
	struct scatterlist *sg;
	int num_sg;
	unsigned int writedir = 0, xferlen = 0;

	SE_DEBUG(DBG_LVL_4, "In beiscsi_task_xmit: cid=%d task=%p conn=%p "
		 "beiscsi_conn=%p\n", beiscsi_conn->beiscsi_conn_cid,
		 task, conn, beiscsi_conn);
	if (!sc)
		return beiscsi_mtask(task);

	io_task->scsi_cmnd = sc;
	num_sg = scsi_dma_map(sc);
	if (num_sg < 0) {
		SE_DEBUG(DBG_LVL_1, "scsi_dma_map failed\n");
		return num_sg;
	}
	SE_DEBUG(DBG_LVL_4, "xferlen=0x%08x scmd=%p num_sg=%d sernum=%lu\n",
		 scsi_bufflen(sc), sc, num_sg, sc->serial_number);
	xferlen = scsi_bufflen(sc);
	sg = scsi_sglist(sc);
	if (sc->sc_data_direction == DMA_TO_DEVICE) {
		writedir = 1;
		SE_DEBUG(DBG_LVL_4, "task->imm_count=0x%08x\n",
			 task->imm_count);
	}
	return beiscsi_iotask(task, sg, num_sg, xferlen, writedir);
}

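/**
 * beiscsi_remove - PCI remove callback
 * @pcidev: PCI device being removed
 *
 * Quiesces interrupts and iopoll, then tears down the port and frees
 * host resources in roughly the reverse order of probe.
 */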
static void beiscsi_remove(struct pci_dev *pcidev)
{
	struct beiscsi_hba *phba = NULL;
	struct hwi_controller *phwi_ctrlr;
	struct hwi_context_memory *phwi_context;
	struct be_eq_obj *pbe_eq;
	unsigned int i, msix_vec;

	phba = pci_get_drvdata(pcidev);
	if (!phba) {
		dev_err(&pcidev->dev, "beiscsi_remove called with no phba\n");
		return;
	}

	phwi_ctrlr = phba->phwi_ctrlr;
	phwi_context = phwi_ctrlr->phwi_ctxt;
	hwi_disable_intr(phba);
	if (phba->msix_enabled) {
		for (i = 0; i <= phba->num_cpus; i++) {
			msix_vec = phba->msix_entries[i].vector;
			free_irq(msix_vec, &phwi_context->be_eq[i]);
		}
	} else if (phba->pcidev->irq)
		free_irq(phba->pcidev->irq, phba);
	pci_disable_msix(phba->pcidev);
	destroy_workqueue(phba->wq);
	if (blk_iopoll_enabled)
		for (i = 0; i < phba->num_cpus; i++) {
			pbe_eq = &phwi_context->be_eq[i];
			blk_iopoll_disable(&pbe_eq->iopoll);
		}

	beiscsi_clean_port(phba);
	beiscsi_free_mem(phba);
	beiscsi_unmap_pci_function(phba);
	pci_free_consistent(phba->pcidev,
			    phba->ctrl.mbox_mem_alloced.size,
			    phba->ctrl.mbox_mem_alloced.va,
			    phba->ctrl.mbox_mem_alloced.dma);
	iscsi_host_remove(phba->shost);
	pci_dev_put(phba->pcidev);
	iscsi_host_free(phba->shost);
}

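/**
 * beiscsi_msix_enable - request one MSI-X vector per CPU plus one
 * @phba: adapter instance
 *
 * On success phba->msix_enabled is set; otherwise the driver keeps
 * using the legacy INTx interrupt.
 */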
static void beiscsi_msix_enable(struct beiscsi_hba *phba)
{
	int i, status;

	for (i = 0; i <= phba->num_cpus; i++)
		phba->msix_entries[i].entry = i;

	status = pci_enable_msix(phba->pcidev, phba->msix_entries,
				 (phba->num_cpus + 1));
	if (!status)
		phba->msix_enabled = true;
}

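/**
 * beiscsi_dev_probe - PCI probe callback
 * @pcidev: PCI device that matched the id table
 * @id: matching entry in beiscsi_pci_id_table
 *
 * Brings up the adapter: PCI enable, host allocation, MSI-X setup,
 * control/mailbox init, firmware config query, port init, workqueue
 * and iopoll setup, then IRQ registration and interrupt enable.
 */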
static int __devinit beiscsi_dev_probe(struct pci_dev *pcidev,
				       const struct pci_device_id *id)
{
	struct beiscsi_hba *phba = NULL;
	struct hwi_controller *phwi_ctrlr;
	struct hwi_context_memory *phwi_context;
	struct be_eq_obj *pbe_eq;
	int ret, msix_vec, num_cpus, i;

	ret = beiscsi_enable_pci(pcidev);
	if (ret < 0) {
		dev_err(&pcidev->dev, "beiscsi_dev_probe-"
			"Failed to enable pci device\n");
		return ret;
	}

	phba = beiscsi_hba_alloc(pcidev);
	if (!phba) {
		dev_err(&pcidev->dev, "beiscsi_dev_probe-"
			"Failed in beiscsi_hba_alloc\n");
		ret = -ENOMEM;
		goto disable_pci;
	}
	SE_DEBUG(DBG_LVL_8, "phba = %p\n", phba);

	if (enable_msix)
		num_cpus = find_num_cpus();
	else
		num_cpus = 1;
	phba->num_cpus = num_cpus;
	SE_DEBUG(DBG_LVL_8, "num_cpus = %d\n", phba->num_cpus);

	if (enable_msix)
		beiscsi_msix_enable(phba);
	ret = be_ctrl_init(phba, pcidev);
	if (ret) {
		shost_printk(KERN_ERR, phba->shost, "beiscsi_dev_probe-"
			     "Failed in be_ctrl_init\n");
		goto hba_free;
	}

	spin_lock_init(&phba->io_sgl_lock);
	spin_lock_init(&phba->mgmt_sgl_lock);
	spin_lock_init(&phba->isr_lock);
	ret = mgmt_get_fw_config(&phba->ctrl, phba);
	if (ret != 0) {
		shost_printk(KERN_ERR, phba->shost,
			     "Error getting fw config\n");
		goto free_port;
	}
	phba->shost->max_id = phba->fw_config.iscsi_cid_count;
	phba->shost->can_queue = phba->params.ios_per_ctrl;
	beiscsi_get_params(phba);
	ret = beiscsi_init_port(phba);
	if (ret < 0) {
		shost_printk(KERN_ERR, phba->shost, "beiscsi_dev_probe-"
			     "Failed in beiscsi_init_port\n");
		goto free_port;
	}

	snprintf(phba->wq_name, sizeof(phba->wq_name), "beiscsi_q_irq%u",
		 phba->shost->host_no);
	phba->wq = create_workqueue(phba->wq_name);
	if (!phba->wq) {
		shost_printk(KERN_ERR, phba->shost, "beiscsi_dev_probe-"
			     "Failed to allocate work queue\n");
		ret = -ENOMEM;
		goto free_twq;
	}

	INIT_WORK(&phba->work_cqs, beiscsi_process_all_cqs);

	phwi_ctrlr = phba->phwi_ctrlr;
	phwi_context = phwi_ctrlr->phwi_ctxt;
	if (blk_iopoll_enabled) {
		for (i = 0; i < phba->num_cpus; i++) {
			pbe_eq = &phwi_context->be_eq[i];
			blk_iopoll_init(&pbe_eq->iopoll, be_iopoll_budget,
					be_iopoll);
			blk_iopoll_enable(&pbe_eq->iopoll);
		}
	}
	ret = beiscsi_init_irqs(phba);
	if (ret < 0) {
		shost_printk(KERN_ERR, phba->shost, "beiscsi_dev_probe-"
			     "Failed in beiscsi_init_irqs\n");
		goto free_blkenbld;
	}
	ret = hwi_enable_intr(phba);
	if (ret < 0) {
		shost_printk(KERN_ERR, phba->shost, "beiscsi_dev_probe-"
			     "Failed in hwi_enable_intr\n");
		goto free_ctrlr;
	}
	SE_DEBUG(DBG_LVL_8, "\n\n\n SUCCESS - DRIVER LOADED \n\n\n");
	return 0;

free_ctrlr:
	if (phba->msix_enabled) {
		for (i = 0; i <= phba->num_cpus; i++) {
			msix_vec = phba->msix_entries[i].vector;
			free_irq(msix_vec, &phwi_context->be_eq[i]);
		}
	} else if (phba->pcidev->irq)
		free_irq(phba->pcidev->irq, phba);
	pci_disable_msix(phba->pcidev);
free_blkenbld:
	destroy_workqueue(phba->wq);
	if (blk_iopoll_enabled)
		for (i = 0; i < phba->num_cpus; i++) {
			pbe_eq = &phwi_context->be_eq[i];
			blk_iopoll_disable(&pbe_eq->iopoll);
		}
free_twq:
	beiscsi_clean_port(phba);
	beiscsi_free_mem(phba);
free_port:
	pci_free_consistent(phba->pcidev,
			    phba->ctrl.mbox_mem_alloced.size,
			    phba->ctrl.mbox_mem_alloced.va,
			    phba->ctrl.mbox_mem_alloced.dma);
	beiscsi_unmap_pci_function(phba);
hba_free:
	iscsi_host_remove(phba->shost);
	pci_dev_put(phba->pcidev);
	iscsi_host_free(phba->shost);
disable_pci:
	pci_disable_device(pcidev);
	return ret;
}

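/* iSCSI transport template registered with the iSCSI class on load. */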
struct iscsi_transport beiscsi_iscsi_transport = {
	.owner = THIS_MODULE,
	.name = DRV_NAME,
	.caps = CAP_RECOVERY_L0 | CAP_HDRDGST |
		CAP_MULTI_R2T | CAP_DATADGST | CAP_DATA_PATH_OFFLOAD,
	.param_mask = ISCSI_MAX_RECV_DLENGTH |
		ISCSI_MAX_XMIT_DLENGTH |
		ISCSI_HDRDGST_EN |
		ISCSI_DATADGST_EN |
		ISCSI_INITIAL_R2T_EN |
		ISCSI_MAX_R2T |
		ISCSI_IMM_DATA_EN |
		ISCSI_FIRST_BURST |
		ISCSI_MAX_BURST |
		ISCSI_PDU_INORDER_EN |
		ISCSI_DATASEQ_INORDER_EN |
		ISCSI_ERL |
		ISCSI_CONN_PORT |
		ISCSI_CONN_ADDRESS |
		ISCSI_EXP_STATSN |
		ISCSI_PERSISTENT_PORT |
		ISCSI_PERSISTENT_ADDRESS |
		ISCSI_TARGET_NAME | ISCSI_TPGT |
		ISCSI_USERNAME | ISCSI_PASSWORD |
		ISCSI_USERNAME_IN | ISCSI_PASSWORD_IN |
		ISCSI_FAST_ABORT | ISCSI_ABORT_TMO |
		ISCSI_LU_RESET_TMO |
		ISCSI_PING_TMO | ISCSI_RECV_TMO |
		ISCSI_IFACE_NAME | ISCSI_INITIATOR_NAME,
	.host_param_mask = ISCSI_HOST_HWADDRESS | ISCSI_HOST_IPADDRESS |
				ISCSI_HOST_INITIATOR_NAME,
	.create_session = beiscsi_session_create,
	.destroy_session = beiscsi_session_destroy,
	.create_conn = beiscsi_conn_create,
	.bind_conn = beiscsi_conn_bind,
	.destroy_conn = iscsi_conn_teardown,
	.set_param = beiscsi_set_param,
	.get_conn_param = beiscsi_conn_get_param,
	.get_session_param = iscsi_session_get_param,
	.get_host_param = beiscsi_get_host_param,
	.start_conn = beiscsi_conn_start,
	.stop_conn = beiscsi_conn_stop,
	.send_pdu = iscsi_conn_send_pdu,
	.xmit_task = beiscsi_task_xmit,
	.cleanup_task = beiscsi_cleanup_task,
	.alloc_pdu = beiscsi_alloc_pdu,
	.parse_pdu_itt = beiscsi_parse_pdu,
	.get_stats = beiscsi_conn_get_stats,
	.ep_connect = beiscsi_ep_connect,
	.ep_poll = beiscsi_ep_poll,
	.ep_disconnect = beiscsi_ep_disconnect,
	.session_recovery_timedout = iscsi_session_recovery_timedout,
};

static struct pci_driver beiscsi_pci_driver = {
	.name = DRV_NAME,
	.probe = beiscsi_dev_probe,
	.remove = beiscsi_remove,
	.id_table = beiscsi_pci_id_table
};

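/**
 * beiscsi_module_init - module entry point
 *
 * Registers the iSCSI transport and then the PCI driver; unwinds the
 * transport registration if PCI registration fails.
 */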
static int __init beiscsi_module_init(void)
{
	int ret;

	beiscsi_scsi_transport =
			iscsi_register_transport(&beiscsi_iscsi_transport);
	if (!beiscsi_scsi_transport) {
		SE_DEBUG(DBG_LVL_1,
			 "beiscsi_module_init - Unable to register beiscsi "
			 "transport.\n");
		return -ENOMEM;
	}
	SE_DEBUG(DBG_LVL_8, "In beiscsi_module_init, tt=%p\n",
		 &beiscsi_iscsi_transport);

	ring_mode = 0;
	ret = pci_register_driver(&beiscsi_pci_driver);
	if (ret) {
		SE_DEBUG(DBG_LVL_1,
			 "beiscsi_module_init - Unable to register "
			 "beiscsi pci driver.\n");
		goto unregister_iscsi_transport;
	}
	return 0;

unregister_iscsi_transport:
	iscsi_unregister_transport(&beiscsi_iscsi_transport);
	return ret;
}

static void __exit beiscsi_module_exit(void)
{
	pci_unregister_driver(&beiscsi_pci_driver);
	iscsi_unregister_transport(&beiscsi_iscsi_transport);
}

module_init(beiscsi_module_init);
module_exit(beiscsi_module_exit);