be_main.c revision 82284c09c5dc5c5a5046f3c852f2683dab60109c
1/**
2 * Copyright (C) 2005 - 2010 ServerEngines
3 * All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License version 2
7 * as published by the Free Software Foundation.  The full GNU General
8 * Public License is included in this distribution in the file called COPYING.
9 *
10 * Written by: Jayamohan Kallickal (jayamohank@serverengines.com)
11 *
12 * Contact Information:
13 * linux-drivers@serverengines.com
14 *
15 *  ServerEngines
16 * 209 N. Fair Oaks Ave
17 * Sunnyvale, CA 94085
18 *
19 */
20#include <linux/reboot.h>
21#include <linux/delay.h>
22#include <linux/slab.h>
23#include <linux/interrupt.h>
24#include <linux/blkdev.h>
25#include <linux/pci.h>
26#include <linux/string.h>
27#include <linux/kernel.h>
28#include <linux/semaphore.h>
29
30#include <scsi/libiscsi.h>
31#include <scsi/scsi_transport_iscsi.h>
32#include <scsi/scsi_transport.h>
33#include <scsi/scsi_cmnd.h>
34#include <scsi/scsi_device.h>
35#include <scsi/scsi_host.h>
36#include <scsi/scsi.h>
37#include "be_main.h"
38#include "be_iscsi.h"
39#include "be_mgmt.h"
40
41static unsigned int be_iopoll_budget = 10;
42static unsigned int be_max_phys_size = 64;
43static unsigned int enable_msix = 1;
44
45MODULE_DEVICE_TABLE(pci, beiscsi_pci_id_table);
46MODULE_DESCRIPTION(DRV_DESC " " BUILD_STR);
47MODULE_AUTHOR("ServerEngines Corporation");
48MODULE_LICENSE("GPL");
49module_param(be_iopoll_budget, int, 0);
50module_param(enable_msix, int, 0);
51module_param(be_max_phys_size, uint, S_IRUGO);
52MODULE_PARM_DESC(be_max_phys_size, "Maximum Size (In Kilobytes) of physically"
53				   "contiguous memory that can be allocated."
54				   "Range is 16 - 128");
55
56static int beiscsi_slave_configure(struct scsi_device *sdev)
57{
58	blk_queue_max_segment_size(sdev->request_queue, 65536);
59	return 0;
60}
61
/*
 * beiscsi_eh_abort - SCSI EH abort handler.
 *
 * Tells the firmware to invalidate the single ICD backing @sc, waits for the
 * MCC completion, then hands the abort to libiscsi via iscsi_eh_abort().
 * Returns SUCCESS if the command already completed (we raced), FAILED if the
 * invalidate could not even be submitted.
 */
static int beiscsi_eh_abort(struct scsi_cmnd *sc)
{
	struct iscsi_cls_session *cls_session;
	/* libiscsi stashes its task pointer in the midlayer scratch area */
	struct iscsi_task *aborted_task = (struct iscsi_task *)sc->SCp.ptr;
	struct beiscsi_io_task *aborted_io_task;
	struct iscsi_conn *conn;
	struct beiscsi_conn *beiscsi_conn;
	struct beiscsi_hba *phba;
	struct iscsi_session *session;
	struct invalidate_command_table *inv_tbl;
	unsigned int cid, tag, num_invalidate;

	cls_session = starget_to_session(scsi_target(sc->device));
	session = cls_session->dd_data;

	/* session->lock guards task state against the completion path */
	spin_lock_bh(&session->lock);
	if (!aborted_task || !aborted_task->sc) {
		/* we raced */
		spin_unlock_bh(&session->lock);
		return SUCCESS;
	}

	aborted_io_task = aborted_task->dd_data;
	if (!aborted_io_task->scsi_cmnd) {
		/* raced or invalid command */
		spin_unlock_bh(&session->lock);
		return SUCCESS;
	}
	spin_unlock_bh(&session->lock);
	conn = aborted_task->conn;
	beiscsi_conn = conn->dd_data;
	phba = beiscsi_conn->phba;

	/* invalidate iocb */
	cid = beiscsi_conn->beiscsi_conn_cid;
	inv_tbl = phba->inv_tbl;
	memset(inv_tbl, 0x0, sizeof(*inv_tbl));
	inv_tbl->cid = cid;
	inv_tbl->icd = aborted_io_task->psgl_handle->sgl_index;
	num_invalidate = 1;
	tag = mgmt_invalidate_icds(phba, inv_tbl, num_invalidate, cid);
	if (!tag) {
		shost_printk(KERN_WARNING, phba->shost,
			     "mgmt_invalidate_icds could not be"
			     " submitted\n");
		return FAILED;
	} else {
		/*
		 * NOTE(review): the interruptible wait's return value is
		 * ignored; a signal would free the tag before the firmware
		 * completes it - TODO confirm whether that is safe here.
		 */
		wait_event_interruptible(phba->ctrl.mcc_wait[tag],
					 phba->ctrl.mcc_numtag[tag]);
		free_mcc_tag(&phba->ctrl, tag);
	}

	return iscsi_eh_abort(sc);
}
116
117static int beiscsi_eh_device_reset(struct scsi_cmnd *sc)
118{
119	struct iscsi_task *abrt_task;
120	struct beiscsi_io_task *abrt_io_task;
121	struct iscsi_conn *conn;
122	struct beiscsi_conn *beiscsi_conn;
123	struct beiscsi_hba *phba;
124	struct iscsi_session *session;
125	struct iscsi_cls_session *cls_session;
126	struct invalidate_command_table *inv_tbl;
127	unsigned int cid, tag, i, num_invalidate;
128	int rc = FAILED;
129
130	/* invalidate iocbs */
131	cls_session = starget_to_session(scsi_target(sc->device));
132	session = cls_session->dd_data;
133	spin_lock_bh(&session->lock);
134	if (!session->leadconn || session->state != ISCSI_STATE_LOGGED_IN)
135		goto unlock;
136
137	conn = session->leadconn;
138	beiscsi_conn = conn->dd_data;
139	phba = beiscsi_conn->phba;
140	cid = beiscsi_conn->beiscsi_conn_cid;
141	inv_tbl = phba->inv_tbl;
142	memset(inv_tbl, 0x0, sizeof(*inv_tbl) * BE2_CMDS_PER_CXN);
143	num_invalidate = 0;
144	for (i = 0; i < conn->session->cmds_max; i++) {
145		abrt_task = conn->session->cmds[i];
146		abrt_io_task = abrt_task->dd_data;
147		if (!abrt_task->sc || abrt_task->state == ISCSI_TASK_FREE)
148			continue;
149
150		if (abrt_task->sc->device->lun != abrt_task->sc->device->lun)
151			continue;
152
153		inv_tbl->cid = cid;
154		inv_tbl->icd = abrt_io_task->psgl_handle->sgl_index;
155		num_invalidate++;
156		inv_tbl++;
157	}
158	spin_unlock_bh(&session->lock);
159	inv_tbl = phba->inv_tbl;
160
161	tag = mgmt_invalidate_icds(phba, inv_tbl, num_invalidate, cid);
162	if (!tag) {
163		shost_printk(KERN_WARNING, phba->shost,
164			     "mgmt_invalidate_icds could not be"
165			     " submitted\n");
166		return FAILED;
167	} else {
168		wait_event_interruptible(phba->ctrl.mcc_wait[tag],
169					 phba->ctrl.mcc_numtag[tag]);
170		free_mcc_tag(&phba->ctrl, tag);
171	}
172
173	return iscsi_eh_device_reset(sc);
174unlock:
175	spin_unlock_bh(&session->lock);
176	return rc;
177}
178
/*------------------- PCI Driver operations and data ----------------- */
/* PCI vendor/device IDs this driver binds to; terminated by the zero entry. */
static DEFINE_PCI_DEVICE_TABLE(beiscsi_pci_id_table) = {
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID3) },
	{ 0 }
};
188MODULE_DEVICE_TABLE(pci, beiscsi_pci_id_table);
189
/*
 * SCSI host template: queueing and error handling are delegated to libiscsi
 * except for abort/device-reset, which first invalidate firmware ICDs.
 */
static struct scsi_host_template beiscsi_sht = {
	.module = THIS_MODULE,
	.name = "ServerEngines 10Gbe open-iscsi Initiator Driver",
	.proc_name = DRV_NAME,
	.queuecommand = iscsi_queuecommand,
	.change_queue_depth = iscsi_change_queue_depth,
	.slave_configure = beiscsi_slave_configure,
	.target_alloc = iscsi_target_alloc,
	.eh_abort_handler = beiscsi_eh_abort,
	.eh_device_reset_handler = beiscsi_eh_device_reset,
	.eh_target_reset_handler = iscsi_eh_session_reset,
	.sg_tablesize = BEISCSI_SGLIST_ELEMENTS,
	.can_queue = BE2_IO_DEPTH,
	.this_id = -1,
	.max_sectors = BEISCSI_MAX_SECTORS,
	.cmd_per_lun = BEISCSI_CMD_PER_LUN,
	.use_clustering = ENABLE_CLUSTERING,
};

/* Filled in at module init by the iscsi transport registration. */
static struct scsi_transport_template *beiscsi_scsi_transport;
210
211static struct beiscsi_hba *beiscsi_hba_alloc(struct pci_dev *pcidev)
212{
213	struct beiscsi_hba *phba;
214	struct Scsi_Host *shost;
215
216	shost = iscsi_host_alloc(&beiscsi_sht, sizeof(*phba), 0);
217	if (!shost) {
218		dev_err(&pcidev->dev, "beiscsi_hba_alloc -"
219			"iscsi_host_alloc failed \n");
220		return NULL;
221	}
222	shost->dma_boundary = pcidev->dma_mask;
223	shost->max_id = BE2_MAX_SESSIONS;
224	shost->max_channel = 0;
225	shost->max_cmd_len = BEISCSI_MAX_CMD_LEN;
226	shost->max_lun = BEISCSI_NUM_MAX_LUN;
227	shost->transportt = beiscsi_scsi_transport;
228	phba = iscsi_host_priv(shost);
229	memset(phba, 0, sizeof(*phba));
230	phba->shost = shost;
231	phba->pcidev = pci_dev_get(pcidev);
232	pci_set_drvdata(pcidev, phba);
233
234	if (iscsi_host_add(shost, &phba->pcidev->dev))
235		goto free_devices;
236	return phba;
237
238free_devices:
239	pci_dev_put(phba->pcidev);
240	iscsi_host_free(phba->shost);
241	return NULL;
242}
243
244static void beiscsi_unmap_pci_function(struct beiscsi_hba *phba)
245{
246	if (phba->csr_va) {
247		iounmap(phba->csr_va);
248		phba->csr_va = NULL;
249	}
250	if (phba->db_va) {
251		iounmap(phba->db_va);
252		phba->db_va = NULL;
253	}
254	if (phba->pci_va) {
255		iounmap(phba->pci_va);
256		phba->pci_va = NULL;
257	}
258}
259
/*
 * Map the adapter's BARs: BAR 2 (CSRs), BAR 4 (doorbells, first 128 KB) and
 * the PCI config shadow BAR (1 on Gen2 hardware, 0 otherwise). Records both
 * the virtual and bus addresses of each region. Returns 0 or -ENOMEM; on
 * failure any partial mappings are torn down.
 */
static int beiscsi_map_pci_bars(struct beiscsi_hba *phba,
				struct pci_dev *pcidev)
{
	u8 __iomem *addr;
	int pcicfg_reg;

	addr = ioremap_nocache(pci_resource_start(pcidev, 2),
			       pci_resource_len(pcidev, 2));
	if (addr == NULL)
		return -ENOMEM;
	phba->ctrl.csr = addr;
	phba->csr_va = addr;
	phba->csr_pa.u.a64.address = pci_resource_start(pcidev, 2);

	/* Doorbell BAR: only the first 128 KB is mapped. */
	addr = ioremap_nocache(pci_resource_start(pcidev, 4), 128 * 1024);
	if (addr == NULL)
		goto pci_map_err;
	phba->ctrl.db = addr;
	phba->db_va = addr;
	phba->db_pa.u.a64.address =  pci_resource_start(pcidev, 4);

	/* The PCI config shadow lives in a different BAR on Gen2 parts. */
	if (phba->generation == BE_GEN2)
		pcicfg_reg = 1;
	else
		pcicfg_reg = 0;

	addr = ioremap_nocache(pci_resource_start(pcidev, pcicfg_reg),
			       pci_resource_len(pcidev, pcicfg_reg));

	if (addr == NULL)
		goto pci_map_err;
	phba->ctrl.pcicfg = addr;
	phba->pci_va = addr;
	phba->pci_pa.u.a64.address = pci_resource_start(pcidev, pcicfg_reg);
	return 0;

pci_map_err:
	beiscsi_unmap_pci_function(phba);
	return -ENOMEM;
}
300
301static int beiscsi_enable_pci(struct pci_dev *pcidev)
302{
303	int ret;
304
305	ret = pci_enable_device(pcidev);
306	if (ret) {
307		dev_err(&pcidev->dev, "beiscsi_enable_pci - enable device "
308			"failed. Returning -ENODEV\n");
309		return ret;
310	}
311
312	pci_set_master(pcidev);
313	if (pci_set_consistent_dma_mask(pcidev, DMA_BIT_MASK(64))) {
314		ret = pci_set_consistent_dma_mask(pcidev, DMA_BIT_MASK(32));
315		if (ret) {
316			dev_err(&pcidev->dev, "Could not set PCI DMA Mask\n");
317			pci_disable_device(pcidev);
318			return ret;
319		}
320	}
321	return 0;
322}
323
/*
 * Initialise the control structure: map the BARs, allocate the mailbox DMA
 * buffer (over-allocated by 16 bytes so it can be aligned to a 16-byte
 * boundary as the hardware requires), and set up the mailbox/MCC locks.
 * Returns 0 or a negative errno; BARs are unmapped on allocation failure.
 */
static int be_ctrl_init(struct beiscsi_hba *phba, struct pci_dev *pdev)
{
	struct be_ctrl_info *ctrl = &phba->ctrl;
	struct be_dma_mem *mbox_mem_alloc = &ctrl->mbox_mem_alloced;
	struct be_dma_mem *mbox_mem_align = &ctrl->mbox_mem;
	int status = 0;

	ctrl->pdev = pdev;
	status = beiscsi_map_pci_bars(phba, pdev);
	if (status)
		return status;
	/* +16 slack so the aligned view below always fits */
	mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
	mbox_mem_alloc->va = pci_alloc_consistent(pdev,
						  mbox_mem_alloc->size,
						  &mbox_mem_alloc->dma);
	if (!mbox_mem_alloc->va) {
		beiscsi_unmap_pci_function(phba);
		status = -ENOMEM;
		return status;
	}

	/* 16-byte-aligned view of the same buffer, for both VA and DMA addr */
	mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
	mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
	mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
	memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));
	spin_lock_init(&ctrl->mbox_lock);
	/* ctrl == &phba->ctrl; these init the remaining locks of the same struct */
	spin_lock_init(&phba->ctrl.mcc_lock);
	spin_lock_init(&phba->ctrl.mcc_cq_lock);

	return status;
}
355
356static void beiscsi_get_params(struct beiscsi_hba *phba)
357{
358	phba->params.ios_per_ctrl = (phba->fw_config.iscsi_icd_count
359				    - (phba->fw_config.iscsi_cid_count
360				    + BE2_TMFS
361				    + BE2_NOPOUT_REQ));
362	phba->params.cxns_per_ctrl = phba->fw_config.iscsi_cid_count;
363	phba->params.asyncpdus_per_ctrl = phba->fw_config.iscsi_cid_count * 2;
364	phba->params.icds_per_ctrl = phba->fw_config.iscsi_icd_count;;
365	phba->params.num_sge_per_io = BE2_SGE;
366	phba->params.defpdu_hdr_sz = BE2_DEFPDU_HDR_SZ;
367	phba->params.defpdu_data_sz = BE2_DEFPDU_DATA_SZ;
368	phba->params.eq_timer = 64;
369	phba->params.num_eq_entries =
370	    (((BE2_CMDS_PER_CXN * 2 + phba->fw_config.iscsi_cid_count * 2
371				    + BE2_TMFS) / 512) + 1) * 512;
372	phba->params.num_eq_entries = (phba->params.num_eq_entries < 1024)
373				? 1024 : phba->params.num_eq_entries;
374	SE_DEBUG(DBG_LVL_8, "phba->params.num_eq_entries=%d \n",
375			     phba->params.num_eq_entries);
376	phba->params.num_cq_entries =
377	    (((BE2_CMDS_PER_CXN * 2 +  phba->fw_config.iscsi_cid_count * 2
378				    + BE2_TMFS) / 512) + 1) * 512;
379	phba->params.wrbs_per_cxn = 256;
380}
381
382static void hwi_ring_eq_db(struct beiscsi_hba *phba,
383			   unsigned int id, unsigned int clr_interrupt,
384			   unsigned int num_processed,
385			   unsigned char rearm, unsigned char event)
386{
387	u32 val = 0;
388	val |= id & DB_EQ_RING_ID_MASK;
389	if (rearm)
390		val |= 1 << DB_EQ_REARM_SHIFT;
391	if (clr_interrupt)
392		val |= 1 << DB_EQ_CLR_SHIFT;
393	if (event)
394		val |= 1 << DB_EQ_EVNT_SHIFT;
395	val |= num_processed << DB_EQ_NUM_POPPED_SHIFT;
396	iowrite32(val, phba->db_va + DB_EQ_OFFSET);
397}
398
/**
 * be_isr_mcc - MSI-X interrupt handler for the MCC (management) event queue.
 * @irq: Not used
 * @dev_id: Pointer to the be_eq_obj for this vector
 *
 * Drains valid EQ entries; any entry whose resource id matches the MCC CQ
 * sets todo_mcc_cq so the workqueue processes the completions. Finally the
 * EQ doorbell is rung to ack and re-arm.
 */
static irqreturn_t be_isr_mcc(int irq, void *dev_id)
{
	struct beiscsi_hba *phba;
	struct be_eq_entry *eqe = NULL;
	struct be_queue_info *eq;
	struct be_queue_info *mcc;
	unsigned int num_eq_processed;
	struct be_eq_obj *pbe_eq;
	unsigned long flags;

	pbe_eq = dev_id;
	eq = &pbe_eq->q;
	phba =  pbe_eq->phba;
	mcc = &phba->ctrl.mcc_obj.cq;
	eqe = queue_tail_node(eq);
	if (!eqe)
		SE_DEBUG(DBG_LVL_1, "eqe is NULL\n");

	num_eq_processed = 0;

	/* consume entries until the valid bit stops being set */
	while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32]
				& EQE_VALID_MASK) {
		if (((eqe->dw[offsetof(struct amap_eq_entry,
		     resource_id) / 32] &
		     EQE_RESID_MASK) >> 16) == mcc->id) {
			spin_lock_irqsave(&phba->isr_lock, flags);
			phba->todo_mcc_cq = 1;
			spin_unlock_irqrestore(&phba->isr_lock, flags);
		}
		/* clear the valid bit so the entry is not seen again */
		AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0);
		queue_tail_inc(eq);
		eqe = queue_tail_node(eq);
		num_eq_processed++;
	}
	if (phba->todo_mcc_cq)
		queue_work(phba->wq, &phba->work_cqs);
	if (num_eq_processed)
		hwi_ring_eq_db(phba, eq->id, 1,	num_eq_processed, 1, 1);

	return IRQ_HANDLED;
}
445
/**
 * be_isr_msix - MSI-X interrupt handler for an I/O event queue.
 * @irq: Not used
 * @dev_id: Pointer to the be_eq_obj for this vector
 *
 * With blk-iopoll enabled, completions are processed in the iopoll softirq
 * and the EQ is left un-rearmed (rearm=0) until the poll routine finishes;
 * otherwise the workqueue path is used and the EQ is re-armed immediately.
 */
static irqreturn_t be_isr_msix(int irq, void *dev_id)
{
	struct beiscsi_hba *phba;
	struct be_eq_entry *eqe = NULL;
	struct be_queue_info *eq;
	struct be_queue_info *cq;
	unsigned int num_eq_processed;
	struct be_eq_obj *pbe_eq;
	unsigned long flags;

	pbe_eq = dev_id;
	eq = &pbe_eq->q;
	cq = pbe_eq->cq;
	eqe = queue_tail_node(eq);
	if (!eqe)
		SE_DEBUG(DBG_LVL_1, "eqe is NULL\n");

	phba = pbe_eq->phba;
	num_eq_processed = 0;
	if (blk_iopoll_enabled) {
		while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32]
					& EQE_VALID_MASK) {
			/* schedule the poll routine once; extra calls no-op */
			if (!blk_iopoll_sched_prep(&pbe_eq->iopoll))
				blk_iopoll_sched(&pbe_eq->iopoll);

			AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0);
			queue_tail_inc(eq);
			eqe = queue_tail_node(eq);
			num_eq_processed++;
		}
		/* ack without re-arming; iopoll completion re-arms later */
		if (num_eq_processed)
			hwi_ring_eq_db(phba, eq->id, 1,	num_eq_processed, 0, 1);

		return IRQ_HANDLED;
	} else {
		while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32]
						& EQE_VALID_MASK) {
			spin_lock_irqsave(&phba->isr_lock, flags);
			phba->todo_cq = 1;
			spin_unlock_irqrestore(&phba->isr_lock, flags);
			AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0);
			queue_tail_inc(eq);
			eqe = queue_tail_node(eq);
			num_eq_processed++;
		}
		if (phba->todo_cq)
			queue_work(phba->wq, &phba->work_cqs);

		if (num_eq_processed)
			hwi_ring_eq_db(phba, eq->id, 1, num_eq_processed, 1, 1);

		return IRQ_HANDLED;
	}
}
505
506/**
507 * be_isr - The isr routine of the driver.
508 * @irq: Not used
509 * @dev_id: Pointer to host adapter structure
510 */
511static irqreturn_t be_isr(int irq, void *dev_id)
512{
513	struct beiscsi_hba *phba;
514	struct hwi_controller *phwi_ctrlr;
515	struct hwi_context_memory *phwi_context;
516	struct be_eq_entry *eqe = NULL;
517	struct be_queue_info *eq;
518	struct be_queue_info *cq;
519	struct be_queue_info *mcc;
520	unsigned long flags, index;
521	unsigned int num_mcceq_processed, num_ioeq_processed;
522	struct be_ctrl_info *ctrl;
523	struct be_eq_obj *pbe_eq;
524	int isr;
525
526	phba = dev_id;
527	ctrl = &phba->ctrl;;
528	isr = ioread32(ctrl->csr + CEV_ISR0_OFFSET +
529		       (PCI_FUNC(ctrl->pdev->devfn) * CEV_ISR_SIZE));
530	if (!isr)
531		return IRQ_NONE;
532
533	phwi_ctrlr = phba->phwi_ctrlr;
534	phwi_context = phwi_ctrlr->phwi_ctxt;
535	pbe_eq = &phwi_context->be_eq[0];
536
537	eq = &phwi_context->be_eq[0].q;
538	mcc = &phba->ctrl.mcc_obj.cq;
539	index = 0;
540	eqe = queue_tail_node(eq);
541	if (!eqe)
542		SE_DEBUG(DBG_LVL_1, "eqe is NULL\n");
543
544	num_ioeq_processed = 0;
545	num_mcceq_processed = 0;
546	if (blk_iopoll_enabled) {
547		while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32]
548					& EQE_VALID_MASK) {
549			if (((eqe->dw[offsetof(struct amap_eq_entry,
550			     resource_id) / 32] &
551			     EQE_RESID_MASK) >> 16) == mcc->id) {
552				spin_lock_irqsave(&phba->isr_lock, flags);
553				phba->todo_mcc_cq = 1;
554				spin_unlock_irqrestore(&phba->isr_lock, flags);
555				num_mcceq_processed++;
556			} else {
557				if (!blk_iopoll_sched_prep(&pbe_eq->iopoll))
558					blk_iopoll_sched(&pbe_eq->iopoll);
559				num_ioeq_processed++;
560			}
561			AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0);
562			queue_tail_inc(eq);
563			eqe = queue_tail_node(eq);
564		}
565		if (num_ioeq_processed || num_mcceq_processed) {
566			if (phba->todo_mcc_cq)
567				queue_work(phba->wq, &phba->work_cqs);
568
569			if ((num_mcceq_processed) && (!num_ioeq_processed))
570				hwi_ring_eq_db(phba, eq->id, 0,
571					      (num_ioeq_processed +
572					       num_mcceq_processed) , 1, 1);
573			else
574				hwi_ring_eq_db(phba, eq->id, 0,
575					       (num_ioeq_processed +
576						num_mcceq_processed), 0, 1);
577
578			return IRQ_HANDLED;
579		} else
580			return IRQ_NONE;
581	} else {
582		cq = &phwi_context->be_cq[0];
583		while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32]
584						& EQE_VALID_MASK) {
585
586			if (((eqe->dw[offsetof(struct amap_eq_entry,
587			     resource_id) / 32] &
588			     EQE_RESID_MASK) >> 16) != cq->id) {
589				spin_lock_irqsave(&phba->isr_lock, flags);
590				phba->todo_mcc_cq = 1;
591				spin_unlock_irqrestore(&phba->isr_lock, flags);
592			} else {
593				spin_lock_irqsave(&phba->isr_lock, flags);
594				phba->todo_cq = 1;
595				spin_unlock_irqrestore(&phba->isr_lock, flags);
596			}
597			AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0);
598			queue_tail_inc(eq);
599			eqe = queue_tail_node(eq);
600			num_ioeq_processed++;
601		}
602		if (phba->todo_cq || phba->todo_mcc_cq)
603			queue_work(phba->wq, &phba->work_cqs);
604
605		if (num_ioeq_processed) {
606			hwi_ring_eq_db(phba, eq->id, 0,
607				       num_ioeq_processed, 1, 1);
608			return IRQ_HANDLED;
609		} else
610			return IRQ_NONE;
611	}
612}
613
614static int beiscsi_init_irqs(struct beiscsi_hba *phba)
615{
616	struct pci_dev *pcidev = phba->pcidev;
617	struct hwi_controller *phwi_ctrlr;
618	struct hwi_context_memory *phwi_context;
619	int ret, msix_vec, i = 0;
620	char desc[32];
621
622	phwi_ctrlr = phba->phwi_ctrlr;
623	phwi_context = phwi_ctrlr->phwi_ctxt;
624
625	if (phba->msix_enabled) {
626		for (i = 0; i < phba->num_cpus; i++) {
627			sprintf(desc, "beiscsi_msix_%04x", i);
628			msix_vec = phba->msix_entries[i].vector;
629			ret = request_irq(msix_vec, be_isr_msix, 0, desc,
630					  &phwi_context->be_eq[i]);
631		}
632		msix_vec = phba->msix_entries[i].vector;
633		ret = request_irq(msix_vec, be_isr_mcc, 0, "beiscsi_msix_mcc",
634				  &phwi_context->be_eq[i]);
635	} else {
636		ret = request_irq(pcidev->irq, be_isr, IRQF_SHARED,
637				  "beiscsi", phba);
638		if (ret) {
639			shost_printk(KERN_ERR, phba->shost, "beiscsi_init_irqs-"
640				     "Failed to register irq\\n");
641			return ret;
642		}
643	}
644	return 0;
645}
646
647static void hwi_ring_cq_db(struct beiscsi_hba *phba,
648			   unsigned int id, unsigned int num_processed,
649			   unsigned char rearm, unsigned char event)
650{
651	u32 val = 0;
652	val |= id & DB_CQ_RING_ID_MASK;
653	if (rearm)
654		val |= 1 << DB_CQ_REARM_SHIFT;
655	val |= num_processed << DB_CQ_NUM_POPPED_SHIFT;
656	iowrite32(val, phba->db_va + DB_CQ_OFFSET);
657}
658
659static unsigned int
660beiscsi_process_async_pdu(struct beiscsi_conn *beiscsi_conn,
661			  struct beiscsi_hba *phba,
662			  unsigned short cid,
663			  struct pdu_base *ppdu,
664			  unsigned long pdu_len,
665			  void *pbuffer, unsigned long buf_len)
666{
667	struct iscsi_conn *conn = beiscsi_conn->conn;
668	struct iscsi_session *session = conn->session;
669	struct iscsi_task *task;
670	struct beiscsi_io_task *io_task;
671	struct iscsi_hdr *login_hdr;
672
673	switch (ppdu->dw[offsetof(struct amap_pdu_base, opcode) / 32] &
674						PDUBASE_OPCODE_MASK) {
675	case ISCSI_OP_NOOP_IN:
676		pbuffer = NULL;
677		buf_len = 0;
678		break;
679	case ISCSI_OP_ASYNC_EVENT:
680		break;
681	case ISCSI_OP_REJECT:
682		WARN_ON(!pbuffer);
683		WARN_ON(!(buf_len == 48));
684		SE_DEBUG(DBG_LVL_1, "In ISCSI_OP_REJECT\n");
685		break;
686	case ISCSI_OP_LOGIN_RSP:
687	case ISCSI_OP_TEXT_RSP:
688		task = conn->login_task;
689		io_task = task->dd_data;
690		login_hdr = (struct iscsi_hdr *)ppdu;
691		login_hdr->itt = io_task->libiscsi_itt;
692		break;
693	default:
694		shost_printk(KERN_WARNING, phba->shost,
695			     "Unrecognized opcode 0x%x in async msg \n",
696			     (ppdu->
697			     dw[offsetof(struct amap_pdu_base, opcode) / 32]
698						& PDUBASE_OPCODE_MASK));
699		return 1;
700	}
701
702	spin_lock_bh(&session->lock);
703	__iscsi_complete_pdu(conn, (struct iscsi_hdr *)ppdu, pbuffer, buf_len);
704	spin_unlock_bh(&session->lock);
705	return 0;
706}
707
708static struct sgl_handle *alloc_io_sgl_handle(struct beiscsi_hba *phba)
709{
710	struct sgl_handle *psgl_handle;
711
712	if (phba->io_sgl_hndl_avbl) {
713		SE_DEBUG(DBG_LVL_8,
714			 "In alloc_io_sgl_handle,io_sgl_alloc_index=%d \n",
715			 phba->io_sgl_alloc_index);
716		psgl_handle = phba->io_sgl_hndl_base[phba->
717						io_sgl_alloc_index];
718		phba->io_sgl_hndl_base[phba->io_sgl_alloc_index] = NULL;
719		phba->io_sgl_hndl_avbl--;
720		if (phba->io_sgl_alloc_index == (phba->params.
721						 ios_per_ctrl - 1))
722			phba->io_sgl_alloc_index = 0;
723		else
724			phba->io_sgl_alloc_index++;
725	} else
726		psgl_handle = NULL;
727	return psgl_handle;
728}
729
730static void
731free_io_sgl_handle(struct beiscsi_hba *phba, struct sgl_handle *psgl_handle)
732{
733	SE_DEBUG(DBG_LVL_8, "In free_,io_sgl_free_index=%d \n",
734		 phba->io_sgl_free_index);
735	if (phba->io_sgl_hndl_base[phba->io_sgl_free_index]) {
736		/*
737		 * this can happen if clean_task is called on a task that
738		 * failed in xmit_task or alloc_pdu.
739		 */
740		 SE_DEBUG(DBG_LVL_8,
741			 "Double Free in IO SGL io_sgl_free_index=%d,"
742			 "value there=%p \n", phba->io_sgl_free_index,
743			 phba->io_sgl_hndl_base[phba->io_sgl_free_index]);
744		return;
745	}
746	phba->io_sgl_hndl_base[phba->io_sgl_free_index] = psgl_handle;
747	phba->io_sgl_hndl_avbl++;
748	if (phba->io_sgl_free_index == (phba->params.ios_per_ctrl - 1))
749		phba->io_sgl_free_index = 0;
750	else
751		phba->io_sgl_free_index++;
752}
753
754/**
755 * alloc_wrb_handle - To allocate a wrb handle
756 * @phba: The hba pointer
757 * @cid: The cid to use for allocation
758 *
759 * This happens under session_lock until submission to chip
760 */
761struct wrb_handle *alloc_wrb_handle(struct beiscsi_hba *phba, unsigned int cid)
762{
763	struct hwi_wrb_context *pwrb_context;
764	struct hwi_controller *phwi_ctrlr;
765	struct wrb_handle *pwrb_handle, *pwrb_handle_tmp;
766
767	phwi_ctrlr = phba->phwi_ctrlr;
768	pwrb_context = &phwi_ctrlr->wrb_context[cid];
769	if (pwrb_context->wrb_handles_available >= 2) {
770		pwrb_handle = pwrb_context->pwrb_handle_base[
771					    pwrb_context->alloc_index];
772		pwrb_context->wrb_handles_available--;
773		if (pwrb_context->alloc_index ==
774						(phba->params.wrbs_per_cxn - 1))
775			pwrb_context->alloc_index = 0;
776		else
777			pwrb_context->alloc_index++;
778		pwrb_handle_tmp = pwrb_context->pwrb_handle_base[
779						pwrb_context->alloc_index];
780		pwrb_handle->nxt_wrb_index = pwrb_handle_tmp->wrb_index;
781	} else
782		pwrb_handle = NULL;
783	return pwrb_handle;
784}
785
786/**
787 * free_wrb_handle - To free the wrb handle back to pool
788 * @phba: The hba pointer
789 * @pwrb_context: The context to free from
790 * @pwrb_handle: The wrb_handle to free
791 *
792 * This happens under session_lock until submission to chip
793 */
794static void
795free_wrb_handle(struct beiscsi_hba *phba, struct hwi_wrb_context *pwrb_context,
796		struct wrb_handle *pwrb_handle)
797{
798	pwrb_context->pwrb_handle_base[pwrb_context->free_index] = pwrb_handle;
799	pwrb_context->wrb_handles_available++;
800	if (pwrb_context->free_index == (phba->params.wrbs_per_cxn - 1))
801		pwrb_context->free_index = 0;
802	else
803		pwrb_context->free_index++;
804
805	SE_DEBUG(DBG_LVL_8,
806		 "FREE WRB: pwrb_handle=%p free_index=0x%x"
807		 "wrb_handles_available=%d \n",
808		 pwrb_handle, pwrb_context->free_index,
809		 pwrb_context->wrb_handles_available);
810}
811
812static struct sgl_handle *alloc_mgmt_sgl_handle(struct beiscsi_hba *phba)
813{
814	struct sgl_handle *psgl_handle;
815
816	if (phba->eh_sgl_hndl_avbl) {
817		psgl_handle = phba->eh_sgl_hndl_base[phba->eh_sgl_alloc_index];
818		phba->eh_sgl_hndl_base[phba->eh_sgl_alloc_index] = NULL;
819		SE_DEBUG(DBG_LVL_8, "mgmt_sgl_alloc_index=%d=0x%x \n",
820			 phba->eh_sgl_alloc_index, phba->eh_sgl_alloc_index);
821		phba->eh_sgl_hndl_avbl--;
822		if (phba->eh_sgl_alloc_index ==
823		    (phba->params.icds_per_ctrl - phba->params.ios_per_ctrl -
824		     1))
825			phba->eh_sgl_alloc_index = 0;
826		else
827			phba->eh_sgl_alloc_index++;
828	} else
829		psgl_handle = NULL;
830	return psgl_handle;
831}
832
833void
834free_mgmt_sgl_handle(struct beiscsi_hba *phba, struct sgl_handle *psgl_handle)
835{
836
837	SE_DEBUG(DBG_LVL_8, "In  free_mgmt_sgl_handle,eh_sgl_free_index=%d \n",
838			     phba->eh_sgl_free_index);
839	if (phba->eh_sgl_hndl_base[phba->eh_sgl_free_index]) {
840		/*
841		 * this can happen if clean_task is called on a task that
842		 * failed in xmit_task or alloc_pdu.
843		 */
844		SE_DEBUG(DBG_LVL_8,
845			 "Double Free in eh SGL ,eh_sgl_free_index=%d \n",
846			 phba->eh_sgl_free_index);
847		return;
848	}
849	phba->eh_sgl_hndl_base[phba->eh_sgl_free_index] = psgl_handle;
850	phba->eh_sgl_hndl_avbl++;
851	if (phba->eh_sgl_free_index ==
852	    (phba->params.icds_per_ctrl - phba->params.ios_per_ctrl - 1))
853		phba->eh_sgl_free_index = 0;
854	else
855		phba->eh_sgl_free_index++;
856}
857
858static void
859be_complete_io(struct beiscsi_conn *beiscsi_conn,
860	       struct iscsi_task *task, struct sol_cqe *psol)
861{
862	struct beiscsi_io_task *io_task = task->dd_data;
863	struct be_status_bhs *sts_bhs =
864				(struct be_status_bhs *)io_task->cmd_bhs;
865	struct iscsi_conn *conn = beiscsi_conn->conn;
866	unsigned int sense_len;
867	unsigned char *sense;
868	u32 resid = 0, exp_cmdsn, max_cmdsn;
869	u8 rsp, status, flags;
870
871	exp_cmdsn = (psol->
872			dw[offsetof(struct amap_sol_cqe, i_exp_cmd_sn) / 32]
873			& SOL_EXP_CMD_SN_MASK);
874	max_cmdsn = ((psol->
875			dw[offsetof(struct amap_sol_cqe, i_exp_cmd_sn) / 32]
876			& SOL_EXP_CMD_SN_MASK) +
877			((psol->dw[offsetof(struct amap_sol_cqe, i_cmd_wnd)
878				/ 32] & SOL_CMD_WND_MASK) >> 24) - 1);
879	rsp = ((psol->dw[offsetof(struct amap_sol_cqe, i_resp) / 32]
880						& SOL_RESP_MASK) >> 16);
881	status = ((psol->dw[offsetof(struct amap_sol_cqe, i_sts) / 32]
882						& SOL_STS_MASK) >> 8);
883	flags = ((psol->dw[offsetof(struct amap_sol_cqe, i_flags) / 32]
884					& SOL_FLAGS_MASK) >> 24) | 0x80;
885
886	task->sc->result = (DID_OK << 16) | status;
887	if (rsp != ISCSI_STATUS_CMD_COMPLETED) {
888		task->sc->result = DID_ERROR << 16;
889		goto unmap;
890	}
891
892	/* bidi not initially supported */
893	if (flags & (ISCSI_FLAG_CMD_UNDERFLOW | ISCSI_FLAG_CMD_OVERFLOW)) {
894		resid = (psol->dw[offsetof(struct amap_sol_cqe, i_res_cnt) /
895				32] & SOL_RES_CNT_MASK);
896
897		if (!status && (flags & ISCSI_FLAG_CMD_OVERFLOW))
898			task->sc->result = DID_ERROR << 16;
899
900		if (flags & ISCSI_FLAG_CMD_UNDERFLOW) {
901			scsi_set_resid(task->sc, resid);
902			if (!status && (scsi_bufflen(task->sc) - resid <
903			    task->sc->underflow))
904				task->sc->result = DID_ERROR << 16;
905		}
906	}
907
908	if (status == SAM_STAT_CHECK_CONDITION) {
909		unsigned short *slen = (unsigned short *)sts_bhs->sense_info;
910		sense = sts_bhs->sense_info + sizeof(unsigned short);
911		sense_len =  cpu_to_be16(*slen);
912		memcpy(task->sc->sense_buffer, sense,
913		       min_t(u16, sense_len, SCSI_SENSE_BUFFERSIZE));
914	}
915
916	if (io_task->cmd_bhs->iscsi_hdr.flags & ISCSI_FLAG_CMD_READ) {
917		if (psol->dw[offsetof(struct amap_sol_cqe, i_res_cnt) / 32]
918							& SOL_RES_CNT_MASK)
919			 conn->rxdata_octets += (psol->
920			     dw[offsetof(struct amap_sol_cqe, i_res_cnt) / 32]
921			     & SOL_RES_CNT_MASK);
922	}
923unmap:
924	scsi_dma_unmap(io_task->scsi_cmnd);
925	iscsi_complete_scsi_task(task, exp_cmdsn, max_cmdsn);
926}
927
/*
 * Build a LOGOUT response PDU from the solicited CQE fields and hand it to
 * libiscsi. The hardware consumed the original response, so the header is
 * reconstructed here (fixed t2wait/t2retain, zero data length).
 */
static void
be_complete_logout(struct beiscsi_conn *beiscsi_conn,
		   struct iscsi_task *task, struct sol_cqe *psol)
{
	struct iscsi_logout_rsp *hdr;
	struct beiscsi_io_task *io_task = task->dd_data;
	struct iscsi_conn *conn = beiscsi_conn->conn;

	hdr = (struct iscsi_logout_rsp *)task->hdr;
	hdr->opcode = ISCSI_OP_LOGOUT_RSP;
	hdr->t2wait = 5;
	hdr->t2retain = 0;
	hdr->flags = ((psol->dw[offsetof(struct amap_sol_cqe, i_flags) / 32]
					& SOL_FLAGS_MASK) >> 24) | 0x80;
	hdr->response = (psol->dw[offsetof(struct amap_sol_cqe, i_resp) /
					32] & SOL_RESP_MASK);
	/*
	 * NOTE(review): exp_cmdsn uses cpu_to_be32() while max_cmdsn uses
	 * be32_to_cpu(); the two swaps are bit-identical but the mixed
	 * naming should be unified - confirm intended byte order.
	 */
	hdr->exp_cmdsn = cpu_to_be32(psol->
			dw[offsetof(struct amap_sol_cqe, i_exp_cmd_sn) / 32]
					& SOL_EXP_CMD_SN_MASK);
	hdr->max_cmdsn = be32_to_cpu((psol->
			 dw[offsetof(struct amap_sol_cqe, i_exp_cmd_sn) / 32]
					& SOL_EXP_CMD_SN_MASK) +
			((psol->dw[offsetof(struct amap_sol_cqe, i_cmd_wnd)
					/ 32] & SOL_CMD_WND_MASK) >> 24) - 1);
	hdr->dlength[0] = 0;
	hdr->dlength[1] = 0;
	hdr->dlength[2] = 0;
	hdr->hlength = 0;
	hdr->itt = io_task->libiscsi_itt;
	__iscsi_complete_pdu(conn, (struct iscsi_hdr *)hdr, NULL, 0);
}
959
/*
 * Build a Task Management Function response PDU from the solicited CQE
 * fields and complete it through libiscsi.
 */
static void
be_complete_tmf(struct beiscsi_conn *beiscsi_conn,
		struct iscsi_task *task, struct sol_cqe *psol)
{
	struct iscsi_tm_rsp *hdr;
	struct iscsi_conn *conn = beiscsi_conn->conn;
	struct beiscsi_io_task *io_task = task->dd_data;

	hdr = (struct iscsi_tm_rsp *)task->hdr;
	hdr->opcode = ISCSI_OP_SCSI_TMFUNC_RSP;
	hdr->flags = ((psol->dw[offsetof(struct amap_sol_cqe, i_flags) / 32]
					& SOL_FLAGS_MASK) >> 24) | 0x80;
	hdr->response = (psol->dw[offsetof(struct amap_sol_cqe, i_resp) /
					32] & SOL_RESP_MASK);
	hdr->exp_cmdsn = cpu_to_be32(psol->dw[offsetof(struct amap_sol_cqe,
				    i_exp_cmd_sn) / 32] & SOL_EXP_CMD_SN_MASK);
	/* max_cmdsn = exp_cmdsn + command window - 1 */
	hdr->max_cmdsn = be32_to_cpu((psol->dw[offsetof(struct amap_sol_cqe,
			i_exp_cmd_sn) / 32] & SOL_EXP_CMD_SN_MASK) +
			((psol->dw[offsetof(struct amap_sol_cqe, i_cmd_wnd)
			/ 32] & SOL_CMD_WND_MASK) >> 24) - 1);
	hdr->itt = io_task->libiscsi_itt;
	__iscsi_complete_pdu(conn, (struct iscsi_hdr *)hdr, NULL, 0);
}
983
/*
 * Complete a driver-internal (management) WRB: look up the WRB context and
 * handle from the CQE's cid/wrb_index, then release the management SGL
 * handle and the WRB handle under their respective locks.
 */
static void
hwi_complete_drvr_msgs(struct beiscsi_conn *beiscsi_conn,
		       struct beiscsi_hba *phba, struct sol_cqe *psol)
{
	struct hwi_wrb_context *pwrb_context;
	struct wrb_handle *pwrb_handle = NULL;
	struct hwi_controller *phwi_ctrlr;
	struct iscsi_task *task;
	struct beiscsi_io_task *io_task;
	struct iscsi_conn *conn = beiscsi_conn->conn;
	struct iscsi_session *session = conn->session;

	phwi_ctrlr = phba->phwi_ctrlr;
	/* cid is biased by iscsi_cid_start; index by the relative cid */
	pwrb_context = &phwi_ctrlr->wrb_context[((psol->
				dw[offsetof(struct amap_sol_cqe, cid) / 32] &
				SOL_CID_MASK) >> 6) -
				phba->fw_config.iscsi_cid_start];
	pwrb_handle = pwrb_context->pwrb_handle_basestd[((psol->
				dw[offsetof(struct amap_sol_cqe, wrb_index) /
				32] & SOL_WRB_INDEX_MASK) >> 16)];
	task = pwrb_handle->pio_handle;

	io_task = task->dd_data;
	spin_lock(&phba->mgmt_sgl_lock);
	free_mgmt_sgl_handle(phba, io_task->psgl_handle);
	spin_unlock(&phba->mgmt_sgl_lock);
	/* WRB handles are guarded by the session lock */
	spin_lock_bh(&session->lock);
	free_wrb_handle(phba, pwrb_context, pwrb_handle);
	spin_unlock_bh(&session->lock);
}
1014
/**
 * be_complete_nopin_resp - complete a NOP-In response
 * @beiscsi_conn: driver connection the completion arrived on
 * @task: iSCSI task (the NOP-Out) being completed
 * @psol: solicited CQE from the firmware
 *
 * Rebuilds an iSCSI NOP-In header from the solicited CQE fields and hands
 * it to libiscsi for completion.
 */
static void
be_complete_nopin_resp(struct beiscsi_conn *beiscsi_conn,
		       struct iscsi_task *task, struct sol_cqe *psol)
{
	struct iscsi_nopin *hdr;
	struct iscsi_conn *conn = beiscsi_conn->conn;
	struct beiscsi_io_task *io_task = task->dd_data;

	hdr = (struct iscsi_nopin *)task->hdr;
	/* Flags sit in the top byte of i_flags; 0x80 sets the Final bit. */
	hdr->flags = ((psol->dw[offsetof(struct amap_sol_cqe, i_flags) / 32]
			& SOL_FLAGS_MASK) >> 24) | 0x80;
	hdr->exp_cmdsn = cpu_to_be32(psol->dw[offsetof(struct amap_sol_cqe,
				     i_exp_cmd_sn) / 32] & SOL_EXP_CMD_SN_MASK);
	/* max_cmdsn = exp_cmdsn + command window (top byte of i_cmd_wnd) - 1. */
	hdr->max_cmdsn = be32_to_cpu((psol->dw[offsetof(struct amap_sol_cqe,
			i_exp_cmd_sn) / 32] & SOL_EXP_CMD_SN_MASK) +
			((psol->dw[offsetof(struct amap_sol_cqe, i_cmd_wnd)
			/ 32] & SOL_CMD_WND_MASK) >> 24) - 1);
	hdr->opcode = ISCSI_OP_NOOP_IN;
	/* Report the ITT libiscsi assigned, not the driver-internal one. */
	hdr->itt = io_task->libiscsi_itt;
	__iscsi_complete_pdu(conn, (struct iscsi_hdr *)hdr, NULL, 0);
}
1036
1037static void hwi_complete_cmd(struct beiscsi_conn *beiscsi_conn,
1038			     struct beiscsi_hba *phba, struct sol_cqe *psol)
1039{
1040	struct hwi_wrb_context *pwrb_context;
1041	struct wrb_handle *pwrb_handle;
1042	struct iscsi_wrb *pwrb = NULL;
1043	struct hwi_controller *phwi_ctrlr;
1044	struct iscsi_task *task;
1045	unsigned int type;
1046	struct iscsi_conn *conn = beiscsi_conn->conn;
1047	struct iscsi_session *session = conn->session;
1048
1049	phwi_ctrlr = phba->phwi_ctrlr;
1050	pwrb_context = &phwi_ctrlr->wrb_context[((psol->dw[offsetof
1051				(struct amap_sol_cqe, cid) / 32]
1052				& SOL_CID_MASK) >> 6) -
1053				phba->fw_config.iscsi_cid_start];
1054	pwrb_handle = pwrb_context->pwrb_handle_basestd[((psol->
1055				dw[offsetof(struct amap_sol_cqe, wrb_index) /
1056				32] & SOL_WRB_INDEX_MASK) >> 16)];
1057	task = pwrb_handle->pio_handle;
1058	pwrb = pwrb_handle->pwrb;
1059	type = (pwrb->dw[offsetof(struct amap_iscsi_wrb, type) / 32] &
1060				 WRB_TYPE_MASK) >> 28;
1061
1062	spin_lock_bh(&session->lock);
1063	switch (type) {
1064	case HWH_TYPE_IO:
1065	case HWH_TYPE_IO_RD:
1066		if ((task->hdr->opcode & ISCSI_OPCODE_MASK) ==
1067		     ISCSI_OP_NOOP_OUT)
1068			be_complete_nopin_resp(beiscsi_conn, task, psol);
1069		else
1070			be_complete_io(beiscsi_conn, task, psol);
1071		break;
1072
1073	case HWH_TYPE_LOGOUT:
1074		if ((task->hdr->opcode & ISCSI_OPCODE_MASK) == ISCSI_OP_LOGOUT)
1075			be_complete_logout(beiscsi_conn, task, psol);
1076		else
1077			be_complete_tmf(beiscsi_conn, task, psol);
1078
1079		break;
1080
1081	case HWH_TYPE_LOGIN:
1082		SE_DEBUG(DBG_LVL_1,
1083			 "\t\t No HWH_TYPE_LOGIN Expected in hwi_complete_cmd"
1084			 "- Solicited path \n");
1085		break;
1086
1087	case HWH_TYPE_NOP:
1088		be_complete_nopin_resp(beiscsi_conn, task, psol);
1089		break;
1090
1091	default:
1092		shost_printk(KERN_WARNING, phba->shost,
1093				"In hwi_complete_cmd, unknown type = %d"
1094				"wrb_index 0x%x CID 0x%x\n", type,
1095				((psol->dw[offsetof(struct amap_iscsi_wrb,
1096				type) / 32] & SOL_WRB_INDEX_MASK) >> 16),
1097				((psol->dw[offsetof(struct amap_sol_cqe,
1098				cid) / 32] & SOL_CID_MASK) >> 6));
1099		break;
1100	}
1101
1102	spin_unlock_bh(&session->lock);
1103}
1104
1105static struct list_head *hwi_get_async_busy_list(struct hwi_async_pdu_context
1106					  *pasync_ctx, unsigned int is_header,
1107					  unsigned int host_write_ptr)
1108{
1109	if (is_header)
1110		return &pasync_ctx->async_entry[host_write_ptr].
1111		    header_busy_list;
1112	else
1113		return &pasync_ctx->async_entry[host_write_ptr].data_busy_list;
1114}
1115
/**
 * hwi_get_async_handle - map a default-PDU CQE back to its buffer handle
 * @phba: adapter instance
 * @beiscsi_conn: connection the unsolicited PDU arrived on
 * @pasync_ctx: async PDU context holding the header/data rings
 * @pdpdu_cqe: default-PDU CQE from the firmware
 * @pcq_index: out: ring index reported by the CQE
 *
 * Reconstructs the bus address of the buffer the firmware wrote into,
 * derives the buffer index from its offset within the header or data
 * pool, and finds the matching handle on the slot's busy list.  Also
 * records the owning CRI and the PDU length on the handle.
 *
 * Returns the handle, or NULL on an unexpected CQE code.
 */
static struct async_pdu_handle *
hwi_get_async_handle(struct beiscsi_hba *phba,
		     struct beiscsi_conn *beiscsi_conn,
		     struct hwi_async_pdu_context *pasync_ctx,
		     struct i_t_dpdu_cqe *pdpdu_cqe, unsigned int *pcq_index)
{
	struct be_bus_address phys_addr;
	struct list_head *pbusy_list;
	struct async_pdu_handle *pasync_handle = NULL;
	int buffer_len = 0;
	unsigned char buffer_index = -1;
	unsigned char is_header = 0;

	/* NOTE(review): the dpl (data placement length, bits 16..) is
	 * subtracted from the reported low dword — presumably the CQE
	 * reports an end address and this recovers the buffer base;
	 * confirm against the firmware spec. */
	phys_addr.u.a32.address_lo =
	    pdpdu_cqe->dw[offsetof(struct amap_i_t_dpdu_cqe, db_addr_lo) / 32] -
	    ((pdpdu_cqe->dw[offsetof(struct amap_i_t_dpdu_cqe, dpl) / 32]
						& PDUCQE_DPL_MASK) >> 16);
	phys_addr.u.a32.address_hi =
	    pdpdu_cqe->dw[offsetof(struct amap_i_t_dpdu_cqe, db_addr_hi) / 32];

	/* Self-assignment through the union's 64-bit view; effectively a
	 * no-op combining the two 32-bit halves written above. */
	phys_addr.u.a64.address =
			*((unsigned long long *)(&phys_addr.u.a64.address));

	switch (pdpdu_cqe->dw[offsetof(struct amap_i_t_dpdu_cqe, code) / 32]
			& PDUCQE_CODE_MASK) {
	case UNSOL_HDR_NOTIFY:
		is_header = 1;

		pbusy_list = hwi_get_async_busy_list(pasync_ctx, 1,
			(pdpdu_cqe->dw[offsetof(struct amap_i_t_dpdu_cqe,
			index) / 32] & PDUCQE_INDEX_MASK));

		/* Offset into the header pool gives the buffer index. */
		buffer_len = (unsigned int)(phys_addr.u.a64.address -
				pasync_ctx->async_header.pa_base.u.a64.address);

		buffer_index = buffer_len /
				pasync_ctx->async_header.buffer_size;

		break;
	case UNSOL_DATA_NOTIFY:
		pbusy_list = hwi_get_async_busy_list(pasync_ctx, 0, (pdpdu_cqe->
					dw[offsetof(struct amap_i_t_dpdu_cqe,
					index) / 32] & PDUCQE_INDEX_MASK));
		/* Offset into the data pool gives the buffer index. */
		buffer_len = (unsigned long)(phys_addr.u.a64.address -
					pasync_ctx->async_data.pa_base.u.
					a64.address);
		buffer_index = buffer_len / pasync_ctx->async_data.buffer_size;
		break;
	default:
		pbusy_list = NULL;
		shost_printk(KERN_WARNING, phba->shost,
			"Unexpected code=%d \n",
			 pdpdu_cqe->dw[offsetof(struct amap_i_t_dpdu_cqe,
					code) / 32] & PDUCQE_CODE_MASK);
		return NULL;
	}

	/* NOTE(review): buffer_index is unsigned char, so this sanity
	 * check sees a value truncated to 0..255 — confirm num_entries
	 * never exceeds that. */
	WARN_ON(!(buffer_index <= pasync_ctx->async_data.num_entries));
	WARN_ON(list_empty(pbusy_list));
	/* The handle for this buffer must already be on the slot's busy
	 * list (posted earlier by hwi_post_async_buffers). */
	list_for_each_entry(pasync_handle, pbusy_list, link) {
		WARN_ON(pasync_handle->consumed);
		if (pasync_handle->index == buffer_index)
			break;
	}

	WARN_ON(!pasync_handle);

	/* Record which connection (rebased CRI) owns this PDU fragment. */
	pasync_handle->cri = (unsigned short)beiscsi_conn->beiscsi_conn_cid -
					     phba->fw_config.iscsi_cid_start;
	pasync_handle->is_header = is_header;
	/* dpl (bits 16..) is the number of bytes the firmware placed. */
	pasync_handle->buffer_len = ((pdpdu_cqe->
			dw[offsetof(struct amap_i_t_dpdu_cqe, dpl) / 32]
			& PDUCQE_DPL_MASK) >> 16);

	*pcq_index = (pdpdu_cqe->dw[offsetof(struct amap_i_t_dpdu_cqe,
			index) / 32] & PDUCQE_INDEX_MASK);
	return pasync_handle;
}
1194
/**
 * hwi_update_async_writables - advance the firmware read pointer to a CQE
 * @pasync_ctx: async PDU context
 * @is_header: nonzero for the header ring, zero for the data ring
 * @cq_index: ring index reported by the CQE just received
 *
 * Walks the ring's ep_read_ptr forward until it reaches @cq_index,
 * marking the first handle on each intervening busy list as consumed and
 * counting every step as a slot that can be re-posted (writable).  A walk
 * of zero steps means the same CQE index was seen twice.
 *
 * Always returns 0.
 */
static unsigned int
hwi_update_async_writables(struct hwi_async_pdu_context *pasync_ctx,
			   unsigned int is_header, unsigned int cq_index)
{
	struct list_head *pbusy_list;
	struct async_pdu_handle *pasync_handle;
	unsigned int num_entries, writables = 0;
	unsigned int *pep_read_ptr, *pwritables;


	/* Select the header or data ring's bookkeeping. */
	if (is_header) {
		pep_read_ptr = &pasync_ctx->async_header.ep_read_ptr;
		pwritables = &pasync_ctx->async_header.writables;
		num_entries = pasync_ctx->async_header.num_entries;
	} else {
		pep_read_ptr = &pasync_ctx->async_data.ep_read_ptr;
		pwritables = &pasync_ctx->async_data.writables;
		num_entries = pasync_ctx->async_data.num_entries;
	}

	/* Step the read pointer forward (with wrap) until it catches up
	 * with the index the CQE reported. */
	while ((*pep_read_ptr) != cq_index) {
		(*pep_read_ptr)++;
		*pep_read_ptr = (*pep_read_ptr) % num_entries;

		pbusy_list = hwi_get_async_busy_list(pasync_ctx, is_header,
						     *pep_read_ptr);
		if (writables == 0)
			WARN_ON(list_empty(pbusy_list));

		if (!list_empty(pbusy_list)) {
			pasync_handle = list_entry(pbusy_list->next,
						   struct async_pdu_handle,
						   link);
			WARN_ON(!pasync_handle);
			pasync_handle->consumed = 1;
		}

		writables++;
	}

	/* Zero steps means this CQE index was already processed. */
	if (!writables) {
		SE_DEBUG(DBG_LVL_1,
			 "Duplicate notification received - index 0x%x!!\n",
			 cq_index);
		WARN_ON(1);
	}

	*pwritables = *pwritables + writables;
	return 0;
}
1245
1246static unsigned int hwi_free_async_msg(struct beiscsi_hba *phba,
1247				       unsigned int cri)
1248{
1249	struct hwi_controller *phwi_ctrlr;
1250	struct hwi_async_pdu_context *pasync_ctx;
1251	struct async_pdu_handle *pasync_handle, *tmp_handle;
1252	struct list_head *plist;
1253	unsigned int i = 0;
1254
1255	phwi_ctrlr = phba->phwi_ctrlr;
1256	pasync_ctx = HWI_GET_ASYNC_PDU_CTX(phwi_ctrlr);
1257
1258	plist  = &pasync_ctx->async_entry[cri].wait_queue.list;
1259
1260	list_for_each_entry_safe(pasync_handle, tmp_handle, plist, link) {
1261		list_del(&pasync_handle->link);
1262
1263		if (i == 0) {
1264			list_add_tail(&pasync_handle->link,
1265				      &pasync_ctx->async_header.free_list);
1266			pasync_ctx->async_header.free_entries++;
1267			i++;
1268		} else {
1269			list_add_tail(&pasync_handle->link,
1270				      &pasync_ctx->async_data.free_list);
1271			pasync_ctx->async_data.free_entries++;
1272			i++;
1273		}
1274	}
1275
1276	INIT_LIST_HEAD(&pasync_ctx->async_entry[cri].wait_queue.list);
1277	pasync_ctx->async_entry[cri].wait_queue.hdr_received = 0;
1278	pasync_ctx->async_entry[cri].wait_queue.bytes_received = 0;
1279	return 0;
1280}
1281
1282static struct phys_addr *
1283hwi_get_ring_address(struct hwi_async_pdu_context *pasync_ctx,
1284		     unsigned int is_header, unsigned int host_write_ptr)
1285{
1286	struct phys_addr *pasync_sge = NULL;
1287
1288	if (is_header)
1289		pasync_sge = pasync_ctx->async_header.ring_base;
1290	else
1291		pasync_sge = pasync_ctx->async_data.ring_base;
1292
1293	return pasync_sge + host_write_ptr;
1294}
1295
/**
 * hwi_post_async_buffers - replenish a default PDU ring
 * @phba: adapter instance
 * @is_header: nonzero to replenish the header ring, zero for the data ring
 *
 * Moves free buffer handles back onto the ring's busy lists, writes their
 * bus addresses into the ring SGEs, updates the ring bookkeeping and
 * finally rings the RX doorbell to tell the hardware how many entries
 * were posted.  Posting only happens in multiples of eight entries.
 */
static void hwi_post_async_buffers(struct beiscsi_hba *phba,
				   unsigned int is_header)
{
	struct hwi_controller *phwi_ctrlr;
	struct hwi_async_pdu_context *pasync_ctx;
	struct async_pdu_handle *pasync_handle;
	struct list_head *pfree_link, *pbusy_list;
	struct phys_addr *pasync_sge;
	unsigned int ring_id, num_entries;
	unsigned int host_write_num;
	unsigned int writables;
	unsigned int i = 0;
	u32 doorbell = 0;

	phwi_ctrlr = phba->phwi_ctrlr;
	pasync_ctx = HWI_GET_ASYNC_PDU_CTX(phwi_ctrlr);

	/* Pick up the chosen ring's state: we can only post as many
	 * entries as are both writable (consumed by HW) and free. */
	if (is_header) {
		num_entries = pasync_ctx->async_header.num_entries;
		writables = min(pasync_ctx->async_header.writables,
				pasync_ctx->async_header.free_entries);
		pfree_link = pasync_ctx->async_header.free_list.next;
		host_write_num = pasync_ctx->async_header.host_write_ptr;
		ring_id = phwi_ctrlr->default_pdu_hdr.id;
	} else {
		num_entries = pasync_ctx->async_data.num_entries;
		writables = min(pasync_ctx->async_data.writables,
				pasync_ctx->async_data.free_entries);
		pfree_link = pasync_ctx->async_data.free_list.next;
		host_write_num = pasync_ctx->async_data.host_write_ptr;
		ring_id = phwi_ctrlr->default_pdu_data.id;
	}

	/* Round down to a multiple of 8 entries per doorbell. */
	writables = (writables / 8) * 8;
	if (writables) {
		for (i = 0; i < writables; i++) {
			pbusy_list =
			    hwi_get_async_busy_list(pasync_ctx, is_header,
						    host_write_num);
			pasync_handle =
			    list_entry(pfree_link, struct async_pdu_handle,
								link);
			WARN_ON(!pasync_handle);
			pasync_handle->consumed = 0;

			pfree_link = pfree_link->next;

			pasync_sge = hwi_get_ring_address(pasync_ctx,
						is_header, host_write_num);

			/* NOTE(review): address_lo is written to sge->hi
			 * and address_hi to sge->lo — looks swapped but
			 * presumably matches the hardware ring layout;
			 * confirm against the SGE definition. */
			pasync_sge->hi = pasync_handle->pa.u.a32.address_lo;
			pasync_sge->lo = pasync_handle->pa.u.a32.address_hi;

			list_move(&pasync_handle->link, pbusy_list);

			host_write_num++;
			host_write_num = host_write_num % num_entries;
		}

		/* Commit the new write pointer and adjust the counters. */
		if (is_header) {
			pasync_ctx->async_header.host_write_ptr =
							host_write_num;
			pasync_ctx->async_header.free_entries -= writables;
			pasync_ctx->async_header.writables -= writables;
			pasync_ctx->async_header.busy_entries += writables;
		} else {
			pasync_ctx->async_data.host_write_ptr = host_write_num;
			pasync_ctx->async_data.free_entries -= writables;
			pasync_ctx->async_data.writables -= writables;
			pasync_ctx->async_data.busy_entries += writables;
		}

		/* Ring the RX ULP doorbell with the posted count. */
		doorbell |= ring_id & DB_DEF_PDU_RING_ID_MASK;
		doorbell |= 1 << DB_DEF_PDU_REARM_SHIFT;
		doorbell |= 0 << DB_DEF_PDU_EVENT_SHIFT;
		doorbell |= (writables & DB_DEF_PDU_CQPROC_MASK)
					<< DB_DEF_PDU_CQPROC_SHIFT;

		iowrite32(doorbell, phba->db_va + DB_RXULP0_OFFSET);
	}
}
1377
/**
 * hwi_flush_default_pdu_buffer - drop a data PDU after a digest error
 * @phba: adapter instance
 * @beiscsi_conn: connection the bad PDU arrived on
 * @pdpdu_cqe: default-PDU CQE that flagged the error
 *
 * Identifies the buffer handle for the failed data PDU, updates the ring
 * read pointer if the hardware had not yet consumed it, discards any
 * partially reassembled message for the connection and re-posts buffers.
 */
static void hwi_flush_default_pdu_buffer(struct beiscsi_hba *phba,
					 struct beiscsi_conn *beiscsi_conn,
					 struct i_t_dpdu_cqe *pdpdu_cqe)
{
	struct hwi_controller *phwi_ctrlr;
	struct hwi_async_pdu_context *pasync_ctx;
	struct async_pdu_handle *pasync_handle = NULL;
	unsigned int cq_index = -1;

	phwi_ctrlr = phba->phwi_ctrlr;
	pasync_ctx = HWI_GET_ASYNC_PDU_CTX(phwi_ctrlr);

	/* NOTE(review): hwi_get_async_handle() can return NULL on an
	 * unexpected CQE code; the dereference below would then oops —
	 * confirm such codes cannot reach this path. */
	pasync_handle = hwi_get_async_handle(phba, beiscsi_conn, pasync_ctx,
					     pdpdu_cqe, &cq_index);
	/* Only data-ring CQEs are expected here. */
	BUG_ON(pasync_handle->is_header != 0);
	if (pasync_handle->consumed == 0)
		hwi_update_async_writables(pasync_ctx, pasync_handle->is_header,
					   cq_index);

	/* Throw away whatever was gathered for this connection so far. */
	hwi_free_async_msg(phba, pasync_handle->cri);
	hwi_post_async_buffers(phba, pasync_handle->is_header);
}
1400
1401static unsigned int
1402hwi_fwd_async_msg(struct beiscsi_conn *beiscsi_conn,
1403		  struct beiscsi_hba *phba,
1404		  struct hwi_async_pdu_context *pasync_ctx, unsigned short cri)
1405{
1406	struct list_head *plist;
1407	struct async_pdu_handle *pasync_handle;
1408	void *phdr = NULL;
1409	unsigned int hdr_len = 0, buf_len = 0;
1410	unsigned int status, index = 0, offset = 0;
1411	void *pfirst_buffer = NULL;
1412	unsigned int num_buf = 0;
1413
1414	plist = &pasync_ctx->async_entry[cri].wait_queue.list;
1415
1416	list_for_each_entry(pasync_handle, plist, link) {
1417		if (index == 0) {
1418			phdr = pasync_handle->pbuffer;
1419			hdr_len = pasync_handle->buffer_len;
1420		} else {
1421			buf_len = pasync_handle->buffer_len;
1422			if (!num_buf) {
1423				pfirst_buffer = pasync_handle->pbuffer;
1424				num_buf++;
1425			}
1426			memcpy(pfirst_buffer + offset,
1427			       pasync_handle->pbuffer, buf_len);
1428			offset = buf_len;
1429		}
1430		index++;
1431	}
1432
1433	status = beiscsi_process_async_pdu(beiscsi_conn, phba,
1434					   (beiscsi_conn->beiscsi_conn_cid -
1435					    phba->fw_config.iscsi_cid_start),
1436					    phdr, hdr_len, pfirst_buffer,
1437					    buf_len);
1438
1439	if (status == 0)
1440		hwi_free_async_msg(phba, cri);
1441	return 0;
1442}
1443
/**
 * hwi_gather_async_pdu - account one unsolicited buffer toward a full PDU
 * @beiscsi_conn: connection the buffer belongs to
 * @phba: adapter instance
 * @pasync_handle: handle for the header or data buffer just received
 *
 * Queues the buffer on the connection's wait queue.  A header buffer
 * records how many data bytes are still expected (parsed from the PDU's
 * split data-length field); a data buffer adds to the received count.
 * Once the received bytes cover the expected length the PDU is forwarded
 * via hwi_fwd_async_msg().
 *
 * Returns the forwarding status, or 0 if the PDU is still incomplete.
 */
static unsigned int
hwi_gather_async_pdu(struct beiscsi_conn *beiscsi_conn,
		     struct beiscsi_hba *phba,
		     struct async_pdu_handle *pasync_handle)
{
	struct hwi_async_pdu_context *pasync_ctx;
	struct hwi_controller *phwi_ctrlr;
	unsigned int bytes_needed = 0, status = 0;
	unsigned short cri = pasync_handle->cri;
	struct pdu_base *ppdu;

	phwi_ctrlr = phba->phwi_ctrlr;
	pasync_ctx = HWI_GET_ASYNC_PDU_CTX(phwi_ctrlr);

	/* Take the handle off its busy list; it joins the wait queue. */
	list_del(&pasync_handle->link);
	if (pasync_handle->is_header) {
		pasync_ctx->async_header.busy_entries--;
		/* Two headers without a completed PDU in between means the
		 * reassembly state machine was violated. */
		if (pasync_ctx->async_entry[cri].wait_queue.hdr_received) {
			hwi_free_async_msg(phba, cri);
			BUG();
		}

		pasync_ctx->async_entry[cri].wait_queue.bytes_received = 0;
		pasync_ctx->async_entry[cri].wait_queue.hdr_received = 1;
		pasync_ctx->async_entry[cri].wait_queue.hdr_len =
				(unsigned short)pasync_handle->buffer_len;
		list_add_tail(&pasync_handle->link,
			      &pasync_ctx->async_entry[cri].wait_queue.list);

		/* Assemble the expected data length from the PDU header's
		 * split hi/lo data-length fields. */
		ppdu = pasync_handle->pbuffer;
		bytes_needed = ((((ppdu->dw[offsetof(struct amap_pdu_base,
			data_len_hi) / 32] & PDUBASE_DATALENHI_MASK) << 8) &
			0xFFFF0000) | ((be16_to_cpu((ppdu->
			dw[offsetof(struct amap_pdu_base, data_len_lo) / 32]
			& PDUBASE_DATALENLO_MASK) >> 16)) & 0x0000FFFF));

		if (status == 0) {
			pasync_ctx->async_entry[cri].wait_queue.bytes_needed =
			    bytes_needed;

			/* A header-only PDU is complete immediately. */
			if (bytes_needed == 0)
				status = hwi_fwd_async_msg(beiscsi_conn, phba,
							   pasync_ctx, cri);
		}
	} else {
		pasync_ctx->async_data.busy_entries--;
		/* Data without a preceding header is silently dropped from
		 * the reassembly accounting (handle stays off all lists
		 * until re-posted). */
		if (pasync_ctx->async_entry[cri].wait_queue.hdr_received) {
			list_add_tail(&pasync_handle->link,
				      &pasync_ctx->async_entry[cri].wait_queue.
				      list);
			pasync_ctx->async_entry[cri].wait_queue.
				bytes_received +=
				(unsigned short)pasync_handle->buffer_len;

			if (pasync_ctx->async_entry[cri].wait_queue.
			    bytes_received >=
			    pasync_ctx->async_entry[cri].wait_queue.
			    bytes_needed)
				status = hwi_fwd_async_msg(beiscsi_conn, phba,
							   pasync_ctx, cri);
		}
	}
	return status;
}
1508
/**
 * hwi_process_default_pdu_ring - handle an unsolicited header/data CQE
 * @beiscsi_conn: connection the unsolicited PDU arrived on
 * @phba: adapter instance
 * @pdpdu_cqe: default-PDU CQE to process
 *
 * Resolves the buffer handle for the CQE, advances the ring read pointer
 * if needed, feeds the buffer into PDU reassembly and re-posts buffers
 * to the ring.
 */
static void hwi_process_default_pdu_ring(struct beiscsi_conn *beiscsi_conn,
					 struct beiscsi_hba *phba,
					 struct i_t_dpdu_cqe *pdpdu_cqe)
{
	struct hwi_controller *phwi_ctrlr;
	struct hwi_async_pdu_context *pasync_ctx;
	struct async_pdu_handle *pasync_handle = NULL;
	unsigned int cq_index = -1;

	phwi_ctrlr = phba->phwi_ctrlr;
	pasync_ctx = HWI_GET_ASYNC_PDU_CTX(phwi_ctrlr);
	/* NOTE(review): hwi_get_async_handle() may return NULL on an
	 * unexpected CQE code; the dereference below assumes it cannot
	 * happen here — confirm. */
	pasync_handle = hwi_get_async_handle(phba, beiscsi_conn, pasync_ctx,
					     pdpdu_cqe, &cq_index);

	if (pasync_handle->consumed == 0)
		hwi_update_async_writables(pasync_ctx, pasync_handle->is_header,
					   cq_index);
	hwi_gather_async_pdu(beiscsi_conn, phba, pasync_handle);
	hwi_post_async_buffers(phba, pasync_handle->is_header);
}
1529
/**
 * beiscsi_process_mcc_isr - drain the management command completion queue
 * @phba: adapter instance
 *
 * Walks valid MCC CQEs, dispatching async events (link state changes) and
 * command completions.  The CQ doorbell is rung every 32 entries without
 * re-arming, and once more at the end with re-arm set.
 */
static void  beiscsi_process_mcc_isr(struct beiscsi_hba *phba)
{
	struct be_queue_info *mcc_cq;
	struct  be_mcc_compl *mcc_compl;
	unsigned int num_processed = 0;

	mcc_cq = &phba->ctrl.mcc_obj.cq;
	mcc_compl = queue_tail_node(mcc_cq);
	mcc_compl->flags = le32_to_cpu(mcc_compl->flags);
	while (mcc_compl->flags & CQE_FLAGS_VALID_MASK) {

		/* Batch doorbell updates to at most every 32 CQEs. */
		if (num_processed >= 32) {
			hwi_ring_cq_db(phba, mcc_cq->id,
					num_processed, 0, 0);
			num_processed = 0;
		}
		if (mcc_compl->flags & CQE_FLAGS_ASYNC_MASK) {
			/* Interpret flags as an async trailer */
			if (is_link_state_evt(mcc_compl->flags))
				/* Interpret compl as a async link evt */
				beiscsi_async_link_state_process(phba,
				(struct be_async_event_link_state *) mcc_compl);
			else
				SE_DEBUG(DBG_LVL_1,
					" Unsupported Async Event, flags"
					" = 0x%08x \n", mcc_compl->flags);
		} else if (mcc_compl->flags & CQE_FLAGS_COMPLETED_MASK) {
			be_mcc_compl_process_isr(&phba->ctrl, mcc_compl);
			atomic_dec(&phba->ctrl.mcc_obj.q.used);
		}

		/* Invalidate the entry and step to the next CQE. */
		mcc_compl->flags = 0;
		queue_tail_inc(mcc_cq);
		mcc_compl = queue_tail_node(mcc_cq);
		mcc_compl->flags = le32_to_cpu(mcc_compl->flags);
		num_processed++;
	}

	/* Final doorbell with re-arm so further MCC events interrupt. */
	if (num_processed > 0)
		hwi_ring_cq_db(phba, mcc_cq->id, num_processed, 1, 0);

}
1572
/**
 * beiscsi_process_cq - drain an I/O completion queue
 * @pbe_eq: event queue object whose CQ is processed
 *
 * Walks valid CQEs, resolving the owning connection from the CID, and
 * dispatches by completion code: solicited command completions, driver
 * messages, unsolicited header/data PDUs, digest errors and connection
 * kill notifications.  The CQ doorbell is rung every 32 entries without
 * re-arm and once at the end with re-arm.
 *
 * Returns the total number of CQEs processed.
 */
static unsigned int beiscsi_process_cq(struct be_eq_obj *pbe_eq)
{
	struct be_queue_info *cq;
	struct sol_cqe *sol;
	struct dmsg_cqe *dmsg;
	unsigned int num_processed = 0;
	unsigned int tot_nump = 0;
	struct beiscsi_conn *beiscsi_conn;
	struct beiscsi_endpoint *beiscsi_ep;
	struct iscsi_endpoint *ep;
	struct beiscsi_hba *phba;

	cq = pbe_eq->cq;
	sol = queue_tail_node(cq);
	phba = pbe_eq->phba;

	while (sol->dw[offsetof(struct amap_sol_cqe, valid) / 32] &
	       CQE_VALID_MASK) {
		/* CQEs are little-endian in memory; convert in place. */
		be_dws_le_to_cpu(sol, sizeof(struct sol_cqe));

		/* Map the (rebased) CID to its endpoint and connection. */
		ep = phba->ep_array[(u32) ((sol->
				   dw[offsetof(struct amap_sol_cqe, cid) / 32] &
				   SOL_CID_MASK) >> 6) -
				   phba->fw_config.iscsi_cid_start];

		beiscsi_ep = ep->dd_data;
		beiscsi_conn = beiscsi_ep->conn;

		/* Batch doorbell updates to at most every 32 CQEs. */
		if (num_processed >= 32) {
			hwi_ring_cq_db(phba, cq->id,
					num_processed, 0, 0);
			tot_nump += num_processed;
			num_processed = 0;
		}

		switch ((u32) sol->dw[offsetof(struct amap_sol_cqe, code) /
			32] & CQE_CODE_MASK) {
		case SOL_CMD_COMPLETE:
			hwi_complete_cmd(beiscsi_conn, phba, sol);
			break;
		case DRIVERMSG_NOTIFY:
			SE_DEBUG(DBG_LVL_8, "Received DRIVERMSG_NOTIFY \n");
			dmsg = (struct dmsg_cqe *)sol;
			hwi_complete_drvr_msgs(beiscsi_conn, phba, sol);
			break;
		case UNSOL_HDR_NOTIFY:
			SE_DEBUG(DBG_LVL_8, "Received UNSOL_HDR_ NOTIFY\n");
			hwi_process_default_pdu_ring(beiscsi_conn, phba,
					     (struct i_t_dpdu_cqe *)sol);
			break;
		case UNSOL_DATA_NOTIFY:
			SE_DEBUG(DBG_LVL_8, "Received UNSOL_DATA_NOTIFY\n");
			hwi_process_default_pdu_ring(beiscsi_conn, phba,
					     (struct i_t_dpdu_cqe *)sol);
			break;
		case CXN_INVALIDATE_INDEX_NOTIFY:
		case CMD_INVALIDATED_NOTIFY:
		case CXN_INVALIDATE_NOTIFY:
			/* Expected side effects of driver-initiated
			 * invalidation; nothing to do. */
			SE_DEBUG(DBG_LVL_1,
				 "Ignoring CQ Error notification for cmd/cxn"
				 "invalidate\n");
			break;
		case SOL_CMD_KILLED_DATA_DIGEST_ERR:
		case CMD_KILLED_INVALID_STATSN_RCVD:
		case CMD_KILLED_INVALID_R2T_RCVD:
		case CMD_CXN_KILLED_LUN_INVALID:
		case CMD_CXN_KILLED_ICD_INVALID:
		case CMD_CXN_KILLED_ITT_INVALID:
		case CMD_CXN_KILLED_SEQ_OUTOFORDER:
		case CMD_CXN_KILLED_INVALID_DATASN_RCVD:
			/* Per-command errors: log only; recovery is driven
			 * by the upper layers. */
			SE_DEBUG(DBG_LVL_1,
				 "CQ Error notification for cmd.. "
				 "code %d cid 0x%x\n",
				 sol->dw[offsetof(struct amap_sol_cqe, code) /
				 32] & CQE_CODE_MASK,
				 (sol->dw[offsetof(struct amap_sol_cqe, cid) /
				 32] & SOL_CID_MASK));
			break;
		case UNSOL_DATA_DIGEST_ERROR_NOTIFY:
			SE_DEBUG(DBG_LVL_1,
				 "Digest error on def pdu ring, dropping..\n");
			hwi_flush_default_pdu_buffer(phba, beiscsi_conn,
					     (struct i_t_dpdu_cqe *) sol);
			break;
		case CXN_KILLED_PDU_SIZE_EXCEEDS_DSL:
		case CXN_KILLED_BURST_LEN_MISMATCH:
		case CXN_KILLED_AHS_RCVD:
		case CXN_KILLED_HDR_DIGEST_ERR:
		case CXN_KILLED_UNKNOWN_HDR:
		case CXN_KILLED_STALE_ITT_TTT_RCVD:
		case CXN_KILLED_INVALID_ITT_TTT_RCVD:
		case CXN_KILLED_TIMED_OUT:
		case CXN_KILLED_FIN_RCVD:
		case CXN_KILLED_BAD_UNSOL_PDU_RCVD:
		case CXN_KILLED_BAD_WRB_INDEX_ERROR:
		case CXN_KILLED_OVER_RUN_RESIDUAL:
		case CXN_KILLED_UNDER_RUN_RESIDUAL:
		case CXN_KILLED_CMND_DATA_NOT_ON_SAME_CONN:
			/* Fatal per-connection errors: fail the connection
			 * so libiscsi initiates recovery. */
			SE_DEBUG(DBG_LVL_1, "CQ Error %d, reset CID "
				 "0x%x...\n",
				 sol->dw[offsetof(struct amap_sol_cqe, code) /
				 32] & CQE_CODE_MASK,
				 (sol->dw[offsetof(struct amap_sol_cqe, cid) /
				 32] & CQE_CID_MASK));
			iscsi_conn_failure(beiscsi_conn->conn,
					   ISCSI_ERR_CONN_FAILED);
			break;
		case CXN_KILLED_RST_SENT:
		case CXN_KILLED_RST_RCVD:
			SE_DEBUG(DBG_LVL_1, "CQ Error %d, reset"
				"received/sent on CID 0x%x...\n",
				 sol->dw[offsetof(struct amap_sol_cqe, code) /
				 32] & CQE_CODE_MASK,
				 (sol->dw[offsetof(struct amap_sol_cqe, cid) /
				 32] & CQE_CID_MASK));
			iscsi_conn_failure(beiscsi_conn->conn,
					   ISCSI_ERR_CONN_FAILED);
			break;
		default:
			SE_DEBUG(DBG_LVL_1, "CQ Error Invalid code= %d "
				 "received on CID 0x%x...\n",
				 sol->dw[offsetof(struct amap_sol_cqe, code) /
				 32] & CQE_CODE_MASK,
				 (sol->dw[offsetof(struct amap_sol_cqe, cid) /
				 32] & CQE_CID_MASK));
			break;
		}

		/* Invalidate the CQE and advance to the next one. */
		AMAP_SET_BITS(struct amap_sol_cqe, valid, sol, 0);
		queue_tail_inc(cq);
		sol = queue_tail_node(cq);
		num_processed++;
	}

	/* Final doorbell with re-arm so new completions raise events. */
	if (num_processed > 0) {
		tot_nump += num_processed;
		hwi_ring_cq_db(phba, cq->id, num_processed, 1, 0);
	}
	return tot_nump;
}
1713
/**
 * beiscsi_process_all_cqs - workqueue handler for deferred CQ processing
 * @work: embedded work_struct inside struct beiscsi_hba
 *
 * Runs from process context after the ISR flagged pending work.  Clears
 * the todo flags under the ISR lock, then drains the MCC CQ and/or the
 * I/O CQ.  With MSI-X the MCC event queue is the one after the per-CPU
 * I/O queues; otherwise queue 0 is shared.
 */
void beiscsi_process_all_cqs(struct work_struct *work)
{
	unsigned long flags;
	struct hwi_controller *phwi_ctrlr;
	struct hwi_context_memory *phwi_context;
	struct be_eq_obj *pbe_eq;
	struct beiscsi_hba *phba =
	    container_of(work, struct beiscsi_hba, work_cqs);

	phwi_ctrlr = phba->phwi_ctrlr;
	phwi_context = phwi_ctrlr->phwi_ctxt;
	if (phba->msix_enabled)
		pbe_eq = &phwi_context->be_eq[phba->num_cpus];
	else
		pbe_eq = &phwi_context->be_eq[0];

	/* Consume the ISR-set flags under the lock, process outside it. */
	if (phba->todo_mcc_cq) {
		spin_lock_irqsave(&phba->isr_lock, flags);
		phba->todo_mcc_cq = 0;
		spin_unlock_irqrestore(&phba->isr_lock, flags);
		beiscsi_process_mcc_isr(phba);
	}

	if (phba->todo_cq) {
		spin_lock_irqsave(&phba->isr_lock, flags);
		phba->todo_cq = 0;
		spin_unlock_irqrestore(&phba->isr_lock, flags);
		beiscsi_process_cq(pbe_eq);
	}
}
1744
1745static int be_iopoll(struct blk_iopoll *iop, int budget)
1746{
1747	static unsigned int ret;
1748	struct beiscsi_hba *phba;
1749	struct be_eq_obj *pbe_eq;
1750
1751	pbe_eq = container_of(iop, struct be_eq_obj, iopoll);
1752	ret = beiscsi_process_cq(pbe_eq);
1753	if (ret < budget) {
1754		phba = pbe_eq->phba;
1755		blk_iopoll_complete(iop);
1756		SE_DEBUG(DBG_LVL_8, "rearm pbe_eq->q.id =%d\n", pbe_eq->q.id);
1757		hwi_ring_eq_db(phba, pbe_eq->q.id, 0, 0, 1, 1);
1758	}
1759	return ret;
1760}
1761
/**
 * hwi_write_sgl - describe an I/O task's scatter list to the hardware
 * @pwrb: work request block to fill in
 * @sg: DMA-mapped scatterlist for the task's data
 * @num_sg: number of entries in @sg
 * @io_task: driver task state holding the BHS and the SGL fragment
 *
 * Writes the BHS address into the WRB, inlines up to the first two SG
 * entries directly into the WRB (sge0/sge1), then builds the full SGL
 * fragment the firmware walks: one entry for the BHS followed by one per
 * SG element, with last_sge set on the final entry.
 */
static void
hwi_write_sgl(struct iscsi_wrb *pwrb, struct scatterlist *sg,
	      unsigned int num_sg, struct beiscsi_io_task *io_task)
{
	struct iscsi_sge *psgl;
	unsigned short sg_len, index;
	unsigned int sge_len = 0;
	unsigned long long addr;
	struct scatterlist *l_sg;
	unsigned int offset;

	AMAP_SET_BITS(struct amap_iscsi_wrb, iscsi_bhs_addr_lo, pwrb,
				      io_task->bhs_pa.u.a32.address_lo);
	AMAP_SET_BITS(struct amap_iscsi_wrb, iscsi_bhs_addr_hi, pwrb,
				      io_task->bhs_pa.u.a32.address_hi);

	/* Inline the first two SG elements directly into the WRB. */
	l_sg = sg;
	for (index = 0; (index < num_sg) && (index < 2); index++,
							 sg = sg_next(sg)) {
		if (index == 0) {
			sg_len = sg_dma_len(sg);
			addr = (u64) sg_dma_address(sg);
			AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_addr_lo, pwrb,
							(addr & 0xFFFFFFFF));
			AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_addr_hi, pwrb,
							(addr >> 32));
			AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_len, pwrb,
							sg_len);
			sge_len = sg_len;
		} else {
			/* sge1's r2t offset is where sge0's data ended. */
			AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_r2t_offset,
							pwrb, sge_len);
			sg_len = sg_dma_len(sg);
			addr = (u64) sg_dma_address(sg);
			AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_addr_lo, pwrb,
							(addr & 0xFFFFFFFF));
			AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_addr_hi, pwrb,
							(addr >> 32));
			AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_len, pwrb,
							sg_len);
		}
	}
	/* Build the SGL fragment: BHS entry first. */
	psgl = (struct iscsi_sge *)io_task->psgl_handle->pfrag;
	memset(psgl, 0, sizeof(*psgl) * BE2_SGE);

	AMAP_SET_BITS(struct amap_iscsi_sge, len, psgl, io_task->bhs_len - 2);

	AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, psgl,
			io_task->bhs_pa.u.a32.address_hi);
	AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, psgl,
			io_task->bhs_pa.u.a32.address_lo);

	/* Mark which (if any) of the inlined WRB SGEs is the last one. */
	if (num_sg == 1) {
		AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_last, pwrb,
								1);
		AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_last, pwrb,
								0);
	} else if (num_sg == 2) {
		AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_last, pwrb,
								0);
		AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_last, pwrb,
								1);
	} else {
		AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_last, pwrb,
								0);
		AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_last, pwrb,
								0);
	}
	/* Restart at the head of the scatterlist and emit one SGL entry
	 * per SG element, skipping two fragment slots past the BHS entry. */
	sg = l_sg;
	psgl++;
	psgl++;
	offset = 0;
	for (index = 0; index < num_sg; index++, sg = sg_next(sg), psgl++) {
		sg_len = sg_dma_len(sg);
		addr = (u64) sg_dma_address(sg);
		AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, psgl,
						(addr & 0xFFFFFFFF));
		AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, psgl,
						(addr >> 32));
		AMAP_SET_BITS(struct amap_iscsi_sge, len, psgl, sg_len);
		AMAP_SET_BITS(struct amap_iscsi_sge, sge_offset, psgl, offset);
		AMAP_SET_BITS(struct amap_iscsi_sge, last_sge, psgl, 0);
		offset += sg_len;
	}
	/* Flag the final data SGE. */
	psgl--;
	AMAP_SET_BITS(struct amap_iscsi_sge, last_sge, psgl, 1);
}
1849
1850static void hwi_write_buffer(struct iscsi_wrb *pwrb, struct iscsi_task *task)
1851{
1852	struct iscsi_sge *psgl;
1853	unsigned long long addr;
1854	struct beiscsi_io_task *io_task = task->dd_data;
1855	struct beiscsi_conn *beiscsi_conn = io_task->conn;
1856	struct beiscsi_hba *phba = beiscsi_conn->phba;
1857
1858	io_task->bhs_len = sizeof(struct be_nonio_bhs) - 2;
1859	AMAP_SET_BITS(struct amap_iscsi_wrb, iscsi_bhs_addr_lo, pwrb,
1860				io_task->bhs_pa.u.a32.address_lo);
1861	AMAP_SET_BITS(struct amap_iscsi_wrb, iscsi_bhs_addr_hi, pwrb,
1862				io_task->bhs_pa.u.a32.address_hi);
1863
1864	if (task->data) {
1865		if (task->data_count) {
1866			AMAP_SET_BITS(struct amap_iscsi_wrb, dsp, pwrb, 1);
1867			addr = (u64) pci_map_single(phba->pcidev,
1868						    task->data,
1869						    task->data_count, 1);
1870		} else {
1871			AMAP_SET_BITS(struct amap_iscsi_wrb, dsp, pwrb, 0);
1872			addr = 0;
1873		}
1874		AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_addr_lo, pwrb,
1875						(addr & 0xFFFFFFFF));
1876		AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_addr_hi, pwrb,
1877						(addr >> 32));
1878		AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_len, pwrb,
1879						task->data_count);
1880
1881		AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_last, pwrb, 1);
1882	} else {
1883		AMAP_SET_BITS(struct amap_iscsi_wrb, dsp, pwrb, 0);
1884		addr = 0;
1885	}
1886
1887	psgl = (struct iscsi_sge *)io_task->psgl_handle->pfrag;
1888
1889	AMAP_SET_BITS(struct amap_iscsi_sge, len, psgl, io_task->bhs_len);
1890
1891	AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, psgl,
1892		      io_task->bhs_pa.u.a32.address_hi);
1893	AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, psgl,
1894		      io_task->bhs_pa.u.a32.address_lo);
1895	if (task->data) {
1896		psgl++;
1897		AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, psgl, 0);
1898		AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, psgl, 0);
1899		AMAP_SET_BITS(struct amap_iscsi_sge, len, psgl, 0);
1900		AMAP_SET_BITS(struct amap_iscsi_sge, sge_offset, psgl, 0);
1901		AMAP_SET_BITS(struct amap_iscsi_sge, rsvd0, psgl, 0);
1902		AMAP_SET_BITS(struct amap_iscsi_sge, last_sge, psgl, 0);
1903
1904		psgl++;
1905		if (task->data) {
1906			AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, psgl,
1907						(addr & 0xFFFFFFFF));
1908			AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, psgl,
1909						(addr >> 32));
1910		}
1911		AMAP_SET_BITS(struct amap_iscsi_sge, len, psgl, 0x106);
1912	}
1913	AMAP_SET_BITS(struct amap_iscsi_sge, last_sge, psgl, 1);
1914}
1915
1916static void beiscsi_find_mem_req(struct beiscsi_hba *phba)
1917{
1918	unsigned int num_cq_pages, num_async_pdu_buf_pages;
1919	unsigned int num_async_pdu_data_pages, wrb_sz_per_cxn;
1920	unsigned int num_async_pdu_buf_sgl_pages, num_async_pdu_data_sgl_pages;
1921
1922	num_cq_pages = PAGES_REQUIRED(phba->params.num_cq_entries * \
1923				      sizeof(struct sol_cqe));
1924	num_async_pdu_buf_pages =
1925			PAGES_REQUIRED(phba->params.asyncpdus_per_ctrl * \
1926				       phba->params.defpdu_hdr_sz);
1927	num_async_pdu_buf_sgl_pages =
1928			PAGES_REQUIRED(phba->params.asyncpdus_per_ctrl * \
1929				       sizeof(struct phys_addr));
1930	num_async_pdu_data_pages =
1931			PAGES_REQUIRED(phba->params.asyncpdus_per_ctrl * \
1932				       phba->params.defpdu_data_sz);
1933	num_async_pdu_data_sgl_pages =
1934			PAGES_REQUIRED(phba->params.asyncpdus_per_ctrl * \
1935				       sizeof(struct phys_addr));
1936
1937	phba->params.hwi_ws_sz = sizeof(struct hwi_controller);
1938
1939	phba->mem_req[ISCSI_MEM_GLOBAL_HEADER] = 2 *
1940						 BE_ISCSI_PDU_HEADER_SIZE;
1941	phba->mem_req[HWI_MEM_ADDN_CONTEXT] =
1942					    sizeof(struct hwi_context_memory);
1943
1944
1945	phba->mem_req[HWI_MEM_WRB] = sizeof(struct iscsi_wrb)
1946	    * (phba->params.wrbs_per_cxn)
1947	    * phba->params.cxns_per_ctrl;
1948	wrb_sz_per_cxn =  sizeof(struct wrb_handle) *
1949				 (phba->params.wrbs_per_cxn);
1950	phba->mem_req[HWI_MEM_WRBH] = roundup_pow_of_two((wrb_sz_per_cxn) *
1951				phba->params.cxns_per_ctrl);
1952
1953	phba->mem_req[HWI_MEM_SGLH] = sizeof(struct sgl_handle) *
1954		phba->params.icds_per_ctrl;
1955	phba->mem_req[HWI_MEM_SGE] = sizeof(struct iscsi_sge) *
1956		phba->params.num_sge_per_io * phba->params.icds_per_ctrl;
1957
1958	phba->mem_req[HWI_MEM_ASYNC_HEADER_BUF] =
1959		num_async_pdu_buf_pages * PAGE_SIZE;
1960	phba->mem_req[HWI_MEM_ASYNC_DATA_BUF] =
1961		num_async_pdu_data_pages * PAGE_SIZE;
1962	phba->mem_req[HWI_MEM_ASYNC_HEADER_RING] =
1963		num_async_pdu_buf_sgl_pages * PAGE_SIZE;
1964	phba->mem_req[HWI_MEM_ASYNC_DATA_RING] =
1965		num_async_pdu_data_sgl_pages * PAGE_SIZE;
1966	phba->mem_req[HWI_MEM_ASYNC_HEADER_HANDLE] =
1967		phba->params.asyncpdus_per_ctrl *
1968		sizeof(struct async_pdu_handle);
1969	phba->mem_req[HWI_MEM_ASYNC_DATA_HANDLE] =
1970		phba->params.asyncpdus_per_ctrl *
1971		sizeof(struct async_pdu_handle);
1972	phba->mem_req[HWI_MEM_ASYNC_PDU_CONTEXT] =
1973		sizeof(struct hwi_async_pdu_context) +
1974		(phba->params.cxns_per_ctrl * sizeof(struct hwi_async_entry));
1975}
1976
/**
 * beiscsi_alloc_mem - allocate the DMA regions sized by beiscsi_find_mem_req().
 * @phba: driver instance; phba->mem_req[] holds the per-region byte counts.
 *
 * Each of the SE_MEM_MAX regions is carved out of up to BEISCSI_MAX_FRAGS_INIT
 * coherent fragments.  Allocation for a region starts at
 * min(be_max_phys_size KB, bytes remaining) and, on failure, shrinks the
 * attempt (round down to a power of two, else halve) until it would drop to
 * BE_MIN_MEM_SIZE or below, at which point the whole initialization fails.
 *
 * On success phba->phwi_ctrlr and phba->init_mem are populated and 0 is
 * returned; on failure everything allocated so far is released and -ENOMEM
 * is returned.
 */
static int beiscsi_alloc_mem(struct beiscsi_hba *phba)
{
	struct be_mem_descriptor *mem_descr;
	dma_addr_t bus_add;
	struct mem_array *mem_arr, *mem_arr_orig;
	unsigned int i, j, alloc_size, curr_alloc_size;

	/* Workspace for the hwi_controller (sized by find_mem_req above). */
	phba->phwi_ctrlr = kmalloc(phba->params.hwi_ws_sz, GFP_KERNEL);
	if (!phba->phwi_ctrlr)
		return -ENOMEM;

	/* One zeroed descriptor per region; mem_array pointers start NULL. */
	phba->init_mem = kcalloc(SE_MEM_MAX, sizeof(*mem_descr),
				 GFP_KERNEL);
	if (!phba->init_mem) {
		kfree(phba->phwi_ctrlr);
		return -ENOMEM;
	}

	/* Scratch fragment table, reused for every region. */
	mem_arr_orig = kmalloc(sizeof(*mem_arr_orig) * BEISCSI_MAX_FRAGS_INIT,
			       GFP_KERNEL);
	if (!mem_arr_orig) {
		kfree(phba->init_mem);
		kfree(phba->phwi_ctrlr);
		return -ENOMEM;
	}

	mem_descr = phba->init_mem;
	for (i = 0; i < SE_MEM_MAX; i++) {
		j = 0;
		mem_arr = mem_arr_orig;
		alloc_size = phba->mem_req[i];
		memset(mem_arr, 0, sizeof(struct mem_array) *
		       BEISCSI_MAX_FRAGS_INIT);
		/* be_max_phys_size is a module parameter in kilobytes. */
		curr_alloc_size = min(be_max_phys_size * 1024, alloc_size);
		do {
			mem_arr->virtual_address = pci_alloc_consistent(
							phba->pcidev,
							curr_alloc_size,
							&bus_add);
			if (!mem_arr->virtual_address) {
				if (curr_alloc_size <= BE_MIN_MEM_SIZE)
					goto free_mem;
				/* Retry smaller: round down to a power of
				 * two, or halve if already one. */
				if (curr_alloc_size -
					rounddown_pow_of_two(curr_alloc_size))
					curr_alloc_size = rounddown_pow_of_two
							     (curr_alloc_size);
				else
					curr_alloc_size = curr_alloc_size / 2;
			} else {
				mem_arr->bus_address.u.
				    a64.address = (__u64) bus_add;
				mem_arr->size = curr_alloc_size;
				alloc_size -= curr_alloc_size;
				curr_alloc_size = min(be_max_phys_size *
						      1024, alloc_size);
				j++;
				mem_arr++;
			}
		} while (alloc_size);
		mem_descr->num_elements = j;
		mem_descr->size_in_bytes = phba->mem_req[i];
		/* Persist the fragment list for this region. */
		mem_descr->mem_array = kmalloc(sizeof(*mem_arr) * j,
					       GFP_KERNEL);
		if (!mem_descr->mem_array)
			goto free_mem;

		memcpy(mem_descr->mem_array, mem_arr_orig,
		       sizeof(struct mem_array) * j);
		mem_descr++;
	}
	kfree(mem_arr_orig);
	return 0;
free_mem:
	/*
	 * Unwind: free the fragments of each fully-recorded region, walking
	 * descriptors backwards.
	 *
	 * NOTE(review): for the descriptor that failed mid-region, the j
	 * fragments recorded so far live only in mem_arr_orig, while this
	 * loop reads mem_descr->mem_array, which kcalloc left NULL for that
	 * descriptor — if j > 0 on entry this dereferences NULL and the
	 * partial fragments leak.  Verify against upstream history.
	 */
	mem_descr->num_elements = j;
	while ((i) || (j)) {
		for (j = mem_descr->num_elements; j > 0; j--) {
			pci_free_consistent(phba->pcidev,
					    mem_descr->mem_array[j - 1].size,
					    mem_descr->mem_array[j - 1].
					    virtual_address,
					    mem_descr->mem_array[j - 1].
					    bus_address.u.a64.address);
		}
		if (i) {
			i--;
			kfree(mem_descr->mem_array);
			mem_descr--;
		}
	}
	kfree(mem_arr_orig);
	kfree(phba->init_mem);
	kfree(phba->phwi_ctrlr);
	return -ENOMEM;
}
2071
/*
 * beiscsi_get_memory - size and then allocate the driver's private regions.
 *
 * Returns 0 on success or a negative errno from beiscsi_alloc_mem().
 */
static int beiscsi_get_memory(struct beiscsi_hba *phba)
{
	int status;

	beiscsi_find_mem_req(phba);
	status = beiscsi_alloc_mem(phba);

	return status;
}
2077
2078static void iscsi_init_global_templates(struct beiscsi_hba *phba)
2079{
2080	struct pdu_data_out *pdata_out;
2081	struct pdu_nop_out *pnop_out;
2082	struct be_mem_descriptor *mem_descr;
2083
2084	mem_descr = phba->init_mem;
2085	mem_descr += ISCSI_MEM_GLOBAL_HEADER;
2086	pdata_out =
2087	    (struct pdu_data_out *)mem_descr->mem_array[0].virtual_address;
2088	memset(pdata_out, 0, BE_ISCSI_PDU_HEADER_SIZE);
2089
2090	AMAP_SET_BITS(struct amap_pdu_data_out, opcode, pdata_out,
2091		      IIOC_SCSI_DATA);
2092
2093	pnop_out =
2094	    (struct pdu_nop_out *)((unsigned char *)mem_descr->mem_array[0].
2095				   virtual_address + BE_ISCSI_PDU_HEADER_SIZE);
2096
2097	memset(pnop_out, 0, BE_ISCSI_PDU_HEADER_SIZE);
2098	AMAP_SET_BITS(struct amap_pdu_nop_out, ttt, pnop_out, 0xFFFFFFFF);
2099	AMAP_SET_BITS(struct amap_pdu_nop_out, f_bit, pnop_out, 1);
2100	AMAP_SET_BITS(struct amap_pdu_nop_out, i_bit, pnop_out, 0);
2101}
2102
/**
 * beiscsi_init_wrb_handle - carve WRB handles and WRBs out of their pools.
 * @phba: driver instance.
 *
 * Pass 1 walks the HWI_MEM_WRBH fragments and hands each (even-indexed)
 * wrb_context a table of wrbs_per_cxn handle pointers.  Pass 2 walks the
 * HWI_MEM_WRB fragments and attaches one iscsi_wrb to every handle.  Both
 * pools may span several fragments, so each pass tracks how many whole
 * per-connection slices remain in the current fragment and advances idx to
 * the next fragment when the count runs out.
 *
 * NOTE(review): the two kzalloc() results below are not checked for NULL
 * before the tables are filled in — confirm against upstream whether this
 * was later converted to a failable (int-returning) init.
 */
static void beiscsi_init_wrb_handle(struct beiscsi_hba *phba)
{
	struct be_mem_descriptor *mem_descr_wrbh, *mem_descr_wrb;
	struct wrb_handle *pwrb_handle;
	struct hwi_controller *phwi_ctrlr;
	struct hwi_wrb_context *pwrb_context;
	struct iscsi_wrb *pwrb;
	unsigned int num_cxn_wrbh;
	unsigned int num_cxn_wrb, j, idx, index;

	mem_descr_wrbh = phba->init_mem;
	mem_descr_wrbh += HWI_MEM_WRBH;

	mem_descr_wrb = phba->init_mem;
	mem_descr_wrb += HWI_MEM_WRB;

	/* Pass 1: distribute wrb_handle structures to each connection. */
	idx = 0;
	pwrb_handle = mem_descr_wrbh->mem_array[idx].virtual_address;
	/* Whole per-connection handle slices left in this fragment. */
	num_cxn_wrbh = ((mem_descr_wrbh->mem_array[idx].size) /
			((sizeof(struct wrb_handle)) *
			 phba->params.wrbs_per_cxn));
	phwi_ctrlr = phba->phwi_ctrlr;

	/* Only even wrb_context slots are used (one per connection). */
	for (index = 0; index < phba->params.cxns_per_ctrl * 2; index += 2) {
		pwrb_context = &phwi_ctrlr->wrb_context[index];
		pwrb_context->pwrb_handle_base =
				kzalloc(sizeof(struct wrb_handle *) *
					phba->params.wrbs_per_cxn, GFP_KERNEL);
		pwrb_context->pwrb_handle_basestd =
				kzalloc(sizeof(struct wrb_handle *) *
					phba->params.wrbs_per_cxn, GFP_KERNEL);
		if (num_cxn_wrbh) {
			pwrb_context->alloc_index = 0;
			pwrb_context->wrb_handles_available = 0;
			for (j = 0; j < phba->params.wrbs_per_cxn; j++) {
				pwrb_context->pwrb_handle_base[j] = pwrb_handle;
				pwrb_context->pwrb_handle_basestd[j] =
								pwrb_handle;
				pwrb_context->wrb_handles_available++;
				pwrb_handle->wrb_index = j;
				pwrb_handle++;
			}
			pwrb_context->free_index = 0;
			num_cxn_wrbh--;
		} else {
			/* Current fragment exhausted: move to the next one. */
			idx++;
			pwrb_handle =
			    mem_descr_wrbh->mem_array[idx].virtual_address;
			num_cxn_wrbh =
			    ((mem_descr_wrbh->mem_array[idx].size) /
			     ((sizeof(struct wrb_handle)) *
			      phba->params.wrbs_per_cxn));
			pwrb_context->alloc_index = 0;
			for (j = 0; j < phba->params.wrbs_per_cxn; j++) {
				pwrb_context->pwrb_handle_base[j] = pwrb_handle;
				pwrb_context->pwrb_handle_basestd[j] =
				    pwrb_handle;
				pwrb_context->wrb_handles_available++;
				pwrb_handle->wrb_index = j;
				pwrb_handle++;
			}
			pwrb_context->free_index = 0;
			num_cxn_wrbh--;
		}
	}
	/* Pass 2: attach one iscsi_wrb to every handle created above. */
	idx = 0;
	pwrb = mem_descr_wrb->mem_array[idx].virtual_address;
	num_cxn_wrb = (mem_descr_wrb->mem_array[idx].size) /
		      ((sizeof(struct iscsi_wrb) *
			phba->params.wrbs_per_cxn));
	for (index = 0; index < phba->params.cxns_per_ctrl * 2; index += 2) {
		pwrb_context = &phwi_ctrlr->wrb_context[index];
		if (num_cxn_wrb) {
			for (j = 0; j < phba->params.wrbs_per_cxn; j++) {
				pwrb_handle = pwrb_context->pwrb_handle_base[j];
				pwrb_handle->pwrb = pwrb;
				pwrb++;
			}
			num_cxn_wrb--;
		} else {
			/* Current fragment exhausted: move to the next one. */
			idx++;
			pwrb = mem_descr_wrb->mem_array[idx].virtual_address;
			num_cxn_wrb = (mem_descr_wrb->mem_array[idx].size) /
				      ((sizeof(struct iscsi_wrb) *
					phba->params.wrbs_per_cxn));
			for (j = 0; j < phba->params.wrbs_per_cxn; j++) {
				pwrb_handle = pwrb_context->pwrb_handle_base[j];
				pwrb_handle->pwrb = pwrb;
				pwrb++;
			}
			num_cxn_wrb--;
		}
	}
}
2197
/**
 * hwi_init_async_pdu_ctx - wire up the async (default) PDU context.
 * @phba: driver instance.
 *
 * Points the hwi_async_pdu_context (HWI_MEM_ASYNC_PDU_CONTEXT region) at the
 * header/data buffer pools, their rings, and their handle arrays, then
 * initializes one async_pdu_handle per asyncpdus_per_ctrl for both headers
 * and data, placing every handle on the respective free list.
 *
 * Each region is assumed to be physically contiguous here: only
 * mem_array[0] of every descriptor is consulted.
 */
static void hwi_init_async_pdu_ctx(struct beiscsi_hba *phba)
{
	struct hwi_controller *phwi_ctrlr;
	struct hba_parameters *p = &phba->params;
	struct hwi_async_pdu_context *pasync_ctx;
	struct async_pdu_handle *pasync_header_h, *pasync_data_h;
	unsigned int index;
	struct be_mem_descriptor *mem_descr;

	mem_descr = (struct be_mem_descriptor *)phba->init_mem;
	mem_descr += HWI_MEM_ASYNC_PDU_CONTEXT;

	phwi_ctrlr = phba->phwi_ctrlr;
	phwi_ctrlr->phwi_ctxt->pasync_ctx = (struct hwi_async_pdu_context *)
				mem_descr->mem_array[0].virtual_address;
	pasync_ctx = phwi_ctrlr->phwi_ctxt->pasync_ctx;
	memset(pasync_ctx, 0, sizeof(*pasync_ctx));

	pasync_ctx->async_header.num_entries = p->asyncpdus_per_ctrl;
	pasync_ctx->async_header.buffer_size = p->defpdu_hdr_sz;
	pasync_ctx->async_data.buffer_size = p->defpdu_data_sz;
	pasync_ctx->async_data.num_entries = p->asyncpdus_per_ctrl;

	/* Header buffer pool (virtual + bus address of the first fragment). */
	mem_descr = (struct be_mem_descriptor *)phba->init_mem;
	mem_descr += HWI_MEM_ASYNC_HEADER_BUF;
	if (mem_descr->mem_array[0].virtual_address) {
		SE_DEBUG(DBG_LVL_8,
			 "hwi_init_async_pdu_ctx HWI_MEM_ASYNC_HEADER_BUF"
			 "va=%p \n", mem_descr->mem_array[0].virtual_address);
	} else
		shost_printk(KERN_WARNING, phba->shost,
			     "No Virtual address \n");

	pasync_ctx->async_header.va_base =
			mem_descr->mem_array[0].virtual_address;

	pasync_ctx->async_header.pa_base.u.a64.address =
			mem_descr->mem_array[0].bus_address.u.a64.address;

	/* Header ring (the phys_addr ring posted to hardware). */
	mem_descr = (struct be_mem_descriptor *)phba->init_mem;
	mem_descr += HWI_MEM_ASYNC_HEADER_RING;
	if (mem_descr->mem_array[0].virtual_address) {
		SE_DEBUG(DBG_LVL_8,
			 "hwi_init_async_pdu_ctx HWI_MEM_ASYNC_HEADER_RING"
			 "va=%p \n", mem_descr->mem_array[0].virtual_address);
	} else
		shost_printk(KERN_WARNING, phba->shost,
			    "No Virtual address \n");
	pasync_ctx->async_header.ring_base =
			mem_descr->mem_array[0].virtual_address;

	/* Header handle array. */
	mem_descr = (struct be_mem_descriptor *)phba->init_mem;
	mem_descr += HWI_MEM_ASYNC_HEADER_HANDLE;
	if (mem_descr->mem_array[0].virtual_address) {
		SE_DEBUG(DBG_LVL_8,
			 "hwi_init_async_pdu_ctx HWI_MEM_ASYNC_HEADER_HANDLE"
			 "va=%p \n", mem_descr->mem_array[0].virtual_address);
	} else
		shost_printk(KERN_WARNING, phba->shost,
			    "No Virtual address \n");

	pasync_ctx->async_header.handle_base =
			mem_descr->mem_array[0].virtual_address;
	pasync_ctx->async_header.writables = 0;
	INIT_LIST_HEAD(&pasync_ctx->async_header.free_list);

	/* Data buffer pool. */
	mem_descr = (struct be_mem_descriptor *)phba->init_mem;
	mem_descr += HWI_MEM_ASYNC_DATA_BUF;
	if (mem_descr->mem_array[0].virtual_address) {
		SE_DEBUG(DBG_LVL_8,
			 "hwi_init_async_pdu_ctx HWI_MEM_ASYNC_DATA_BUF"
			 "va=%p \n", mem_descr->mem_array[0].virtual_address);
	} else
		shost_printk(KERN_WARNING, phba->shost,
			    "No Virtual address \n");
	pasync_ctx->async_data.va_base =
			mem_descr->mem_array[0].virtual_address;
	pasync_ctx->async_data.pa_base.u.a64.address =
			mem_descr->mem_array[0].bus_address.u.a64.address;

	/* Data ring. */
	mem_descr = (struct be_mem_descriptor *)phba->init_mem;
	mem_descr += HWI_MEM_ASYNC_DATA_RING;
	if (mem_descr->mem_array[0].virtual_address) {
		SE_DEBUG(DBG_LVL_8,
			 "hwi_init_async_pdu_ctx HWI_MEM_ASYNC_DATA_RING"
			 "va=%p \n", mem_descr->mem_array[0].virtual_address);
	} else
		shost_printk(KERN_WARNING, phba->shost,
			     "No Virtual address \n");

	pasync_ctx->async_data.ring_base =
			mem_descr->mem_array[0].virtual_address;

	/* Data handle array. */
	mem_descr = (struct be_mem_descriptor *)phba->init_mem;
	mem_descr += HWI_MEM_ASYNC_DATA_HANDLE;
	if (!mem_descr->mem_array[0].virtual_address)
		shost_printk(KERN_WARNING, phba->shost,
			    "No Virtual address \n");

	pasync_ctx->async_data.handle_base =
			mem_descr->mem_array[0].virtual_address;
	pasync_ctx->async_data.writables = 0;
	INIT_LIST_HEAD(&pasync_ctx->async_data.free_list);

	pasync_header_h =
		(struct async_pdu_handle *)pasync_ctx->async_header.handle_base;
	pasync_data_h =
		(struct async_pdu_handle *)pasync_ctx->async_data.handle_base;

	/*
	 * Initialize one header handle and one data handle per async PDU:
	 * each points into its buffer pool at a fixed stride and starts out
	 * on the corresponding free list with cri = -1 (unassigned).
	 */
	for (index = 0; index < p->asyncpdus_per_ctrl; index++) {
		pasync_header_h->cri = -1;
		pasync_header_h->index = (char)index;
		INIT_LIST_HEAD(&pasync_header_h->link);
		pasync_header_h->pbuffer =
			(void *)((unsigned long)
			(pasync_ctx->async_header.va_base) +
			(p->defpdu_hdr_sz * index));

		pasync_header_h->pa.u.a64.address =
			pasync_ctx->async_header.pa_base.u.a64.address +
			(p->defpdu_hdr_sz * index);

		list_add_tail(&pasync_header_h->link,
				&pasync_ctx->async_header.free_list);
		pasync_header_h++;
		pasync_ctx->async_header.free_entries++;
		pasync_ctx->async_header.writables++;

		INIT_LIST_HEAD(&pasync_ctx->async_entry[index].wait_queue.list);
		INIT_LIST_HEAD(&pasync_ctx->async_entry[index].
			       header_busy_list);
		pasync_data_h->cri = -1;
		pasync_data_h->index = (char)index;
		INIT_LIST_HEAD(&pasync_data_h->link);
		pasync_data_h->pbuffer =
			(void *)((unsigned long)
			(pasync_ctx->async_data.va_base) +
			(p->defpdu_data_sz * index));

		pasync_data_h->pa.u.a64.address =
		    pasync_ctx->async_data.pa_base.u.a64.address +
		    (p->defpdu_data_sz * index);

		list_add_tail(&pasync_data_h->link,
			      &pasync_ctx->async_data.free_list);
		pasync_data_h++;
		pasync_ctx->async_data.free_entries++;
		pasync_ctx->async_data.writables++;

		INIT_LIST_HEAD(&pasync_ctx->async_entry[index].data_busy_list);
	}

	pasync_ctx->async_header.host_write_ptr = 0;
	pasync_ctx->async_header.ep_read_ptr = -1;
	pasync_ctx->async_data.host_write_ptr = 0;
	pasync_ctx->async_data.ep_read_ptr = -1;
}
2355
2356static int
2357be_sgl_create_contiguous(void *virtual_address,
2358			 u64 physical_address, u32 length,
2359			 struct be_dma_mem *sgl)
2360{
2361	WARN_ON(!virtual_address);
2362	WARN_ON(!physical_address);
2363	WARN_ON(!length > 0);
2364	WARN_ON(!sgl);
2365
2366	sgl->va = virtual_address;
2367	sgl->dma = physical_address;
2368	sgl->size = length;
2369
2370	return 0;
2371}
2372
2373static void be_sgl_destroy_contiguous(struct be_dma_mem *sgl)
2374{
2375	memset(sgl, 0, sizeof(*sgl));
2376}
2377
2378static void
2379hwi_build_be_sgl_arr(struct beiscsi_hba *phba,
2380		     struct mem_array *pmem, struct be_dma_mem *sgl)
2381{
2382	if (sgl->va)
2383		be_sgl_destroy_contiguous(sgl);
2384
2385	be_sgl_create_contiguous(pmem->virtual_address,
2386				 pmem->bus_address.u.a64.address,
2387				 pmem->size, sgl);
2388}
2389
2390static void
2391hwi_build_be_sgl_by_offset(struct beiscsi_hba *phba,
2392			   struct mem_array *pmem, struct be_dma_mem *sgl)
2393{
2394	if (sgl->va)
2395		be_sgl_destroy_contiguous(sgl);
2396
2397	be_sgl_create_contiguous((unsigned char *)pmem->virtual_address,
2398				 pmem->bus_address.u.a64.address,
2399				 pmem->size, sgl);
2400}
2401
2402static int be_fill_queue(struct be_queue_info *q,
2403		u16 len, u16 entry_size, void *vaddress)
2404{
2405	struct be_dma_mem *mem = &q->dma_mem;
2406
2407	memset(q, 0, sizeof(*q));
2408	q->len = len;
2409	q->entry_size = entry_size;
2410	mem->size = len * entry_size;
2411	mem->va = vaddress;
2412	if (!mem->va)
2413		return -ENOMEM;
2414	memset(mem->va, 0, mem->size);
2415	return 0;
2416}
2417
2418static int beiscsi_create_eqs(struct beiscsi_hba *phba,
2419			     struct hwi_context_memory *phwi_context)
2420{
2421	unsigned int i, num_eq_pages;
2422	int ret, eq_for_mcc;
2423	struct be_queue_info *eq;
2424	struct be_dma_mem *mem;
2425	void *eq_vaddress;
2426	dma_addr_t paddr;
2427
2428	num_eq_pages = PAGES_REQUIRED(phba->params.num_eq_entries * \
2429				      sizeof(struct be_eq_entry));
2430
2431	if (phba->msix_enabled)
2432		eq_for_mcc = 1;
2433	else
2434		eq_for_mcc = 0;
2435	for (i = 0; i < (phba->num_cpus + eq_for_mcc); i++) {
2436		eq = &phwi_context->be_eq[i].q;
2437		mem = &eq->dma_mem;
2438		phwi_context->be_eq[i].phba = phba;
2439		eq_vaddress = pci_alloc_consistent(phba->pcidev,
2440						     num_eq_pages * PAGE_SIZE,
2441						     &paddr);
2442		if (!eq_vaddress)
2443			goto create_eq_error;
2444
2445		mem->va = eq_vaddress;
2446		ret = be_fill_queue(eq, phba->params.num_eq_entries,
2447				    sizeof(struct be_eq_entry), eq_vaddress);
2448		if (ret) {
2449			shost_printk(KERN_ERR, phba->shost,
2450				     "be_fill_queue Failed for EQ \n");
2451			goto create_eq_error;
2452		}
2453
2454		mem->dma = paddr;
2455		ret = beiscsi_cmd_eq_create(&phba->ctrl, eq,
2456					    phwi_context->cur_eqd);
2457		if (ret) {
2458			shost_printk(KERN_ERR, phba->shost,
2459				     "beiscsi_cmd_eq_create"
2460				     "Failedfor EQ \n");
2461			goto create_eq_error;
2462		}
2463		SE_DEBUG(DBG_LVL_8, "eqid = %d\n", phwi_context->be_eq[i].q.id);
2464	}
2465	return 0;
2466create_eq_error:
2467	for (i = 0; i < (phba->num_cpus + 1); i++) {
2468		eq = &phwi_context->be_eq[i].q;
2469		mem = &eq->dma_mem;
2470		if (mem->va)
2471			pci_free_consistent(phba->pcidev, num_eq_pages
2472					    * PAGE_SIZE,
2473					    mem->va, mem->dma);
2474	}
2475	return ret;
2476}
2477
2478static int beiscsi_create_cqs(struct beiscsi_hba *phba,
2479			     struct hwi_context_memory *phwi_context)
2480{
2481	unsigned int i, num_cq_pages;
2482	int ret;
2483	struct be_queue_info *cq, *eq;
2484	struct be_dma_mem *mem;
2485	struct be_eq_obj *pbe_eq;
2486	void *cq_vaddress;
2487	dma_addr_t paddr;
2488
2489	num_cq_pages = PAGES_REQUIRED(phba->params.num_cq_entries * \
2490				      sizeof(struct sol_cqe));
2491
2492	for (i = 0; i < phba->num_cpus; i++) {
2493		cq = &phwi_context->be_cq[i];
2494		eq = &phwi_context->be_eq[i].q;
2495		pbe_eq = &phwi_context->be_eq[i];
2496		pbe_eq->cq = cq;
2497		pbe_eq->phba = phba;
2498		mem = &cq->dma_mem;
2499		cq_vaddress = pci_alloc_consistent(phba->pcidev,
2500						     num_cq_pages * PAGE_SIZE,
2501						     &paddr);
2502		if (!cq_vaddress)
2503			goto create_cq_error;
2504		ret = be_fill_queue(cq, phba->params.num_cq_entries,
2505				    sizeof(struct sol_cqe), cq_vaddress);
2506		if (ret) {
2507			shost_printk(KERN_ERR, phba->shost,
2508				     "be_fill_queue Failed for ISCSI CQ \n");
2509			goto create_cq_error;
2510		}
2511
2512		mem->dma = paddr;
2513		ret = beiscsi_cmd_cq_create(&phba->ctrl, cq, eq, false,
2514					    false, 0);
2515		if (ret) {
2516			shost_printk(KERN_ERR, phba->shost,
2517				     "beiscsi_cmd_eq_create"
2518				     "Failed for ISCSI CQ \n");
2519			goto create_cq_error;
2520		}
2521		SE_DEBUG(DBG_LVL_8, "iscsi cq_id is %d for eq_id %d\n",
2522						 cq->id, eq->id);
2523		SE_DEBUG(DBG_LVL_8, "ISCSI CQ CREATED\n");
2524	}
2525	return 0;
2526
2527create_cq_error:
2528	for (i = 0; i < phba->num_cpus; i++) {
2529		cq = &phwi_context->be_cq[i];
2530		mem = &cq->dma_mem;
2531		if (mem->va)
2532			pci_free_consistent(phba->pcidev, num_cq_pages
2533					    * PAGE_SIZE,
2534					    mem->va, mem->dma);
2535	}
2536	return ret;
2537
2538}
2539
2540static int
2541beiscsi_create_def_hdr(struct beiscsi_hba *phba,
2542		       struct hwi_context_memory *phwi_context,
2543		       struct hwi_controller *phwi_ctrlr,
2544		       unsigned int def_pdu_ring_sz)
2545{
2546	unsigned int idx;
2547	int ret;
2548	struct be_queue_info *dq, *cq;
2549	struct be_dma_mem *mem;
2550	struct be_mem_descriptor *mem_descr;
2551	void *dq_vaddress;
2552
2553	idx = 0;
2554	dq = &phwi_context->be_def_hdrq;
2555	cq = &phwi_context->be_cq[0];
2556	mem = &dq->dma_mem;
2557	mem_descr = phba->init_mem;
2558	mem_descr += HWI_MEM_ASYNC_HEADER_RING;
2559	dq_vaddress = mem_descr->mem_array[idx].virtual_address;
2560	ret = be_fill_queue(dq, mem_descr->mem_array[0].size /
2561			    sizeof(struct phys_addr),
2562			    sizeof(struct phys_addr), dq_vaddress);
2563	if (ret) {
2564		shost_printk(KERN_ERR, phba->shost,
2565			     "be_fill_queue Failed for DEF PDU HDR\n");
2566		return ret;
2567	}
2568	mem->dma = mem_descr->mem_array[idx].bus_address.u.a64.address;
2569	ret = be_cmd_create_default_pdu_queue(&phba->ctrl, cq, dq,
2570					      def_pdu_ring_sz,
2571					      phba->params.defpdu_hdr_sz);
2572	if (ret) {
2573		shost_printk(KERN_ERR, phba->shost,
2574			     "be_cmd_create_default_pdu_queue Failed DEFHDR\n");
2575		return ret;
2576	}
2577	phwi_ctrlr->default_pdu_hdr.id = phwi_context->be_def_hdrq.id;
2578	SE_DEBUG(DBG_LVL_8, "iscsi def pdu id is %d\n",
2579		 phwi_context->be_def_hdrq.id);
2580	hwi_post_async_buffers(phba, 1);
2581	return 0;
2582}
2583
2584static int
2585beiscsi_create_def_data(struct beiscsi_hba *phba,
2586			struct hwi_context_memory *phwi_context,
2587			struct hwi_controller *phwi_ctrlr,
2588			unsigned int def_pdu_ring_sz)
2589{
2590	unsigned int idx;
2591	int ret;
2592	struct be_queue_info *dataq, *cq;
2593	struct be_dma_mem *mem;
2594	struct be_mem_descriptor *mem_descr;
2595	void *dq_vaddress;
2596
2597	idx = 0;
2598	dataq = &phwi_context->be_def_dataq;
2599	cq = &phwi_context->be_cq[0];
2600	mem = &dataq->dma_mem;
2601	mem_descr = phba->init_mem;
2602	mem_descr += HWI_MEM_ASYNC_DATA_RING;
2603	dq_vaddress = mem_descr->mem_array[idx].virtual_address;
2604	ret = be_fill_queue(dataq, mem_descr->mem_array[0].size /
2605			    sizeof(struct phys_addr),
2606			    sizeof(struct phys_addr), dq_vaddress);
2607	if (ret) {
2608		shost_printk(KERN_ERR, phba->shost,
2609			     "be_fill_queue Failed for DEF PDU DATA\n");
2610		return ret;
2611	}
2612	mem->dma = mem_descr->mem_array[idx].bus_address.u.a64.address;
2613	ret = be_cmd_create_default_pdu_queue(&phba->ctrl, cq, dataq,
2614					      def_pdu_ring_sz,
2615					      phba->params.defpdu_data_sz);
2616	if (ret) {
2617		shost_printk(KERN_ERR, phba->shost,
2618			     "be_cmd_create_default_pdu_queue Failed"
2619			     " for DEF PDU DATA\n");
2620		return ret;
2621	}
2622	phwi_ctrlr->default_pdu_data.id = phwi_context->be_def_dataq.id;
2623	SE_DEBUG(DBG_LVL_8, "iscsi def data id is %d\n",
2624		 phwi_context->be_def_dataq.id);
2625	hwi_post_async_buffers(phba, 0);
2626	SE_DEBUG(DBG_LVL_8, "DEFAULT PDU DATA RING CREATED \n");
2627	return 0;
2628}
2629
2630static int
2631beiscsi_post_pages(struct beiscsi_hba *phba)
2632{
2633	struct be_mem_descriptor *mem_descr;
2634	struct mem_array *pm_arr;
2635	unsigned int page_offset, i;
2636	struct be_dma_mem sgl;
2637	int status;
2638
2639	mem_descr = phba->init_mem;
2640	mem_descr += HWI_MEM_SGE;
2641	pm_arr = mem_descr->mem_array;
2642
2643	page_offset = (sizeof(struct iscsi_sge) * phba->params.num_sge_per_io *
2644			phba->fw_config.iscsi_icd_start) / PAGE_SIZE;
2645	for (i = 0; i < mem_descr->num_elements; i++) {
2646		hwi_build_be_sgl_arr(phba, pm_arr, &sgl);
2647		status = be_cmd_iscsi_post_sgl_pages(&phba->ctrl, &sgl,
2648						page_offset,
2649						(pm_arr->size / PAGE_SIZE));
2650		page_offset += pm_arr->size / PAGE_SIZE;
2651		if (status != 0) {
2652			shost_printk(KERN_ERR, phba->shost,
2653				     "post sgl failed.\n");
2654			return status;
2655		}
2656		pm_arr++;
2657	}
2658	SE_DEBUG(DBG_LVL_8, "POSTED PAGES \n");
2659	return 0;
2660}
2661
2662static void be_queue_free(struct beiscsi_hba *phba, struct be_queue_info *q)
2663{
2664	struct be_dma_mem *mem = &q->dma_mem;
2665	if (mem->va)
2666		pci_free_consistent(phba->pcidev, mem->size,
2667			mem->va, mem->dma);
2668}
2669
2670static int be_queue_alloc(struct beiscsi_hba *phba, struct be_queue_info *q,
2671		u16 len, u16 entry_size)
2672{
2673	struct be_dma_mem *mem = &q->dma_mem;
2674
2675	memset(q, 0, sizeof(*q));
2676	q->len = len;
2677	q->entry_size = entry_size;
2678	mem->size = len * entry_size;
2679	mem->va = pci_alloc_consistent(phba->pcidev, mem->size, &mem->dma);
2680	if (!mem->va)
2681		return -1;
2682	memset(mem->va, 0, mem->size);
2683	return 0;
2684}
2685
/**
 * beiscsi_create_wrb_rings - create one WRB queue per connection.
 * @phba: driver instance.
 * @phwi_context: context receiving the created be_wrbq entries.
 * @phwi_ctrlr: controller whose wrb_context[] gets the assigned CIDs.
 *
 * First slices the (possibly multi-fragment) HWI_MEM_WRB region into
 * cxns_per_ctrl rings of wrbs_per_cxn WRBs each, then issues a wrbq-create
 * command for every ring and records the returned queue id as the cid of
 * the matching (even-indexed) wrb_context.
 *
 * Returns 0 on success or a negative/командstatus error; pwrb_arr is freed
 * on every path.
 *
 * NOTE(review): 'sgl' is passed to hwi_build_be_sgl_by_offset()
 * uninitialized on the first iteration, and that helper reads sgl.va —
 * looks like it should be zero-initialized; confirm against upstream.
 */
static int
beiscsi_create_wrb_rings(struct beiscsi_hba *phba,
			 struct hwi_context_memory *phwi_context,
			 struct hwi_controller *phwi_ctrlr)
{
	unsigned int wrb_mem_index, offset, size, num_wrb_rings;
	u64 pa_addr_lo;
	unsigned int idx, num, i;
	struct mem_array *pwrb_arr;
	void *wrb_vaddr;
	struct be_dma_mem sgl;
	struct be_mem_descriptor *mem_descr;
	int status;

	idx = 0;
	mem_descr = phba->init_mem;
	mem_descr += HWI_MEM_WRB;
	pwrb_arr = kmalloc(sizeof(*pwrb_arr) * phba->params.cxns_per_ctrl,
			   GFP_KERNEL);
	if (!pwrb_arr) {
		shost_printk(KERN_ERR, phba->shost,
			     "Memory alloc failed in create wrb ring.\n");
		return -ENOMEM;
	}
	wrb_vaddr = mem_descr->mem_array[idx].virtual_address;
	pa_addr_lo = mem_descr->mem_array[idx].bus_address.u.a64.address;
	/* Whole per-connection rings that fit in the current fragment. */
	num_wrb_rings = mem_descr->mem_array[idx].size /
		(phba->params.wrbs_per_cxn * sizeof(struct iscsi_wrb));

	/* Slice the region into one ring per connection, advancing to the
	 * next fragment whenever the current one runs out of whole rings. */
	for (num = 0; num < phba->params.cxns_per_ctrl; num++) {
		if (num_wrb_rings) {
			pwrb_arr[num].virtual_address = wrb_vaddr;
			pwrb_arr[num].bus_address.u.a64.address	= pa_addr_lo;
			pwrb_arr[num].size = phba->params.wrbs_per_cxn *
					    sizeof(struct iscsi_wrb);
			wrb_vaddr += pwrb_arr[num].size;
			pa_addr_lo += pwrb_arr[num].size;
			num_wrb_rings--;
		} else {
			idx++;
			wrb_vaddr = mem_descr->mem_array[idx].virtual_address;
			pa_addr_lo = mem_descr->mem_array[idx].\
					bus_address.u.a64.address;
			num_wrb_rings = mem_descr->mem_array[idx].size /
					(phba->params.wrbs_per_cxn *
					sizeof(struct iscsi_wrb));
			pwrb_arr[num].virtual_address = wrb_vaddr;
			pwrb_arr[num].bus_address.u.a64.address\
						= pa_addr_lo;
			pwrb_arr[num].size = phba->params.wrbs_per_cxn *
						 sizeof(struct iscsi_wrb);
			wrb_vaddr += pwrb_arr[num].size;
			pa_addr_lo   += pwrb_arr[num].size;
			num_wrb_rings--;
		}
	}
	/* Ask the controller to create each ring and record its queue id. */
	for (i = 0; i < phba->params.cxns_per_ctrl; i++) {
		wrb_mem_index = 0;
		offset = 0;
		size = 0;

		hwi_build_be_sgl_by_offset(phba, &pwrb_arr[i], &sgl);
		status = be_cmd_wrbq_create(&phba->ctrl, &sgl,
					    &phwi_context->be_wrbq[i]);
		if (status != 0) {
			shost_printk(KERN_ERR, phba->shost,
				     "wrbq create failed.");
			kfree(pwrb_arr);
			return status;
		}
		phwi_ctrlr->wrb_context[i * 2].cid = phwi_context->be_wrbq[i].
								   id;
	}
	kfree(pwrb_arr);
	return 0;
}
2762
2763static void free_wrb_handles(struct beiscsi_hba *phba)
2764{
2765	unsigned int index;
2766	struct hwi_controller *phwi_ctrlr;
2767	struct hwi_wrb_context *pwrb_context;
2768
2769	phwi_ctrlr = phba->phwi_ctrlr;
2770	for (index = 0; index < phba->params.cxns_per_ctrl * 2; index += 2) {
2771		pwrb_context = &phwi_ctrlr->wrb_context[index];
2772		kfree(pwrb_context->pwrb_handle_base);
2773		kfree(pwrb_context->pwrb_handle_basestd);
2774	}
2775}
2776
2777static void be_mcc_queues_destroy(struct beiscsi_hba *phba)
2778{
2779	struct be_queue_info *q;
2780	struct be_ctrl_info *ctrl = &phba->ctrl;
2781
2782	q = &phba->ctrl.mcc_obj.q;
2783	if (q->created)
2784		beiscsi_cmd_q_destroy(ctrl, q, QTYPE_MCCQ);
2785	be_queue_free(phba, q);
2786
2787	q = &phba->ctrl.mcc_obj.cq;
2788	if (q->created)
2789		beiscsi_cmd_q_destroy(ctrl, q, QTYPE_CQ);
2790	be_queue_free(phba, q);
2791}
2792
/**
 * hwi_cleanup - destroy every hardware queue created by hwi_init_port().
 * @phba: driver instance.
 *
 * Teardown proceeds in roughly reverse creation order: per-connection WRB
 * queues (and their handle tables), both default PDU queues, the posted
 * SGL pages, the per-CPU CQs, the EQs (including the extra MCC EQ when
 * MSI-X was enabled), and finally the MCC queues.
 */
static void hwi_cleanup(struct beiscsi_hba *phba)
{
	struct be_queue_info *q;
	struct be_ctrl_info *ctrl = &phba->ctrl;
	struct hwi_controller *phwi_ctrlr;
	struct hwi_context_memory *phwi_context;
	int i, eq_num;

	phwi_ctrlr = phba->phwi_ctrlr;
	phwi_context = phwi_ctrlr->phwi_ctxt;
	for (i = 0; i < phba->params.cxns_per_ctrl; i++) {
		q = &phwi_context->be_wrbq[i];
		if (q->created)
			beiscsi_cmd_q_destroy(ctrl, q, QTYPE_WRBQ);
	}
	free_wrb_handles(phba);

	q = &phwi_context->be_def_hdrq;
	if (q->created)
		beiscsi_cmd_q_destroy(ctrl, q, QTYPE_DPDUQ);

	q = &phwi_context->be_def_dataq;
	if (q->created)
		beiscsi_cmd_q_destroy(ctrl, q, QTYPE_DPDUQ);

	/* NULL queue: removes the posted SGL pages rather than a queue. */
	beiscsi_cmd_q_destroy(ctrl, NULL, QTYPE_SGL);

	for (i = 0; i < (phba->num_cpus); i++) {
		q = &phwi_context->be_cq[i];
		if (q->created)
			beiscsi_cmd_q_destroy(ctrl, q, QTYPE_CQ);
	}
	/* One extra EQ exists for the MCC when MSI-X was enabled. */
	if (phba->msix_enabled)
		eq_num = 1;
	else
		eq_num = 0;
	for (i = 0; i < (phba->num_cpus + eq_num); i++) {
		q = &phwi_context->be_eq[i].q;
		if (q->created)
			beiscsi_cmd_q_destroy(ctrl, q, QTYPE_EQ);
	}
	be_mcc_queues_destroy(phba);
}
2836
/**
 * be_mcc_queues_create - allocate and create the MCC queue pair.
 * @phba: driver instance.
 * @phwi_context: context providing the EQ to bind the MCC CQ to.
 *
 * Creates the MCC completion queue first (bound to the dedicated EQ at
 * index num_cpus when MSI-X is enabled, else EQ 0), then the MCC work
 * queue on top of it.  Unwinds via the goto chain on any failure.
 *
 * Returns 0 on success, -1 on failure.
 */
static int be_mcc_queues_create(struct beiscsi_hba *phba,
				struct hwi_context_memory *phwi_context)
{
	struct be_queue_info *q, *cq;
	struct be_ctrl_info *ctrl = &phba->ctrl;

	/* Alloc MCC compl queue */
	cq = &phba->ctrl.mcc_obj.cq;
	if (be_queue_alloc(phba, cq, MCC_CQ_LEN,
			sizeof(struct be_mcc_compl)))
		goto err;
	/* Ask BE to create MCC compl queue; */
	/* Note: the two 'goto mcc_cq_free' statements below belong to the
	 * preceding if() despite their misleading indentation. */
	if (phba->msix_enabled) {
		if (beiscsi_cmd_cq_create(ctrl, cq, &phwi_context->be_eq
					 [phba->num_cpus].q, false, true, 0))
		goto mcc_cq_free;
	} else {
		if (beiscsi_cmd_cq_create(ctrl, cq, &phwi_context->be_eq[0].q,
					  false, true, 0))
		goto mcc_cq_free;
	}

	/* Alloc MCC queue */
	q = &phba->ctrl.mcc_obj.q;
	if (be_queue_alloc(phba, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
		goto mcc_cq_destroy;

	/* Ask BE to create MCC queue */
	if (beiscsi_cmd_mccq_create(phba, q, cq))
		goto mcc_q_free;

	return 0;

mcc_q_free:
	be_queue_free(phba, q);
mcc_cq_destroy:
	beiscsi_cmd_q_destroy(ctrl, cq, QTYPE_CQ);
mcc_cq_free:
	be_queue_free(phba, cq);
err:
	return -1;
}
2879
2880static int find_num_cpus(void)
2881{
2882	int  num_cpus = 0;
2883
2884	num_cpus = num_online_cpus();
2885	if (num_cpus >= MAX_CPUS)
2886		num_cpus = MAX_CPUS - 1;
2887
2888	SE_DEBUG(DBG_LVL_8, "num_cpus = %d \n", num_cpus);
2889	return num_cpus;
2890}
2891
/**
 * hwi_init_port - create every hardware queue the iSCSI port needs
 * @phba: adapter instance
 *
 * Runs the firmware initialize command, then creates in strict order:
 * event queues, MCC queues, completion queues, the default PDU header
 * and data rings, posts the SGL pages, and finally the WRB rings.  On
 * any failure, whatever was created is torn down via hwi_cleanup().
 *
 * Returns 0 on success, -ENOMEM on any failure (the helpers' own
 * error codes are not propagated).
 */
static int hwi_init_port(struct beiscsi_hba *phba)
{
	struct hwi_controller *phwi_ctrlr;
	struct hwi_context_memory *phwi_context;
	unsigned int def_pdu_ring_sz;
	struct be_ctrl_info *ctrl = &phba->ctrl;
	int status;

	/* One phys_addr ring slot per async PDU the controller may own. */
	def_pdu_ring_sz =
		phba->params.asyncpdus_per_ctrl * sizeof(struct phys_addr);
	phwi_ctrlr = phba->phwi_ctrlr;
	phwi_context = phwi_ctrlr->phwi_ctxt;
	/* EQ delay starts at 64; min/max of 0 — units per fw spec. */
	phwi_context->max_eqd = 0;
	phwi_context->min_eqd = 0;
	phwi_context->cur_eqd = 64;
	be_cmd_fw_initialize(&phba->ctrl);

	status = beiscsi_create_eqs(phba, phwi_context);
	if (status != 0) {
		shost_printk(KERN_ERR, phba->shost, "EQ not created \n");
		goto error;
	}

	status = be_mcc_queues_create(phba, phwi_context);
	if (status != 0)
		goto error;

	/* Reject firmware versions this driver cannot talk to. */
	status = mgmt_check_supported_fw(ctrl, phba);
	if (status != 0) {
		shost_printk(KERN_ERR, phba->shost,
			     "Unsupported fw version \n");
		goto error;
	}

	status = beiscsi_create_cqs(phba, phwi_context);
	if (status != 0) {
		shost_printk(KERN_ERR, phba->shost, "CQ not created\n");
		goto error;
	}

	status = beiscsi_create_def_hdr(phba, phwi_context, phwi_ctrlr,
					def_pdu_ring_sz);
	if (status != 0) {
		shost_printk(KERN_ERR, phba->shost,
			     "Default Header not created\n");
		goto error;
	}

	status = beiscsi_create_def_data(phba, phwi_context,
					 phwi_ctrlr, def_pdu_ring_sz);
	if (status != 0) {
		shost_printk(KERN_ERR, phba->shost,
			     "Default Data not created\n");
		goto error;
	}

	status = beiscsi_post_pages(phba);
	if (status != 0) {
		shost_printk(KERN_ERR, phba->shost, "Post SGL Pages Failed\n");
		goto error;
	}

	status = beiscsi_create_wrb_rings(phba,	phwi_context, phwi_ctrlr);
	if (status != 0) {
		shost_printk(KERN_ERR, phba->shost,
			     "WRB Rings not created\n");
		goto error;
	}

	SE_DEBUG(DBG_LVL_8, "hwi_init_port success\n");
	return 0;

error:
	shost_printk(KERN_ERR, phba->shost, "hwi_init_port failed");
	hwi_cleanup(phba);
	return -ENOMEM;
}
2969
2970static int hwi_init_controller(struct beiscsi_hba *phba)
2971{
2972	struct hwi_controller *phwi_ctrlr;
2973
2974	phwi_ctrlr = phba->phwi_ctrlr;
2975	if (1 == phba->init_mem[HWI_MEM_ADDN_CONTEXT].num_elements) {
2976		phwi_ctrlr->phwi_ctxt = (struct hwi_context_memory *)phba->
2977		    init_mem[HWI_MEM_ADDN_CONTEXT].mem_array[0].virtual_address;
2978		SE_DEBUG(DBG_LVL_8, " phwi_ctrlr->phwi_ctxt=%p \n",
2979			 phwi_ctrlr->phwi_ctxt);
2980	} else {
2981		shost_printk(KERN_ERR, phba->shost,
2982			     "HWI_MEM_ADDN_CONTEXT is more than one element."
2983			     "Failing to load\n");
2984		return -ENOMEM;
2985	}
2986
2987	iscsi_init_global_templates(phba);
2988	beiscsi_init_wrb_handle(phba);
2989	hwi_init_async_pdu_ctx(phba);
2990	if (hwi_init_port(phba) != 0) {
2991		shost_printk(KERN_ERR, phba->shost,
2992			     "hwi_init_controller failed\n");
2993		return -ENOMEM;
2994	}
2995	return 0;
2996}
2997
2998static void beiscsi_free_mem(struct beiscsi_hba *phba)
2999{
3000	struct be_mem_descriptor *mem_descr;
3001	int i, j;
3002
3003	mem_descr = phba->init_mem;
3004	i = 0;
3005	j = 0;
3006	for (i = 0; i < SE_MEM_MAX; i++) {
3007		for (j = mem_descr->num_elements; j > 0; j--) {
3008			pci_free_consistent(phba->pcidev,
3009			  mem_descr->mem_array[j - 1].size,
3010			  mem_descr->mem_array[j - 1].virtual_address,
3011			  mem_descr->mem_array[j - 1].bus_address.
3012				u.a64.address);
3013		}
3014		kfree(mem_descr->mem_array);
3015		mem_descr++;
3016	}
3017	kfree(phba->init_mem);
3018	kfree(phba->phwi_ctrlr);
3019}
3020
3021static int beiscsi_init_controller(struct beiscsi_hba *phba)
3022{
3023	int ret = -ENOMEM;
3024
3025	ret = beiscsi_get_memory(phba);
3026	if (ret < 0) {
3027		shost_printk(KERN_ERR, phba->shost, "beiscsi_dev_probe -"
3028			     "Failed in beiscsi_alloc_memory \n");
3029		return ret;
3030	}
3031
3032	ret = hwi_init_controller(phba);
3033	if (ret)
3034		goto free_init;
3035	SE_DEBUG(DBG_LVL_8, "Return success from beiscsi_init_controller");
3036	return 0;
3037
3038free_init:
3039	beiscsi_free_mem(phba);
3040	return -ENOMEM;
3041}
3042
/**
 * beiscsi_init_sgl_handle - build the I/O and eh (mgmt) SGL handle pools
 * @phba: adapter instance
 *
 * Carves the preallocated HWI_MEM_SGLH region into sgl_handle structs:
 * the first ios_per_ctrl handles form the I/O pool, the remaining
 * (icds_per_ctrl - ios_per_ctrl) form the eh/mgmt pool.  Then walks
 * the HWI_MEM_SGE region assigning each handle its iscsi_sge fragment
 * array and a firmware sgl_index starting at fw_config.iscsi_icd_start.
 *
 * Returns 0 on success or -ENOMEM on allocation failure.
 */
static int beiscsi_init_sgl_handle(struct beiscsi_hba *phba)
{
	struct be_mem_descriptor *mem_descr_sglh, *mem_descr_sg;
	struct sgl_handle *psgl_handle;
	struct iscsi_sge *pfrag;
	unsigned int arr_index, i, idx;

	phba->io_sgl_hndl_avbl = 0;
	phba->eh_sgl_hndl_avbl = 0;

	/* The SGLH region must have been allocated as one element. */
	mem_descr_sglh = phba->init_mem;
	mem_descr_sglh += HWI_MEM_SGLH;
	if (1 == mem_descr_sglh->num_elements) {
		phba->io_sgl_hndl_base = kzalloc(sizeof(struct sgl_handle *) *
						 phba->params.ios_per_ctrl,
						 GFP_KERNEL);
		if (!phba->io_sgl_hndl_base) {
			shost_printk(KERN_ERR, phba->shost,
				     "Mem Alloc Failed. Failing to load\n");
			return -ENOMEM;
		}
		phba->eh_sgl_hndl_base = kzalloc(sizeof(struct sgl_handle *) *
						 (phba->params.icds_per_ctrl -
						 phba->params.ios_per_ctrl),
						 GFP_KERNEL);
		if (!phba->eh_sgl_hndl_base) {
			kfree(phba->io_sgl_hndl_base);
			shost_printk(KERN_ERR, phba->shost,
				     "Mem Alloc Failed. Failing to load\n");
			return -ENOMEM;
		}
	} else {
		shost_printk(KERN_ERR, phba->shost,
			     "HWI_MEM_SGLH is more than one element."
			     "Failing to load\n");
		return -ENOMEM;
	}

	/* Distribute the sgl_handle structs: I/O pool first, then eh. */
	arr_index = 0;
	idx = 0;
	while (idx < mem_descr_sglh->num_elements) {
		psgl_handle = mem_descr_sglh->mem_array[idx].virtual_address;

		for (i = 0; i < (mem_descr_sglh->mem_array[idx].size /
		      sizeof(struct sgl_handle)); i++) {
			if (arr_index < phba->params.ios_per_ctrl) {
				phba->io_sgl_hndl_base[arr_index] = psgl_handle;
				phba->io_sgl_hndl_avbl++;
				arr_index++;
			} else {
				phba->eh_sgl_hndl_base[arr_index -
					phba->params.ios_per_ctrl] =
								psgl_handle;
				arr_index++;
				phba->eh_sgl_hndl_avbl++;
			}
			psgl_handle++;
		}
		idx++;
	}
	SE_DEBUG(DBG_LVL_8,
		 "phba->io_sgl_hndl_avbl=%d"
		 "phba->eh_sgl_hndl_avbl=%d \n",
		 phba->io_sgl_hndl_avbl,
		 phba->eh_sgl_hndl_avbl);
	/* Hand each handle its SGE fragment array from HWI_MEM_SGE and
	 * assign its firmware-visible sgl_index.
	 */
	mem_descr_sg = phba->init_mem;
	mem_descr_sg += HWI_MEM_SGE;
	SE_DEBUG(DBG_LVL_8, "\n mem_descr_sg->num_elements=%d \n",
		 mem_descr_sg->num_elements);
	arr_index = 0;
	idx = 0;
	while (idx < mem_descr_sg->num_elements) {
		pfrag = mem_descr_sg->mem_array[idx].virtual_address;

		for (i = 0;
		     i < (mem_descr_sg->mem_array[idx].size) /
		     (sizeof(struct iscsi_sge) * phba->params.num_sge_per_io);
		     i++) {
			if (arr_index < phba->params.ios_per_ctrl)
				psgl_handle = phba->io_sgl_hndl_base[arr_index];
			else
				psgl_handle = phba->eh_sgl_hndl_base[arr_index -
						phba->params.ios_per_ctrl];
			psgl_handle->pfrag = pfrag;
			AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, pfrag, 0);
			AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, pfrag, 0);
			pfrag += phba->params.num_sge_per_io;
			psgl_handle->sgl_index =
				phba->fw_config.iscsi_icd_start + arr_index++;
		}
		idx++;
	}
	/* Reset the circular alloc/free cursors for both pools. */
	phba->io_sgl_free_index = 0;
	phba->io_sgl_alloc_index = 0;
	phba->eh_sgl_free_index = 0;
	phba->eh_sgl_alloc_index = 0;
	return 0;
}
3141
3142static int hba_setup_cid_tbls(struct beiscsi_hba *phba)
3143{
3144	int i, new_cid;
3145
3146	phba->cid_array = kzalloc(sizeof(void *) * phba->params.cxns_per_ctrl,
3147				  GFP_KERNEL);
3148	if (!phba->cid_array) {
3149		shost_printk(KERN_ERR, phba->shost,
3150			     "Failed to allocate memory in "
3151			     "hba_setup_cid_tbls\n");
3152		return -ENOMEM;
3153	}
3154	phba->ep_array = kzalloc(sizeof(struct iscsi_endpoint *) *
3155				 phba->params.cxns_per_ctrl * 2, GFP_KERNEL);
3156	if (!phba->ep_array) {
3157		shost_printk(KERN_ERR, phba->shost,
3158			     "Failed to allocate memory in "
3159			     "hba_setup_cid_tbls \n");
3160		kfree(phba->cid_array);
3161		return -ENOMEM;
3162	}
3163	new_cid = phba->fw_config.iscsi_cid_start;
3164	for (i = 0; i < phba->params.cxns_per_ctrl; i++) {
3165		phba->cid_array[i] = new_cid;
3166		new_cid += 2;
3167	}
3168	phba->avlbl_cids = phba->params.cxns_per_ctrl;
3169	return 0;
3170}
3171
/**
 * hwi_enable_intr - unmask host interrupts and arm the event queues
 * @phba: adapter instance
 *
 * Sets the host-interrupt bit in the membar interrupt-control register
 * if it is not already set, then rearms the EQ doorbells: only EQ 0 in
 * INTx mode, or every per-CPU EQ plus the extra MCC EQ (index
 * num_cpus) when MSI-X is enabled.
 *
 * Always returns true.
 */
static unsigned char hwi_enable_intr(struct beiscsi_hba *phba)
{
	struct be_ctrl_info *ctrl = &phba->ctrl;
	struct hwi_controller *phwi_ctrlr;
	struct hwi_context_memory *phwi_context;
	struct be_queue_info *eq;
	u8 __iomem *addr;
	u32 reg, i;
	u32 enabled;

	phwi_ctrlr = phba->phwi_ctrlr;
	phwi_context = phwi_ctrlr->phwi_ctxt;

	addr = (u8 __iomem *) ((u8 __iomem *) ctrl->pcicfg +
			PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET);
	reg = ioread32(addr);
	SE_DEBUG(DBG_LVL_8, "reg =x%08x \n", reg);

	enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
	if (!enabled) {
		/* Unmask host interrupts before arming the EQ doorbells. */
		reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
		SE_DEBUG(DBG_LVL_8, "reg =x%08x addr=%p \n", reg, addr);
		iowrite32(reg, addr);
		if (!phba->msix_enabled) {
			eq = &phwi_context->be_eq[0].q;
			SE_DEBUG(DBG_LVL_8, "eq->id=%d \n", eq->id);
			hwi_ring_eq_db(phba, eq->id, 0, 0, 1, 1);
		} else {
			/* <= so the MCC EQ at index num_cpus is armed too. */
			for (i = 0; i <= phba->num_cpus; i++) {
				eq = &phwi_context->be_eq[i].q;
				SE_DEBUG(DBG_LVL_8, "eq->id=%d \n", eq->id);
				hwi_ring_eq_db(phba, eq->id, 0, 0, 1, 1);
			}
		}
	}
	return true;
}
3209
3210static void hwi_disable_intr(struct beiscsi_hba *phba)
3211{
3212	struct be_ctrl_info *ctrl = &phba->ctrl;
3213
3214	u8 __iomem *addr = ctrl->pcicfg + PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET;
3215	u32 reg = ioread32(addr);
3216
3217	u32 enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
3218	if (enabled) {
3219		reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
3220		iowrite32(reg, addr);
3221	} else
3222		shost_printk(KERN_WARNING, phba->shost,
3223			     "In hwi_disable_intr, Already Disabled \n");
3224}
3225
3226static int beiscsi_init_port(struct beiscsi_hba *phba)
3227{
3228	int ret;
3229
3230	ret = beiscsi_init_controller(phba);
3231	if (ret < 0) {
3232		shost_printk(KERN_ERR, phba->shost,
3233			     "beiscsi_dev_probe - Failed in"
3234			     "beiscsi_init_controller \n");
3235		return ret;
3236	}
3237	ret = beiscsi_init_sgl_handle(phba);
3238	if (ret < 0) {
3239		shost_printk(KERN_ERR, phba->shost,
3240			     "beiscsi_dev_probe - Failed in"
3241			     "beiscsi_init_sgl_handle \n");
3242		goto do_cleanup_ctrlr;
3243	}
3244
3245	if (hba_setup_cid_tbls(phba)) {
3246		shost_printk(KERN_ERR, phba->shost,
3247			     "Failed in hba_setup_cid_tbls\n");
3248		kfree(phba->io_sgl_hndl_base);
3249		kfree(phba->eh_sgl_hndl_base);
3250		goto do_cleanup_ctrlr;
3251	}
3252
3253	return ret;
3254
3255do_cleanup_ctrlr:
3256	hwi_cleanup(phba);
3257	return ret;
3258}
3259
/**
 * hwi_purge_eq - drain stale entries from every event queue
 * @phba: adapter instance
 *
 * Walks each EQ (including the extra MCC EQ at index num_cpus when
 * MSI-X is enabled), invalidates every valid entry, and rings the EQ
 * doorbell with the consumed count so the hardware index stays in
 * sync.  Called during port teardown.
 */
static void hwi_purge_eq(struct beiscsi_hba *phba)
{
	struct hwi_controller *phwi_ctrlr;
	struct hwi_context_memory *phwi_context;
	struct be_queue_info *eq;
	struct be_eq_entry *eqe = NULL;
	int i, eq_msix;
	unsigned int num_processed;

	phwi_ctrlr = phba->phwi_ctrlr;
	phwi_context = phwi_ctrlr->phwi_ctxt;
	/* One extra EQ exists for MCC when MSI-X is in use. */
	if (phba->msix_enabled)
		eq_msix = 1;
	else
		eq_msix = 0;

	for (i = 0; i < (phba->num_cpus + eq_msix); i++) {
		eq = &phwi_context->be_eq[i].q;
		eqe = queue_tail_node(eq);
		num_processed = 0;
		/* Consume entries until the valid bit indicates empty. */
		while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32]
					& EQE_VALID_MASK) {
			AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0);
			queue_tail_inc(eq);
			eqe = queue_tail_node(eq);
			num_processed++;
		}

		if (num_processed)
			hwi_ring_eq_db(phba, eq->id, 1,	num_processed, 1, 1);
	}
}
3292
3293static void beiscsi_clean_port(struct beiscsi_hba *phba)
3294{
3295	unsigned char mgmt_status;
3296
3297	mgmt_status = mgmt_epfw_cleanup(phba, CMD_CONNECTION_CHUTE_0);
3298	if (mgmt_status)
3299		shost_printk(KERN_WARNING, phba->shost,
3300			     "mgmt_epfw_cleanup FAILED \n");
3301
3302	hwi_purge_eq(phba);
3303	hwi_cleanup(phba);
3304	kfree(phba->io_sgl_hndl_base);
3305	kfree(phba->eh_sgl_hndl_base);
3306	kfree(phba->cid_array);
3307	kfree(phba->ep_array);
3308}
3309
/**
 * beiscsi_offload_connection - program negotiated login params into hw
 * @beiscsi_conn: connection being offloaded
 * @params: negotiated iSCSI parameters laid out for AMAP access
 *
 * Builds a target-context-update WRB carrying the connection's
 * negotiated values (burst lengths, digest/ERL/R2T/immediate-data
 * flags, exp_statsn) plus the global pad buffer address, converts it
 * to little-endian and rings the TX doorbell to submit it.
 */
void
beiscsi_offload_connection(struct beiscsi_conn *beiscsi_conn,
			   struct beiscsi_offload_params *params)
{
	struct wrb_handle *pwrb_handle;
	struct iscsi_target_context_update_wrb *pwrb = NULL;
	struct be_mem_descriptor *mem_descr;
	struct beiscsi_hba *phba = beiscsi_conn->phba;
	u32 doorbell = 0;

	/*
	 * We can always use 0 here because it is reserved by libiscsi for
	 * login/startup related tasks.
	 */
	pwrb_handle = alloc_wrb_handle(phba, (beiscsi_conn->beiscsi_conn_cid -
				       phba->fw_config.iscsi_cid_start));
	pwrb = (struct iscsi_target_context_update_wrb *)pwrb_handle->pwrb;
	memset(pwrb, 0, sizeof(*pwrb));
	/* Copy each negotiated field out of the AMAP-packed params. */
	AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb,
		      max_burst_length, pwrb, params->dw[offsetof
		      (struct amap_beiscsi_offload_params,
		      max_burst_length) / 32]);
	AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb,
		      max_send_data_segment_length, pwrb,
		      params->dw[offsetof(struct amap_beiscsi_offload_params,
		      max_send_data_segment_length) / 32]);
	AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb,
		      first_burst_length,
		      pwrb,
		      params->dw[offsetof(struct amap_beiscsi_offload_params,
		      first_burst_length) / 32]);

	/* Single-bit flags are masked out of their dword, then shifted
	 * down to bit 0 before being stored.
	 */
	AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, erl, pwrb,
		      (params->dw[offsetof(struct amap_beiscsi_offload_params,
		      erl) / 32] & OFFLD_PARAMS_ERL));
	AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, dde, pwrb,
		      (params->dw[offsetof(struct amap_beiscsi_offload_params,
		      dde) / 32] & OFFLD_PARAMS_DDE) >> 2);
	AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, hde, pwrb,
		      (params->dw[offsetof(struct amap_beiscsi_offload_params,
		      hde) / 32] & OFFLD_PARAMS_HDE) >> 3);
	AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, ir2t, pwrb,
		      (params->dw[offsetof(struct amap_beiscsi_offload_params,
		      ir2t) / 32] & OFFLD_PARAMS_IR2T) >> 4);
	AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, imd, pwrb,
		      (params->dw[offsetof(struct amap_beiscsi_offload_params,
		       imd) / 32] & OFFLD_PARAMS_IMD) >> 5);
	AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, stat_sn,
		      pwrb,
		      (params->dw[offsetof(struct amap_beiscsi_offload_params,
		      exp_statsn) / 32] + 1));
	AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, type, pwrb,
		      0x7);
	AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, wrb_idx,
		      pwrb, pwrb_handle->wrb_index);
	AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, ptr2nextwrb,
		      pwrb, pwrb_handle->nxt_wrb_index);
	AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb,
			session_state, pwrb, 0);
	AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, compltonack,
		      pwrb, 1);
	AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, notpredblq,
		      pwrb, 0);
	AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, mode, pwrb,
		      0);

	/* Hand the adapter the global pad buffer's bus address. */
	mem_descr = phba->init_mem;
	mem_descr += ISCSI_MEM_GLOBAL_HEADER;

	AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb,
			pad_buffer_addr_hi, pwrb,
		      mem_descr->mem_array[0].bus_address.u.a32.address_hi);
	AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb,
			pad_buffer_addr_lo, pwrb,
		      mem_descr->mem_array[0].bus_address.u.a32.address_lo);

	be_dws_le_to_cpu(pwrb, sizeof(struct iscsi_target_context_update_wrb));

	/* Build and ring the TX doorbell: CID, WRB index, one posted. */
	doorbell |= beiscsi_conn->beiscsi_conn_cid & DB_WRB_POST_CID_MASK;
	doorbell |= (pwrb_handle->wrb_index & DB_DEF_PDU_WRB_INDEX_MASK)
			     << DB_DEF_PDU_WRB_INDEX_SHIFT;
	doorbell |= 1 << DB_DEF_PDU_NUM_POSTED_SHIFT;

	iowrite32(doorbell, phba->db_va + DB_TXULP0_OFFSET);
}
3395
3396static void beiscsi_parse_pdu(struct iscsi_conn *conn, itt_t itt,
3397			      int *index, int *age)
3398{
3399	*index = (int)itt;
3400	if (age)
3401		*age = conn->session->age;
3402}
3403
3404/**
3405 * beiscsi_alloc_pdu - allocates pdu and related resources
3406 * @task: libiscsi task
3407 * @opcode: opcode of pdu for task
3408 *
3409 * This is called with the session lock held. It will allocate
3410 * the wrb and sgl if needed for the command. And it will prep
3411 * the pdu's itt. beiscsi_parse_pdu will later translate
3412 * the pdu itt to the libiscsi task itt.
3413 */
static int beiscsi_alloc_pdu(struct iscsi_task *task, uint8_t opcode)
{
	struct beiscsi_io_task *io_task = task->dd_data;
	struct iscsi_conn *conn = task->conn;
	struct beiscsi_conn *beiscsi_conn = conn->dd_data;
	struct beiscsi_hba *phba = beiscsi_conn->phba;
	struct hwi_wrb_context *pwrb_context;
	struct hwi_controller *phwi_ctrlr;
	itt_t itt;
	struct beiscsi_session *beiscsi_sess = beiscsi_conn->beiscsi_sess;
	dma_addr_t paddr;

	/* DMA-able BHS buffer for the command header. */
	io_task->cmd_bhs = pci_pool_alloc(beiscsi_sess->bhs_pool,
					  GFP_KERNEL, &paddr);
	if (!io_task->cmd_bhs)
		return -ENOMEM;
	io_task->bhs_pa.u.a64.address = paddr;
	/* Remember libiscsi's itt; the hw itt is built below. */
	io_task->libiscsi_itt = (itt_t)task->itt;
	io_task->pwrb_handle = alloc_wrb_handle(phba,
						beiscsi_conn->beiscsi_conn_cid -
						phba->fw_config.iscsi_cid_start
						);
	io_task->conn = beiscsi_conn;

	task->hdr = (struct iscsi_hdr *)&io_task->cmd_bhs->iscsi_hdr;
	task->hdr_max = sizeof(struct be_cmd_bhs);

	if (task->sc) {
		/* SCSI command: take an SGL handle from the I/O pool. */
		spin_lock(&phba->io_sgl_lock);
		io_task->psgl_handle = alloc_io_sgl_handle(phba);
		spin_unlock(&phba->io_sgl_lock);
		if (!io_task->psgl_handle)
			goto free_hndls;
	} else {
		io_task->scsi_cmnd = NULL;
		if ((opcode & ISCSI_OPCODE_MASK) == ISCSI_OP_LOGIN) {
			/* All login PDUs of one connection share a single
			 * mgmt SGL handle, allocated on the first one.
			 */
			if (!beiscsi_conn->login_in_progress) {
				spin_lock(&phba->mgmt_sgl_lock);
				io_task->psgl_handle = (struct sgl_handle *)
						alloc_mgmt_sgl_handle(phba);
				spin_unlock(&phba->mgmt_sgl_lock);
				if (!io_task->psgl_handle)
					goto free_hndls;

				beiscsi_conn->login_in_progress = 1;
				beiscsi_conn->plogin_sgl_handle =
							io_task->psgl_handle;
			} else {
				io_task->psgl_handle =
						beiscsi_conn->plogin_sgl_handle;
			}
		} else {
			/* Other mgmt PDUs get their own mgmt SGL handle. */
			spin_lock(&phba->mgmt_sgl_lock);
			io_task->psgl_handle = alloc_mgmt_sgl_handle(phba);
			spin_unlock(&phba->mgmt_sgl_lock);
			if (!io_task->psgl_handle)
				goto free_hndls;
		}
	}
	/* hw itt encodes the WRB index (high 16) and SGL index (low 16). */
	itt = (itt_t) cpu_to_be32(((unsigned int)io_task->pwrb_handle->
				 wrb_index << 16) | (unsigned int)
				(io_task->psgl_handle->sgl_index));
	io_task->pwrb_handle->pio_handle = task;

	io_task->cmd_bhs->iscsi_hdr.itt = itt;
	return 0;

free_hndls:
	/* Undo the WRB handle and BHS allocations made above. */
	phwi_ctrlr = phba->phwi_ctrlr;
	pwrb_context = &phwi_ctrlr->wrb_context[
			beiscsi_conn->beiscsi_conn_cid -
			phba->fw_config.iscsi_cid_start];
	free_wrb_handle(phba, pwrb_context, io_task->pwrb_handle);
	io_task->pwrb_handle = NULL;
	pci_pool_free(beiscsi_sess->bhs_pool, io_task->cmd_bhs,
		      io_task->bhs_pa.u.a64.address);
	SE_DEBUG(DBG_LVL_1, "Alloc of SGL_ICD Failed \n");
	return -ENOMEM;
}
3493
/**
 * beiscsi_cleanup_task - release per-task resources after completion
 * @task: libiscsi task being cleaned up
 *
 * Returns the WRB handle, BHS buffer and SGL handle acquired in
 * beiscsi_alloc_pdu() to their pools.  Login tasks are skipped for the
 * mgmt SGL handle because all logins on a connection share one handle
 * (see beiscsi_alloc_pdu).
 */
static void beiscsi_cleanup_task(struct iscsi_task *task)
{
	struct beiscsi_io_task *io_task = task->dd_data;
	struct iscsi_conn *conn = task->conn;
	struct beiscsi_conn *beiscsi_conn = conn->dd_data;
	struct beiscsi_hba *phba = beiscsi_conn->phba;
	struct beiscsi_session *beiscsi_sess = beiscsi_conn->beiscsi_sess;
	struct hwi_wrb_context *pwrb_context;
	struct hwi_controller *phwi_ctrlr;

	phwi_ctrlr = phba->phwi_ctrlr;
	pwrb_context = &phwi_ctrlr->wrb_context[beiscsi_conn->beiscsi_conn_cid
			- phba->fw_config.iscsi_cid_start];
	if (io_task->pwrb_handle) {
		free_wrb_handle(phba, pwrb_context, io_task->pwrb_handle);
		io_task->pwrb_handle = NULL;
	}

	if (io_task->cmd_bhs) {
		pci_pool_free(beiscsi_sess->bhs_pool, io_task->cmd_bhs,
			      io_task->bhs_pa.u.a64.address);
	}

	if (task->sc) {
		/* SCSI command: hand the SGL handle back to the I/O pool. */
		if (io_task->psgl_handle) {
			spin_lock(&phba->io_sgl_lock);
			free_io_sgl_handle(phba, io_task->psgl_handle);
			spin_unlock(&phba->io_sgl_lock);
			io_task->psgl_handle = NULL;
		}
	} else {
		/* Login handles are shared; never freed per-task. */
		if ((task->hdr->opcode & ISCSI_OPCODE_MASK) == ISCSI_OP_LOGIN)
			return;
		if (io_task->psgl_handle) {
			spin_lock(&phba->mgmt_sgl_lock);
			free_mgmt_sgl_handle(phba, io_task->psgl_handle);
			spin_unlock(&phba->mgmt_sgl_lock);
			io_task->psgl_handle = NULL;
		}
	}
}
3535
/**
 * beiscsi_iotask - build and submit the WRB for a SCSI data command
 * @task: libiscsi task carrying the SCSI command
 * @sg: mapped scatterlist of the data buffer
 * @num_sg: number of scatterlist entries
 * @xferlen: total transfer length in bytes
 * @writedir: non-zero for a write (host to target), zero for a read
 *
 * Fills the task's WRB with the command parameters and SGL, converts
 * it to little-endian, and rings the TX doorbell.  Always returns 0.
 */
static int beiscsi_iotask(struct iscsi_task *task, struct scatterlist *sg,
			  unsigned int num_sg, unsigned int xferlen,
			  unsigned int writedir)
{

	struct beiscsi_io_task *io_task = task->dd_data;
	struct iscsi_conn *conn = task->conn;
	struct beiscsi_conn *beiscsi_conn = conn->dd_data;
	struct beiscsi_hba *phba = beiscsi_conn->phba;
	struct iscsi_wrb *pwrb = NULL;
	unsigned int doorbell = 0;

	pwrb = io_task->pwrb_handle->pwrb;
	io_task->cmd_bhs->iscsi_hdr.exp_statsn = 0;
	io_task->bhs_len = sizeof(struct be_cmd_bhs);

	if (writedir) {
		/* Prime a Data-Out template the hw uses for solicited data. */
		memset(&io_task->cmd_bhs->iscsi_data_pdu, 0, 48);
		AMAP_SET_BITS(struct amap_pdu_data_out, itt,
			      &io_task->cmd_bhs->iscsi_data_pdu,
			      (unsigned int)io_task->cmd_bhs->iscsi_hdr.itt);
		AMAP_SET_BITS(struct amap_pdu_data_out, opcode,
			      &io_task->cmd_bhs->iscsi_data_pdu,
			      ISCSI_OPCODE_SCSI_DATA_OUT);
		AMAP_SET_BITS(struct amap_pdu_data_out, final_bit,
			      &io_task->cmd_bhs->iscsi_data_pdu, 1);
		AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb,
			      INI_WR_CMD);
		AMAP_SET_BITS(struct amap_iscsi_wrb, dsp, pwrb, 1);
	} else {
		AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb,
			      INI_RD_CMD);
		AMAP_SET_BITS(struct amap_iscsi_wrb, dsp, pwrb, 0);
	}
	/* Mirror the LUN from the command BHS into the Data-Out template. */
	memcpy(&io_task->cmd_bhs->iscsi_data_pdu.
	       dw[offsetof(struct amap_pdu_data_out, lun) / 32],
	       io_task->cmd_bhs->iscsi_hdr.lun, sizeof(struct scsi_lun));

	AMAP_SET_BITS(struct amap_iscsi_wrb, lun, pwrb,
		      cpu_to_be16((unsigned short)io_task->cmd_bhs->iscsi_hdr.
				  lun[0]));
	AMAP_SET_BITS(struct amap_iscsi_wrb, r2t_exp_dtl, pwrb, xferlen);
	AMAP_SET_BITS(struct amap_iscsi_wrb, wrb_idx, pwrb,
		      io_task->pwrb_handle->wrb_index);
	AMAP_SET_BITS(struct amap_iscsi_wrb, cmdsn_itt, pwrb,
		      be32_to_cpu(task->cmdsn));
	AMAP_SET_BITS(struct amap_iscsi_wrb, sgl_icd_idx, pwrb,
		      io_task->psgl_handle->sgl_index);

	hwi_write_sgl(pwrb, sg, num_sg, io_task);

	AMAP_SET_BITS(struct amap_iscsi_wrb, ptr2nextwrb, pwrb,
		      io_task->pwrb_handle->nxt_wrb_index);
	be_dws_le_to_cpu(pwrb, sizeof(struct iscsi_wrb));

	/* Doorbell: CID, WRB index, one entry posted. */
	doorbell |= beiscsi_conn->beiscsi_conn_cid & DB_WRB_POST_CID_MASK;
	doorbell |= (io_task->pwrb_handle->wrb_index &
		     DB_DEF_PDU_WRB_INDEX_MASK) << DB_DEF_PDU_WRB_INDEX_SHIFT;
	doorbell |= 1 << DB_DEF_PDU_NUM_POSTED_SHIFT;

	iowrite32(doorbell, phba->db_va + DB_TXULP0_OFFSET);
	return 0;
}
3599
/**
 * beiscsi_mtask - build and submit the WRB for a management task
 * @task: libiscsi task (login, nop-out, text, tmf or logout)
 *
 * Programs the WRB type per the iSCSI opcode, writes the task's data
 * buffer, converts the WRB to little-endian and rings the TX doorbell.
 *
 * Returns 0 on success or -EINVAL for an unsupported opcode.
 */
static int beiscsi_mtask(struct iscsi_task *task)
{
	struct beiscsi_io_task *io_task = task->dd_data;
	struct iscsi_conn *conn = task->conn;
	struct beiscsi_conn *beiscsi_conn = conn->dd_data;
	struct beiscsi_hba *phba = beiscsi_conn->phba;
	struct iscsi_wrb *pwrb = NULL;
	unsigned int doorbell = 0;
	unsigned int cid;

	cid = beiscsi_conn->beiscsi_conn_cid;
	pwrb = io_task->pwrb_handle->pwrb;
	memset(pwrb, 0, sizeof(*pwrb));
	AMAP_SET_BITS(struct amap_iscsi_wrb, cmdsn_itt, pwrb,
		      be32_to_cpu(task->cmdsn));
	AMAP_SET_BITS(struct amap_iscsi_wrb, wrb_idx, pwrb,
		      io_task->pwrb_handle->wrb_index);
	AMAP_SET_BITS(struct amap_iscsi_wrb, sgl_icd_idx, pwrb,
		      io_task->psgl_handle->sgl_index);

	switch (task->hdr->opcode & ISCSI_OPCODE_MASK) {
	case ISCSI_OP_LOGIN:
		AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb,
			      TGT_DM_CMD);
		AMAP_SET_BITS(struct amap_iscsi_wrb, dmsg, pwrb, 0);
		/* Login always uses cmdsn_itt 1 — overrides the value set
		 * above.
		 */
		AMAP_SET_BITS(struct amap_iscsi_wrb, cmdsn_itt, pwrb, 1);
		hwi_write_buffer(pwrb, task);
		break;
	case ISCSI_OP_NOOP_OUT:
		AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb,
			      INI_RD_CMD);
		/* dmsg set only when the target should answer (real ttt). */
		if (task->hdr->ttt == ISCSI_RESERVED_TAG)
			AMAP_SET_BITS(struct amap_iscsi_wrb, dmsg, pwrb, 0);
		else
			AMAP_SET_BITS(struct amap_iscsi_wrb, dmsg, pwrb, 1);
		hwi_write_buffer(pwrb, task);
		break;
	case ISCSI_OP_TEXT:
		AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb,
			      TGT_DM_CMD);
		AMAP_SET_BITS(struct amap_iscsi_wrb, dmsg, pwrb, 0);
		hwi_write_buffer(pwrb, task);
		break;
	case ISCSI_OP_SCSI_TMFUNC:
		AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb,
			      INI_TMF_CMD);
		AMAP_SET_BITS(struct amap_iscsi_wrb, dmsg, pwrb, 0);
		hwi_write_buffer(pwrb, task);
		break;
	case ISCSI_OP_LOGOUT:
		AMAP_SET_BITS(struct amap_iscsi_wrb, dmsg, pwrb, 0);
		AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb,
			      HWH_TYPE_LOGOUT);
		hwi_write_buffer(pwrb, task);
		break;

	default:
		SE_DEBUG(DBG_LVL_1, "opcode =%d Not supported \n",
			 task->hdr->opcode & ISCSI_OPCODE_MASK);
		return -EINVAL;
	}

	AMAP_SET_BITS(struct amap_iscsi_wrb, r2t_exp_dtl, pwrb,
		      task->data_count);
	AMAP_SET_BITS(struct amap_iscsi_wrb, ptr2nextwrb, pwrb,
		      io_task->pwrb_handle->nxt_wrb_index);
	be_dws_le_to_cpu(pwrb, sizeof(struct iscsi_wrb));

	/* Doorbell: CID, WRB index, one entry posted. */
	doorbell |= cid & DB_WRB_POST_CID_MASK;
	doorbell |= (io_task->pwrb_handle->wrb_index &
		     DB_DEF_PDU_WRB_INDEX_MASK) << DB_DEF_PDU_WRB_INDEX_SHIFT;
	doorbell |= 1 << DB_DEF_PDU_NUM_POSTED_SHIFT;
	iowrite32(doorbell, phba->db_va + DB_TXULP0_OFFSET);
	return 0;
}
3675
3676static int beiscsi_task_xmit(struct iscsi_task *task)
3677{
3678	struct beiscsi_io_task *io_task = task->dd_data;
3679	struct scsi_cmnd *sc = task->sc;
3680	struct scatterlist *sg;
3681	int num_sg;
3682	unsigned int  writedir = 0, xferlen = 0;
3683
3684	if (!sc)
3685		return beiscsi_mtask(task);
3686
3687	io_task->scsi_cmnd = sc;
3688	num_sg = scsi_dma_map(sc);
3689	if (num_sg < 0) {
3690		SE_DEBUG(DBG_LVL_1, " scsi_dma_map Failed\n")
3691		return num_sg;
3692	}
3693	SE_DEBUG(DBG_LVL_4, "xferlen=0x%08x scmd=%p num_sg=%d sernum=%lu\n",
3694		  (scsi_bufflen(sc)), sc, num_sg, sc->serial_number);
3695	xferlen = scsi_bufflen(sc);
3696	sg = scsi_sglist(sc);
3697	if (sc->sc_data_direction == DMA_TO_DEVICE) {
3698		writedir = 1;
3699		SE_DEBUG(DBG_LVL_4, "task->imm_count=0x%08x \n",
3700			 task->imm_count);
3701	} else
3702		writedir = 0;
3703	return beiscsi_iotask(task, sg, num_sg, xferlen, writedir);
3704}
3705
/**
 * beiscsi_remove - PCI remove callback; tear down the adapter
 * @pcidev: PCI device being removed
 *
 * Disables interrupts, frees the MSI-X or INTx IRQs, stops the work
 * queue and iopoll instances, cleans up the port and driver memory,
 * unmaps the BARs, releases the mailbox DMA buffer and finally the
 * SCSI host.
 */
static void beiscsi_remove(struct pci_dev *pcidev)
{
	struct beiscsi_hba *phba = NULL;
	struct hwi_controller *phwi_ctrlr;
	struct hwi_context_memory *phwi_context;
	struct be_eq_obj *pbe_eq;
	unsigned int i, msix_vec;

	phba = (struct beiscsi_hba *)pci_get_drvdata(pcidev);
	if (!phba) {
		dev_err(&pcidev->dev, "beiscsi_remove called with no phba \n");
		return;
	}

	phwi_ctrlr = phba->phwi_ctrlr;
	phwi_context = phwi_ctrlr->phwi_ctxt;
	hwi_disable_intr(phba);
	if (phba->msix_enabled) {
		/* <= : one vector per CPU EQ plus one for the MCC EQ. */
		for (i = 0; i <= phba->num_cpus; i++) {
			msix_vec = phba->msix_entries[i].vector;
			free_irq(msix_vec, &phwi_context->be_eq[i]);
		}
	} else
		if (phba->pcidev->irq)
			free_irq(phba->pcidev->irq, phba);
	pci_disable_msix(phba->pcidev);
	destroy_workqueue(phba->wq);
	if (blk_iopoll_enabled)
		for (i = 0; i < phba->num_cpus; i++) {
			pbe_eq = &phwi_context->be_eq[i];
			blk_iopoll_disable(&pbe_eq->iopoll);
		}

	beiscsi_clean_port(phba);
	beiscsi_free_mem(phba);
	beiscsi_unmap_pci_function(phba);
	pci_free_consistent(phba->pcidev,
			    phba->ctrl.mbox_mem_alloced.size,
			    phba->ctrl.mbox_mem_alloced.va,
			    phba->ctrl.mbox_mem_alloced.dma);
	iscsi_host_remove(phba->shost);
	pci_dev_put(phba->pcidev);
	iscsi_host_free(phba->shost);
}
3750
3751static void beiscsi_msix_enable(struct beiscsi_hba *phba)
3752{
3753	int i, status;
3754
3755	for (i = 0; i <= phba->num_cpus; i++)
3756		phba->msix_entries[i].entry = i;
3757
3758	status = pci_enable_msix(phba->pcidev, phba->msix_entries,
3759				 (phba->num_cpus + 1));
3760	if (!status)
3761		phba->msix_enabled = true;
3762
3763	return;
3764}
3765
3766static int __devinit beiscsi_dev_probe(struct pci_dev *pcidev,
3767				const struct pci_device_id *id)
3768{
3769	struct beiscsi_hba *phba = NULL;
3770	struct hwi_controller *phwi_ctrlr;
3771	struct hwi_context_memory *phwi_context;
3772	struct be_eq_obj *pbe_eq;
3773	int ret, msix_vec, num_cpus, i;
3774
3775	ret = beiscsi_enable_pci(pcidev);
3776	if (ret < 0) {
3777		dev_err(&pcidev->dev, "beiscsi_dev_probe-"
3778			" Failed to enable pci device\n");
3779		return ret;
3780	}
3781
3782	phba = beiscsi_hba_alloc(pcidev);
3783	if (!phba) {
3784		dev_err(&pcidev->dev, "beiscsi_dev_probe-"
3785			" Failed in beiscsi_hba_alloc \n");
3786		goto disable_pci;
3787	}
3788
3789	switch (pcidev->device) {
3790	case BE_DEVICE_ID1:
3791	case OC_DEVICE_ID1:
3792	case OC_DEVICE_ID2:
3793		phba->generation = BE_GEN2;
3794		break;
3795	case BE_DEVICE_ID2:
3796	case OC_DEVICE_ID3:
3797		phba->generation = BE_GEN3;
3798		break;
3799	default:
3800		phba->generation = 0;
3801	}
3802
3803	if (enable_msix)
3804		num_cpus = find_num_cpus();
3805	else
3806		num_cpus = 1;
3807	phba->num_cpus = num_cpus;
3808	SE_DEBUG(DBG_LVL_8, "num_cpus = %d \n", phba->num_cpus);
3809
3810	if (enable_msix)
3811		beiscsi_msix_enable(phba);
3812	ret = be_ctrl_init(phba, pcidev);
3813	if (ret) {
3814		shost_printk(KERN_ERR, phba->shost, "beiscsi_dev_probe-"
3815				"Failed in be_ctrl_init\n");
3816		goto hba_free;
3817	}
3818
3819	spin_lock_init(&phba->io_sgl_lock);
3820	spin_lock_init(&phba->mgmt_sgl_lock);
3821	spin_lock_init(&phba->isr_lock);
3822	ret = mgmt_get_fw_config(&phba->ctrl, phba);
3823	if (ret != 0) {
3824		shost_printk(KERN_ERR, phba->shost,
3825			     "Error getting fw config\n");
3826		goto free_port;
3827	}
3828	phba->shost->max_id = phba->fw_config.iscsi_cid_count;
3829	beiscsi_get_params(phba);
3830	phba->shost->can_queue = phba->params.ios_per_ctrl;
3831	ret = beiscsi_init_port(phba);
3832	if (ret < 0) {
3833		shost_printk(KERN_ERR, phba->shost, "beiscsi_dev_probe-"
3834			     "Failed in beiscsi_init_port\n");
3835		goto free_port;
3836	}
3837
3838	for (i = 0; i < MAX_MCC_CMD ; i++) {
3839		init_waitqueue_head(&phba->ctrl.mcc_wait[i + 1]);
3840		phba->ctrl.mcc_tag[i] = i + 1;
3841		phba->ctrl.mcc_numtag[i + 1] = 0;
3842		phba->ctrl.mcc_tag_available++;
3843	}
3844
3845	phba->ctrl.mcc_alloc_index = phba->ctrl.mcc_free_index = 0;
3846
3847	snprintf(phba->wq_name, sizeof(phba->wq_name), "beiscsi_q_irq%u",
3848		 phba->shost->host_no);
3849	phba->wq = create_workqueue(phba->wq_name);
3850	if (!phba->wq) {
3851		shost_printk(KERN_ERR, phba->shost, "beiscsi_dev_probe-"
3852				"Failed to allocate work queue\n");
3853		goto free_twq;
3854	}
3855
3856	INIT_WORK(&phba->work_cqs, beiscsi_process_all_cqs);
3857
3858	phwi_ctrlr = phba->phwi_ctrlr;
3859	phwi_context = phwi_ctrlr->phwi_ctxt;
3860	if (blk_iopoll_enabled) {
3861		for (i = 0; i < phba->num_cpus; i++) {
3862			pbe_eq = &phwi_context->be_eq[i];
3863			blk_iopoll_init(&pbe_eq->iopoll, be_iopoll_budget,
3864					be_iopoll);
3865			blk_iopoll_enable(&pbe_eq->iopoll);
3866		}
3867	}
3868	ret = beiscsi_init_irqs(phba);
3869	if (ret < 0) {
3870		shost_printk(KERN_ERR, phba->shost, "beiscsi_dev_probe-"
3871			     "Failed to beiscsi_init_irqs\n");
3872		goto free_blkenbld;
3873	}
3874	ret = hwi_enable_intr(phba);
3875	if (ret < 0) {
3876		shost_printk(KERN_ERR, phba->shost, "beiscsi_dev_probe-"
3877			     "Failed to hwi_enable_intr\n");
3878		goto free_ctrlr;
3879	}
3880	SE_DEBUG(DBG_LVL_8, "\n\n\n SUCCESS - DRIVER LOADED \n\n\n");
3881	return 0;
3882
3883free_ctrlr:
3884	if (phba->msix_enabled) {
3885		for (i = 0; i <= phba->num_cpus; i++) {
3886			msix_vec = phba->msix_entries[i].vector;
3887			free_irq(msix_vec, &phwi_context->be_eq[i]);
3888		}
3889	} else
3890		if (phba->pcidev->irq)
3891			free_irq(phba->pcidev->irq, phba);
3892	pci_disable_msix(phba->pcidev);
3893free_blkenbld:
3894	destroy_workqueue(phba->wq);
3895	if (blk_iopoll_enabled)
3896		for (i = 0; i < phba->num_cpus; i++) {
3897			pbe_eq = &phwi_context->be_eq[i];
3898			blk_iopoll_disable(&pbe_eq->iopoll);
3899		}
3900free_twq:
3901	beiscsi_clean_port(phba);
3902	beiscsi_free_mem(phba);
3903free_port:
3904	pci_free_consistent(phba->pcidev,
3905			    phba->ctrl.mbox_mem_alloced.size,
3906			    phba->ctrl.mbox_mem_alloced.va,
3907			   phba->ctrl.mbox_mem_alloced.dma);
3908	beiscsi_unmap_pci_function(phba);
3909hba_free:
3910	iscsi_host_remove(phba->shost);
3911	pci_dev_put(phba->pcidev);
3912	iscsi_host_free(phba->shost);
3913disable_pci:
3914	pci_disable_device(pcidev);
3915	return ret;
3916}
3917
/*
 * iSCSI transport template registered with the open-iscsi transport
 * class in beiscsi_module_init().  It advertises the adapter's offload
 * capabilities and wires the transport callbacks to either beiscsi_*
 * hardware-specific handlers or generic libiscsi helpers.
 */
struct iscsi_transport beiscsi_iscsi_transport = {
	.owner = THIS_MODULE,
	.name = DRV_NAME,
	/* Capability flags exposed to the iSCSI midlayer; the adapter
	 * offloads the full data path (CAP_DATA_PATH_OFFLOAD). */
	.caps = CAP_RECOVERY_L0 | CAP_HDRDGST | CAP_TEXT_NEGO |
		CAP_MULTI_R2T | CAP_DATADGST | CAP_DATA_PATH_OFFLOAD,
	/* Session/connection parameters that userspace may get/set
	 * through the iSCSI netlink interface. */
	.param_mask = ISCSI_MAX_RECV_DLENGTH |
		ISCSI_MAX_XMIT_DLENGTH |
		ISCSI_HDRDGST_EN |
		ISCSI_DATADGST_EN |
		ISCSI_INITIAL_R2T_EN |
		ISCSI_MAX_R2T |
		ISCSI_IMM_DATA_EN |
		ISCSI_FIRST_BURST |
		ISCSI_MAX_BURST |
		ISCSI_PDU_INORDER_EN |
		ISCSI_DATASEQ_INORDER_EN |
		ISCSI_ERL |
		ISCSI_CONN_PORT |
		ISCSI_CONN_ADDRESS |
		ISCSI_EXP_STATSN |
		ISCSI_PERSISTENT_PORT |
		ISCSI_PERSISTENT_ADDRESS |
		ISCSI_TARGET_NAME | ISCSI_TPGT |
		ISCSI_USERNAME | ISCSI_PASSWORD |
		ISCSI_USERNAME_IN | ISCSI_PASSWORD_IN |
		ISCSI_FAST_ABORT | ISCSI_ABORT_TMO |
		ISCSI_LU_RESET_TMO |
		ISCSI_PING_TMO | ISCSI_RECV_TMO |
		ISCSI_IFACE_NAME | ISCSI_INITIATOR_NAME,
	/* Host-level parameters readable through sysfs/netlink. */
	.host_param_mask = ISCSI_HOST_HWADDRESS | ISCSI_HOST_IPADDRESS |
				ISCSI_HOST_INITIATOR_NAME,
	/* Session/connection lifecycle callbacks. */
	.create_session = beiscsi_session_create,
	.destroy_session = beiscsi_session_destroy,
	.create_conn = beiscsi_conn_create,
	.bind_conn = beiscsi_conn_bind,
	.destroy_conn = iscsi_conn_teardown,
	.set_param = beiscsi_set_param,
	.get_conn_param = beiscsi_conn_get_param,
	.get_session_param = iscsi_session_get_param,
	.get_host_param = beiscsi_get_host_param,
	.start_conn = beiscsi_conn_start,
	.stop_conn = iscsi_conn_stop,
	.send_pdu = iscsi_conn_send_pdu,
	/* Task/PDU handling (hardware-assisted). */
	.xmit_task = beiscsi_task_xmit,
	.cleanup_task = beiscsi_cleanup_task,
	.alloc_pdu = beiscsi_alloc_pdu,
	.parse_pdu_itt = beiscsi_parse_pdu,
	.get_stats = beiscsi_conn_get_stats,
	/* Endpoint (TCP connection offload) management. */
	.ep_connect = beiscsi_ep_connect,
	.ep_poll = beiscsi_ep_poll,
	.ep_disconnect = beiscsi_ep_disconnect,
	.session_recovery_timedout = iscsi_session_recovery_timedout,
};
3971
/*
 * PCI driver descriptor registered in beiscsi_module_init().
 * beiscsi_pci_id_table (declared earlier in this file) lists the
 * supported ServerEngines/Emulex device IDs.
 */
static struct pci_driver beiscsi_pci_driver = {
	.name = DRV_NAME,
	.probe = beiscsi_dev_probe,
	.remove = beiscsi_remove,
	.id_table = beiscsi_pci_id_table
};
3978
3979
3980static int __init beiscsi_module_init(void)
3981{
3982	int ret;
3983
3984	beiscsi_scsi_transport =
3985			iscsi_register_transport(&beiscsi_iscsi_transport);
3986	if (!beiscsi_scsi_transport) {
3987		SE_DEBUG(DBG_LVL_1,
3988			 "beiscsi_module_init - Unable to  register beiscsi"
3989			 "transport.\n");
3990		return -ENOMEM;
3991	}
3992	SE_DEBUG(DBG_LVL_8, "In beiscsi_module_init, tt=%p \n",
3993		 &beiscsi_iscsi_transport);
3994
3995	ret = pci_register_driver(&beiscsi_pci_driver);
3996	if (ret) {
3997		SE_DEBUG(DBG_LVL_1,
3998			 "beiscsi_module_init - Unable to  register"
3999			 "beiscsi pci driver.\n");
4000		goto unregister_iscsi_transport;
4001	}
4002	return 0;
4003
4004unregister_iscsi_transport:
4005	iscsi_unregister_transport(&beiscsi_iscsi_transport);
4006	return ret;
4007}
4008
/**
 * beiscsi_module_exit - module unload entry point
 *
 * Tears down in the reverse order of beiscsi_module_init(): first
 * unregisters the PCI driver (detaching all adapter instances), then
 * unregisters the iSCSI transport.
 */
static void __exit beiscsi_module_exit(void)
{
	pci_unregister_driver(&beiscsi_pci_driver);
	iscsi_unregister_transport(&beiscsi_iscsi_transport);
}
4014
/* Hook the load/unload routines into the module infrastructure. */
module_init(beiscsi_module_init);
module_exit(beiscsi_module_exit);
4017