be_main.c revision c03af1ae1cce97a5530b907ea03625ce6e00214e
1/**
2 * Copyright (C) 2005 - 2010 ServerEngines
3 * All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License version 2
7 * as published by the Free Software Foundation.  The full GNU General
8 * Public License is included in this distribution in the file called COPYING.
9 *
10 * Written by: Jayamohan Kallickal (jayamohank@serverengines.com)
11 *
12 * Contact Information:
13 * linux-drivers@serverengines.com
14 *
15 *  ServerEngines
16 * 209 N. Fair Oaks Ave
17 * Sunnyvale, CA 94085
18 *
19 */
20#include <linux/reboot.h>
21#include <linux/delay.h>
22#include <linux/interrupt.h>
23#include <linux/blkdev.h>
24#include <linux/pci.h>
25#include <linux/string.h>
26#include <linux/kernel.h>
27#include <linux/semaphore.h>
28
29#include <scsi/libiscsi.h>
30#include <scsi/scsi_transport_iscsi.h>
31#include <scsi/scsi_transport.h>
32#include <scsi/scsi_cmnd.h>
33#include <scsi/scsi_device.h>
34#include <scsi/scsi_host.h>
35#include <scsi/scsi.h>
36#include "be_main.h"
37#include "be_iscsi.h"
38#include "be_mgmt.h"
39
40static unsigned int be_iopoll_budget = 10;
41static unsigned int be_max_phys_size = 64;
42static unsigned int enable_msix = 1;
43
44MODULE_DEVICE_TABLE(pci, beiscsi_pci_id_table);
45MODULE_DESCRIPTION(DRV_DESC " " BUILD_STR);
46MODULE_AUTHOR("ServerEngines Corporation");
47MODULE_LICENSE("GPL");
48module_param(be_iopoll_budget, int, 0);
49module_param(enable_msix, int, 0);
50module_param(be_max_phys_size, uint, S_IRUGO);
51MODULE_PARM_DESC(be_max_phys_size, "Maximum Size (In Kilobytes) of physically"
52				   "contiguous memory that can be allocated."
53				   "Range is 16 - 128");
54
55static int beiscsi_slave_configure(struct scsi_device *sdev)
56{
57	blk_queue_max_segment_size(sdev->request_queue, 65536);
58	return 0;
59}
60
/**
 * beiscsi_eh_abort - SCSI EH abort handler
 * @sc: command to abort
 *
 * Asks the adapter (via an MCC invalidate command) to invalidate the
 * ICD backing @sc, waits for that MCC completion, and then defers the
 * actual iSCSI-level abort to libiscsi's iscsi_eh_abort().
 * Returns SUCCESS if the command already completed (race), FAILED if
 * the invalidate could not be submitted, otherwise iscsi_eh_abort()'s
 * result.
 */
static int beiscsi_eh_abort(struct scsi_cmnd *sc)
{
	struct iscsi_cls_session *cls_session;
	struct iscsi_task *aborted_task = (struct iscsi_task *)sc->SCp.ptr;
	struct beiscsi_io_task *aborted_io_task;
	struct iscsi_conn *conn;
	struct beiscsi_conn *beiscsi_conn;
	struct beiscsi_hba *phba;
	struct iscsi_session *session;
	struct invalidate_command_table *inv_tbl;
	unsigned int cid, tag, num_invalidate;

	cls_session = starget_to_session(scsi_target(sc->device));
	session = cls_session->dd_data;

	/* Validate the task under the session lock: completion may race
	 * with this abort. */
	spin_lock_bh(&session->lock);
	if (!aborted_task || !aborted_task->sc) {
		/* we raced */
		spin_unlock_bh(&session->lock);
		return SUCCESS;
	}

	aborted_io_task = aborted_task->dd_data;
	if (!aborted_io_task->scsi_cmnd) {
		/* raced or invalid command */
		spin_unlock_bh(&session->lock);
		return SUCCESS;
	}
	spin_unlock_bh(&session->lock);
	conn = aborted_task->conn;
	beiscsi_conn = conn->dd_data;
	phba = beiscsi_conn->phba;

	/* invalidate iocb */
	cid = beiscsi_conn->beiscsi_conn_cid;
	inv_tbl = phba->inv_tbl;
	memset(inv_tbl, 0x0, sizeof(*inv_tbl));
	inv_tbl->cid = cid;
	inv_tbl->icd = aborted_io_task->psgl_handle->sgl_index;
	num_invalidate = 1;
	tag = mgmt_invalidate_icds(phba, inv_tbl, num_invalidate, cid);
	if (!tag) {
		shost_printk(KERN_WARNING, phba->shost,
			     "mgmt_invalidate_icds could not be"
			     " submitted\n");
		return FAILED;
	} else {
		/* Wait for the MCC completion for this tag, then recycle
		 * the tag.  NOTE(review): the interruptible wait's return
		 * value is unchecked - a signal would proceed early. */
		wait_event_interruptible(phba->ctrl.mcc_wait[tag],
					 phba->ctrl.mcc_numtag[tag]);
		free_mcc_tag(&phba->ctrl, tag);
	}

	return iscsi_eh_abort(sc);
}
115
116static int beiscsi_eh_device_reset(struct scsi_cmnd *sc)
117{
118	struct iscsi_task *abrt_task;
119	struct beiscsi_io_task *abrt_io_task;
120	struct iscsi_conn *conn;
121	struct beiscsi_conn *beiscsi_conn;
122	struct beiscsi_hba *phba;
123	struct iscsi_session *session;
124	struct iscsi_cls_session *cls_session;
125	struct invalidate_command_table *inv_tbl;
126	unsigned int cid, tag, i, num_invalidate;
127	int rc = FAILED;
128
129	/* invalidate iocbs */
130	cls_session = starget_to_session(scsi_target(sc->device));
131	session = cls_session->dd_data;
132	spin_lock_bh(&session->lock);
133	if (!session->leadconn || session->state != ISCSI_STATE_LOGGED_IN)
134		goto unlock;
135
136	conn = session->leadconn;
137	beiscsi_conn = conn->dd_data;
138	phba = beiscsi_conn->phba;
139	cid = beiscsi_conn->beiscsi_conn_cid;
140	inv_tbl = phba->inv_tbl;
141	memset(inv_tbl, 0x0, sizeof(*inv_tbl) * BE2_CMDS_PER_CXN);
142	num_invalidate = 0;
143	for (i = 0; i < conn->session->cmds_max; i++) {
144		abrt_task = conn->session->cmds[i];
145		abrt_io_task = abrt_task->dd_data;
146		if (!abrt_task->sc || abrt_task->state == ISCSI_TASK_FREE)
147			continue;
148
149		if (abrt_task->sc->device->lun != abrt_task->sc->device->lun)
150			continue;
151
152		inv_tbl->cid = cid;
153		inv_tbl->icd = abrt_io_task->psgl_handle->sgl_index;
154		num_invalidate++;
155		inv_tbl++;
156	}
157	spin_unlock_bh(&session->lock);
158	inv_tbl = phba->inv_tbl;
159
160	tag = mgmt_invalidate_icds(phba, inv_tbl, num_invalidate, cid);
161	if (!tag) {
162		shost_printk(KERN_WARNING, phba->shost,
163			     "mgmt_invalidate_icds could not be"
164			     " submitted\n");
165		return FAILED;
166	} else {
167		wait_event_interruptible(phba->ctrl.mcc_wait[tag],
168					 phba->ctrl.mcc_numtag[tag]);
169		free_mcc_tag(&phba->ctrl, tag);
170	}
171
172	return iscsi_eh_device_reset(sc);
173unlock:
174	spin_unlock_bh(&session->lock);
175	return rc;
176}
177
/*------------------- PCI Driver operations and data ----------------- */
/* PCI IDs of the BladeEngine (BE) and OneConnect (OC) iSCSI functions
 * this driver binds to; zero entry terminates the table. */
static DEFINE_PCI_DEVICE_TABLE(beiscsi_pci_id_table) = {
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID3) },
	{ 0 }
};
MODULE_DEVICE_TABLE(pci, beiscsi_pci_id_table);
188
/* SCSI host template: command queueing and queue-depth changes are
 * delegated to libiscsi; error handling goes through the beiscsi EH
 * handlers above (abort, device reset) and libiscsi session reset. */
static struct scsi_host_template beiscsi_sht = {
	.module = THIS_MODULE,
	.name = "ServerEngines 10Gbe open-iscsi Initiator Driver",
	.proc_name = DRV_NAME,
	.queuecommand = iscsi_queuecommand,
	.change_queue_depth = iscsi_change_queue_depth,
	.slave_configure = beiscsi_slave_configure,
	.target_alloc = iscsi_target_alloc,
	.eh_abort_handler = beiscsi_eh_abort,
	.eh_device_reset_handler = beiscsi_eh_device_reset,
	.eh_target_reset_handler = iscsi_eh_session_reset,
	.sg_tablesize = BEISCSI_SGLIST_ELEMENTS,
	.can_queue = BE2_IO_DEPTH,
	.this_id = -1,
	.max_sectors = BEISCSI_MAX_SECTORS,
	.cmd_per_lun = BEISCSI_CMD_PER_LUN,
	.use_clustering = ENABLE_CLUSTERING,
};
207
208static struct scsi_transport_template *beiscsi_scsi_transport;
209
210static struct beiscsi_hba *beiscsi_hba_alloc(struct pci_dev *pcidev)
211{
212	struct beiscsi_hba *phba;
213	struct Scsi_Host *shost;
214
215	shost = iscsi_host_alloc(&beiscsi_sht, sizeof(*phba), 0);
216	if (!shost) {
217		dev_err(&pcidev->dev, "beiscsi_hba_alloc -"
218			"iscsi_host_alloc failed \n");
219		return NULL;
220	}
221	shost->dma_boundary = pcidev->dma_mask;
222	shost->max_id = BE2_MAX_SESSIONS;
223	shost->max_channel = 0;
224	shost->max_cmd_len = BEISCSI_MAX_CMD_LEN;
225	shost->max_lun = BEISCSI_NUM_MAX_LUN;
226	shost->transportt = beiscsi_scsi_transport;
227	phba = iscsi_host_priv(shost);
228	memset(phba, 0, sizeof(*phba));
229	phba->shost = shost;
230	phba->pcidev = pci_dev_get(pcidev);
231	pci_set_drvdata(pcidev, phba);
232
233	if (iscsi_host_add(shost, &phba->pcidev->dev))
234		goto free_devices;
235	return phba;
236
237free_devices:
238	pci_dev_put(phba->pcidev);
239	iscsi_host_free(phba->shost);
240	return NULL;
241}
242
/* Tear down whichever BAR mappings exist (CSR, doorbell, PCI config
 * shadow).  The NULL checks make this safe to call on a partially
 * mapped device, e.g. from beiscsi_map_pci_bars()' error path. */
static void beiscsi_unmap_pci_function(struct beiscsi_hba *phba)
{
	if (phba->csr_va) {
		iounmap(phba->csr_va);
		phba->csr_va = NULL;
	}
	if (phba->db_va) {
		iounmap(phba->db_va);
		phba->db_va = NULL;
	}
	if (phba->pci_va) {
		iounmap(phba->pci_va);
		phba->pci_va = NULL;
	}
}
258
/*
 * Map the PCI BARs the driver uses: BAR2 for the CSR block, the first
 * 128KB of BAR4 for the doorbells, and the PCI config shadow (BAR1 on
 * Gen2 hardware, BAR0 otherwise).  Both the ctrl pointers and the
 * phba->*_va/*_pa copies are populated.  Returns 0 or -ENOMEM; on
 * failure any mappings already made are unwound.
 */
static int beiscsi_map_pci_bars(struct beiscsi_hba *phba,
				struct pci_dev *pcidev)
{
	u8 __iomem *addr;
	int pcicfg_reg;

	/* BAR2: control/status registers. */
	addr = ioremap_nocache(pci_resource_start(pcidev, 2),
			       pci_resource_len(pcidev, 2));
	if (addr == NULL)
		return -ENOMEM;
	phba->ctrl.csr = addr;
	phba->csr_va = addr;
	phba->csr_pa.u.a64.address = pci_resource_start(pcidev, 2);

	/* BAR4: doorbell region (only the first 128KB is needed). */
	addr = ioremap_nocache(pci_resource_start(pcidev, 4), 128 * 1024);
	if (addr == NULL)
		goto pci_map_err;
	phba->ctrl.db = addr;
	phba->db_va = addr;
	phba->db_pa.u.a64.address =  pci_resource_start(pcidev, 4);

	/* PCI config shadow lives in a different BAR per generation. */
	if (phba->generation == BE_GEN2)
		pcicfg_reg = 1;
	else
		pcicfg_reg = 0;

	addr = ioremap_nocache(pci_resource_start(pcidev, pcicfg_reg),
			       pci_resource_len(pcidev, pcicfg_reg));

	if (addr == NULL)
		goto pci_map_err;
	phba->ctrl.pcicfg = addr;
	phba->pci_va = addr;
	phba->pci_pa.u.a64.address = pci_resource_start(pcidev, pcicfg_reg);
	return 0;

pci_map_err:
	beiscsi_unmap_pci_function(phba);
	return -ENOMEM;
}
299
/*
 * Enable the PCI function, turn on bus mastering, and configure the
 * consistent (coherent) DMA mask: 64-bit preferred, 32-bit fallback.
 * Returns 0 or a negative errno; the device is disabled again if the
 * DMA mask cannot be set.
 *
 * NOTE(review): only the coherent mask is set here - the streaming
 * mask (pci_set_dma_mask) is presumably handled elsewhere; verify.
 * NOTE(review): the error message mentions -ENODEV but the actual
 * pci_enable_device() error code is what gets returned.
 */
static int beiscsi_enable_pci(struct pci_dev *pcidev)
{
	int ret;

	ret = pci_enable_device(pcidev);
	if (ret) {
		dev_err(&pcidev->dev, "beiscsi_enable_pci - enable device "
			"failed. Returning -ENODEV\n");
		return ret;
	}

	pci_set_master(pcidev);
	if (pci_set_consistent_dma_mask(pcidev, DMA_BIT_MASK(64))) {
		ret = pci_set_consistent_dma_mask(pcidev, DMA_BIT_MASK(32));
		if (ret) {
			dev_err(&pcidev->dev, "Could not set PCI DMA Mask\n");
			pci_disable_device(pcidev);
			return ret;
		}
	}
	return 0;
}
322
/*
 * be_ctrl_init - map the BARs and set up the MBOX/MCC mailbox.
 *
 * The mailbox DMA buffer is over-allocated by 16 bytes so the mailbox
 * proper can be placed on a 16-byte boundary; both the raw allocation
 * (mbox_mem_alloced) and the aligned view (mbox_mem) are kept so the
 * buffer can be freed later.  Also initializes the mailbox and MCC
 * locks.  Returns 0 or a negative errno (BARs unmapped on failure).
 */
static int be_ctrl_init(struct beiscsi_hba *phba, struct pci_dev *pdev)
{
	struct be_ctrl_info *ctrl = &phba->ctrl;
	struct be_dma_mem *mbox_mem_alloc = &ctrl->mbox_mem_alloced;
	struct be_dma_mem *mbox_mem_align = &ctrl->mbox_mem;
	int status = 0;

	ctrl->pdev = pdev;
	status = beiscsi_map_pci_bars(phba, pdev);
	if (status)
		return status;
	/* +16 slack so the mailbox can be 16-byte aligned below. */
	mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
	mbox_mem_alloc->va = pci_alloc_consistent(pdev,
						  mbox_mem_alloc->size,
						  &mbox_mem_alloc->dma);
	if (!mbox_mem_alloc->va) {
		beiscsi_unmap_pci_function(phba);
		status = -ENOMEM;
		return status;
	}

	mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
	mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
	mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
	memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));
	spin_lock_init(&ctrl->mbox_lock);
	spin_lock_init(&phba->ctrl.mcc_lock);
	spin_lock_init(&phba->ctrl.mcc_cq_lock);

	return status;
}
354
355static void beiscsi_get_params(struct beiscsi_hba *phba)
356{
357	phba->params.ios_per_ctrl = (phba->fw_config.iscsi_icd_count
358				    - (phba->fw_config.iscsi_cid_count
359				    + BE2_TMFS
360				    + BE2_NOPOUT_REQ));
361	phba->params.cxns_per_ctrl = phba->fw_config.iscsi_cid_count;
362	phba->params.asyncpdus_per_ctrl = phba->fw_config.iscsi_cid_count * 2;
363	phba->params.icds_per_ctrl = phba->fw_config.iscsi_icd_count;;
364	phba->params.num_sge_per_io = BE2_SGE;
365	phba->params.defpdu_hdr_sz = BE2_DEFPDU_HDR_SZ;
366	phba->params.defpdu_data_sz = BE2_DEFPDU_DATA_SZ;
367	phba->params.eq_timer = 64;
368	phba->params.num_eq_entries =
369	    (((BE2_CMDS_PER_CXN * 2 + phba->fw_config.iscsi_cid_count * 2
370				    + BE2_TMFS) / 512) + 1) * 512;
371	phba->params.num_eq_entries = (phba->params.num_eq_entries < 1024)
372				? 1024 : phba->params.num_eq_entries;
373	SE_DEBUG(DBG_LVL_8, "phba->params.num_eq_entries=%d \n",
374			     phba->params.num_eq_entries);
375	phba->params.num_cq_entries =
376	    (((BE2_CMDS_PER_CXN * 2 +  phba->fw_config.iscsi_cid_count * 2
377				    + BE2_TMFS) / 512) + 1) * 512;
378	phba->params.wrbs_per_cxn = 256;
379}
380
381static void hwi_ring_eq_db(struct beiscsi_hba *phba,
382			   unsigned int id, unsigned int clr_interrupt,
383			   unsigned int num_processed,
384			   unsigned char rearm, unsigned char event)
385{
386	u32 val = 0;
387	val |= id & DB_EQ_RING_ID_MASK;
388	if (rearm)
389		val |= 1 << DB_EQ_REARM_SHIFT;
390	if (clr_interrupt)
391		val |= 1 << DB_EQ_CLR_SHIFT;
392	if (event)
393		val |= 1 << DB_EQ_EVNT_SHIFT;
394	val |= num_processed << DB_EQ_NUM_POPPED_SHIFT;
395	iowrite32(val, phba->db_va + DB_EQ_OFFSET);
396}
397
398/**
399 * be_isr_mcc - The isr routine of the driver.
400 * @irq: Not used
401 * @dev_id: Pointer to host adapter structure
402 */
403static irqreturn_t be_isr_mcc(int irq, void *dev_id)
404{
405	struct beiscsi_hba *phba;
406	struct be_eq_entry *eqe = NULL;
407	struct be_queue_info *eq;
408	struct be_queue_info *mcc;
409	unsigned int num_eq_processed;
410	struct be_eq_obj *pbe_eq;
411	unsigned long flags;
412
413	pbe_eq = dev_id;
414	eq = &pbe_eq->q;
415	phba =  pbe_eq->phba;
416	mcc = &phba->ctrl.mcc_obj.cq;
417	eqe = queue_tail_node(eq);
418	if (!eqe)
419		SE_DEBUG(DBG_LVL_1, "eqe is NULL\n");
420
421	num_eq_processed = 0;
422
423	while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32]
424				& EQE_VALID_MASK) {
425		if (((eqe->dw[offsetof(struct amap_eq_entry,
426		     resource_id) / 32] &
427		     EQE_RESID_MASK) >> 16) == mcc->id) {
428			spin_lock_irqsave(&phba->isr_lock, flags);
429			phba->todo_mcc_cq = 1;
430			spin_unlock_irqrestore(&phba->isr_lock, flags);
431		}
432		AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0);
433		queue_tail_inc(eq);
434		eqe = queue_tail_node(eq);
435		num_eq_processed++;
436	}
437	if (phba->todo_mcc_cq)
438		queue_work(phba->wq, &phba->work_cqs);
439	if (num_eq_processed)
440		hwi_ring_eq_db(phba, eq->id, 1,	num_eq_processed, 1, 1);
441
442	return IRQ_HANDLED;
443}
444
445/**
446 * be_isr_msix - The isr routine of the driver.
447 * @irq: Not used
448 * @dev_id: Pointer to host adapter structure
449 */
450static irqreturn_t be_isr_msix(int irq, void *dev_id)
451{
452	struct beiscsi_hba *phba;
453	struct be_eq_entry *eqe = NULL;
454	struct be_queue_info *eq;
455	struct be_queue_info *cq;
456	unsigned int num_eq_processed;
457	struct be_eq_obj *pbe_eq;
458	unsigned long flags;
459
460	pbe_eq = dev_id;
461	eq = &pbe_eq->q;
462	cq = pbe_eq->cq;
463	eqe = queue_tail_node(eq);
464	if (!eqe)
465		SE_DEBUG(DBG_LVL_1, "eqe is NULL\n");
466
467	phba = pbe_eq->phba;
468	num_eq_processed = 0;
469	if (blk_iopoll_enabled) {
470		while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32]
471					& EQE_VALID_MASK) {
472			if (!blk_iopoll_sched_prep(&pbe_eq->iopoll))
473				blk_iopoll_sched(&pbe_eq->iopoll);
474
475			AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0);
476			queue_tail_inc(eq);
477			eqe = queue_tail_node(eq);
478			num_eq_processed++;
479		}
480		if (num_eq_processed)
481			hwi_ring_eq_db(phba, eq->id, 1,	num_eq_processed, 0, 1);
482
483		return IRQ_HANDLED;
484	} else {
485		while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32]
486						& EQE_VALID_MASK) {
487			spin_lock_irqsave(&phba->isr_lock, flags);
488			phba->todo_cq = 1;
489			spin_unlock_irqrestore(&phba->isr_lock, flags);
490			AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0);
491			queue_tail_inc(eq);
492			eqe = queue_tail_node(eq);
493			num_eq_processed++;
494		}
495		if (phba->todo_cq)
496			queue_work(phba->wq, &phba->work_cqs);
497
498		if (num_eq_processed)
499			hwi_ring_eq_db(phba, eq->id, 1, num_eq_processed, 1, 1);
500
501		return IRQ_HANDLED;
502	}
503}
504
505/**
506 * be_isr - The isr routine of the driver.
507 * @irq: Not used
508 * @dev_id: Pointer to host adapter structure
509 */
510static irqreturn_t be_isr(int irq, void *dev_id)
511{
512	struct beiscsi_hba *phba;
513	struct hwi_controller *phwi_ctrlr;
514	struct hwi_context_memory *phwi_context;
515	struct be_eq_entry *eqe = NULL;
516	struct be_queue_info *eq;
517	struct be_queue_info *cq;
518	struct be_queue_info *mcc;
519	unsigned long flags, index;
520	unsigned int num_mcceq_processed, num_ioeq_processed;
521	struct be_ctrl_info *ctrl;
522	struct be_eq_obj *pbe_eq;
523	int isr;
524
525	phba = dev_id;
526	ctrl = &phba->ctrl;;
527	isr = ioread32(ctrl->csr + CEV_ISR0_OFFSET +
528		       (PCI_FUNC(ctrl->pdev->devfn) * CEV_ISR_SIZE));
529	if (!isr)
530		return IRQ_NONE;
531
532	phwi_ctrlr = phba->phwi_ctrlr;
533	phwi_context = phwi_ctrlr->phwi_ctxt;
534	pbe_eq = &phwi_context->be_eq[0];
535
536	eq = &phwi_context->be_eq[0].q;
537	mcc = &phba->ctrl.mcc_obj.cq;
538	index = 0;
539	eqe = queue_tail_node(eq);
540	if (!eqe)
541		SE_DEBUG(DBG_LVL_1, "eqe is NULL\n");
542
543	num_ioeq_processed = 0;
544	num_mcceq_processed = 0;
545	if (blk_iopoll_enabled) {
546		while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32]
547					& EQE_VALID_MASK) {
548			if (((eqe->dw[offsetof(struct amap_eq_entry,
549			     resource_id) / 32] &
550			     EQE_RESID_MASK) >> 16) == mcc->id) {
551				spin_lock_irqsave(&phba->isr_lock, flags);
552				phba->todo_mcc_cq = 1;
553				spin_unlock_irqrestore(&phba->isr_lock, flags);
554				num_mcceq_processed++;
555			} else {
556				if (!blk_iopoll_sched_prep(&pbe_eq->iopoll))
557					blk_iopoll_sched(&pbe_eq->iopoll);
558				num_ioeq_processed++;
559			}
560			AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0);
561			queue_tail_inc(eq);
562			eqe = queue_tail_node(eq);
563		}
564		if (num_ioeq_processed || num_mcceq_processed) {
565			if (phba->todo_mcc_cq)
566				queue_work(phba->wq, &phba->work_cqs);
567
568			if ((num_mcceq_processed) && (!num_ioeq_processed))
569				hwi_ring_eq_db(phba, eq->id, 0,
570					      (num_ioeq_processed +
571					       num_mcceq_processed) , 1, 1);
572			else
573				hwi_ring_eq_db(phba, eq->id, 0,
574					       (num_ioeq_processed +
575						num_mcceq_processed), 0, 1);
576
577			return IRQ_HANDLED;
578		} else
579			return IRQ_NONE;
580	} else {
581		cq = &phwi_context->be_cq[0];
582		while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32]
583						& EQE_VALID_MASK) {
584
585			if (((eqe->dw[offsetof(struct amap_eq_entry,
586			     resource_id) / 32] &
587			     EQE_RESID_MASK) >> 16) != cq->id) {
588				spin_lock_irqsave(&phba->isr_lock, flags);
589				phba->todo_mcc_cq = 1;
590				spin_unlock_irqrestore(&phba->isr_lock, flags);
591			} else {
592				spin_lock_irqsave(&phba->isr_lock, flags);
593				phba->todo_cq = 1;
594				spin_unlock_irqrestore(&phba->isr_lock, flags);
595			}
596			AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0);
597			queue_tail_inc(eq);
598			eqe = queue_tail_node(eq);
599			num_ioeq_processed++;
600		}
601		if (phba->todo_cq || phba->todo_mcc_cq)
602			queue_work(phba->wq, &phba->work_cqs);
603
604		if (num_ioeq_processed) {
605			hwi_ring_eq_db(phba, eq->id, 0,
606				       num_ioeq_processed, 1, 1);
607			return IRQ_HANDLED;
608		} else
609			return IRQ_NONE;
610	}
611}
612
613static int beiscsi_init_irqs(struct beiscsi_hba *phba)
614{
615	struct pci_dev *pcidev = phba->pcidev;
616	struct hwi_controller *phwi_ctrlr;
617	struct hwi_context_memory *phwi_context;
618	int ret, msix_vec, i = 0;
619	char desc[32];
620
621	phwi_ctrlr = phba->phwi_ctrlr;
622	phwi_context = phwi_ctrlr->phwi_ctxt;
623
624	if (phba->msix_enabled) {
625		for (i = 0; i < phba->num_cpus; i++) {
626			sprintf(desc, "beiscsi_msix_%04x", i);
627			msix_vec = phba->msix_entries[i].vector;
628			ret = request_irq(msix_vec, be_isr_msix, 0, desc,
629					  &phwi_context->be_eq[i]);
630		}
631		msix_vec = phba->msix_entries[i].vector;
632		ret = request_irq(msix_vec, be_isr_mcc, 0, "beiscsi_msix_mcc",
633				  &phwi_context->be_eq[i]);
634	} else {
635		ret = request_irq(pcidev->irq, be_isr, IRQF_SHARED,
636				  "beiscsi", phba);
637		if (ret) {
638			shost_printk(KERN_ERR, phba->shost, "beiscsi_init_irqs-"
639				     "Failed to register irq\\n");
640			return ret;
641		}
642	}
643	return 0;
644}
645
646static void hwi_ring_cq_db(struct beiscsi_hba *phba,
647			   unsigned int id, unsigned int num_processed,
648			   unsigned char rearm, unsigned char event)
649{
650	u32 val = 0;
651	val |= id & DB_CQ_RING_ID_MASK;
652	if (rearm)
653		val |= 1 << DB_CQ_REARM_SHIFT;
654	val |= num_processed << DB_CQ_NUM_POPPED_SHIFT;
655	iowrite32(val, phba->db_va + DB_CQ_OFFSET);
656}
657
/*
 * beiscsi_process_async_pdu - deliver an unsolicited PDU to libiscsi.
 *
 * Handles nop-in, async event, reject and login/text responses.  For
 * login/text responses the hardware ITT in the header is replaced with
 * the libiscsi ITT before completion so libiscsi can match the task.
 * Returns 0 on success, 1 for an unrecognized opcode.
 */
static unsigned int
beiscsi_process_async_pdu(struct beiscsi_conn *beiscsi_conn,
			  struct beiscsi_hba *phba,
			  unsigned short cid,
			  struct pdu_base *ppdu,
			  unsigned long pdu_len,
			  void *pbuffer, unsigned long buf_len)
{
	struct iscsi_conn *conn = beiscsi_conn->conn;
	struct iscsi_session *session = conn->session;
	struct iscsi_task *task;
	struct beiscsi_io_task *io_task;
	struct iscsi_hdr *login_hdr;

	switch (ppdu->dw[offsetof(struct amap_pdu_base, opcode) / 32] &
						PDUBASE_OPCODE_MASK) {
	case ISCSI_OP_NOOP_IN:
		/* No data segment is passed up for a nop-in. */
		pbuffer = NULL;
		buf_len = 0;
		break;
	case ISCSI_OP_ASYNC_EVENT:
		break;
	case ISCSI_OP_REJECT:
		/* Reject PDUs are expected to carry a 48-byte header. */
		WARN_ON(!pbuffer);
		WARN_ON(!(buf_len == 48));
		SE_DEBUG(DBG_LVL_1, "In ISCSI_OP_REJECT\n");
		break;
	case ISCSI_OP_LOGIN_RSP:
	case ISCSI_OP_TEXT_RSP:
		/* Restore the libiscsi ITT over the hardware ITT. */
		task = conn->login_task;
		io_task = task->dd_data;
		login_hdr = (struct iscsi_hdr *)ppdu;
		login_hdr->itt = io_task->libiscsi_itt;
		break;
	default:
		shost_printk(KERN_WARNING, phba->shost,
			     "Unrecognized opcode 0x%x in async msg \n",
			     (ppdu->
			     dw[offsetof(struct amap_pdu_base, opcode) / 32]
						& PDUBASE_OPCODE_MASK));
		return 1;
	}

	spin_lock_bh(&session->lock);
	__iscsi_complete_pdu(conn, (struct iscsi_hdr *)ppdu, pbuffer, buf_len);
	spin_unlock_bh(&session->lock);
	return 0;
}
706
707static struct sgl_handle *alloc_io_sgl_handle(struct beiscsi_hba *phba)
708{
709	struct sgl_handle *psgl_handle;
710
711	if (phba->io_sgl_hndl_avbl) {
712		SE_DEBUG(DBG_LVL_8,
713			 "In alloc_io_sgl_handle,io_sgl_alloc_index=%d \n",
714			 phba->io_sgl_alloc_index);
715		psgl_handle = phba->io_sgl_hndl_base[phba->
716						io_sgl_alloc_index];
717		phba->io_sgl_hndl_base[phba->io_sgl_alloc_index] = NULL;
718		phba->io_sgl_hndl_avbl--;
719		if (phba->io_sgl_alloc_index == (phba->params.
720						 ios_per_ctrl - 1))
721			phba->io_sgl_alloc_index = 0;
722		else
723			phba->io_sgl_alloc_index++;
724	} else
725		psgl_handle = NULL;
726	return psgl_handle;
727}
728
729static void
730free_io_sgl_handle(struct beiscsi_hba *phba, struct sgl_handle *psgl_handle)
731{
732	SE_DEBUG(DBG_LVL_8, "In free_,io_sgl_free_index=%d \n",
733		 phba->io_sgl_free_index);
734	if (phba->io_sgl_hndl_base[phba->io_sgl_free_index]) {
735		/*
736		 * this can happen if clean_task is called on a task that
737		 * failed in xmit_task or alloc_pdu.
738		 */
739		 SE_DEBUG(DBG_LVL_8,
740			 "Double Free in IO SGL io_sgl_free_index=%d,"
741			 "value there=%p \n", phba->io_sgl_free_index,
742			 phba->io_sgl_hndl_base[phba->io_sgl_free_index]);
743		return;
744	}
745	phba->io_sgl_hndl_base[phba->io_sgl_free_index] = psgl_handle;
746	phba->io_sgl_hndl_avbl++;
747	if (phba->io_sgl_free_index == (phba->params.ios_per_ctrl - 1))
748		phba->io_sgl_free_index = 0;
749	else
750		phba->io_sgl_free_index++;
751}
752
753/**
754 * alloc_wrb_handle - To allocate a wrb handle
755 * @phba: The hba pointer
756 * @cid: The cid to use for allocation
757 *
758 * This happens under session_lock until submission to chip
759 */
760struct wrb_handle *alloc_wrb_handle(struct beiscsi_hba *phba, unsigned int cid)
761{
762	struct hwi_wrb_context *pwrb_context;
763	struct hwi_controller *phwi_ctrlr;
764	struct wrb_handle *pwrb_handle, *pwrb_handle_tmp;
765
766	phwi_ctrlr = phba->phwi_ctrlr;
767	pwrb_context = &phwi_ctrlr->wrb_context[cid];
768	if (pwrb_context->wrb_handles_available >= 2) {
769		pwrb_handle = pwrb_context->pwrb_handle_base[
770					    pwrb_context->alloc_index];
771		pwrb_context->wrb_handles_available--;
772		if (pwrb_context->alloc_index ==
773						(phba->params.wrbs_per_cxn - 1))
774			pwrb_context->alloc_index = 0;
775		else
776			pwrb_context->alloc_index++;
777		pwrb_handle_tmp = pwrb_context->pwrb_handle_base[
778						pwrb_context->alloc_index];
779		pwrb_handle->nxt_wrb_index = pwrb_handle_tmp->wrb_index;
780	} else
781		pwrb_handle = NULL;
782	return pwrb_handle;
783}
784
785/**
786 * free_wrb_handle - To free the wrb handle back to pool
787 * @phba: The hba pointer
788 * @pwrb_context: The context to free from
789 * @pwrb_handle: The wrb_handle to free
790 *
791 * This happens under session_lock until submission to chip
792 */
793static void
794free_wrb_handle(struct beiscsi_hba *phba, struct hwi_wrb_context *pwrb_context,
795		struct wrb_handle *pwrb_handle)
796{
797	pwrb_context->pwrb_handle_base[pwrb_context->free_index] = pwrb_handle;
798	pwrb_context->wrb_handles_available++;
799	if (pwrb_context->free_index == (phba->params.wrbs_per_cxn - 1))
800		pwrb_context->free_index = 0;
801	else
802		pwrb_context->free_index++;
803
804	SE_DEBUG(DBG_LVL_8,
805		 "FREE WRB: pwrb_handle=%p free_index=0x%x"
806		 "wrb_handles_available=%d \n",
807		 pwrb_handle, pwrb_context->free_index,
808		 pwrb_context->wrb_handles_available);
809}
810
811static struct sgl_handle *alloc_mgmt_sgl_handle(struct beiscsi_hba *phba)
812{
813	struct sgl_handle *psgl_handle;
814
815	if (phba->eh_sgl_hndl_avbl) {
816		psgl_handle = phba->eh_sgl_hndl_base[phba->eh_sgl_alloc_index];
817		phba->eh_sgl_hndl_base[phba->eh_sgl_alloc_index] = NULL;
818		SE_DEBUG(DBG_LVL_8, "mgmt_sgl_alloc_index=%d=0x%x \n",
819			 phba->eh_sgl_alloc_index, phba->eh_sgl_alloc_index);
820		phba->eh_sgl_hndl_avbl--;
821		if (phba->eh_sgl_alloc_index ==
822		    (phba->params.icds_per_ctrl - phba->params.ios_per_ctrl -
823		     1))
824			phba->eh_sgl_alloc_index = 0;
825		else
826			phba->eh_sgl_alloc_index++;
827	} else
828		psgl_handle = NULL;
829	return psgl_handle;
830}
831
832void
833free_mgmt_sgl_handle(struct beiscsi_hba *phba, struct sgl_handle *psgl_handle)
834{
835
836	SE_DEBUG(DBG_LVL_8, "In  free_mgmt_sgl_handle,eh_sgl_free_index=%d \n",
837			     phba->eh_sgl_free_index);
838	if (phba->eh_sgl_hndl_base[phba->eh_sgl_free_index]) {
839		/*
840		 * this can happen if clean_task is called on a task that
841		 * failed in xmit_task or alloc_pdu.
842		 */
843		SE_DEBUG(DBG_LVL_8,
844			 "Double Free in eh SGL ,eh_sgl_free_index=%d \n",
845			 phba->eh_sgl_free_index);
846		return;
847	}
848	phba->eh_sgl_hndl_base[phba->eh_sgl_free_index] = psgl_handle;
849	phba->eh_sgl_hndl_avbl++;
850	if (phba->eh_sgl_free_index ==
851	    (phba->params.icds_per_ctrl - phba->params.ios_per_ctrl - 1))
852		phba->eh_sgl_free_index = 0;
853	else
854		phba->eh_sgl_free_index++;
855}
856
/*
 * be_complete_io - translate a solicited SCSI completion CQE into the
 * scsi_cmnd result for @task: status/response bytes, residual counts,
 * sense data and rx byte accounting, then complete it via libiscsi.
 */
static void
be_complete_io(struct beiscsi_conn *beiscsi_conn,
	       struct iscsi_task *task, struct sol_cqe *psol)
{
	struct beiscsi_io_task *io_task = task->dd_data;
	struct be_status_bhs *sts_bhs =
				(struct be_status_bhs *)io_task->cmd_bhs;
	struct iscsi_conn *conn = beiscsi_conn->conn;
	unsigned int sense_len;
	unsigned char *sense;
	u32 resid = 0, exp_cmdsn, max_cmdsn;
	u8 rsp, status, flags;

	/* Extract the CmdSN window and the response/status/flags bytes
	 * from the CQE dwords. */
	exp_cmdsn = (psol->
			dw[offsetof(struct amap_sol_cqe, i_exp_cmd_sn) / 32]
			& SOL_EXP_CMD_SN_MASK);
	max_cmdsn = ((psol->
			dw[offsetof(struct amap_sol_cqe, i_exp_cmd_sn) / 32]
			& SOL_EXP_CMD_SN_MASK) +
			((psol->dw[offsetof(struct amap_sol_cqe, i_cmd_wnd)
				/ 32] & SOL_CMD_WND_MASK) >> 24) - 1);
	rsp = ((psol->dw[offsetof(struct amap_sol_cqe, i_resp) / 32]
						& SOL_RESP_MASK) >> 16);
	status = ((psol->dw[offsetof(struct amap_sol_cqe, i_sts) / 32]
						& SOL_STS_MASK) >> 8);
	flags = ((psol->dw[offsetof(struct amap_sol_cqe, i_flags) / 32]
					& SOL_FLAGS_MASK) >> 24) | 0x80;

	task->sc->result = (DID_OK << 16) | status;
	if (rsp != ISCSI_STATUS_CMD_COMPLETED) {
		task->sc->result = DID_ERROR << 16;
		goto unmap;
	}

	/* bidi not initially supported */
	if (flags & (ISCSI_FLAG_CMD_UNDERFLOW | ISCSI_FLAG_CMD_OVERFLOW)) {
		resid = (psol->dw[offsetof(struct amap_sol_cqe, i_res_cnt) /
				32] & SOL_RES_CNT_MASK);

		if (!status && (flags & ISCSI_FLAG_CMD_OVERFLOW))
			task->sc->result = DID_ERROR << 16;

		if (flags & ISCSI_FLAG_CMD_UNDERFLOW) {
			scsi_set_resid(task->sc, resid);
			if (!status && (scsi_bufflen(task->sc) - resid <
			    task->sc->underflow))
				task->sc->result = DID_ERROR << 16;
		}
	}

	if (status == SAM_STAT_CHECK_CONDITION) {
		/* sense_info starts with a 16-bit wire-order length.
		 * NOTE(review): cpu_to_be16 byte-swaps the same way the
		 * intended be16_to_cpu would here - verify the intent. */
		unsigned short *slen = (unsigned short *)sts_bhs->sense_info;
		sense = sts_bhs->sense_info + sizeof(unsigned short);
		sense_len =  cpu_to_be16(*slen);
		memcpy(task->sc->sense_buffer, sense,
		       min_t(u16, sense_len, SCSI_SENSE_BUFFERSIZE));
	}

	if (io_task->cmd_bhs->iscsi_hdr.flags & ISCSI_FLAG_CMD_READ) {
		/* Account received data bytes for this connection. */
		if (psol->dw[offsetof(struct amap_sol_cqe, i_res_cnt) / 32]
							& SOL_RES_CNT_MASK)
			 conn->rxdata_octets += (psol->
			     dw[offsetof(struct amap_sol_cqe, i_res_cnt) / 32]
			     & SOL_RES_CNT_MASK);
	}
unmap:
	scsi_dma_unmap(io_task->scsi_cmnd);
	iscsi_complete_scsi_task(task, exp_cmdsn, max_cmdsn);
}
926
/*
 * be_complete_logout - build an iSCSI logout response PDU from the
 * CQE fields and hand it to libiscsi for completion.
 * NOTE(review): exp_cmdsn uses cpu_to_be32 while max_cmdsn uses
 * be32_to_cpu - both byte-swap identically, but the intent should be
 * confirmed and unified.
 */
static void
be_complete_logout(struct beiscsi_conn *beiscsi_conn,
		   struct iscsi_task *task, struct sol_cqe *psol)
{
	struct iscsi_logout_rsp *hdr;
	struct beiscsi_io_task *io_task = task->dd_data;
	struct iscsi_conn *conn = beiscsi_conn->conn;

	hdr = (struct iscsi_logout_rsp *)task->hdr;
	hdr->opcode = ISCSI_OP_LOGOUT_RSP;
	hdr->t2wait = 5;
	hdr->t2retain = 0;
	hdr->flags = ((psol->dw[offsetof(struct amap_sol_cqe, i_flags) / 32]
					& SOL_FLAGS_MASK) >> 24) | 0x80;
	hdr->response = (psol->dw[offsetof(struct amap_sol_cqe, i_resp) /
					32] & SOL_RESP_MASK);
	hdr->exp_cmdsn = cpu_to_be32(psol->
			dw[offsetof(struct amap_sol_cqe, i_exp_cmd_sn) / 32]
					& SOL_EXP_CMD_SN_MASK);
	hdr->max_cmdsn = be32_to_cpu((psol->
			 dw[offsetof(struct amap_sol_cqe, i_exp_cmd_sn) / 32]
					& SOL_EXP_CMD_SN_MASK) +
			((psol->dw[offsetof(struct amap_sol_cqe, i_cmd_wnd)
					/ 32] & SOL_CMD_WND_MASK) >> 24) - 1);
	/* Logout responses carry no data segment. */
	hdr->dlength[0] = 0;
	hdr->dlength[1] = 0;
	hdr->dlength[2] = 0;
	hdr->hlength = 0;
	/* Restore libiscsi's ITT so the task can be matched. */
	hdr->itt = io_task->libiscsi_itt;
	__iscsi_complete_pdu(conn, (struct iscsi_hdr *)hdr, NULL, 0);
}
958
/*
 * be_complete_tmf - build an iSCSI task-management response PDU from
 * the CQE fields and hand it to libiscsi for completion.
 */
static void
be_complete_tmf(struct beiscsi_conn *beiscsi_conn,
		struct iscsi_task *task, struct sol_cqe *psol)
{
	struct iscsi_tm_rsp *hdr;
	struct iscsi_conn *conn = beiscsi_conn->conn;
	struct beiscsi_io_task *io_task = task->dd_data;

	hdr = (struct iscsi_tm_rsp *)task->hdr;
	hdr->opcode = ISCSI_OP_SCSI_TMFUNC_RSP;
	hdr->flags = ((psol->dw[offsetof(struct amap_sol_cqe, i_flags) / 32]
					& SOL_FLAGS_MASK) >> 24) | 0x80;
	hdr->response = (psol->dw[offsetof(struct amap_sol_cqe, i_resp) /
					32] & SOL_RESP_MASK);
	hdr->exp_cmdsn = cpu_to_be32(psol->dw[offsetof(struct amap_sol_cqe,
				    i_exp_cmd_sn) / 32] & SOL_EXP_CMD_SN_MASK);
	hdr->max_cmdsn = be32_to_cpu((psol->dw[offsetof(struct amap_sol_cqe,
			i_exp_cmd_sn) / 32] & SOL_EXP_CMD_SN_MASK) +
			((psol->dw[offsetof(struct amap_sol_cqe, i_cmd_wnd)
			/ 32] & SOL_CMD_WND_MASK) >> 24) - 1);
	/* Restore libiscsi's ITT so the task can be matched. */
	hdr->itt = io_task->libiscsi_itt;
	__iscsi_complete_pdu(conn, (struct iscsi_hdr *)hdr, NULL, 0);
}
982
/*
 * hwi_complete_drvr_msgs - completion path for driver-internal WRBs
 * that produce no iSCSI PDU to deliver: locate the wrb_context and
 * wrb_handle from the CQE's cid and wrb_index, then release the mgmt
 * SGL handle and the WRB handle under their respective locks.
 */
static void
hwi_complete_drvr_msgs(struct beiscsi_conn *beiscsi_conn,
		       struct beiscsi_hba *phba, struct sol_cqe *psol)
{
	struct hwi_wrb_context *pwrb_context;
	struct wrb_handle *pwrb_handle = NULL;
	struct hwi_controller *phwi_ctrlr;
	struct iscsi_task *task;
	struct beiscsi_io_task *io_task;
	struct iscsi_conn *conn = beiscsi_conn->conn;
	struct iscsi_session *session = conn->session;

	phwi_ctrlr = phba->phwi_ctrlr;
	/* CQE cid is absolute; index by its offset from iscsi_cid_start. */
	pwrb_context = &phwi_ctrlr->wrb_context[((psol->
				dw[offsetof(struct amap_sol_cqe, cid) / 32] &
				SOL_CID_MASK) >> 6) -
				phba->fw_config.iscsi_cid_start];
	pwrb_handle = pwrb_context->pwrb_handle_basestd[((psol->
				dw[offsetof(struct amap_sol_cqe, wrb_index) /
				32] & SOL_WRB_INDEX_MASK) >> 16)];
	task = pwrb_handle->pio_handle;

	io_task = task->dd_data;
	spin_lock(&phba->mgmt_sgl_lock);
	free_mgmt_sgl_handle(phba, io_task->psgl_handle);
	spin_unlock(&phba->mgmt_sgl_lock);
	spin_lock_bh(&session->lock);
	free_wrb_handle(phba, pwrb_context, pwrb_handle);
	spin_unlock_bh(&session->lock);
}
1013
1014static void
1015be_complete_nopin_resp(struct beiscsi_conn *beiscsi_conn,
1016		       struct iscsi_task *task, struct sol_cqe *psol)
1017{
1018	struct iscsi_nopin *hdr;
1019	struct iscsi_conn *conn = beiscsi_conn->conn;
1020	struct beiscsi_io_task *io_task = task->dd_data;
1021
1022	hdr = (struct iscsi_nopin *)task->hdr;
1023	hdr->flags = ((psol->dw[offsetof(struct amap_sol_cqe, i_flags) / 32]
1024			& SOL_FLAGS_MASK) >> 24) | 0x80;
1025	hdr->exp_cmdsn = cpu_to_be32(psol->dw[offsetof(struct amap_sol_cqe,
1026				     i_exp_cmd_sn) / 32] & SOL_EXP_CMD_SN_MASK);
1027	hdr->max_cmdsn = be32_to_cpu((psol->dw[offsetof(struct amap_sol_cqe,
1028			i_exp_cmd_sn) / 32] & SOL_EXP_CMD_SN_MASK) +
1029			((psol->dw[offsetof(struct amap_sol_cqe, i_cmd_wnd)
1030			/ 32] & SOL_CMD_WND_MASK) >> 24) - 1);
1031	hdr->opcode = ISCSI_OP_NOOP_IN;
1032	hdr->itt = io_task->libiscsi_itt;
1033	__iscsi_complete_pdu(conn, (struct iscsi_hdr *)hdr, NULL, 0);
1034}
1035
1036static void hwi_complete_cmd(struct beiscsi_conn *beiscsi_conn,
1037			     struct beiscsi_hba *phba, struct sol_cqe *psol)
1038{
1039	struct hwi_wrb_context *pwrb_context;
1040	struct wrb_handle *pwrb_handle;
1041	struct iscsi_wrb *pwrb = NULL;
1042	struct hwi_controller *phwi_ctrlr;
1043	struct iscsi_task *task;
1044	unsigned int type;
1045	struct iscsi_conn *conn = beiscsi_conn->conn;
1046	struct iscsi_session *session = conn->session;
1047
1048	phwi_ctrlr = phba->phwi_ctrlr;
1049	pwrb_context = &phwi_ctrlr->wrb_context[((psol->dw[offsetof
1050				(struct amap_sol_cqe, cid) / 32]
1051				& SOL_CID_MASK) >> 6) -
1052				phba->fw_config.iscsi_cid_start];
1053	pwrb_handle = pwrb_context->pwrb_handle_basestd[((psol->
1054				dw[offsetof(struct amap_sol_cqe, wrb_index) /
1055				32] & SOL_WRB_INDEX_MASK) >> 16)];
1056	task = pwrb_handle->pio_handle;
1057	pwrb = pwrb_handle->pwrb;
1058	type = (pwrb->dw[offsetof(struct amap_iscsi_wrb, type) / 32] &
1059				 WRB_TYPE_MASK) >> 28;
1060
1061	spin_lock_bh(&session->lock);
1062	switch (type) {
1063	case HWH_TYPE_IO:
1064	case HWH_TYPE_IO_RD:
1065		if ((task->hdr->opcode & ISCSI_OPCODE_MASK) ==
1066		     ISCSI_OP_NOOP_OUT)
1067			be_complete_nopin_resp(beiscsi_conn, task, psol);
1068		else
1069			be_complete_io(beiscsi_conn, task, psol);
1070		break;
1071
1072	case HWH_TYPE_LOGOUT:
1073		if ((task->hdr->opcode & ISCSI_OPCODE_MASK) == ISCSI_OP_LOGOUT)
1074			be_complete_logout(beiscsi_conn, task, psol);
1075		else
1076			be_complete_tmf(beiscsi_conn, task, psol);
1077
1078		break;
1079
1080	case HWH_TYPE_LOGIN:
1081		SE_DEBUG(DBG_LVL_1,
1082			 "\t\t No HWH_TYPE_LOGIN Expected in hwi_complete_cmd"
1083			 "- Solicited path \n");
1084		break;
1085
1086	case HWH_TYPE_NOP:
1087		be_complete_nopin_resp(beiscsi_conn, task, psol);
1088		break;
1089
1090	default:
1091		shost_printk(KERN_WARNING, phba->shost,
1092				"In hwi_complete_cmd, unknown type = %d"
1093				"wrb_index 0x%x CID 0x%x\n", type,
1094				((psol->dw[offsetof(struct amap_iscsi_wrb,
1095				type) / 32] & SOL_WRB_INDEX_MASK) >> 16),
1096				((psol->dw[offsetof(struct amap_sol_cqe,
1097				cid) / 32] & SOL_CID_MASK) >> 6));
1098		break;
1099	}
1100
1101	spin_unlock_bh(&session->lock);
1102}
1103
1104static struct list_head *hwi_get_async_busy_list(struct hwi_async_pdu_context
1105					  *pasync_ctx, unsigned int is_header,
1106					  unsigned int host_write_ptr)
1107{
1108	if (is_header)
1109		return &pasync_ctx->async_entry[host_write_ptr].
1110		    header_busy_list;
1111	else
1112		return &pasync_ctx->async_entry[host_write_ptr].data_busy_list;
1113}
1114
/*
 * hwi_get_async_handle - map a default-PDU CQE back to the
 * async_pdu_handle whose buffer the hardware just filled.
 *
 * The buffer index is derived from the CQE's reported bus address
 * relative to the ring's PA base, then matched against the handles on
 * the ring slot's busy list.  *pcq_index receives the CQE ring index.
 *
 * Returns the matching handle, or NULL on an unexpected CQE code.
 * NOTE(review): callers in this file dereference the result without a
 * NULL check - confirm the unexpected-code path cannot be hit there.
 */
static struct async_pdu_handle *
hwi_get_async_handle(struct beiscsi_hba *phba,
		     struct beiscsi_conn *beiscsi_conn,
		     struct hwi_async_pdu_context *pasync_ctx,
		     struct i_t_dpdu_cqe *pdpdu_cqe, unsigned int *pcq_index)
{
	struct be_bus_address phys_addr;
	struct list_head *pbusy_list;
	struct async_pdu_handle *pasync_handle = NULL;
	int buffer_len = 0;
	/* NOTE(review): unsigned char wraps for rings with more than 256
	 * entries - confirm ring sizing keeps buffer_index in range.
	 */
	unsigned char buffer_index = -1;
	unsigned char is_header = 0;

	/* Reconstruct the buffer start address; db_addr_lo appears to
	 * include the PDU data length (dpl), subtracted here - TODO
	 * confirm against the hardware CQE layout.
	 */
	phys_addr.u.a32.address_lo =
	    pdpdu_cqe->dw[offsetof(struct amap_i_t_dpdu_cqe, db_addr_lo) / 32] -
	    ((pdpdu_cqe->dw[offsetof(struct amap_i_t_dpdu_cqe, dpl) / 32]
						& PDUCQE_DPL_MASK) >> 16);
	phys_addr.u.a32.address_hi =
	    pdpdu_cqe->dw[offsetof(struct amap_i_t_dpdu_cqe, db_addr_hi) / 32];

	phys_addr.u.a64.address =
			*((unsigned long long *)(&phys_addr.u.a64.address));

	switch (pdpdu_cqe->dw[offsetof(struct amap_i_t_dpdu_cqe, code) / 32]
			& PDUCQE_CODE_MASK) {
	case UNSOL_HDR_NOTIFY:
		is_header = 1;

		pbusy_list = hwi_get_async_busy_list(pasync_ctx, 1,
			(pdpdu_cqe->dw[offsetof(struct amap_i_t_dpdu_cqe,
			index) / 32] & PDUCQE_INDEX_MASK));

		/* Offset into the header buffer pool -> buffer index. */
		buffer_len = (unsigned int)(phys_addr.u.a64.address -
				pasync_ctx->async_header.pa_base.u.a64.address);

		buffer_index = buffer_len /
				pasync_ctx->async_header.buffer_size;

		break;
	case UNSOL_DATA_NOTIFY:
		pbusy_list = hwi_get_async_busy_list(pasync_ctx, 0, (pdpdu_cqe->
					dw[offsetof(struct amap_i_t_dpdu_cqe,
					index) / 32] & PDUCQE_INDEX_MASK));
		/* Offset into the data buffer pool -> buffer index. */
		buffer_len = (unsigned long)(phys_addr.u.a64.address -
					pasync_ctx->async_data.pa_base.u.
					a64.address);
		buffer_index = buffer_len / pasync_ctx->async_data.buffer_size;
		break;
	default:
		pbusy_list = NULL;
		shost_printk(KERN_WARNING, phba->shost,
			"Unexpected code=%d \n",
			 pdpdu_cqe->dw[offsetof(struct amap_i_t_dpdu_cqe,
					code) / 32] & PDUCQE_CODE_MASK);
		return NULL;
	}

	/* NOTE(review): bound check uses async_data.num_entries even when
	 * the index came from the header ring - confirm intended.
	 */
	WARN_ON(!(buffer_index <= pasync_ctx->async_data.num_entries));
	WARN_ON(list_empty(pbusy_list));
	/* Find the handle on the busy list that owns this buffer. */
	list_for_each_entry(pasync_handle, pbusy_list, link) {
		WARN_ON(pasync_handle->consumed);
		if (pasync_handle->index == buffer_index)
			break;
	}

	WARN_ON(!pasync_handle);

	/* Rebase the connection id to the driver's 0-based CRI range. */
	pasync_handle->cri = (unsigned short)beiscsi_conn->beiscsi_conn_cid -
					     phba->fw_config.iscsi_cid_start;
	pasync_handle->is_header = is_header;
	/* dpl = number of bytes the hardware actually deposited. */
	pasync_handle->buffer_len = ((pdpdu_cqe->
			dw[offsetof(struct amap_i_t_dpdu_cqe, dpl) / 32]
			& PDUCQE_DPL_MASK) >> 16);

	*pcq_index = (pdpdu_cqe->dw[offsetof(struct amap_i_t_dpdu_cqe,
			index) / 32] & PDUCQE_INDEX_MASK);
	return pasync_handle;
}
1193
/*
 * hwi_update_async_writables - advance the endpoint read pointer of the
 * default-PDU header or data ring up to cq_index, marking the first
 * handle on each traversed slot's busy list as consumed, and count how
 * many ring entries became writable again.
 *
 * The count is accumulated into the ring's "writables" field, which
 * hwi_post_async_buffers() later uses to replenish the ring.
 * Always returns 0.
 */
static unsigned int
hwi_update_async_writables(struct hwi_async_pdu_context *pasync_ctx,
			   unsigned int is_header, unsigned int cq_index)
{
	struct list_head *pbusy_list;
	struct async_pdu_handle *pasync_handle;
	unsigned int num_entries, writables = 0;
	unsigned int *pep_read_ptr, *pwritables;


	/* Select the header or the data ring's bookkeeping fields. */
	if (is_header) {
		pep_read_ptr = &pasync_ctx->async_header.ep_read_ptr;
		pwritables = &pasync_ctx->async_header.writables;
		num_entries = pasync_ctx->async_header.num_entries;
	} else {
		pep_read_ptr = &pasync_ctx->async_data.ep_read_ptr;
		pwritables = &pasync_ctx->async_data.writables;
		num_entries = pasync_ctx->async_data.num_entries;
	}

	/* Walk the ring (with wrap-around) until reaching cq_index. */
	while ((*pep_read_ptr) != cq_index) {
		(*pep_read_ptr)++;
		*pep_read_ptr = (*pep_read_ptr) % num_entries;

		pbusy_list = hwi_get_async_busy_list(pasync_ctx, is_header,
						     *pep_read_ptr);
		/* The first traversed slot is expected to be occupied. */
		if (writables == 0)
			WARN_ON(list_empty(pbusy_list));

		if (!list_empty(pbusy_list)) {
			pasync_handle = list_entry(pbusy_list->next,
						   struct async_pdu_handle,
						   link);
			WARN_ON(!pasync_handle);
			pasync_handle->consumed = 1;
		}

		writables++;
	}

	/* Read pointer already at cq_index: duplicate notification. */
	if (!writables) {
		SE_DEBUG(DBG_LVL_1,
			 "Duplicate notification received - index 0x%x!!\n",
			 cq_index);
		WARN_ON(1);
	}

	*pwritables = *pwritables + writables;
	return 0;
}
1244
1245static unsigned int hwi_free_async_msg(struct beiscsi_hba *phba,
1246				       unsigned int cri)
1247{
1248	struct hwi_controller *phwi_ctrlr;
1249	struct hwi_async_pdu_context *pasync_ctx;
1250	struct async_pdu_handle *pasync_handle, *tmp_handle;
1251	struct list_head *plist;
1252	unsigned int i = 0;
1253
1254	phwi_ctrlr = phba->phwi_ctrlr;
1255	pasync_ctx = HWI_GET_ASYNC_PDU_CTX(phwi_ctrlr);
1256
1257	plist  = &pasync_ctx->async_entry[cri].wait_queue.list;
1258
1259	list_for_each_entry_safe(pasync_handle, tmp_handle, plist, link) {
1260		list_del(&pasync_handle->link);
1261
1262		if (i == 0) {
1263			list_add_tail(&pasync_handle->link,
1264				      &pasync_ctx->async_header.free_list);
1265			pasync_ctx->async_header.free_entries++;
1266			i++;
1267		} else {
1268			list_add_tail(&pasync_handle->link,
1269				      &pasync_ctx->async_data.free_list);
1270			pasync_ctx->async_data.free_entries++;
1271			i++;
1272		}
1273	}
1274
1275	INIT_LIST_HEAD(&pasync_ctx->async_entry[cri].wait_queue.list);
1276	pasync_ctx->async_entry[cri].wait_queue.hdr_received = 0;
1277	pasync_ctx->async_entry[cri].wait_queue.bytes_received = 0;
1278	return 0;
1279}
1280
1281static struct phys_addr *
1282hwi_get_ring_address(struct hwi_async_pdu_context *pasync_ctx,
1283		     unsigned int is_header, unsigned int host_write_ptr)
1284{
1285	struct phys_addr *pasync_sge = NULL;
1286
1287	if (is_header)
1288		pasync_sge = pasync_ctx->async_header.ring_base;
1289	else
1290		pasync_sge = pasync_ctx->async_data.ring_base;
1291
1292	return pasync_sge + host_write_ptr;
1293}
1294
/*
 * hwi_post_async_buffers - replenish the default-PDU header or data
 * ring with buffers from its free list and ring the RXULP doorbell.
 *
 * Buffers are posted only in multiples of 8; any smaller count is
 * deferred until enough writable+free entries accumulate.
 */
static void hwi_post_async_buffers(struct beiscsi_hba *phba,
				   unsigned int is_header)
{
	struct hwi_controller *phwi_ctrlr;
	struct hwi_async_pdu_context *pasync_ctx;
	struct async_pdu_handle *pasync_handle;
	struct list_head *pfree_link, *pbusy_list;
	struct phys_addr *pasync_sge;
	unsigned int ring_id, num_entries;
	unsigned int host_write_num;
	unsigned int writables;
	unsigned int i = 0;
	u32 doorbell = 0;

	phwi_ctrlr = phba->phwi_ctrlr;
	pasync_ctx = HWI_GET_ASYNC_PDU_CTX(phwi_ctrlr);

	/* Pick the ring to replenish; can post at most as many buffers
	 * as are simultaneously writable (consumed) and on the free list.
	 */
	if (is_header) {
		num_entries = pasync_ctx->async_header.num_entries;
		writables = min(pasync_ctx->async_header.writables,
				pasync_ctx->async_header.free_entries);
		pfree_link = pasync_ctx->async_header.free_list.next;
		host_write_num = pasync_ctx->async_header.host_write_ptr;
		ring_id = phwi_ctrlr->default_pdu_hdr.id;
	} else {
		num_entries = pasync_ctx->async_data.num_entries;
		writables = min(pasync_ctx->async_data.writables,
				pasync_ctx->async_data.free_entries);
		pfree_link = pasync_ctx->async_data.free_list.next;
		host_write_num = pasync_ctx->async_data.host_write_ptr;
		ring_id = phwi_ctrlr->default_pdu_data.id;
	}

	/* Round down to the hardware's batch size of 8. */
	writables = (writables / 8) * 8;
	if (writables) {
		for (i = 0; i < writables; i++) {
			pbusy_list =
			    hwi_get_async_busy_list(pasync_ctx, is_header,
						    host_write_num);
			pasync_handle =
			    list_entry(pfree_link, struct async_pdu_handle,
								link);
			WARN_ON(!pasync_handle);
			pasync_handle->consumed = 0;

			pfree_link = pfree_link->next;

			pasync_sge = hwi_get_ring_address(pasync_ctx,
						is_header, host_write_num);

			/* NOTE(review): "hi" is loaded from address_lo and
			 * "lo" from address_hi - looks swapped; confirm
			 * against the ring SGE layout before changing.
			 */
			pasync_sge->hi = pasync_handle->pa.u.a32.address_lo;
			pasync_sge->lo = pasync_handle->pa.u.a32.address_hi;

			list_move(&pasync_handle->link, pbusy_list);

			host_write_num++;
			host_write_num = host_write_num % num_entries;
		}

		if (is_header) {
			pasync_ctx->async_header.host_write_ptr =
							host_write_num;
			pasync_ctx->async_header.free_entries -= writables;
			pasync_ctx->async_header.writables -= writables;
			pasync_ctx->async_header.busy_entries += writables;
		} else {
			pasync_ctx->async_data.host_write_ptr = host_write_num;
			pasync_ctx->async_data.free_entries -= writables;
			pasync_ctx->async_data.writables -= writables;
			pasync_ctx->async_data.busy_entries += writables;
		}

		/* Tell the hardware how many entries were posted. */
		doorbell |= ring_id & DB_DEF_PDU_RING_ID_MASK;
		doorbell |= 1 << DB_DEF_PDU_REARM_SHIFT;
		doorbell |= 0 << DB_DEF_PDU_EVENT_SHIFT;
		doorbell |= (writables & DB_DEF_PDU_CQPROC_MASK)
					<< DB_DEF_PDU_CQPROC_SHIFT;

		iowrite32(doorbell, phba->db_va + DB_RXULP0_OFFSET);
	}
}
1376
1377static void hwi_flush_default_pdu_buffer(struct beiscsi_hba *phba,
1378					 struct beiscsi_conn *beiscsi_conn,
1379					 struct i_t_dpdu_cqe *pdpdu_cqe)
1380{
1381	struct hwi_controller *phwi_ctrlr;
1382	struct hwi_async_pdu_context *pasync_ctx;
1383	struct async_pdu_handle *pasync_handle = NULL;
1384	unsigned int cq_index = -1;
1385
1386	phwi_ctrlr = phba->phwi_ctrlr;
1387	pasync_ctx = HWI_GET_ASYNC_PDU_CTX(phwi_ctrlr);
1388
1389	pasync_handle = hwi_get_async_handle(phba, beiscsi_conn, pasync_ctx,
1390					     pdpdu_cqe, &cq_index);
1391	BUG_ON(pasync_handle->is_header != 0);
1392	if (pasync_handle->consumed == 0)
1393		hwi_update_async_writables(pasync_ctx, pasync_handle->is_header,
1394					   cq_index);
1395
1396	hwi_free_async_msg(phba, pasync_handle->cri);
1397	hwi_post_async_buffers(phba, pasync_handle->is_header);
1398}
1399
1400static unsigned int
1401hwi_fwd_async_msg(struct beiscsi_conn *beiscsi_conn,
1402		  struct beiscsi_hba *phba,
1403		  struct hwi_async_pdu_context *pasync_ctx, unsigned short cri)
1404{
1405	struct list_head *plist;
1406	struct async_pdu_handle *pasync_handle;
1407	void *phdr = NULL;
1408	unsigned int hdr_len = 0, buf_len = 0;
1409	unsigned int status, index = 0, offset = 0;
1410	void *pfirst_buffer = NULL;
1411	unsigned int num_buf = 0;
1412
1413	plist = &pasync_ctx->async_entry[cri].wait_queue.list;
1414
1415	list_for_each_entry(pasync_handle, plist, link) {
1416		if (index == 0) {
1417			phdr = pasync_handle->pbuffer;
1418			hdr_len = pasync_handle->buffer_len;
1419		} else {
1420			buf_len = pasync_handle->buffer_len;
1421			if (!num_buf) {
1422				pfirst_buffer = pasync_handle->pbuffer;
1423				num_buf++;
1424			}
1425			memcpy(pfirst_buffer + offset,
1426			       pasync_handle->pbuffer, buf_len);
1427			offset = buf_len;
1428		}
1429		index++;
1430	}
1431
1432	status = beiscsi_process_async_pdu(beiscsi_conn, phba,
1433					   (beiscsi_conn->beiscsi_conn_cid -
1434					    phba->fw_config.iscsi_cid_start),
1435					    phdr, hdr_len, pfirst_buffer,
1436					    buf_len);
1437
1438	if (status == 0)
1439		hwi_free_async_msg(phba, cri);
1440	return 0;
1441}
1442
/*
 * hwi_gather_async_pdu - queue one received default-PDU buffer (header
 * or data) on the connection's wait queue, and forward the PDU once
 * the header and all announced data bytes have arrived.
 *
 * Returns the status of hwi_fwd_async_msg() if the PDU was forwarded,
 * otherwise 0.
 */
static unsigned int
hwi_gather_async_pdu(struct beiscsi_conn *beiscsi_conn,
		     struct beiscsi_hba *phba,
		     struct async_pdu_handle *pasync_handle)
{
	struct hwi_async_pdu_context *pasync_ctx;
	struct hwi_controller *phwi_ctrlr;
	unsigned int bytes_needed = 0, status = 0;
	unsigned short cri = pasync_handle->cri;
	struct pdu_base *ppdu;

	phwi_ctrlr = phba->phwi_ctrlr;
	pasync_ctx = HWI_GET_ASYNC_PDU_CTX(phwi_ctrlr);

	/* Remove from the busy list; re-queued on the wait queue below. */
	list_del(&pasync_handle->link);
	if (pasync_handle->is_header) {
		pasync_ctx->async_header.busy_entries--;
		/* Two headers for one PDU must never happen. */
		if (pasync_ctx->async_entry[cri].wait_queue.hdr_received) {
			hwi_free_async_msg(phba, cri);
			BUG();
		}

		pasync_ctx->async_entry[cri].wait_queue.bytes_received = 0;
		pasync_ctx->async_entry[cri].wait_queue.hdr_received = 1;
		pasync_ctx->async_entry[cri].wait_queue.hdr_len =
				(unsigned short)pasync_handle->buffer_len;
		list_add_tail(&pasync_handle->link,
			      &pasync_ctx->async_entry[cri].wait_queue.list);

		/* Decode the PDU's DataSegmentLength from the BHS to know
		 * how many data bytes to wait for - TODO confirm the
		 * hi/lo assembly against struct amap_pdu_base.
		 */
		ppdu = pasync_handle->pbuffer;
		bytes_needed = ((((ppdu->dw[offsetof(struct amap_pdu_base,
			data_len_hi) / 32] & PDUBASE_DATALENHI_MASK) << 8) &
			0xFFFF0000) | ((be16_to_cpu((ppdu->
			dw[offsetof(struct amap_pdu_base, data_len_lo) / 32]
			& PDUBASE_DATALENLO_MASK) >> 16)) & 0x0000FFFF));

		/* NOTE(review): status is still 0 here, so this check is
		 * always true as written.
		 */
		if (status == 0) {
			pasync_ctx->async_entry[cri].wait_queue.bytes_needed =
			    bytes_needed;

			/* No data segment: the header alone is the PDU. */
			if (bytes_needed == 0)
				status = hwi_fwd_async_msg(beiscsi_conn, phba,
							   pasync_ctx, cri);
		}
	} else {
		pasync_ctx->async_data.busy_entries--;
		/* Data without a preceding header is silently dropped
		 * from the wait queue (only counted when hdr_received).
		 */
		if (pasync_ctx->async_entry[cri].wait_queue.hdr_received) {
			list_add_tail(&pasync_handle->link,
				      &pasync_ctx->async_entry[cri].wait_queue.
				      list);
			pasync_ctx->async_entry[cri].wait_queue.
				bytes_received +=
				(unsigned short)pasync_handle->buffer_len;

			if (pasync_ctx->async_entry[cri].wait_queue.
			    bytes_received >=
			    pasync_ctx->async_entry[cri].wait_queue.
			    bytes_needed)
				status = hwi_fwd_async_msg(beiscsi_conn, phba,
							   pasync_ctx, cri);
		}
	}
	return status;
}
1507
/*
 * hwi_process_default_pdu_ring - handle an unsolicited header/data CQE:
 * locate the buffer's handle, update ring accounting, gather it into
 * the pending PDU, and replenish the ring.
 *
 * NOTE(review): hwi_get_async_handle() can return NULL on an
 * unexpected CQE code; the dereferences below do not check for that.
 */
static void hwi_process_default_pdu_ring(struct beiscsi_conn *beiscsi_conn,
					 struct beiscsi_hba *phba,
					 struct i_t_dpdu_cqe *pdpdu_cqe)
{
	struct hwi_controller *phwi_ctrlr;
	struct hwi_async_pdu_context *pasync_ctx;
	struct async_pdu_handle *pasync_handle = NULL;
	unsigned int cq_index = -1;

	phwi_ctrlr = phba->phwi_ctrlr;
	pasync_ctx = HWI_GET_ASYNC_PDU_CTX(phwi_ctrlr);
	pasync_handle = hwi_get_async_handle(phba, beiscsi_conn, pasync_ctx,
					     pdpdu_cqe, &cq_index);

	if (pasync_handle->consumed == 0)
		hwi_update_async_writables(pasync_ctx, pasync_handle->is_header,
					   cq_index);
	hwi_gather_async_pdu(beiscsi_conn, phba, pasync_handle);
	hwi_post_async_buffers(phba, pasync_handle->is_header);
}
1528
/*
 * beiscsi_process_mcc_isr - drain the management-command (MCC)
 * completion queue.
 *
 * Valid entries are either async events (link state changes are
 * handled, everything else logged) or command completions handed to
 * be_mcc_compl_process_isr().  The CQ doorbell is rung every 32
 * entries to keep the hardware's credit updated, and once more with
 * rearm set when the queue is drained.
 */
static void  beiscsi_process_mcc_isr(struct beiscsi_hba *phba)
{
	struct be_queue_info *mcc_cq;
	struct  be_mcc_compl *mcc_compl;
	unsigned int num_processed = 0;

	mcc_cq = &phba->ctrl.mcc_obj.cq;
	mcc_compl = queue_tail_node(mcc_cq);
	mcc_compl->flags = le32_to_cpu(mcc_compl->flags);
	while (mcc_compl->flags & CQE_FLAGS_VALID_MASK) {

		/* Periodically return credit without rearming. */
		if (num_processed >= 32) {
			hwi_ring_cq_db(phba, mcc_cq->id,
					num_processed, 0, 0);
			num_processed = 0;
		}
		if (mcc_compl->flags & CQE_FLAGS_ASYNC_MASK) {
			/* Interpret flags as an async trailer */
			if (is_link_state_evt(mcc_compl->flags))
				/* Interpret compl as a async link evt */
				beiscsi_async_link_state_process(phba,
				(struct be_async_event_link_state *) mcc_compl);
			else
				SE_DEBUG(DBG_LVL_1,
					" Unsupported Async Event, flags"
					" = 0x%08x \n", mcc_compl->flags);
		} else if (mcc_compl->flags & CQE_FLAGS_COMPLETED_MASK) {
			be_mcc_compl_process_isr(&phba->ctrl, mcc_compl);
			atomic_dec(&phba->ctrl.mcc_obj.q.used);
		}

		/* Invalidate the entry and advance to the next one. */
		mcc_compl->flags = 0;
		queue_tail_inc(mcc_cq);
		mcc_compl = queue_tail_node(mcc_cq);
		mcc_compl->flags = le32_to_cpu(mcc_compl->flags);
		num_processed++;
	}

	if (num_processed > 0)
		hwi_ring_cq_db(phba, mcc_cq->id, num_processed, 1, 0);

}
1571
/*
 * beiscsi_process_cq - drain one completion queue and dispatch each
 * valid CQE by its code: solicited completions, driver messages,
 * unsolicited header/data PDUs, and the various error notifications
 * (some of which tear down the connection).
 *
 * The CQ doorbell is rung every 32 entries without rearming, and once
 * more with rearm at the end.  Returns the total number of CQEs
 * processed.
 */
static unsigned int beiscsi_process_cq(struct be_eq_obj *pbe_eq)
{
	struct be_queue_info *cq;
	struct sol_cqe *sol;
	struct dmsg_cqe *dmsg;
	unsigned int num_processed = 0;
	unsigned int tot_nump = 0;
	struct beiscsi_conn *beiscsi_conn;
	struct beiscsi_endpoint *beiscsi_ep;
	struct iscsi_endpoint *ep;
	struct beiscsi_hba *phba;

	cq = pbe_eq->cq;
	sol = queue_tail_node(cq);
	phba = pbe_eq->phba;

	while (sol->dw[offsetof(struct amap_sol_cqe, valid) / 32] &
	       CQE_VALID_MASK) {
		be_dws_le_to_cpu(sol, sizeof(struct sol_cqe));

		/* Resolve the CQE's cid to the connection it belongs to. */
		ep = phba->ep_array[(u32) ((sol->
				   dw[offsetof(struct amap_sol_cqe, cid) / 32] &
				   SOL_CID_MASK) >> 6) -
				   phba->fw_config.iscsi_cid_start];

		beiscsi_ep = ep->dd_data;
		beiscsi_conn = beiscsi_ep->conn;

		/* Periodically return credit without rearming. */
		if (num_processed >= 32) {
			hwi_ring_cq_db(phba, cq->id,
					num_processed, 0, 0);
			tot_nump += num_processed;
			num_processed = 0;
		}

		switch ((u32) sol->dw[offsetof(struct amap_sol_cqe, code) /
			32] & CQE_CODE_MASK) {
		case SOL_CMD_COMPLETE:
			hwi_complete_cmd(beiscsi_conn, phba, sol);
			break;
		case DRIVERMSG_NOTIFY:
			SE_DEBUG(DBG_LVL_8, "Received DRIVERMSG_NOTIFY \n");
			/* NOTE(review): dmsg is assigned but sol is passed
			 * to the handler; the cast result is unused.
			 */
			dmsg = (struct dmsg_cqe *)sol;
			hwi_complete_drvr_msgs(beiscsi_conn, phba, sol);
			break;
		case UNSOL_HDR_NOTIFY:
			SE_DEBUG(DBG_LVL_8, "Received UNSOL_HDR_ NOTIFY\n");
			hwi_process_default_pdu_ring(beiscsi_conn, phba,
					     (struct i_t_dpdu_cqe *)sol);
			break;
		case UNSOL_DATA_NOTIFY:
			SE_DEBUG(DBG_LVL_8, "Received UNSOL_DATA_NOTIFY\n");
			hwi_process_default_pdu_ring(beiscsi_conn, phba,
					     (struct i_t_dpdu_cqe *)sol);
			break;
		case CXN_INVALIDATE_INDEX_NOTIFY:
		case CMD_INVALIDATED_NOTIFY:
		case CXN_INVALIDATE_NOTIFY:
			SE_DEBUG(DBG_LVL_1,
				 "Ignoring CQ Error notification for cmd/cxn"
				 "invalidate\n");
			break;
		/* Per-command errors: logged only, command-level recovery
		 * is left to the normal completion/abort paths.
		 */
		case SOL_CMD_KILLED_DATA_DIGEST_ERR:
		case CMD_KILLED_INVALID_STATSN_RCVD:
		case CMD_KILLED_INVALID_R2T_RCVD:
		case CMD_CXN_KILLED_LUN_INVALID:
		case CMD_CXN_KILLED_ICD_INVALID:
		case CMD_CXN_KILLED_ITT_INVALID:
		case CMD_CXN_KILLED_SEQ_OUTOFORDER:
		case CMD_CXN_KILLED_INVALID_DATASN_RCVD:
			SE_DEBUG(DBG_LVL_1,
				 "CQ Error notification for cmd.. "
				 "code %d cid 0x%x\n",
				 sol->dw[offsetof(struct amap_sol_cqe, code) /
				 32] & CQE_CODE_MASK,
				 (sol->dw[offsetof(struct amap_sol_cqe, cid) /
				 32] & SOL_CID_MASK));
			break;
		case UNSOL_DATA_DIGEST_ERROR_NOTIFY:
			SE_DEBUG(DBG_LVL_1,
				 "Digest error on def pdu ring, dropping..\n");
			hwi_flush_default_pdu_buffer(phba, beiscsi_conn,
					     (struct i_t_dpdu_cqe *) sol);
			break;
		/* Fatal connection errors: fail the connection so libiscsi
		 * initiates recovery.
		 */
		case CXN_KILLED_PDU_SIZE_EXCEEDS_DSL:
		case CXN_KILLED_BURST_LEN_MISMATCH:
		case CXN_KILLED_AHS_RCVD:
		case CXN_KILLED_HDR_DIGEST_ERR:
		case CXN_KILLED_UNKNOWN_HDR:
		case CXN_KILLED_STALE_ITT_TTT_RCVD:
		case CXN_KILLED_INVALID_ITT_TTT_RCVD:
		case CXN_KILLED_TIMED_OUT:
		case CXN_KILLED_FIN_RCVD:
		case CXN_KILLED_BAD_UNSOL_PDU_RCVD:
		case CXN_KILLED_BAD_WRB_INDEX_ERROR:
		case CXN_KILLED_OVER_RUN_RESIDUAL:
		case CXN_KILLED_UNDER_RUN_RESIDUAL:
		case CXN_KILLED_CMND_DATA_NOT_ON_SAME_CONN:
			SE_DEBUG(DBG_LVL_1, "CQ Error %d, reset CID "
				 "0x%x...\n",
				 sol->dw[offsetof(struct amap_sol_cqe, code) /
				 32] & CQE_CODE_MASK,
				 (sol->dw[offsetof(struct amap_sol_cqe, cid) /
				 32] & CQE_CID_MASK));
			iscsi_conn_failure(beiscsi_conn->conn,
					   ISCSI_ERR_CONN_FAILED);
			break;
		case CXN_KILLED_RST_SENT:
		case CXN_KILLED_RST_RCVD:
			SE_DEBUG(DBG_LVL_1, "CQ Error %d, reset"
				"received/sent on CID 0x%x...\n",
				 sol->dw[offsetof(struct amap_sol_cqe, code) /
				 32] & CQE_CODE_MASK,
				 (sol->dw[offsetof(struct amap_sol_cqe, cid) /
				 32] & CQE_CID_MASK));
			iscsi_conn_failure(beiscsi_conn->conn,
					   ISCSI_ERR_CONN_FAILED);
			break;
		default:
			SE_DEBUG(DBG_LVL_1, "CQ Error Invalid code= %d "
				 "received on CID 0x%x...\n",
				 sol->dw[offsetof(struct amap_sol_cqe, code) /
				 32] & CQE_CODE_MASK,
				 (sol->dw[offsetof(struct amap_sol_cqe, cid) /
				 32] & CQE_CID_MASK));
			break;
		}

		/* Invalidate the entry and advance to the next one. */
		AMAP_SET_BITS(struct amap_sol_cqe, valid, sol, 0);
		queue_tail_inc(cq);
		sol = queue_tail_node(cq);
		num_processed++;
	}

	if (num_processed > 0) {
		tot_nump += num_processed;
		hwi_ring_cq_db(phba, cq->id, num_processed, 1, 0);
	}
	return tot_nump;
}
1712
1713void beiscsi_process_all_cqs(struct work_struct *work)
1714{
1715	unsigned long flags;
1716	struct hwi_controller *phwi_ctrlr;
1717	struct hwi_context_memory *phwi_context;
1718	struct be_eq_obj *pbe_eq;
1719	struct beiscsi_hba *phba =
1720	    container_of(work, struct beiscsi_hba, work_cqs);
1721
1722	phwi_ctrlr = phba->phwi_ctrlr;
1723	phwi_context = phwi_ctrlr->phwi_ctxt;
1724	if (phba->msix_enabled)
1725		pbe_eq = &phwi_context->be_eq[phba->num_cpus];
1726	else
1727		pbe_eq = &phwi_context->be_eq[0];
1728
1729	if (phba->todo_mcc_cq) {
1730		spin_lock_irqsave(&phba->isr_lock, flags);
1731		phba->todo_mcc_cq = 0;
1732		spin_unlock_irqrestore(&phba->isr_lock, flags);
1733		beiscsi_process_mcc_isr(phba);
1734	}
1735
1736	if (phba->todo_cq) {
1737		spin_lock_irqsave(&phba->isr_lock, flags);
1738		phba->todo_cq = 0;
1739		spin_unlock_irqrestore(&phba->isr_lock, flags);
1740		beiscsi_process_cq(pbe_eq);
1741	}
1742}
1743
1744static int be_iopoll(struct blk_iopoll *iop, int budget)
1745{
1746	static unsigned int ret;
1747	struct beiscsi_hba *phba;
1748	struct be_eq_obj *pbe_eq;
1749
1750	pbe_eq = container_of(iop, struct be_eq_obj, iopoll);
1751	ret = beiscsi_process_cq(pbe_eq);
1752	if (ret < budget) {
1753		phba = pbe_eq->phba;
1754		blk_iopoll_complete(iop);
1755		SE_DEBUG(DBG_LVL_8, "rearm pbe_eq->q.id =%d\n", pbe_eq->q.id);
1756		hwi_ring_eq_db(phba, pbe_eq->q.id, 0, 0, 1, 1);
1757	}
1758	return ret;
1759}
1760
/*
 * hwi_write_sgl - program the WRB and the task's SGL fragment for a
 * scatter/gather I/O.
 *
 * The first two SG elements are written inline into the WRB (sge0/
 * sge1); the full SG list is written into the task's SGL fragment,
 * whose first entry describes the BHS and whose last data entry gets
 * the last_sge bit.
 */
static void
hwi_write_sgl(struct iscsi_wrb *pwrb, struct scatterlist *sg,
	      unsigned int num_sg, struct beiscsi_io_task *io_task)
{
	struct iscsi_sge *psgl;
	unsigned short sg_len, index;
	unsigned int sge_len = 0;
	unsigned long long addr;
	struct scatterlist *l_sg;
	unsigned int offset;

	AMAP_SET_BITS(struct amap_iscsi_wrb, iscsi_bhs_addr_lo, pwrb,
				      io_task->bhs_pa.u.a32.address_lo);
	AMAP_SET_BITS(struct amap_iscsi_wrb, iscsi_bhs_addr_hi, pwrb,
				      io_task->bhs_pa.u.a32.address_hi);

	/* Inline the first (up to) two SG elements into the WRB. */
	l_sg = sg;
	for (index = 0; (index < num_sg) && (index < 2); index++,
							 sg = sg_next(sg)) {
		if (index == 0) {
			sg_len = sg_dma_len(sg);
			addr = (u64) sg_dma_address(sg);
			AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_addr_lo, pwrb,
							(addr & 0xFFFFFFFF));
			AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_addr_hi, pwrb,
							(addr >> 32));
			AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_len, pwrb,
							sg_len);
			sge_len = sg_len;
		} else {
			/* sge1's r2t offset is the length of sge0. */
			AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_r2t_offset,
							pwrb, sge_len);
			sg_len = sg_dma_len(sg);
			addr = (u64) sg_dma_address(sg);
			AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_addr_lo, pwrb,
							(addr & 0xFFFFFFFF));
			AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_addr_hi, pwrb,
							(addr >> 32));
			AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_len, pwrb,
							sg_len);
		}
	}
	psgl = (struct iscsi_sge *)io_task->psgl_handle->pfrag;
	memset(psgl, 0, sizeof(*psgl) * BE2_SGE);

	/* First SGL entry points at the BHS. */
	AMAP_SET_BITS(struct amap_iscsi_sge, len, psgl, io_task->bhs_len - 2);

	AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, psgl,
			io_task->bhs_pa.u.a32.address_hi);
	AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, psgl,
			io_task->bhs_pa.u.a32.address_lo);

	/* Mark which of the inline WRB SGEs (if any) is the last one. */
	if (num_sg == 1) {
		AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_last, pwrb,
								1);
		AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_last, pwrb,
								0);
	} else if (num_sg == 2) {
		AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_last, pwrb,
								0);
		AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_last, pwrb,
								1);
	} else {
		AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_last, pwrb,
								0);
		AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_last, pwrb,
								0);
	}
	/* Write the full SG list starting at the third SGL entry. */
	sg = l_sg;
	psgl++;
	psgl++;
	offset = 0;
	for (index = 0; index < num_sg; index++, sg = sg_next(sg), psgl++) {
		sg_len = sg_dma_len(sg);
		addr = (u64) sg_dma_address(sg);
		AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, psgl,
						(addr & 0xFFFFFFFF));
		AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, psgl,
						(addr >> 32));
		AMAP_SET_BITS(struct amap_iscsi_sge, len, psgl, sg_len);
		AMAP_SET_BITS(struct amap_iscsi_sge, sge_offset, psgl, offset);
		AMAP_SET_BITS(struct amap_iscsi_sge, last_sge, psgl, 0);
		offset += sg_len;
	}
	/* Flag the final data SGE. */
	psgl--;
	AMAP_SET_BITS(struct amap_iscsi_sge, last_sge, psgl, 1);
}
1848
/*
 * hwi_write_buffer - program the WRB and SGL fragment for a non-I/O
 * task whose payload (if any) lives in a single flat buffer
 * (task->data), e.g. login or NOP-Out PDUs.
 */
static void hwi_write_buffer(struct iscsi_wrb *pwrb, struct iscsi_task *task)
{
	struct iscsi_sge *psgl;
	unsigned long long addr;
	struct beiscsi_io_task *io_task = task->dd_data;
	struct beiscsi_conn *beiscsi_conn = io_task->conn;
	struct beiscsi_hba *phba = beiscsi_conn->phba;

	io_task->bhs_len = sizeof(struct be_nonio_bhs) - 2;
	AMAP_SET_BITS(struct amap_iscsi_wrb, iscsi_bhs_addr_lo, pwrb,
				io_task->bhs_pa.u.a32.address_lo);
	AMAP_SET_BITS(struct amap_iscsi_wrb, iscsi_bhs_addr_hi, pwrb,
				io_task->bhs_pa.u.a32.address_hi);

	if (task->data) {
		if (task->data_count) {
			/* NOTE(review): pci_map_single() return value is
			 * not checked for mapping errors, and the mapping
			 * direction is passed as the literal 1.
			 */
			AMAP_SET_BITS(struct amap_iscsi_wrb, dsp, pwrb, 1);
			addr = (u64) pci_map_single(phba->pcidev,
						    task->data,
						    task->data_count, 1);
		} else {
			AMAP_SET_BITS(struct amap_iscsi_wrb, dsp, pwrb, 0);
			addr = 0;
		}
		AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_addr_lo, pwrb,
						(addr & 0xFFFFFFFF));
		AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_addr_hi, pwrb,
						(addr >> 32));
		AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_len, pwrb,
						task->data_count);

		AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_last, pwrb, 1);
	} else {
		AMAP_SET_BITS(struct amap_iscsi_wrb, dsp, pwrb, 0);
		addr = 0;
	}

	psgl = (struct iscsi_sge *)io_task->psgl_handle->pfrag;

	/* First SGL entry describes the BHS. */
	AMAP_SET_BITS(struct amap_iscsi_sge, len, psgl, io_task->bhs_len);

	AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, psgl,
		      io_task->bhs_pa.u.a32.address_hi);
	AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, psgl,
		      io_task->bhs_pa.u.a32.address_lo);
	if (task->data) {
		/* Second entry is a zeroed separator SGE. */
		psgl++;
		AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, psgl, 0);
		AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, psgl, 0);
		AMAP_SET_BITS(struct amap_iscsi_sge, len, psgl, 0);
		AMAP_SET_BITS(struct amap_iscsi_sge, sge_offset, psgl, 0);
		AMAP_SET_BITS(struct amap_iscsi_sge, rsvd0, psgl, 0);
		AMAP_SET_BITS(struct amap_iscsi_sge, last_sge, psgl, 0);

		psgl++;
		/* NOTE(review): redundant re-check - task->data is always
		 * non-NULL inside this outer branch.
		 */
		if (task->data) {
			AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, psgl,
						(addr & 0xFFFFFFFF));
			AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, psgl,
						(addr >> 32));
		}
		/* NOTE(review): fixed length 0x106 - presumably the max
		 * non-I/O payload size; confirm against firmware spec.
		 */
		AMAP_SET_BITS(struct amap_iscsi_sge, len, psgl, 0x106);
	}
	AMAP_SET_BITS(struct amap_iscsi_sge, last_sge, psgl, 1);
}
1914
1915static void beiscsi_find_mem_req(struct beiscsi_hba *phba)
1916{
1917	unsigned int num_cq_pages, num_async_pdu_buf_pages;
1918	unsigned int num_async_pdu_data_pages, wrb_sz_per_cxn;
1919	unsigned int num_async_pdu_buf_sgl_pages, num_async_pdu_data_sgl_pages;
1920
1921	num_cq_pages = PAGES_REQUIRED(phba->params.num_cq_entries * \
1922				      sizeof(struct sol_cqe));
1923	num_async_pdu_buf_pages =
1924			PAGES_REQUIRED(phba->params.asyncpdus_per_ctrl * \
1925				       phba->params.defpdu_hdr_sz);
1926	num_async_pdu_buf_sgl_pages =
1927			PAGES_REQUIRED(phba->params.asyncpdus_per_ctrl * \
1928				       sizeof(struct phys_addr));
1929	num_async_pdu_data_pages =
1930			PAGES_REQUIRED(phba->params.asyncpdus_per_ctrl * \
1931				       phba->params.defpdu_data_sz);
1932	num_async_pdu_data_sgl_pages =
1933			PAGES_REQUIRED(phba->params.asyncpdus_per_ctrl * \
1934				       sizeof(struct phys_addr));
1935
1936	phba->params.hwi_ws_sz = sizeof(struct hwi_controller);
1937
1938	phba->mem_req[ISCSI_MEM_GLOBAL_HEADER] = 2 *
1939						 BE_ISCSI_PDU_HEADER_SIZE;
1940	phba->mem_req[HWI_MEM_ADDN_CONTEXT] =
1941					    sizeof(struct hwi_context_memory);
1942
1943
1944	phba->mem_req[HWI_MEM_WRB] = sizeof(struct iscsi_wrb)
1945	    * (phba->params.wrbs_per_cxn)
1946	    * phba->params.cxns_per_ctrl;
1947	wrb_sz_per_cxn =  sizeof(struct wrb_handle) *
1948				 (phba->params.wrbs_per_cxn);
1949	phba->mem_req[HWI_MEM_WRBH] = roundup_pow_of_two((wrb_sz_per_cxn) *
1950				phba->params.cxns_per_ctrl);
1951
1952	phba->mem_req[HWI_MEM_SGLH] = sizeof(struct sgl_handle) *
1953		phba->params.icds_per_ctrl;
1954	phba->mem_req[HWI_MEM_SGE] = sizeof(struct iscsi_sge) *
1955		phba->params.num_sge_per_io * phba->params.icds_per_ctrl;
1956
1957	phba->mem_req[HWI_MEM_ASYNC_HEADER_BUF] =
1958		num_async_pdu_buf_pages * PAGE_SIZE;
1959	phba->mem_req[HWI_MEM_ASYNC_DATA_BUF] =
1960		num_async_pdu_data_pages * PAGE_SIZE;
1961	phba->mem_req[HWI_MEM_ASYNC_HEADER_RING] =
1962		num_async_pdu_buf_sgl_pages * PAGE_SIZE;
1963	phba->mem_req[HWI_MEM_ASYNC_DATA_RING] =
1964		num_async_pdu_data_sgl_pages * PAGE_SIZE;
1965	phba->mem_req[HWI_MEM_ASYNC_HEADER_HANDLE] =
1966		phba->params.asyncpdus_per_ctrl *
1967		sizeof(struct async_pdu_handle);
1968	phba->mem_req[HWI_MEM_ASYNC_DATA_HANDLE] =
1969		phba->params.asyncpdus_per_ctrl *
1970		sizeof(struct async_pdu_handle);
1971	phba->mem_req[HWI_MEM_ASYNC_PDU_CONTEXT] =
1972		sizeof(struct hwi_async_pdu_context) +
1973		(phba->params.cxns_per_ctrl * sizeof(struct hwi_async_entry));
1974}
1975
1976static int beiscsi_alloc_mem(struct beiscsi_hba *phba)
1977{
1978	struct be_mem_descriptor *mem_descr;
1979	dma_addr_t bus_add;
1980	struct mem_array *mem_arr, *mem_arr_orig;
1981	unsigned int i, j, alloc_size, curr_alloc_size;
1982
1983	phba->phwi_ctrlr = kmalloc(phba->params.hwi_ws_sz, GFP_KERNEL);
1984	if (!phba->phwi_ctrlr)
1985		return -ENOMEM;
1986
1987	phba->init_mem = kcalloc(SE_MEM_MAX, sizeof(*mem_descr),
1988				 GFP_KERNEL);
1989	if (!phba->init_mem) {
1990		kfree(phba->phwi_ctrlr);
1991		return -ENOMEM;
1992	}
1993
1994	mem_arr_orig = kmalloc(sizeof(*mem_arr_orig) * BEISCSI_MAX_FRAGS_INIT,
1995			       GFP_KERNEL);
1996	if (!mem_arr_orig) {
1997		kfree(phba->init_mem);
1998		kfree(phba->phwi_ctrlr);
1999		return -ENOMEM;
2000	}
2001
2002	mem_descr = phba->init_mem;
2003	for (i = 0; i < SE_MEM_MAX; i++) {
2004		j = 0;
2005		mem_arr = mem_arr_orig;
2006		alloc_size = phba->mem_req[i];
2007		memset(mem_arr, 0, sizeof(struct mem_array) *
2008		       BEISCSI_MAX_FRAGS_INIT);
2009		curr_alloc_size = min(be_max_phys_size * 1024, alloc_size);
2010		do {
2011			mem_arr->virtual_address = pci_alloc_consistent(
2012							phba->pcidev,
2013							curr_alloc_size,
2014							&bus_add);
2015			if (!mem_arr->virtual_address) {
2016				if (curr_alloc_size <= BE_MIN_MEM_SIZE)
2017					goto free_mem;
2018				if (curr_alloc_size -
2019					rounddown_pow_of_two(curr_alloc_size))
2020					curr_alloc_size = rounddown_pow_of_two
2021							     (curr_alloc_size);
2022				else
2023					curr_alloc_size = curr_alloc_size / 2;
2024			} else {
2025				mem_arr->bus_address.u.
2026				    a64.address = (__u64) bus_add;
2027				mem_arr->size = curr_alloc_size;
2028				alloc_size -= curr_alloc_size;
2029				curr_alloc_size = min(be_max_phys_size *
2030						      1024, alloc_size);
2031				j++;
2032				mem_arr++;
2033			}
2034		} while (alloc_size);
2035		mem_descr->num_elements = j;
2036		mem_descr->size_in_bytes = phba->mem_req[i];
2037		mem_descr->mem_array = kmalloc(sizeof(*mem_arr) * j,
2038					       GFP_KERNEL);
2039		if (!mem_descr->mem_array)
2040			goto free_mem;
2041
2042		memcpy(mem_descr->mem_array, mem_arr_orig,
2043		       sizeof(struct mem_array) * j);
2044		mem_descr++;
2045	}
2046	kfree(mem_arr_orig);
2047	return 0;
2048free_mem:
2049	mem_descr->num_elements = j;
2050	while ((i) || (j)) {
2051		for (j = mem_descr->num_elements; j > 0; j--) {
2052			pci_free_consistent(phba->pcidev,
2053					    mem_descr->mem_array[j - 1].size,
2054					    mem_descr->mem_array[j - 1].
2055					    virtual_address,
2056					    mem_descr->mem_array[j - 1].
2057					    bus_address.u.a64.address);
2058		}
2059		if (i) {
2060			i--;
2061			kfree(mem_descr->mem_array);
2062			mem_descr--;
2063		}
2064	}
2065	kfree(mem_arr_orig);
2066	kfree(phba->init_mem);
2067	kfree(phba->phwi_ctrlr);
2068	return -ENOMEM;
2069}
2070
/* Size all HWI memory regions, then allocate them. */
static int beiscsi_get_memory(struct beiscsi_hba *phba)
{
	beiscsi_find_mem_req(phba);

	return beiscsi_alloc_mem(phba);
}
2076
2077static void iscsi_init_global_templates(struct beiscsi_hba *phba)
2078{
2079	struct pdu_data_out *pdata_out;
2080	struct pdu_nop_out *pnop_out;
2081	struct be_mem_descriptor *mem_descr;
2082
2083	mem_descr = phba->init_mem;
2084	mem_descr += ISCSI_MEM_GLOBAL_HEADER;
2085	pdata_out =
2086	    (struct pdu_data_out *)mem_descr->mem_array[0].virtual_address;
2087	memset(pdata_out, 0, BE_ISCSI_PDU_HEADER_SIZE);
2088
2089	AMAP_SET_BITS(struct amap_pdu_data_out, opcode, pdata_out,
2090		      IIOC_SCSI_DATA);
2091
2092	pnop_out =
2093	    (struct pdu_nop_out *)((unsigned char *)mem_descr->mem_array[0].
2094				   virtual_address + BE_ISCSI_PDU_HEADER_SIZE);
2095
2096	memset(pnop_out, 0, BE_ISCSI_PDU_HEADER_SIZE);
2097	AMAP_SET_BITS(struct amap_pdu_nop_out, ttt, pnop_out, 0xFFFFFFFF);
2098	AMAP_SET_BITS(struct amap_pdu_nop_out, f_bit, pnop_out, 1);
2099	AMAP_SET_BITS(struct amap_pdu_nop_out, i_bit, pnop_out, 0);
2100}
2101
2102static void beiscsi_init_wrb_handle(struct beiscsi_hba *phba)
2103{
2104	struct be_mem_descriptor *mem_descr_wrbh, *mem_descr_wrb;
2105	struct wrb_handle *pwrb_handle;
2106	struct hwi_controller *phwi_ctrlr;
2107	struct hwi_wrb_context *pwrb_context;
2108	struct iscsi_wrb *pwrb;
2109	unsigned int num_cxn_wrbh;
2110	unsigned int num_cxn_wrb, j, idx, index;
2111
2112	mem_descr_wrbh = phba->init_mem;
2113	mem_descr_wrbh += HWI_MEM_WRBH;
2114
2115	mem_descr_wrb = phba->init_mem;
2116	mem_descr_wrb += HWI_MEM_WRB;
2117
2118	idx = 0;
2119	pwrb_handle = mem_descr_wrbh->mem_array[idx].virtual_address;
2120	num_cxn_wrbh = ((mem_descr_wrbh->mem_array[idx].size) /
2121			((sizeof(struct wrb_handle)) *
2122			 phba->params.wrbs_per_cxn));
2123	phwi_ctrlr = phba->phwi_ctrlr;
2124
2125	for (index = 0; index < phba->params.cxns_per_ctrl * 2; index += 2) {
2126		pwrb_context = &phwi_ctrlr->wrb_context[index];
2127		pwrb_context->pwrb_handle_base =
2128				kzalloc(sizeof(struct wrb_handle *) *
2129					phba->params.wrbs_per_cxn, GFP_KERNEL);
2130		pwrb_context->pwrb_handle_basestd =
2131				kzalloc(sizeof(struct wrb_handle *) *
2132					phba->params.wrbs_per_cxn, GFP_KERNEL);
2133		if (num_cxn_wrbh) {
2134			pwrb_context->alloc_index = 0;
2135			pwrb_context->wrb_handles_available = 0;
2136			for (j = 0; j < phba->params.wrbs_per_cxn; j++) {
2137				pwrb_context->pwrb_handle_base[j] = pwrb_handle;
2138				pwrb_context->pwrb_handle_basestd[j] =
2139								pwrb_handle;
2140				pwrb_context->wrb_handles_available++;
2141				pwrb_handle->wrb_index = j;
2142				pwrb_handle++;
2143			}
2144			pwrb_context->free_index = 0;
2145			num_cxn_wrbh--;
2146		} else {
2147			idx++;
2148			pwrb_handle =
2149			    mem_descr_wrbh->mem_array[idx].virtual_address;
2150			num_cxn_wrbh =
2151			    ((mem_descr_wrbh->mem_array[idx].size) /
2152			     ((sizeof(struct wrb_handle)) *
2153			      phba->params.wrbs_per_cxn));
2154			pwrb_context->alloc_index = 0;
2155			for (j = 0; j < phba->params.wrbs_per_cxn; j++) {
2156				pwrb_context->pwrb_handle_base[j] = pwrb_handle;
2157				pwrb_context->pwrb_handle_basestd[j] =
2158				    pwrb_handle;
2159				pwrb_context->wrb_handles_available++;
2160				pwrb_handle->wrb_index = j;
2161				pwrb_handle++;
2162			}
2163			pwrb_context->free_index = 0;
2164			num_cxn_wrbh--;
2165		}
2166	}
2167	idx = 0;
2168	pwrb = mem_descr_wrb->mem_array[idx].virtual_address;
2169	num_cxn_wrb = (mem_descr_wrb->mem_array[idx].size) /
2170		      ((sizeof(struct iscsi_wrb) *
2171			phba->params.wrbs_per_cxn));
2172	for (index = 0; index < phba->params.cxns_per_ctrl * 2; index += 2) {
2173		pwrb_context = &phwi_ctrlr->wrb_context[index];
2174		if (num_cxn_wrb) {
2175			for (j = 0; j < phba->params.wrbs_per_cxn; j++) {
2176				pwrb_handle = pwrb_context->pwrb_handle_base[j];
2177				pwrb_handle->pwrb = pwrb;
2178				pwrb++;
2179			}
2180			num_cxn_wrb--;
2181		} else {
2182			idx++;
2183			pwrb = mem_descr_wrb->mem_array[idx].virtual_address;
2184			num_cxn_wrb = (mem_descr_wrb->mem_array[idx].size) /
2185				      ((sizeof(struct iscsi_wrb) *
2186					phba->params.wrbs_per_cxn));
2187			for (j = 0; j < phba->params.wrbs_per_cxn; j++) {
2188				pwrb_handle = pwrb_context->pwrb_handle_base[j];
2189				pwrb_handle->pwrb = pwrb;
2190				pwrb++;
2191			}
2192			num_cxn_wrb--;
2193		}
2194	}
2195}
2196
/**
 * hwi_init_async_pdu_ctx - build the unsolicited (async) PDU context
 * @phba: adapter instance
 *
 * Lays the hwi_async_pdu_context over the HWI_MEM_ASYNC_PDU_CONTEXT
 * region, records the base addresses of the header/data buffer, ring
 * and handle regions allocated earlier, and threads every handle onto
 * the header/data free lists with its buffer VA/PA precomputed.
 * Must run after beiscsi_alloc_mem() has populated phba->init_mem.
 */
static void hwi_init_async_pdu_ctx(struct beiscsi_hba *phba)
{
	struct hwi_controller *phwi_ctrlr;
	struct hba_parameters *p = &phba->params;
	struct hwi_async_pdu_context *pasync_ctx;
	struct async_pdu_handle *pasync_header_h, *pasync_data_h;
	unsigned int index;
	struct be_mem_descriptor *mem_descr;

	mem_descr = (struct be_mem_descriptor *)phba->init_mem;
	mem_descr += HWI_MEM_ASYNC_PDU_CONTEXT;

	/* The context structure itself lives in the DMA region. */
	phwi_ctrlr = phba->phwi_ctrlr;
	phwi_ctrlr->phwi_ctxt->pasync_ctx = (struct hwi_async_pdu_context *)
				mem_descr->mem_array[0].virtual_address;
	pasync_ctx = phwi_ctrlr->phwi_ctxt->pasync_ctx;
	memset(pasync_ctx, 0, sizeof(*pasync_ctx));

	pasync_ctx->async_header.num_entries = p->asyncpdus_per_ctrl;
	pasync_ctx->async_header.buffer_size = p->defpdu_hdr_sz;
	pasync_ctx->async_data.buffer_size = p->defpdu_data_sz;
	pasync_ctx->async_data.num_entries = p->asyncpdus_per_ctrl;

	/* Default PDU header buffers. */
	mem_descr = (struct be_mem_descriptor *)phba->init_mem;
	mem_descr += HWI_MEM_ASYNC_HEADER_BUF;
	if (mem_descr->mem_array[0].virtual_address) {
		SE_DEBUG(DBG_LVL_8,
			 "hwi_init_async_pdu_ctx HWI_MEM_ASYNC_HEADER_BUF"
			 "va=%p \n", mem_descr->mem_array[0].virtual_address);
	} else
		shost_printk(KERN_WARNING, phba->shost,
			     "No Virtual address \n");

	pasync_ctx->async_header.va_base =
			mem_descr->mem_array[0].virtual_address;

	pasync_ctx->async_header.pa_base.u.a64.address =
			mem_descr->mem_array[0].bus_address.u.a64.address;

	/* Ring of phys_addr entries the header buffers are posted on. */
	mem_descr = (struct be_mem_descriptor *)phba->init_mem;
	mem_descr += HWI_MEM_ASYNC_HEADER_RING;
	if (mem_descr->mem_array[0].virtual_address) {
		SE_DEBUG(DBG_LVL_8,
			 "hwi_init_async_pdu_ctx HWI_MEM_ASYNC_HEADER_RING"
			 "va=%p \n", mem_descr->mem_array[0].virtual_address);
	} else
		shost_printk(KERN_WARNING, phba->shost,
			    "No Virtual address \n");
	pasync_ctx->async_header.ring_base =
			mem_descr->mem_array[0].virtual_address;

	/* Handle table tracking each posted header buffer. */
	mem_descr = (struct be_mem_descriptor *)phba->init_mem;
	mem_descr += HWI_MEM_ASYNC_HEADER_HANDLE;
	if (mem_descr->mem_array[0].virtual_address) {
		SE_DEBUG(DBG_LVL_8,
			 "hwi_init_async_pdu_ctx HWI_MEM_ASYNC_HEADER_HANDLE"
			 "va=%p \n", mem_descr->mem_array[0].virtual_address);
	} else
		shost_printk(KERN_WARNING, phba->shost,
			    "No Virtual address \n");

	pasync_ctx->async_header.handle_base =
			mem_descr->mem_array[0].virtual_address;
	pasync_ctx->async_header.writables = 0;
	INIT_LIST_HEAD(&pasync_ctx->async_header.free_list);

	/* Same three regions again, for the default PDU data side. */
	mem_descr = (struct be_mem_descriptor *)phba->init_mem;
	mem_descr += HWI_MEM_ASYNC_DATA_BUF;
	if (mem_descr->mem_array[0].virtual_address) {
		SE_DEBUG(DBG_LVL_8,
			 "hwi_init_async_pdu_ctx HWI_MEM_ASYNC_DATA_BUF"
			 "va=%p \n", mem_descr->mem_array[0].virtual_address);
	} else
		shost_printk(KERN_WARNING, phba->shost,
			    "No Virtual address \n");
	pasync_ctx->async_data.va_base =
			mem_descr->mem_array[0].virtual_address;
	pasync_ctx->async_data.pa_base.u.a64.address =
			mem_descr->mem_array[0].bus_address.u.a64.address;

	mem_descr = (struct be_mem_descriptor *)phba->init_mem;
	mem_descr += HWI_MEM_ASYNC_DATA_RING;
	if (mem_descr->mem_array[0].virtual_address) {
		SE_DEBUG(DBG_LVL_8,
			 "hwi_init_async_pdu_ctx HWI_MEM_ASYNC_DATA_RING"
			 "va=%p \n", mem_descr->mem_array[0].virtual_address);
	} else
		shost_printk(KERN_WARNING, phba->shost,
			     "No Virtual address \n");

	pasync_ctx->async_data.ring_base =
			mem_descr->mem_array[0].virtual_address;

	mem_descr = (struct be_mem_descriptor *)phba->init_mem;
	mem_descr += HWI_MEM_ASYNC_DATA_HANDLE;
	if (!mem_descr->mem_array[0].virtual_address)
		shost_printk(KERN_WARNING, phba->shost,
			    "No Virtual address \n");

	pasync_ctx->async_data.handle_base =
			mem_descr->mem_array[0].virtual_address;
	pasync_ctx->async_data.writables = 0;
	INIT_LIST_HEAD(&pasync_ctx->async_data.free_list);

	pasync_header_h =
		(struct async_pdu_handle *)pasync_ctx->async_header.handle_base;
	pasync_data_h =
		(struct async_pdu_handle *)pasync_ctx->async_data.handle_base;

	/*
	 * Put every header and data handle on its free list, pointing it
	 * at its slice of the buffer region (both VA and bus address).
	 */
	for (index = 0; index < p->asyncpdus_per_ctrl; index++) {
		/* cri -1 = handle not bound to any connection yet. */
		pasync_header_h->cri = -1;
		pasync_header_h->index = (char)index;
		INIT_LIST_HEAD(&pasync_header_h->link);
		pasync_header_h->pbuffer =
			(void *)((unsigned long)
			(pasync_ctx->async_header.va_base) +
			(p->defpdu_hdr_sz * index));

		pasync_header_h->pa.u.a64.address =
			pasync_ctx->async_header.pa_base.u.a64.address +
			(p->defpdu_hdr_sz * index);

		list_add_tail(&pasync_header_h->link,
				&pasync_ctx->async_header.free_list);
		pasync_header_h++;
		pasync_ctx->async_header.free_entries++;
		pasync_ctx->async_header.writables++;

		INIT_LIST_HEAD(&pasync_ctx->async_entry[index].wait_queue.list);
		INIT_LIST_HEAD(&pasync_ctx->async_entry[index].
			       header_busy_list);
		pasync_data_h->cri = -1;
		pasync_data_h->index = (char)index;
		INIT_LIST_HEAD(&pasync_data_h->link);
		pasync_data_h->pbuffer =
			(void *)((unsigned long)
			(pasync_ctx->async_data.va_base) +
			(p->defpdu_data_sz * index));

		pasync_data_h->pa.u.a64.address =
		    pasync_ctx->async_data.pa_base.u.a64.address +
		    (p->defpdu_data_sz * index);

		list_add_tail(&pasync_data_h->link,
			      &pasync_ctx->async_data.free_list);
		pasync_data_h++;
		pasync_ctx->async_data.free_entries++;
		pasync_ctx->async_data.writables++;

		INIT_LIST_HEAD(&pasync_ctx->async_entry[index].data_busy_list);
	}

	pasync_ctx->async_header.host_write_ptr = 0;
	pasync_ctx->async_header.ep_read_ptr = -1;
	pasync_ctx->async_data.host_write_ptr = 0;
	pasync_ctx->async_data.ep_read_ptr = -1;
}
2354
2355static int
2356be_sgl_create_contiguous(void *virtual_address,
2357			 u64 physical_address, u32 length,
2358			 struct be_dma_mem *sgl)
2359{
2360	WARN_ON(!virtual_address);
2361	WARN_ON(!physical_address);
2362	WARN_ON(!length > 0);
2363	WARN_ON(!sgl);
2364
2365	sgl->va = virtual_address;
2366	sgl->dma = physical_address;
2367	sgl->size = length;
2368
2369	return 0;
2370}
2371
2372static void be_sgl_destroy_contiguous(struct be_dma_mem *sgl)
2373{
2374	memset(sgl, 0, sizeof(*sgl));
2375}
2376
2377static void
2378hwi_build_be_sgl_arr(struct beiscsi_hba *phba,
2379		     struct mem_array *pmem, struct be_dma_mem *sgl)
2380{
2381	if (sgl->va)
2382		be_sgl_destroy_contiguous(sgl);
2383
2384	be_sgl_create_contiguous(pmem->virtual_address,
2385				 pmem->bus_address.u.a64.address,
2386				 pmem->size, sgl);
2387}
2388
2389static void
2390hwi_build_be_sgl_by_offset(struct beiscsi_hba *phba,
2391			   struct mem_array *pmem, struct be_dma_mem *sgl)
2392{
2393	if (sgl->va)
2394		be_sgl_destroy_contiguous(sgl);
2395
2396	be_sgl_create_contiguous((unsigned char *)pmem->virtual_address,
2397				 pmem->bus_address.u.a64.address,
2398				 pmem->size, sgl);
2399}
2400
2401static int be_fill_queue(struct be_queue_info *q,
2402		u16 len, u16 entry_size, void *vaddress)
2403{
2404	struct be_dma_mem *mem = &q->dma_mem;
2405
2406	memset(q, 0, sizeof(*q));
2407	q->len = len;
2408	q->entry_size = entry_size;
2409	mem->size = len * entry_size;
2410	mem->va = vaddress;
2411	if (!mem->va)
2412		return -ENOMEM;
2413	memset(mem->va, 0, mem->size);
2414	return 0;
2415}
2416
2417static int beiscsi_create_eqs(struct beiscsi_hba *phba,
2418			     struct hwi_context_memory *phwi_context)
2419{
2420	unsigned int i, num_eq_pages;
2421	int ret, eq_for_mcc;
2422	struct be_queue_info *eq;
2423	struct be_dma_mem *mem;
2424	void *eq_vaddress;
2425	dma_addr_t paddr;
2426
2427	num_eq_pages = PAGES_REQUIRED(phba->params.num_eq_entries * \
2428				      sizeof(struct be_eq_entry));
2429
2430	if (phba->msix_enabled)
2431		eq_for_mcc = 1;
2432	else
2433		eq_for_mcc = 0;
2434	for (i = 0; i < (phba->num_cpus + eq_for_mcc); i++) {
2435		eq = &phwi_context->be_eq[i].q;
2436		mem = &eq->dma_mem;
2437		phwi_context->be_eq[i].phba = phba;
2438		eq_vaddress = pci_alloc_consistent(phba->pcidev,
2439						     num_eq_pages * PAGE_SIZE,
2440						     &paddr);
2441		if (!eq_vaddress)
2442			goto create_eq_error;
2443
2444		mem->va = eq_vaddress;
2445		ret = be_fill_queue(eq, phba->params.num_eq_entries,
2446				    sizeof(struct be_eq_entry), eq_vaddress);
2447		if (ret) {
2448			shost_printk(KERN_ERR, phba->shost,
2449				     "be_fill_queue Failed for EQ \n");
2450			goto create_eq_error;
2451		}
2452
2453		mem->dma = paddr;
2454		ret = beiscsi_cmd_eq_create(&phba->ctrl, eq,
2455					    phwi_context->cur_eqd);
2456		if (ret) {
2457			shost_printk(KERN_ERR, phba->shost,
2458				     "beiscsi_cmd_eq_create"
2459				     "Failedfor EQ \n");
2460			goto create_eq_error;
2461		}
2462		SE_DEBUG(DBG_LVL_8, "eqid = %d\n", phwi_context->be_eq[i].q.id);
2463	}
2464	return 0;
2465create_eq_error:
2466	for (i = 0; i < (phba->num_cpus + 1); i++) {
2467		eq = &phwi_context->be_eq[i].q;
2468		mem = &eq->dma_mem;
2469		if (mem->va)
2470			pci_free_consistent(phba->pcidev, num_eq_pages
2471					    * PAGE_SIZE,
2472					    mem->va, mem->dma);
2473	}
2474	return ret;
2475}
2476
2477static int beiscsi_create_cqs(struct beiscsi_hba *phba,
2478			     struct hwi_context_memory *phwi_context)
2479{
2480	unsigned int i, num_cq_pages;
2481	int ret;
2482	struct be_queue_info *cq, *eq;
2483	struct be_dma_mem *mem;
2484	struct be_eq_obj *pbe_eq;
2485	void *cq_vaddress;
2486	dma_addr_t paddr;
2487
2488	num_cq_pages = PAGES_REQUIRED(phba->params.num_cq_entries * \
2489				      sizeof(struct sol_cqe));
2490
2491	for (i = 0; i < phba->num_cpus; i++) {
2492		cq = &phwi_context->be_cq[i];
2493		eq = &phwi_context->be_eq[i].q;
2494		pbe_eq = &phwi_context->be_eq[i];
2495		pbe_eq->cq = cq;
2496		pbe_eq->phba = phba;
2497		mem = &cq->dma_mem;
2498		cq_vaddress = pci_alloc_consistent(phba->pcidev,
2499						     num_cq_pages * PAGE_SIZE,
2500						     &paddr);
2501		if (!cq_vaddress)
2502			goto create_cq_error;
2503		ret = be_fill_queue(cq, phba->params.num_cq_entries,
2504				    sizeof(struct sol_cqe), cq_vaddress);
2505		if (ret) {
2506			shost_printk(KERN_ERR, phba->shost,
2507				     "be_fill_queue Failed for ISCSI CQ \n");
2508			goto create_cq_error;
2509		}
2510
2511		mem->dma = paddr;
2512		ret = beiscsi_cmd_cq_create(&phba->ctrl, cq, eq, false,
2513					    false, 0);
2514		if (ret) {
2515			shost_printk(KERN_ERR, phba->shost,
2516				     "beiscsi_cmd_eq_create"
2517				     "Failed for ISCSI CQ \n");
2518			goto create_cq_error;
2519		}
2520		SE_DEBUG(DBG_LVL_8, "iscsi cq_id is %d for eq_id %d\n",
2521						 cq->id, eq->id);
2522		SE_DEBUG(DBG_LVL_8, "ISCSI CQ CREATED\n");
2523	}
2524	return 0;
2525
2526create_cq_error:
2527	for (i = 0; i < phba->num_cpus; i++) {
2528		cq = &phwi_context->be_cq[i];
2529		mem = &cq->dma_mem;
2530		if (mem->va)
2531			pci_free_consistent(phba->pcidev, num_cq_pages
2532					    * PAGE_SIZE,
2533					    mem->va, mem->dma);
2534	}
2535	return ret;
2536
2537}
2538
2539static int
2540beiscsi_create_def_hdr(struct beiscsi_hba *phba,
2541		       struct hwi_context_memory *phwi_context,
2542		       struct hwi_controller *phwi_ctrlr,
2543		       unsigned int def_pdu_ring_sz)
2544{
2545	unsigned int idx;
2546	int ret;
2547	struct be_queue_info *dq, *cq;
2548	struct be_dma_mem *mem;
2549	struct be_mem_descriptor *mem_descr;
2550	void *dq_vaddress;
2551
2552	idx = 0;
2553	dq = &phwi_context->be_def_hdrq;
2554	cq = &phwi_context->be_cq[0];
2555	mem = &dq->dma_mem;
2556	mem_descr = phba->init_mem;
2557	mem_descr += HWI_MEM_ASYNC_HEADER_RING;
2558	dq_vaddress = mem_descr->mem_array[idx].virtual_address;
2559	ret = be_fill_queue(dq, mem_descr->mem_array[0].size /
2560			    sizeof(struct phys_addr),
2561			    sizeof(struct phys_addr), dq_vaddress);
2562	if (ret) {
2563		shost_printk(KERN_ERR, phba->shost,
2564			     "be_fill_queue Failed for DEF PDU HDR\n");
2565		return ret;
2566	}
2567	mem->dma = mem_descr->mem_array[idx].bus_address.u.a64.address;
2568	ret = be_cmd_create_default_pdu_queue(&phba->ctrl, cq, dq,
2569					      def_pdu_ring_sz,
2570					      phba->params.defpdu_hdr_sz);
2571	if (ret) {
2572		shost_printk(KERN_ERR, phba->shost,
2573			     "be_cmd_create_default_pdu_queue Failed DEFHDR\n");
2574		return ret;
2575	}
2576	phwi_ctrlr->default_pdu_hdr.id = phwi_context->be_def_hdrq.id;
2577	SE_DEBUG(DBG_LVL_8, "iscsi def pdu id is %d\n",
2578		 phwi_context->be_def_hdrq.id);
2579	hwi_post_async_buffers(phba, 1);
2580	return 0;
2581}
2582
2583static int
2584beiscsi_create_def_data(struct beiscsi_hba *phba,
2585			struct hwi_context_memory *phwi_context,
2586			struct hwi_controller *phwi_ctrlr,
2587			unsigned int def_pdu_ring_sz)
2588{
2589	unsigned int idx;
2590	int ret;
2591	struct be_queue_info *dataq, *cq;
2592	struct be_dma_mem *mem;
2593	struct be_mem_descriptor *mem_descr;
2594	void *dq_vaddress;
2595
2596	idx = 0;
2597	dataq = &phwi_context->be_def_dataq;
2598	cq = &phwi_context->be_cq[0];
2599	mem = &dataq->dma_mem;
2600	mem_descr = phba->init_mem;
2601	mem_descr += HWI_MEM_ASYNC_DATA_RING;
2602	dq_vaddress = mem_descr->mem_array[idx].virtual_address;
2603	ret = be_fill_queue(dataq, mem_descr->mem_array[0].size /
2604			    sizeof(struct phys_addr),
2605			    sizeof(struct phys_addr), dq_vaddress);
2606	if (ret) {
2607		shost_printk(KERN_ERR, phba->shost,
2608			     "be_fill_queue Failed for DEF PDU DATA\n");
2609		return ret;
2610	}
2611	mem->dma = mem_descr->mem_array[idx].bus_address.u.a64.address;
2612	ret = be_cmd_create_default_pdu_queue(&phba->ctrl, cq, dataq,
2613					      def_pdu_ring_sz,
2614					      phba->params.defpdu_data_sz);
2615	if (ret) {
2616		shost_printk(KERN_ERR, phba->shost,
2617			     "be_cmd_create_default_pdu_queue Failed"
2618			     " for DEF PDU DATA\n");
2619		return ret;
2620	}
2621	phwi_ctrlr->default_pdu_data.id = phwi_context->be_def_dataq.id;
2622	SE_DEBUG(DBG_LVL_8, "iscsi def data id is %d\n",
2623		 phwi_context->be_def_dataq.id);
2624	hwi_post_async_buffers(phba, 0);
2625	SE_DEBUG(DBG_LVL_8, "DEFAULT PDU DATA RING CREATED \n");
2626	return 0;
2627}
2628
2629static int
2630beiscsi_post_pages(struct beiscsi_hba *phba)
2631{
2632	struct be_mem_descriptor *mem_descr;
2633	struct mem_array *pm_arr;
2634	unsigned int page_offset, i;
2635	struct be_dma_mem sgl;
2636	int status;
2637
2638	mem_descr = phba->init_mem;
2639	mem_descr += HWI_MEM_SGE;
2640	pm_arr = mem_descr->mem_array;
2641
2642	page_offset = (sizeof(struct iscsi_sge) * phba->params.num_sge_per_io *
2643			phba->fw_config.iscsi_icd_start) / PAGE_SIZE;
2644	for (i = 0; i < mem_descr->num_elements; i++) {
2645		hwi_build_be_sgl_arr(phba, pm_arr, &sgl);
2646		status = be_cmd_iscsi_post_sgl_pages(&phba->ctrl, &sgl,
2647						page_offset,
2648						(pm_arr->size / PAGE_SIZE));
2649		page_offset += pm_arr->size / PAGE_SIZE;
2650		if (status != 0) {
2651			shost_printk(KERN_ERR, phba->shost,
2652				     "post sgl failed.\n");
2653			return status;
2654		}
2655		pm_arr++;
2656	}
2657	SE_DEBUG(DBG_LVL_8, "POSTED PAGES \n");
2658	return 0;
2659}
2660
2661static void be_queue_free(struct beiscsi_hba *phba, struct be_queue_info *q)
2662{
2663	struct be_dma_mem *mem = &q->dma_mem;
2664	if (mem->va)
2665		pci_free_consistent(phba->pcidev, mem->size,
2666			mem->va, mem->dma);
2667}
2668
2669static int be_queue_alloc(struct beiscsi_hba *phba, struct be_queue_info *q,
2670		u16 len, u16 entry_size)
2671{
2672	struct be_dma_mem *mem = &q->dma_mem;
2673
2674	memset(q, 0, sizeof(*q));
2675	q->len = len;
2676	q->entry_size = entry_size;
2677	mem->size = len * entry_size;
2678	mem->va = pci_alloc_consistent(phba->pcidev, mem->size, &mem->dma);
2679	if (!mem->va)
2680		return -1;
2681	memset(mem->va, 0, mem->size);
2682	return 0;
2683}
2684
2685static int
2686beiscsi_create_wrb_rings(struct beiscsi_hba *phba,
2687			 struct hwi_context_memory *phwi_context,
2688			 struct hwi_controller *phwi_ctrlr)
2689{
2690	unsigned int wrb_mem_index, offset, size, num_wrb_rings;
2691	u64 pa_addr_lo;
2692	unsigned int idx, num, i;
2693	struct mem_array *pwrb_arr;
2694	void *wrb_vaddr;
2695	struct be_dma_mem sgl;
2696	struct be_mem_descriptor *mem_descr;
2697	int status;
2698
2699	idx = 0;
2700	mem_descr = phba->init_mem;
2701	mem_descr += HWI_MEM_WRB;
2702	pwrb_arr = kmalloc(sizeof(*pwrb_arr) * phba->params.cxns_per_ctrl,
2703			   GFP_KERNEL);
2704	if (!pwrb_arr) {
2705		shost_printk(KERN_ERR, phba->shost,
2706			     "Memory alloc failed in create wrb ring.\n");
2707		return -ENOMEM;
2708	}
2709	wrb_vaddr = mem_descr->mem_array[idx].virtual_address;
2710	pa_addr_lo = mem_descr->mem_array[idx].bus_address.u.a64.address;
2711	num_wrb_rings = mem_descr->mem_array[idx].size /
2712		(phba->params.wrbs_per_cxn * sizeof(struct iscsi_wrb));
2713
2714	for (num = 0; num < phba->params.cxns_per_ctrl; num++) {
2715		if (num_wrb_rings) {
2716			pwrb_arr[num].virtual_address = wrb_vaddr;
2717			pwrb_arr[num].bus_address.u.a64.address	= pa_addr_lo;
2718			pwrb_arr[num].size = phba->params.wrbs_per_cxn *
2719					    sizeof(struct iscsi_wrb);
2720			wrb_vaddr += pwrb_arr[num].size;
2721			pa_addr_lo += pwrb_arr[num].size;
2722			num_wrb_rings--;
2723		} else {
2724			idx++;
2725			wrb_vaddr = mem_descr->mem_array[idx].virtual_address;
2726			pa_addr_lo = mem_descr->mem_array[idx].\
2727					bus_address.u.a64.address;
2728			num_wrb_rings = mem_descr->mem_array[idx].size /
2729					(phba->params.wrbs_per_cxn *
2730					sizeof(struct iscsi_wrb));
2731			pwrb_arr[num].virtual_address = wrb_vaddr;
2732			pwrb_arr[num].bus_address.u.a64.address\
2733						= pa_addr_lo;
2734			pwrb_arr[num].size = phba->params.wrbs_per_cxn *
2735						 sizeof(struct iscsi_wrb);
2736			wrb_vaddr += pwrb_arr[num].size;
2737			pa_addr_lo   += pwrb_arr[num].size;
2738			num_wrb_rings--;
2739		}
2740	}
2741	for (i = 0; i < phba->params.cxns_per_ctrl; i++) {
2742		wrb_mem_index = 0;
2743		offset = 0;
2744		size = 0;
2745
2746		hwi_build_be_sgl_by_offset(phba, &pwrb_arr[i], &sgl);
2747		status = be_cmd_wrbq_create(&phba->ctrl, &sgl,
2748					    &phwi_context->be_wrbq[i]);
2749		if (status != 0) {
2750			shost_printk(KERN_ERR, phba->shost,
2751				     "wrbq create failed.");
2752			return status;
2753		}
2754		phwi_ctrlr->wrb_context[i * 2].cid = phwi_context->be_wrbq[i].
2755								   id;
2756	}
2757	kfree(pwrb_arr);
2758	return 0;
2759}
2760
2761static void free_wrb_handles(struct beiscsi_hba *phba)
2762{
2763	unsigned int index;
2764	struct hwi_controller *phwi_ctrlr;
2765	struct hwi_wrb_context *pwrb_context;
2766
2767	phwi_ctrlr = phba->phwi_ctrlr;
2768	for (index = 0; index < phba->params.cxns_per_ctrl * 2; index += 2) {
2769		pwrb_context = &phwi_ctrlr->wrb_context[index];
2770		kfree(pwrb_context->pwrb_handle_base);
2771		kfree(pwrb_context->pwrb_handle_basestd);
2772	}
2773}
2774
2775static void be_mcc_queues_destroy(struct beiscsi_hba *phba)
2776{
2777	struct be_queue_info *q;
2778	struct be_ctrl_info *ctrl = &phba->ctrl;
2779
2780	q = &phba->ctrl.mcc_obj.q;
2781	if (q->created)
2782		beiscsi_cmd_q_destroy(ctrl, q, QTYPE_MCCQ);
2783	be_queue_free(phba, q);
2784
2785	q = &phba->ctrl.mcc_obj.cq;
2786	if (q->created)
2787		beiscsi_cmd_q_destroy(ctrl, q, QTYPE_CQ);
2788	be_queue_free(phba, q);
2789}
2790
/**
 * hwi_cleanup - destroy every hardware queue created by hwi_init_port
 * @phba: adapter instance
 *
 * Tears down in roughly the reverse of creation order: WRB queues,
 * default PDU header/data queues, posted SGL pages, CQs, EQs, then the
 * MCC queue pair.  Queues are only destroyed if marked created, so
 * this is safe to call after a partial bring-up.
 */
static void hwi_cleanup(struct beiscsi_hba *phba)
{
	struct be_queue_info *q;
	struct be_ctrl_info *ctrl = &phba->ctrl;
	struct hwi_controller *phwi_ctrlr;
	struct hwi_context_memory *phwi_context;
	int i, eq_num;

	phwi_ctrlr = phba->phwi_ctrlr;
	phwi_context = phwi_ctrlr->phwi_ctxt;
	for (i = 0; i < phba->params.cxns_per_ctrl; i++) {
		q = &phwi_context->be_wrbq[i];
		if (q->created)
			beiscsi_cmd_q_destroy(ctrl, q, QTYPE_WRBQ);
	}
	free_wrb_handles(phba);

	q = &phwi_context->be_def_hdrq;
	if (q->created)
		beiscsi_cmd_q_destroy(ctrl, q, QTYPE_DPDUQ);

	q = &phwi_context->be_def_dataq;
	if (q->created)
		beiscsi_cmd_q_destroy(ctrl, q, QTYPE_DPDUQ);

	/* NULL queue: QTYPE_SGL presumably removes all posted SGL pages
	 * rather than a specific queue — confirm against the FW command
	 * definition. */
	beiscsi_cmd_q_destroy(ctrl, NULL, QTYPE_SGL);

	for (i = 0; i < (phba->num_cpus); i++) {
		q = &phwi_context->be_cq[i];
		if (q->created)
			beiscsi_cmd_q_destroy(ctrl, q, QTYPE_CQ);
	}
	/* With MSI-X there is one extra EQ dedicated to MCC. */
	if (phba->msix_enabled)
		eq_num = 1;
	else
		eq_num = 0;
	for (i = 0; i < (phba->num_cpus + eq_num); i++) {
		q = &phwi_context->be_eq[i].q;
		if (q->created)
			beiscsi_cmd_q_destroy(ctrl, q, QTYPE_EQ);
	}
	be_mcc_queues_destroy(phba);
}
2834
2835static int be_mcc_queues_create(struct beiscsi_hba *phba,
2836				struct hwi_context_memory *phwi_context)
2837{
2838	struct be_queue_info *q, *cq;
2839	struct be_ctrl_info *ctrl = &phba->ctrl;
2840
2841	/* Alloc MCC compl queue */
2842	cq = &phba->ctrl.mcc_obj.cq;
2843	if (be_queue_alloc(phba, cq, MCC_CQ_LEN,
2844			sizeof(struct be_mcc_compl)))
2845		goto err;
2846	/* Ask BE to create MCC compl queue; */
2847	if (phba->msix_enabled) {
2848		if (beiscsi_cmd_cq_create(ctrl, cq, &phwi_context->be_eq
2849					 [phba->num_cpus].q, false, true, 0))
2850		goto mcc_cq_free;
2851	} else {
2852		if (beiscsi_cmd_cq_create(ctrl, cq, &phwi_context->be_eq[0].q,
2853					  false, true, 0))
2854		goto mcc_cq_free;
2855	}
2856
2857	/* Alloc MCC queue */
2858	q = &phba->ctrl.mcc_obj.q;
2859	if (be_queue_alloc(phba, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
2860		goto mcc_cq_destroy;
2861
2862	/* Ask BE to create MCC queue */
2863	if (beiscsi_cmd_mccq_create(phba, q, cq))
2864		goto mcc_q_free;
2865
2866	return 0;
2867
2868mcc_q_free:
2869	be_queue_free(phba, q);
2870mcc_cq_destroy:
2871	beiscsi_cmd_q_destroy(ctrl, cq, QTYPE_CQ);
2872mcc_cq_free:
2873	be_queue_free(phba, cq);
2874err:
2875	return -1;
2876}
2877
2878static int find_num_cpus(void)
2879{
2880	int  num_cpus = 0;
2881
2882	num_cpus = num_online_cpus();
2883	if (num_cpus >= MAX_CPUS)
2884		num_cpus = MAX_CPUS - 1;
2885
2886	SE_DEBUG(DBG_LVL_8, "num_cpus = %d \n", num_cpus);
2887	return num_cpus;
2888}
2889
/*
 * hwi_init_port - create all hardware rings for this port.
 * @phba: adapter instance
 *
 * Bring-up order matters: FW init, EQs, MCC queues, FW version check,
 * CQs, default PDU header/data rings, SGL page posting, WRB rings.
 * On any failure everything already created is torn down via
 * hwi_cleanup().
 *
 * Returns 0 on success, -ENOMEM on failure.
 */
static int hwi_init_port(struct beiscsi_hba *phba)
{
	struct hwi_controller *phwi_ctrlr;
	struct hwi_context_memory *phwi_context;
	unsigned int def_pdu_ring_sz;
	struct be_ctrl_info *ctrl = &phba->ctrl;
	int status;

	/* default PDU ring: one phys_addr entry per async PDU */
	def_pdu_ring_sz =
		phba->params.asyncpdus_per_ctrl * sizeof(struct phys_addr);
	phwi_ctrlr = phba->phwi_ctrlr;
	phwi_context = phwi_ctrlr->phwi_ctxt;
	/* initial EQ delay parameters; cur_eqd is the value programmed now */
	phwi_context->max_eqd = 0;
	phwi_context->min_eqd = 0;
	phwi_context->cur_eqd = 64;
	be_cmd_fw_initialize(&phba->ctrl);

	status = beiscsi_create_eqs(phba, phwi_context);
	if (status != 0) {
		shost_printk(KERN_ERR, phba->shost, "EQ not created \n");
		goto error;
	}

	status = be_mcc_queues_create(phba, phwi_context);
	if (status != 0)
		goto error;

	status = mgmt_check_supported_fw(ctrl, phba);
	if (status != 0) {
		shost_printk(KERN_ERR, phba->shost,
			     "Unsupported fw version \n");
		goto error;
	}

	status = beiscsi_create_cqs(phba, phwi_context);
	if (status != 0) {
		shost_printk(KERN_ERR, phba->shost, "CQ not created\n");
		goto error;
	}

	status = beiscsi_create_def_hdr(phba, phwi_context, phwi_ctrlr,
					def_pdu_ring_sz);
	if (status != 0) {
		shost_printk(KERN_ERR, phba->shost,
			     "Default Header not created\n");
		goto error;
	}

	status = beiscsi_create_def_data(phba, phwi_context,
					 phwi_ctrlr, def_pdu_ring_sz);
	if (status != 0) {
		shost_printk(KERN_ERR, phba->shost,
			     "Default Data not created\n");
		goto error;
	}

	status = beiscsi_post_pages(phba);
	if (status != 0) {
		shost_printk(KERN_ERR, phba->shost, "Post SGL Pages Failed\n");
		goto error;
	}

	status = beiscsi_create_wrb_rings(phba,	phwi_context, phwi_ctrlr);
	if (status != 0) {
		shost_printk(KERN_ERR, phba->shost,
			     "WRB Rings not created\n");
		goto error;
	}

	SE_DEBUG(DBG_LVL_8, "hwi_init_port success\n");
	return 0;

error:
	/* NOTE(review): 'status' is discarded here and -ENOMEM is returned
	 * regardless of the actual failure cause — confirm this is intended.
	 */
	shost_printk(KERN_ERR, phba->shost, "hwi_init_port failed");
	hwi_cleanup(phba);
	return -ENOMEM;
}
2967
2968static int hwi_init_controller(struct beiscsi_hba *phba)
2969{
2970	struct hwi_controller *phwi_ctrlr;
2971
2972	phwi_ctrlr = phba->phwi_ctrlr;
2973	if (1 == phba->init_mem[HWI_MEM_ADDN_CONTEXT].num_elements) {
2974		phwi_ctrlr->phwi_ctxt = (struct hwi_context_memory *)phba->
2975		    init_mem[HWI_MEM_ADDN_CONTEXT].mem_array[0].virtual_address;
2976		SE_DEBUG(DBG_LVL_8, " phwi_ctrlr->phwi_ctxt=%p \n",
2977			 phwi_ctrlr->phwi_ctxt);
2978	} else {
2979		shost_printk(KERN_ERR, phba->shost,
2980			     "HWI_MEM_ADDN_CONTEXT is more than one element."
2981			     "Failing to load\n");
2982		return -ENOMEM;
2983	}
2984
2985	iscsi_init_global_templates(phba);
2986	beiscsi_init_wrb_handle(phba);
2987	hwi_init_async_pdu_ctx(phba);
2988	if (hwi_init_port(phba) != 0) {
2989		shost_printk(KERN_ERR, phba->shost,
2990			     "hwi_init_controller failed\n");
2991		return -ENOMEM;
2992	}
2993	return 0;
2994}
2995
2996static void beiscsi_free_mem(struct beiscsi_hba *phba)
2997{
2998	struct be_mem_descriptor *mem_descr;
2999	int i, j;
3000
3001	mem_descr = phba->init_mem;
3002	i = 0;
3003	j = 0;
3004	for (i = 0; i < SE_MEM_MAX; i++) {
3005		for (j = mem_descr->num_elements; j > 0; j--) {
3006			pci_free_consistent(phba->pcidev,
3007			  mem_descr->mem_array[j - 1].size,
3008			  mem_descr->mem_array[j - 1].virtual_address,
3009			  mem_descr->mem_array[j - 1].bus_address.
3010				u.a64.address);
3011		}
3012		kfree(mem_descr->mem_array);
3013		mem_descr++;
3014	}
3015	kfree(phba->init_mem);
3016	kfree(phba->phwi_ctrlr);
3017}
3018
3019static int beiscsi_init_controller(struct beiscsi_hba *phba)
3020{
3021	int ret = -ENOMEM;
3022
3023	ret = beiscsi_get_memory(phba);
3024	if (ret < 0) {
3025		shost_printk(KERN_ERR, phba->shost, "beiscsi_dev_probe -"
3026			     "Failed in beiscsi_alloc_memory \n");
3027		return ret;
3028	}
3029
3030	ret = hwi_init_controller(phba);
3031	if (ret)
3032		goto free_init;
3033	SE_DEBUG(DBG_LVL_8, "Return success from beiscsi_init_controller");
3034	return 0;
3035
3036free_init:
3037	beiscsi_free_mem(phba);
3038	return -ENOMEM;
3039}
3040
/*
 * beiscsi_init_sgl_handle - build the I/O and eh (mgmt) SGL handle pools.
 * @phba: adapter instance
 *
 * Pass 1: carve sgl_handle structs out of the HWI_MEM_SGLH chunks; the
 * first ios_per_ctrl handles form the I/O pool, the remainder
 * (icds_per_ctrl - ios_per_ctrl) form the eh/mgmt pool.
 * Pass 2: walk the HWI_MEM_SGE chunks and attach num_sge_per_io iscsi_sge
 * fragments plus a firmware-relative sgl_index to each handle, in the
 * same order, so handle k in pass 1 gets fragment block k in pass 2.
 *
 * Returns 0 on success, -ENOMEM on allocation failure or unexpected
 * HWI_MEM_SGLH layout.
 */
static int beiscsi_init_sgl_handle(struct beiscsi_hba *phba)
{
	struct be_mem_descriptor *mem_descr_sglh, *mem_descr_sg;
	struct sgl_handle *psgl_handle;
	struct iscsi_sge *pfrag;
	unsigned int arr_index, i, idx;

	phba->io_sgl_hndl_avbl = 0;
	phba->eh_sgl_hndl_avbl = 0;

	mem_descr_sglh = phba->init_mem;
	mem_descr_sglh += HWI_MEM_SGLH;
	/* the handle area must be one contiguous chunk */
	if (1 == mem_descr_sglh->num_elements) {
		/* pointer array for the I/O pool */
		phba->io_sgl_hndl_base = kzalloc(sizeof(struct sgl_handle *) *
						 phba->params.ios_per_ctrl,
						 GFP_KERNEL);
		if (!phba->io_sgl_hndl_base) {
			shost_printk(KERN_ERR, phba->shost,
				     "Mem Alloc Failed. Failing to load\n");
			return -ENOMEM;
		}
		/* pointer array for the eh/mgmt pool (the remaining ICDs) */
		phba->eh_sgl_hndl_base = kzalloc(sizeof(struct sgl_handle *) *
						 (phba->params.icds_per_ctrl -
						 phba->params.ios_per_ctrl),
						 GFP_KERNEL);
		if (!phba->eh_sgl_hndl_base) {
			kfree(phba->io_sgl_hndl_base);
			shost_printk(KERN_ERR, phba->shost,
				     "Mem Alloc Failed. Failing to load\n");
			return -ENOMEM;
		}
	} else {
		shost_printk(KERN_ERR, phba->shost,
			     "HWI_MEM_SGLH is more than one element."
			     "Failing to load\n");
		return -ENOMEM;
	}

	/* pass 1: distribute the raw sgl_handle structs over the two pools */
	arr_index = 0;
	idx = 0;
	while (idx < mem_descr_sglh->num_elements) {
		psgl_handle = mem_descr_sglh->mem_array[idx].virtual_address;

		for (i = 0; i < (mem_descr_sglh->mem_array[idx].size /
		      sizeof(struct sgl_handle)); i++) {
			if (arr_index < phba->params.ios_per_ctrl) {
				phba->io_sgl_hndl_base[arr_index] = psgl_handle;
				phba->io_sgl_hndl_avbl++;
				arr_index++;
			} else {
				phba->eh_sgl_hndl_base[arr_index -
					phba->params.ios_per_ctrl] =
								psgl_handle;
				arr_index++;
				phba->eh_sgl_hndl_avbl++;
			}
			psgl_handle++;
		}
		idx++;
	}
	SE_DEBUG(DBG_LVL_8,
		 "phba->io_sgl_hndl_avbl=%d"
		 "phba->eh_sgl_hndl_avbl=%d \n",
		 phba->io_sgl_hndl_avbl,
		 phba->eh_sgl_hndl_avbl);
	mem_descr_sg = phba->init_mem;
	mem_descr_sg += HWI_MEM_SGE;
	SE_DEBUG(DBG_LVL_8, "\n mem_descr_sg->num_elements=%d \n",
		 mem_descr_sg->num_elements);
	/* pass 2: hand each handle its SGE fragment block and sgl_index */
	arr_index = 0;
	idx = 0;
	while (idx < mem_descr_sg->num_elements) {
		pfrag = mem_descr_sg->mem_array[idx].virtual_address;

		for (i = 0;
		     i < (mem_descr_sg->mem_array[idx].size) /
		     (sizeof(struct iscsi_sge) * phba->params.num_sge_per_io);
		     i++) {
			if (arr_index < phba->params.ios_per_ctrl)
				psgl_handle = phba->io_sgl_hndl_base[arr_index];
			else
				psgl_handle = phba->eh_sgl_hndl_base[arr_index -
						phba->params.ios_per_ctrl];
			psgl_handle->pfrag = pfrag;
			/* clear the first SGE's address; filled in at I/O time */
			AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, pfrag, 0);
			AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, pfrag, 0);
			pfrag += phba->params.num_sge_per_io;
			psgl_handle->sgl_index =
				phba->fw_config.iscsi_icd_start + arr_index++;
		}
		idx++;
	}
	/* both pools start full with alloc/free cursors at the beginning */
	phba->io_sgl_free_index = 0;
	phba->io_sgl_alloc_index = 0;
	phba->eh_sgl_free_index = 0;
	phba->eh_sgl_alloc_index = 0;
	return 0;
}
3139
3140static int hba_setup_cid_tbls(struct beiscsi_hba *phba)
3141{
3142	int i, new_cid;
3143
3144	phba->cid_array = kzalloc(sizeof(void *) * phba->params.cxns_per_ctrl,
3145				  GFP_KERNEL);
3146	if (!phba->cid_array) {
3147		shost_printk(KERN_ERR, phba->shost,
3148			     "Failed to allocate memory in "
3149			     "hba_setup_cid_tbls\n");
3150		return -ENOMEM;
3151	}
3152	phba->ep_array = kzalloc(sizeof(struct iscsi_endpoint *) *
3153				 phba->params.cxns_per_ctrl * 2, GFP_KERNEL);
3154	if (!phba->ep_array) {
3155		shost_printk(KERN_ERR, phba->shost,
3156			     "Failed to allocate memory in "
3157			     "hba_setup_cid_tbls \n");
3158		kfree(phba->cid_array);
3159		return -ENOMEM;
3160	}
3161	new_cid = phba->fw_config.iscsi_cid_start;
3162	for (i = 0; i < phba->params.cxns_per_ctrl; i++) {
3163		phba->cid_array[i] = new_cid;
3164		new_cid += 2;
3165	}
3166	phba->avlbl_cids = phba->params.cxns_per_ctrl;
3167	return 0;
3168}
3169
3170static unsigned char hwi_enable_intr(struct beiscsi_hba *phba)
3171{
3172	struct be_ctrl_info *ctrl = &phba->ctrl;
3173	struct hwi_controller *phwi_ctrlr;
3174	struct hwi_context_memory *phwi_context;
3175	struct be_queue_info *eq;
3176	u8 __iomem *addr;
3177	u32 reg, i;
3178	u32 enabled;
3179
3180	phwi_ctrlr = phba->phwi_ctrlr;
3181	phwi_context = phwi_ctrlr->phwi_ctxt;
3182
3183	addr = (u8 __iomem *) ((u8 __iomem *) ctrl->pcicfg +
3184			PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET);
3185	reg = ioread32(addr);
3186	SE_DEBUG(DBG_LVL_8, "reg =x%08x \n", reg);
3187
3188	enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
3189	if (!enabled) {
3190		reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
3191		SE_DEBUG(DBG_LVL_8, "reg =x%08x addr=%p \n", reg, addr);
3192		iowrite32(reg, addr);
3193		if (!phba->msix_enabled) {
3194			eq = &phwi_context->be_eq[0].q;
3195			SE_DEBUG(DBG_LVL_8, "eq->id=%d \n", eq->id);
3196			hwi_ring_eq_db(phba, eq->id, 0, 0, 1, 1);
3197		} else {
3198			for (i = 0; i <= phba->num_cpus; i++) {
3199				eq = &phwi_context->be_eq[i].q;
3200				SE_DEBUG(DBG_LVL_8, "eq->id=%d \n", eq->id);
3201				hwi_ring_eq_db(phba, eq->id, 0, 0, 1, 1);
3202			}
3203		}
3204	}
3205	return true;
3206}
3207
3208static void hwi_disable_intr(struct beiscsi_hba *phba)
3209{
3210	struct be_ctrl_info *ctrl = &phba->ctrl;
3211
3212	u8 __iomem *addr = ctrl->pcicfg + PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET;
3213	u32 reg = ioread32(addr);
3214
3215	u32 enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
3216	if (enabled) {
3217		reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
3218		iowrite32(reg, addr);
3219	} else
3220		shost_printk(KERN_WARNING, phba->shost,
3221			     "In hwi_disable_intr, Already Disabled \n");
3222}
3223
3224static int beiscsi_init_port(struct beiscsi_hba *phba)
3225{
3226	int ret;
3227
3228	ret = beiscsi_init_controller(phba);
3229	if (ret < 0) {
3230		shost_printk(KERN_ERR, phba->shost,
3231			     "beiscsi_dev_probe - Failed in"
3232			     "beiscsi_init_controller \n");
3233		return ret;
3234	}
3235	ret = beiscsi_init_sgl_handle(phba);
3236	if (ret < 0) {
3237		shost_printk(KERN_ERR, phba->shost,
3238			     "beiscsi_dev_probe - Failed in"
3239			     "beiscsi_init_sgl_handle \n");
3240		goto do_cleanup_ctrlr;
3241	}
3242
3243	if (hba_setup_cid_tbls(phba)) {
3244		shost_printk(KERN_ERR, phba->shost,
3245			     "Failed in hba_setup_cid_tbls\n");
3246		kfree(phba->io_sgl_hndl_base);
3247		kfree(phba->eh_sgl_hndl_base);
3248		goto do_cleanup_ctrlr;
3249	}
3250
3251	return ret;
3252
3253do_cleanup_ctrlr:
3254	hwi_cleanup(phba);
3255	return ret;
3256}
3257
/*
 * hwi_purge_eq - drain all pending entries from every event queue.
 * @phba: adapter instance
 *
 * Walks each EQ (the extra MCC EQ too when MSI-X is enabled), consumes
 * every entry whose valid bit is set, and rings the EQ doorbell once
 * with the consumed count so the hardware reclaims the slots.
 */
static void hwi_purge_eq(struct beiscsi_hba *phba)
{
	struct hwi_controller *phwi_ctrlr;
	struct hwi_context_memory *phwi_context;
	struct be_queue_info *eq;
	struct be_eq_entry *eqe = NULL;
	int i, eq_msix;
	unsigned int num_processed;

	phwi_ctrlr = phba->phwi_ctrlr;
	phwi_context = phwi_ctrlr->phwi_ctxt;
	/* MSI-X mode has one extra EQ (for the MCC queue) */
	if (phba->msix_enabled)
		eq_msix = 1;
	else
		eq_msix = 0;

	for (i = 0; i < (phba->num_cpus + eq_msix); i++) {
		eq = &phwi_context->be_eq[i].q;
		eqe = queue_tail_node(eq);
		num_processed = 0;
		/* consume entries until the first one not marked valid */
		while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32]
					& EQE_VALID_MASK) {
			/* clear the valid bit so the slot can be reused */
			AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0);
			queue_tail_inc(eq);
			eqe = queue_tail_node(eq);
			num_processed++;
		}

		if (num_processed)
			hwi_ring_eq_db(phba, eq->id, 1,	num_processed, 1, 1);
	}
}
3290
3291static void beiscsi_clean_port(struct beiscsi_hba *phba)
3292{
3293	unsigned char mgmt_status;
3294
3295	mgmt_status = mgmt_epfw_cleanup(phba, CMD_CONNECTION_CHUTE_0);
3296	if (mgmt_status)
3297		shost_printk(KERN_WARNING, phba->shost,
3298			     "mgmt_epfw_cleanup FAILED \n");
3299
3300	hwi_purge_eq(phba);
3301	hwi_cleanup(phba);
3302	kfree(phba->io_sgl_hndl_base);
3303	kfree(phba->eh_sgl_hndl_base);
3304	kfree(phba->cid_array);
3305	kfree(phba->ep_array);
3306}
3307
/*
 * beiscsi_offload_connection - push negotiated session parameters to HW.
 * @beiscsi_conn: connection being moved to offload (FFP) mode
 * @params: negotiated iSCSI parameters packed as AMAP dwords
 *
 * Builds a target-context-update WRB carrying the negotiated values
 * (burst lengths, ERL, digest flags, R2T/immediate-data settings,
 * exp_statsn) plus the pad buffer address, then rings the TX doorbell
 * to hand it to the hardware.
 */
void
beiscsi_offload_connection(struct beiscsi_conn *beiscsi_conn,
			   struct beiscsi_offload_params *params)
{
	struct wrb_handle *pwrb_handle;
	struct iscsi_target_context_update_wrb *pwrb = NULL;
	struct be_mem_descriptor *mem_descr;
	struct beiscsi_hba *phba = beiscsi_conn->phba;
	u32 doorbell = 0;

	/*
	 * We can always use 0 here because it is reserved by libiscsi for
	 * login/startup related tasks.
	 */
	pwrb_handle = alloc_wrb_handle(phba, (beiscsi_conn->beiscsi_conn_cid -
				       phba->fw_config.iscsi_cid_start));
	pwrb = (struct iscsi_target_context_update_wrb *)pwrb_handle->pwrb;
	memset(pwrb, 0, sizeof(*pwrb));
	/* copy the negotiated parameters from the AMAP-packed params blob */
	AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb,
		      max_burst_length, pwrb, params->dw[offsetof
		      (struct amap_beiscsi_offload_params,
		      max_burst_length) / 32]);
	AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb,
		      max_send_data_segment_length, pwrb,
		      params->dw[offsetof(struct amap_beiscsi_offload_params,
		      max_send_data_segment_length) / 32]);
	AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb,
		      first_burst_length,
		      pwrb,
		      params->dw[offsetof(struct amap_beiscsi_offload_params,
		      first_burst_length) / 32]);

	/* single-bit flags share one dword; mask then shift each into place */
	AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, erl, pwrb,
		      (params->dw[offsetof(struct amap_beiscsi_offload_params,
		      erl) / 32] & OFFLD_PARAMS_ERL));
	AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, dde, pwrb,
		      (params->dw[offsetof(struct amap_beiscsi_offload_params,
		      dde) / 32] & OFFLD_PARAMS_DDE) >> 2);
	AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, hde, pwrb,
		      (params->dw[offsetof(struct amap_beiscsi_offload_params,
		      hde) / 32] & OFFLD_PARAMS_HDE) >> 3);
	AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, ir2t, pwrb,
		      (params->dw[offsetof(struct amap_beiscsi_offload_params,
		      ir2t) / 32] & OFFLD_PARAMS_IR2T) >> 4);
	AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, imd, pwrb,
		      (params->dw[offsetof(struct amap_beiscsi_offload_params,
		       imd) / 32] & OFFLD_PARAMS_IMD) >> 5);
	/* hardware wants the next expected StatSN, hence the + 1 */
	AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, stat_sn,
		      pwrb,
		      (params->dw[offsetof(struct amap_beiscsi_offload_params,
		      exp_statsn) / 32] + 1));
	AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, type, pwrb,
		      0x7);
	AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, wrb_idx,
		      pwrb, pwrb_handle->wrb_index);
	AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, ptr2nextwrb,
		      pwrb, pwrb_handle->nxt_wrb_index);
	AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb,
			session_state, pwrb, 0);
	AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, compltonack,
		      pwrb, 1);
	AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, notpredblq,
		      pwrb, 0);
	AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, mode, pwrb,
		      0);

	/* pad buffer lives in the global-header memory region */
	mem_descr = phba->init_mem;
	mem_descr += ISCSI_MEM_GLOBAL_HEADER;

	AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb,
			pad_buffer_addr_hi, pwrb,
		      mem_descr->mem_array[0].bus_address.u.a32.address_hi);
	AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb,
			pad_buffer_addr_lo, pwrb,
		      mem_descr->mem_array[0].bus_address.u.a32.address_lo);

	be_dws_le_to_cpu(pwrb, sizeof(struct iscsi_target_context_update_wrb));

	/* post the WRB: CID + WRB index + count of 1 into the TX doorbell */
	doorbell |= beiscsi_conn->beiscsi_conn_cid & DB_WRB_POST_CID_MASK;
	doorbell |= (pwrb_handle->wrb_index & DB_DEF_PDU_WRB_INDEX_MASK)
			     << DB_DEF_PDU_WRB_INDEX_SHIFT;
	doorbell |= 1 << DB_DEF_PDU_NUM_POSTED_SHIFT;

	iowrite32(doorbell, phba->db_va + DB_TXULP0_OFFSET);
}
3393
3394static void beiscsi_parse_pdu(struct iscsi_conn *conn, itt_t itt,
3395			      int *index, int *age)
3396{
3397	*index = (int)itt;
3398	if (age)
3399		*age = conn->session->age;
3400}
3401
3402/**
3403 * beiscsi_alloc_pdu - allocates pdu and related resources
3404 * @task: libiscsi task
3405 * @opcode: opcode of pdu for task
3406 *
3407 * This is called with the session lock held. It will allocate
3408 * the wrb and sgl if needed for the command. And it will prep
3409 * the pdu's itt. beiscsi_parse_pdu will later translate
3410 * the pdu itt to the libiscsi task itt.
3411 */
3412static int beiscsi_alloc_pdu(struct iscsi_task *task, uint8_t opcode)
3413{
3414	struct beiscsi_io_task *io_task = task->dd_data;
3415	struct iscsi_conn *conn = task->conn;
3416	struct beiscsi_conn *beiscsi_conn = conn->dd_data;
3417	struct beiscsi_hba *phba = beiscsi_conn->phba;
3418	struct hwi_wrb_context *pwrb_context;
3419	struct hwi_controller *phwi_ctrlr;
3420	itt_t itt;
3421	struct beiscsi_session *beiscsi_sess = beiscsi_conn->beiscsi_sess;
3422	dma_addr_t paddr;
3423
3424	io_task->cmd_bhs = pci_pool_alloc(beiscsi_sess->bhs_pool,
3425					  GFP_KERNEL, &paddr);
3426	if (!io_task->cmd_bhs)
3427		return -ENOMEM;
3428	io_task->bhs_pa.u.a64.address = paddr;
3429	io_task->libiscsi_itt = (itt_t)task->itt;
3430	io_task->pwrb_handle = alloc_wrb_handle(phba,
3431						beiscsi_conn->beiscsi_conn_cid -
3432						phba->fw_config.iscsi_cid_start
3433						);
3434	io_task->conn = beiscsi_conn;
3435
3436	task->hdr = (struct iscsi_hdr *)&io_task->cmd_bhs->iscsi_hdr;
3437	task->hdr_max = sizeof(struct be_cmd_bhs);
3438
3439	if (task->sc) {
3440		spin_lock(&phba->io_sgl_lock);
3441		io_task->psgl_handle = alloc_io_sgl_handle(phba);
3442		spin_unlock(&phba->io_sgl_lock);
3443		if (!io_task->psgl_handle)
3444			goto free_hndls;
3445	} else {
3446		io_task->scsi_cmnd = NULL;
3447		if ((opcode & ISCSI_OPCODE_MASK) == ISCSI_OP_LOGIN) {
3448			if (!beiscsi_conn->login_in_progress) {
3449				spin_lock(&phba->mgmt_sgl_lock);
3450				io_task->psgl_handle = (struct sgl_handle *)
3451						alloc_mgmt_sgl_handle(phba);
3452				spin_unlock(&phba->mgmt_sgl_lock);
3453				if (!io_task->psgl_handle)
3454					goto free_hndls;
3455
3456				beiscsi_conn->login_in_progress = 1;
3457				beiscsi_conn->plogin_sgl_handle =
3458							io_task->psgl_handle;
3459			} else {
3460				io_task->psgl_handle =
3461						beiscsi_conn->plogin_sgl_handle;
3462			}
3463		} else {
3464			spin_lock(&phba->mgmt_sgl_lock);
3465			io_task->psgl_handle = alloc_mgmt_sgl_handle(phba);
3466			spin_unlock(&phba->mgmt_sgl_lock);
3467			if (!io_task->psgl_handle)
3468				goto free_hndls;
3469		}
3470	}
3471	itt = (itt_t) cpu_to_be32(((unsigned int)io_task->pwrb_handle->
3472				 wrb_index << 16) | (unsigned int)
3473				(io_task->psgl_handle->sgl_index));
3474	io_task->pwrb_handle->pio_handle = task;
3475
3476	io_task->cmd_bhs->iscsi_hdr.itt = itt;
3477	return 0;
3478
3479free_hndls:
3480	phwi_ctrlr = phba->phwi_ctrlr;
3481	pwrb_context = &phwi_ctrlr->wrb_context[
3482			beiscsi_conn->beiscsi_conn_cid -
3483			phba->fw_config.iscsi_cid_start];
3484	free_wrb_handle(phba, pwrb_context, io_task->pwrb_handle);
3485	io_task->pwrb_handle = NULL;
3486	pci_pool_free(beiscsi_sess->bhs_pool, io_task->cmd_bhs,
3487		      io_task->bhs_pa.u.a64.address);
3488	SE_DEBUG(DBG_LVL_1, "Alloc of SGL_ICD Failed \n");
3489	return -ENOMEM;
3490}
3491
3492static void beiscsi_cleanup_task(struct iscsi_task *task)
3493{
3494	struct beiscsi_io_task *io_task = task->dd_data;
3495	struct iscsi_conn *conn = task->conn;
3496	struct beiscsi_conn *beiscsi_conn = conn->dd_data;
3497	struct beiscsi_hba *phba = beiscsi_conn->phba;
3498	struct beiscsi_session *beiscsi_sess = beiscsi_conn->beiscsi_sess;
3499	struct hwi_wrb_context *pwrb_context;
3500	struct hwi_controller *phwi_ctrlr;
3501
3502	phwi_ctrlr = phba->phwi_ctrlr;
3503	pwrb_context = &phwi_ctrlr->wrb_context[beiscsi_conn->beiscsi_conn_cid
3504			- phba->fw_config.iscsi_cid_start];
3505	if (io_task->pwrb_handle) {
3506		free_wrb_handle(phba, pwrb_context, io_task->pwrb_handle);
3507		io_task->pwrb_handle = NULL;
3508	}
3509
3510	if (io_task->cmd_bhs) {
3511		pci_pool_free(beiscsi_sess->bhs_pool, io_task->cmd_bhs,
3512			      io_task->bhs_pa.u.a64.address);
3513	}
3514
3515	if (task->sc) {
3516		if (io_task->psgl_handle) {
3517			spin_lock(&phba->io_sgl_lock);
3518			free_io_sgl_handle(phba, io_task->psgl_handle);
3519			spin_unlock(&phba->io_sgl_lock);
3520			io_task->psgl_handle = NULL;
3521		}
3522	} else {
3523		if ((task->hdr->opcode & ISCSI_OPCODE_MASK) == ISCSI_OP_LOGIN)
3524			return;
3525		if (io_task->psgl_handle) {
3526			spin_lock(&phba->mgmt_sgl_lock);
3527			free_mgmt_sgl_handle(phba, io_task->psgl_handle);
3528			spin_unlock(&phba->mgmt_sgl_lock);
3529			io_task->psgl_handle = NULL;
3530		}
3531	}
3532}
3533
/*
 * beiscsi_iotask - build and post the WRB for a SCSI data command.
 * @task:    libiscsi task carrying the SCSI command
 * @sg:      mapped scatterlist for the data buffer
 * @num_sg:  number of scatterlist entries
 * @xferlen: total transfer length in bytes
 * @writedir: non-zero for a write (DMA_TO_DEVICE), zero for a read
 *
 * Fills in the WRB (and, for writes, a Data-Out PDU template), attaches
 * the SGL, then rings the TX doorbell.  Always returns 0.
 */
static int beiscsi_iotask(struct iscsi_task *task, struct scatterlist *sg,
			  unsigned int num_sg, unsigned int xferlen,
			  unsigned int writedir)
{

	struct beiscsi_io_task *io_task = task->dd_data;
	struct iscsi_conn *conn = task->conn;
	struct beiscsi_conn *beiscsi_conn = conn->dd_data;
	struct beiscsi_hba *phba = beiscsi_conn->phba;
	struct iscsi_wrb *pwrb = NULL;
	unsigned int doorbell = 0;

	pwrb = io_task->pwrb_handle->pwrb;
	io_task->cmd_bhs->iscsi_hdr.exp_statsn = 0;
	io_task->bhs_len = sizeof(struct be_cmd_bhs);

	if (writedir) {
		/* prepare a Data-Out PDU template for solicited data */
		memset(&io_task->cmd_bhs->iscsi_data_pdu, 0, 48);
		AMAP_SET_BITS(struct amap_pdu_data_out, itt,
			      &io_task->cmd_bhs->iscsi_data_pdu,
			      (unsigned int)io_task->cmd_bhs->iscsi_hdr.itt);
		AMAP_SET_BITS(struct amap_pdu_data_out, opcode,
			      &io_task->cmd_bhs->iscsi_data_pdu,
			      ISCSI_OPCODE_SCSI_DATA_OUT);
		AMAP_SET_BITS(struct amap_pdu_data_out, final_bit,
			      &io_task->cmd_bhs->iscsi_data_pdu, 1);
		AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb,
			      INI_WR_CMD);
		AMAP_SET_BITS(struct amap_iscsi_wrb, dsp, pwrb, 1);
	} else {
		AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb,
			      INI_RD_CMD);
		AMAP_SET_BITS(struct amap_iscsi_wrb, dsp, pwrb, 0);
	}
	/* mirror the LUN from the command BHS into the Data-Out template */
	memcpy(&io_task->cmd_bhs->iscsi_data_pdu.
	       dw[offsetof(struct amap_pdu_data_out, lun) / 32],
	       io_task->cmd_bhs->iscsi_hdr.lun, sizeof(struct scsi_lun));

	AMAP_SET_BITS(struct amap_iscsi_wrb, lun, pwrb,
		      cpu_to_be16((unsigned short)io_task->cmd_bhs->iscsi_hdr.
				  lun[0]));
	AMAP_SET_BITS(struct amap_iscsi_wrb, r2t_exp_dtl, pwrb, xferlen);
	AMAP_SET_BITS(struct amap_iscsi_wrb, wrb_idx, pwrb,
		      io_task->pwrb_handle->wrb_index);
	AMAP_SET_BITS(struct amap_iscsi_wrb, cmdsn_itt, pwrb,
		      be32_to_cpu(task->cmdsn));
	AMAP_SET_BITS(struct amap_iscsi_wrb, sgl_icd_idx, pwrb,
		      io_task->psgl_handle->sgl_index);

	/* attach the mapped scatterlist as the WRB's SGL */
	hwi_write_sgl(pwrb, sg, num_sg, io_task);

	AMAP_SET_BITS(struct amap_iscsi_wrb, ptr2nextwrb, pwrb,
		      io_task->pwrb_handle->nxt_wrb_index);
	be_dws_le_to_cpu(pwrb, sizeof(struct iscsi_wrb));

	/* post: CID + WRB index + count of 1 into the TX doorbell */
	doorbell |= beiscsi_conn->beiscsi_conn_cid & DB_WRB_POST_CID_MASK;
	doorbell |= (io_task->pwrb_handle->wrb_index &
		     DB_DEF_PDU_WRB_INDEX_MASK) << DB_DEF_PDU_WRB_INDEX_SHIFT;
	doorbell |= 1 << DB_DEF_PDU_NUM_POSTED_SHIFT;

	iowrite32(doorbell, phba->db_va + DB_TXULP0_OFFSET);
	return 0;
}
3597
/*
 * beiscsi_mtask - build and post the WRB for a management (non-SCSI) PDU.
 * @task: libiscsi task (login, nop-out, text, TMF or logout)
 *
 * Programs the WRB type and dmsg bit per opcode, copies the PDU into
 * the WRB via hwi_write_buffer(), then rings the TX doorbell.
 *
 * Returns 0 on success, -EINVAL for an unsupported opcode.
 */
static int beiscsi_mtask(struct iscsi_task *task)
{
	struct beiscsi_io_task *io_task = task->dd_data;
	struct iscsi_conn *conn = task->conn;
	struct beiscsi_conn *beiscsi_conn = conn->dd_data;
	struct beiscsi_hba *phba = beiscsi_conn->phba;
	struct iscsi_wrb *pwrb = NULL;
	unsigned int doorbell = 0;
	unsigned int cid;

	cid = beiscsi_conn->beiscsi_conn_cid;
	pwrb = io_task->pwrb_handle->pwrb;
	memset(pwrb, 0, sizeof(*pwrb));
	AMAP_SET_BITS(struct amap_iscsi_wrb, cmdsn_itt, pwrb,
		      be32_to_cpu(task->cmdsn));
	AMAP_SET_BITS(struct amap_iscsi_wrb, wrb_idx, pwrb,
		      io_task->pwrb_handle->wrb_index);
	AMAP_SET_BITS(struct amap_iscsi_wrb, sgl_icd_idx, pwrb,
		      io_task->psgl_handle->sgl_index);

	switch (task->hdr->opcode & ISCSI_OPCODE_MASK) {
	case ISCSI_OP_LOGIN:
		AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb,
			      TGT_DM_CMD);
		AMAP_SET_BITS(struct amap_iscsi_wrb, dmsg, pwrb, 0);
		/* NOTE(review): overwrites the cmdsn_itt programmed above
		 * with 1 for login — confirm this is intended.
		 */
		AMAP_SET_BITS(struct amap_iscsi_wrb, cmdsn_itt, pwrb, 1);
		hwi_write_buffer(pwrb, task);
		break;
	case ISCSI_OP_NOOP_OUT:
		AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb,
			      INI_RD_CMD);
		/* dmsg is set only when the target solicited this nop
		 * (ttt != reserved)
		 */
		if (task->hdr->ttt == ISCSI_RESERVED_TAG)
			AMAP_SET_BITS(struct amap_iscsi_wrb, dmsg, pwrb, 0);
		else
			AMAP_SET_BITS(struct amap_iscsi_wrb, dmsg, pwrb, 1);
		hwi_write_buffer(pwrb, task);
		break;
	case ISCSI_OP_TEXT:
		AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb,
			      TGT_DM_CMD);
		AMAP_SET_BITS(struct amap_iscsi_wrb, dmsg, pwrb, 0);
		hwi_write_buffer(pwrb, task);
		break;
	case ISCSI_OP_SCSI_TMFUNC:
		AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb,
			      INI_TMF_CMD);
		AMAP_SET_BITS(struct amap_iscsi_wrb, dmsg, pwrb, 0);
		hwi_write_buffer(pwrb, task);
		break;
	case ISCSI_OP_LOGOUT:
		AMAP_SET_BITS(struct amap_iscsi_wrb, dmsg, pwrb, 0);
		AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb,
			      HWH_TYPE_LOGOUT);
		hwi_write_buffer(pwrb, task);
		break;

	default:
		SE_DEBUG(DBG_LVL_1, "opcode =%d Not supported \n",
			 task->hdr->opcode & ISCSI_OPCODE_MASK);
		return -EINVAL;
	}

	AMAP_SET_BITS(struct amap_iscsi_wrb, r2t_exp_dtl, pwrb,
		      task->data_count);
	AMAP_SET_BITS(struct amap_iscsi_wrb, ptr2nextwrb, pwrb,
		      io_task->pwrb_handle->nxt_wrb_index);
	be_dws_le_to_cpu(pwrb, sizeof(struct iscsi_wrb));

	/* post: CID + WRB index + count of 1 into the TX doorbell */
	doorbell |= cid & DB_WRB_POST_CID_MASK;
	doorbell |= (io_task->pwrb_handle->wrb_index &
		     DB_DEF_PDU_WRB_INDEX_MASK) << DB_DEF_PDU_WRB_INDEX_SHIFT;
	doorbell |= 1 << DB_DEF_PDU_NUM_POSTED_SHIFT;
	iowrite32(doorbell, phba->db_va + DB_TXULP0_OFFSET);
	return 0;
}
3673
3674static int beiscsi_task_xmit(struct iscsi_task *task)
3675{
3676	struct beiscsi_io_task *io_task = task->dd_data;
3677	struct scsi_cmnd *sc = task->sc;
3678	struct scatterlist *sg;
3679	int num_sg;
3680	unsigned int  writedir = 0, xferlen = 0;
3681
3682	if (!sc)
3683		return beiscsi_mtask(task);
3684
3685	io_task->scsi_cmnd = sc;
3686	num_sg = scsi_dma_map(sc);
3687	if (num_sg < 0) {
3688		SE_DEBUG(DBG_LVL_1, " scsi_dma_map Failed\n")
3689		return num_sg;
3690	}
3691	SE_DEBUG(DBG_LVL_4, "xferlen=0x%08x scmd=%p num_sg=%d sernum=%lu\n",
3692		  (scsi_bufflen(sc)), sc, num_sg, sc->serial_number);
3693	xferlen = scsi_bufflen(sc);
3694	sg = scsi_sglist(sc);
3695	if (sc->sc_data_direction == DMA_TO_DEVICE) {
3696		writedir = 1;
3697		SE_DEBUG(DBG_LVL_4, "task->imm_count=0x%08x \n",
3698			 task->imm_count);
3699	} else
3700		writedir = 0;
3701	return beiscsi_iotask(task, sg, num_sg, xferlen, writedir);
3702}
3703
/*
 * beiscsi_remove - PCI remove callback; full teardown of the adapter.
 * @pcidev: the PCI device being removed
 *
 * Teardown order: mask interrupts, free the IRQ(s), disable MSI-X,
 * destroy the workqueue, stop blk-iopoll, clean the port (firmware
 * cleanup + queue/table teardown), free driver memory, unmap BARs,
 * free the mailbox DMA buffer, then remove and free the SCSI host.
 */
static void beiscsi_remove(struct pci_dev *pcidev)
{
	struct beiscsi_hba *phba = NULL;
	struct hwi_controller *phwi_ctrlr;
	struct hwi_context_memory *phwi_context;
	struct be_eq_obj *pbe_eq;
	unsigned int i, msix_vec;

	phba = (struct beiscsi_hba *)pci_get_drvdata(pcidev);
	if (!phba) {
		dev_err(&pcidev->dev, "beiscsi_remove called with no phba \n");
		return;
	}

	phwi_ctrlr = phba->phwi_ctrlr;
	phwi_context = phwi_ctrlr->phwi_ctxt;
	hwi_disable_intr(phba);
	if (phba->msix_enabled) {
		/* one vector per CPU plus the extra MCC EQ vector */
		for (i = 0; i <= phba->num_cpus; i++) {
			msix_vec = phba->msix_entries[i].vector;
			free_irq(msix_vec, &phwi_context->be_eq[i]);
		}
	} else
		if (phba->pcidev->irq)
			free_irq(phba->pcidev->irq, phba);
	pci_disable_msix(phba->pcidev);
	destroy_workqueue(phba->wq);
	if (blk_iopoll_enabled)
		for (i = 0; i < phba->num_cpus; i++) {
			pbe_eq = &phwi_context->be_eq[i];
			blk_iopoll_disable(&pbe_eq->iopoll);
		}

	beiscsi_clean_port(phba);
	beiscsi_free_mem(phba);
	beiscsi_unmap_pci_function(phba);
	pci_free_consistent(phba->pcidev,
			    phba->ctrl.mbox_mem_alloced.size,
			    phba->ctrl.mbox_mem_alloced.va,
			    phba->ctrl.mbox_mem_alloced.dma);
	iscsi_host_remove(phba->shost);
	pci_dev_put(phba->pcidev);
	iscsi_host_free(phba->shost);
}
3748
3749static void beiscsi_msix_enable(struct beiscsi_hba *phba)
3750{
3751	int i, status;
3752
3753	for (i = 0; i <= phba->num_cpus; i++)
3754		phba->msix_entries[i].entry = i;
3755
3756	status = pci_enable_msix(phba->pcidev, phba->msix_entries,
3757				 (phba->num_cpus + 1));
3758	if (!status)
3759		phba->msix_enabled = true;
3760
3761	return;
3762}
3763
3764static int __devinit beiscsi_dev_probe(struct pci_dev *pcidev,
3765				const struct pci_device_id *id)
3766{
3767	struct beiscsi_hba *phba = NULL;
3768	struct hwi_controller *phwi_ctrlr;
3769	struct hwi_context_memory *phwi_context;
3770	struct be_eq_obj *pbe_eq;
3771	int ret, msix_vec, num_cpus, i;
3772
3773	ret = beiscsi_enable_pci(pcidev);
3774	if (ret < 0) {
3775		shost_printk(KERN_ERR, phba->shost, "beiscsi_dev_probe-"
3776			     "Failed to enable pci device \n");
3777		return ret;
3778	}
3779
3780	phba = beiscsi_hba_alloc(pcidev);
3781	if (!phba) {
3782		dev_err(&pcidev->dev, "beiscsi_dev_probe-"
3783			" Failed in beiscsi_hba_alloc \n");
3784		goto disable_pci;
3785	}
3786
3787	switch (pcidev->device) {
3788	case BE_DEVICE_ID1:
3789	case OC_DEVICE_ID1:
3790	case OC_DEVICE_ID2:
3791		phba->generation = BE_GEN2;
3792		break;
3793	case BE_DEVICE_ID2:
3794	case OC_DEVICE_ID3:
3795		phba->generation = BE_GEN3;
3796		break;
3797	default:
3798		phba->generation = 0;
3799	}
3800
3801	if (enable_msix)
3802		num_cpus = find_num_cpus();
3803	else
3804		num_cpus = 1;
3805	phba->num_cpus = num_cpus;
3806	SE_DEBUG(DBG_LVL_8, "num_cpus = %d \n", phba->num_cpus);
3807
3808	if (enable_msix)
3809		beiscsi_msix_enable(phba);
3810	ret = be_ctrl_init(phba, pcidev);
3811	if (ret) {
3812		shost_printk(KERN_ERR, phba->shost, "beiscsi_dev_probe-"
3813				"Failed in be_ctrl_init\n");
3814		goto hba_free;
3815	}
3816
3817	spin_lock_init(&phba->io_sgl_lock);
3818	spin_lock_init(&phba->mgmt_sgl_lock);
3819	spin_lock_init(&phba->isr_lock);
3820	ret = mgmt_get_fw_config(&phba->ctrl, phba);
3821	if (ret != 0) {
3822		shost_printk(KERN_ERR, phba->shost,
3823			     "Error getting fw config\n");
3824		goto free_port;
3825	}
3826	phba->shost->max_id = phba->fw_config.iscsi_cid_count;
3827	beiscsi_get_params(phba);
3828	phba->shost->can_queue = phba->params.ios_per_ctrl;
3829	ret = beiscsi_init_port(phba);
3830	if (ret < 0) {
3831		shost_printk(KERN_ERR, phba->shost, "beiscsi_dev_probe-"
3832			     "Failed in beiscsi_init_port\n");
3833		goto free_port;
3834	}
3835
3836	for (i = 0; i < MAX_MCC_CMD ; i++) {
3837		init_waitqueue_head(&phba->ctrl.mcc_wait[i + 1]);
3838		phba->ctrl.mcc_tag[i] = i + 1;
3839		phba->ctrl.mcc_numtag[i + 1] = 0;
3840		phba->ctrl.mcc_tag_available++;
3841	}
3842
3843	phba->ctrl.mcc_alloc_index = phba->ctrl.mcc_free_index = 0;
3844
3845	snprintf(phba->wq_name, sizeof(phba->wq_name), "beiscsi_q_irq%u",
3846		 phba->shost->host_no);
3847	phba->wq = create_workqueue(phba->wq_name);
3848	if (!phba->wq) {
3849		shost_printk(KERN_ERR, phba->shost, "beiscsi_dev_probe-"
3850				"Failed to allocate work queue\n");
3851		goto free_twq;
3852	}
3853
3854	INIT_WORK(&phba->work_cqs, beiscsi_process_all_cqs);
3855
3856	phwi_ctrlr = phba->phwi_ctrlr;
3857	phwi_context = phwi_ctrlr->phwi_ctxt;
3858	if (blk_iopoll_enabled) {
3859		for (i = 0; i < phba->num_cpus; i++) {
3860			pbe_eq = &phwi_context->be_eq[i];
3861			blk_iopoll_init(&pbe_eq->iopoll, be_iopoll_budget,
3862					be_iopoll);
3863			blk_iopoll_enable(&pbe_eq->iopoll);
3864		}
3865	}
3866	ret = beiscsi_init_irqs(phba);
3867	if (ret < 0) {
3868		shost_printk(KERN_ERR, phba->shost, "beiscsi_dev_probe-"
3869			     "Failed to beiscsi_init_irqs\n");
3870		goto free_blkenbld;
3871	}
3872	ret = hwi_enable_intr(phba);
3873	if (ret < 0) {
3874		shost_printk(KERN_ERR, phba->shost, "beiscsi_dev_probe-"
3875			     "Failed to hwi_enable_intr\n");
3876		goto free_ctrlr;
3877	}
3878	SE_DEBUG(DBG_LVL_8, "\n\n\n SUCCESS - DRIVER LOADED \n\n\n");
3879	return 0;
3880
3881free_ctrlr:
3882	if (phba->msix_enabled) {
3883		for (i = 0; i <= phba->num_cpus; i++) {
3884			msix_vec = phba->msix_entries[i].vector;
3885			free_irq(msix_vec, &phwi_context->be_eq[i]);
3886		}
3887	} else
3888		if (phba->pcidev->irq)
3889			free_irq(phba->pcidev->irq, phba);
3890	pci_disable_msix(phba->pcidev);
3891free_blkenbld:
3892	destroy_workqueue(phba->wq);
3893	if (blk_iopoll_enabled)
3894		for (i = 0; i < phba->num_cpus; i++) {
3895			pbe_eq = &phwi_context->be_eq[i];
3896			blk_iopoll_disable(&pbe_eq->iopoll);
3897		}
3898free_twq:
3899	beiscsi_clean_port(phba);
3900	beiscsi_free_mem(phba);
3901free_port:
3902	pci_free_consistent(phba->pcidev,
3903			    phba->ctrl.mbox_mem_alloced.size,
3904			    phba->ctrl.mbox_mem_alloced.va,
3905			   phba->ctrl.mbox_mem_alloced.dma);
3906	beiscsi_unmap_pci_function(phba);
3907hba_free:
3908	iscsi_host_remove(phba->shost);
3909	pci_dev_put(phba->pcidev);
3910	iscsi_host_free(phba->shost);
3911disable_pci:
3912	pci_disable_device(pcidev);
3913	return ret;
3914}
3915
/*
 * iSCSI transport template registered with the scsi_transport_iscsi
 * midlayer in beiscsi_module_init().  Hardware-specific callbacks are
 * provided by this driver (beiscsi_*); generic session handling is
 * delegated to libiscsi helpers (iscsi_*).  CAP_DATA_PATH_OFFLOAD
 * advertises that the adapter offloads the iSCSI data path.
 */
struct iscsi_transport beiscsi_iscsi_transport = {
	.owner = THIS_MODULE,
	.name = DRV_NAME,
	/* capabilities advertised to the iSCSI midlayer/userspace */
	.caps = CAP_RECOVERY_L0 | CAP_HDRDGST | CAP_TEXT_NEGO |
		CAP_MULTI_R2T | CAP_DATADGST | CAP_DATA_PATH_OFFLOAD,
	/* per-session/connection parameters this transport supports */
	.param_mask = ISCSI_MAX_RECV_DLENGTH |
		ISCSI_MAX_XMIT_DLENGTH |
		ISCSI_HDRDGST_EN |
		ISCSI_DATADGST_EN |
		ISCSI_INITIAL_R2T_EN |
		ISCSI_MAX_R2T |
		ISCSI_IMM_DATA_EN |
		ISCSI_FIRST_BURST |
		ISCSI_MAX_BURST |
		ISCSI_PDU_INORDER_EN |
		ISCSI_DATASEQ_INORDER_EN |
		ISCSI_ERL |
		ISCSI_CONN_PORT |
		ISCSI_CONN_ADDRESS |
		ISCSI_EXP_STATSN |
		ISCSI_PERSISTENT_PORT |
		ISCSI_PERSISTENT_ADDRESS |
		ISCSI_TARGET_NAME | ISCSI_TPGT |
		ISCSI_USERNAME | ISCSI_PASSWORD |
		ISCSI_USERNAME_IN | ISCSI_PASSWORD_IN |
		ISCSI_FAST_ABORT | ISCSI_ABORT_TMO |
		ISCSI_LU_RESET_TMO |
		ISCSI_PING_TMO | ISCSI_RECV_TMO |
		ISCSI_IFACE_NAME | ISCSI_INITIATOR_NAME,
	/* host-level parameters exposed via sysfs/netlink */
	.host_param_mask = ISCSI_HOST_HWADDRESS | ISCSI_HOST_IPADDRESS |
				ISCSI_HOST_INITIATOR_NAME,
	/* session / connection lifecycle */
	.create_session = beiscsi_session_create,
	.destroy_session = beiscsi_session_destroy,
	.create_conn = beiscsi_conn_create,
	.bind_conn = beiscsi_conn_bind,
	.destroy_conn = iscsi_conn_teardown,
	/* parameter get/set */
	.set_param = beiscsi_set_param,
	.get_conn_param = beiscsi_conn_get_param,
	.get_session_param = iscsi_session_get_param,
	.get_host_param = beiscsi_get_host_param,
	.start_conn = beiscsi_conn_start,
	.stop_conn = beiscsi_conn_stop,
	.send_pdu = iscsi_conn_send_pdu,
	/* task (command/PDU) handling */
	.xmit_task = beiscsi_task_xmit,
	.cleanup_task = beiscsi_cleanup_task,
	.alloc_pdu = beiscsi_alloc_pdu,
	.parse_pdu_itt = beiscsi_parse_pdu,
	.get_stats = beiscsi_conn_get_stats,
	/* TCP endpoint (offloaded connection) management */
	.ep_connect = beiscsi_ep_connect,
	.ep_poll = beiscsi_ep_poll,
	.ep_disconnect = beiscsi_ep_disconnect,
	.session_recovery_timedout = iscsi_session_recovery_timedout,
};
3969
/*
 * PCI driver glue.  Devices matched via beiscsi_pci_id_table are
 * brought up in beiscsi_dev_probe() and torn down in beiscsi_remove().
 */
static struct pci_driver beiscsi_pci_driver = {
	.name = DRV_NAME,
	.probe = beiscsi_dev_probe,
	.remove = beiscsi_remove,
	.id_table = beiscsi_pci_id_table
};
3976
3977
3978static int __init beiscsi_module_init(void)
3979{
3980	int ret;
3981
3982	beiscsi_scsi_transport =
3983			iscsi_register_transport(&beiscsi_iscsi_transport);
3984	if (!beiscsi_scsi_transport) {
3985		SE_DEBUG(DBG_LVL_1,
3986			 "beiscsi_module_init - Unable to  register beiscsi"
3987			 "transport.\n");
3988		return -ENOMEM;
3989	}
3990	SE_DEBUG(DBG_LVL_8, "In beiscsi_module_init, tt=%p \n",
3991		 &beiscsi_iscsi_transport);
3992
3993	ret = pci_register_driver(&beiscsi_pci_driver);
3994	if (ret) {
3995		SE_DEBUG(DBG_LVL_1,
3996			 "beiscsi_module_init - Unable to  register"
3997			 "beiscsi pci driver.\n");
3998		goto unregister_iscsi_transport;
3999	}
4000	return 0;
4001
4002unregister_iscsi_transport:
4003	iscsi_unregister_transport(&beiscsi_iscsi_transport);
4004	return ret;
4005}
4006
/**
 * beiscsi_module_exit - module unload
 *
 * Unregister the PCI driver first so every adapter instance is removed
 * (and its sessions torn down) before the iSCSI transport itself is
 * pulled out from under the midlayer.
 */
static void __exit beiscsi_module_exit(void)
{
	pci_unregister_driver(&beiscsi_pci_driver);
	iscsi_unregister_transport(&beiscsi_iscsi_transport);
}
4012
/* Hook module load/unload into the kernel module machinery. */
module_init(beiscsi_module_init);
module_exit(beiscsi_module_exit);
4015