/*
 * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
 * All rights reserved
 * www.brocade.com
 *
 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License (GPL) Version 2 as
 * published by the Free Software Foundation
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 */

/*
 *  bfad.c Linux driver PCI interface module.
 */
#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/pci.h>
#include <linux/firmware.h>
#include <asm/uaccess.h>
#include <asm/fcntl.h>

#include "bfad_drv.h"
#include "bfad_im.h"
#include "bfa_fcs.h"
#include "bfa_defs.h"
#include "bfa.h"

BFA_TRC_FILE(LDRV, BFAD);
DEFINE_MUTEX(bfad_mutex);
LIST_HEAD(bfad_list);

static int	bfad_inst;
static int	num_sgpgs_parm;
int		supported_fc4s;
char		*host_name, *os_name, *os_patch;
int		num_rports, num_ios, num_tms;
int		num_fcxps, num_ufbufs;
int		reqq_size, rspq_size, num_sgpgs;
int		rport_del_timeout = BFA_FCS_RPORT_DEF_DEL_TIMEOUT;
int		bfa_lun_queue_depth = BFAD_LUN_QUEUE_DEPTH;
int		bfa_io_max_sge = BFAD_IO_MAX_SGE;
int		bfa_log_level = 3; /* WARNING log level */
int		ioc_auto_recover = BFA_TRUE;
int		bfa_linkup_delay = -1;
int		fdmi_enable = BFA_TRUE;
int		pcie_max_read_reqsz;
int		bfa_debugfs_enable = 1;
int		msix_disable_cb = 0, msix_disable_ct = 0;
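/* max_xfer_size is in KB; ">> 1" converts 512-byte sectors to KB */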
int		max_xfer_size = BFAD_MAX_SECTORS >> 1;

/* Firmware related */
u32	bfi_image_cb_size, bfi_image_ct_size, bfi_image_ct2_size;
u32	*bfi_image_cb, *bfi_image_ct, *bfi_image_ct2;

#define BFAD_FW_FILE_CB		"cbfw.bin"
#define BFAD_FW_FILE_CT		"ctfw.bin"
#define BFAD_FW_FILE_CT2	"ct2fw.bin"

static u32 *bfad_load_fwimg(struct pci_dev *pdev);
static void bfad_free_fwimg(void);
static void bfad_read_firmware(struct pci_dev *pdev, u32 **bfi_image,
		u32 *bfi_image_size, char *fw_name);

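/* Per-vector IRQ names used when registering the MSI-X handlers */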
static const char *msix_name_ct[] = {
	"ctrl",
	"cpe0", "cpe1", "cpe2", "cpe3",
	"rme0", "rme1", "rme2", "rme3" };

static const char *msix_name_cb[] = {
	"cpe0", "cpe1", "cpe2", "cpe3",
	"rme0", "rme1", "rme2", "rme3",
	"eemc", "elpu0", "elpu1", "epss", "mlpu" };

MODULE_FIRMWARE(BFAD_FW_FILE_CB);
MODULE_FIRMWARE(BFAD_FW_FILE_CT);
MODULE_FIRMWARE(BFAD_FW_FILE_CT2);

module_param(os_name, charp, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(os_name, "OS name of the hba host machine");
module_param(os_patch, charp, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(os_patch, "OS patch level of the hba host machine");
module_param(host_name, charp, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(host_name, "Hostname of the hba host machine");
module_param(num_rports, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(num_rports, "Max number of rports supported per port "
				"(physical/logical), default=1024");
module_param(num_ios, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(num_ios, "Max number of ioim requests, default=2000");
module_param(num_tms, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(num_tms, "Max number of task im requests, default=128");
module_param(num_fcxps, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(num_fcxps, "Max number of fcxp requests, default=64");
module_param(num_ufbufs, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(num_ufbufs, "Max number of unsolicited frame "
				"buffers, default=64");
module_param(reqq_size, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(reqq_size, "Max number of request queue elements, "
				"default=256");
module_param(rspq_size, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(rspq_size, "Max number of response queue elements, "
				"default=64");
module_param(num_sgpgs, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(num_sgpgs, "Number of scatter/gather pages, default=2048");
module_param(rport_del_timeout, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(rport_del_timeout, "Rport delete timeout, default=90 secs, "
					"Range[>0]");
module_param(bfa_lun_queue_depth, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(bfa_lun_queue_depth, "Lun queue depth, default=32, Range[>0]");
module_param(bfa_io_max_sge, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(bfa_io_max_sge, "Max io scatter/gather elements, default=255");
module_param(bfa_log_level, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(bfa_log_level, "Driver log level, default=3, "
				"Range[Critical:1|Error:2|Warning:3|Info:4]");
module_param(ioc_auto_recover, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(ioc_auto_recover, "IOC auto recovery, default=1, "
				"Range[off:0|on:1]");
module_param(bfa_linkup_delay, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(bfa_linkup_delay, "Link up delay, default=30 secs for "
			"boot port. Otherwise 10 secs in RHEL4 & 0 for "
			"[RHEL5, SLES10, ESX40], Range[>0]");
module_param(msix_disable_cb, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(msix_disable_cb, "Disable Message Signaled Interrupts "
			"for Brocade-415/425/815/825 cards, default=0, "
			"Range[false:0|true:1]");
module_param(msix_disable_ct, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(msix_disable_ct, "Disable Message Signaled Interrupts "
			"if possible for Brocade-1010/1020/804/1007/902/1741 "
			"cards, default=0, Range[false:0|true:1]");
module_param(fdmi_enable, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(fdmi_enable, "Enables fdmi registration, default=1, "
				"Range[false:0|true:1]");
module_param(pcie_max_read_reqsz, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(pcie_max_read_reqsz, "PCIe max read request size, default=0 "
		"(use system setting), Range[128|256|512|1024|2048|4096]");
module_param(bfa_debugfs_enable, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(bfa_debugfs_enable, "Enables debugfs feature, default=1, "
		"Range[false:0|true:1]");
module_param(max_xfer_size, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(max_xfer_size, "Max transfer size, default=32MB, "
		"Range[64k|128k|256k|512k|1024k|2048k]");

static void
bfad_sm_uninit(struct bfad_s *bfad, enum bfad_sm_event event);
static void
bfad_sm_created(struct bfad_s *bfad, enum bfad_sm_event event);
static void
bfad_sm_initializing(struct bfad_s *bfad, enum bfad_sm_event event);
static void
bfad_sm_operational(struct bfad_s *bfad, enum bfad_sm_event event);
static void
bfad_sm_stopping(struct bfad_s *bfad, enum bfad_sm_event event);
static void
bfad_sm_failed(struct bfad_s *bfad, enum bfad_sm_event event);
static void
bfad_sm_fcs_exit(struct bfad_s *bfad, enum bfad_sm_event event);
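
/*
 * Driver instance state machine: uninit -> created -> initializing ->
 * operational, with the failed, fcs_exit and stopping states covering
 * the error and teardown paths.
 */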

/*
 * Beginning state for the driver instance, awaiting the pci_probe event
 */
static void
bfad_sm_uninit(struct bfad_s *bfad, enum bfad_sm_event event)
{
	bfa_trc(bfad, event);

	switch (event) {
	case BFAD_E_CREATE:
		bfa_sm_set_state(bfad, bfad_sm_created);
		bfad->bfad_tsk = kthread_create(bfad_worker, (void *) bfad,
						"%s", "bfad_worker");
		if (IS_ERR(bfad->bfad_tsk)) {
			printk(KERN_INFO "bfad[%d]: Kernel thread "
				"creation failed!\n", bfad->inst_no);
			bfa_sm_send_event(bfad, BFAD_E_KTHREAD_CREATE_FAILED);
		}
		bfa_sm_send_event(bfad, BFAD_E_INIT);
		break;

	case BFAD_E_STOP:
		/* Ignore stop; already in uninit */
		break;

	default:
		bfa_sm_fault(bfad, event);
	}
}

/*
 * Driver instance is created, awaiting event INIT to initialize the bfad
 */
static void
bfad_sm_created(struct bfad_s *bfad, enum bfad_sm_event event)
{
	unsigned long flags;

	bfa_trc(bfad, event);

	switch (event) {
	case BFAD_E_INIT:
		bfa_sm_set_state(bfad, bfad_sm_initializing);

		init_completion(&bfad->comp);

		/* Enable interrupts and wait for bfa_init completion */
		if (bfad_setup_intr(bfad)) {
			printk(KERN_WARNING "bfad%d: bfad_setup_intr failed\n",
					bfad->inst_no);
			bfa_sm_send_event(bfad, BFAD_E_INTR_INIT_FAILED);
			break;
		}

		spin_lock_irqsave(&bfad->bfad_lock, flags);
		bfa_iocfc_init(&bfad->bfa);
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);

		/* Set up an interrupt handler for each vector */
		if ((bfad->bfad_flags & BFAD_MSIX_ON) &&
			bfad_install_msix_handler(bfad)) {
			printk(KERN_WARNING "%s: install_msix failed, bfad%d\n",
				__func__, bfad->inst_no);
		}

		bfad_init_timer(bfad);

		wait_for_completion(&bfad->comp);

		if ((bfad->bfad_flags & BFAD_HAL_INIT_DONE)) {
			bfa_sm_send_event(bfad, BFAD_E_INIT_SUCCESS);
		} else {
			printk(KERN_WARNING
				"bfa %s: bfa init failed\n",
				bfad->pci_name);
			bfad->bfad_flags |= BFAD_HAL_INIT_FAIL;
			bfa_sm_send_event(bfad, BFAD_E_INIT_FAILED);
		}

		break;

	case BFAD_E_KTHREAD_CREATE_FAILED:
		bfa_sm_set_state(bfad, bfad_sm_uninit);
		break;

	default:
		bfa_sm_fault(bfad, event);
	}
}

static void
bfad_sm_initializing(struct bfad_s *bfad, enum bfad_sm_event event)
{
	int	retval;
	unsigned long	flags;

	bfa_trc(bfad, event);

	switch (event) {
	case BFAD_E_INIT_SUCCESS:
		kthread_stop(bfad->bfad_tsk);
		spin_lock_irqsave(&bfad->bfad_lock, flags);
		bfad->bfad_tsk = NULL;
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);

		retval = bfad_start_ops(bfad);
		if (retval != BFA_STATUS_OK)
			break;
		bfa_sm_set_state(bfad, bfad_sm_operational);
		break;

	case BFAD_E_INTR_INIT_FAILED:
		bfa_sm_set_state(bfad, bfad_sm_uninit);
		kthread_stop(bfad->bfad_tsk);
		spin_lock_irqsave(&bfad->bfad_lock, flags);
		bfad->bfad_tsk = NULL;
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
		break;

	case BFAD_E_INIT_FAILED:
		bfa_sm_set_state(bfad, bfad_sm_failed);
		break;

	default:
		bfa_sm_fault(bfad, event);
	}
}

static void
bfad_sm_failed(struct bfad_s *bfad, enum bfad_sm_event event)
{
	int	retval;

	bfa_trc(bfad, event);

	switch (event) {
	case BFAD_E_INIT_SUCCESS:
		retval = bfad_start_ops(bfad);
		if (retval != BFA_STATUS_OK)
			break;
		bfa_sm_set_state(bfad, bfad_sm_operational);
		break;

	case BFAD_E_STOP:
		if (bfad->bfad_flags & BFAD_CFG_PPORT_DONE)
			bfad_uncfg_pport(bfad);
		if (bfad->bfad_flags & BFAD_FC4_PROBE_DONE) {
			bfad_im_probe_undo(bfad);
			bfad->bfad_flags &= ~BFAD_FC4_PROBE_DONE;
		}
		bfad_stop(bfad);
		break;

	case BFAD_E_EXIT_COMP:
		bfa_sm_set_state(bfad, bfad_sm_uninit);
		bfad_remove_intr(bfad);
		del_timer_sync(&bfad->hal_tmo);
		break;

	default:
		bfa_sm_fault(bfad, event);
	}
}

static void
bfad_sm_operational(struct bfad_s *bfad, enum bfad_sm_event event)
{
	bfa_trc(bfad, event);

	switch (event) {
	case BFAD_E_STOP:
		bfa_sm_set_state(bfad, bfad_sm_fcs_exit);
		bfad_fcs_stop(bfad);
		break;

	default:
		bfa_sm_fault(bfad, event);
	}
}

static void
bfad_sm_fcs_exit(struct bfad_s *bfad, enum bfad_sm_event event)
{
	bfa_trc(bfad, event);

	switch (event) {
	case BFAD_E_FCS_EXIT_COMP:
		bfa_sm_set_state(bfad, bfad_sm_stopping);
		bfad_stop(bfad);
		break;

	default:
		bfa_sm_fault(bfad, event);
	}
}

static void
bfad_sm_stopping(struct bfad_s *bfad, enum bfad_sm_event event)
{
	bfa_trc(bfad, event);

	switch (event) {
	case BFAD_E_EXIT_COMP:
		bfa_sm_set_state(bfad, bfad_sm_uninit);
		bfad_remove_intr(bfad);
		del_timer_sync(&bfad->hal_tmo);
		bfad_im_probe_undo(bfad);
		bfad->bfad_flags &= ~BFAD_FC4_PROBE_DONE;
		bfad_uncfg_pport(bfad);
		break;

	default:
		bfa_sm_fault(bfad, event);
		break;
	}
}
382
383/*
384 *  BFA callbacks
385 */
386void
387bfad_hcb_comp(void *arg, bfa_status_t status)
388{
389	struct bfad_hal_comp *fcomp = (struct bfad_hal_comp *)arg;
390
391	fcomp->status = status;
392	complete(&fcomp->comp);
393}
394
395/*
396 * bfa_init callback
397 */
398void
399bfa_cb_init(void *drv, bfa_status_t init_status)
400{
401	struct bfad_s	      *bfad = drv;
402
403	if (init_status == BFA_STATUS_OK) {
404		bfad->bfad_flags |= BFAD_HAL_INIT_DONE;
405
406		/*
407		 * If BFAD_HAL_INIT_FAIL flag is set:
408		 * Wake up the kernel thread to start
409		 * the bfad operations after HAL init done
410		 */
411		if ((bfad->bfad_flags & BFAD_HAL_INIT_FAIL)) {
412			bfad->bfad_flags &= ~BFAD_HAL_INIT_FAIL;
413			wake_up_process(bfad->bfad_tsk);
414		}
415	}
416
417	complete(&bfad->comp);
418}

/*
 *  BFA_FCS callbacks
 */
struct bfad_port_s *
bfa_fcb_lport_new(struct bfad_s *bfad, struct bfa_fcs_lport_s *port,
		 enum bfa_lport_role roles, struct bfad_vf_s *vf_drv,
		 struct bfad_vport_s *vp_drv)
{
	bfa_status_t	rc;
	struct bfad_port_s    *port_drv;

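	/* Classify the port: physical base, VF base, vport on the
	 * physical port, or vport under a VF. */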
	if (!vp_drv && !vf_drv) {
		port_drv = &bfad->pport;
		port_drv->pvb_type = BFAD_PORT_PHYS_BASE;
	} else if (!vp_drv && vf_drv) {
		port_drv = &vf_drv->base_port;
		port_drv->pvb_type = BFAD_PORT_VF_BASE;
	} else if (vp_drv && !vf_drv) {
		port_drv = &vp_drv->drv_port;
		port_drv->pvb_type = BFAD_PORT_PHYS_VPORT;
	} else {
		port_drv = &vp_drv->drv_port;
		port_drv->pvb_type = BFAD_PORT_VF_VPORT;
	}

	port_drv->fcs_port = port;
	port_drv->roles = roles;

	if (roles & BFA_LPORT_ROLE_FCP_IM) {
		rc = bfad_im_port_new(bfad, port_drv);
		if (rc != BFA_STATUS_OK) {
			bfad_im_port_delete(bfad, port_drv);
			port_drv = NULL;
		}
	}

	return port_drv;
}

void
bfa_fcb_lport_delete(struct bfad_s *bfad, enum bfa_lport_role roles,
		    struct bfad_vf_s *vf_drv, struct bfad_vport_s *vp_drv)
{
	struct bfad_port_s    *port_drv;

	/* this will only be called from rmmod context */
	if (vp_drv && !vp_drv->comp_del) {
		port_drv = (vp_drv) ? (&(vp_drv)->drv_port) :
				((vf_drv) ? (&(vf_drv)->base_port) :
				(&(bfad)->pport));
		bfa_trc(bfad, roles);
		if (roles & BFA_LPORT_ROLE_FCP_IM)
			bfad_im_port_delete(bfad, port_drv);
	}
}

/*
 * FCS RPORT alloc callback, after successful PLOGI by FCS
 */
bfa_status_t
bfa_fcb_rport_alloc(struct bfad_s *bfad, struct bfa_fcs_rport_s **rport,
		    struct bfad_rport_s **rport_drv)
{
	bfa_status_t	rc = BFA_STATUS_OK;

	*rport_drv = kzalloc(sizeof(struct bfad_rport_s), GFP_ATOMIC);
	if (*rport_drv == NULL) {
		rc = BFA_STATUS_ENOMEM;
		goto ext;
	}

	*rport = &(*rport_drv)->fcs_rport;

ext:
	return rc;
}

/*
 * FCS PBC VPORT Create
 */
void
bfa_fcb_pbc_vport_create(struct bfad_s *bfad, struct bfi_pbc_vport_s pbc_vport)
{
	struct bfa_lport_cfg_s port_cfg = {0};
	struct bfad_vport_s   *vport;
	int rc;

	vport = kzalloc(sizeof(struct bfad_vport_s), GFP_KERNEL);
	if (!vport) {
		bfa_trc(bfad, 0);
		return;
	}

	vport->drv_port.bfad = bfad;
	port_cfg.roles = BFA_LPORT_ROLE_FCP_IM;
	port_cfg.pwwn = pbc_vport.vp_pwwn;
	port_cfg.nwwn = pbc_vport.vp_nwwn;
	port_cfg.preboot_vp  = BFA_TRUE;

	rc = bfa_fcs_pbc_vport_create(&vport->fcs_vport, &bfad->bfa_fcs, 0,
				  &port_cfg, vport);

	if (rc != BFA_STATUS_OK) {
		bfa_trc(bfad, 0);
		return;
	}

	list_add_tail(&vport->list_entry, &bfad->pbc_vport_list);
}

void
bfad_hal_mem_release(struct bfad_s *bfad)
{
	struct bfa_meminfo_s *hal_meminfo = &bfad->meminfo;
	struct bfa_mem_dma_s *dma_info, *dma_elem;
	struct bfa_mem_kva_s *kva_info, *kva_elem;
	struct list_head *dm_qe, *km_qe;

	dma_info = &hal_meminfo->dma_info;
	kva_info = &hal_meminfo->kva_info;

	/* Iterate through the KVA meminfo queue */
	list_for_each(km_qe, &kva_info->qe) {
		kva_elem = (struct bfa_mem_kva_s *) km_qe;
		vfree(kva_elem->kva);
	}

	/* Iterate through the DMA meminfo queue */
	list_for_each(dm_qe, &dma_info->qe) {
		dma_elem = (struct bfa_mem_dma_s *) dm_qe;
		dma_free_coherent(&bfad->pcidev->dev,
				dma_elem->mem_len, dma_elem->kva,
				(dma_addr_t) dma_elem->dma);
	}

	memset(hal_meminfo, 0, sizeof(struct bfa_meminfo_s));
}

void
bfad_update_hal_cfg(struct bfa_iocfc_cfg_s *bfa_cfg)
{
	if (num_rports > 0)
		bfa_cfg->fwcfg.num_rports = num_rports;
	if (num_ios > 0)
		bfa_cfg->fwcfg.num_ioim_reqs = num_ios;
	if (num_tms > 0)
		bfa_cfg->fwcfg.num_tskim_reqs = num_tms;
	if (num_fcxps > 0 && num_fcxps <= BFA_FCXP_MAX)
		bfa_cfg->fwcfg.num_fcxp_reqs = num_fcxps;
	if (num_ufbufs > 0 && num_ufbufs <= BFA_UF_MAX)
		bfa_cfg->fwcfg.num_uf_bufs = num_ufbufs;
	if (reqq_size > 0)
		bfa_cfg->drvcfg.num_reqq_elems = reqq_size;
	if (rspq_size > 0)
		bfa_cfg->drvcfg.num_rspq_elems = rspq_size;
	if (num_sgpgs > 0 && num_sgpgs <= BFA_SGPG_MAX)
		bfa_cfg->drvcfg.num_sgpgs = num_sgpgs;

	/*
	 * Populate the HAL values back into the module parameters for
	 * sysfs use; otherwise the default values would show up as 0.
	 */
	num_rports = bfa_cfg->fwcfg.num_rports;
	num_ios = bfa_cfg->fwcfg.num_ioim_reqs;
	num_tms = bfa_cfg->fwcfg.num_tskim_reqs;
	num_fcxps = bfa_cfg->fwcfg.num_fcxp_reqs;
	num_ufbufs = bfa_cfg->fwcfg.num_uf_bufs;
	reqq_size = bfa_cfg->drvcfg.num_reqq_elems;
	rspq_size = bfa_cfg->drvcfg.num_rspq_elems;
	num_sgpgs = bfa_cfg->drvcfg.num_sgpgs;
}

bfa_status_t
bfad_hal_mem_alloc(struct bfad_s *bfad)
{
	struct bfa_meminfo_s *hal_meminfo = &bfad->meminfo;
	struct bfa_mem_dma_s *dma_info, *dma_elem;
	struct bfa_mem_kva_s *kva_info, *kva_elem;
	struct list_head *dm_qe, *km_qe;
	bfa_status_t	rc = BFA_STATUS_OK;
	dma_addr_t	phys_addr;

	bfa_cfg_get_default(&bfad->ioc_cfg);
	bfad_update_hal_cfg(&bfad->ioc_cfg);
	bfad->cfg_data.ioc_queue_depth = bfad->ioc_cfg.fwcfg.num_ioim_reqs;
	bfa_cfg_get_meminfo(&bfad->ioc_cfg, hal_meminfo, &bfad->bfa);

	dma_info = &hal_meminfo->dma_info;
	kva_info = &hal_meminfo->kva_info;

	/* Iterate through the KVA meminfo queue */
	list_for_each(km_qe, &kva_info->qe) {
		kva_elem = (struct bfa_mem_kva_s *) km_qe;
		kva_elem->kva = vmalloc(kva_elem->mem_len);
		if (kva_elem->kva == NULL) {
			bfad_hal_mem_release(bfad);
			rc = BFA_STATUS_ENOMEM;
			goto ext;
		}
		memset(kva_elem->kva, 0, kva_elem->mem_len);
	}

	/* Iterate through the DMA meminfo queue */
	list_for_each(dm_qe, &dma_info->qe) {
		dma_elem = (struct bfa_mem_dma_s *) dm_qe;
		dma_elem->kva = dma_alloc_coherent(&bfad->pcidev->dev,
						dma_elem->mem_len,
						&phys_addr, GFP_KERNEL);
		if (dma_elem->kva == NULL) {
			bfad_hal_mem_release(bfad);
			rc = BFA_STATUS_ENOMEM;
			goto ext;
		}
		dma_elem->dma = phys_addr;
		memset(dma_elem->kva, 0, dma_elem->mem_len);
	}
ext:
	return rc;
}

/*
 * Create a vport under a vf.
 */
bfa_status_t
bfad_vport_create(struct bfad_s *bfad, u16 vf_id,
		  struct bfa_lport_cfg_s *port_cfg, struct device *dev)
{
	struct bfad_vport_s   *vport;
	int		rc = BFA_STATUS_OK;
	unsigned long	flags;
	struct completion fcomp;

	vport = kzalloc(sizeof(struct bfad_vport_s), GFP_KERNEL);
	if (!vport) {
		rc = BFA_STATUS_ENOMEM;
		goto ext;
	}

	vport->drv_port.bfad = bfad;
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	rc = bfa_fcs_vport_create(&vport->fcs_vport, &bfad->bfa_fcs, vf_id,
				  port_cfg, vport);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	if (rc != BFA_STATUS_OK)
		goto ext_free_vport;

	if (port_cfg->roles & BFA_LPORT_ROLE_FCP_IM) {
		rc = bfad_im_scsi_host_alloc(bfad, vport->drv_port.im_port,
							dev);
		if (rc != BFA_STATUS_OK)
			goto ext_free_fcs_vport;
	}

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	bfa_fcs_vport_start(&vport->fcs_vport);
	list_add_tail(&vport->list_entry, &bfad->vport_list);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	return BFA_STATUS_OK;

ext_free_fcs_vport:
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	vport->comp_del = &fcomp;
	init_completion(vport->comp_del);
	bfa_fcs_vport_delete(&vport->fcs_vport);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	wait_for_completion(vport->comp_del);
ext_free_vport:
	kfree(vport);
ext:
	return rc;
}

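/*
 * BFA timer heartbeat, run every BFA_TIMER_FREQ msecs: beat the HAL
 * timer module, then process any completions it queued.
 */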
void
bfad_bfa_tmo(unsigned long data)
{
	struct bfad_s	      *bfad = (struct bfad_s *) data;
	unsigned long	flags;
	struct list_head	       doneq;

	spin_lock_irqsave(&bfad->bfad_lock, flags);

	bfa_timer_beat(&bfad->bfa.timer_mod);

	bfa_comp_deq(&bfad->bfa, &doneq);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	if (!list_empty(&doneq)) {
		bfa_comp_process(&bfad->bfa, &doneq);
		spin_lock_irqsave(&bfad->bfad_lock, flags);
		bfa_comp_free(&bfad->bfa, &doneq);
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	}

	mod_timer(&bfad->hal_tmo,
		  jiffies + msecs_to_jiffies(BFA_TIMER_FREQ));
}

void
bfad_init_timer(struct bfad_s *bfad)
{
	init_timer(&bfad->hal_tmo);
	bfad->hal_tmo.function = bfad_bfa_tmo;
	bfad->hal_tmo.data = (unsigned long)bfad;

	mod_timer(&bfad->hal_tmo,
		  jiffies + msecs_to_jiffies(BFA_TIMER_FREQ));
}

int
bfad_pci_init(struct pci_dev *pdev, struct bfad_s *bfad)
{
	int		rc = -ENODEV;

	if (pci_enable_device(pdev)) {
		printk(KERN_ERR "pci_enable_device fail %p\n", pdev);
		goto out;
	}

	if (pci_request_regions(pdev, BFAD_DRIVER_NAME))
		goto out_disable_device;

	pci_set_master(pdev);

	if ((pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) != 0) ||
	    (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)) != 0)) {
		if ((pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) ||
		   (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)) != 0)) {
			printk(KERN_ERR "pci_set_dma_mask fail %p\n", pdev);
			goto out_release_region;
		}
	}

	bfad->pci_bar0_kva = pci_iomap(pdev, 0, pci_resource_len(pdev, 0));
	bfad->pci_bar2_kva = pci_iomap(pdev, 2, pci_resource_len(pdev, 2));

	if (bfad->pci_bar0_kva == NULL) {
		printk(KERN_ERR "Fail to map bar0\n");
		goto out_release_region;
	}

	bfad->hal_pcidev.pci_slot = PCI_SLOT(pdev->devfn);
	bfad->hal_pcidev.pci_func = PCI_FUNC(pdev->devfn);
	bfad->hal_pcidev.pci_bar_kva = bfad->pci_bar0_kva;
	bfad->hal_pcidev.device_id = pdev->device;
	bfad->hal_pcidev.ssid = pdev->subsystem_device;
	bfad->pci_name = pci_name(pdev);

	bfad->pci_attr.vendor_id = pdev->vendor;
	bfad->pci_attr.device_id = pdev->device;
	bfad->pci_attr.ssid = pdev->subsystem_device;
	bfad->pci_attr.ssvid = pdev->subsystem_vendor;
	bfad->pci_attr.pcifn = PCI_FUNC(pdev->devfn);

	bfad->pcidev = pdev;

	/* Adjust PCIe Maximum Read Request Size */
	if (pcie_max_read_reqsz > 0) {
		int pcie_cap_reg;
		u16 pcie_dev_ctl;
		u16 mask = 0xffff;

		switch (pcie_max_read_reqsz) {
		case 128:
			mask = 0x0;
			break;
		case 256:
			mask = 0x1000;
			break;
		case 512:
			mask = 0x2000;
			break;
		case 1024:
			mask = 0x3000;
			break;
		case 2048:
			mask = 0x4000;
			break;
		case 4096:
			mask = 0x5000;
			break;
		default:
			break;
		}

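		/*
		 * The Device Control register sits at offset 8 from the
		 * PCIe capability; the Max Read Request Size field is
		 * bits 14:12 (0x7000).
		 */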
		pcie_cap_reg = pci_find_capability(pdev, PCI_CAP_ID_EXP);
		if (mask != 0xffff && pcie_cap_reg) {
			pcie_cap_reg += 0x08;
			pci_read_config_word(pdev, pcie_cap_reg, &pcie_dev_ctl);
			if ((pcie_dev_ctl & 0x7000) != mask) {
				printk(KERN_WARNING "BFA[%s]: "
				"pcie_max_read_request_size is %d, "
				"reset to %d\n", bfad->pci_name,
				(1 << ((pcie_dev_ctl & 0x7000) >> 12)) << 7,
				pcie_max_read_reqsz);

				pcie_dev_ctl &= ~0x7000;
				pci_write_config_word(pdev, pcie_cap_reg,
						pcie_dev_ctl | mask);
			}
		}
	}

	return 0;

out_release_region:
	pci_release_regions(pdev);
out_disable_device:
	pci_disable_device(pdev);
out:
	return rc;
}

void
bfad_pci_uninit(struct pci_dev *pdev, struct bfad_s *bfad)
{
	pci_iounmap(pdev, bfad->pci_bar0_kva);
	pci_iounmap(pdev, bfad->pci_bar2_kva);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
}

bfa_status_t
bfad_drv_init(struct bfad_s *bfad)
{
	bfa_status_t	rc;
	unsigned long	flags;

	bfad->cfg_data.rport_del_timeout = rport_del_timeout;
	bfad->cfg_data.lun_queue_depth = bfa_lun_queue_depth;
	bfad->cfg_data.io_max_sge = bfa_io_max_sge;
	bfad->cfg_data.binding_method = FCP_PWWN_BINDING;

	rc = bfad_hal_mem_alloc(bfad);
	if (rc != BFA_STATUS_OK) {
		printk(KERN_WARNING "bfad%d bfad_hal_mem_alloc failure\n",
		       bfad->inst_no);
		printk(KERN_WARNING
			"Not enough memory to attach all Brocade HBA ports, "
			"System may need more memory.\n");
		goto out_hal_mem_alloc_failure;
	}

	bfad->bfa.trcmod = bfad->trcmod;
	bfad->bfa.plog = &bfad->plog_buf;
	bfa_plog_init(&bfad->plog_buf);
	bfa_plog_str(&bfad->plog_buf, BFA_PL_MID_DRVR, BFA_PL_EID_DRIVER_START,
		     0, "Driver Attach");

	bfa_attach(&bfad->bfa, bfad, &bfad->ioc_cfg, &bfad->meminfo,
		   &bfad->hal_pcidev);

	/* FCS INIT */
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	bfad->bfa_fcs.trcmod = bfad->trcmod;
	bfa_fcs_attach(&bfad->bfa_fcs, &bfad->bfa, bfad, BFA_FALSE);
	bfad->bfa_fcs.fdmi_enabled = fdmi_enable;
	bfa_fcs_init(&bfad->bfa_fcs);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	bfad->bfad_flags |= BFAD_DRV_INIT_DONE;

	/* configure base port */
	rc = bfad_cfg_pport(bfad, BFA_LPORT_ROLE_FCP_IM);
	if (rc != BFA_STATUS_OK)
		goto out_cfg_pport_fail;

	return BFA_STATUS_OK;

out_cfg_pport_fail:
	/* fcs exit - on cfg pport failure */
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	init_completion(&bfad->comp);
	bfad->pport.flags |= BFAD_PORT_DELETE;
	bfa_fcs_exit(&bfad->bfa_fcs);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	wait_for_completion(&bfad->comp);
	/* bfa detach - free hal memory */
	bfa_detach(&bfad->bfa);
	bfad_hal_mem_release(bfad);
out_hal_mem_alloc_failure:
	return BFA_STATUS_FAILED;
}

void
bfad_drv_uninit(struct bfad_s *bfad)
{
	unsigned long   flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	init_completion(&bfad->comp);
	bfa_iocfc_stop(&bfad->bfa);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	wait_for_completion(&bfad->comp);

	del_timer_sync(&bfad->hal_tmo);
	bfa_isr_disable(&bfad->bfa);
	bfa_detach(&bfad->bfa);
	bfad_remove_intr(bfad);
	bfad_hal_mem_release(bfad);

	bfad->bfad_flags &= ~BFAD_DRV_INIT_DONE;
}

void
bfad_drv_start(struct bfad_s *bfad)
{
	unsigned long	flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	bfa_iocfc_start(&bfad->bfa);
	bfa_fcs_pbc_vport_init(&bfad->bfa_fcs);
	bfa_fcs_fabric_modstart(&bfad->bfa_fcs);
	bfad->bfad_flags |= BFAD_HAL_START_DONE;
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	if (bfad->im)
		flush_workqueue(bfad->im->drv_workq);
}

void
bfad_fcs_stop(struct bfad_s *bfad)
{
	unsigned long	flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	init_completion(&bfad->comp);
	bfad->pport.flags |= BFAD_PORT_DELETE;
	bfa_fcs_exit(&bfad->bfa_fcs);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	wait_for_completion(&bfad->comp);

	bfa_sm_send_event(bfad, BFAD_E_FCS_EXIT_COMP);
}

void
bfad_stop(struct bfad_s *bfad)
{
	unsigned long	flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	init_completion(&bfad->comp);
	bfa_iocfc_stop(&bfad->bfa);
	bfad->bfad_flags &= ~BFAD_HAL_START_DONE;
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	wait_for_completion(&bfad->comp);

	bfa_sm_send_event(bfad, BFAD_E_EXIT_COMP);
}

bfa_status_t
bfad_cfg_pport(struct bfad_s *bfad, enum bfa_lport_role role)
{
	int		rc = BFA_STATUS_OK;

	/* Allocate scsi_host for the physical port */
	if ((supported_fc4s & BFA_LPORT_ROLE_FCP_IM) &&
	    (role & BFA_LPORT_ROLE_FCP_IM)) {
		if (bfad->pport.im_port == NULL) {
			rc = BFA_STATUS_FAILED;
			goto out;
		}

		rc = bfad_im_scsi_host_alloc(bfad, bfad->pport.im_port,
						&bfad->pcidev->dev);
		if (rc != BFA_STATUS_OK)
			goto out;

		bfad->pport.roles |= BFA_LPORT_ROLE_FCP_IM;
	}

	bfad->bfad_flags |= BFAD_CFG_PPORT_DONE;

out:
	return rc;
}

void
bfad_uncfg_pport(struct bfad_s *bfad)
{
	if ((supported_fc4s & BFA_LPORT_ROLE_FCP_IM) &&
	    (bfad->pport.roles & BFA_LPORT_ROLE_FCP_IM)) {
		bfad_im_scsi_host_free(bfad, bfad->pport.im_port);
		bfad_im_port_clean(bfad->pport.im_port);
		kfree(bfad->pport.im_port);
		bfad->pport.roles &= ~BFA_LPORT_ROLE_FCP_IM;
	}

	bfad->bfad_flags &= ~BFAD_CFG_PPORT_DONE;
}

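/*
 * Start BFAD operations: feed driver info to the FCS, probe the IM
 * (initiator mode) layer, start the HAL and complete any pre-boot
 * configured (PBC) vport creates.
 */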
bfa_status_t
bfad_start_ops(struct bfad_s *bfad)
{
	int	retval;
	unsigned long	flags;
	struct bfad_vport_s *vport, *vport_new;
	struct bfa_fcs_driver_info_s driver_info;

	/* Limit min/max xfer size to [64k-32MB] */
	if (max_xfer_size < BFAD_MIN_SECTORS >> 1)
		max_xfer_size = BFAD_MIN_SECTORS >> 1;
	if (max_xfer_size > BFAD_MAX_SECTORS >> 1)
		max_xfer_size = BFAD_MAX_SECTORS >> 1;

	/* Fill in the driver_info to be passed to the FCS */
	memset(&driver_info, 0, sizeof(driver_info));
	strncpy(driver_info.version, BFAD_DRIVER_VERSION,
		sizeof(driver_info.version) - 1);
	if (host_name)
		strncpy(driver_info.host_machine_name, host_name,
			sizeof(driver_info.host_machine_name) - 1);
	if (os_name)
		strncpy(driver_info.host_os_name, os_name,
			sizeof(driver_info.host_os_name) - 1);
	if (os_patch)
		strncpy(driver_info.host_os_patch, os_patch,
			sizeof(driver_info.host_os_patch) - 1);

	strncpy(driver_info.os_device_name, bfad->pci_name,
		sizeof(driver_info.os_device_name) - 1);

	/* FCS driver info init */
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	bfa_fcs_driver_info_init(&bfad->bfa_fcs, &driver_info);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	/*
	 * FCS update cfg - reset the pwwn/nwwn of fabric base logical port
	 * with values learned during bfa_init firmware GETATTR REQ.
	 */
	bfa_fcs_update_cfg(&bfad->bfa_fcs);

	/* Set up fc_host fixed attributes if the kernel supports them */
	bfad_fc_host_init(bfad->pport.im_port);

	/* BFAD level FC4 IM specific resource allocation */
	retval = bfad_im_probe(bfad);
	if (retval != BFA_STATUS_OK) {
		printk(KERN_WARNING "bfad_im_probe failed\n");
		if (bfa_sm_cmp_state(bfad, bfad_sm_initializing))
			bfa_sm_set_state(bfad, bfad_sm_failed);
		bfad_im_probe_undo(bfad);
		bfad->bfad_flags &= ~BFAD_FC4_PROBE_DONE;
		bfad_uncfg_pport(bfad);
		bfad_stop(bfad);
		return BFA_STATUS_FAILED;
	} else
		bfad->bfad_flags |= BFAD_FC4_PROBE_DONE;

	bfad_drv_start(bfad);

	/* Complete pbc vport create */
	list_for_each_entry_safe(vport, vport_new, &bfad->pbc_vport_list,
				list_entry) {
		struct fc_vport_identifiers vid;
		struct fc_vport *fc_vport;
		char pwwn_buf[BFA_STRING_32];

		memset(&vid, 0, sizeof(vid));
		vid.roles = FC_PORT_ROLE_FCP_INITIATOR;
		vid.vport_type = FC_PORTTYPE_NPIV;
		vid.disable = false;
		vid.node_name = wwn_to_u64((u8 *)
				(&((vport->fcs_vport).lport.port_cfg.nwwn)));
		vid.port_name = wwn_to_u64((u8 *)
				(&((vport->fcs_vport).lport.port_cfg.pwwn)));
		fc_vport = fc_vport_create(bfad->pport.im_port->shost, 0, &vid);
		if (!fc_vport) {
			wwn2str(pwwn_buf, vid.port_name);
			printk(KERN_WARNING "bfad%d: failed to create pbc vport"
				" %s\n", bfad->inst_no, pwwn_buf);
		}
		list_del(&vport->list_entry);
		kfree(vport);
	}

	/*
	 * If bfa_linkup_delay is set to the default of -1, retrieve the
	 * value using bfad_get_linkup_delay(); otherwise use the value
	 * passed in as the module parameter.
	 */
	if (bfa_linkup_delay < 0) {
		bfa_linkup_delay = bfad_get_linkup_delay(bfad);
		bfad_rport_online_wait(bfad);
		bfa_linkup_delay = -1;
	} else
		bfad_rport_online_wait(bfad);

	BFA_LOG(KERN_INFO, bfad, bfa_log_level, "bfa device claimed\n");

	return BFA_STATUS_OK;
}

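/*
 * Driver instance worker thread: runs once, sends BFAD_E_INIT_SUCCESS
 * to the state machine, clears bfad_tsk and exits.
 */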
int
bfad_worker(void *ptr)
{
	struct bfad_s *bfad;
	unsigned long   flags;

	bfad = (struct bfad_s *)ptr;

	while (!kthread_should_stop()) {

		/* Send event BFAD_E_INIT_SUCCESS */
		bfa_sm_send_event(bfad, BFAD_E_INIT_SUCCESS);

		spin_lock_irqsave(&bfad->bfad_lock, flags);
		bfad->bfad_tsk = NULL;
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);

		break;
	}

	return 0;
}

/*
 *  BFA driver interrupt functions
 */
irqreturn_t
bfad_intx(int irq, void *dev_id)
{
	struct bfad_s	*bfad = dev_id;
	struct list_head	doneq;
	unsigned long	flags;
	bfa_boolean_t rc;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	rc = bfa_intx(&bfad->bfa);
	if (!rc) {
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
		return IRQ_NONE;
	}

	bfa_comp_deq(&bfad->bfa, &doneq);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	if (!list_empty(&doneq)) {
		bfa_comp_process(&bfad->bfa, &doneq);

		spin_lock_irqsave(&bfad->bfad_lock, flags);
		bfa_comp_free(&bfad->bfa, &doneq);
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	}

	return IRQ_HANDLED;
}

static irqreturn_t
bfad_msix(int irq, void *dev_id)
{
	struct bfad_msix_s *vec = dev_id;
	struct bfad_s *bfad = vec->bfad;
	struct list_head doneq;
	unsigned long   flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);

	bfa_msix(&bfad->bfa, vec->msix.entry);
	bfa_comp_deq(&bfad->bfa, &doneq);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	if (!list_empty(&doneq)) {
		bfa_comp_process(&bfad->bfa, &doneq);

		spin_lock_irqsave(&bfad->bfad_lock, flags);
		bfa_comp_free(&bfad->bfa, &doneq);
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	}

	return IRQ_HANDLED;
}

/*
 * Initialize the MSIX entry table.
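 * Each bit set in @mask selects one hardware vector; the loop records
 * it in both bfad->msix_tab and @msix_entries and counts the vectors
 * in bfad->nvec.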
 */
static void
bfad_init_msix_entry(struct bfad_s *bfad, struct msix_entry *msix_entries,
			 int mask, int max_bit)
{
	int	i;
	int	match = 0x00000001;

	for (i = 0, bfad->nvec = 0; i < MAX_MSIX_ENTRY; i++) {
		if (mask & match) {
			bfad->msix_tab[bfad->nvec].msix.entry = i;
			bfad->msix_tab[bfad->nvec].bfad = bfad;
			msix_entries[bfad->nvec].entry = i;
			bfad->nvec++;
		}

		match <<= 1;
	}
}

int
bfad_install_msix_handler(struct bfad_s *bfad)
{
	int i, error = 0;

	for (i = 0; i < bfad->nvec; i++) {
		sprintf(bfad->msix_tab[i].name, "bfa-%s-%s",
				bfad->pci_name,
				((bfa_asic_id_cb(bfad->hal_pcidev.device_id)) ?
				msix_name_cb[i] : msix_name_ct[i]));

		error = request_irq(bfad->msix_tab[i].msix.vector,
				    (irq_handler_t) bfad_msix, 0,
				    bfad->msix_tab[i].name, &bfad->msix_tab[i]);
		bfa_trc(bfad, i);
		bfa_trc(bfad, bfad->msix_tab[i].msix.vector);
		if (error) {
			int	j;

			for (j = 0; j < i; j++)
				free_irq(bfad->msix_tab[j].msix.vector,
						&bfad->msix_tab[j]);

			bfad->bfad_flags &= ~BFAD_MSIX_ON;
			pci_disable_msix(bfad->pcidev);

			return 1;
		}
	}

	return 0;
}

/*
 * Setup MSIX based interrupt.
 */
int
bfad_setup_intr(struct bfad_s *bfad)
{
	int error = 0;
	u32 mask = 0, i, num_bit = 0, max_bit = 0;
	struct msix_entry msix_entries[MAX_MSIX_ENTRY];
	struct pci_dev *pdev = bfad->pcidev;
	u16	reg;

	/* Call BFA to get the msix map for this PCI function. */
	bfa_msix_getvecs(&bfad->bfa, &mask, &num_bit, &max_bit);

	/* Set up the msix entry table */
	bfad_init_msix_entry(bfad, msix_entries, mask, max_bit);

	if ((bfa_asic_id_ctc(pdev->device) && !msix_disable_ct) ||
	   (bfa_asic_id_cb(pdev->device) && !msix_disable_cb)) {

		error = pci_enable_msix(bfad->pcidev, msix_entries, bfad->nvec);
		if (error) {
			/*
			 * A positive return is the number of vectors
			 * actually available. We have no mechanism to
			 * map multiple interrupts onto one vector, so
			 * even if we could retry with fewer vectors we
			 * wouldn't know how to associate interrupt
			 * events with them. Linux doesn't duplicate
			 * vectors in the MSI-X table for this case.
			 */

			printk(KERN_WARNING "bfad%d: "
				"pci_enable_msix failed (%d),"
				" use line based.\n", bfad->inst_no, error);

			goto line_based;
		}

		/* Disable INTX in MSI-X mode */
		pci_read_config_word(pdev, PCI_COMMAND, &reg);

		if (!(reg & PCI_COMMAND_INTX_DISABLE))
			pci_write_config_word(pdev, PCI_COMMAND,
				reg | PCI_COMMAND_INTX_DISABLE);

		/* Save the vectors */
		for (i = 0; i < bfad->nvec; i++) {
			bfa_trc(bfad, msix_entries[i].vector);
			bfad->msix_tab[i].msix.vector = msix_entries[i].vector;
		}

		bfa_msix_init(&bfad->bfa, bfad->nvec);

		bfad->bfad_flags |= BFAD_MSIX_ON;

		return error;
	}

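/*
 * Fall back to legacy INTx when MSI-X is disabled by module parameter
 * or pci_enable_msix() fails.
 */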
line_based:
	error = 0;
	if (request_irq
	    (bfad->pcidev->irq, (irq_handler_t) bfad_intx, BFAD_IRQ_FLAGS,
	     BFAD_DRIVER_NAME, bfad) != 0) {
		/* Enabling the interrupt handler failed */
		return 1;
	}
	bfad->bfad_flags |= BFAD_INTX_ON;

	return error;
}

void
bfad_remove_intr(struct bfad_s *bfad)
{
	int	i;

	if (bfad->bfad_flags & BFAD_MSIX_ON) {
		for (i = 0; i < bfad->nvec; i++)
			free_irq(bfad->msix_tab[i].msix.vector,
					&bfad->msix_tab[i]);

		pci_disable_msix(bfad->pcidev);
		bfad->bfad_flags &= ~BFAD_MSIX_ON;
	} else if (bfad->bfad_flags & BFAD_INTX_ON) {
		free_irq(bfad->pcidev->irq, bfad);
	}
}

/*
 * PCI probe entry.
 */
int
bfad_pci_probe(struct pci_dev *pdev, const struct pci_device_id *pid)
{
	struct bfad_s	*bfad;
	int		error = -ENODEV, retval, i;

	/* For single port cards - only claim function 0 */
	if ((pdev->device == BFA_PCI_DEVICE_ID_FC_8G1P) &&
		(PCI_FUNC(pdev->devfn) != 0))
		return -ENODEV;

	bfad = kzalloc(sizeof(struct bfad_s), GFP_KERNEL);
	if (!bfad) {
		error = -ENOMEM;
		goto out;
	}

	bfad->trcmod = kzalloc(sizeof(struct bfa_trc_mod_s), GFP_KERNEL);
	if (!bfad->trcmod) {
		printk(KERN_WARNING "Error alloc trace buffer!\n");
		error = -ENOMEM;
		goto out_alloc_trace_failure;
	}

	/* TRACE INIT */
	bfa_trc_init(bfad->trcmod);
	bfa_trc(bfad, bfad_inst);

	/* AEN INIT */
	INIT_LIST_HEAD(&bfad->free_aen_q);
	INIT_LIST_HEAD(&bfad->active_aen_q);
	for (i = 0; i < BFA_AEN_MAX_ENTRY; i++)
		list_add_tail(&bfad->aen_list[i].qe, &bfad->free_aen_q);

	if (!(bfad_load_fwimg(pdev))) {
		kfree(bfad->trcmod);
		goto out_alloc_trace_failure;
	}

	retval = bfad_pci_init(pdev, bfad);
	if (retval) {
		printk(KERN_WARNING "bfad_pci_init failure!\n");
		error = retval;
		goto out_pci_init_failure;
	}

	mutex_lock(&bfad_mutex);
	bfad->inst_no = bfad_inst++;
	list_add_tail(&bfad->list_entry, &bfad_list);
	mutex_unlock(&bfad_mutex);

	/* Initializing the state machine: State set to uninit */
	bfa_sm_set_state(bfad, bfad_sm_uninit);

	spin_lock_init(&bfad->bfad_lock);
	pci_set_drvdata(pdev, bfad);

	bfad->ref_count = 0;
	bfad->pport.bfad = bfad;
	INIT_LIST_HEAD(&bfad->pbc_vport_list);
	INIT_LIST_HEAD(&bfad->vport_list);

	/* Setup the debugfs node for this bfad */
	if (bfa_debugfs_enable)
		bfad_debugfs_init(&bfad->pport);

	retval = bfad_drv_init(bfad);
	if (retval != BFA_STATUS_OK)
		goto out_drv_init_failure;

	bfa_sm_send_event(bfad, BFAD_E_CREATE);

	if (bfa_sm_cmp_state(bfad, bfad_sm_uninit))
		goto out_bfad_sm_failure;

	return 0;

out_bfad_sm_failure:
	bfa_detach(&bfad->bfa);
	bfad_hal_mem_release(bfad);
out_drv_init_failure:
	/* Remove the debugfs node for this bfad */
	kfree(bfad->regdata);
	bfad_debugfs_exit(&bfad->pport);
	mutex_lock(&bfad_mutex);
	bfad_inst--;
	list_del(&bfad->list_entry);
	mutex_unlock(&bfad_mutex);
	bfad_pci_uninit(pdev, bfad);
out_pci_init_failure:
	kfree(bfad->trcmod);
out_alloc_trace_failure:
	kfree(bfad);
out:
	return error;
}

/*
 * PCI remove entry.
 */
void
bfad_pci_remove(struct pci_dev *pdev)
{
	struct bfad_s	      *bfad = pci_get_drvdata(pdev);
	unsigned long	flags;

	bfa_trc(bfad, bfad->inst_no);

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	if (bfad->bfad_tsk != NULL) {
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
		kthread_stop(bfad->bfad_tsk);
	} else {
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	}

	/* Send Event BFAD_E_STOP */
	bfa_sm_send_event(bfad, BFAD_E_STOP);

	/* Driver detach and dealloc mem */
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	bfa_detach(&bfad->bfa);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	bfad_hal_mem_release(bfad);

	/* Remove the debugfs node for this bfad */
	kfree(bfad->regdata);
	bfad_debugfs_exit(&bfad->pport);

	/* Cleaning the BFAD instance */
	mutex_lock(&bfad_mutex);
	bfad_inst--;
	list_del(&bfad->list_entry);
	mutex_unlock(&bfad_mutex);
	bfad_pci_uninit(pdev, bfad);

	kfree(bfad->trcmod);
	kfree(bfad);
}

struct pci_device_id bfad_id_table[] = {
	{
		.vendor = BFA_PCI_VENDOR_ID_BROCADE,
		.device = BFA_PCI_DEVICE_ID_FC_8G2P,
		.subvendor = PCI_ANY_ID,
		.subdevice = PCI_ANY_ID,
	},
	{
		.vendor = BFA_PCI_VENDOR_ID_BROCADE,
		.device = BFA_PCI_DEVICE_ID_FC_8G1P,
		.subvendor = PCI_ANY_ID,
		.subdevice = PCI_ANY_ID,
	},
	{
		.vendor = BFA_PCI_VENDOR_ID_BROCADE,
		.device = BFA_PCI_DEVICE_ID_CT,
		.subvendor = PCI_ANY_ID,
		.subdevice = PCI_ANY_ID,
		.class = (PCI_CLASS_SERIAL_FIBER << 8),
		.class_mask = ~0,
	},
	{
		.vendor = BFA_PCI_VENDOR_ID_BROCADE,
		.device = BFA_PCI_DEVICE_ID_CT_FC,
		.subvendor = PCI_ANY_ID,
		.subdevice = PCI_ANY_ID,
		.class = (PCI_CLASS_SERIAL_FIBER << 8),
		.class_mask = ~0,
	},
	{
		.vendor = BFA_PCI_VENDOR_ID_BROCADE,
		.device = BFA_PCI_DEVICE_ID_CT2,
		.subvendor = PCI_ANY_ID,
		.subdevice = PCI_ANY_ID,
		.class = (PCI_CLASS_SERIAL_FIBER << 8),
		.class_mask = ~0,
	},

	{0, 0},
};

MODULE_DEVICE_TABLE(pci, bfad_id_table);

static struct pci_driver bfad_pci_driver = {
	.name = BFAD_DRIVER_NAME,
	.id_table = bfad_id_table,
	.probe = bfad_pci_probe,
	.remove = __devexit_p(bfad_pci_remove),
};

/*
 * Driver module init.
 */
static int __init
bfad_init(void)
{
	int		error = 0;

	printk(KERN_INFO "Brocade BFA FC/FCOE SCSI driver - version: %s\n",
			BFAD_DRIVER_VERSION);

	if (num_sgpgs > 0)
		num_sgpgs_parm = num_sgpgs;

	error = bfad_im_module_init();
	if (error) {
		error = -ENOMEM;
		printk(KERN_WARNING "bfad_im_module_init failure\n");
		goto ext;
	}

	if (strcmp(FCPI_NAME, " fcpim") == 0)
		supported_fc4s |= BFA_LPORT_ROLE_FCP_IM;

	bfa_auto_recover = ioc_auto_recover;
	bfa_fcs_rport_set_del_timeout(rport_del_timeout);

	error = pci_register_driver(&bfad_pci_driver);
	if (error) {
		printk(KERN_WARNING "pci_register_driver failure\n");
		goto ext;
	}

	return 0;

ext:
	bfad_im_module_exit();
	return error;
}

/*
 * Driver module exit.
 */
static void __exit
bfad_exit(void)
{
	pci_unregister_driver(&bfad_pci_driver);
	bfad_im_module_exit();
	bfad_free_fwimg();
}

/* Firmware handling */
static void
bfad_read_firmware(struct pci_dev *pdev, u32 **bfi_image,
		u32 *bfi_image_size, char *fw_name)
{
	const struct firmware *fw;

	if (request_firmware(&fw, fw_name, &pdev->dev)) {
		printk(KERN_ALERT "Can't locate firmware %s\n", fw_name);
		*bfi_image = NULL;
		goto out;
	}

	*bfi_image = vmalloc(fw->size);
	if (NULL == *bfi_image) {
		printk(KERN_ALERT "Failed to allocate buffer for fw image "
			"size=%x!\n", (u32) fw->size);
		goto out;
	}

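	/* copy the image; its size is tracked in units of 32-bit words */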
	memcpy(*bfi_image, fw->data, fw->size);
	*bfi_image_size = fw->size/sizeof(u32);
out:
	release_firmware(fw);
}

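/*
 * Return the firmware image matching this adapter's ASIC family,
 * reading it via request_firmware() on first use and caching it in
 * the global bfi_image_* buffers afterwards.
 */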
static u32 *
bfad_load_fwimg(struct pci_dev *pdev)
{
	if (pdev->device == BFA_PCI_DEVICE_ID_CT2) {
		if (bfi_image_ct2_size == 0)
			bfad_read_firmware(pdev, &bfi_image_ct2,
				&bfi_image_ct2_size, BFAD_FW_FILE_CT2);
		return bfi_image_ct2;
	} else if (bfa_asic_id_ct(pdev->device)) {
		if (bfi_image_ct_size == 0)
			bfad_read_firmware(pdev, &bfi_image_ct,
				&bfi_image_ct_size, BFAD_FW_FILE_CT);
		return bfi_image_ct;
	} else {
		if (bfi_image_cb_size == 0)
			bfad_read_firmware(pdev, &bfi_image_cb,
				&bfi_image_cb_size, BFAD_FW_FILE_CB);
		return bfi_image_cb;
	}
}

static void
bfad_free_fwimg(void)
{
	if (bfi_image_ct2_size && bfi_image_ct2)
		vfree(bfi_image_ct2);
	if (bfi_image_ct_size && bfi_image_ct)
		vfree(bfi_image_ct);
	if (bfi_image_cb_size && bfi_image_cb)
		vfree(bfi_image_cb);
}

module_init(bfad_init);
module_exit(bfad_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Brocade Fibre Channel HBA Driver" BFAD_PROTO_NAME);
MODULE_AUTHOR("Brocade Communications Systems, Inc.");
MODULE_VERSION(BFAD_DRIVER_VERSION);