bfad.c revision d7be54cc5c5f6f9cb9ac67462aadda57813698b8
1/*
2 * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
15 * General Public License for more details.
16 */
17
18/*
19 *  bfad.c Linux driver PCI interface module.
20 */
21#include <linux/module.h>
22#include <linux/kthread.h>
23#include <linux/errno.h>
24#include <linux/sched.h>
25#include <linux/init.h>
26#include <linux/fs.h>
27#include <linux/pci.h>
28#include <linux/firmware.h>
29#include <asm/uaccess.h>
30#include <asm/fcntl.h>
31
32#include "bfad_drv.h"
33#include "bfad_im.h"
34#include "bfa_fcs.h"
35#include "bfa_defs.h"
36#include "bfa.h"
37
38BFA_TRC_FILE(LDRV, BFAD);
39DEFINE_MUTEX(bfad_mutex);
40LIST_HEAD(bfad_list);
41
42static int	bfad_inst;
43static int      num_sgpgs_parm;
44int		supported_fc4s;
45char		*host_name, *os_name, *os_patch;
46int		num_rports, num_ios, num_tms;
47int		num_fcxps, num_ufbufs;
48int		reqq_size, rspq_size, num_sgpgs;
49int		rport_del_timeout = BFA_FCS_RPORT_DEF_DEL_TIMEOUT;
50int		bfa_lun_queue_depth = BFAD_LUN_QUEUE_DEPTH;
51int		bfa_io_max_sge = BFAD_IO_MAX_SGE;
52int		bfa_log_level = 3; /* WARNING log level */
53int		ioc_auto_recover = BFA_TRUE;
54int		bfa_linkup_delay = -1;
55int		fdmi_enable = BFA_TRUE;
56int		pcie_max_read_reqsz;
57int		bfa_debugfs_enable = 1;
58int		msix_disable_cb = 0, msix_disable_ct = 0;
59
60/* Firmware releated */
61u32	bfi_image_cb_size, bfi_image_ct_size, bfi_image_ct2_size;
62u32	*bfi_image_cb, *bfi_image_ct, *bfi_image_ct2;
63
64#define BFAD_FW_FILE_CB		"cbfw.bin"
65#define BFAD_FW_FILE_CT		"ctfw.bin"
66#define BFAD_FW_FILE_CT2	"ct2fw.bin"
67
68static u32 *bfad_load_fwimg(struct pci_dev *pdev);
69static void bfad_free_fwimg(void);
70static void bfad_read_firmware(struct pci_dev *pdev, u32 **bfi_image,
71		u32 *bfi_image_size, char *fw_name);
72
73static const char *msix_name_ct[] = {
74	"ctrl",
75	"cpe0", "cpe1", "cpe2", "cpe3",
76	"rme0", "rme1", "rme2", "rme3" };
77
78static const char *msix_name_cb[] = {
79	"cpe0", "cpe1", "cpe2", "cpe3",
80	"rme0", "rme1", "rme2", "rme3",
81	"eemc", "elpu0", "elpu1", "epss", "mlpu" };
82
83MODULE_FIRMWARE(BFAD_FW_FILE_CB);
84MODULE_FIRMWARE(BFAD_FW_FILE_CT);
85MODULE_FIRMWARE(BFAD_FW_FILE_CT2);
86
87module_param(os_name, charp, S_IRUGO | S_IWUSR);
88MODULE_PARM_DESC(os_name, "OS name of the hba host machine");
89module_param(os_patch, charp, S_IRUGO | S_IWUSR);
90MODULE_PARM_DESC(os_patch, "OS patch level of the hba host machine");
91module_param(host_name, charp, S_IRUGO | S_IWUSR);
92MODULE_PARM_DESC(host_name, "Hostname of the hba host machine");
93module_param(num_rports, int, S_IRUGO | S_IWUSR);
94MODULE_PARM_DESC(num_rports, "Max number of rports supported per port "
95				"(physical/logical), default=1024");
96module_param(num_ios, int, S_IRUGO | S_IWUSR);
97MODULE_PARM_DESC(num_ios, "Max number of ioim requests, default=2000");
98module_param(num_tms, int, S_IRUGO | S_IWUSR);
99MODULE_PARM_DESC(num_tms, "Max number of task im requests, default=128");
100module_param(num_fcxps, int, S_IRUGO | S_IWUSR);
101MODULE_PARM_DESC(num_fcxps, "Max number of fcxp requests, default=64");
102module_param(num_ufbufs, int, S_IRUGO | S_IWUSR);
103MODULE_PARM_DESC(num_ufbufs, "Max number of unsolicited frame "
104				"buffers, default=64");
105module_param(reqq_size, int, S_IRUGO | S_IWUSR);
106MODULE_PARM_DESC(reqq_size, "Max number of request queue elements, "
107				"default=256");
108module_param(rspq_size, int, S_IRUGO | S_IWUSR);
109MODULE_PARM_DESC(rspq_size, "Max number of response queue elements, "
110				"default=64");
111module_param(num_sgpgs, int, S_IRUGO | S_IWUSR);
112MODULE_PARM_DESC(num_sgpgs, "Number of scatter/gather pages, default=2048");
113module_param(rport_del_timeout, int, S_IRUGO | S_IWUSR);
114MODULE_PARM_DESC(rport_del_timeout, "Rport delete timeout, default=90 secs, "
115					"Range[>0]");
116module_param(bfa_lun_queue_depth, int, S_IRUGO | S_IWUSR);
117MODULE_PARM_DESC(bfa_lun_queue_depth, "Lun queue depth, default=32, Range[>0]");
118module_param(bfa_io_max_sge, int, S_IRUGO | S_IWUSR);
119MODULE_PARM_DESC(bfa_io_max_sge, "Max io scatter/gather elements, default=255");
120module_param(bfa_log_level, int, S_IRUGO | S_IWUSR);
121MODULE_PARM_DESC(bfa_log_level, "Driver log level, default=3, "
122				"Range[Critical:1|Error:2|Warning:3|Info:4]");
123module_param(ioc_auto_recover, int, S_IRUGO | S_IWUSR);
124MODULE_PARM_DESC(ioc_auto_recover, "IOC auto recovery, default=1, "
125				"Range[off:0|on:1]");
126module_param(bfa_linkup_delay, int, S_IRUGO | S_IWUSR);
127MODULE_PARM_DESC(bfa_linkup_delay, "Link up delay, default=30 secs for "
128			"boot port. Otherwise 10 secs in RHEL4 & 0 for "
129			"[RHEL5, SLES10, ESX40] Range[>0]");
130module_param(msix_disable_cb, int, S_IRUGO | S_IWUSR);
131MODULE_PARM_DESC(msix_disable_cb, "Disable Message Signaled Interrupts "
132			"for Brocade-415/425/815/825 cards, default=0, "
133			" Range[false:0|true:1]");
134module_param(msix_disable_ct, int, S_IRUGO | S_IWUSR);
135MODULE_PARM_DESC(msix_disable_ct, "Disable Message Signaled Interrupts "
136			"if possible for Brocade-1010/1020/804/1007/902/1741 "
137			"cards, default=0, Range[false:0|true:1]");
138module_param(fdmi_enable, int, S_IRUGO | S_IWUSR);
139MODULE_PARM_DESC(fdmi_enable, "Enables fdmi registration, default=1, "
140				"Range[false:0|true:1]");
141module_param(pcie_max_read_reqsz, int, S_IRUGO | S_IWUSR);
142MODULE_PARM_DESC(pcie_max_read_reqsz, "PCIe max read request size, default=0 "
143		"(use system setting), Range[128|256|512|1024|2048|4096]");
144module_param(bfa_debugfs_enable, int, S_IRUGO | S_IWUSR);
145MODULE_PARM_DESC(bfa_debugfs_enable, "Enables debugfs feature, default=1,"
146		" Range[false:0|true:1]");
147
148static void
149bfad_sm_uninit(struct bfad_s *bfad, enum bfad_sm_event event);
150static void
151bfad_sm_created(struct bfad_s *bfad, enum bfad_sm_event event);
152static void
153bfad_sm_initializing(struct bfad_s *bfad, enum bfad_sm_event event);
154static void
155bfad_sm_operational(struct bfad_s *bfad, enum bfad_sm_event event);
156static void
157bfad_sm_stopping(struct bfad_s *bfad, enum bfad_sm_event event);
158static void
159bfad_sm_failed(struct bfad_s *bfad, enum bfad_sm_event event);
160static void
161bfad_sm_fcs_exit(struct bfad_s *bfad, enum bfad_sm_event event);
162
163/*
164 * Beginning state for the driver instance, awaiting the pci_probe event
165 */
static void
bfad_sm_uninit(struct bfad_s *bfad, enum bfad_sm_event event)
{
	bfa_trc(bfad, event);

	switch (event) {
	case BFAD_E_CREATE:
		/* PCI probe: move to "created", spawn the one-shot worker
		 * thread (bfad_worker) that will later drive start-ops,
		 * then immediately kick the SM with INIT. */
		bfa_sm_set_state(bfad, bfad_sm_created);
		bfad->bfad_tsk = kthread_create(bfad_worker, (void *) bfad,
						"%s", "bfad_worker");
		if (IS_ERR(bfad->bfad_tsk)) {
			printk(KERN_INFO "bfad[%d]: Kernel thread "
				"creation failed!\n", bfad->inst_no);
			bfa_sm_send_event(bfad, BFAD_E_KTHREAD_CREATE_FAILED);
		}
		/* NOTE(review): BFAD_E_INIT is sent even on the kthread
		 * failure path, after KTHREAD_CREATE_FAILED has already
		 * bounced the SM back to uninit, where INIT lands in the
		 * default/fault arm -- confirm this is intentional. */
		bfa_sm_send_event(bfad, BFAD_E_INIT);
		break;

	case BFAD_E_STOP:
		/* Ignore stop; already in uninit */
		break;

	default:
		bfa_sm_fault(bfad, event);
	}
}
192
193/*
194 * Driver Instance is created, awaiting event INIT to initialize the bfad
195 */
static void
bfad_sm_created(struct bfad_s *bfad, enum bfad_sm_event event)
{
	unsigned long flags;

	bfa_trc(bfad, event);

	switch (event) {
	case BFAD_E_INIT:
		bfa_sm_set_state(bfad, bfad_sm_initializing);

		init_completion(&bfad->comp);

		/* Enable Interrupt and wait bfa_init completion */
		if (bfad_setup_intr(bfad)) {
			printk(KERN_WARNING "bfad%d: bfad_setup_intr failed\n",
					bfad->inst_no);
			bfa_sm_send_event(bfad, BFAD_E_INTR_INIT_FAILED);
			break;
		}

		spin_lock_irqsave(&bfad->bfad_lock, flags);
		bfa_iocfc_init(&bfad->bfa);
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);

		/* Set up interrupt handler for each vectors */
		if ((bfad->bfad_flags & BFAD_MSIX_ON) &&
			bfad_install_msix_handler(bfad)) {
			/* Non-fatal: only a warning is logged here. */
			printk(KERN_WARNING "%s: install_msix failed, bfad%d\n",
				__func__, bfad->inst_no);
		}

		bfad_init_timer(bfad);

		/* Block until bfa_cb_init() completes bfad->comp. */
		wait_for_completion(&bfad->comp);

		if ((bfad->bfad_flags & BFAD_HAL_INIT_DONE)) {
			bfa_sm_send_event(bfad, BFAD_E_INIT_SUCCESS);
		} else {
			printk(KERN_WARNING
				"bfa %s: bfa init failed\n",
				bfad->pci_name);
			/* Leave INIT_FAIL set so bfa_cb_init() can wake the
			 * worker if the IOC later recovers. */
			bfad->bfad_flags |= BFAD_HAL_INIT_FAIL;
			bfa_sm_send_event(bfad, BFAD_E_INIT_FAILED);
		}

		break;

	case BFAD_E_KTHREAD_CREATE_FAILED:
		/* Worker thread never started; fall back to uninit. */
		bfa_sm_set_state(bfad, bfad_sm_uninit);
		break;

	default:
		bfa_sm_fault(bfad, event);
	}
}
252
/*
 * HAL init in progress: reap the worker thread on success and start
 * driver operations; tear the thread down on interrupt-setup failure.
 */
static void
bfad_sm_initializing(struct bfad_s *bfad, enum bfad_sm_event event)
{
	int	retval;
	unsigned long	flags;

	bfa_trc(bfad, event);

	switch (event) {
	case BFAD_E_INIT_SUCCESS:
		/* Stop the one-shot worker and clear the task pointer
		 * under the driver lock before starting operations. */
		kthread_stop(bfad->bfad_tsk);
		spin_lock_irqsave(&bfad->bfad_lock, flags);
		bfad->bfad_tsk = NULL;
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);

		/* bfad_start_ops() moves the SM to "failed" itself on
		 * error, so we only advance on success. */
		retval = bfad_start_ops(bfad);
		if (retval != BFA_STATUS_OK)
			break;
		bfa_sm_set_state(bfad, bfad_sm_operational);
		break;

	case BFAD_E_INTR_INIT_FAILED:
		bfa_sm_set_state(bfad, bfad_sm_uninit);
		kthread_stop(bfad->bfad_tsk);
		spin_lock_irqsave(&bfad->bfad_lock, flags);
		bfad->bfad_tsk = NULL;
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
		break;

	case BFAD_E_INIT_FAILED:
		bfa_sm_set_state(bfad, bfad_sm_failed);
		break;
	default:
		bfa_sm_fault(bfad, event);
	}
}
289
/*
 * Init failed earlier; allow a later retry via INIT_SUCCESS or a clean
 * teardown on STOP / EXIT_COMP.
 */
static void
bfad_sm_failed(struct bfad_s *bfad, enum bfad_sm_event event)
{
	int	retval;

	bfa_trc(bfad, event);

	switch (event) {
	case BFAD_E_INIT_SUCCESS:
		/* HAL came up after the initial failure: try start-ops. */
		retval = bfad_start_ops(bfad);
		if (retval != BFA_STATUS_OK)
			break;
		bfa_sm_set_state(bfad, bfad_sm_operational);
		break;

	case BFAD_E_STOP:
		/* Undo whatever partial configuration completed. */
		if (bfad->bfad_flags & BFAD_CFG_PPORT_DONE)
			bfad_uncfg_pport(bfad);
		if (bfad->bfad_flags & BFAD_FC4_PROBE_DONE) {
			bfad_im_probe_undo(bfad);
			bfad->bfad_flags &= ~BFAD_FC4_PROBE_DONE;
		}
		bfad_stop(bfad);
		break;

	case BFAD_E_EXIT_COMP:
		/* IOC stop finished: drop back to uninit and release the
		 * interrupt resources and heartbeat timer. */
		bfa_sm_set_state(bfad, bfad_sm_uninit);
		bfad_remove_intr(bfad);
		del_timer_sync(&bfad->hal_tmo);
		break;

	default:
		bfa_sm_fault(bfad, event);
	}
}
325
326static void
327bfad_sm_operational(struct bfad_s *bfad, enum bfad_sm_event event)
328{
329	bfa_trc(bfad, event);
330
331	switch (event) {
332	case BFAD_E_STOP:
333		bfa_sm_set_state(bfad, bfad_sm_fcs_exit);
334		bfad_fcs_stop(bfad);
335		break;
336
337	default:
338		bfa_sm_fault(bfad, event);
339	}
340}
341
342static void
343bfad_sm_fcs_exit(struct bfad_s *bfad, enum bfad_sm_event event)
344{
345	bfa_trc(bfad, event);
346
347	switch (event) {
348	case BFAD_E_FCS_EXIT_COMP:
349		bfa_sm_set_state(bfad, bfad_sm_stopping);
350		bfad_stop(bfad);
351		break;
352
353	default:
354		bfa_sm_fault(bfad, event);
355	}
356}
357
358static void
359bfad_sm_stopping(struct bfad_s *bfad, enum bfad_sm_event event)
360{
361	bfa_trc(bfad, event);
362
363	switch (event) {
364	case BFAD_E_EXIT_COMP:
365		bfa_sm_set_state(bfad, bfad_sm_uninit);
366		bfad_remove_intr(bfad);
367		del_timer_sync(&bfad->hal_tmo);
368		bfad_im_probe_undo(bfad);
369		bfad->bfad_flags &= ~BFAD_FC4_PROBE_DONE;
370		bfad_uncfg_pport(bfad);
371		break;
372
373	default:
374		bfa_sm_fault(bfad, event);
375		break;
376	}
377}
378
379/*
380 *  BFA callbacks
381 */
382void
383bfad_hcb_comp(void *arg, bfa_status_t status)
384{
385	struct bfad_hal_comp *fcomp = (struct bfad_hal_comp *)arg;
386
387	fcomp->status = status;
388	complete(&fcomp->comp);
389}
390
391/*
392 * bfa_init callback
393 */
394void
395bfa_cb_init(void *drv, bfa_status_t init_status)
396{
397	struct bfad_s	      *bfad = drv;
398
399	if (init_status == BFA_STATUS_OK) {
400		bfad->bfad_flags |= BFAD_HAL_INIT_DONE;
401
402		/*
403		 * If BFAD_HAL_INIT_FAIL flag is set:
404		 * Wake up the kernel thread to start
405		 * the bfad operations after HAL init done
406		 */
407		if ((bfad->bfad_flags & BFAD_HAL_INIT_FAIL)) {
408			bfad->bfad_flags &= ~BFAD_HAL_INIT_FAIL;
409			wake_up_process(bfad->bfad_tsk);
410		}
411	}
412
413	complete(&bfad->comp);
414}
415
416/*
417 *  BFA_FCS callbacks
418 */
419struct bfad_port_s *
420bfa_fcb_lport_new(struct bfad_s *bfad, struct bfa_fcs_lport_s *port,
421		 enum bfa_lport_role roles, struct bfad_vf_s *vf_drv,
422		 struct bfad_vport_s *vp_drv)
423{
424	bfa_status_t	rc;
425	struct bfad_port_s    *port_drv;
426
427	if (!vp_drv && !vf_drv) {
428		port_drv = &bfad->pport;
429		port_drv->pvb_type = BFAD_PORT_PHYS_BASE;
430	} else if (!vp_drv && vf_drv) {
431		port_drv = &vf_drv->base_port;
432		port_drv->pvb_type = BFAD_PORT_VF_BASE;
433	} else if (vp_drv && !vf_drv) {
434		port_drv = &vp_drv->drv_port;
435		port_drv->pvb_type = BFAD_PORT_PHYS_VPORT;
436	} else {
437		port_drv = &vp_drv->drv_port;
438		port_drv->pvb_type = BFAD_PORT_VF_VPORT;
439	}
440
441	port_drv->fcs_port = port;
442	port_drv->roles = roles;
443
444	if (roles & BFA_LPORT_ROLE_FCP_IM) {
445		rc = bfad_im_port_new(bfad, port_drv);
446		if (rc != BFA_STATUS_OK) {
447			bfad_im_port_delete(bfad, port_drv);
448			port_drv = NULL;
449		}
450	}
451
452	return port_drv;
453}
454
455void
456bfa_fcb_lport_delete(struct bfad_s *bfad, enum bfa_lport_role roles,
457		    struct bfad_vf_s *vf_drv, struct bfad_vport_s *vp_drv)
458{
459	struct bfad_port_s    *port_drv;
460
461	/* this will be only called from rmmod context */
462	if (vp_drv && !vp_drv->comp_del) {
463		port_drv = (vp_drv) ? (&(vp_drv)->drv_port) :
464				((vf_drv) ? (&(vf_drv)->base_port) :
465				(&(bfad)->pport));
466		bfa_trc(bfad, roles);
467		if (roles & BFA_LPORT_ROLE_FCP_IM)
468			bfad_im_port_delete(bfad, port_drv);
469	}
470}
471
472/*
473 * FCS RPORT alloc callback, after successful PLOGI by FCS
474 */
475bfa_status_t
476bfa_fcb_rport_alloc(struct bfad_s *bfad, struct bfa_fcs_rport_s **rport,
477		    struct bfad_rport_s **rport_drv)
478{
479	bfa_status_t	rc = BFA_STATUS_OK;
480
481	*rport_drv = kzalloc(sizeof(struct bfad_rport_s), GFP_ATOMIC);
482	if (*rport_drv == NULL) {
483		rc = BFA_STATUS_ENOMEM;
484		goto ext;
485	}
486
487	*rport = &(*rport_drv)->fcs_rport;
488
489ext:
490	return rc;
491}
492
493/*
494 * FCS PBC VPORT Create
495 */
496void
497bfa_fcb_pbc_vport_create(struct bfad_s *bfad, struct bfi_pbc_vport_s pbc_vport)
498{
499
500	struct bfa_lport_cfg_s port_cfg = {0};
501	struct bfad_vport_s   *vport;
502	int rc;
503
504	vport = kzalloc(sizeof(struct bfad_vport_s), GFP_KERNEL);
505	if (!vport) {
506		bfa_trc(bfad, 0);
507		return;
508	}
509
510	vport->drv_port.bfad = bfad;
511	port_cfg.roles = BFA_LPORT_ROLE_FCP_IM;
512	port_cfg.pwwn = pbc_vport.vp_pwwn;
513	port_cfg.nwwn = pbc_vport.vp_nwwn;
514	port_cfg.preboot_vp  = BFA_TRUE;
515
516	rc = bfa_fcs_pbc_vport_create(&vport->fcs_vport, &bfad->bfa_fcs, 0,
517				  &port_cfg, vport);
518
519	if (rc != BFA_STATUS_OK) {
520		bfa_trc(bfad, 0);
521		return;
522	}
523
524	list_add_tail(&vport->list_entry, &bfad->pbc_vport_list);
525}
526
527void
528bfad_hal_mem_release(struct bfad_s *bfad)
529{
530	struct bfa_meminfo_s *hal_meminfo = &bfad->meminfo;
531	struct bfa_mem_dma_s *dma_info, *dma_elem;
532	struct bfa_mem_kva_s *kva_info, *kva_elem;
533	struct list_head *dm_qe, *km_qe;
534
535	dma_info = &hal_meminfo->dma_info;
536	kva_info = &hal_meminfo->kva_info;
537
538	/* Iterate through the KVA meminfo queue */
539	list_for_each(km_qe, &kva_info->qe) {
540		kva_elem = (struct bfa_mem_kva_s *) km_qe;
541		vfree(kva_elem->kva);
542	}
543
544	/* Iterate through the DMA meminfo queue */
545	list_for_each(dm_qe, &dma_info->qe) {
546		dma_elem = (struct bfa_mem_dma_s *) dm_qe;
547		dma_free_coherent(&bfad->pcidev->dev,
548				dma_elem->mem_len, dma_elem->kva,
549				(dma_addr_t) dma_elem->dma);
550	}
551
552	memset(hal_meminfo, 0, sizeof(struct bfa_meminfo_s));
553}
554
/*
 * Overlay user-supplied module parameters onto the default IOC
 * configuration, then copy the values the HAL will actually use back
 * into the module-parameter globals so sysfs shows effective settings.
 */
void
bfad_update_hal_cfg(struct bfa_iocfc_cfg_s *bfa_cfg)
{
	/* A value of 0 (unset) keeps the default; fcxp, uf-buf and sgpg
	 * counts are additionally bounded by their BFA_*_MAX limits. */
	if (num_rports > 0)
		bfa_cfg->fwcfg.num_rports = num_rports;
	if (num_ios > 0)
		bfa_cfg->fwcfg.num_ioim_reqs = num_ios;
	if (num_tms > 0)
		bfa_cfg->fwcfg.num_tskim_reqs = num_tms;
	if (num_fcxps > 0 && num_fcxps <= BFA_FCXP_MAX)
		bfa_cfg->fwcfg.num_fcxp_reqs = num_fcxps;
	if (num_ufbufs > 0 && num_ufbufs <= BFA_UF_MAX)
		bfa_cfg->fwcfg.num_uf_bufs = num_ufbufs;
	if (reqq_size > 0)
		bfa_cfg->drvcfg.num_reqq_elems = reqq_size;
	if (rspq_size > 0)
		bfa_cfg->drvcfg.num_rspq_elems = rspq_size;
	if (num_sgpgs > 0 && num_sgpgs <= BFA_SGPG_MAX)
		bfa_cfg->drvcfg.num_sgpgs = num_sgpgs;

	/*
	 * populate the hal values back to the driver for sysfs use.
	 * otherwise, the default values will be shown as 0 in sysfs
	 */
	num_rports = bfa_cfg->fwcfg.num_rports;
	num_ios = bfa_cfg->fwcfg.num_ioim_reqs;
	num_tms = bfa_cfg->fwcfg.num_tskim_reqs;
	num_fcxps = bfa_cfg->fwcfg.num_fcxp_reqs;
	num_ufbufs = bfa_cfg->fwcfg.num_uf_bufs;
	reqq_size = bfa_cfg->drvcfg.num_reqq_elems;
	rspq_size = bfa_cfg->drvcfg.num_rspq_elems;
	num_sgpgs = bfa_cfg->drvcfg.num_sgpgs;
}
588
/*
 * Compute the HAL memory layout for this instance and allocate it:
 * zeroed vmalloc buffers for each KVA element and coherent DMA buffers
 * for each DMA element.  Any failure releases everything allocated so
 * far and returns BFA_STATUS_ENOMEM.
 */
bfa_status_t
bfad_hal_mem_alloc(struct bfad_s *bfad)
{
	struct bfa_meminfo_s *hal_meminfo = &bfad->meminfo;
	struct bfa_mem_dma_s *dma_info, *dma_elem;
	struct bfa_mem_kva_s *kva_info, *kva_elem;
	struct list_head *dm_qe, *km_qe;
	bfa_status_t	rc = BFA_STATUS_OK;
	dma_addr_t	phys_addr;

	/* Apply module-parameter overrides, then let the HAL describe the
	 * memory elements it needs for this configuration. */
	bfa_cfg_get_default(&bfad->ioc_cfg);
	bfad_update_hal_cfg(&bfad->ioc_cfg);
	bfad->cfg_data.ioc_queue_depth = bfad->ioc_cfg.fwcfg.num_ioim_reqs;
	bfa_cfg_get_meminfo(&bfad->ioc_cfg, hal_meminfo, &bfad->bfa);

	dma_info = &hal_meminfo->dma_info;
	kva_info = &hal_meminfo->kva_info;

	/* Iterate through the KVA meminfo queue */
	list_for_each(km_qe, &kva_info->qe) {
		kva_elem = (struct bfa_mem_kva_s *) km_qe;
		/* NOTE(review): vmalloc+memset could be vzalloc(). */
		kva_elem->kva = vmalloc(kva_elem->mem_len);
		if (kva_elem->kva == NULL) {
			bfad_hal_mem_release(bfad);
			rc = BFA_STATUS_ENOMEM;
			goto ext;
		}
		memset(kva_elem->kva, 0, kva_elem->mem_len);
	}

	/* Iterate through the DMA meminfo queue */
	list_for_each(dm_qe, &dma_info->qe) {
		dma_elem = (struct bfa_mem_dma_s *) dm_qe;
		dma_elem->kva = dma_alloc_coherent(&bfad->pcidev->dev,
						dma_elem->mem_len,
						&phys_addr, GFP_KERNEL);
		if (dma_elem->kva == NULL) {
			/* Releases both queues, including elements
			 * allocated in the KVA loop above. */
			bfad_hal_mem_release(bfad);
			rc = BFA_STATUS_ENOMEM;
			goto ext;
		}
		dma_elem->dma = phys_addr;
		memset(dma_elem->kva, 0, dma_elem->mem_len);
	}
ext:
	return rc;
}
636
637/*
638 * Create a vport under a vf.
639 */
/*
 * Create a vport under the given vf: register it with FCS, allocate a
 * SCSI host for initiator-mode roles, then start it.  On any failure
 * all partially created state is unwound before returning.
 */
bfa_status_t
bfad_vport_create(struct bfad_s *bfad, u16 vf_id,
		  struct bfa_lport_cfg_s *port_cfg, struct device *dev)
{
	struct bfad_vport_s   *vport;
	int		rc = BFA_STATUS_OK;
	unsigned long	flags;
	struct completion fcomp;

	vport = kzalloc(sizeof(struct bfad_vport_s), GFP_KERNEL);
	if (!vport) {
		rc = BFA_STATUS_ENOMEM;
		goto ext;
	}

	vport->drv_port.bfad = bfad;
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	rc = bfa_fcs_vport_create(&vport->fcs_vport, &bfad->bfa_fcs, vf_id,
				  port_cfg, vport);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	if (rc != BFA_STATUS_OK)
		goto ext_free_vport;

	/* Initiator-mode vports also need a SCSI host; if that fails the
	 * already-registered FCS vport must be deleted first. */
	if (port_cfg->roles & BFA_LPORT_ROLE_FCP_IM) {
		rc = bfad_im_scsi_host_alloc(bfad, vport->drv_port.im_port,
							dev);
		if (rc != BFA_STATUS_OK)
			goto ext_free_fcs_vport;
	}

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	bfa_fcs_vport_start(&vport->fcs_vport);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	return BFA_STATUS_OK;

ext_free_fcs_vport:
	/* Delete the FCS vport and block on the stack completion until
	 * the delete finishes, then free the driver vport object. */
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	vport->comp_del = &fcomp;
	init_completion(vport->comp_del);
	bfa_fcs_vport_delete(&vport->fcs_vport);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	wait_for_completion(vport->comp_del);
ext_free_vport:
	kfree(vport);
ext:
	return rc;
}
689
/*
 * Periodic HAL heartbeat (timer callback): advance the BFA timer module,
 * drain any pending completions, and re-arm the timer.
 */
void
bfad_bfa_tmo(unsigned long data)
{
	struct bfad_s	      *bfad = (struct bfad_s *) data;
	unsigned long	flags;
	struct list_head	       doneq;

	spin_lock_irqsave(&bfad->bfad_lock, flags);

	bfa_timer_beat(&bfad->bfa.timer_mod);

	bfa_comp_deq(&bfad->bfa, &doneq);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	if (!list_empty(&doneq)) {
		/* Completions run outside the lock; the lock is retaken
		 * only to free the completion elements. */
		bfa_comp_process(&bfad->bfa, &doneq);
		spin_lock_irqsave(&bfad->bfad_lock, flags);
		bfa_comp_free(&bfad->bfa, &doneq);
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	}

	/* Re-arm for the next beat. */
	mod_timer(&bfad->hal_tmo,
		  jiffies + msecs_to_jiffies(BFA_TIMER_FREQ));
}
714
715void
716bfad_init_timer(struct bfad_s *bfad)
717{
718	init_timer(&bfad->hal_tmo);
719	bfad->hal_tmo.function = bfad_bfa_tmo;
720	bfad->hal_tmo.data = (unsigned long)bfad;
721
722	mod_timer(&bfad->hal_tmo,
723		  jiffies + msecs_to_jiffies(BFA_TIMER_FREQ));
724}
725
726int
727bfad_pci_init(struct pci_dev *pdev, struct bfad_s *bfad)
728{
729	int		rc = -ENODEV;
730
731	if (pci_enable_device(pdev)) {
732		printk(KERN_ERR "pci_enable_device fail %p\n", pdev);
733		goto out;
734	}
735
736	if (pci_request_regions(pdev, BFAD_DRIVER_NAME))
737		goto out_disable_device;
738
739	pci_set_master(pdev);
740
741
742	if ((pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) != 0) ||
743	    (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)) != 0)) {
744		if ((pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) ||
745		   (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)) != 0)) {
746			printk(KERN_ERR "pci_set_dma_mask fail %p\n", pdev);
747			goto out_release_region;
748		}
749	}
750
751	bfad->pci_bar0_kva = pci_iomap(pdev, 0, pci_resource_len(pdev, 0));
752	bfad->pci_bar2_kva = pci_iomap(pdev, 2, pci_resource_len(pdev, 2));
753
754	if (bfad->pci_bar0_kva == NULL) {
755		printk(KERN_ERR "Fail to map bar0\n");
756		goto out_release_region;
757	}
758
759	bfad->hal_pcidev.pci_slot = PCI_SLOT(pdev->devfn);
760	bfad->hal_pcidev.pci_func = PCI_FUNC(pdev->devfn);
761	bfad->hal_pcidev.pci_bar_kva = bfad->pci_bar0_kva;
762	bfad->hal_pcidev.device_id = pdev->device;
763	bfad->hal_pcidev.ssid = pdev->subsystem_device;
764	bfad->pci_name = pci_name(pdev);
765
766	bfad->pci_attr.vendor_id = pdev->vendor;
767	bfad->pci_attr.device_id = pdev->device;
768	bfad->pci_attr.ssid = pdev->subsystem_device;
769	bfad->pci_attr.ssvid = pdev->subsystem_vendor;
770	bfad->pci_attr.pcifn = PCI_FUNC(pdev->devfn);
771
772	bfad->pcidev = pdev;
773
774	/* Adjust PCIe Maximum Read Request Size */
775	if (pcie_max_read_reqsz > 0) {
776		int pcie_cap_reg;
777		u16 pcie_dev_ctl;
778		u16 mask = 0xffff;
779
780		switch (pcie_max_read_reqsz) {
781		case 128:
782			mask = 0x0;
783			break;
784		case 256:
785			mask = 0x1000;
786			break;
787		case 512:
788			mask = 0x2000;
789			break;
790		case 1024:
791			mask = 0x3000;
792			break;
793		case 2048:
794			mask = 0x4000;
795			break;
796		case 4096:
797			mask = 0x5000;
798			break;
799		default:
800			break;
801		}
802
803		pcie_cap_reg = pci_find_capability(pdev, PCI_CAP_ID_EXP);
804		if (mask != 0xffff && pcie_cap_reg) {
805			pcie_cap_reg += 0x08;
806			pci_read_config_word(pdev, pcie_cap_reg, &pcie_dev_ctl);
807			if ((pcie_dev_ctl & 0x7000) != mask) {
808				printk(KERN_WARNING "BFA[%s]: "
809				"pcie_max_read_request_size is %d, "
810				"reset to %d\n", bfad->pci_name,
811				(1 << ((pcie_dev_ctl & 0x7000) >> 12)) << 7,
812				pcie_max_read_reqsz);
813
814				pcie_dev_ctl &= ~0x7000;
815				pci_write_config_word(pdev, pcie_cap_reg,
816						pcie_dev_ctl | mask);
817			}
818		}
819	}
820
821	return 0;
822
823out_release_region:
824	pci_release_regions(pdev);
825out_disable_device:
826	pci_disable_device(pdev);
827out:
828	return rc;
829}
830
831void
832bfad_pci_uninit(struct pci_dev *pdev, struct bfad_s *bfad)
833{
834	pci_iounmap(pdev, bfad->pci_bar0_kva);
835	pci_iounmap(pdev, bfad->pci_bar2_kva);
836	pci_release_regions(pdev);
837	pci_disable_device(pdev);
838	pci_set_drvdata(pdev, NULL);
839}
840
/*
 * Allocate HAL memory, attach the BFA and FCS layers, and configure the
 * base (physical) port.  On cfg-pport failure the FCS is exited and HAL
 * memory released before returning BFA_STATUS_FAILED.
 */
bfa_status_t
bfad_drv_init(struct bfad_s *bfad)
{
	bfa_status_t	rc;
	unsigned long	flags;

	bfad->cfg_data.rport_del_timeout = rport_del_timeout;
	bfad->cfg_data.lun_queue_depth = bfa_lun_queue_depth;
	bfad->cfg_data.io_max_sge = bfa_io_max_sge;
	bfad->cfg_data.binding_method = FCP_PWWN_BINDING;

	rc = bfad_hal_mem_alloc(bfad);
	if (rc != BFA_STATUS_OK) {
		printk(KERN_WARNING "bfad%d bfad_hal_mem_alloc failure\n",
		       bfad->inst_no);
		printk(KERN_WARNING
			"Not enough memory to attach all Brocade HBA ports, %s",
			"System may need more memory.\n");
		goto out_hal_mem_alloc_failure;
	}

	/* Hook up tracing and the portlog buffer before attaching. */
	bfad->bfa.trcmod = bfad->trcmod;
	bfad->bfa.plog = &bfad->plog_buf;
	bfa_plog_init(&bfad->plog_buf);
	bfa_plog_str(&bfad->plog_buf, BFA_PL_MID_DRVR, BFA_PL_EID_DRIVER_START,
		     0, "Driver Attach");

	bfa_attach(&bfad->bfa, bfad, &bfad->ioc_cfg, &bfad->meminfo,
		   &bfad->hal_pcidev);

	/* FCS INIT */
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	bfad->bfa_fcs.trcmod = bfad->trcmod;
	bfa_fcs_attach(&bfad->bfa_fcs, &bfad->bfa, bfad, BFA_FALSE);
	bfad->bfa_fcs.fdmi_enabled = fdmi_enable;
	bfa_fcs_init(&bfad->bfa_fcs);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	bfad->bfad_flags |= BFAD_DRV_INIT_DONE;

	/* configure base port */
	rc = bfad_cfg_pport(bfad, BFA_LPORT_ROLE_FCP_IM);
	if (rc != BFA_STATUS_OK)
		goto out_cfg_pport_fail;

	return BFA_STATUS_OK;

out_cfg_pport_fail:
	/* fcs exit - on cfg pport failure */
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	init_completion(&bfad->comp);
	bfad->pport.flags |= BFAD_PORT_DELETE;
	bfa_fcs_exit(&bfad->bfa_fcs);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	/* Block until the FCS exit signals bfad->comp. */
	wait_for_completion(&bfad->comp);
	/* bfa detach - free hal memory */
	bfa_detach(&bfad->bfa);
	bfad_hal_mem_release(bfad);
out_hal_mem_alloc_failure:
	return BFA_STATUS_FAILED;
}
902
/*
 * Reverse of bfad_drv_init(): stop the IOC and wait for it, kill the
 * heartbeat timer, disable ISRs, detach the HAL, remove interrupts and
 * release HAL memory.
 */
void
bfad_drv_uninit(struct bfad_s *bfad)
{
	unsigned long   flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	init_completion(&bfad->comp);
	bfa_iocfc_stop(&bfad->bfa);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	/* The IOC stop completes bfad->comp asynchronously. */
	wait_for_completion(&bfad->comp);

	del_timer_sync(&bfad->hal_tmo);
	bfa_isr_disable(&bfad->bfa);
	bfa_detach(&bfad->bfa);
	bfad_remove_intr(bfad);
	bfad_hal_mem_release(bfad);

	bfad->bfad_flags &= ~BFAD_DRV_INIT_DONE;
}
922
/*
 * Start IOC and FCS fabric operation under the driver lock and mark HAL
 * start done; then flush the IM workqueue so queued port work finishes
 * before returning.
 */
void
bfad_drv_start(struct bfad_s *bfad)
{
	unsigned long	flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	bfa_iocfc_start(&bfad->bfa);
	bfa_fcs_pbc_vport_init(&bfad->bfa_fcs);
	bfa_fcs_fabric_modstart(&bfad->bfa_fcs);
	bfad->bfad_flags |= BFAD_HAL_START_DONE;
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	if (bfad->im)
		flush_workqueue(bfad->im->drv_workq);
}
938
/*
 * Exit the FCS layer (marking the base port for delete), wait for the
 * exit to complete, then advance the driver SM with FCS_EXIT_COMP.
 */
void
bfad_fcs_stop(struct bfad_s *bfad)
{
	unsigned long	flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	init_completion(&bfad->comp);
	bfad->pport.flags |= BFAD_PORT_DELETE;
	bfa_fcs_exit(&bfad->bfa_fcs);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	wait_for_completion(&bfad->comp);

	bfa_sm_send_event(bfad, BFAD_E_FCS_EXIT_COMP);
}
953
/*
 * Stop the IOC, wait for the stop completion, clear HAL_START_DONE and
 * advance the driver SM with EXIT_COMP.
 */
void
bfad_stop(struct bfad_s *bfad)
{
	unsigned long	flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	init_completion(&bfad->comp);
	bfa_iocfc_stop(&bfad->bfa);
	bfad->bfad_flags &= ~BFAD_HAL_START_DONE;
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	wait_for_completion(&bfad->comp);

	bfa_sm_send_event(bfad, BFAD_E_EXIT_COMP);
}
968
969bfa_status_t
970bfad_cfg_pport(struct bfad_s *bfad, enum bfa_lport_role role)
971{
972	int		rc = BFA_STATUS_OK;
973
974	/* Allocate scsi_host for the physical port */
975	if ((supported_fc4s & BFA_LPORT_ROLE_FCP_IM) &&
976	    (role & BFA_LPORT_ROLE_FCP_IM)) {
977		if (bfad->pport.im_port == NULL) {
978			rc = BFA_STATUS_FAILED;
979			goto out;
980		}
981
982		rc = bfad_im_scsi_host_alloc(bfad, bfad->pport.im_port,
983						&bfad->pcidev->dev);
984		if (rc != BFA_STATUS_OK)
985			goto out;
986
987		bfad->pport.roles |= BFA_LPORT_ROLE_FCP_IM;
988	}
989
990	bfad->bfad_flags |= BFAD_CFG_PPORT_DONE;
991
992out:
993	return rc;
994}
995
/*
 * Undo bfad_cfg_pport(): free the initiator-mode SCSI host attached to
 * the base port (if configured) and clear the CFG_PPORT_DONE flag.
 */
void
bfad_uncfg_pport(struct bfad_s *bfad)
{
	if ((supported_fc4s & BFA_LPORT_ROLE_FCP_IM) &&
	    (bfad->pport.roles & BFA_LPORT_ROLE_FCP_IM)) {
		bfad_im_scsi_host_free(bfad, bfad->pport.im_port);
		bfad_im_port_clean(bfad->pport.im_port);
		kfree(bfad->pport.im_port);
		bfad->pport.roles &= ~BFA_LPORT_ROLE_FCP_IM;
	}

	bfad->bfad_flags &= ~BFAD_CFG_PPORT_DONE;
}
1009
1010bfa_status_t
1011bfad_start_ops(struct bfad_s *bfad) {
1012
1013	int	retval;
1014	unsigned long	flags;
1015	struct bfad_vport_s *vport, *vport_new;
1016	struct bfa_fcs_driver_info_s driver_info;
1017
1018	/* Fill the driver_info info to fcs*/
1019	memset(&driver_info, 0, sizeof(driver_info));
1020	strncpy(driver_info.version, BFAD_DRIVER_VERSION,
1021		sizeof(driver_info.version) - 1);
1022	if (host_name)
1023		strncpy(driver_info.host_machine_name, host_name,
1024			sizeof(driver_info.host_machine_name) - 1);
1025	if (os_name)
1026		strncpy(driver_info.host_os_name, os_name,
1027			sizeof(driver_info.host_os_name) - 1);
1028	if (os_patch)
1029		strncpy(driver_info.host_os_patch, os_patch,
1030			sizeof(driver_info.host_os_patch) - 1);
1031
1032	strncpy(driver_info.os_device_name, bfad->pci_name,
1033		sizeof(driver_info.os_device_name - 1));
1034
1035	/* FCS driver info init */
1036	spin_lock_irqsave(&bfad->bfad_lock, flags);
1037	bfa_fcs_driver_info_init(&bfad->bfa_fcs, &driver_info);
1038	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1039
1040	/*
1041	 * FCS update cfg - reset the pwwn/nwwn of fabric base logical port
1042	 * with values learned during bfa_init firmware GETATTR REQ.
1043	 */
1044	bfa_fcs_update_cfg(&bfad->bfa_fcs);
1045
1046	/* Setup fc host fixed attribute if the lk supports */
1047	bfad_fc_host_init(bfad->pport.im_port);
1048
1049	/* BFAD level FC4 IM specific resource allocation */
1050	retval = bfad_im_probe(bfad);
1051	if (retval != BFA_STATUS_OK) {
1052		printk(KERN_WARNING "bfad_im_probe failed\n");
1053		if (bfa_sm_cmp_state(bfad, bfad_sm_initializing))
1054			bfa_sm_set_state(bfad, bfad_sm_failed);
1055		bfad_im_probe_undo(bfad);
1056		bfad->bfad_flags &= ~BFAD_FC4_PROBE_DONE;
1057		bfad_uncfg_pport(bfad);
1058		bfad_stop(bfad);
1059		return BFA_STATUS_FAILED;
1060	} else
1061		bfad->bfad_flags |= BFAD_FC4_PROBE_DONE;
1062
1063	bfad_drv_start(bfad);
1064
1065	/* Complete pbc vport create */
1066	list_for_each_entry_safe(vport, vport_new, &bfad->pbc_vport_list,
1067				list_entry) {
1068		struct fc_vport_identifiers vid;
1069		struct fc_vport *fc_vport;
1070		char pwwn_buf[BFA_STRING_32];
1071
1072		memset(&vid, 0, sizeof(vid));
1073		vid.roles = FC_PORT_ROLE_FCP_INITIATOR;
1074		vid.vport_type = FC_PORTTYPE_NPIV;
1075		vid.disable = false;
1076		vid.node_name = wwn_to_u64((u8 *)
1077				(&((vport->fcs_vport).lport.port_cfg.nwwn)));
1078		vid.port_name = wwn_to_u64((u8 *)
1079				(&((vport->fcs_vport).lport.port_cfg.pwwn)));
1080		fc_vport = fc_vport_create(bfad->pport.im_port->shost, 0, &vid);
1081		if (!fc_vport) {
1082			wwn2str(pwwn_buf, vid.port_name);
1083			printk(KERN_WARNING "bfad%d: failed to create pbc vport"
1084				" %s\n", bfad->inst_no, pwwn_buf);
1085		}
1086		list_del(&vport->list_entry);
1087		kfree(vport);
1088	}
1089
1090	/*
1091	 * If bfa_linkup_delay is set to -1 default; try to retrive the
1092	 * value using the bfad_get_linkup_delay(); else use the
1093	 * passed in module param value as the bfa_linkup_delay.
1094	 */
1095	if (bfa_linkup_delay < 0) {
1096		bfa_linkup_delay = bfad_get_linkup_delay(bfad);
1097		bfad_rport_online_wait(bfad);
1098		bfa_linkup_delay = -1;
1099	} else
1100		bfad_rport_online_wait(bfad);
1101
1102	BFA_LOG(KERN_INFO, bfad, bfa_log_level, "bfa device claimed\n");
1103
1104	return BFA_STATUS_OK;
1105}
1106
1107int
1108bfad_worker(void *ptr)
1109{
1110	struct bfad_s *bfad;
1111	unsigned long   flags;
1112
1113	bfad = (struct bfad_s *)ptr;
1114
1115	while (!kthread_should_stop()) {
1116
1117		/* Send event BFAD_E_INIT_SUCCESS */
1118		bfa_sm_send_event(bfad, BFAD_E_INIT_SUCCESS);
1119
1120		spin_lock_irqsave(&bfad->bfad_lock, flags);
1121		bfad->bfad_tsk = NULL;
1122		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1123
1124		break;
1125	}
1126
1127	return 0;
1128}
1129
1130/*
1131 *  BFA driver interrupt functions
1132 */
1133irqreturn_t
1134bfad_intx(int irq, void *dev_id)
1135{
1136	struct bfad_s	*bfad = dev_id;
1137	struct list_head	doneq;
1138	unsigned long	flags;
1139	bfa_boolean_t rc;
1140
1141	spin_lock_irqsave(&bfad->bfad_lock, flags);
1142	rc = bfa_intx(&bfad->bfa);
1143	if (!rc) {
1144		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1145		return IRQ_NONE;
1146	}
1147
1148	bfa_comp_deq(&bfad->bfa, &doneq);
1149	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1150
1151	if (!list_empty(&doneq)) {
1152		bfa_comp_process(&bfad->bfa, &doneq);
1153
1154		spin_lock_irqsave(&bfad->bfad_lock, flags);
1155		bfa_comp_free(&bfad->bfa, &doneq);
1156		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1157	}
1158
1159	return IRQ_HANDLED;
1160
1161}
1162
1163static irqreturn_t
1164bfad_msix(int irq, void *dev_id)
1165{
1166	struct bfad_msix_s *vec = dev_id;
1167	struct bfad_s *bfad = vec->bfad;
1168	struct list_head doneq;
1169	unsigned long   flags;
1170
1171	spin_lock_irqsave(&bfad->bfad_lock, flags);
1172
1173	bfa_msix(&bfad->bfa, vec->msix.entry);
1174	bfa_comp_deq(&bfad->bfa, &doneq);
1175	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1176
1177	if (!list_empty(&doneq)) {
1178		bfa_comp_process(&bfad->bfa, &doneq);
1179
1180		spin_lock_irqsave(&bfad->bfad_lock, flags);
1181		bfa_comp_free(&bfad->bfa, &doneq);
1182		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1183	}
1184
1185	return IRQ_HANDLED;
1186}
1187
1188/*
1189 * Initialize the MSIX entry table.
1190 */
1191static void
1192bfad_init_msix_entry(struct bfad_s *bfad, struct msix_entry *msix_entries,
1193			 int mask, int max_bit)
1194{
1195	int	i;
1196	int	match = 0x00000001;
1197
1198	for (i = 0, bfad->nvec = 0; i < MAX_MSIX_ENTRY; i++) {
1199		if (mask & match) {
1200			bfad->msix_tab[bfad->nvec].msix.entry = i;
1201			bfad->msix_tab[bfad->nvec].bfad = bfad;
1202			msix_entries[bfad->nvec].entry = i;
1203			bfad->nvec++;
1204		}
1205
1206		match <<= 1;
1207	}
1208
1209}
1210
1211int
1212bfad_install_msix_handler(struct bfad_s *bfad)
1213{
1214	int i, error = 0;
1215
1216	for (i = 0; i < bfad->nvec; i++) {
1217		sprintf(bfad->msix_tab[i].name, "bfa-%s-%s",
1218				bfad->pci_name,
1219				((bfa_asic_id_cb(bfad->hal_pcidev.device_id)) ?
1220				msix_name_cb[i] : msix_name_ct[i]));
1221
1222		error = request_irq(bfad->msix_tab[i].msix.vector,
1223				    (irq_handler_t) bfad_msix, 0,
1224				    bfad->msix_tab[i].name, &bfad->msix_tab[i]);
1225		bfa_trc(bfad, i);
1226		bfa_trc(bfad, bfad->msix_tab[i].msix.vector);
1227		if (error) {
1228			int	j;
1229
1230			for (j = 0; j < i; j++)
1231				free_irq(bfad->msix_tab[j].msix.vector,
1232						&bfad->msix_tab[j]);
1233
1234			return 1;
1235		}
1236	}
1237
1238	return 0;
1239}
1240
1241/*
1242 * Setup MSIX based interrupt.
1243 */
1244int
1245bfad_setup_intr(struct bfad_s *bfad)
1246{
1247	int error = 0;
1248	u32 mask = 0, i, num_bit = 0, max_bit = 0;
1249	struct msix_entry msix_entries[MAX_MSIX_ENTRY];
1250	struct pci_dev *pdev = bfad->pcidev;
1251	u16	reg;
1252
1253	/* Call BFA to get the msix map for this PCI function.  */
1254	bfa_msix_getvecs(&bfad->bfa, &mask, &num_bit, &max_bit);
1255
1256	/* Set up the msix entry table */
1257	bfad_init_msix_entry(bfad, msix_entries, mask, max_bit);
1258
1259	if ((bfa_asic_id_ctc(pdev->device) && !msix_disable_ct) ||
1260	   (bfa_asic_id_cb(pdev->device) && !msix_disable_cb)) {
1261
1262		error = pci_enable_msix(bfad->pcidev, msix_entries, bfad->nvec);
1263		if (error) {
1264			/*
1265			 * Only error number of vector is available.
1266			 * We don't have a mechanism to map multiple
1267			 * interrupts into one vector, so even if we
1268			 * can try to request less vectors, we don't
1269			 * know how to associate interrupt events to
1270			 *  vectors. Linux doesn't duplicate vectors
1271			 * in the MSIX table for this case.
1272			 */
1273
1274			printk(KERN_WARNING "bfad%d: "
1275				"pci_enable_msix failed (%d),"
1276				" use line based.\n", bfad->inst_no, error);
1277
1278			goto line_based;
1279		}
1280
1281		/* Disable INTX in MSI-X mode */
1282		pci_read_config_word(pdev, PCI_COMMAND, &reg);
1283
1284		if (!(reg & PCI_COMMAND_INTX_DISABLE))
1285			pci_write_config_word(pdev, PCI_COMMAND,
1286				reg | PCI_COMMAND_INTX_DISABLE);
1287
1288		/* Save the vectors */
1289		for (i = 0; i < bfad->nvec; i++) {
1290			bfa_trc(bfad, msix_entries[i].vector);
1291			bfad->msix_tab[i].msix.vector = msix_entries[i].vector;
1292		}
1293
1294		bfa_msix_init(&bfad->bfa, bfad->nvec);
1295
1296		bfad->bfad_flags |= BFAD_MSIX_ON;
1297
1298		return error;
1299	}
1300
1301line_based:
1302	error = 0;
1303	if (request_irq
1304	    (bfad->pcidev->irq, (irq_handler_t) bfad_intx, BFAD_IRQ_FLAGS,
1305	     BFAD_DRIVER_NAME, bfad) != 0) {
1306		/* Enable interrupt handler failed */
1307		return 1;
1308	}
1309
1310	return error;
1311}
1312
1313void
1314bfad_remove_intr(struct bfad_s *bfad)
1315{
1316	int	i;
1317
1318	if (bfad->bfad_flags & BFAD_MSIX_ON) {
1319		for (i = 0; i < bfad->nvec; i++)
1320			free_irq(bfad->msix_tab[i].msix.vector,
1321					&bfad->msix_tab[i]);
1322
1323		pci_disable_msix(bfad->pcidev);
1324		bfad->bfad_flags &= ~BFAD_MSIX_ON;
1325	} else {
1326		free_irq(bfad->pcidev->irq, bfad);
1327	}
1328}
1329
1330/*
1331 * PCI probe entry.
1332 */
1333int
1334bfad_pci_probe(struct pci_dev *pdev, const struct pci_device_id *pid)
1335{
1336	struct bfad_s	*bfad;
1337	int		error = -ENODEV, retval;
1338
1339	/* For single port cards - only claim function 0 */
1340	if ((pdev->device == BFA_PCI_DEVICE_ID_FC_8G1P) &&
1341		(PCI_FUNC(pdev->devfn) != 0))
1342		return -ENODEV;
1343
1344	bfad = kzalloc(sizeof(struct bfad_s), GFP_KERNEL);
1345	if (!bfad) {
1346		error = -ENOMEM;
1347		goto out;
1348	}
1349
1350	bfad->trcmod = kzalloc(sizeof(struct bfa_trc_mod_s), GFP_KERNEL);
1351	if (!bfad->trcmod) {
1352		printk(KERN_WARNING "Error alloc trace buffer!\n");
1353		error = -ENOMEM;
1354		goto out_alloc_trace_failure;
1355	}
1356
1357	/* TRACE INIT */
1358	bfa_trc_init(bfad->trcmod);
1359	bfa_trc(bfad, bfad_inst);
1360
1361	if (!(bfad_load_fwimg(pdev))) {
1362		kfree(bfad->trcmod);
1363		goto out_alloc_trace_failure;
1364	}
1365
1366	retval = bfad_pci_init(pdev, bfad);
1367	if (retval) {
1368		printk(KERN_WARNING "bfad_pci_init failure!\n");
1369		error = retval;
1370		goto out_pci_init_failure;
1371	}
1372
1373	mutex_lock(&bfad_mutex);
1374	bfad->inst_no = bfad_inst++;
1375	list_add_tail(&bfad->list_entry, &bfad_list);
1376	mutex_unlock(&bfad_mutex);
1377
1378	/* Initializing the state machine: State set to uninit */
1379	bfa_sm_set_state(bfad, bfad_sm_uninit);
1380
1381	spin_lock_init(&bfad->bfad_lock);
1382	pci_set_drvdata(pdev, bfad);
1383
1384	bfad->ref_count = 0;
1385	bfad->pport.bfad = bfad;
1386	INIT_LIST_HEAD(&bfad->pbc_vport_list);
1387
1388	/* Setup the debugfs node for this bfad */
1389	if (bfa_debugfs_enable)
1390		bfad_debugfs_init(&bfad->pport);
1391
1392	retval = bfad_drv_init(bfad);
1393	if (retval != BFA_STATUS_OK)
1394		goto out_drv_init_failure;
1395
1396	bfa_sm_send_event(bfad, BFAD_E_CREATE);
1397
1398	if (bfa_sm_cmp_state(bfad, bfad_sm_uninit))
1399		goto out_bfad_sm_failure;
1400
1401	return 0;
1402
1403out_bfad_sm_failure:
1404	bfa_detach(&bfad->bfa);
1405	bfad_hal_mem_release(bfad);
1406out_drv_init_failure:
1407	/* Remove the debugfs node for this bfad */
1408	kfree(bfad->regdata);
1409	bfad_debugfs_exit(&bfad->pport);
1410	mutex_lock(&bfad_mutex);
1411	bfad_inst--;
1412	list_del(&bfad->list_entry);
1413	mutex_unlock(&bfad_mutex);
1414	bfad_pci_uninit(pdev, bfad);
1415out_pci_init_failure:
1416	kfree(bfad->trcmod);
1417out_alloc_trace_failure:
1418	kfree(bfad);
1419out:
1420	return error;
1421}
1422
1423/*
1424 * PCI remove entry.
1425 */
1426void
1427bfad_pci_remove(struct pci_dev *pdev)
1428{
1429	struct bfad_s	      *bfad = pci_get_drvdata(pdev);
1430	unsigned long	flags;
1431
1432	bfa_trc(bfad, bfad->inst_no);
1433
1434	spin_lock_irqsave(&bfad->bfad_lock, flags);
1435	if (bfad->bfad_tsk != NULL) {
1436		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1437		kthread_stop(bfad->bfad_tsk);
1438	} else {
1439		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1440	}
1441
1442	/* Send Event BFAD_E_STOP */
1443	bfa_sm_send_event(bfad, BFAD_E_STOP);
1444
1445	/* Driver detach and dealloc mem */
1446	spin_lock_irqsave(&bfad->bfad_lock, flags);
1447	bfa_detach(&bfad->bfa);
1448	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1449	bfad_hal_mem_release(bfad);
1450
1451	/* Remove the debugfs node for this bfad */
1452	kfree(bfad->regdata);
1453	bfad_debugfs_exit(&bfad->pport);
1454
1455	/* Cleaning the BFAD instance */
1456	mutex_lock(&bfad_mutex);
1457	bfad_inst--;
1458	list_del(&bfad->list_entry);
1459	mutex_unlock(&bfad_mutex);
1460	bfad_pci_uninit(pdev, bfad);
1461
1462	kfree(bfad->trcmod);
1463	kfree(bfad);
1464}
1465
/*
 * PCI IDs claimed by this driver.  The 8G FC entries match on
 * vendor/device alone; the CT/CT2 entries additionally require the
 * FC serial-bus class code - presumably because those device IDs can
 * also surface as non-FC (CNA) functions; verify against the network
 * driver.
 */
struct pci_device_id bfad_id_table[] = {
	{
		/* 8G 2-port FC HBA */
		.vendor = BFA_PCI_VENDOR_ID_BROCADE,
		.device = BFA_PCI_DEVICE_ID_FC_8G2P,
		.subvendor = PCI_ANY_ID,
		.subdevice = PCI_ANY_ID,
	},
	{
		/* 8G 1-port FC HBA (probe claims function 0 only) */
		.vendor = BFA_PCI_VENDOR_ID_BROCADE,
		.device = BFA_PCI_DEVICE_ID_FC_8G1P,
		.subvendor = PCI_ANY_ID,
		.subdevice = PCI_ANY_ID,
	},
	{
		/* CT ASIC, FC class function only */
		.vendor = BFA_PCI_VENDOR_ID_BROCADE,
		.device = BFA_PCI_DEVICE_ID_CT,
		.subvendor = PCI_ANY_ID,
		.subdevice = PCI_ANY_ID,
		.class = (PCI_CLASS_SERIAL_FIBER << 8),
		.class_mask = ~0,
	},
	{
		/* CT ASIC, dedicated FC device ID */
		.vendor = BFA_PCI_VENDOR_ID_BROCADE,
		.device = BFA_PCI_DEVICE_ID_CT_FC,
		.subvendor = PCI_ANY_ID,
		.subdevice = PCI_ANY_ID,
		.class = (PCI_CLASS_SERIAL_FIBER << 8),
		.class_mask = ~0,
	},
	{
		/* CT2 ASIC, FC class function only */
		.vendor = BFA_PCI_VENDOR_ID_BROCADE,
		.device = BFA_PCI_DEVICE_ID_CT2,
		.subvendor = PCI_ANY_ID,
		.subdevice = PCI_ANY_ID,
		.class = (PCI_CLASS_SERIAL_FIBER << 8),
		.class_mask = ~0,
	},

	/* Terminating entry */
	{0, 0},
};
1506
1507MODULE_DEVICE_TABLE(pci, bfad_id_table);
1508
/* PCI driver glue binding bfad_id_table to the probe/remove entry
 * points; __devexit_p() compiles the remove hook out on kernels
 * configured without device removal support. */
static struct pci_driver bfad_pci_driver = {
	.name = BFAD_DRIVER_NAME,
	.id_table = bfad_id_table,
	.probe = bfad_pci_probe,
	.remove = __devexit_p(bfad_pci_remove),
};
1515
1516/*
1517 * Driver module init.
1518 */
1519static int __init
1520bfad_init(void)
1521{
1522	int		error = 0;
1523
1524	printk(KERN_INFO "Brocade BFA FC/FCOE SCSI driver - version: %s\n",
1525			BFAD_DRIVER_VERSION);
1526
1527	if (num_sgpgs > 0)
1528		num_sgpgs_parm = num_sgpgs;
1529
1530	error = bfad_im_module_init();
1531	if (error) {
1532		error = -ENOMEM;
1533		printk(KERN_WARNING "bfad_im_module_init failure\n");
1534		goto ext;
1535	}
1536
1537	if (strcmp(FCPI_NAME, " fcpim") == 0)
1538		supported_fc4s |= BFA_LPORT_ROLE_FCP_IM;
1539
1540	bfa_auto_recover = ioc_auto_recover;
1541	bfa_fcs_rport_set_del_timeout(rport_del_timeout);
1542
1543	error = pci_register_driver(&bfad_pci_driver);
1544	if (error) {
1545		printk(KERN_WARNING "pci_register_driver failure\n");
1546		goto ext;
1547	}
1548
1549	return 0;
1550
1551ext:
1552	bfad_im_module_exit();
1553	return error;
1554}
1555
1556/*
1557 * Driver module exit.
1558 */
1559static void __exit
1560bfad_exit(void)
1561{
1562	pci_unregister_driver(&bfad_pci_driver);
1563	bfad_im_module_exit();
1564	bfad_free_fwimg();
1565}
1566
1567/* Firmware handling */
1568static void
1569bfad_read_firmware(struct pci_dev *pdev, u32 **bfi_image,
1570		u32 *bfi_image_size, char *fw_name)
1571{
1572	const struct firmware *fw;
1573
1574	if (request_firmware(&fw, fw_name, &pdev->dev)) {
1575		printk(KERN_ALERT "Can't locate firmware %s\n", fw_name);
1576		*bfi_image = NULL;
1577		goto out;
1578	}
1579
1580	*bfi_image = vmalloc(fw->size);
1581	if (NULL == *bfi_image) {
1582		printk(KERN_ALERT "Fail to allocate buffer for fw image "
1583			"size=%x!\n", (u32) fw->size);
1584		goto out;
1585	}
1586
1587	memcpy(*bfi_image, fw->data, fw->size);
1588	*bfi_image_size = fw->size/sizeof(u32);
1589out:
1590	release_firmware(fw);
1591}
1592
1593static u32 *
1594bfad_load_fwimg(struct pci_dev *pdev)
1595{
1596	if (pdev->device == BFA_PCI_DEVICE_ID_CT2) {
1597		if (bfi_image_ct2_size == 0)
1598			bfad_read_firmware(pdev, &bfi_image_ct2,
1599				&bfi_image_ct2_size, BFAD_FW_FILE_CT2);
1600		return bfi_image_ct2;
1601	} else if (bfa_asic_id_ct(pdev->device)) {
1602		if (bfi_image_ct_size == 0)
1603			bfad_read_firmware(pdev, &bfi_image_ct,
1604				&bfi_image_ct_size, BFAD_FW_FILE_CT);
1605		return bfi_image_ct;
1606	} else {
1607		if (bfi_image_cb_size == 0)
1608			bfad_read_firmware(pdev, &bfi_image_cb,
1609				&bfi_image_cb_size, BFAD_FW_FILE_CB);
1610		return bfi_image_cb;
1611	}
1612}
1613
1614static void
1615bfad_free_fwimg(void)
1616{
1617	if (bfi_image_ct2_size && bfi_image_ct2)
1618		vfree(bfi_image_ct2);
1619	if (bfi_image_ct_size && bfi_image_ct)
1620		vfree(bfi_image_ct);
1621	if (bfi_image_cb_size && bfi_image_cb)
1622		vfree(bfi_image_cb);
1623}
1624
1625module_init(bfad_init);
1626module_exit(bfad_exit);
1627MODULE_LICENSE("GPL");
1628MODULE_DESCRIPTION("Brocade Fibre Channel HBA Driver" BFAD_PROTO_NAME);
1629MODULE_AUTHOR("Brocade Communications Systems, Inc.");
1630MODULE_VERSION(BFAD_DRIVER_VERSION);
1631