/* bfad.c - revision a36c61f9025b8924f99f54d518763bee7aa84085 */

/*
 * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
 * All rights reserved
 * www.brocade.com
 *
 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License (GPL) Version 2 as
 * published by the Free Software Foundation
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 */

/**
 *  bfad.c Linux driver PCI interface module.
 */
#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/pci.h>
#include <linux/firmware.h>
#include <asm/uaccess.h>
#include <asm/fcntl.h>

#include "bfad_drv.h"
#include "bfad_im.h"
#include "bfa_fcs.h"
#include "bfa_os_inc.h"
#include "bfa_defs.h"
#include "bfa.h"

BFA_TRC_FILE(LDRV, BFAD);
DEFINE_MUTEX(bfad_mutex);
LIST_HEAD(bfad_list);

static int	bfad_inst;
static int	num_sgpgs_parm;
int		supported_fc4s;
char		*host_name, *os_name, *os_patch;
int		num_rports, num_ios, num_tms;
int		num_fcxps, num_ufbufs;
int		reqq_size, rspq_size, num_sgpgs;
int		rport_del_timeout = BFA_FCS_RPORT_DEF_DEL_TIMEOUT;
int		bfa_lun_queue_depth = BFAD_LUN_QUEUE_DEPTH;
int		bfa_io_max_sge = BFAD_IO_MAX_SGE;
int		log_level = 3; /* WARNING log level */
int		ioc_auto_recover = BFA_TRUE;
int		bfa_linkup_delay = -1;
int		fdmi_enable = BFA_TRUE;
int		pcie_max_read_reqsz;
int		bfa_debugfs_enable = 1;
int		msix_disable_cb = 0, msix_disable_ct = 0;

u32	bfi_image_ct_fc_size, bfi_image_ct_cna_size, bfi_image_cb_fc_size;
u32	*bfi_image_ct_fc, *bfi_image_ct_cna, *bfi_image_cb_fc;

const char *msix_name_ct[] = {
	"cpe0", "cpe1", "cpe2", "cpe3",
	"rme0", "rme1", "rme2", "rme3",
	"ctrl" };

const char *msix_name_cb[] = {
	"cpe0", "cpe1", "cpe2", "cpe3",
	"rme0", "rme1", "rme2", "rme3",
	"eemc", "elpu0", "elpu1", "epss", "mlpu" };

MODULE_FIRMWARE(BFAD_FW_FILE_CT_FC);
MODULE_FIRMWARE(BFAD_FW_FILE_CT_CNA);
MODULE_FIRMWARE(BFAD_FW_FILE_CB_FC);

module_param(os_name, charp, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(os_name, "OS name of the hba host machine");
module_param(os_patch, charp, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(os_patch, "OS patch level of the hba host machine");
module_param(host_name, charp, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(host_name, "Hostname of the hba host machine");
module_param(num_rports, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(num_rports, "Max number of rports supported per port "
				"(physical/logical), default=1024");
module_param(num_ios, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(num_ios, "Max number of ioim requests, default=2000");
module_param(num_tms, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(num_tms, "Max number of task im requests, default=128");
module_param(num_fcxps, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(num_fcxps, "Max number of fcxp requests, default=64");
module_param(num_ufbufs, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(num_ufbufs, "Max number of unsolicited frame "
				"buffers, default=64");
module_param(reqq_size, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(reqq_size, "Max number of request queue elements, "
				"default=256");
module_param(rspq_size, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(rspq_size, "Max number of response queue elements, "
				"default=64");
module_param(num_sgpgs, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(num_sgpgs, "Number of scatter/gather pages, default=2048");
module_param(rport_del_timeout, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(rport_del_timeout, "Rport delete timeout, default=90 secs, "
					"Range[>0]");
module_param(bfa_lun_queue_depth, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(bfa_lun_queue_depth, "Lun queue depth, default=32, Range[>0]");
module_param(bfa_io_max_sge, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(bfa_io_max_sge, "Max io scatter/gather elements, default=255");
module_param(log_level, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(log_level, "Driver log level, default=3, "
				"Range[Critical:1|Error:2|Warning:3|Info:4]");
module_param(ioc_auto_recover, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(ioc_auto_recover, "IOC auto recovery, default=1, "
				"Range[off:0|on:1]");
module_param(bfa_linkup_delay, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(bfa_linkup_delay, "Link up delay, default=30 secs for "
			"boot port. Otherwise 10 secs in RHEL4 & 0 for "
			"[RHEL5, SLES10, ESX40], Range[>0]");
module_param(msix_disable_cb, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(msix_disable_cb, "Disable Message Signaled Interrupts "
			"for Brocade-415/425/815/825 cards, default=0, "
			"Range[false:0|true:1]");
module_param(msix_disable_ct, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(msix_disable_ct, "Disable Message Signaled Interrupts "
			"if possible for Brocade-1010/1020/804/1007/902/1741 "
			"cards, default=0, Range[false:0|true:1]");
module_param(fdmi_enable, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(fdmi_enable, "Enables fdmi registration, default=1, "
				"Range[false:0|true:1]");
module_param(pcie_max_read_reqsz, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(pcie_max_read_reqsz, "PCIe max read request size, default=0 "
		"(use system setting), Range[128|256|512|1024|2048|4096]");
module_param(bfa_debugfs_enable, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(bfa_debugfs_enable, "Enables debugfs feature, default=1, "
		"Range[false:0|true:1]");

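/*
 * Example (hypothetical values; assumes the module is installed as "bfa"):
 * load with a deeper per-LUN queue depth and MSI-X disabled on CB-class
 * (415/425/815/825) cards:
 *
 *	modprobe bfa bfa_lun_queue_depth=64 msix_disable_cb=1
 */
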
static void
bfad_sm_uninit(struct bfad_s *bfad, enum bfad_sm_event event);
static void
bfad_sm_created(struct bfad_s *bfad, enum bfad_sm_event event);
static void
bfad_sm_initializing(struct bfad_s *bfad, enum bfad_sm_event event);
static void
bfad_sm_operational(struct bfad_s *bfad, enum bfad_sm_event event);
static void
bfad_sm_stopping(struct bfad_s *bfad, enum bfad_sm_event event);
static void
bfad_sm_failed(struct bfad_s *bfad, enum bfad_sm_event event);
static void
bfad_sm_fcs_exit(struct bfad_s *bfad, enum bfad_sm_event event);

/**
 * Beginning state for the driver instance, awaiting the pci_probe event
 */
static void
bfad_sm_uninit(struct bfad_s *bfad, enum bfad_sm_event event)
{
	bfa_trc(bfad, event);

	switch (event) {
	case BFAD_E_CREATE:
		bfa_sm_set_state(bfad, bfad_sm_created);
		bfad->bfad_tsk = kthread_create(bfad_worker, (void *) bfad,
						"%s", "bfad_worker");
		if (IS_ERR(bfad->bfad_tsk)) {
			printk(KERN_INFO "bfad[%d]: Kernel thread "
				"creation failed!\n", bfad->inst_no);
			bfa_sm_send_event(bfad, BFAD_E_KTHREAD_CREATE_FAILED);
		}
		bfa_sm_send_event(bfad, BFAD_E_INIT);
		break;

	case BFAD_E_STOP:
		/* Ignore stop; already in uninit */
		break;

	default:
		bfa_sm_fault(bfad, event);
	}
}

/**
 * Driver instance is created, awaiting the INIT event to initialize the bfad
 */
static void
bfad_sm_created(struct bfad_s *bfad, enum bfad_sm_event event)
{
	unsigned long flags;

	bfa_trc(bfad, event);

	switch (event) {
	case BFAD_E_INIT:
		bfa_sm_set_state(bfad, bfad_sm_initializing);

		init_completion(&bfad->comp);

		/* Enable interrupts and wait for bfa_init completion */
		if (bfad_setup_intr(bfad)) {
			printk(KERN_WARNING "bfad%d: bfad_setup_intr failed\n",
					bfad->inst_no);
			bfa_sm_send_event(bfad, BFAD_E_INTR_INIT_FAILED);
			break;
		}

		spin_lock_irqsave(&bfad->bfad_lock, flags);
		bfa_init(&bfad->bfa);
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);

		/* Set up an interrupt handler for each vector */
		if ((bfad->bfad_flags & BFAD_MSIX_ON) &&
			bfad_install_msix_handler(bfad)) {
			printk(KERN_WARNING "%s: install_msix failed, bfad%d\n",
				__func__, bfad->inst_no);
		}

		bfad_init_timer(bfad);

		wait_for_completion(&bfad->comp);

		if ((bfad->bfad_flags & BFAD_HAL_INIT_DONE)) {
			bfa_sm_send_event(bfad, BFAD_E_INIT_SUCCESS);
		} else {
			bfad->bfad_flags |= BFAD_HAL_INIT_FAIL;
			bfa_sm_send_event(bfad, BFAD_E_INIT_FAILED);
		}

		break;

	case BFAD_E_KTHREAD_CREATE_FAILED:
		bfa_sm_set_state(bfad, bfad_sm_uninit);
		break;

	default:
		bfa_sm_fault(bfad, event);
	}
}

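/**
 * The driver instance is initializing, awaiting the outcome of bfa_init
 */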
static void
bfad_sm_initializing(struct bfad_s *bfad, enum bfad_sm_event event)
{
	int	retval;
	unsigned long	flags;

	bfa_trc(bfad, event);

	switch (event) {
	case BFAD_E_INIT_SUCCESS:
		kthread_stop(bfad->bfad_tsk);
		spin_lock_irqsave(&bfad->bfad_lock, flags);
		bfad->bfad_tsk = NULL;
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);

		retval = bfad_start_ops(bfad);
		if (retval != BFA_STATUS_OK)
			break;
		bfa_sm_set_state(bfad, bfad_sm_operational);
		break;

	case BFAD_E_INTR_INIT_FAILED:
		bfa_sm_set_state(bfad, bfad_sm_uninit);
		kthread_stop(bfad->bfad_tsk);
		spin_lock_irqsave(&bfad->bfad_lock, flags);
		bfad->bfad_tsk = NULL;
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
		break;

	case BFAD_E_INIT_FAILED:
		bfa_sm_set_state(bfad, bfad_sm_failed);
		break;

	default:
		bfa_sm_fault(bfad, event);
	}
}

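/**
 * HAL init failed, awaiting either a later successful init or a stop request
 */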
static void
bfad_sm_failed(struct bfad_s *bfad, enum bfad_sm_event event)
{
	int	retval;

	bfa_trc(bfad, event);

	switch (event) {
	case BFAD_E_INIT_SUCCESS:
		retval = bfad_start_ops(bfad);
		if (retval != BFA_STATUS_OK)
			break;
		bfa_sm_set_state(bfad, bfad_sm_operational);
		break;

	case BFAD_E_STOP:
		if (bfad->bfad_flags & BFAD_CFG_PPORT_DONE)
			bfad_uncfg_pport(bfad);
		if (bfad->bfad_flags & BFAD_FC4_PROBE_DONE) {
			bfad_im_probe_undo(bfad);
			bfad->bfad_flags &= ~BFAD_FC4_PROBE_DONE;
		}
		bfad_stop(bfad);
		break;

	case BFAD_E_EXIT_COMP:
		bfa_sm_set_state(bfad, bfad_sm_uninit);
		bfad_remove_intr(bfad);
		del_timer_sync(&bfad->hal_tmo);
		break;

	default:
		bfa_sm_fault(bfad, event);
	}
}

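/**
 * Driver is operational, awaiting a stop request
 */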
static void
bfad_sm_operational(struct bfad_s *bfad, enum bfad_sm_event event)
{
	bfa_trc(bfad, event);

	switch (event) {
	case BFAD_E_STOP:
		bfa_sm_set_state(bfad, bfad_sm_fcs_exit);
		bfad_fcs_stop(bfad);
		break;

	default:
		bfa_sm_fault(bfad, event);
	}
}

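/**
 * FCS is being torn down, awaiting its exit completion before stopping BFA
 */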
static void
bfad_sm_fcs_exit(struct bfad_s *bfad, enum bfad_sm_event event)
{
	bfa_trc(bfad, event);

	switch (event) {
	case BFAD_E_FCS_EXIT_COMP:
		bfa_sm_set_state(bfad, bfad_sm_stopping);
		bfad_stop(bfad);
		break;

	default:
		bfa_sm_fault(bfad, event);
	}
}

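/**
 * BFA is stopping, awaiting the final exit completion to return to uninit
 */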
static void
bfad_sm_stopping(struct bfad_s *bfad, enum bfad_sm_event event)
{
	bfa_trc(bfad, event);

	switch (event) {
	case BFAD_E_EXIT_COMP:
		bfa_sm_set_state(bfad, bfad_sm_uninit);
		bfad_remove_intr(bfad);
		del_timer_sync(&bfad->hal_tmo);
		bfad_im_probe_undo(bfad);
		bfad->bfad_flags &= ~BFAD_FC4_PROBE_DONE;
		bfad_uncfg_pport(bfad);
		break;

	default:
		bfa_sm_fault(bfad, event);
		break;
	}
}

/**
 *  BFA callbacks
 */
void
bfad_hcb_comp(void *arg, bfa_status_t status)
{
	struct bfad_hal_comp *fcomp = (struct bfad_hal_comp *)arg;

	fcomp->status = status;
	complete(&fcomp->comp);
}

/**
 * bfa_init callback
 */
void
bfa_cb_init(void *drv, bfa_status_t init_status)
{
	struct bfad_s	      *bfad = drv;

	if (init_status == BFA_STATUS_OK) {
		bfad->bfad_flags |= BFAD_HAL_INIT_DONE;

		/*
		 * If the BFAD_HAL_INIT_FAIL flag is set, clear it and wake
		 * up the kernel thread to start the bfad operations now
		 * that HAL init is done.
		 */
		if ((bfad->bfad_flags & BFAD_HAL_INIT_FAIL)) {
			bfad->bfad_flags &= ~BFAD_HAL_INIT_FAIL;
			wake_up_process(bfad->bfad_tsk);
		}
	}

	complete(&bfad->comp);
}

/**
 *  BFA_FCS callbacks
 */
struct bfad_port_s *
bfa_fcb_lport_new(struct bfad_s *bfad, struct bfa_fcs_lport_s *port,
		 enum bfa_lport_role roles, struct bfad_vf_s *vf_drv,
		 struct bfad_vport_s *vp_drv)
{
	bfa_status_t	rc;
	struct bfad_port_s    *port_drv;

	if (!vp_drv && !vf_drv) {
		port_drv = &bfad->pport;
		port_drv->pvb_type = BFAD_PORT_PHYS_BASE;
	} else if (!vp_drv && vf_drv) {
		port_drv = &vf_drv->base_port;
		port_drv->pvb_type = BFAD_PORT_VF_BASE;
	} else if (vp_drv && !vf_drv) {
		port_drv = &vp_drv->drv_port;
		port_drv->pvb_type = BFAD_PORT_PHYS_VPORT;
	} else {
		port_drv = &vp_drv->drv_port;
		port_drv->pvb_type = BFAD_PORT_VF_VPORT;
	}

	port_drv->fcs_port = port;
	port_drv->roles = roles;

	if (roles & BFA_LPORT_ROLE_FCP_IM) {
		rc = bfad_im_port_new(bfad, port_drv);
		if (rc != BFA_STATUS_OK) {
			bfad_im_port_delete(bfad, port_drv);
			port_drv = NULL;
		}
	}

	return port_drv;
}

void
bfa_fcb_lport_delete(struct bfad_s *bfad, enum bfa_lport_role roles,
		    struct bfad_vf_s *vf_drv, struct bfad_vport_s *vp_drv)
{
	struct bfad_port_s    *port_drv;

	/* this will only be called from rmmod context */
	if (vp_drv && !vp_drv->comp_del) {
		port_drv = (vp_drv) ? (&(vp_drv)->drv_port) :
				((vf_drv) ? (&(vf_drv)->base_port) :
				(&(bfad)->pport));
		bfa_trc(bfad, roles);
		if (roles & BFA_LPORT_ROLE_FCP_IM)
			bfad_im_port_delete(bfad, port_drv);
	}
}

/**
 * FCS RPORT alloc callback, after successful PLOGI by FCS
 */
bfa_status_t
bfa_fcb_rport_alloc(struct bfad_s *bfad, struct bfa_fcs_rport_s **rport,
		    struct bfad_rport_s **rport_drv)
{
	bfa_status_t	rc = BFA_STATUS_OK;

	*rport_drv = kzalloc(sizeof(struct bfad_rport_s), GFP_ATOMIC);
	if (*rport_drv == NULL) {
		rc = BFA_STATUS_ENOMEM;
		goto ext;
	}

	*rport = &(*rport_drv)->fcs_rport;

ext:
	return rc;
}

/**
 * FCS PBC VPORT Create
 */
void
bfa_fcb_pbc_vport_create(struct bfad_s *bfad, struct bfi_pbc_vport_s pbc_vport)
{
	struct bfa_lport_cfg_s port_cfg = {0};
	struct bfad_vport_s   *vport;
	int rc;

	vport = kzalloc(sizeof(struct bfad_vport_s), GFP_KERNEL);
	if (!vport) {
		bfa_trc(bfad, 0);
		return;
	}

	vport->drv_port.bfad = bfad;
	port_cfg.roles = BFA_LPORT_ROLE_FCP_IM;
	port_cfg.pwwn = pbc_vport.vp_pwwn;
	port_cfg.nwwn = pbc_vport.vp_nwwn;
	port_cfg.preboot_vp  = BFA_TRUE;

	rc = bfa_fcs_pbc_vport_create(&vport->fcs_vport, &bfad->bfa_fcs, 0,
				  &port_cfg, vport);

	if (rc != BFA_STATUS_OK) {
		bfa_trc(bfad, 0);
		/* Free the vport; it was never added to the pbc list */
		kfree(vport);
		return;
	}

	list_add_tail(&vport->list_entry, &bfad->pbc_vport_list);
}

void
bfad_hal_mem_release(struct bfad_s *bfad)
{
	int		i;
	struct bfa_meminfo_s *hal_meminfo = &bfad->meminfo;
	struct bfa_mem_elem_s *meminfo_elem;

	for (i = 0; i < BFA_MEM_TYPE_MAX; i++) {
		meminfo_elem = &hal_meminfo->meminfo[i];
		if (meminfo_elem->kva != NULL) {
			switch (meminfo_elem->mem_type) {
			case BFA_MEM_TYPE_KVA:
				vfree(meminfo_elem->kva);
				break;
			case BFA_MEM_TYPE_DMA:
				dma_free_coherent(&bfad->pcidev->dev,
					meminfo_elem->mem_len,
					meminfo_elem->kva,
					(dma_addr_t) meminfo_elem->dma);
				break;
			default:
				bfa_assert(0);
				break;
			}
		}
	}

	memset(hal_meminfo, 0, sizeof(struct bfa_meminfo_s));
}

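/**
 * Apply the module-parameter overrides to the IOC configuration and copy
 * the resulting values back into the parameters for sysfs.
 */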
void
bfad_update_hal_cfg(struct bfa_iocfc_cfg_s *bfa_cfg)
{
	if (num_rports > 0)
		bfa_cfg->fwcfg.num_rports = num_rports;
	if (num_ios > 0)
		bfa_cfg->fwcfg.num_ioim_reqs = num_ios;
	if (num_tms > 0)
		bfa_cfg->fwcfg.num_tskim_reqs = num_tms;
	if (num_fcxps > 0)
		bfa_cfg->fwcfg.num_fcxp_reqs = num_fcxps;
	if (num_ufbufs > 0)
		bfa_cfg->fwcfg.num_uf_bufs = num_ufbufs;
	if (reqq_size > 0)
		bfa_cfg->drvcfg.num_reqq_elems = reqq_size;
	if (rspq_size > 0)
		bfa_cfg->drvcfg.num_rspq_elems = rspq_size;
	if (num_sgpgs > 0)
		bfa_cfg->drvcfg.num_sgpgs = num_sgpgs;

	/*
	 * Populate the HAL values back into the module parameters for
	 * sysfs use; otherwise the default values would show up as 0
	 * in sysfs.
	 */
	num_rports = bfa_cfg->fwcfg.num_rports;
	num_ios = bfa_cfg->fwcfg.num_ioim_reqs;
	num_tms = bfa_cfg->fwcfg.num_tskim_reqs;
	num_fcxps = bfa_cfg->fwcfg.num_fcxp_reqs;
	num_ufbufs = bfa_cfg->fwcfg.num_uf_bufs;
	reqq_size = bfa_cfg->drvcfg.num_reqq_elems;
	rspq_size = bfa_cfg->drvcfg.num_rspq_elems;
	num_sgpgs = bfa_cfg->drvcfg.num_sgpgs;
}

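/**
 * Allocate the KVA and DMA memory requested by the HAL. If a DMA
 * allocation fails, retry with a smaller num_sgpgs (down to
 * min_num_sgpgs) before giving up.
 */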
bfa_status_t
bfad_hal_mem_alloc(struct bfad_s *bfad)
{
	int		i;
	struct bfa_meminfo_s *hal_meminfo = &bfad->meminfo;
	struct bfa_mem_elem_s *meminfo_elem;
	dma_addr_t	phys_addr;
	void	       *kva;
	bfa_status_t	rc = BFA_STATUS_OK;
	int retry_count = 0;
	int reset_value = 1;
	int min_num_sgpgs = 512;

	bfa_cfg_get_default(&bfad->ioc_cfg);

retry:
	bfad_update_hal_cfg(&bfad->ioc_cfg);
	bfad->cfg_data.ioc_queue_depth = bfad->ioc_cfg.fwcfg.num_ioim_reqs;
	bfa_cfg_get_meminfo(&bfad->ioc_cfg, hal_meminfo);

	for (i = 0; i < BFA_MEM_TYPE_MAX; i++) {
		meminfo_elem = &hal_meminfo->meminfo[i];
		switch (meminfo_elem->mem_type) {
		case BFA_MEM_TYPE_KVA:
			kva = vmalloc(meminfo_elem->mem_len);
			if (kva == NULL) {
				bfad_hal_mem_release(bfad);
				rc = BFA_STATUS_ENOMEM;
				goto ext;
			}
			memset(kva, 0, meminfo_elem->mem_len);
			meminfo_elem->kva = kva;
			break;
		case BFA_MEM_TYPE_DMA:
			kva = dma_alloc_coherent(&bfad->pcidev->dev,
				meminfo_elem->mem_len, &phys_addr, GFP_KERNEL);
			if (kva == NULL) {
				bfad_hal_mem_release(bfad);
				/*
				 * If we cannot allocate with the default
				 * num_sgpgs, try with half the value.
				 */
				if (num_sgpgs > min_num_sgpgs) {
					printk(KERN_INFO
					"bfad[%d]: memory allocation failed"
					" with num_sgpgs: %d\n",
						bfad->inst_no, num_sgpgs);
					nextLowerInt(&num_sgpgs);
					printk(KERN_INFO
					"bfad[%d]: trying to allocate memory"
					" with num_sgpgs: %d\n",
						bfad->inst_no, num_sgpgs);
					retry_count++;
					goto retry;
				} else {
					if (num_sgpgs_parm > 0)
						num_sgpgs = num_sgpgs_parm;
					else {
						reset_value =
							(1 << retry_count);
						num_sgpgs *= reset_value;
					}
					rc = BFA_STATUS_ENOMEM;
					goto ext;
				}
			}

			if (num_sgpgs_parm > 0)
				num_sgpgs = num_sgpgs_parm;
			else {
				reset_value = (1 << retry_count);
				num_sgpgs *= reset_value;
			}

			memset(kva, 0, meminfo_elem->mem_len);
			meminfo_elem->kva = kva;
			meminfo_elem->dma = phys_addr;
			break;
		default:
			break;
		}
	}
ext:
	return rc;
}

/**
 * Create a vport under a vf.
 */
bfa_status_t
bfad_vport_create(struct bfad_s *bfad, u16 vf_id,
		  struct bfa_lport_cfg_s *port_cfg, struct device *dev)
{
	struct bfad_vport_s   *vport;
	int		rc = BFA_STATUS_OK;
	unsigned long	flags;
	struct completion fcomp;

	vport = kzalloc(sizeof(struct bfad_vport_s), GFP_KERNEL);
	if (!vport) {
		rc = BFA_STATUS_ENOMEM;
		goto ext;
	}

	vport->drv_port.bfad = bfad;
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	rc = bfa_fcs_vport_create(&vport->fcs_vport, &bfad->bfa_fcs, vf_id,
				  port_cfg, vport);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	if (rc != BFA_STATUS_OK)
		goto ext_free_vport;

	if (port_cfg->roles & BFA_LPORT_ROLE_FCP_IM) {
		rc = bfad_im_scsi_host_alloc(bfad, vport->drv_port.im_port,
							dev);
		if (rc != BFA_STATUS_OK)
			goto ext_free_fcs_vport;
	}

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	bfa_fcs_vport_start(&vport->fcs_vport);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	return BFA_STATUS_OK;

ext_free_fcs_vport:
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	vport->comp_del = &fcomp;
	init_completion(vport->comp_del);
	bfa_fcs_vport_delete(&vport->fcs_vport);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	wait_for_completion(vport->comp_del);
ext_free_vport:
	kfree(vport);
ext:
	return rc;
}

/**
 * Create a vf and its base vport implicitly.
 */
bfa_status_t
bfad_vf_create(struct bfad_s *bfad, u16 vf_id,
	       struct bfa_lport_cfg_s *port_cfg)
{
	struct bfad_vf_s      *vf;
	int		rc = BFA_STATUS_OK;

	vf = kzalloc(sizeof(struct bfad_vf_s), GFP_KERNEL);
	if (!vf) {
		rc = BFA_STATUS_FAILED;
		goto ext;
	}

	rc = bfa_fcs_vf_create(&vf->fcs_vf, &bfad->bfa_fcs, vf_id, port_cfg,
			       vf);
	if (rc != BFA_STATUS_OK)
		kfree(vf);
ext:
	return rc;
}

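/**
 * Periodic timer callback: drives the BFA timer module and processes any
 * pending completions outside the lock, then re-arms the timer.
 */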
void
bfad_bfa_tmo(unsigned long data)
{
	struct bfad_s	      *bfad = (struct bfad_s *) data;
	unsigned long	flags;
	struct list_head	       doneq;

	spin_lock_irqsave(&bfad->bfad_lock, flags);

	bfa_timer_tick(&bfad->bfa);

	bfa_comp_deq(&bfad->bfa, &doneq);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	if (!list_empty(&doneq)) {
		bfa_comp_process(&bfad->bfa, &doneq);
		spin_lock_irqsave(&bfad->bfad_lock, flags);
		bfa_comp_free(&bfad->bfa, &doneq);
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	}

	mod_timer(&bfad->hal_tmo,
		  jiffies + msecs_to_jiffies(BFA_TIMER_FREQ));
}

void
bfad_init_timer(struct bfad_s *bfad)
{
	init_timer(&bfad->hal_tmo);
	bfad->hal_tmo.function = bfad_bfa_tmo;
	bfad->hal_tmo.data = (unsigned long)bfad;

	mod_timer(&bfad->hal_tmo,
		  jiffies + msecs_to_jiffies(BFA_TIMER_FREQ));
}

int
bfad_pci_init(struct pci_dev *pdev, struct bfad_s *bfad)
{
	int		rc = -ENODEV;

	if (pci_enable_device(pdev)) {
		printk(KERN_ERR "pci_enable_device fail %p\n", pdev);
		goto out;
	}

	if (pci_request_regions(pdev, BFAD_DRIVER_NAME))
		goto out_disable_device;

	pci_set_master(pdev);

	if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) != 0)
		if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) {
			printk(KERN_ERR "pci_set_dma_mask fail %p\n", pdev);
			goto out_release_region;
		}

	bfad->pci_bar0_kva = pci_iomap(pdev, 0, pci_resource_len(pdev, 0));

	if (bfad->pci_bar0_kva == NULL) {
		printk(KERN_ERR "Failed to map BAR0\n");
		goto out_release_region;
	}

	bfad->hal_pcidev.pci_slot = PCI_SLOT(pdev->devfn);
	bfad->hal_pcidev.pci_func = PCI_FUNC(pdev->devfn);
	bfad->hal_pcidev.pci_bar_kva = bfad->pci_bar0_kva;
	bfad->hal_pcidev.device_id = pdev->device;
	bfad->pci_name = pci_name(pdev);

	bfad->pci_attr.vendor_id = pdev->vendor;
	bfad->pci_attr.device_id = pdev->device;
	bfad->pci_attr.ssid = pdev->subsystem_device;
	bfad->pci_attr.ssvid = pdev->subsystem_vendor;
	bfad->pci_attr.pcifn = PCI_FUNC(pdev->devfn);

	bfad->pcidev = pdev;

	/* Adjust PCIe Maximum Read Request Size */
	if (pcie_max_read_reqsz > 0) {
		int pcie_cap_reg;
		u16 pcie_dev_ctl;
		u16 mask = 0xffff;

		switch (pcie_max_read_reqsz) {
		case 128:
			mask = 0x0;
			break;
		case 256:
			mask = 0x1000;
			break;
		case 512:
			mask = 0x2000;
			break;
		case 1024:
			mask = 0x3000;
			break;
		case 2048:
			mask = 0x4000;
			break;
		case 4096:
			mask = 0x5000;
			break;
		default:
			break;
		}

		pcie_cap_reg = pci_find_capability(pdev, PCI_CAP_ID_EXP);
		if (mask != 0xffff && pcie_cap_reg) {
			pcie_cap_reg += 0x08;
			pci_read_config_word(pdev, pcie_cap_reg, &pcie_dev_ctl);
			if ((pcie_dev_ctl & 0x7000) != mask) {
				printk(KERN_WARNING "BFA[%s]: "
				"pcie_max_read_request_size is %d, "
				"reset to %d\n", bfad->pci_name,
				(1 << ((pcie_dev_ctl & 0x7000) >> 12)) << 7,
				pcie_max_read_reqsz);

				pcie_dev_ctl &= ~0x7000;
				pci_write_config_word(pdev, pcie_cap_reg,
						pcie_dev_ctl | mask);
			}
		}
	}

	return 0;

out_release_region:
	pci_release_regions(pdev);
out_disable_device:
	pci_disable_device(pdev);
out:
	return rc;
}

void
bfad_pci_uninit(struct pci_dev *pdev, struct bfad_s *bfad)
{
	pci_iounmap(pdev, bfad->pci_bar0_kva);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
}

void
bfad_fcs_port_cfg(struct bfad_s *bfad)
{
	struct bfa_lport_cfg_s  port_cfg;
	struct bfa_port_attr_s attr;
	char		symname[BFA_SYMNAME_MAXLEN];

	sprintf(symname, "%s-%d", BFAD_DRIVER_NAME, bfad->inst_no);
	memcpy(port_cfg.sym_name.symname, symname, strlen(symname));
	bfa_fcport_get_attr(&bfad->bfa, &attr);
	port_cfg.nwwn = attr.nwwn;
	port_cfg.pwwn = attr.pwwn;
}

bfa_status_t
bfad_drv_init(struct bfad_s *bfad)
{
	bfa_status_t	rc;
	unsigned long	flags;

	bfad->cfg_data.rport_del_timeout = rport_del_timeout;
	bfad->cfg_data.lun_queue_depth = bfa_lun_queue_depth;
	bfad->cfg_data.io_max_sge = bfa_io_max_sge;
	bfad->cfg_data.binding_method = FCP_PWWN_BINDING;

	rc = bfad_hal_mem_alloc(bfad);
	if (rc != BFA_STATUS_OK) {
		printk(KERN_WARNING "bfad%d bfad_hal_mem_alloc failure\n",
		       bfad->inst_no);
		printk(KERN_WARNING
			"Not enough memory to attach all Brocade HBA ports, %s",
			"System may need more memory.\n");
		goto out_hal_mem_alloc_failure;
	}

	bfa_init_trc(&bfad->bfa, bfad->trcmod);
	bfa_init_plog(&bfad->bfa, &bfad->plog_buf);
	bfa_plog_init(&bfad->plog_buf);
	bfa_plog_str(&bfad->plog_buf, BFA_PL_MID_DRVR, BFA_PL_EID_DRIVER_START,
		     0, "Driver Attach");

	bfa_attach(&bfad->bfa, bfad, &bfad->ioc_cfg, &bfad->meminfo,
		   &bfad->hal_pcidev);

	/* FCS INIT */
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	bfa_fcs_trc_init(&bfad->bfa_fcs, bfad->trcmod);
	bfa_fcs_attach(&bfad->bfa_fcs, &bfad->bfa, bfad, BFA_FALSE);
	bfa_fcs_set_fdmi_param(&bfad->bfa_fcs, fdmi_enable);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	bfad->bfad_flags |= BFAD_DRV_INIT_DONE;

	return BFA_STATUS_OK;

out_hal_mem_alloc_failure:
	return BFA_STATUS_FAILED;
}

void
bfad_drv_uninit(struct bfad_s *bfad)
{
	unsigned long   flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	init_completion(&bfad->comp);
	bfa_stop(&bfad->bfa);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	wait_for_completion(&bfad->comp);

	del_timer_sync(&bfad->hal_tmo);
	bfa_isr_disable(&bfad->bfa);
	bfa_detach(&bfad->bfa);
	bfad_remove_intr(bfad);
	bfad_hal_mem_release(bfad);

	bfad->bfad_flags &= ~BFAD_DRV_INIT_DONE;
}

void
bfad_drv_start(struct bfad_s *bfad)
{
	unsigned long	flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	bfa_start(&bfad->bfa);
	bfa_fcs_start(&bfad->bfa_fcs);
	bfad->bfad_flags |= BFAD_HAL_START_DONE;
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	if (bfad->im)
		flush_workqueue(bfad->im->drv_workq);
}

void
bfad_fcs_stop(struct bfad_s *bfad)
{
	unsigned long	flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	init_completion(&bfad->comp);
	bfad->pport.flags |= BFAD_PORT_DELETE;
	bfa_fcs_exit(&bfad->bfa_fcs);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	wait_for_completion(&bfad->comp);

	bfa_sm_send_event(bfad, BFAD_E_FCS_EXIT_COMP);
}

void
bfad_stop(struct bfad_s *bfad)
{
	unsigned long	flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	init_completion(&bfad->comp);
	bfa_stop(&bfad->bfa);
	bfad->bfad_flags &= ~BFAD_HAL_START_DONE;
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	wait_for_completion(&bfad->comp);

	bfa_sm_send_event(bfad, BFAD_E_EXIT_COMP);
}

bfa_status_t
bfad_cfg_pport(struct bfad_s *bfad, enum bfa_lport_role role)
{
	int		rc = BFA_STATUS_OK;

	/* Allocate scsi_host for the physical port */
	if ((supported_fc4s & BFA_LPORT_ROLE_FCP_IM) &&
	    (role & BFA_LPORT_ROLE_FCP_IM)) {
		if (bfad->pport.im_port == NULL) {
			rc = BFA_STATUS_FAILED;
			goto out;
		}

		rc = bfad_im_scsi_host_alloc(bfad, bfad->pport.im_port,
						&bfad->pcidev->dev);
		if (rc != BFA_STATUS_OK)
			goto out;

		bfad->pport.roles |= BFA_LPORT_ROLE_FCP_IM;
	}

	/* Setup the debugfs node for this scsi_host */
	if (bfa_debugfs_enable)
		bfad_debugfs_init(&bfad->pport);

	bfad->bfad_flags |= BFAD_CFG_PPORT_DONE;

out:
	return rc;
}

void
bfad_uncfg_pport(struct bfad_s *bfad)
{
	/* Remove the debugfs node for this scsi_host */
	kfree(bfad->regdata);
	bfad_debugfs_exit(&bfad->pport);

	if ((supported_fc4s & BFA_LPORT_ROLE_FCP_IM) &&
	    (bfad->pport.roles & BFA_LPORT_ROLE_FCP_IM)) {
		bfad_im_scsi_host_free(bfad, bfad->pport.im_port);
		bfad_im_port_clean(bfad->pport.im_port);
		kfree(bfad->pport.im_port);
		bfad->pport.roles &= ~BFA_LPORT_ROLE_FCP_IM;
	}

	bfad->bfad_flags &= ~BFAD_CFG_PPORT_DONE;
}

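/**
 * Start driver operations: initialize FCS, configure the physical port,
 * allocate FC4 initiator-mode (IM) resources and complete any pre-boot
 * (PBC) vport creations.
 */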
bfa_status_t
bfad_start_ops(struct bfad_s *bfad)
{
	int	retval;
	unsigned long	flags;
	struct bfad_vport_s *vport, *vport_new;
	struct bfa_fcs_driver_info_s driver_info;

	/* Fill out the driver_info structure and pass it to FCS */
	memset(&driver_info, 0, sizeof(driver_info));
	strncpy(driver_info.version, BFAD_DRIVER_VERSION,
		sizeof(driver_info.version) - 1);
	if (host_name)
		strncpy(driver_info.host_machine_name, host_name,
			sizeof(driver_info.host_machine_name) - 1);
	if (os_name)
		strncpy(driver_info.host_os_name, os_name,
			sizeof(driver_info.host_os_name) - 1);
	if (os_patch)
		strncpy(driver_info.host_os_patch, os_patch,
			sizeof(driver_info.host_os_patch) - 1);

	strncpy(driver_info.os_device_name, bfad->pci_name,
		sizeof(driver_info.os_device_name) - 1);

	/* FCS INIT */
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	bfa_fcs_driver_info_init(&bfad->bfa_fcs, &driver_info);
	bfa_fcs_init(&bfad->bfa_fcs);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	/* PPORT FCS config */
	bfad_fcs_port_cfg(bfad);

	retval = bfad_cfg_pport(bfad, BFA_LPORT_ROLE_FCP_IM);
	if (retval != BFA_STATUS_OK) {
		if (bfa_sm_cmp_state(bfad, bfad_sm_initializing))
			bfa_sm_set_state(bfad, bfad_sm_failed);
		bfad_stop(bfad);
		return BFA_STATUS_FAILED;
	}

	/* BFAD level FC4 IM specific resource allocation */
	retval = bfad_im_probe(bfad);
	if (retval != BFA_STATUS_OK) {
		printk(KERN_WARNING "bfad_im_probe failed\n");
		if (bfa_sm_cmp_state(bfad, bfad_sm_initializing))
			bfa_sm_set_state(bfad, bfad_sm_failed);
		bfad_im_probe_undo(bfad);
		bfad->bfad_flags &= ~BFAD_FC4_PROBE_DONE;
		bfad_uncfg_pport(bfad);
		bfad_stop(bfad);
		return BFA_STATUS_FAILED;
	} else
		bfad->bfad_flags |= BFAD_FC4_PROBE_DONE;

	bfad_drv_start(bfad);

	/* Complete pbc vport create */
	list_for_each_entry_safe(vport, vport_new, &bfad->pbc_vport_list,
				list_entry) {
		struct fc_vport_identifiers vid;
		struct fc_vport *fc_vport;
		char pwwn_buf[BFA_STRING_32];

		memset(&vid, 0, sizeof(vid));
		vid.roles = FC_PORT_ROLE_FCP_INITIATOR;
		vid.vport_type = FC_PORTTYPE_NPIV;
		vid.disable = false;
		vid.node_name = wwn_to_u64((u8 *)
				(&((vport->fcs_vport).lport.port_cfg.nwwn)));
		vid.port_name = wwn_to_u64((u8 *)
				(&((vport->fcs_vport).lport.port_cfg.pwwn)));
		fc_vport = fc_vport_create(bfad->pport.im_port->shost, 0, &vid);
		if (!fc_vport) {
			wwn2str(pwwn_buf, vid.port_name);
			printk(KERN_WARNING "bfad%d: failed to create pbc vport"
				" %s\n", bfad->inst_no, pwwn_buf);
		}
		list_del(&vport->list_entry);
		kfree(vport);
	}

	/*
	 * If bfa_linkup_delay is left at its default of -1, retrieve the
	 * value using bfad_os_get_linkup_delay(); otherwise use the value
	 * passed in via the module parameter as the bfa_linkup_delay.
	 */
	if (bfa_linkup_delay < 0) {
		bfa_linkup_delay = bfad_os_get_linkup_delay(bfad);
		bfad_os_rport_online_wait(bfad);
		bfa_linkup_delay = -1;
	} else
		bfad_os_rport_online_wait(bfad);

	BFA_LOG(KERN_INFO, bfad, log_level, "bfa device claimed\n");

	return BFA_STATUS_OK;
}

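/**
 * Driver kernel thread: sends BFAD_E_INIT_SUCCESS to start driver
 * operations once it runs (it is woken up by bfa_cb_init() when a
 * deferred HAL init eventually completes), then exits.
 */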
int
bfad_worker(void *ptr)
{
	struct bfad_s *bfad;
	unsigned long   flags;

	bfad = (struct bfad_s *)ptr;

	while (!kthread_should_stop()) {

		/* Send event BFAD_E_INIT_SUCCESS */
		bfa_sm_send_event(bfad, BFAD_E_INIT_SUCCESS);

		spin_lock_irqsave(&bfad->bfad_lock, flags);
		bfad->bfad_tsk = NULL;
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);

		break;
	}

	return 0;
}

/**
 *  BFA driver interrupt functions
 */
irqreturn_t
bfad_intx(int irq, void *dev_id)
{
	struct bfad_s	*bfad = dev_id;
	struct list_head	doneq;
	unsigned long	flags;
	bfa_boolean_t rc;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	rc = bfa_intx(&bfad->bfa);
	if (!rc) {
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
		return IRQ_NONE;
	}

	bfa_comp_deq(&bfad->bfa, &doneq);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	if (!list_empty(&doneq)) {
		bfa_comp_process(&bfad->bfa, &doneq);

		spin_lock_irqsave(&bfad->bfad_lock, flags);
		bfa_comp_free(&bfad->bfa, &doneq);
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
		bfa_trc_fp(bfad, irq);
	}

	return IRQ_HANDLED;
}

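/**
 * MSIX interrupt handler for a single vector; processes the completions
 * queued by the BFA for that vector.
 */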
static irqreturn_t
bfad_msix(int irq, void *dev_id)
{
	struct bfad_msix_s *vec = dev_id;
	struct bfad_s *bfad = vec->bfad;
	struct list_head doneq;
	unsigned long   flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);

	bfa_msix(&bfad->bfa, vec->msix.entry);
	bfa_comp_deq(&bfad->bfa, &doneq);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	if (!list_empty(&doneq)) {
		bfa_comp_process(&bfad->bfa, &doneq);

		spin_lock_irqsave(&bfad->bfad_lock, flags);
		bfa_comp_free(&bfad->bfa, &doneq);
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	}

	return IRQ_HANDLED;
}

/**
 * Initialize the MSIX entry table.
 */
static void
bfad_init_msix_entry(struct bfad_s *bfad, struct msix_entry *msix_entries,
			 int mask, int max_bit)
{
	int	i;
	int	match = 0x00000001;

	for (i = 0, bfad->nvec = 0; i < MAX_MSIX_ENTRY; i++) {
		if (mask & match) {
			bfad->msix_tab[bfad->nvec].msix.entry = i;
			bfad->msix_tab[bfad->nvec].bfad = bfad;
			msix_entries[bfad->nvec].entry = i;
			bfad->nvec++;
		}

		match <<= 1;
	}
}

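/**
 * Request an IRQ for each MSIX vector; on failure, free the vectors
 * already acquired and return nonzero.
 */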
int
bfad_install_msix_handler(struct bfad_s *bfad)
{
	int i, error = 0;

	for (i = 0; i < bfad->nvec; i++) {
		sprintf(bfad->msix_tab[i].name, "bfa-%s-%s",
				bfad->pci_name,
				((bfa_asic_id_ct(bfad->hal_pcidev.device_id)) ?
				msix_name_ct[i] : msix_name_cb[i]));

		error = request_irq(bfad->msix_tab[i].msix.vector,
				    (irq_handler_t) bfad_msix, 0,
				    bfad->msix_tab[i].name, &bfad->msix_tab[i]);
		bfa_trc(bfad, i);
		bfa_trc(bfad, bfad->msix_tab[i].msix.vector);
		if (error) {
			int	j;

			for (j = 0; j < i; j++)
				free_irq(bfad->msix_tab[j].msix.vector,
						&bfad->msix_tab[j]);

			return 1;
		}
	}

	return 0;
}

/**
 * Setup MSIX based interrupt.
 */
int
bfad_setup_intr(struct bfad_s *bfad)
{
	int error = 0;
	u32 mask = 0, i, num_bit = 0, max_bit = 0;
	struct msix_entry msix_entries[MAX_MSIX_ENTRY];
	struct pci_dev *pdev = bfad->pcidev;

	/* Call BFA to get the msix map for this PCI function.  */
	bfa_msix_getvecs(&bfad->bfa, &mask, &num_bit, &max_bit);

	/* Set up the msix entry table */
	bfad_init_msix_entry(bfad, msix_entries, mask, max_bit);

	if ((bfa_asic_id_ct(pdev->device) && !msix_disable_ct) ||
	    (!bfa_asic_id_ct(pdev->device) && !msix_disable_cb)) {

		error = pci_enable_msix(bfad->pcidev, msix_entries, bfad->nvec);
		if (error) {
			/*
			 * On failure only the number of available vectors
			 * is returned. We don't have a mechanism to map
			 * multiple interrupts into one vector, so even if
			 * we could request fewer vectors, we don't know how
			 * to associate interrupt events with vectors. Linux
			 * doesn't duplicate vectors in the MSIX table for
			 * this case.
			 */

			printk(KERN_WARNING "bfad%d: "
				"pci_enable_msix failed (%d),"
				" use line based.\n", bfad->inst_no, error);

			goto line_based;
		}

		/* Save the vectors */
		for (i = 0; i < bfad->nvec; i++) {
			bfa_trc(bfad, msix_entries[i].vector);
			bfad->msix_tab[i].msix.vector = msix_entries[i].vector;
		}

		bfa_msix_init(&bfad->bfa, bfad->nvec);

		bfad->bfad_flags |= BFAD_MSIX_ON;

		return error;
	}

line_based:
	error = 0;
	if (request_irq
	    (bfad->pcidev->irq, (irq_handler_t) bfad_intx, BFAD_IRQ_FLAGS,
	     BFAD_DRIVER_NAME, bfad) != 0) {
		/* Enabling the interrupt handler failed */
		return 1;
	}

	return error;
}

void
bfad_remove_intr(struct bfad_s *bfad)
{
	int	i;

	if (bfad->bfad_flags & BFAD_MSIX_ON) {
		for (i = 0; i < bfad->nvec; i++)
			free_irq(bfad->msix_tab[i].msix.vector,
					&bfad->msix_tab[i]);

		pci_disable_msix(bfad->pcidev);
		bfad->bfad_flags &= ~BFAD_MSIX_ON;
	} else {
		free_irq(bfad->pcidev->irq, bfad);
	}
}

/**
 * PCI probe entry.
 */
int
bfad_pci_probe(struct pci_dev *pdev, const struct pci_device_id *pid)
{
	struct bfad_s	*bfad;
	int		error = -ENODEV, retval;

	/* For single port cards - only claim function 0 */
	if ((pdev->device == BFA_PCI_DEVICE_ID_FC_8G1P) &&
		(PCI_FUNC(pdev->devfn) != 0))
		return -ENODEV;

	bfad = kzalloc(sizeof(struct bfad_s), GFP_KERNEL);
	if (!bfad) {
		error = -ENOMEM;
		goto out;
	}

	bfad->trcmod = kzalloc(sizeof(struct bfa_trc_mod_s), GFP_KERNEL);
	if (!bfad->trcmod) {
		printk(KERN_WARNING "Failed to allocate trace buffer!\n");
		error = -ENOMEM;
		goto out_alloc_trace_failure;
	}

	/* TRACE INIT */
	bfa_trc_init(bfad->trcmod);
	bfa_trc(bfad, bfad_inst);

	if (!(bfad_load_fwimg(pdev))) {
		kfree(bfad->trcmod);
		goto out_alloc_trace_failure;
	}

	retval = bfad_pci_init(pdev, bfad);
	if (retval) {
		printk(KERN_WARNING "bfad_pci_init failure!\n");
		error = retval;
		goto out_pci_init_failure;
	}

	mutex_lock(&bfad_mutex);
	bfad->inst_no = bfad_inst++;
	list_add_tail(&bfad->list_entry, &bfad_list);
	mutex_unlock(&bfad_mutex);

	/* Initializing the state machine: State set to uninit */
	bfa_sm_set_state(bfad, bfad_sm_uninit);

	spin_lock_init(&bfad->bfad_lock);
	pci_set_drvdata(pdev, bfad);

	bfad->ref_count = 0;
	bfad->pport.bfad = bfad;
	INIT_LIST_HEAD(&bfad->pbc_vport_list);

	retval = bfad_drv_init(bfad);
	if (retval != BFA_STATUS_OK)
		goto out_drv_init_failure;

	bfa_sm_send_event(bfad, BFAD_E_CREATE);

	if (bfa_sm_cmp_state(bfad, bfad_sm_uninit))
		goto out_bfad_sm_failure;

	return 0;

out_bfad_sm_failure:
	bfa_detach(&bfad->bfa);
	bfad_hal_mem_release(bfad);
out_drv_init_failure:
	mutex_lock(&bfad_mutex);
	bfad_inst--;
	list_del(&bfad->list_entry);
	mutex_unlock(&bfad_mutex);
	bfad_pci_uninit(pdev, bfad);
out_pci_init_failure:
	kfree(bfad->trcmod);
out_alloc_trace_failure:
	kfree(bfad);
out:
	return error;
}

/**
 * PCI remove entry.
 */
void
bfad_pci_remove(struct pci_dev *pdev)
{
	struct bfad_s	      *bfad = pci_get_drvdata(pdev);
	unsigned long	flags;

	bfa_trc(bfad, bfad->inst_no);

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	if (bfad->bfad_tsk != NULL) {
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
		kthread_stop(bfad->bfad_tsk);
	} else {
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	}

	/* Send Event BFAD_E_STOP */
	bfa_sm_send_event(bfad, BFAD_E_STOP);

	/* Driver detach and dealloc mem */
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	bfa_detach(&bfad->bfa);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	bfad_hal_mem_release(bfad);

	/* Cleaning the BFAD instance */
	mutex_lock(&bfad_mutex);
	bfad_inst--;
	list_del(&bfad->list_entry);
	mutex_unlock(&bfad_mutex);
	bfad_pci_uninit(pdev, bfad);

	kfree(bfad->trcmod);
	kfree(bfad);
}

struct pci_device_id bfad_id_table[] = {
	{
		.vendor = BFA_PCI_VENDOR_ID_BROCADE,
		.device = BFA_PCI_DEVICE_ID_FC_8G2P,
		.subvendor = PCI_ANY_ID,
		.subdevice = PCI_ANY_ID,
	},
	{
		.vendor = BFA_PCI_VENDOR_ID_BROCADE,
		.device = BFA_PCI_DEVICE_ID_FC_8G1P,
		.subvendor = PCI_ANY_ID,
		.subdevice = PCI_ANY_ID,
	},
	{
		.vendor = BFA_PCI_VENDOR_ID_BROCADE,
		.device = BFA_PCI_DEVICE_ID_CT,
		.subvendor = PCI_ANY_ID,
		.subdevice = PCI_ANY_ID,
		.class = (PCI_CLASS_SERIAL_FIBER << 8),
		.class_mask = ~0,
	},
	{
		.vendor = BFA_PCI_VENDOR_ID_BROCADE,
		.device = BFA_PCI_DEVICE_ID_CT_FC,
		.subvendor = PCI_ANY_ID,
		.subdevice = PCI_ANY_ID,
		.class = (PCI_CLASS_SERIAL_FIBER << 8),
		.class_mask = ~0,
	},

	{0, 0},
};

MODULE_DEVICE_TABLE(pci, bfad_id_table);

static struct pci_driver bfad_pci_driver = {
	.name = BFAD_DRIVER_NAME,
	.id_table = bfad_id_table,
	.probe = bfad_pci_probe,
	.remove = __devexit_p(bfad_pci_remove),
};

/**
 * Driver module init.
 */
static int __init
bfad_init(void)
{
	int		error = 0;

	printk(KERN_INFO "Brocade BFA FC/FCOE SCSI driver - version: %s\n",
			BFAD_DRIVER_VERSION);

	if (num_sgpgs > 0)
		num_sgpgs_parm = num_sgpgs;

	error = bfad_im_module_init();
	if (error) {
		error = -ENOMEM;
		printk(KERN_WARNING "bfad_im_module_init failure\n");
		goto ext;
	}

	if (strcmp(FCPI_NAME, " fcpim") == 0)
		supported_fc4s |= BFA_LPORT_ROLE_FCP_IM;

	bfa_ioc_auto_recover(ioc_auto_recover);
	bfa_fcs_rport_set_del_timeout(rport_del_timeout);

	error = pci_register_driver(&bfad_pci_driver);
	if (error) {
		printk(KERN_WARNING "pci_register_driver failure\n");
		goto ext;
	}

	return 0;

ext:
	bfad_im_module_exit();
	return error;
}

/**
 * Driver module exit.
 */
static void __exit
bfad_exit(void)
{
	pci_unregister_driver(&bfad_pci_driver);
	bfad_im_module_exit();
	bfad_free_fwimg();
}

/* Firmware handling */
u32 *
bfad_read_firmware(struct pci_dev *pdev, u32 **bfi_image,
		u32 *bfi_image_size, char *fw_name)
{
	const struct firmware *fw;

	if (request_firmware(&fw, fw_name, &pdev->dev)) {
		printk(KERN_ALERT "Can't locate firmware %s\n", fw_name);
		goto error;
	}

	*bfi_image = vmalloc(fw->size);
	if (NULL == *bfi_image) {
		printk(KERN_ALERT "Failed to allocate buffer for fw image "
			"size=%x!\n", (u32) fw->size);
		goto error;
	}

	memcpy(*bfi_image, fw->data, fw->size);
	*bfi_image_size = fw->size/sizeof(u32);

	return *bfi_image;

error:
	return NULL;
}

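/**
 * Return the firmware image matching the PCI device id, reading it via
 * request_firmware() on first use.
 */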
u32 *
bfad_get_firmware_buf(struct pci_dev *pdev)
{
	if (pdev->device == BFA_PCI_DEVICE_ID_CT_FC) {
		if (bfi_image_ct_fc_size == 0)
			bfad_read_firmware(pdev, &bfi_image_ct_fc,
				&bfi_image_ct_fc_size, BFAD_FW_FILE_CT_FC);
		return bfi_image_ct_fc;
	} else if (pdev->device == BFA_PCI_DEVICE_ID_CT) {
		if (bfi_image_ct_cna_size == 0)
			bfad_read_firmware(pdev, &bfi_image_ct_cna,
				&bfi_image_ct_cna_size, BFAD_FW_FILE_CT_CNA);
		return bfi_image_ct_cna;
	} else {
		if (bfi_image_cb_fc_size == 0)
			bfad_read_firmware(pdev, &bfi_image_cb_fc,
				&bfi_image_cb_fc_size, BFAD_FW_FILE_CB_FC);
		return bfi_image_cb_fc;
	}
}

module_init(bfad_init);
module_exit(bfad_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Brocade Fibre Channel HBA Driver" BFAD_PROTO_NAME);
MODULE_AUTHOR("Brocade Communications Systems, Inc.");
MODULE_VERSION(BFAD_DRIVER_VERSION);