qla_attr.c revision 08de2844c626511cfd1db9c36e5e7d126707f780
1/*
2 * QLogic Fibre Channel HBA Driver
3 * Copyright (c)  2003-2011 QLogic Corporation
4 *
5 * See LICENSE.qla2xxx for copyright and licensing details.
6 */
7#include "qla_def.h"
8
9#include <linux/kthread.h>
10#include <linux/vmalloc.h>
11#include <linux/slab.h>
12#include <linux/delay.h>
13
14static int qla24xx_vport_disable(struct fc_vport *, bool);
15
16/* SYSFS attributes --------------------------------------------------------- */
17
18static ssize_t
19qla2x00_sysfs_read_fw_dump(struct file *filp, struct kobject *kobj,
20			   struct bin_attribute *bin_attr,
21			   char *buf, loff_t off, size_t count)
22{
23	struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
24	    struct device, kobj)));
25	struct qla_hw_data *ha = vha->hw;
26	int rval = 0;
27
28	if (ha->fw_dump_reading == 0)
29		return 0;
30
31	if (IS_QLA82XX(ha)) {
32		if (off < ha->md_template_size) {
33			rval = memory_read_from_buffer(buf, count,
34			    &off, ha->md_tmplt_hdr, ha->md_template_size);
35			return rval;
36		}
37		off -= ha->md_template_size;
38		rval = memory_read_from_buffer(buf, count,
39		    &off, ha->md_dump, ha->md_dump_size);
40		return rval;
41	} else
42		return memory_read_from_buffer(buf, count, &off, ha->fw_dump,
43					ha->fw_dump_len);
44}
45
46static ssize_t
47qla2x00_sysfs_write_fw_dump(struct file *filp, struct kobject *kobj,
48			    struct bin_attribute *bin_attr,
49			    char *buf, loff_t off, size_t count)
50{
51	struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
52	    struct device, kobj)));
53	struct qla_hw_data *ha = vha->hw;
54	int reading;
55
56	if (off != 0)
57		return (0);
58
59	reading = simple_strtol(buf, NULL, 10);
60	switch (reading) {
61	case 0:
62		if (!ha->fw_dump_reading)
63			break;
64
65		ql_log(ql_log_info, vha, 0x705d,
66		    "Firmware dump cleared on (%ld).\n", vha->host_no);
67
68		if (IS_QLA82XX(vha->hw)) {
69			qla82xx_md_free(vha);
70			qla82xx_md_prep(vha);
71		}
72		ha->fw_dump_reading = 0;
73		ha->fw_dumped = 0;
74		break;
75	case 1:
76		if (ha->fw_dumped && !ha->fw_dump_reading) {
77			ha->fw_dump_reading = 1;
78
79			ql_log(ql_log_info, vha, 0x705e,
80			    "Raw firmware dump ready for read on (%ld).\n",
81			    vha->host_no);
82		}
83		break;
84	case 2:
85		qla2x00_alloc_fw_dump(vha);
86		break;
87	case 3:
88		if (IS_QLA82XX(ha)) {
89			qla82xx_idc_lock(ha);
90			qla82xx_set_reset_owner(vha);
91			qla82xx_idc_unlock(ha);
92		} else
93			qla2x00_system_error(vha);
94		break;
95	case 4:
96		if (IS_QLA82XX(ha)) {
97			if (ha->md_tmplt_hdr)
98				ql_dbg(ql_dbg_user, vha, 0x705b,
99				    "MiniDump supported with this firmware.\n");
100			else
101				ql_dbg(ql_dbg_user, vha, 0x709d,
102				    "MiniDump not supported with this firmware.\n");
103		}
104		break;
105	case 5:
106		if (IS_QLA82XX(ha))
107			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
108		break;
109	}
110	return (count);
111}
112
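/*
 * Control values accepted by the fw_dump write method above:
 *
 *	0 -- clear a previously captured dump (and re-arm ISP82xx minidump).
 *	1 -- expose a captured dump for reading through this attribute.
 *	2 -- (re)allocate the firmware dump buffer.
 *	3 -- force a firmware dump (system error, or reset-owner on ISP82xx).
 *	4 -- report whether the running firmware supports MiniDump (ISP82xx).
 *	5 -- schedule an ISP abort (ISP82xx only).
 *
 * A rough user-space sequence (the sysfs path is illustrative and may vary)
 * might be:
 *
 *	echo 1 > /sys/class/scsi_host/hostN/device/fw_dump
 *	cat /sys/class/scsi_host/hostN/device/fw_dump > fw_dump.bin
 *	echo 0 > /sys/class/scsi_host/hostN/device/fw_dump
 */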
113static struct bin_attribute sysfs_fw_dump_attr = {
114	.attr = {
115		.name = "fw_dump",
116		.mode = S_IRUSR | S_IWUSR,
117	},
118	.size = 0,
119	.read = qla2x00_sysfs_read_fw_dump,
120	.write = qla2x00_sysfs_write_fw_dump,
121};
122
123static ssize_t
124qla2x00_sysfs_read_nvram(struct file *filp, struct kobject *kobj,
125			 struct bin_attribute *bin_attr,
126			 char *buf, loff_t off, size_t count)
127{
128	struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
129	    struct device, kobj)));
130	struct qla_hw_data *ha = vha->hw;
131
132	if (!capable(CAP_SYS_ADMIN))
133		return 0;
134
135	if (IS_NOCACHE_VPD_TYPE(ha))
136		ha->isp_ops->read_optrom(vha, ha->nvram, ha->flt_region_nvram << 2,
137		    ha->nvram_size);
138	return memory_read_from_buffer(buf, count, &off, ha->nvram,
139					ha->nvram_size);
140}
141
142static ssize_t
143qla2x00_sysfs_write_nvram(struct file *filp, struct kobject *kobj,
144			  struct bin_attribute *bin_attr,
145			  char *buf, loff_t off, size_t count)
146{
147	struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
148	    struct device, kobj)));
149	struct qla_hw_data *ha = vha->hw;
150	uint16_t	cnt;
151
152	if (!capable(CAP_SYS_ADMIN) || off != 0 || count != ha->nvram_size ||
153	    !ha->isp_ops->write_nvram)
154		return 0;
155
156	/* Checksum NVRAM. */
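	/*
	 * The last 32-bit word (FWI2) or byte (legacy) of the image is
	 * replaced with the two's complement of the sum of the preceding
	 * words/bytes, so that the complete NVRAM image sums to zero.
	 */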
157	if (IS_FWI2_CAPABLE(ha)) {
158		uint32_t *iter;
159		uint32_t chksum;
160
161		iter = (uint32_t *)buf;
162		chksum = 0;
163		for (cnt = 0; cnt < ((count >> 2) - 1); cnt++)
164			chksum += le32_to_cpu(*iter++);
165		chksum = ~chksum + 1;
166		*iter = cpu_to_le32(chksum);
167	} else {
168		uint8_t *iter;
169		uint8_t chksum;
170
171		iter = (uint8_t *)buf;
172		chksum = 0;
173		for (cnt = 0; cnt < count - 1; cnt++)
174			chksum += *iter++;
175		chksum = ~chksum + 1;
176		*iter = chksum;
177	}
178
179	if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) {
180		ql_log(ql_log_warn, vha, 0x705f,
181		    "HBA not online, failing NVRAM update.\n");
182		return -EAGAIN;
183	}
184
185	/* Write NVRAM. */
186	ha->isp_ops->write_nvram(vha, (uint8_t *)buf, ha->nvram_base, count);
187	ha->isp_ops->read_nvram(vha, (uint8_t *)ha->nvram, ha->nvram_base,
188	    count);
189
190	ql_dbg(ql_dbg_user, vha, 0x7060,
191	    "Setting ISP_ABORT_NEEDED\n");
192	/* NVRAM settings take effect immediately. */
193	set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
194	qla2xxx_wake_dpc(vha);
195	qla2x00_wait_for_chip_reset(vha);
196
197	return (count);
198}
199
200static struct bin_attribute sysfs_nvram_attr = {
201	.attr = {
202		.name = "nvram",
203		.mode = S_IRUSR | S_IWUSR,
204	},
205	.size = 512,
206	.read = qla2x00_sysfs_read_nvram,
207	.write = qla2x00_sysfs_write_nvram,
208};
209
210static ssize_t
211qla2x00_sysfs_read_optrom(struct file *filp, struct kobject *kobj,
212			  struct bin_attribute *bin_attr,
213			  char *buf, loff_t off, size_t count)
214{
215	struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
216	    struct device, kobj)));
217	struct qla_hw_data *ha = vha->hw;
218
219	if (ha->optrom_state != QLA_SREADING)
220		return 0;
221
222	return memory_read_from_buffer(buf, count, &off, ha->optrom_buffer,
223					ha->optrom_region_size);
224}
225
226static ssize_t
227qla2x00_sysfs_write_optrom(struct file *filp, struct kobject *kobj,
228			   struct bin_attribute *bin_attr,
229			   char *buf, loff_t off, size_t count)
230{
231	struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
232	    struct device, kobj)));
233	struct qla_hw_data *ha = vha->hw;
234
235	if (ha->optrom_state != QLA_SWRITING)
236		return -EINVAL;
237	if (off > ha->optrom_region_size)
238		return -ERANGE;
239	if (off + count > ha->optrom_region_size)
240		count = ha->optrom_region_size - off;
241
242	memcpy(&ha->optrom_buffer[off], buf, count);
243
244	return count;
245}
246
247static struct bin_attribute sysfs_optrom_attr = {
248	.attr = {
249		.name = "optrom",
250		.mode = S_IRUSR | S_IWUSR,
251	},
252	.size = 0,
253	.read = qla2x00_sysfs_read_optrom,
254	.write = qla2x00_sysfs_write_optrom,
255};
256
257static ssize_t
258qla2x00_sysfs_write_optrom_ctl(struct file *filp, struct kobject *kobj,
259			       struct bin_attribute *bin_attr,
260			       char *buf, loff_t off, size_t count)
261{
262	struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
263	    struct device, kobj)));
264	struct qla_hw_data *ha = vha->hw;
265
266	uint32_t start = 0;
267	uint32_t size = ha->optrom_size;
268	int val, valid;
269
270	if (off)
271		return 0;
272
273	if (unlikely(pci_channel_offline(ha->pdev)))
274		return 0;
275
276	if (sscanf(buf, "%d:%x:%x", &val, &start, &size) < 1)
277		return -EINVAL;
278	if (start > ha->optrom_size)
279		return -EINVAL;
280
281	switch (val) {
282	case 0:
283		if (ha->optrom_state != QLA_SREADING &&
284		    ha->optrom_state != QLA_SWRITING)
285			break;
286
287		ha->optrom_state = QLA_SWAITING;
288
289		ql_dbg(ql_dbg_user, vha, 0x7061,
290		    "Freeing flash region allocation -- 0x%x bytes.\n",
291		    ha->optrom_region_size);
292
293		vfree(ha->optrom_buffer);
294		ha->optrom_buffer = NULL;
295		break;
296	case 1:
297		if (ha->optrom_state != QLA_SWAITING)
298			break;
299
300		ha->optrom_region_start = start;
301		ha->optrom_region_size = start + size > ha->optrom_size ?
302		    ha->optrom_size - start : size;
303
304		ha->optrom_state = QLA_SREADING;
305		ha->optrom_buffer = vmalloc(ha->optrom_region_size);
306		if (ha->optrom_buffer == NULL) {
307			ql_log(ql_log_warn, vha, 0x7062,
308			    "Unable to allocate memory for optrom retrieval "
309			    "(%x).\n", ha->optrom_region_size);
310
311			ha->optrom_state = QLA_SWAITING;
312			return count;
313		}
314
315		if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) {
316			ql_log(ql_log_warn, vha, 0x7063,
317			    "HBA not online, failing flash read.\n");
318			return -EAGAIN;
319		}
320
321		ql_dbg(ql_dbg_user, vha, 0x7064,
322		    "Reading flash region -- 0x%x/0x%x.\n",
323		    ha->optrom_region_start, ha->optrom_region_size);
324
325		memset(ha->optrom_buffer, 0, ha->optrom_region_size);
326		ha->isp_ops->read_optrom(vha, ha->optrom_buffer,
327		    ha->optrom_region_start, ha->optrom_region_size);
328		break;
329	case 2:
330		if (ha->optrom_state != QLA_SWAITING)
331			break;
332
333		/*
334		 * We need to be more restrictive on which FLASH regions are
335		 * allowed to be updated via user-space.  Regions accessible
336		 * via this method include:
337		 *
338		 * ISP21xx/ISP22xx/ISP23xx type boards:
339		 *
340		 * 	0x000000 -> 0x020000 -- Boot code.
341		 *
342		 * ISP2322/ISP24xx type boards:
343		 *
344		 * 	0x000000 -> 0x07ffff -- Boot code.
345		 * 	0x080000 -> 0x0fffff -- Firmware.
346		 *
347		 * ISP25xx type boards:
348		 *
349		 * 	0x000000 -> 0x07ffff -- Boot code.
350		 * 	0x080000 -> 0x0fffff -- Firmware.
351		 * 	0x120000 -> 0x12ffff -- VPD and HBA parameters.
352		 */
353		valid = 0;
354		if (ha->optrom_size == OPTROM_SIZE_2300 && start == 0)
355			valid = 1;
356		else if (start == (ha->flt_region_boot * 4) ||
357		    start == (ha->flt_region_fw * 4))
358			valid = 1;
359		else if (IS_QLA25XX(ha) || IS_QLA8XXX_TYPE(ha))
360			valid = 1;
361		if (!valid) {
362			ql_log(ql_log_warn, vha, 0x7065,
363			    "Invalid start region 0x%x/0x%x.\n", start, size);
364			return -EINVAL;
365		}
366
367		ha->optrom_region_start = start;
368		ha->optrom_region_size = start + size > ha->optrom_size ?
369		    ha->optrom_size - start : size;
370
371		ha->optrom_state = QLA_SWRITING;
372		ha->optrom_buffer = vmalloc(ha->optrom_region_size);
373		if (ha->optrom_buffer == NULL) {
374			ql_log(ql_log_warn, vha, 0x7066,
375			    "Unable to allocate memory for optrom update "
376			    "(%x)\n", ha->optrom_region_size);
377
378			ha->optrom_state = QLA_SWAITING;
379			return count;
380		}
381
382		ql_dbg(ql_dbg_user, vha, 0x7067,
383		    "Staging flash region write -- 0x%x/0x%x.\n",
384		    ha->optrom_region_start, ha->optrom_region_size);
385
386		memset(ha->optrom_buffer, 0, ha->optrom_region_size);
387		break;
388	case 3:
389		if (ha->optrom_state != QLA_SWRITING)
390			break;
391
392		if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) {
393			ql_log(ql_log_warn, vha, 0x7068,
394			    "HBA not online, failing flash update.\n");
395			return -EAGAIN;
396		}
397
398		ql_dbg(ql_dbg_user, vha, 0x7069,
399		    "Writing flash region -- 0x%x/0x%x.\n",
400		    ha->optrom_region_start, ha->optrom_region_size);
401
402		ha->isp_ops->write_optrom(vha, ha->optrom_buffer,
403		    ha->optrom_region_start, ha->optrom_region_size);
404		break;
405	default:
406		count = -EINVAL;
407	}
408	return count;
409}
410
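/*
 * The optrom_ctl attribute drives a "<val>:<start>:<size>" protocol
 * (start/size in hex, per the sscanf above) around the "optrom" attribute:
 *
 *	0                -- release the staging buffer and return to idle.
 *	1:<start>:<size> -- read the given flash region into the buffer; the
 *	                    data is then fetched through the "optrom" file.
 *	2:<start>:<size> -- stage a buffer for a flash update; the new image
 *	                    is then written into the "optrom" file.
 *	3                -- burn the staged buffer to flash.
 *
 * A rough user-space read sequence (values and paths illustrative) might be:
 *
 *	echo "1:0:20000" > optrom_ctl
 *	dd if=optrom of=boot.bin bs=64k
 *	echo 0 > optrom_ctl
 */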
411static struct bin_attribute sysfs_optrom_ctl_attr = {
412	.attr = {
413		.name = "optrom_ctl",
414		.mode = S_IWUSR,
415	},
416	.size = 0,
417	.write = qla2x00_sysfs_write_optrom_ctl,
418};
419
420static ssize_t
421qla2x00_sysfs_read_vpd(struct file *filp, struct kobject *kobj,
422		       struct bin_attribute *bin_attr,
423		       char *buf, loff_t off, size_t count)
424{
425	struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
426	    struct device, kobj)));
427	struct qla_hw_data *ha = vha->hw;
428
429	if (unlikely(pci_channel_offline(ha->pdev)))
430		return 0;
431
432	if (!capable(CAP_SYS_ADMIN))
433		return 0;
434
435	if (IS_NOCACHE_VPD_TYPE(ha))
436		ha->isp_ops->read_optrom(vha, ha->vpd, ha->flt_region_vpd << 2,
437		    ha->vpd_size);
438	return memory_read_from_buffer(buf, count, &off, ha->vpd, ha->vpd_size);
439}
440
441static ssize_t
442qla2x00_sysfs_write_vpd(struct file *filp, struct kobject *kobj,
443			struct bin_attribute *bin_attr,
444			char *buf, loff_t off, size_t count)
445{
446	struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
447	    struct device, kobj)));
448	struct qla_hw_data *ha = vha->hw;
449	uint8_t *tmp_data;
450
451	if (unlikely(pci_channel_offline(ha->pdev)))
452		return 0;
453
454	if (!capable(CAP_SYS_ADMIN) || off != 0 || count != ha->vpd_size ||
455	    !ha->isp_ops->write_nvram)
456		return 0;
457
458	if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) {
459		ql_log(ql_log_warn, vha, 0x706a,
460		    "HBA not online, failing VPD update.\n");
461		return -EAGAIN;
462	}
463
464	/* Write NVRAM. */
465	ha->isp_ops->write_nvram(vha, (uint8_t *)buf, ha->vpd_base, count);
466	ha->isp_ops->read_nvram(vha, (uint8_t *)ha->vpd, ha->vpd_base, count);
467
468	/* Update flash version information for 4Gb & above. */
469	if (!IS_FWI2_CAPABLE(ha))
470		goto done;
471
472	tmp_data = vmalloc(256);
473	if (!tmp_data) {
474		ql_log(ql_log_warn, vha, 0x706b,
475		    "Unable to allocate memory for VPD information update.\n");
476		goto done;
477	}
478	ha->isp_ops->get_flash_version(vha, tmp_data);
479	vfree(tmp_data);
480done:
481	return count;
482}
483
484static struct bin_attribute sysfs_vpd_attr = {
485	.attr = {
486		.name = "vpd",
487		.mode = S_IRUSR | S_IWUSR,
488	},
489	.size = 0,
490	.read = qla2x00_sysfs_read_vpd,
491	.write = qla2x00_sysfs_write_vpd,
492};
493
494static ssize_t
495qla2x00_sysfs_read_sfp(struct file *filp, struct kobject *kobj,
496		       struct bin_attribute *bin_attr,
497		       char *buf, loff_t off, size_t count)
498{
499	struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
500	    struct device, kobj)));
501	struct qla_hw_data *ha = vha->hw;
502	uint16_t iter, addr, offset;
503	int rval;
504
505	if (!capable(CAP_SYS_ADMIN) || off != 0 || count != SFP_DEV_SIZE * 2)
506		return 0;
507
508	if (ha->sfp_data)
509		goto do_read;
510
511	ha->sfp_data = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL,
512	    &ha->sfp_data_dma);
513	if (!ha->sfp_data) {
514		ql_log(ql_log_warn, vha, 0x706c,
515		    "Unable to allocate memory for SFP read-data.\n");
516		return 0;
517	}
518
519do_read:
520	memset(ha->sfp_data, 0, SFP_BLOCK_SIZE);
521	addr = 0xa0;
522	for (iter = 0, offset = 0; iter < (SFP_DEV_SIZE * 2) / SFP_BLOCK_SIZE;
523	    iter++, offset += SFP_BLOCK_SIZE) {
524		if (iter == 4) {
525			/* Skip to next device address. */
526			addr = 0xa2;
527			offset = 0;
528		}
529
530		rval = qla2x00_read_sfp(vha, ha->sfp_data_dma, ha->sfp_data,
531		    addr, offset, SFP_BLOCK_SIZE, 0);
532		if (rval != QLA_SUCCESS) {
533			ql_log(ql_log_warn, vha, 0x706d,
534			    "Unable to read SFP data (%x/%x/%x).\n", rval,
535			    addr, offset);
536
537			count = 0;
538			break;
539		}
540		memcpy(buf, ha->sfp_data, SFP_BLOCK_SIZE);
541		buf += SFP_BLOCK_SIZE;
542	}
543
544	return count;
545}
546
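/*
 * The read handler above walks the SFP transceiver's two-wire address space
 * in SFP_BLOCK_SIZE chunks: the first four blocks come from device address
 * 0xa0 (serial-ID data) and the remaining blocks from 0xa2 (diagnostics),
 * for SFP_DEV_SIZE * 2 bytes in total.
 */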
547static struct bin_attribute sysfs_sfp_attr = {
548	.attr = {
549		.name = "sfp",
550		.mode = S_IRUSR | S_IWUSR,
551	},
552	.size = SFP_DEV_SIZE * 2,
553	.read = qla2x00_sysfs_read_sfp,
554};
555
556static ssize_t
557qla2x00_sysfs_write_reset(struct file *filp, struct kobject *kobj,
558			struct bin_attribute *bin_attr,
559			char *buf, loff_t off, size_t count)
560{
561	struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
562	    struct device, kobj)));
563	struct qla_hw_data *ha = vha->hw;
564	struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
565	int type;
566
567	if (off != 0)
568		return 0;
569
570	type = simple_strtol(buf, NULL, 10);
571	switch (type) {
572	case 0x2025c:
573		ql_log(ql_log_info, vha, 0x706e,
574		    "Issuing ISP reset.\n");
575
576		scsi_block_requests(vha->host);
577		set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
578		if (IS_QLA82XX(ha)) {
579			qla82xx_idc_lock(ha);
580			qla82xx_set_reset_owner(vha);
581			qla82xx_idc_unlock(ha);
582		}
583		qla2xxx_wake_dpc(vha);
584		qla2x00_wait_for_chip_reset(vha);
585		scsi_unblock_requests(vha->host);
586		break;
587	case 0x2025d:
588		if (!IS_QLA81XX(ha))
589			break;
590
591		ql_log(ql_log_info, vha, 0x706f,
592		    "Issuing MPI reset.\n");
593
594		/* Make sure FC side is not in reset */
595		qla2x00_wait_for_hba_online(vha);
596
597		/* Issue MPI reset */
598		scsi_block_requests(vha->host);
599		if (qla81xx_restart_mpi_firmware(vha) != QLA_SUCCESS)
600			ql_log(ql_log_warn, vha, 0x7070,
601			    "MPI reset failed.\n");
602		scsi_unblock_requests(vha->host);
603		break;
604	case 0x2025e:
605		if (!IS_QLA82XX(ha) || vha != base_vha) {
606			ql_log(ql_log_info, vha, 0x7071,
607			    "FCoE ctx reset not supported.\n");
608			return count;
609		}
610
611		ql_log(ql_log_info, vha, 0x7072,
612		    "Issuing FCoE ctx reset.\n");
613		set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
614		qla2xxx_wake_dpc(vha);
615		qla2x00_wait_for_fcoe_ctx_reset(vha);
616		break;
617	}
618	return count;
619}
620
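/*
 * The reset attribute accepts the following magic values.  Note that the
 * handler parses the input with simple_strtol(..., 10), so the value must
 * be written in decimal (e.g. 0x2025c is written as 131676):
 *
 *	0x2025c -- full ISP reset.
 *	0x2025d -- MPI firmware reset (ISP81xx only).
 *	0x2025e -- FCoE context reset (ISP82xx base port only).
 */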
621static struct bin_attribute sysfs_reset_attr = {
622	.attr = {
623		.name = "reset",
624		.mode = S_IWUSR,
625	},
626	.size = 0,
627	.write = qla2x00_sysfs_write_reset,
628};
629
630static ssize_t
631qla2x00_sysfs_write_edc(struct file *filp, struct kobject *kobj,
632			struct bin_attribute *bin_attr,
633			char *buf, loff_t off, size_t count)
634{
635	struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
636	    struct device, kobj)));
637	struct qla_hw_data *ha = vha->hw;
638	uint16_t dev, adr, opt, len;
639	int rval;
640
641	ha->edc_data_len = 0;
642
643	if (!capable(CAP_SYS_ADMIN) || off != 0 || count < 8)
644		return 0;
645
646	if (!ha->edc_data) {
647		ha->edc_data = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL,
648		    &ha->edc_data_dma);
649		if (!ha->edc_data) {
650			ql_log(ql_log_warn, vha, 0x7073,
651			    "Unable to allocate memory for EDC write.\n");
652			return 0;
653		}
654	}
655
656	dev = le16_to_cpup((void *)&buf[0]);
657	adr = le16_to_cpup((void *)&buf[2]);
658	opt = le16_to_cpup((void *)&buf[4]);
659	len = le16_to_cpup((void *)&buf[6]);
660
661	if (!(opt & BIT_0))
662		if (len == 0 || len > DMA_POOL_SIZE || len > count - 8)
663			return -EINVAL;
664
665	memcpy(ha->edc_data, &buf[8], len);
666
667	rval = qla2x00_write_sfp(vha, ha->edc_data_dma, ha->edc_data,
668	    dev, adr, len, opt);
669	if (rval != QLA_SUCCESS) {
670		ql_log(ql_log_warn, vha, 0x7074,
		    "Unable to write EDC (%x) %02x:%04x:%02x:%02x:%02x\n",
672		    rval, dev, adr, opt, len, buf[8]);
673		return 0;
674	}
675
676	return count;
677}
678
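/*
 * Both the "edc" and "edc_status" write methods expect an 8-byte,
 * little-endian header, optionally followed by a payload (the field names
 * below follow the local variables dev/adr/opt/len):
 *
 *	bytes 0-1 -- two-wire device address
 *	bytes 2-3 -- register offset
 *	bytes 4-5 -- option flags
 *	bytes 6-7 -- transfer length
 *
 * "edc" forwards the payload to the transceiver with qla2x00_write_sfp();
 * "edc_status" issues a qla2x00_read_sfp() and stores the result, which is
 * then retrieved through the edc_status read method.
 */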
679static struct bin_attribute sysfs_edc_attr = {
680	.attr = {
681		.name = "edc",
682		.mode = S_IWUSR,
683	},
684	.size = 0,
685	.write = qla2x00_sysfs_write_edc,
686};
687
688static ssize_t
689qla2x00_sysfs_write_edc_status(struct file *filp, struct kobject *kobj,
690			struct bin_attribute *bin_attr,
691			char *buf, loff_t off, size_t count)
692{
693	struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
694	    struct device, kobj)));
695	struct qla_hw_data *ha = vha->hw;
696	uint16_t dev, adr, opt, len;
697	int rval;
698
699	ha->edc_data_len = 0;
700
701	if (!capable(CAP_SYS_ADMIN) || off != 0 || count < 8)
702		return 0;
703
704	if (!ha->edc_data) {
705		ha->edc_data = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL,
706		    &ha->edc_data_dma);
707		if (!ha->edc_data) {
708			ql_log(ql_log_warn, vha, 0x708c,
709			    "Unable to allocate memory for EDC status.\n");
710			return 0;
711		}
712	}
713
714	dev = le16_to_cpup((void *)&buf[0]);
715	adr = le16_to_cpup((void *)&buf[2]);
716	opt = le16_to_cpup((void *)&buf[4]);
717	len = le16_to_cpup((void *)&buf[6]);
718
719	if (!(opt & BIT_0))
720		if (len == 0 || len > DMA_POOL_SIZE)
721			return -EINVAL;
722
723	memset(ha->edc_data, 0, len);
724	rval = qla2x00_read_sfp(vha, ha->edc_data_dma, ha->edc_data,
725			dev, adr, len, opt);
726	if (rval != QLA_SUCCESS) {
727		ql_log(ql_log_info, vha, 0x7075,
		    "Unable to write EDC status (%x) %02x:%04x:%02x:%02x.\n",
729		    rval, dev, adr, opt, len);
730		return 0;
731	}
732
733	ha->edc_data_len = len;
734
735	return count;
736}
737
738static ssize_t
739qla2x00_sysfs_read_edc_status(struct file *filp, struct kobject *kobj,
740			   struct bin_attribute *bin_attr,
741			   char *buf, loff_t off, size_t count)
742{
743	struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
744	    struct device, kobj)));
745	struct qla_hw_data *ha = vha->hw;
746
747	if (!capable(CAP_SYS_ADMIN) || off != 0 || count == 0)
748		return 0;
749
750	if (!ha->edc_data || ha->edc_data_len == 0 || ha->edc_data_len > count)
751		return -EINVAL;
752
753	memcpy(buf, ha->edc_data, ha->edc_data_len);
754
755	return ha->edc_data_len;
756}
757
758static struct bin_attribute sysfs_edc_status_attr = {
759	.attr = {
760		.name = "edc_status",
761		.mode = S_IRUSR | S_IWUSR,
762	},
763	.size = 0,
764	.write = qla2x00_sysfs_write_edc_status,
765	.read = qla2x00_sysfs_read_edc_status,
766};
767
768static ssize_t
769qla2x00_sysfs_read_xgmac_stats(struct file *filp, struct kobject *kobj,
770		       struct bin_attribute *bin_attr,
771		       char *buf, loff_t off, size_t count)
772{
773	struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
774	    struct device, kobj)));
775	struct qla_hw_data *ha = vha->hw;
776	int rval;
777	uint16_t actual_size;
778
779	if (!capable(CAP_SYS_ADMIN) || off != 0 || count > XGMAC_DATA_SIZE)
780		return 0;
781
782	if (ha->xgmac_data)
783		goto do_read;
784
785	ha->xgmac_data = dma_alloc_coherent(&ha->pdev->dev, XGMAC_DATA_SIZE,
786	    &ha->xgmac_data_dma, GFP_KERNEL);
787	if (!ha->xgmac_data) {
788		ql_log(ql_log_warn, vha, 0x7076,
789		    "Unable to allocate memory for XGMAC read-data.\n");
790		return 0;
791	}
792
793do_read:
794	actual_size = 0;
795	memset(ha->xgmac_data, 0, XGMAC_DATA_SIZE);
796
797	rval = qla2x00_get_xgmac_stats(vha, ha->xgmac_data_dma,
798	    XGMAC_DATA_SIZE, &actual_size);
799	if (rval != QLA_SUCCESS) {
800		ql_log(ql_log_warn, vha, 0x7077,
801		    "Unable to read XGMAC data (%x).\n", rval);
802		count = 0;
803	}
804
805	count = actual_size > count ? count : actual_size;
806	memcpy(buf, ha->xgmac_data, count);
807
808	return count;
809}
810
811static struct bin_attribute sysfs_xgmac_stats_attr = {
812	.attr = {
813		.name = "xgmac_stats",
814		.mode = S_IRUSR,
815	},
816	.size = 0,
817	.read = qla2x00_sysfs_read_xgmac_stats,
818};
819
820static ssize_t
821qla2x00_sysfs_read_dcbx_tlv(struct file *filp, struct kobject *kobj,
822		       struct bin_attribute *bin_attr,
823		       char *buf, loff_t off, size_t count)
824{
825	struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
826	    struct device, kobj)));
827	struct qla_hw_data *ha = vha->hw;
828	int rval;
829	uint16_t actual_size;
830
831	if (!capable(CAP_SYS_ADMIN) || off != 0 || count > DCBX_TLV_DATA_SIZE)
832		return 0;
833
834	if (ha->dcbx_tlv)
835		goto do_read;
836
837	ha->dcbx_tlv = dma_alloc_coherent(&ha->pdev->dev, DCBX_TLV_DATA_SIZE,
838	    &ha->dcbx_tlv_dma, GFP_KERNEL);
839	if (!ha->dcbx_tlv) {
840		ql_log(ql_log_warn, vha, 0x7078,
841		    "Unable to allocate memory for DCBX TLV read-data.\n");
842		return 0;
843	}
844
845do_read:
846	actual_size = 0;
847	memset(ha->dcbx_tlv, 0, DCBX_TLV_DATA_SIZE);
848
849	rval = qla2x00_get_dcbx_params(vha, ha->dcbx_tlv_dma,
850	    DCBX_TLV_DATA_SIZE);
851	if (rval != QLA_SUCCESS) {
852		ql_log(ql_log_warn, vha, 0x7079,
853		    "Unable to read DCBX TLV (%x).\n", rval);
854		count = 0;
855	}
856
857	memcpy(buf, ha->dcbx_tlv, count);
858
859	return count;
860}
861
862static struct bin_attribute sysfs_dcbx_tlv_attr = {
863	.attr = {
864		.name = "dcbx_tlv",
865		.mode = S_IRUSR,
866	},
867	.size = 0,
868	.read = qla2x00_sysfs_read_dcbx_tlv,
869};
870
871static struct sysfs_entry {
872	char *name;
873	struct bin_attribute *attr;
874	int is4GBp_only;
875} bin_file_entries[] = {
876	{ "fw_dump", &sysfs_fw_dump_attr, },
877	{ "nvram", &sysfs_nvram_attr, },
878	{ "optrom", &sysfs_optrom_attr, },
879	{ "optrom_ctl", &sysfs_optrom_ctl_attr, },
880	{ "vpd", &sysfs_vpd_attr, 1 },
881	{ "sfp", &sysfs_sfp_attr, 1 },
882	{ "reset", &sysfs_reset_attr, },
883	{ "edc", &sysfs_edc_attr, 2 },
884	{ "edc_status", &sysfs_edc_status_attr, 2 },
885	{ "xgmac_stats", &sysfs_xgmac_stats_attr, 3 },
886	{ "dcbx_tlv", &sysfs_dcbx_tlv_attr, 3 },
887	{ NULL },
888};
889
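/*
 * The is4GBp_only field above gates attribute creation/removal on the
 * adapter generation, as interpreted by the two helpers below:
 *
 *	0 -- created on every adapter.
 *	1 -- FWI2-capable (4Gb and newer) adapters only.
 *	2 -- ISP25xx only.
 *	3 -- ISP81xx/ISP82xx-class (CNA) adapters only.
 */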
890void
891qla2x00_alloc_sysfs_attr(scsi_qla_host_t *vha)
892{
893	struct Scsi_Host *host = vha->host;
894	struct sysfs_entry *iter;
895	int ret;
896
897	for (iter = bin_file_entries; iter->name; iter++) {
898		if (iter->is4GBp_only && !IS_FWI2_CAPABLE(vha->hw))
899			continue;
900		if (iter->is4GBp_only == 2 && !IS_QLA25XX(vha->hw))
901			continue;
902		if (iter->is4GBp_only == 3 && !(IS_QLA8XXX_TYPE(vha->hw)))
903			continue;
904
905		ret = sysfs_create_bin_file(&host->shost_gendev.kobj,
906		    iter->attr);
907		if (ret)
908			ql_log(ql_log_warn, vha, 0x00f3,
909			    "Unable to create sysfs %s binary attribute (%d).\n",
910			    iter->name, ret);
911		else
912			ql_dbg(ql_dbg_init, vha, 0x00f4,
			    "Successfully created sysfs %s binary attribute.\n",
914			    iter->name);
915	}
916}
917
918void
919qla2x00_free_sysfs_attr(scsi_qla_host_t *vha)
920{
921	struct Scsi_Host *host = vha->host;
922	struct sysfs_entry *iter;
923	struct qla_hw_data *ha = vha->hw;
924
925	for (iter = bin_file_entries; iter->name; iter++) {
926		if (iter->is4GBp_only && !IS_FWI2_CAPABLE(ha))
927			continue;
928		if (iter->is4GBp_only == 2 && !IS_QLA25XX(ha))
929			continue;
		if (iter->is4GBp_only == 3 && !(IS_QLA8XXX_TYPE(ha)))
931			continue;
932
933		sysfs_remove_bin_file(&host->shost_gendev.kobj,
934		    iter->attr);
935	}
936
937	if (ha->beacon_blink_led == 1)
938		ha->isp_ops->beacon_off(vha);
939}
940
941/* Scsi_Host attributes. */
942
943static ssize_t
944qla2x00_drvr_version_show(struct device *dev,
945			  struct device_attribute *attr, char *buf)
946{
947	return snprintf(buf, PAGE_SIZE, "%s\n", qla2x00_version_str);
948}
949
950static ssize_t
951qla2x00_fw_version_show(struct device *dev,
952			struct device_attribute *attr, char *buf)
953{
954	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
955	struct qla_hw_data *ha = vha->hw;
956	char fw_str[128];
957
958	return snprintf(buf, PAGE_SIZE, "%s\n",
959	    ha->isp_ops->fw_version_str(vha, fw_str));
960}
961
962static ssize_t
963qla2x00_serial_num_show(struct device *dev, struct device_attribute *attr,
964			char *buf)
965{
966	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
967	struct qla_hw_data *ha = vha->hw;
968	uint32_t sn;
969
970	if (IS_FWI2_CAPABLE(ha)) {
971		qla2xxx_get_vpd_field(vha, "SN", buf, PAGE_SIZE);
972		return snprintf(buf, PAGE_SIZE, "%s\n", buf);
973	}
974
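	/*
	 * Legacy adapters pack the serial number into three NVRAM bytes and
	 * render it as a letter plus five digits.  For example, a packed
	 * value of 123456 would be shown as "B23456"
	 * ('A' + 123456 / 100000 == 'B', 123456 % 100000 == 23456).
	 */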
975	sn = ((ha->serial0 & 0x1f) << 16) | (ha->serial2 << 8) | ha->serial1;
976	return snprintf(buf, PAGE_SIZE, "%c%05d\n", 'A' + sn / 100000,
977	    sn % 100000);
978}
979
980static ssize_t
981qla2x00_isp_name_show(struct device *dev, struct device_attribute *attr,
982		      char *buf)
983{
984	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
985	return snprintf(buf, PAGE_SIZE, "ISP%04X\n", vha->hw->pdev->device);
986}
987
988static ssize_t
989qla2x00_isp_id_show(struct device *dev, struct device_attribute *attr,
990		    char *buf)
991{
992	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
993	struct qla_hw_data *ha = vha->hw;
994	return snprintf(buf, PAGE_SIZE, "%04x %04x %04x %04x\n",
995	    ha->product_id[0], ha->product_id[1], ha->product_id[2],
996	    ha->product_id[3]);
997}
998
999static ssize_t
1000qla2x00_model_name_show(struct device *dev, struct device_attribute *attr,
1001			char *buf)
1002{
1003	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1004	return snprintf(buf, PAGE_SIZE, "%s\n", vha->hw->model_number);
1005}
1006
1007static ssize_t
1008qla2x00_model_desc_show(struct device *dev, struct device_attribute *attr,
1009			char *buf)
1010{
1011	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1012	return snprintf(buf, PAGE_SIZE, "%s\n",
1013	    vha->hw->model_desc ? vha->hw->model_desc : "");
1014}
1015
1016static ssize_t
1017qla2x00_pci_info_show(struct device *dev, struct device_attribute *attr,
1018		      char *buf)
1019{
1020	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1021	char pci_info[30];
1022
1023	return snprintf(buf, PAGE_SIZE, "%s\n",
1024	    vha->hw->isp_ops->pci_info_str(vha, pci_info));
1025}
1026
1027static ssize_t
1028qla2x00_link_state_show(struct device *dev, struct device_attribute *attr,
1029			char *buf)
1030{
1031	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1032	struct qla_hw_data *ha = vha->hw;
1033	int len = 0;
1034
1035	if (atomic_read(&vha->loop_state) == LOOP_DOWN ||
1036	    atomic_read(&vha->loop_state) == LOOP_DEAD ||
1037	    vha->device_flags & DFLG_NO_CABLE)
1038		len = snprintf(buf, PAGE_SIZE, "Link Down\n");
1039	else if (atomic_read(&vha->loop_state) != LOOP_READY ||
1040	    test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) ||
1041	    test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags))
1042		len = snprintf(buf, PAGE_SIZE, "Unknown Link State\n");
1043	else {
1044		len = snprintf(buf, PAGE_SIZE, "Link Up - ");
1045
1046		switch (ha->current_topology) {
1047		case ISP_CFG_NL:
1048			len += snprintf(buf + len, PAGE_SIZE-len, "Loop\n");
1049			break;
1050		case ISP_CFG_FL:
1051			len += snprintf(buf + len, PAGE_SIZE-len, "FL_Port\n");
1052			break;
1053		case ISP_CFG_N:
1054			len += snprintf(buf + len, PAGE_SIZE-len,
1055			    "N_Port to N_Port\n");
1056			break;
1057		case ISP_CFG_F:
1058			len += snprintf(buf + len, PAGE_SIZE-len, "F_Port\n");
1059			break;
1060		default:
1061			len += snprintf(buf + len, PAGE_SIZE-len, "Loop\n");
1062			break;
1063		}
1064	}
1065	return len;
1066}
1067
1068static ssize_t
1069qla2x00_zio_show(struct device *dev, struct device_attribute *attr,
1070		 char *buf)
1071{
1072	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1073	int len = 0;
1074
1075	switch (vha->hw->zio_mode) {
1076	case QLA_ZIO_MODE_6:
1077		len += snprintf(buf + len, PAGE_SIZE-len, "Mode 6\n");
1078		break;
1079	case QLA_ZIO_DISABLED:
1080		len += snprintf(buf + len, PAGE_SIZE-len, "Disabled\n");
1081		break;
1082	}
1083	return len;
1084}
1085
1086static ssize_t
1087qla2x00_zio_store(struct device *dev, struct device_attribute *attr,
1088		  const char *buf, size_t count)
1089{
1090	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1091	struct qla_hw_data *ha = vha->hw;
1092	int val = 0;
1093	uint16_t zio_mode;
1094
1095	if (!IS_ZIO_SUPPORTED(ha))
1096		return -ENOTSUPP;
1097
1098	if (sscanf(buf, "%d", &val) != 1)
1099		return -EINVAL;
1100
1101	if (val)
1102		zio_mode = QLA_ZIO_MODE_6;
1103	else
1104		zio_mode = QLA_ZIO_DISABLED;
1105
1106	/* Update per-hba values and queue a reset. */
1107	if (zio_mode != QLA_ZIO_DISABLED || ha->zio_mode != QLA_ZIO_DISABLED) {
1108		ha->zio_mode = zio_mode;
1109		set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
1110	}
1111	return strlen(buf);
1112}
1113
1114static ssize_t
1115qla2x00_zio_timer_show(struct device *dev, struct device_attribute *attr,
1116		       char *buf)
1117{
1118	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1119
1120	return snprintf(buf, PAGE_SIZE, "%d us\n", vha->hw->zio_timer * 100);
1121}
1122
1123static ssize_t
1124qla2x00_zio_timer_store(struct device *dev, struct device_attribute *attr,
1125			const char *buf, size_t count)
1126{
1127	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1128	int val = 0;
1129	uint16_t zio_timer;
1130
1131	if (sscanf(buf, "%d", &val) != 1)
1132		return -EINVAL;
1133	if (val > 25500 || val < 100)
1134		return -ERANGE;
1135
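	/*
	 * The firmware ZIO delay timer is kept in 100-microsecond units: a
	 * value of, e.g., 500 written here is stored as 5 and reads back
	 * through the show method above as "500 us".
	 */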
1136	zio_timer = (uint16_t)(val / 100);
1137	vha->hw->zio_timer = zio_timer;
1138
1139	return strlen(buf);
1140}
1141
1142static ssize_t
1143qla2x00_beacon_show(struct device *dev, struct device_attribute *attr,
1144		    char *buf)
1145{
1146	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1147	int len = 0;
1148
1149	if (vha->hw->beacon_blink_led)
1150		len += snprintf(buf + len, PAGE_SIZE-len, "Enabled\n");
1151	else
1152		len += snprintf(buf + len, PAGE_SIZE-len, "Disabled\n");
1153	return len;
1154}
1155
1156static ssize_t
1157qla2x00_beacon_store(struct device *dev, struct device_attribute *attr,
1158		     const char *buf, size_t count)
1159{
1160	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1161	struct qla_hw_data *ha = vha->hw;
1162	int val = 0;
1163	int rval;
1164
1165	if (IS_QLA2100(ha) || IS_QLA2200(ha))
1166		return -EPERM;
1167
1168	if (test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags)) {
1169		ql_log(ql_log_warn, vha, 0x707a,
1170		    "Abort ISP active -- ignoring beacon request.\n");
1171		return -EBUSY;
1172	}
1173
1174	if (sscanf(buf, "%d", &val) != 1)
1175		return -EINVAL;
1176
1177	if (val)
1178		rval = ha->isp_ops->beacon_on(vha);
1179	else
1180		rval = ha->isp_ops->beacon_off(vha);
1181
1182	if (rval != QLA_SUCCESS)
1183		count = 0;
1184
1185	return count;
1186}
1187
1188static ssize_t
1189qla2x00_optrom_bios_version_show(struct device *dev,
1190				 struct device_attribute *attr, char *buf)
1191{
1192	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1193	struct qla_hw_data *ha = vha->hw;
1194	return snprintf(buf, PAGE_SIZE, "%d.%02d\n", ha->bios_revision[1],
1195	    ha->bios_revision[0]);
1196}
1197
1198static ssize_t
1199qla2x00_optrom_efi_version_show(struct device *dev,
1200				struct device_attribute *attr, char *buf)
1201{
1202	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1203	struct qla_hw_data *ha = vha->hw;
1204	return snprintf(buf, PAGE_SIZE, "%d.%02d\n", ha->efi_revision[1],
1205	    ha->efi_revision[0]);
1206}
1207
1208static ssize_t
1209qla2x00_optrom_fcode_version_show(struct device *dev,
1210				  struct device_attribute *attr, char *buf)
1211{
1212	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1213	struct qla_hw_data *ha = vha->hw;
1214	return snprintf(buf, PAGE_SIZE, "%d.%02d\n", ha->fcode_revision[1],
1215	    ha->fcode_revision[0]);
1216}
1217
1218static ssize_t
1219qla2x00_optrom_fw_version_show(struct device *dev,
1220			       struct device_attribute *attr, char *buf)
1221{
1222	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1223	struct qla_hw_data *ha = vha->hw;
1224	return snprintf(buf, PAGE_SIZE, "%d.%02d.%02d %d\n",
1225	    ha->fw_revision[0], ha->fw_revision[1], ha->fw_revision[2],
1226	    ha->fw_revision[3]);
1227}
1228
1229static ssize_t
1230qla2x00_optrom_gold_fw_version_show(struct device *dev,
1231    struct device_attribute *attr, char *buf)
1232{
1233	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1234	struct qla_hw_data *ha = vha->hw;
1235
1236	if (!IS_QLA81XX(ha))
1237		return snprintf(buf, PAGE_SIZE, "\n");
1238
1239	return snprintf(buf, PAGE_SIZE, "%d.%02d.%02d (%d)\n",
1240	    ha->gold_fw_version[0], ha->gold_fw_version[1],
1241	    ha->gold_fw_version[2], ha->gold_fw_version[3]);
1242}
1243
1244static ssize_t
1245qla2x00_total_isp_aborts_show(struct device *dev,
1246			      struct device_attribute *attr, char *buf)
1247{
1248	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1249	struct qla_hw_data *ha = vha->hw;
1250	return snprintf(buf, PAGE_SIZE, "%d\n",
1251	    ha->qla_stats.total_isp_aborts);
1252}
1253
1254static ssize_t
1255qla24xx_84xx_fw_version_show(struct device *dev,
1256	struct device_attribute *attr, char *buf)
1257{
1258	int rval = QLA_SUCCESS;
1259	uint16_t status[2] = {0, 0};
1260	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1261	struct qla_hw_data *ha = vha->hw;
1262
1263	if (!IS_QLA84XX(ha))
1264		return snprintf(buf, PAGE_SIZE, "\n");
1265
1266	if (ha->cs84xx->op_fw_version == 0)
1267		rval = qla84xx_verify_chip(vha, status);
1268
1269	if ((rval == QLA_SUCCESS) && (status[0] == 0))
1270		return snprintf(buf, PAGE_SIZE, "%u\n",
1271			(uint32_t)ha->cs84xx->op_fw_version);
1272
1273	return snprintf(buf, PAGE_SIZE, "\n");
1274}
1275
1276static ssize_t
1277qla2x00_mpi_version_show(struct device *dev, struct device_attribute *attr,
1278    char *buf)
1279{
1280	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1281	struct qla_hw_data *ha = vha->hw;
1282
1283	if (!IS_QLA81XX(ha))
1284		return snprintf(buf, PAGE_SIZE, "\n");
1285
1286	return snprintf(buf, PAGE_SIZE, "%d.%02d.%02d (%x)\n",
1287	    ha->mpi_version[0], ha->mpi_version[1], ha->mpi_version[2],
1288	    ha->mpi_capabilities);
1289}
1290
1291static ssize_t
1292qla2x00_phy_version_show(struct device *dev, struct device_attribute *attr,
1293    char *buf)
1294{
1295	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1296	struct qla_hw_data *ha = vha->hw;
1297
1298	if (!IS_QLA81XX(ha))
1299		return snprintf(buf, PAGE_SIZE, "\n");
1300
1301	return snprintf(buf, PAGE_SIZE, "%d.%02d.%02d\n",
1302	    ha->phy_version[0], ha->phy_version[1], ha->phy_version[2]);
1303}
1304
1305static ssize_t
1306qla2x00_flash_block_size_show(struct device *dev,
1307			      struct device_attribute *attr, char *buf)
1308{
1309	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1310	struct qla_hw_data *ha = vha->hw;
1311
1312	return snprintf(buf, PAGE_SIZE, "0x%x\n", ha->fdt_block_size);
1313}
1314
1315static ssize_t
1316qla2x00_vlan_id_show(struct device *dev, struct device_attribute *attr,
1317    char *buf)
1318{
1319	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1320
1321	if (!IS_QLA8XXX_TYPE(vha->hw))
1322		return snprintf(buf, PAGE_SIZE, "\n");
1323
1324	return snprintf(buf, PAGE_SIZE, "%d\n", vha->fcoe_vlan_id);
1325}
1326
1327static ssize_t
1328qla2x00_vn_port_mac_address_show(struct device *dev,
1329    struct device_attribute *attr, char *buf)
1330{
1331	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1332
1333	if (!IS_QLA8XXX_TYPE(vha->hw))
1334		return snprintf(buf, PAGE_SIZE, "\n");
1335
1336	return snprintf(buf, PAGE_SIZE, "%02x:%02x:%02x:%02x:%02x:%02x\n",
1337	    vha->fcoe_vn_port_mac[5], vha->fcoe_vn_port_mac[4],
1338	    vha->fcoe_vn_port_mac[3], vha->fcoe_vn_port_mac[2],
1339	    vha->fcoe_vn_port_mac[1], vha->fcoe_vn_port_mac[0]);
1340}
1341
1342static ssize_t
1343qla2x00_fabric_param_show(struct device *dev, struct device_attribute *attr,
1344    char *buf)
1345{
1346	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1347
1348	return snprintf(buf, PAGE_SIZE, "%d\n", vha->hw->switch_cap);
1349}
1350
1351static ssize_t
1352qla2x00_thermal_temp_show(struct device *dev,
1353	struct device_attribute *attr, char *buf)
1354{
1355	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1356	int rval = QLA_FUNCTION_FAILED;
1357	uint16_t temp, frac;
1358
1359	if (!vha->hw->flags.thermal_supported)
1360		return snprintf(buf, PAGE_SIZE, "\n");
1361
1362	temp = frac = 0;
1363	if (test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) ||
1364	    test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags))
1365		ql_log(ql_log_warn, vha, 0x707b,
1366		    "ISP reset active.\n");
1367	else if (!vha->hw->flags.eeh_busy)
1368		rval = qla2x00_get_thermal_temp(vha, &temp, &frac);
1369	if (rval != QLA_SUCCESS)
1370		temp = frac = 0;
1371
1372	return snprintf(buf, PAGE_SIZE, "%d.%02d\n", temp, frac);
1373}
1374
1375static ssize_t
1376qla2x00_fw_state_show(struct device *dev, struct device_attribute *attr,
1377    char *buf)
1378{
1379	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1380	int rval = QLA_FUNCTION_FAILED;
1381	uint16_t state[5];
1382
1383	if (test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) ||
1384		test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags))
1385		ql_log(ql_log_warn, vha, 0x707c,
1386		    "ISP reset active.\n");
1387	else if (!vha->hw->flags.eeh_busy)
1388		rval = qla2x00_get_firmware_state(vha, state);
1389	if (rval != QLA_SUCCESS)
1390		memset(state, -1, sizeof(state));
1391
1392	return snprintf(buf, PAGE_SIZE, "0x%x 0x%x 0x%x 0x%x 0x%x\n", state[0],
1393	    state[1], state[2], state[3], state[4]);
1394}
1395
1396static DEVICE_ATTR(driver_version, S_IRUGO, qla2x00_drvr_version_show, NULL);
1397static DEVICE_ATTR(fw_version, S_IRUGO, qla2x00_fw_version_show, NULL);
1398static DEVICE_ATTR(serial_num, S_IRUGO, qla2x00_serial_num_show, NULL);
1399static DEVICE_ATTR(isp_name, S_IRUGO, qla2x00_isp_name_show, NULL);
1400static DEVICE_ATTR(isp_id, S_IRUGO, qla2x00_isp_id_show, NULL);
1401static DEVICE_ATTR(model_name, S_IRUGO, qla2x00_model_name_show, NULL);
1402static DEVICE_ATTR(model_desc, S_IRUGO, qla2x00_model_desc_show, NULL);
1403static DEVICE_ATTR(pci_info, S_IRUGO, qla2x00_pci_info_show, NULL);
1404static DEVICE_ATTR(link_state, S_IRUGO, qla2x00_link_state_show, NULL);
1405static DEVICE_ATTR(zio, S_IRUGO | S_IWUSR, qla2x00_zio_show, qla2x00_zio_store);
1406static DEVICE_ATTR(zio_timer, S_IRUGO | S_IWUSR, qla2x00_zio_timer_show,
1407		   qla2x00_zio_timer_store);
1408static DEVICE_ATTR(beacon, S_IRUGO | S_IWUSR, qla2x00_beacon_show,
1409		   qla2x00_beacon_store);
1410static DEVICE_ATTR(optrom_bios_version, S_IRUGO,
1411		   qla2x00_optrom_bios_version_show, NULL);
1412static DEVICE_ATTR(optrom_efi_version, S_IRUGO,
1413		   qla2x00_optrom_efi_version_show, NULL);
1414static DEVICE_ATTR(optrom_fcode_version, S_IRUGO,
1415		   qla2x00_optrom_fcode_version_show, NULL);
1416static DEVICE_ATTR(optrom_fw_version, S_IRUGO, qla2x00_optrom_fw_version_show,
1417		   NULL);
1418static DEVICE_ATTR(optrom_gold_fw_version, S_IRUGO,
1419    qla2x00_optrom_gold_fw_version_show, NULL);
1420static DEVICE_ATTR(84xx_fw_version, S_IRUGO, qla24xx_84xx_fw_version_show,
1421		   NULL);
1422static DEVICE_ATTR(total_isp_aborts, S_IRUGO, qla2x00_total_isp_aborts_show,
1423		   NULL);
1424static DEVICE_ATTR(mpi_version, S_IRUGO, qla2x00_mpi_version_show, NULL);
1425static DEVICE_ATTR(phy_version, S_IRUGO, qla2x00_phy_version_show, NULL);
1426static DEVICE_ATTR(flash_block_size, S_IRUGO, qla2x00_flash_block_size_show,
1427		   NULL);
1428static DEVICE_ATTR(vlan_id, S_IRUGO, qla2x00_vlan_id_show, NULL);
1429static DEVICE_ATTR(vn_port_mac_address, S_IRUGO,
1430		   qla2x00_vn_port_mac_address_show, NULL);
1431static DEVICE_ATTR(fabric_param, S_IRUGO, qla2x00_fabric_param_show, NULL);
1432static DEVICE_ATTR(fw_state, S_IRUGO, qla2x00_fw_state_show, NULL);
1433static DEVICE_ATTR(thermal_temp, S_IRUGO, qla2x00_thermal_temp_show, NULL);
1434
1435struct device_attribute *qla2x00_host_attrs[] = {
1436	&dev_attr_driver_version,
1437	&dev_attr_fw_version,
1438	&dev_attr_serial_num,
1439	&dev_attr_isp_name,
1440	&dev_attr_isp_id,
1441	&dev_attr_model_name,
1442	&dev_attr_model_desc,
1443	&dev_attr_pci_info,
1444	&dev_attr_link_state,
1445	&dev_attr_zio,
1446	&dev_attr_zio_timer,
1447	&dev_attr_beacon,
1448	&dev_attr_optrom_bios_version,
1449	&dev_attr_optrom_efi_version,
1450	&dev_attr_optrom_fcode_version,
1451	&dev_attr_optrom_fw_version,
1452	&dev_attr_84xx_fw_version,
1453	&dev_attr_total_isp_aborts,
1454	&dev_attr_mpi_version,
1455	&dev_attr_phy_version,
1456	&dev_attr_flash_block_size,
1457	&dev_attr_vlan_id,
1458	&dev_attr_vn_port_mac_address,
1459	&dev_attr_fabric_param,
1460	&dev_attr_fw_state,
1461	&dev_attr_optrom_gold_fw_version,
1462	&dev_attr_thermal_temp,
1463	NULL,
1464};
1465
1466/* Host attributes. */
1467
1468static void
1469qla2x00_get_host_port_id(struct Scsi_Host *shost)
1470{
1471	scsi_qla_host_t *vha = shost_priv(shost);
1472
1473	fc_host_port_id(shost) = vha->d_id.b.domain << 16 |
1474	    vha->d_id.b.area << 8 | vha->d_id.b.al_pa;
1475}
1476
1477static void
1478qla2x00_get_host_speed(struct Scsi_Host *shost)
1479{
1480	struct qla_hw_data *ha = ((struct scsi_qla_host *)
1481					(shost_priv(shost)))->hw;
1482	u32 speed = FC_PORTSPEED_UNKNOWN;
1483
1484	switch (ha->link_data_rate) {
1485	case PORT_SPEED_1GB:
1486		speed = FC_PORTSPEED_1GBIT;
1487		break;
1488	case PORT_SPEED_2GB:
1489		speed = FC_PORTSPEED_2GBIT;
1490		break;
1491	case PORT_SPEED_4GB:
1492		speed = FC_PORTSPEED_4GBIT;
1493		break;
1494	case PORT_SPEED_8GB:
1495		speed = FC_PORTSPEED_8GBIT;
1496		break;
1497	case PORT_SPEED_10GB:
1498		speed = FC_PORTSPEED_10GBIT;
1499		break;
1500	}
1501	fc_host_speed(shost) = speed;
1502}
1503
1504static void
1505qla2x00_get_host_port_type(struct Scsi_Host *shost)
1506{
1507	scsi_qla_host_t *vha = shost_priv(shost);
1508	uint32_t port_type = FC_PORTTYPE_UNKNOWN;
1509
1510	if (vha->vp_idx) {
1511		fc_host_port_type(shost) = FC_PORTTYPE_NPIV;
1512		return;
1513	}
1514	switch (vha->hw->current_topology) {
1515	case ISP_CFG_NL:
1516		port_type = FC_PORTTYPE_LPORT;
1517		break;
1518	case ISP_CFG_FL:
1519		port_type = FC_PORTTYPE_NLPORT;
1520		break;
1521	case ISP_CFG_N:
1522		port_type = FC_PORTTYPE_PTP;
1523		break;
1524	case ISP_CFG_F:
1525		port_type = FC_PORTTYPE_NPORT;
1526		break;
1527	}
1528	fc_host_port_type(shost) = port_type;
1529}
1530
1531static void
1532qla2x00_get_starget_node_name(struct scsi_target *starget)
1533{
1534	struct Scsi_Host *host = dev_to_shost(starget->dev.parent);
1535	scsi_qla_host_t *vha = shost_priv(host);
1536	fc_port_t *fcport;
1537	u64 node_name = 0;
1538
1539	list_for_each_entry(fcport, &vha->vp_fcports, list) {
1540		if (fcport->rport &&
1541		    starget->id == fcport->rport->scsi_target_id) {
1542			node_name = wwn_to_u64(fcport->node_name);
1543			break;
1544		}
1545	}
1546
1547	fc_starget_node_name(starget) = node_name;
1548}
1549
1550static void
1551qla2x00_get_starget_port_name(struct scsi_target *starget)
1552{
1553	struct Scsi_Host *host = dev_to_shost(starget->dev.parent);
1554	scsi_qla_host_t *vha = shost_priv(host);
1555	fc_port_t *fcport;
1556	u64 port_name = 0;
1557
1558	list_for_each_entry(fcport, &vha->vp_fcports, list) {
1559		if (fcport->rport &&
1560		    starget->id == fcport->rport->scsi_target_id) {
1561			port_name = wwn_to_u64(fcport->port_name);
1562			break;
1563		}
1564	}
1565
1566	fc_starget_port_name(starget) = port_name;
1567}
1568
1569static void
1570qla2x00_get_starget_port_id(struct scsi_target *starget)
1571{
1572	struct Scsi_Host *host = dev_to_shost(starget->dev.parent);
1573	scsi_qla_host_t *vha = shost_priv(host);
1574	fc_port_t *fcport;
1575	uint32_t port_id = ~0U;
1576
1577	list_for_each_entry(fcport, &vha->vp_fcports, list) {
1578		if (fcport->rport &&
1579		    starget->id == fcport->rport->scsi_target_id) {
1580			port_id = fcport->d_id.b.domain << 16 |
1581			    fcport->d_id.b.area << 8 | fcport->d_id.b.al_pa;
1582			break;
1583		}
1584	}
1585
1586	fc_starget_port_id(starget) = port_id;
1587}
1588
1589static void
1590qla2x00_set_rport_loss_tmo(struct fc_rport *rport, uint32_t timeout)
1591{
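	/*
	 * dev_loss_tmo is in seconds.  A requested value of 0 is clamped to
	 * 1 here, presumably so the rport always survives at least briefly
	 * before the transport gives up on it.
	 */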
1592	if (timeout)
1593		rport->dev_loss_tmo = timeout;
1594	else
1595		rport->dev_loss_tmo = 1;
1596}
1597
1598static void
1599qla2x00_dev_loss_tmo_callbk(struct fc_rport *rport)
1600{
1601	struct Scsi_Host *host = rport_to_shost(rport);
1602	fc_port_t *fcport = *(fc_port_t **)rport->dd_data;
1603	unsigned long flags;
1604
1605	if (!fcport)
1606		return;
1607
1608	/* Now that the rport has been deleted, set the fcport state to
1609	   FCS_DEVICE_DEAD */
1610	qla2x00_set_fcport_state(fcport, FCS_DEVICE_DEAD);
1611
1612	/*
1613	 * Transport has effectively 'deleted' the rport, clear
1614	 * all local references.
1615	 */
1616	spin_lock_irqsave(host->host_lock, flags);
1617	fcport->rport = fcport->drport = NULL;
1618	*((fc_port_t **)rport->dd_data) = NULL;
1619	spin_unlock_irqrestore(host->host_lock, flags);
1620
1621	if (test_bit(ABORT_ISP_ACTIVE, &fcport->vha->dpc_flags))
1622		return;
1623
1624	if (unlikely(pci_channel_offline(fcport->vha->hw->pdev))) {
1625		qla2x00_abort_all_cmds(fcport->vha, DID_NO_CONNECT << 16);
1626		return;
1627	}
1628}
1629
1630static void
1631qla2x00_terminate_rport_io(struct fc_rport *rport)
1632{
1633	fc_port_t *fcport = *(fc_port_t **)rport->dd_data;
1634
1635	if (!fcport)
1636		return;
1637
1638	if (test_bit(ABORT_ISP_ACTIVE, &fcport->vha->dpc_flags))
1639		return;
1640
1641	if (unlikely(pci_channel_offline(fcport->vha->hw->pdev))) {
1642		qla2x00_abort_all_cmds(fcport->vha, DID_NO_CONNECT << 16);
1643		return;
1644	}
1645	/*
1646	 * At this point all fcport's software-states are cleared.  Perform any
1647	 * final cleanup of firmware resources (PCBs and XCBs).
1648	 */
1649	if (fcport->loop_id != FC_NO_LOOP_ID &&
1650	    !test_bit(UNLOADING, &fcport->vha->dpc_flags))
1651		fcport->vha->hw->isp_ops->fabric_logout(fcport->vha,
1652			fcport->loop_id, fcport->d_id.b.domain,
1653			fcport->d_id.b.area, fcport->d_id.b.al_pa);
1654}
1655
1656static int
1657qla2x00_issue_lip(struct Scsi_Host *shost)
1658{
1659	scsi_qla_host_t *vha = shost_priv(shost);
1660
1661	qla2x00_loop_reset(vha);
1662	return 0;
1663}
1664
1665static struct fc_host_statistics *
1666qla2x00_get_fc_host_stats(struct Scsi_Host *shost)
1667{
1668	scsi_qla_host_t *vha = shost_priv(shost);
1669	struct qla_hw_data *ha = vha->hw;
1670	struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
1671	int rval;
1672	struct link_statistics *stats;
1673	dma_addr_t stats_dma;
1674	struct fc_host_statistics *pfc_host_stat;
1675
1676	pfc_host_stat = &ha->fc_host_stat;
1677	memset(pfc_host_stat, -1, sizeof(struct fc_host_statistics));
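	/*
	 * Pre-fill with -1 so counters the hardware does not report (or that
	 * an early exit below never collects) read back as "unknown" rather
	 * than a misleading zero.
	 */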
1678
1679	if (test_bit(UNLOADING, &vha->dpc_flags))
1680		goto done;
1681
1682	if (unlikely(pci_channel_offline(ha->pdev)))
1683		goto done;
1684
1685	stats = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &stats_dma);
1686	if (stats == NULL) {
1687		ql_log(ql_log_warn, vha, 0x707d,
1688		    "Failed to allocate memory for stats.\n");
1689		goto done;
1690	}
1691	memset(stats, 0, DMA_POOL_SIZE);
1692
1693	rval = QLA_FUNCTION_FAILED;
1694	if (IS_FWI2_CAPABLE(ha)) {
1695		rval = qla24xx_get_isp_stats(base_vha, stats, stats_dma);
1696	} else if (atomic_read(&base_vha->loop_state) == LOOP_READY &&
1697		    !test_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags) &&
1698		    !test_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags) &&
1699		    !ha->dpc_active) {
1700		/* Must be in a 'READY' state for statistics retrieval. */
1701		rval = qla2x00_get_link_status(base_vha, base_vha->loop_id,
1702						stats, stats_dma);
1703	}
1704
1705	if (rval != QLA_SUCCESS)
1706		goto done_free;
1707
1708	pfc_host_stat->link_failure_count = stats->link_fail_cnt;
1709	pfc_host_stat->loss_of_sync_count = stats->loss_sync_cnt;
1710	pfc_host_stat->loss_of_signal_count = stats->loss_sig_cnt;
1711	pfc_host_stat->prim_seq_protocol_err_count = stats->prim_seq_err_cnt;
1712	pfc_host_stat->invalid_tx_word_count = stats->inval_xmit_word_cnt;
1713	pfc_host_stat->invalid_crc_count = stats->inval_crc_cnt;
1714	if (IS_FWI2_CAPABLE(ha)) {
1715		pfc_host_stat->lip_count = stats->lip_cnt;
1716		pfc_host_stat->tx_frames = stats->tx_frames;
1717		pfc_host_stat->rx_frames = stats->rx_frames;
1718		pfc_host_stat->dumped_frames = stats->dumped_frames;
1719		pfc_host_stat->nos_count = stats->nos_rcvd;
1720	}
1721	pfc_host_stat->fcp_input_megabytes = ha->qla_stats.input_bytes >> 20;
1722	pfc_host_stat->fcp_output_megabytes = ha->qla_stats.output_bytes >> 20;
1723
1724done_free:
1725	dma_pool_free(ha->s_dma_pool, stats, stats_dma);
1726done:
1727	return pfc_host_stat;
1728}
1729
1730static void
1731qla2x00_get_host_symbolic_name(struct Scsi_Host *shost)
1732{
1733	scsi_qla_host_t *vha = shost_priv(shost);
1734
1735	qla2x00_get_sym_node_name(vha, fc_host_symbolic_name(shost));
1736}
1737
1738static void
1739qla2x00_set_host_system_hostname(struct Scsi_Host *shost)
1740{
1741	scsi_qla_host_t *vha = shost_priv(shost);
1742
1743	set_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags);
1744}
1745
1746static void
1747qla2x00_get_host_fabric_name(struct Scsi_Host *shost)
1748{
1749	scsi_qla_host_t *vha = shost_priv(shost);
1750	uint8_t node_name[WWN_SIZE] = { 0xFF, 0xFF, 0xFF, 0xFF, \
1751		0xFF, 0xFF, 0xFF, 0xFF};
1752	u64 fabric_name = wwn_to_u64(node_name);
1753
1754	if (vha->device_flags & SWITCH_FOUND)
1755		fabric_name = wwn_to_u64(vha->fabric_node_name);
1756
1757	fc_host_fabric_name(shost) = fabric_name;
1758}
1759
1760static void
1761qla2x00_get_host_port_state(struct Scsi_Host *shost)
1762{
1763	scsi_qla_host_t *vha = shost_priv(shost);
1764	struct scsi_qla_host *base_vha = pci_get_drvdata(vha->hw->pdev);
1765
1766	if (!base_vha->flags.online)
1767		fc_host_port_state(shost) = FC_PORTSTATE_OFFLINE;
1768	else if (atomic_read(&base_vha->loop_state) == LOOP_TIMEOUT)
1769		fc_host_port_state(shost) = FC_PORTSTATE_UNKNOWN;
1770	else
1771		fc_host_port_state(shost) = FC_PORTSTATE_ONLINE;
1772}
1773
1774static int
1775qla24xx_vport_create(struct fc_vport *fc_vport, bool disable)
1776{
1777	int	ret = 0;
1778	uint8_t	qos = 0;
1779	scsi_qla_host_t *base_vha = shost_priv(fc_vport->shost);
1780	scsi_qla_host_t *vha = NULL;
1781	struct qla_hw_data *ha = base_vha->hw;
1782	uint16_t options = 0;
1783	int	cnt;
1784	struct req_que *req = ha->req_q_map[0];
1785
1786	ret = qla24xx_vport_create_req_sanity_check(fc_vport);
1787	if (ret) {
1788		ql_log(ql_log_warn, vha, 0x707e,
1789		    "Vport sanity check failed, status %x\n", ret);
1790		return (ret);
1791	}
1792
1793	vha = qla24xx_create_vhost(fc_vport);
1794	if (vha == NULL) {
1795		ql_log(ql_log_warn, vha, 0x707f, "Vport create host failed.\n");
1796		return FC_VPORT_FAILED;
1797	}
1798	if (disable) {
1799		atomic_set(&vha->vp_state, VP_OFFLINE);
1800		fc_vport_set_state(fc_vport, FC_VPORT_DISABLED);
1801	} else
1802		atomic_set(&vha->vp_state, VP_FAILED);
1803
1804	/* ready to create vport */
1805	ql_log(ql_log_info, vha, 0x7080,
1806	    "VP entry id %d assigned.\n", vha->vp_idx);
1807
1808	/* initialized vport states */
1809	atomic_set(&vha->loop_state, LOOP_DOWN);
1810	vha->vp_err_state = VP_ERR_PORTDWN;
1811	vha->vp_prev_err_state = VP_ERR_UNKWN;
1812	/* Check if physical ha port is Up */
1813	if (atomic_read(&base_vha->loop_state) == LOOP_DOWN ||
1814	    atomic_read(&base_vha->loop_state) == LOOP_DEAD) {
1815		/* Don't retry or attempt login of this virtual port */
1816		ql_dbg(ql_dbg_user, vha, 0x7081,
1817		    "Vport loop state is not UP.\n");
1818		atomic_set(&vha->loop_state, LOOP_DEAD);
1819		if (!disable)
1820			fc_vport_set_state(fc_vport, FC_VPORT_LINKDOWN);
1821	}
1822
1823	if (IS_T10_PI_CAPABLE(ha) && ql2xenabledif) {
1824		if (ha->fw_attributes & BIT_4) {
1825			int prot = 0;
1826			vha->flags.difdix_supported = 1;
1827			ql_dbg(ql_dbg_user, vha, 0x7082,
1828			    "Registered for DIF/DIX type 1 and 3 protection.\n");
1829			if (ql2xenabledif == 1)
1830				prot = SHOST_DIX_TYPE0_PROTECTION;
1831			scsi_host_set_prot(vha->host,
1832			    prot | SHOST_DIF_TYPE1_PROTECTION
1833			    | SHOST_DIF_TYPE2_PROTECTION
1834			    | SHOST_DIF_TYPE3_PROTECTION
1835			    | SHOST_DIX_TYPE1_PROTECTION
1836			    | SHOST_DIX_TYPE2_PROTECTION
1837			    | SHOST_DIX_TYPE3_PROTECTION);
1838			scsi_host_set_guard(vha->host, SHOST_DIX_GUARD_CRC);
1839		} else
1840			vha->flags.difdix_supported = 0;
1841	}
1842
1843	if (scsi_add_host_with_dma(vha->host, &fc_vport->dev,
1844				   &ha->pdev->dev)) {
1845		ql_dbg(ql_dbg_user, vha, 0x7083,
1846		    "scsi_add_host failure for VP[%d].\n", vha->vp_idx);
1847		goto vport_create_failed_2;
1848	}
1849
1850	/* initialize attributes */
1851	fc_host_dev_loss_tmo(vha->host) = ha->port_down_retry_count;
1852	fc_host_node_name(vha->host) = wwn_to_u64(vha->node_name);
1853	fc_host_port_name(vha->host) = wwn_to_u64(vha->port_name);
1854	fc_host_supported_classes(vha->host) =
1855		fc_host_supported_classes(base_vha->host);
1856	fc_host_supported_speeds(vha->host) =
1857		fc_host_supported_speeds(base_vha->host);
1858
1859	qla24xx_vport_disable(fc_vport, disable);
1860
1861	if (ha->flags.cpu_affinity_enabled) {
1862		req = ha->req_q_map[1];
1863		ql_dbg(ql_dbg_multiq, vha, 0xc000,
1864		    "Request queue %p attached with "
1865		    "VP[%d], cpu affinity =%d\n",
1866		    req, vha->vp_idx, ha->flags.cpu_affinity_enabled);
1867		goto vport_queue;
1868	} else if (ql2xmaxqueues == 1 || !ha->npiv_info)
1869		goto vport_queue;
1870	/* Create a request queue in QoS mode for the vport */
1871	for (cnt = 0; cnt < ha->nvram_npiv_size; cnt++) {
1872		if (memcmp(ha->npiv_info[cnt].port_name, vha->port_name, 8) == 0
1873			&& memcmp(ha->npiv_info[cnt].node_name, vha->node_name,
1874					8) == 0) {
1875			qos = ha->npiv_info[cnt].q_qos;
1876			break;
1877		}
1878	}
1879	if (qos) {
1880		ret = qla25xx_create_req_que(ha, options, vha->vp_idx, 0, 0,
1881			qos);
1882		if (!ret)
1883			ql_log(ql_log_warn, vha, 0x7084,
1884			    "Can't create request queue for VP[%d]\n",
1885			    vha->vp_idx);
1886		else {
1887			ql_dbg(ql_dbg_multiq, vha, 0xc001,
			    "Request Que:%d (QoS: %d) created for VP[%d]\n",
1889			    ret, qos, vha->vp_idx);
1890			ql_dbg(ql_dbg_user, vha, 0x7085,
			    "Request Que:%d (QoS: %d) created for VP[%d]\n",
1892			    ret, qos, vha->vp_idx);
1893			req = ha->req_q_map[ret];
1894		}
1895	}
1896
1897vport_queue:
1898	vha->req = req;
1899	return 0;
1900
1901vport_create_failed_2:
1902	qla24xx_disable_vp(vha);
1903	qla24xx_deallocate_vp_id(vha);
1904	scsi_host_put(vha->host);
1905	return FC_VPORT_FAILED;
1906}
1907
1908static int
1909qla24xx_vport_delete(struct fc_vport *fc_vport)
1910{
1911	scsi_qla_host_t *vha = fc_vport->dd_data;
1912	struct qla_hw_data *ha = vha->hw;
1913	uint16_t id = vha->vp_idx;
1914
1915	while (test_bit(LOOP_RESYNC_ACTIVE, &vha->dpc_flags) ||
1916	    test_bit(FCPORT_UPDATE_NEEDED, &vha->dpc_flags))
1917		msleep(1000);
1918
1919	qla24xx_disable_vp(vha);
1920
1921	vha->flags.delete_progress = 1;
1922
1923	fc_remove_host(vha->host);
1924
1925	scsi_remove_host(vha->host);
1926
1927	/* Allow timer to run to drain queued items, when removing vp */
1928	qla24xx_deallocate_vp_id(vha);
1929
1930	if (vha->timer_active) {
1931		qla2x00_vp_stop_timer(vha);
1932		ql_dbg(ql_dbg_user, vha, 0x7086,
1933		    "Timer for the VP[%d] has stopped\n", vha->vp_idx);
1934	}
1935
1936	/* No pending activities shall be there on the vha now */
1937	if (ql2xextended_error_logging & ql_dbg_user)
1938		msleep(random32()%10);  /* Just to see if something falls on
1939					* the net we have placed below */
1940
1941	BUG_ON(atomic_read(&vha->vref_count));
1942
1943	qla2x00_free_fcports(vha);
1944
1945	mutex_lock(&ha->vport_lock);
1946	ha->cur_vport_count--;
1947	clear_bit(vha->vp_idx, ha->vp_idx_map);
1948	mutex_unlock(&ha->vport_lock);
1949
1950	if (vha->req->id && !ha->flags.cpu_affinity_enabled) {
1951		if (qla25xx_delete_req_que(vha, vha->req) != QLA_SUCCESS)
1952			ql_log(ql_log_warn, vha, 0x7087,
1953			    "Queue delete failed.\n");
1954	}
1955
1956	scsi_host_put(vha->host);
1957	ql_log(ql_log_info, vha, 0x7088, "VP[%d] deleted.\n", id);
1958	return 0;
1959}
1960
1961static int
1962qla24xx_vport_disable(struct fc_vport *fc_vport, bool disable)
1963{
1964	scsi_qla_host_t *vha = fc_vport->dd_data;
1965
1966	if (disable)
1967		qla24xx_disable_vp(vha);
1968	else
1969		qla24xx_enable_vp(vha);
1970
1971	return 0;
1972}
1973
1974struct fc_function_template qla2xxx_transport_functions = {
1975
1976	.show_host_node_name = 1,
1977	.show_host_port_name = 1,
1978	.show_host_supported_classes = 1,
1979	.show_host_supported_speeds = 1,
1980
1981	.get_host_port_id = qla2x00_get_host_port_id,
1982	.show_host_port_id = 1,
1983	.get_host_speed = qla2x00_get_host_speed,
1984	.show_host_speed = 1,
1985	.get_host_port_type = qla2x00_get_host_port_type,
1986	.show_host_port_type = 1,
1987	.get_host_symbolic_name = qla2x00_get_host_symbolic_name,
1988	.show_host_symbolic_name = 1,
1989	.set_host_system_hostname = qla2x00_set_host_system_hostname,
1990	.show_host_system_hostname = 1,
1991	.get_host_fabric_name = qla2x00_get_host_fabric_name,
1992	.show_host_fabric_name = 1,
1993	.get_host_port_state = qla2x00_get_host_port_state,
1994	.show_host_port_state = 1,
1995
1996	.dd_fcrport_size = sizeof(struct fc_port *),
1997	.show_rport_supported_classes = 1,
1998
1999	.get_starget_node_name = qla2x00_get_starget_node_name,
2000	.show_starget_node_name = 1,
2001	.get_starget_port_name = qla2x00_get_starget_port_name,
2002	.show_starget_port_name = 1,
2003	.get_starget_port_id  = qla2x00_get_starget_port_id,
2004	.show_starget_port_id = 1,
2005
2006	.set_rport_dev_loss_tmo = qla2x00_set_rport_loss_tmo,
2007	.show_rport_dev_loss_tmo = 1,
2008
2009	.issue_fc_host_lip = qla2x00_issue_lip,
2010	.dev_loss_tmo_callbk = qla2x00_dev_loss_tmo_callbk,
2011	.terminate_rport_io = qla2x00_terminate_rport_io,
2012	.get_fc_host_stats = qla2x00_get_fc_host_stats,
2013
2014	.vport_create = qla24xx_vport_create,
2015	.vport_disable = qla24xx_vport_disable,
2016	.vport_delete = qla24xx_vport_delete,
2017	.bsg_request = qla24xx_bsg_request,
2018	.bsg_timeout = qla24xx_bsg_timeout,
2019};
2020
2021struct fc_function_template qla2xxx_transport_vport_functions = {
2022
2023	.show_host_node_name = 1,
2024	.show_host_port_name = 1,
2025	.show_host_supported_classes = 1,
2026
2027	.get_host_port_id = qla2x00_get_host_port_id,
2028	.show_host_port_id = 1,
2029	.get_host_speed = qla2x00_get_host_speed,
2030	.show_host_speed = 1,
2031	.get_host_port_type = qla2x00_get_host_port_type,
2032	.show_host_port_type = 1,
2033	.get_host_symbolic_name = qla2x00_get_host_symbolic_name,
2034	.show_host_symbolic_name = 1,
2035	.set_host_system_hostname = qla2x00_set_host_system_hostname,
2036	.show_host_system_hostname = 1,
2037	.get_host_fabric_name = qla2x00_get_host_fabric_name,
2038	.show_host_fabric_name = 1,
2039	.get_host_port_state = qla2x00_get_host_port_state,
2040	.show_host_port_state = 1,
2041
2042	.dd_fcrport_size = sizeof(struct fc_port *),
2043	.show_rport_supported_classes = 1,
2044
2045	.get_starget_node_name = qla2x00_get_starget_node_name,
2046	.show_starget_node_name = 1,
2047	.get_starget_port_name = qla2x00_get_starget_port_name,
2048	.show_starget_port_name = 1,
2049	.get_starget_port_id  = qla2x00_get_starget_port_id,
2050	.show_starget_port_id = 1,
2051
2052	.set_rport_dev_loss_tmo = qla2x00_set_rport_loss_tmo,
2053	.show_rport_dev_loss_tmo = 1,
2054
2055	.issue_fc_host_lip = qla2x00_issue_lip,
2056	.dev_loss_tmo_callbk = qla2x00_dev_loss_tmo_callbk,
2057	.terminate_rport_io = qla2x00_terminate_rport_io,
2058	.get_fc_host_stats = qla2x00_get_fc_host_stats,
2059	.bsg_request = qla24xx_bsg_request,
2060	.bsg_timeout = qla24xx_bsg_timeout,
2061};
2062
2063void
2064qla2x00_init_host_attr(scsi_qla_host_t *vha)
2065{
2066	struct qla_hw_data *ha = vha->hw;
2067	u32 speed = FC_PORTSPEED_UNKNOWN;
2068
2069	fc_host_dev_loss_tmo(vha->host) = ha->port_down_retry_count;
2070	fc_host_node_name(vha->host) = wwn_to_u64(vha->node_name);
2071	fc_host_port_name(vha->host) = wwn_to_u64(vha->port_name);
2072	fc_host_supported_classes(vha->host) = FC_COS_CLASS3;
2073	fc_host_max_npiv_vports(vha->host) = ha->max_npiv_vports;
2074	fc_host_npiv_vports_inuse(vha->host) = ha->cur_vport_count;
2075
2076	if (IS_QLA8XXX_TYPE(ha))
2077		speed = FC_PORTSPEED_10GBIT;
2078	else if (IS_QLA25XX(ha))
2079		speed = FC_PORTSPEED_8GBIT | FC_PORTSPEED_4GBIT |
2080		    FC_PORTSPEED_2GBIT | FC_PORTSPEED_1GBIT;
2081	else if (IS_QLA24XX_TYPE(ha))
2082		speed = FC_PORTSPEED_4GBIT | FC_PORTSPEED_2GBIT |
2083		    FC_PORTSPEED_1GBIT;
2084	else if (IS_QLA23XX(ha))
2085		speed = FC_PORTSPEED_2GBIT | FC_PORTSPEED_1GBIT;
2086	else
2087		speed = FC_PORTSPEED_1GBIT;
2088	fc_host_supported_speeds(vha->host) = speed;
2089}
2090