qla_attr.c revision 49e85c23beb1f12aba59450126ff7e803fbc767d
1/*
2 * QLogic Fibre Channel HBA Driver
3 * Copyright (c)  2003-2011 QLogic Corporation
4 *
5 * See LICENSE.qla2xxx for copyright and licensing details.
6 */
7#include "qla_def.h"
8
9#include <linux/kthread.h>
10#include <linux/vmalloc.h>
11#include <linux/slab.h>
12#include <linux/delay.h>
13
14static int qla24xx_vport_disable(struct fc_vport *, bool);
15
16/* SYSFS attributes --------------------------------------------------------- */
17
18static ssize_t
19qla2x00_sysfs_read_fw_dump(struct file *filp, struct kobject *kobj,
20			   struct bin_attribute *bin_attr,
21			   char *buf, loff_t off, size_t count)
22{
23	struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
24	    struct device, kobj)));
25	struct qla_hw_data *ha = vha->hw;
26	int rval = 0;
27
28	if (ha->fw_dump_reading == 0)
29		return 0;
30
31	if (IS_QLA82XX(ha)) {
32		if (off < ha->md_template_size) {
33			rval = memory_read_from_buffer(buf, count,
34			    &off, ha->md_tmplt_hdr, ha->md_template_size);
35			return rval;
36		}
37		off -= ha->md_template_size;
38		rval = memory_read_from_buffer(buf, count,
39		    &off, ha->md_dump, ha->md_dump_size);
40		return rval;
41	} else
42		return memory_read_from_buffer(buf, count, &off, ha->fw_dump,
43					ha->fw_dump_len);
44}
45
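/*
 * fw_dump control: writing a decimal value selects an action --
 * 0 releases a previously captured dump, 1 exposes a captured dump for
 * reading, 2 (re)allocates the dump buffer, 3 forces a firmware/system
 * error so a dump is generated, 4 reports MiniDump support (ISP82xx) and
 * 5 schedules an ISP abort (ISP82xx).
 */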
46static ssize_t
47qla2x00_sysfs_write_fw_dump(struct file *filp, struct kobject *kobj,
48			    struct bin_attribute *bin_attr,
49			    char *buf, loff_t off, size_t count)
50{
51	struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
52	    struct device, kobj)));
53	struct qla_hw_data *ha = vha->hw;
54	int reading;
55
56	if (off != 0)
57		return (0);
58
59	reading = simple_strtol(buf, NULL, 10);
60	switch (reading) {
61	case 0:
62		if (!ha->fw_dump_reading)
63			break;
64
65		ql_log(ql_log_info, vha, 0x705d,
66		    "Firmware dump cleared on (%ld).\n", vha->host_no);
67
68		if (IS_QLA82XX(vha->hw)) {
69			qla82xx_md_free(vha);
70			qla82xx_md_prep(vha);
71		}
72		ha->fw_dump_reading = 0;
73		ha->fw_dumped = 0;
74		break;
75	case 1:
76		if (ha->fw_dumped && !ha->fw_dump_reading) {
77			ha->fw_dump_reading = 1;
78
79			ql_log(ql_log_info, vha, 0x705e,
80			    "Raw firmware dump ready for read on (%ld).\n",
81			    vha->host_no);
82		}
83		break;
84	case 2:
85		qla2x00_alloc_fw_dump(vha);
86		break;
87	case 3:
88		if (IS_QLA82XX(ha)) {
89			qla82xx_idc_lock(ha);
90			qla82xx_set_reset_owner(vha);
91			qla82xx_idc_unlock(ha);
92		} else
93			qla2x00_system_error(vha);
94		break;
95	case 4:
96		if (IS_QLA82XX(ha)) {
97			if (ha->md_tmplt_hdr)
98				ql_dbg(ql_dbg_user, vha, 0x705b,
99				    "MiniDump supported with this firmware.\n");
100			else
101				ql_dbg(ql_dbg_user, vha, 0x709d,
102				    "MiniDump not supported with this firmware.\n");
103		}
104		break;
105	case 5:
106		if (IS_QLA82XX(ha))
107			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
108		break;
109	}
110	return count;
111}
112
113static struct bin_attribute sysfs_fw_dump_attr = {
114	.attr = {
115		.name = "fw_dump",
116		.mode = S_IRUSR | S_IWUSR,
117	},
118	.size = 0,
119	.read = qla2x00_sysfs_read_fw_dump,
120	.write = qla2x00_sysfs_write_fw_dump,
121};
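
/*
 * Example: capturing a firmware dump from user space (host number is
 * illustrative; the binary attributes live under the SCSI host's sysfs
 * device directory):
 *
 *	echo 1 > /sys/class/scsi_host/host0/device/fw_dump
 *	cat /sys/class/scsi_host/host0/device/fw_dump > fw_dump.bin
 *	echo 0 > /sys/class/scsi_host/host0/device/fw_dump
 */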
122
123static ssize_t
124qla2x00_sysfs_read_nvram(struct file *filp, struct kobject *kobj,
125			 struct bin_attribute *bin_attr,
126			 char *buf, loff_t off, size_t count)
127{
128	struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
129	    struct device, kobj)));
130	struct qla_hw_data *ha = vha->hw;
131
132	if (!capable(CAP_SYS_ADMIN))
133		return 0;
134
135	if (IS_NOCACHE_VPD_TYPE(ha))
136		ha->isp_ops->read_optrom(vha, ha->nvram, ha->flt_region_nvram << 2,
137		    ha->nvram_size);
138	return memory_read_from_buffer(buf, count, &off, ha->nvram,
139					ha->nvram_size);
140}
141
142static ssize_t
143qla2x00_sysfs_write_nvram(struct file *filp, struct kobject *kobj,
144			  struct bin_attribute *bin_attr,
145			  char *buf, loff_t off, size_t count)
146{
147	struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
148	    struct device, kobj)));
149	struct qla_hw_data *ha = vha->hw;
150	uint16_t	cnt;
151
152	if (!capable(CAP_SYS_ADMIN) || off != 0 || count != ha->nvram_size ||
153	    !ha->isp_ops->write_nvram)
154		return -EINVAL;
155
156	/* Checksum NVRAM. */
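	/*
	 * The final 32-bit word (FWI2-capable) or byte (legacy) is set to
	 * the two's complement of the running sum so the image checksums
	 * to zero.
	 */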
157	if (IS_FWI2_CAPABLE(ha)) {
158		uint32_t *iter;
159		uint32_t chksum;
160
161		iter = (uint32_t *)buf;
162		chksum = 0;
163		for (cnt = 0; cnt < ((count >> 2) - 1); cnt++)
164			chksum += le32_to_cpu(*iter++);
165		chksum = ~chksum + 1;
166		*iter = cpu_to_le32(chksum);
167	} else {
168		uint8_t *iter;
169		uint8_t chksum;
170
171		iter = (uint8_t *)buf;
172		chksum = 0;
173		for (cnt = 0; cnt < count - 1; cnt++)
174			chksum += *iter++;
175		chksum = ~chksum + 1;
176		*iter = chksum;
177	}
178
179	if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) {
180		ql_log(ql_log_warn, vha, 0x705f,
181		    "HBA not online, failing NVRAM update.\n");
182		return -EAGAIN;
183	}
184
185	/* Write NVRAM. */
186	ha->isp_ops->write_nvram(vha, (uint8_t *)buf, ha->nvram_base, count);
187	ha->isp_ops->read_nvram(vha, (uint8_t *)ha->nvram, ha->nvram_base,
188	    count);
189
190	ql_dbg(ql_dbg_user, vha, 0x7060,
191	    "Setting ISP_ABORT_NEEDED\n");
192	/* NVRAM settings take effect immediately. */
193	set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
194	qla2xxx_wake_dpc(vha);
195	qla2x00_wait_for_chip_reset(vha);
196
197	return count;
198}
199
200static struct bin_attribute sysfs_nvram_attr = {
201	.attr = {
202		.name = "nvram",
203		.mode = S_IRUSR | S_IWUSR,
204	},
205	.size = 512,
206	.read = qla2x00_sysfs_read_nvram,
207	.write = qla2x00_sysfs_write_nvram,
208};
209
210static ssize_t
211qla2x00_sysfs_read_optrom(struct file *filp, struct kobject *kobj,
212			  struct bin_attribute *bin_attr,
213			  char *buf, loff_t off, size_t count)
214{
215	struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
216	    struct device, kobj)));
217	struct qla_hw_data *ha = vha->hw;
218
219	if (ha->optrom_state != QLA_SREADING)
220		return 0;
221
222	return memory_read_from_buffer(buf, count, &off, ha->optrom_buffer,
223					ha->optrom_region_size);
224}
225
226static ssize_t
227qla2x00_sysfs_write_optrom(struct file *filp, struct kobject *kobj,
228			   struct bin_attribute *bin_attr,
229			   char *buf, loff_t off, size_t count)
230{
231	struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
232	    struct device, kobj)));
233	struct qla_hw_data *ha = vha->hw;
234
235	if (ha->optrom_state != QLA_SWRITING)
236		return -EINVAL;
237	if (off > ha->optrom_region_size)
238		return -ERANGE;
239	if (off + count > ha->optrom_region_size)
240		count = ha->optrom_region_size - off;
241
242	memcpy(&ha->optrom_buffer[off], buf, count);
243
244	return count;
245}
246
247static struct bin_attribute sysfs_optrom_attr = {
248	.attr = {
249		.name = "optrom",
250		.mode = S_IRUSR | S_IWUSR,
251	},
252	.size = 0,
253	.read = qla2x00_sysfs_read_optrom,
254	.write = qla2x00_sysfs_write_optrom,
255};
256
257static ssize_t
258qla2x00_sysfs_write_optrom_ctl(struct file *filp, struct kobject *kobj,
259			       struct bin_attribute *bin_attr,
260			       char *buf, loff_t off, size_t count)
261{
262	struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
263	    struct device, kobj)));
264	struct qla_hw_data *ha = vha->hw;
265
266	uint32_t start = 0;
267	uint32_t size = ha->optrom_size;
268	int val, valid;
269
270	if (off)
271		return -EINVAL;
272
273	if (unlikely(pci_channel_offline(ha->pdev)))
274		return -EAGAIN;
275
276	if (sscanf(buf, "%d:%x:%x", &val, &start, &size) < 1)
277		return -EINVAL;
278	if (start > ha->optrom_size)
279		return -EINVAL;
280
281	switch (val) {
282	case 0:
283		if (ha->optrom_state != QLA_SREADING &&
284		    ha->optrom_state != QLA_SWRITING)
285			return -EINVAL;
286
287		ha->optrom_state = QLA_SWAITING;
288
289		ql_dbg(ql_dbg_user, vha, 0x7061,
290		    "Freeing flash region allocation -- 0x%x bytes.\n",
291		    ha->optrom_region_size);
292
293		vfree(ha->optrom_buffer);
294		ha->optrom_buffer = NULL;
295		break;
296	case 1:
297		if (ha->optrom_state != QLA_SWAITING)
298			return -EINVAL;
299
300		ha->optrom_region_start = start;
301		ha->optrom_region_size = start + size > ha->optrom_size ?
302		    ha->optrom_size - start : size;
303
304		ha->optrom_state = QLA_SREADING;
305		ha->optrom_buffer = vmalloc(ha->optrom_region_size);
306		if (ha->optrom_buffer == NULL) {
307			ql_log(ql_log_warn, vha, 0x7062,
308			    "Unable to allocate memory for optrom retrieval "
309			    "(%x).\n", ha->optrom_region_size);
310
311			ha->optrom_state = QLA_SWAITING;
312			return -ENOMEM;
313		}
314
315		if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) {
316			ql_log(ql_log_warn, vha, 0x7063,
317			    "HBA not online, failing flash read.\n");
318			return -EAGAIN;
319		}
320
321		ql_dbg(ql_dbg_user, vha, 0x7064,
322		    "Reading flash region -- 0x%x/0x%x.\n",
323		    ha->optrom_region_start, ha->optrom_region_size);
324
325		memset(ha->optrom_buffer, 0, ha->optrom_region_size);
326		ha->isp_ops->read_optrom(vha, ha->optrom_buffer,
327		    ha->optrom_region_start, ha->optrom_region_size);
328		break;
329	case 2:
330		if (ha->optrom_state != QLA_SWAITING)
331			return -EINVAL;
332
333		/*
334		 * We need to be more restrictive on which FLASH regions are
335		 * allowed to be updated via user-space.  Regions accessible
336		 * via this method include:
337		 *
338		 * ISP21xx/ISP22xx/ISP23xx type boards:
339		 *
340		 * 	0x000000 -> 0x020000 -- Boot code.
341		 *
342		 * ISP2322/ISP24xx type boards:
343		 *
344		 * 	0x000000 -> 0x07ffff -- Boot code.
345		 * 	0x080000 -> 0x0fffff -- Firmware.
346		 *
347		 * ISP25xx type boards:
348		 *
349		 * 	0x000000 -> 0x07ffff -- Boot code.
350		 * 	0x080000 -> 0x0fffff -- Firmware.
351		 * 	0x120000 -> 0x12ffff -- VPD and HBA parameters.
352		 */
353		valid = 0;
354		if (ha->optrom_size == OPTROM_SIZE_2300 && start == 0)
355			valid = 1;
356		else if (start == (ha->flt_region_boot * 4) ||
357		    start == (ha->flt_region_fw * 4))
358			valid = 1;
359		else if (IS_QLA25XX(ha) || IS_QLA8XXX_TYPE(ha))
360			valid = 1;
361		if (!valid) {
362			ql_log(ql_log_warn, vha, 0x7065,
363			    "Invalid start region 0x%x/0x%x.\n", start, size);
364			return -EINVAL;
365		}
366
367		ha->optrom_region_start = start;
368		ha->optrom_region_size = start + size > ha->optrom_size ?
369		    ha->optrom_size - start : size;
370
371		ha->optrom_state = QLA_SWRITING;
372		ha->optrom_buffer = vmalloc(ha->optrom_region_size);
373		if (ha->optrom_buffer == NULL) {
374			ql_log(ql_log_warn, vha, 0x7066,
375			    "Unable to allocate memory for optrom update "
376			    "(%x)\n", ha->optrom_region_size);
377
378			ha->optrom_state = QLA_SWAITING;
379			return -ENOMEM;
380		}
381
382		ql_dbg(ql_dbg_user, vha, 0x7067,
383		    "Staging flash region write -- 0x%x/0x%x.\n",
384		    ha->optrom_region_start, ha->optrom_region_size);
385
386		memset(ha->optrom_buffer, 0, ha->optrom_region_size);
387		break;
388	case 3:
389		if (ha->optrom_state != QLA_SWRITING)
390			return -EINVAL;
391
392		if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) {
393			ql_log(ql_log_warn, vha, 0x7068,
394			    "HBA not online, failing flash update.\n");
395			return -EAGAIN;
396		}
397
398		ql_dbg(ql_dbg_user, vha, 0x7069,
399		    "Writing flash region -- 0x%x/0x%x.\n",
400		    ha->optrom_region_start, ha->optrom_region_size);
401
402		ha->isp_ops->write_optrom(vha, ha->optrom_buffer,
403		    ha->optrom_region_start, ha->optrom_region_size);
404		break;
405	default:
406		return -EINVAL;
407	}
408	return count;
409}
410
411static struct bin_attribute sysfs_optrom_ctl_attr = {
412	.attr = {
413		.name = "optrom_ctl",
414		.mode = S_IWUSR,
415	},
416	.size = 0,
417	.write = qla2x00_sysfs_write_optrom_ctl,
418};
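
/*
 * Example flash-update sequence from user space (region offset/size are
 * illustrative; real values come from the board's flash layout table):
 *
 *	echo "2:80000:80000" > optrom_ctl	# stage a region for writing
 *	dd if=new_image.bin of=optrom bs=4k	# fill the staging buffer
 *	echo "3" > optrom_ctl			# burn the buffer to flash
 *	echo "0" > optrom_ctl			# free the staging buffer
 *
 * A read works the same way with "1:<start>:<size>" followed by reading
 * the optrom attribute.
 */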
419
420static ssize_t
421qla2x00_sysfs_read_vpd(struct file *filp, struct kobject *kobj,
422		       struct bin_attribute *bin_attr,
423		       char *buf, loff_t off, size_t count)
424{
425	struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
426	    struct device, kobj)));
427	struct qla_hw_data *ha = vha->hw;
428
429	if (unlikely(pci_channel_offline(ha->pdev)))
430		return -EAGAIN;
431
432	if (!capable(CAP_SYS_ADMIN))
433		return -EINVAL;
434
435	if (IS_NOCACHE_VPD_TYPE(ha))
436		ha->isp_ops->read_optrom(vha, ha->vpd, ha->flt_region_vpd << 2,
437		    ha->vpd_size);
438	return memory_read_from_buffer(buf, count, &off, ha->vpd, ha->vpd_size);
439}
440
441static ssize_t
442qla2x00_sysfs_write_vpd(struct file *filp, struct kobject *kobj,
443			struct bin_attribute *bin_attr,
444			char *buf, loff_t off, size_t count)
445{
446	struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
447	    struct device, kobj)));
448	struct qla_hw_data *ha = vha->hw;
449	uint8_t *tmp_data;
450
451	if (unlikely(pci_channel_offline(ha->pdev)))
452		return 0;
453
454	if (!capable(CAP_SYS_ADMIN) || off != 0 || count != ha->vpd_size ||
455	    !ha->isp_ops->write_nvram)
456		return 0;
457
458	if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) {
459		ql_log(ql_log_warn, vha, 0x706a,
460		    "HBA not online, failing VPD update.\n");
461		return -EAGAIN;
462	}
463
464	/* Write NVRAM. */
465	ha->isp_ops->write_nvram(vha, (uint8_t *)buf, ha->vpd_base, count);
466	ha->isp_ops->read_nvram(vha, (uint8_t *)ha->vpd, ha->vpd_base, count);
467
468	/* Update flash version information for 4Gb & above. */
469	if (!IS_FWI2_CAPABLE(ha))
470		return -EINVAL;
471
472	tmp_data = vmalloc(256);
473	if (!tmp_data) {
474		ql_log(ql_log_warn, vha, 0x706b,
475		    "Unable to allocate memory for VPD information update.\n");
476		return -ENOMEM;
477	}
478	ha->isp_ops->get_flash_version(vha, tmp_data);
479	vfree(tmp_data);
480
481	return count;
482}
483
484static struct bin_attribute sysfs_vpd_attr = {
485	.attr = {
486		.name = "vpd",
487		.mode = S_IRUSR | S_IWUSR,
488	},
489	.size = 0,
490	.read = qla2x00_sysfs_read_vpd,
491	.write = qla2x00_sysfs_write_vpd,
492};
493
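/*
 * sfp: returns the transceiver's register pages at I2C device addresses
 * 0xa0 and 0xa2, read in SFP_BLOCK_SIZE chunks via mailbox commands.
 */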
494static ssize_t
495qla2x00_sysfs_read_sfp(struct file *filp, struct kobject *kobj,
496		       struct bin_attribute *bin_attr,
497		       char *buf, loff_t off, size_t count)
498{
499	struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
500	    struct device, kobj)));
501	struct qla_hw_data *ha = vha->hw;
502	uint16_t iter, addr, offset;
503	int rval;
504
505	if (!capable(CAP_SYS_ADMIN) || off != 0 || count != SFP_DEV_SIZE * 2)
506		return 0;
507
508	if (ha->sfp_data)
509		goto do_read;
510
511	ha->sfp_data = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL,
512	    &ha->sfp_data_dma);
513	if (!ha->sfp_data) {
514		ql_log(ql_log_warn, vha, 0x706c,
515		    "Unable to allocate memory for SFP read-data.\n");
516		return 0;
517	}
518
519do_read:
520	memset(ha->sfp_data, 0, SFP_BLOCK_SIZE);
521	addr = 0xa0;
522	for (iter = 0, offset = 0; iter < (SFP_DEV_SIZE * 2) / SFP_BLOCK_SIZE;
523	    iter++, offset += SFP_BLOCK_SIZE) {
524		if (iter == 4) {
525			/* Skip to next device address. */
526			addr = 0xa2;
527			offset = 0;
528		}
529
530		rval = qla2x00_read_sfp(vha, ha->sfp_data_dma, ha->sfp_data,
531		    addr, offset, SFP_BLOCK_SIZE, 0);
532		if (rval != QLA_SUCCESS) {
533			ql_log(ql_log_warn, vha, 0x706d,
534			    "Unable to read SFP data (%x/%x/%x).\n", rval,
535			    addr, offset);
536
537			return -EIO;
538		}
539		memcpy(buf, ha->sfp_data, SFP_BLOCK_SIZE);
540		buf += SFP_BLOCK_SIZE;
541	}
542
543	return count;
544}
545
546static struct bin_attribute sysfs_sfp_attr = {
547	.attr = {
548		.name = "sfp",
549		.mode = S_IRUSR | S_IWUSR,
550	},
551	.size = SFP_DEV_SIZE * 2,
552	.read = qla2x00_sysfs_read_sfp,
553};
554
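/*
 * reset control: writing a magic value requests a reset -- 0x2025c issues
 * a full ISP reset, 0x2025d restarts the MPI firmware (ISP81xx only) and
 * 0x2025e performs an FCoE context reset (ISP82xx base port only).
 */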
555static ssize_t
556qla2x00_sysfs_write_reset(struct file *filp, struct kobject *kobj,
557			struct bin_attribute *bin_attr,
558			char *buf, loff_t off, size_t count)
559{
560	struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
561	    struct device, kobj)));
562	struct qla_hw_data *ha = vha->hw;
563	struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
564	int type;
565
566	if (off != 0)
567		return -EINVAL;
568
569	type = simple_strtol(buf, NULL, 10);
570	switch (type) {
571	case 0x2025c:
572		ql_log(ql_log_info, vha, 0x706e,
573		    "Issuing ISP reset.\n");
574
575		scsi_block_requests(vha->host);
576		set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
577		if (IS_QLA82XX(ha)) {
578			qla82xx_idc_lock(ha);
579			qla82xx_set_reset_owner(vha);
580			qla82xx_idc_unlock(ha);
581		}
582		qla2xxx_wake_dpc(vha);
583		qla2x00_wait_for_chip_reset(vha);
584		scsi_unblock_requests(vha->host);
585		break;
586	case 0x2025d:
587		if (!IS_QLA81XX(ha))
588			return -EPERM;
589
590		ql_log(ql_log_info, vha, 0x706f,
591		    "Issuing MPI reset.\n");
592
593		/* Make sure FC side is not in reset */
594		qla2x00_wait_for_hba_online(vha);
595
596		/* Issue MPI reset */
597		scsi_block_requests(vha->host);
598		if (qla81xx_restart_mpi_firmware(vha) != QLA_SUCCESS)
599			ql_log(ql_log_warn, vha, 0x7070,
600			    "MPI reset failed.\n");
601		scsi_unblock_requests(vha->host);
602		break;
603	case 0x2025e:
604		if (!IS_QLA82XX(ha) || vha != base_vha) {
605			ql_log(ql_log_info, vha, 0x7071,
606			    "FCoE ctx reset not supported.\n");
607			return -EPERM;
608		}
609
610		ql_log(ql_log_info, vha, 0x7072,
611		    "Issuing FCoE ctx reset.\n");
612		set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
613		qla2xxx_wake_dpc(vha);
614		qla2x00_wait_for_fcoe_ctx_reset(vha);
615		break;
616	}
617	return count;
618}
619
620static struct bin_attribute sysfs_reset_attr = {
621	.attr = {
622		.name = "reset",
623		.mode = S_IWUSR,
624	},
625	.size = 0,
626	.write = qla2x00_sysfs_write_reset,
627};
628
629static ssize_t
630qla2x00_sysfs_write_edc(struct file *filp, struct kobject *kobj,
631			struct bin_attribute *bin_attr,
632			char *buf, loff_t off, size_t count)
633{
634	struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
635	    struct device, kobj)));
636	struct qla_hw_data *ha = vha->hw;
637	uint16_t dev, adr, opt, len;
638	int rval;
639
640	ha->edc_data_len = 0;
641
642	if (!capable(CAP_SYS_ADMIN) || off != 0 || count < 8)
643		return -EINVAL;
644
645	if (!ha->edc_data) {
646		ha->edc_data = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL,
647		    &ha->edc_data_dma);
648		if (!ha->edc_data) {
649			ql_log(ql_log_warn, vha, 0x7073,
650			    "Unable to allocate memory for EDC write.\n");
651			return -ENOMEM;
652		}
653	}
654
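	/*
	 * The write payload begins with an 8-byte little-endian header
	 * (device address, register address, options, data length),
	 * followed by the data to be written.
	 */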
655	dev = le16_to_cpup((void *)&buf[0]);
656	adr = le16_to_cpup((void *)&buf[2]);
657	opt = le16_to_cpup((void *)&buf[4]);
658	len = le16_to_cpup((void *)&buf[6]);
659
660	if (!(opt & BIT_0))
661		if (len == 0 || len > DMA_POOL_SIZE || len > count - 8)
662			return -EINVAL;
663
664	memcpy(ha->edc_data, &buf[8], len);
665
666	rval = qla2x00_write_sfp(vha, ha->edc_data_dma, ha->edc_data,
667	    dev, adr, len, opt);
668	if (rval != QLA_SUCCESS) {
669		ql_log(ql_log_warn, vha, 0x7074,
670		    "Unable to write EDC (%x) %02x:%04x:%02x:%02x:%02hhx\n",
671		    rval, dev, adr, opt, len, buf[8]);
672		return -EIO;
673	}
674
675	return count;
676}
677
678static struct bin_attribute sysfs_edc_attr = {
679	.attr = {
680		.name = "edc",
681		.mode = S_IWUSR,
682	},
683	.size = 0,
684	.write = qla2x00_sysfs_write_edc,
685};
686
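/*
 * edc_status: a write issues an SFP/EDC read using the same 8-byte header
 * layout as "edc" (without trailing data); the result is cached in
 * ha->edc_data and returned by a subsequent read of this attribute.
 */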
687static ssize_t
688qla2x00_sysfs_write_edc_status(struct file *filp, struct kobject *kobj,
689			struct bin_attribute *bin_attr,
690			char *buf, loff_t off, size_t count)
691{
692	struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
693	    struct device, kobj)));
694	struct qla_hw_data *ha = vha->hw;
695	uint16_t dev, adr, opt, len;
696	int rval;
697
698	ha->edc_data_len = 0;
699
700	if (!capable(CAP_SYS_ADMIN) || off != 0 || count < 8)
701		return -EINVAL;
702
703	if (!ha->edc_data) {
704		ha->edc_data = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL,
705		    &ha->edc_data_dma);
706		if (!ha->edc_data) {
707			ql_log(ql_log_warn, vha, 0x708c,
708			    "Unable to allocate memory for EDC status.\n");
709			return -ENOMEM;
710		}
711	}
712
713	dev = le16_to_cpup((void *)&buf[0]);
714	adr = le16_to_cpup((void *)&buf[2]);
715	opt = le16_to_cpup((void *)&buf[4]);
716	len = le16_to_cpup((void *)&buf[6]);
717
718	if (!(opt & BIT_0))
719		if (len == 0 || len > DMA_POOL_SIZE)
720			return -EINVAL;
721
722	memset(ha->edc_data, 0, len);
723	rval = qla2x00_read_sfp(vha, ha->edc_data_dma, ha->edc_data,
724			dev, adr, len, opt);
725	if (rval != QLA_SUCCESS) {
726		ql_log(ql_log_info, vha, 0x7075,
727		    "Unable to read EDC status (%x) %02x:%04x:%02x:%02x.\n",
728		    rval, dev, adr, opt, len);
729		return -EIO;
730	}
731
732	ha->edc_data_len = len;
733
734	return count;
735}
736
737static ssize_t
738qla2x00_sysfs_read_edc_status(struct file *filp, struct kobject *kobj,
739			   struct bin_attribute *bin_attr,
740			   char *buf, loff_t off, size_t count)
741{
742	struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
743	    struct device, kobj)));
744	struct qla_hw_data *ha = vha->hw;
745
746	if (!capable(CAP_SYS_ADMIN) || off != 0 || count == 0)
747		return 0;
748
749	if (!ha->edc_data || ha->edc_data_len == 0 || ha->edc_data_len > count)
750		return -EINVAL;
751
752	memcpy(buf, ha->edc_data, ha->edc_data_len);
753
754	return ha->edc_data_len;
755}
756
757static struct bin_attribute sysfs_edc_status_attr = {
758	.attr = {
759		.name = "edc_status",
760		.mode = S_IRUSR | S_IWUSR,
761	},
762	.size = 0,
763	.write = qla2x00_sysfs_write_edc_status,
764	.read = qla2x00_sysfs_read_edc_status,
765};
766
767static ssize_t
768qla2x00_sysfs_read_xgmac_stats(struct file *filp, struct kobject *kobj,
769		       struct bin_attribute *bin_attr,
770		       char *buf, loff_t off, size_t count)
771{
772	struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
773	    struct device, kobj)));
774	struct qla_hw_data *ha = vha->hw;
775	int rval;
776	uint16_t actual_size;
777
778	if (!capable(CAP_SYS_ADMIN) || off != 0 || count > XGMAC_DATA_SIZE)
779		return 0;
780
781	if (ha->xgmac_data)
782		goto do_read;
783
784	ha->xgmac_data = dma_alloc_coherent(&ha->pdev->dev, XGMAC_DATA_SIZE,
785	    &ha->xgmac_data_dma, GFP_KERNEL);
786	if (!ha->xgmac_data) {
787		ql_log(ql_log_warn, vha, 0x7076,
788		    "Unable to allocate memory for XGMAC read-data.\n");
789		return 0;
790	}
791
792do_read:
793	actual_size = 0;
794	memset(ha->xgmac_data, 0, XGMAC_DATA_SIZE);
795
796	rval = qla2x00_get_xgmac_stats(vha, ha->xgmac_data_dma,
797	    XGMAC_DATA_SIZE, &actual_size);
798	if (rval != QLA_SUCCESS) {
799		ql_log(ql_log_warn, vha, 0x7077,
800		    "Unable to read XGMAC data (%x).\n", rval);
801		count = 0;
802	}
803
804	count = actual_size > count ? count: actual_size;
805	memcpy(buf, ha->xgmac_data, count);
806
807	return count;
808}
809
810static struct bin_attribute sysfs_xgmac_stats_attr = {
811	.attr = {
812		.name = "xgmac_stats",
813		.mode = S_IRUSR,
814	},
815	.size = 0,
816	.read = qla2x00_sysfs_read_xgmac_stats,
817};
818
819static ssize_t
820qla2x00_sysfs_read_dcbx_tlv(struct file *filp, struct kobject *kobj,
821		       struct bin_attribute *bin_attr,
822		       char *buf, loff_t off, size_t count)
823{
824	struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
825	    struct device, kobj)));
826	struct qla_hw_data *ha = vha->hw;
827	int rval;
828	uint16_t actual_size;
829
830	if (!capable(CAP_SYS_ADMIN) || off != 0 || count > DCBX_TLV_DATA_SIZE)
831		return 0;
832
833	if (ha->dcbx_tlv)
834		goto do_read;
835
836	ha->dcbx_tlv = dma_alloc_coherent(&ha->pdev->dev, DCBX_TLV_DATA_SIZE,
837	    &ha->dcbx_tlv_dma, GFP_KERNEL);
838	if (!ha->dcbx_tlv) {
839		ql_log(ql_log_warn, vha, 0x7078,
840		    "Unable to allocate memory for DCBX TLV read-data.\n");
841		return -ENOMEM;
842	}
843
844do_read:
845	actual_size = 0;
846	memset(ha->dcbx_tlv, 0, DCBX_TLV_DATA_SIZE);
847
848	rval = qla2x00_get_dcbx_params(vha, ha->dcbx_tlv_dma,
849	    DCBX_TLV_DATA_SIZE);
850	if (rval != QLA_SUCCESS) {
851		ql_log(ql_log_warn, vha, 0x7079,
852		    "Unable to read DCBX TLV (%x).\n", rval);
853		return -EIO;
854	}
855
856	memcpy(buf, ha->dcbx_tlv, count);
857
858	return count;
859}
860
861static struct bin_attribute sysfs_dcbx_tlv_attr = {
862	.attr = {
863		.name = "dcbx_tlv",
864		.mode = S_IRUSR,
865	},
866	.size = 0,
867	.read = qla2x00_sysfs_read_dcbx_tlv,
868};
869
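/*
 * is4GBp_only selects which ISP families receive the attribute: 0 = all
 * adapters, 1 = FWI2-capable (4Gb and newer), 2 = ISP25xx only,
 * 3 = ISP8xxx only.
 */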
870static struct sysfs_entry {
871	char *name;
872	struct bin_attribute *attr;
873	int is4GBp_only;
874} bin_file_entries[] = {
875	{ "fw_dump", &sysfs_fw_dump_attr, },
876	{ "nvram", &sysfs_nvram_attr, },
877	{ "optrom", &sysfs_optrom_attr, },
878	{ "optrom_ctl", &sysfs_optrom_ctl_attr, },
879	{ "vpd", &sysfs_vpd_attr, 1 },
880	{ "sfp", &sysfs_sfp_attr, 1 },
881	{ "reset", &sysfs_reset_attr, },
882	{ "edc", &sysfs_edc_attr, 2 },
883	{ "edc_status", &sysfs_edc_status_attr, 2 },
884	{ "xgmac_stats", &sysfs_xgmac_stats_attr, 3 },
885	{ "dcbx_tlv", &sysfs_dcbx_tlv_attr, 3 },
886	{ NULL },
887};
888
889void
890qla2x00_alloc_sysfs_attr(scsi_qla_host_t *vha)
891{
892	struct Scsi_Host *host = vha->host;
893	struct sysfs_entry *iter;
894	int ret;
895
896	for (iter = bin_file_entries; iter->name; iter++) {
897		if (iter->is4GBp_only && !IS_FWI2_CAPABLE(vha->hw))
898			continue;
899		if (iter->is4GBp_only == 2 && !IS_QLA25XX(vha->hw))
900			continue;
901		if (iter->is4GBp_only == 3 && !(IS_QLA8XXX_TYPE(vha->hw)))
902			continue;
903
904		ret = sysfs_create_bin_file(&host->shost_gendev.kobj,
905		    iter->attr);
906		if (ret)
907			ql_log(ql_log_warn, vha, 0x00f3,
908			    "Unable to create sysfs %s binary attribute (%d).\n",
909			    iter->name, ret);
910		else
911			ql_dbg(ql_dbg_init, vha, 0x00f4,
912			    "Successfully created sysfs %s binary attribute.\n",
913			    iter->name);
914	}
915}
916
917void
918qla2x00_free_sysfs_attr(scsi_qla_host_t *vha)
919{
920	struct Scsi_Host *host = vha->host;
921	struct sysfs_entry *iter;
922	struct qla_hw_data *ha = vha->hw;
923
924	for (iter = bin_file_entries; iter->name; iter++) {
925		if (iter->is4GBp_only && !IS_FWI2_CAPABLE(ha))
926			continue;
927		if (iter->is4GBp_only == 2 && !IS_QLA25XX(ha))
928			continue;
929		if (iter->is4GBp_only == 3 && !(IS_QLA8XXX_TYPE(vha->hw)))
930			continue;
931
932		sysfs_remove_bin_file(&host->shost_gendev.kobj,
933		    iter->attr);
934	}
935
936	if (ha->beacon_blink_led == 1)
937		ha->isp_ops->beacon_off(vha);
938}
939
940/* Scsi_Host attributes. */
941
942static ssize_t
943qla2x00_drvr_version_show(struct device *dev,
944			  struct device_attribute *attr, char *buf)
945{
946	return snprintf(buf, PAGE_SIZE, "%s\n", qla2x00_version_str);
947}
948
949static ssize_t
950qla2x00_fw_version_show(struct device *dev,
951			struct device_attribute *attr, char *buf)
952{
953	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
954	struct qla_hw_data *ha = vha->hw;
955	char fw_str[128];
956
957	return snprintf(buf, PAGE_SIZE, "%s\n",
958	    ha->isp_ops->fw_version_str(vha, fw_str));
959}
960
961static ssize_t
962qla2x00_serial_num_show(struct device *dev, struct device_attribute *attr,
963			char *buf)
964{
965	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
966	struct qla_hw_data *ha = vha->hw;
967	uint32_t sn;
968
969	if (IS_FWI2_CAPABLE(ha)) {
970		qla2xxx_get_vpd_field(vha, "SN", buf, PAGE_SIZE);
971		return snprintf(buf, PAGE_SIZE, "%s\n", buf);
972	}
973
974	sn = ((ha->serial0 & 0x1f) << 16) | (ha->serial2 << 8) | ha->serial1;
975	return snprintf(buf, PAGE_SIZE, "%c%05d\n", 'A' + sn / 100000,
976	    sn % 100000);
977}
978
979static ssize_t
980qla2x00_isp_name_show(struct device *dev, struct device_attribute *attr,
981		      char *buf)
982{
983	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
984	return snprintf(buf, PAGE_SIZE, "ISP%04X\n", vha->hw->pdev->device);
985}
986
987static ssize_t
988qla2x00_isp_id_show(struct device *dev, struct device_attribute *attr,
989		    char *buf)
990{
991	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
992	struct qla_hw_data *ha = vha->hw;
993	return snprintf(buf, PAGE_SIZE, "%04x %04x %04x %04x\n",
994	    ha->product_id[0], ha->product_id[1], ha->product_id[2],
995	    ha->product_id[3]);
996}
997
998static ssize_t
999qla2x00_model_name_show(struct device *dev, struct device_attribute *attr,
1000			char *buf)
1001{
1002	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1003	return snprintf(buf, PAGE_SIZE, "%s\n", vha->hw->model_number);
1004}
1005
1006static ssize_t
1007qla2x00_model_desc_show(struct device *dev, struct device_attribute *attr,
1008			char *buf)
1009{
1010	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1011	return snprintf(buf, PAGE_SIZE, "%s\n",
1012	    vha->hw->model_desc ? vha->hw->model_desc : "");
1013}
1014
1015static ssize_t
1016qla2x00_pci_info_show(struct device *dev, struct device_attribute *attr,
1017		      char *buf)
1018{
1019	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1020	char pci_info[30];
1021
1022	return snprintf(buf, PAGE_SIZE, "%s\n",
1023	    vha->hw->isp_ops->pci_info_str(vha, pci_info));
1024}
1025
1026static ssize_t
1027qla2x00_link_state_show(struct device *dev, struct device_attribute *attr,
1028			char *buf)
1029{
1030	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1031	struct qla_hw_data *ha = vha->hw;
1032	int len = 0;
1033
1034	if (atomic_read(&vha->loop_state) == LOOP_DOWN ||
1035	    atomic_read(&vha->loop_state) == LOOP_DEAD ||
1036	    vha->device_flags & DFLG_NO_CABLE)
1037		len = snprintf(buf, PAGE_SIZE, "Link Down\n");
1038	else if (atomic_read(&vha->loop_state) != LOOP_READY ||
1039	    test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) ||
1040	    test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags))
1041		len = snprintf(buf, PAGE_SIZE, "Unknown Link State\n");
1042	else {
1043		len = snprintf(buf, PAGE_SIZE, "Link Up - ");
1044
1045		switch (ha->current_topology) {
1046		case ISP_CFG_NL:
1047			len += snprintf(buf + len, PAGE_SIZE-len, "Loop\n");
1048			break;
1049		case ISP_CFG_FL:
1050			len += snprintf(buf + len, PAGE_SIZE-len, "FL_Port\n");
1051			break;
1052		case ISP_CFG_N:
1053			len += snprintf(buf + len, PAGE_SIZE-len,
1054			    "N_Port to N_Port\n");
1055			break;
1056		case ISP_CFG_F:
1057			len += snprintf(buf + len, PAGE_SIZE-len, "F_Port\n");
1058			break;
1059		default:
1060			len += snprintf(buf + len, PAGE_SIZE-len, "Loop\n");
1061			break;
1062		}
1063	}
1064	return len;
1065}
1066
1067static ssize_t
1068qla2x00_zio_show(struct device *dev, struct device_attribute *attr,
1069		 char *buf)
1070{
1071	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1072	int len = 0;
1073
1074	switch (vha->hw->zio_mode) {
1075	case QLA_ZIO_MODE_6:
1076		len += snprintf(buf + len, PAGE_SIZE-len, "Mode 6\n");
1077		break;
1078	case QLA_ZIO_DISABLED:
1079		len += snprintf(buf + len, PAGE_SIZE-len, "Disabled\n");
1080		break;
1081	}
1082	return len;
1083}
1084
1085static ssize_t
1086qla2x00_zio_store(struct device *dev, struct device_attribute *attr,
1087		  const char *buf, size_t count)
1088{
1089	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1090	struct qla_hw_data *ha = vha->hw;
1091	int val = 0;
1092	uint16_t zio_mode;
1093
1094	if (!IS_ZIO_SUPPORTED(ha))
1095		return -ENOTSUPP;
1096
1097	if (sscanf(buf, "%d", &val) != 1)
1098		return -EINVAL;
1099
1100	if (val)
1101		zio_mode = QLA_ZIO_MODE_6;
1102	else
1103		zio_mode = QLA_ZIO_DISABLED;
1104
1105	/* Update per-hba values and queue a reset. */
1106	if (zio_mode != QLA_ZIO_DISABLED || ha->zio_mode != QLA_ZIO_DISABLED) {
1107		ha->zio_mode = zio_mode;
1108		set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
1109	}
1110	return strlen(buf);
1111}
1112
1113static ssize_t
1114qla2x00_zio_timer_show(struct device *dev, struct device_attribute *attr,
1115		       char *buf)
1116{
1117	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1118
1119	return snprintf(buf, PAGE_SIZE, "%d us\n", vha->hw->zio_timer * 100);
1120}
1121
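/*
 * zio_timer is stored in 100-microsecond units; the value written here is
 * expressed in microseconds and must be between 100 and 25500.
 */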
1122static ssize_t
1123qla2x00_zio_timer_store(struct device *dev, struct device_attribute *attr,
1124			const char *buf, size_t count)
1125{
1126	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1127	int val = 0;
1128	uint16_t zio_timer;
1129
1130	if (sscanf(buf, "%d", &val) != 1)
1131		return -EINVAL;
1132	if (val > 25500 || val < 100)
1133		return -ERANGE;
1134
1135	zio_timer = (uint16_t)(val / 100);
1136	vha->hw->zio_timer = zio_timer;
1137
1138	return strlen(buf);
1139}
1140
1141static ssize_t
1142qla2x00_beacon_show(struct device *dev, struct device_attribute *attr,
1143		    char *buf)
1144{
1145	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1146	int len = 0;
1147
1148	if (vha->hw->beacon_blink_led)
1149		len += snprintf(buf + len, PAGE_SIZE-len, "Enabled\n");
1150	else
1151		len += snprintf(buf + len, PAGE_SIZE-len, "Disabled\n");
1152	return len;
1153}
1154
1155static ssize_t
1156qla2x00_beacon_store(struct device *dev, struct device_attribute *attr,
1157		     const char *buf, size_t count)
1158{
1159	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1160	struct qla_hw_data *ha = vha->hw;
1161	int val = 0;
1162	int rval;
1163
1164	if (IS_QLA2100(ha) || IS_QLA2200(ha))
1165		return -EPERM;
1166
1167	if (test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags)) {
1168		ql_log(ql_log_warn, vha, 0x707a,
1169		    "Abort ISP active -- ignoring beacon request.\n");
1170		return -EBUSY;
1171	}
1172
1173	if (sscanf(buf, "%d", &val) != 1)
1174		return -EINVAL;
1175
1176	if (val)
1177		rval = ha->isp_ops->beacon_on(vha);
1178	else
1179		rval = ha->isp_ops->beacon_off(vha);
1180
1181	if (rval != QLA_SUCCESS)
1182		count = 0;
1183
1184	return count;
1185}
1186
1187static ssize_t
1188qla2x00_optrom_bios_version_show(struct device *dev,
1189				 struct device_attribute *attr, char *buf)
1190{
1191	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1192	struct qla_hw_data *ha = vha->hw;
1193	return snprintf(buf, PAGE_SIZE, "%d.%02d\n", ha->bios_revision[1],
1194	    ha->bios_revision[0]);
1195}
1196
1197static ssize_t
1198qla2x00_optrom_efi_version_show(struct device *dev,
1199				struct device_attribute *attr, char *buf)
1200{
1201	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1202	struct qla_hw_data *ha = vha->hw;
1203	return snprintf(buf, PAGE_SIZE, "%d.%02d\n", ha->efi_revision[1],
1204	    ha->efi_revision[0]);
1205}
1206
1207static ssize_t
1208qla2x00_optrom_fcode_version_show(struct device *dev,
1209				  struct device_attribute *attr, char *buf)
1210{
1211	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1212	struct qla_hw_data *ha = vha->hw;
1213	return snprintf(buf, PAGE_SIZE, "%d.%02d\n", ha->fcode_revision[1],
1214	    ha->fcode_revision[0]);
1215}
1216
1217static ssize_t
1218qla2x00_optrom_fw_version_show(struct device *dev,
1219			       struct device_attribute *attr, char *buf)
1220{
1221	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1222	struct qla_hw_data *ha = vha->hw;
1223	return snprintf(buf, PAGE_SIZE, "%d.%02d.%02d %d\n",
1224	    ha->fw_revision[0], ha->fw_revision[1], ha->fw_revision[2],
1225	    ha->fw_revision[3]);
1226}
1227
1228static ssize_t
1229qla2x00_optrom_gold_fw_version_show(struct device *dev,
1230    struct device_attribute *attr, char *buf)
1231{
1232	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1233	struct qla_hw_data *ha = vha->hw;
1234
1235	if (!IS_QLA81XX(ha))
1236		return snprintf(buf, PAGE_SIZE, "\n");
1237
1238	return snprintf(buf, PAGE_SIZE, "%d.%02d.%02d (%d)\n",
1239	    ha->gold_fw_version[0], ha->gold_fw_version[1],
1240	    ha->gold_fw_version[2], ha->gold_fw_version[3]);
1241}
1242
1243static ssize_t
1244qla2x00_total_isp_aborts_show(struct device *dev,
1245			      struct device_attribute *attr, char *buf)
1246{
1247	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1248	struct qla_hw_data *ha = vha->hw;
1249	return snprintf(buf, PAGE_SIZE, "%d\n",
1250	    ha->qla_stats.total_isp_aborts);
1251}
1252
1253static ssize_t
1254qla24xx_84xx_fw_version_show(struct device *dev,
1255	struct device_attribute *attr, char *buf)
1256{
1257	int rval = QLA_SUCCESS;
1258	uint16_t status[2] = {0, 0};
1259	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1260	struct qla_hw_data *ha = vha->hw;
1261
1262	if (!IS_QLA84XX(ha))
1263		return snprintf(buf, PAGE_SIZE, "\n");
1264
1265	if (ha->cs84xx->op_fw_version == 0)
1266		rval = qla84xx_verify_chip(vha, status);
1267
1268	if ((rval == QLA_SUCCESS) && (status[0] == 0))
1269		return snprintf(buf, PAGE_SIZE, "%u\n",
1270			(uint32_t)ha->cs84xx->op_fw_version);
1271
1272	return snprintf(buf, PAGE_SIZE, "\n");
1273}
1274
1275static ssize_t
1276qla2x00_mpi_version_show(struct device *dev, struct device_attribute *attr,
1277    char *buf)
1278{
1279	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1280	struct qla_hw_data *ha = vha->hw;
1281
1282	if (!IS_QLA81XX(ha))
1283		return snprintf(buf, PAGE_SIZE, "\n");
1284
1285	return snprintf(buf, PAGE_SIZE, "%d.%02d.%02d (%x)\n",
1286	    ha->mpi_version[0], ha->mpi_version[1], ha->mpi_version[2],
1287	    ha->mpi_capabilities);
1288}
1289
1290static ssize_t
1291qla2x00_phy_version_show(struct device *dev, struct device_attribute *attr,
1292    char *buf)
1293{
1294	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1295	struct qla_hw_data *ha = vha->hw;
1296
1297	if (!IS_QLA81XX(ha))
1298		return snprintf(buf, PAGE_SIZE, "\n");
1299
1300	return snprintf(buf, PAGE_SIZE, "%d.%02d.%02d\n",
1301	    ha->phy_version[0], ha->phy_version[1], ha->phy_version[2]);
1302}
1303
1304static ssize_t
1305qla2x00_flash_block_size_show(struct device *dev,
1306			      struct device_attribute *attr, char *buf)
1307{
1308	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1309	struct qla_hw_data *ha = vha->hw;
1310
1311	return snprintf(buf, PAGE_SIZE, "0x%x\n", ha->fdt_block_size);
1312}
1313
1314static ssize_t
1315qla2x00_vlan_id_show(struct device *dev, struct device_attribute *attr,
1316    char *buf)
1317{
1318	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1319
1320	if (!IS_QLA8XXX_TYPE(vha->hw))
1321		return snprintf(buf, PAGE_SIZE, "\n");
1322
1323	return snprintf(buf, PAGE_SIZE, "%d\n", vha->fcoe_vlan_id);
1324}
1325
1326static ssize_t
1327qla2x00_vn_port_mac_address_show(struct device *dev,
1328    struct device_attribute *attr, char *buf)
1329{
1330	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1331
1332	if (!IS_QLA8XXX_TYPE(vha->hw))
1333		return snprintf(buf, PAGE_SIZE, "\n");
1334
1335	return snprintf(buf, PAGE_SIZE, "%02x:%02x:%02x:%02x:%02x:%02x\n",
1336	    vha->fcoe_vn_port_mac[5], vha->fcoe_vn_port_mac[4],
1337	    vha->fcoe_vn_port_mac[3], vha->fcoe_vn_port_mac[2],
1338	    vha->fcoe_vn_port_mac[1], vha->fcoe_vn_port_mac[0]);
1339}
1340
1341static ssize_t
1342qla2x00_fabric_param_show(struct device *dev, struct device_attribute *attr,
1343    char *buf)
1344{
1345	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1346
1347	return snprintf(buf, PAGE_SIZE, "%d\n", vha->hw->switch_cap);
1348}
1349
1350static ssize_t
1351qla2x00_thermal_temp_show(struct device *dev,
1352	struct device_attribute *attr, char *buf)
1353{
1354	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1355	int rval = QLA_FUNCTION_FAILED;
1356	uint16_t temp, frac;
1357
1358	if (!vha->hw->flags.thermal_supported)
1359		return snprintf(buf, PAGE_SIZE, "\n");
1360
1361	temp = frac = 0;
1362	if (test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) ||
1363	    test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags))
1364		ql_log(ql_log_warn, vha, 0x707b,
1365		    "ISP reset active.\n");
1366	else if (!vha->hw->flags.eeh_busy)
1367		rval = qla2x00_get_thermal_temp(vha, &temp, &frac);
1368	if (rval != QLA_SUCCESS)
1369		temp = frac = 0;
1370
1371	return snprintf(buf, PAGE_SIZE, "%d.%02d\n", temp, frac);
1372}
1373
1374static ssize_t
1375qla2x00_fw_state_show(struct device *dev, struct device_attribute *attr,
1376    char *buf)
1377{
1378	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1379	int rval = QLA_FUNCTION_FAILED;
1380	uint16_t state[5];
1381
1382	if (test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags) ||
1383		test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags))
1384		ql_log(ql_log_warn, vha, 0x707c,
1385		    "ISP reset active.\n");
1386	else if (!vha->hw->flags.eeh_busy)
1387		rval = qla2x00_get_firmware_state(vha, state);
1388	if (rval != QLA_SUCCESS)
1389		memset(state, -1, sizeof(state));
1390
1391	return snprintf(buf, PAGE_SIZE, "0x%x 0x%x 0x%x 0x%x 0x%x\n", state[0],
1392	    state[1], state[2], state[3], state[4]);
1393}
1394
1395static DEVICE_ATTR(driver_version, S_IRUGO, qla2x00_drvr_version_show, NULL);
1396static DEVICE_ATTR(fw_version, S_IRUGO, qla2x00_fw_version_show, NULL);
1397static DEVICE_ATTR(serial_num, S_IRUGO, qla2x00_serial_num_show, NULL);
1398static DEVICE_ATTR(isp_name, S_IRUGO, qla2x00_isp_name_show, NULL);
1399static DEVICE_ATTR(isp_id, S_IRUGO, qla2x00_isp_id_show, NULL);
1400static DEVICE_ATTR(model_name, S_IRUGO, qla2x00_model_name_show, NULL);
1401static DEVICE_ATTR(model_desc, S_IRUGO, qla2x00_model_desc_show, NULL);
1402static DEVICE_ATTR(pci_info, S_IRUGO, qla2x00_pci_info_show, NULL);
1403static DEVICE_ATTR(link_state, S_IRUGO, qla2x00_link_state_show, NULL);
1404static DEVICE_ATTR(zio, S_IRUGO | S_IWUSR, qla2x00_zio_show, qla2x00_zio_store);
1405static DEVICE_ATTR(zio_timer, S_IRUGO | S_IWUSR, qla2x00_zio_timer_show,
1406		   qla2x00_zio_timer_store);
1407static DEVICE_ATTR(beacon, S_IRUGO | S_IWUSR, qla2x00_beacon_show,
1408		   qla2x00_beacon_store);
1409static DEVICE_ATTR(optrom_bios_version, S_IRUGO,
1410		   qla2x00_optrom_bios_version_show, NULL);
1411static DEVICE_ATTR(optrom_efi_version, S_IRUGO,
1412		   qla2x00_optrom_efi_version_show, NULL);
1413static DEVICE_ATTR(optrom_fcode_version, S_IRUGO,
1414		   qla2x00_optrom_fcode_version_show, NULL);
1415static DEVICE_ATTR(optrom_fw_version, S_IRUGO, qla2x00_optrom_fw_version_show,
1416		   NULL);
1417static DEVICE_ATTR(optrom_gold_fw_version, S_IRUGO,
1418    qla2x00_optrom_gold_fw_version_show, NULL);
1419static DEVICE_ATTR(84xx_fw_version, S_IRUGO, qla24xx_84xx_fw_version_show,
1420		   NULL);
1421static DEVICE_ATTR(total_isp_aborts, S_IRUGO, qla2x00_total_isp_aborts_show,
1422		   NULL);
1423static DEVICE_ATTR(mpi_version, S_IRUGO, qla2x00_mpi_version_show, NULL);
1424static DEVICE_ATTR(phy_version, S_IRUGO, qla2x00_phy_version_show, NULL);
1425static DEVICE_ATTR(flash_block_size, S_IRUGO, qla2x00_flash_block_size_show,
1426		   NULL);
1427static DEVICE_ATTR(vlan_id, S_IRUGO, qla2x00_vlan_id_show, NULL);
1428static DEVICE_ATTR(vn_port_mac_address, S_IRUGO,
1429		   qla2x00_vn_port_mac_address_show, NULL);
1430static DEVICE_ATTR(fabric_param, S_IRUGO, qla2x00_fabric_param_show, NULL);
1431static DEVICE_ATTR(fw_state, S_IRUGO, qla2x00_fw_state_show, NULL);
1432static DEVICE_ATTR(thermal_temp, S_IRUGO, qla2x00_thermal_temp_show, NULL);
1433
1434struct device_attribute *qla2x00_host_attrs[] = {
1435	&dev_attr_driver_version,
1436	&dev_attr_fw_version,
1437	&dev_attr_serial_num,
1438	&dev_attr_isp_name,
1439	&dev_attr_isp_id,
1440	&dev_attr_model_name,
1441	&dev_attr_model_desc,
1442	&dev_attr_pci_info,
1443	&dev_attr_link_state,
1444	&dev_attr_zio,
1445	&dev_attr_zio_timer,
1446	&dev_attr_beacon,
1447	&dev_attr_optrom_bios_version,
1448	&dev_attr_optrom_efi_version,
1449	&dev_attr_optrom_fcode_version,
1450	&dev_attr_optrom_fw_version,
1451	&dev_attr_84xx_fw_version,
1452	&dev_attr_total_isp_aborts,
1453	&dev_attr_mpi_version,
1454	&dev_attr_phy_version,
1455	&dev_attr_flash_block_size,
1456	&dev_attr_vlan_id,
1457	&dev_attr_vn_port_mac_address,
1458	&dev_attr_fabric_param,
1459	&dev_attr_fw_state,
1460	&dev_attr_optrom_gold_fw_version,
1461	&dev_attr_thermal_temp,
1462	NULL,
1463};
1464
1465/* Host attributes. */
1466
1467static void
1468qla2x00_get_host_port_id(struct Scsi_Host *shost)
1469{
1470	scsi_qla_host_t *vha = shost_priv(shost);
1471
1472	fc_host_port_id(shost) = vha->d_id.b.domain << 16 |
1473	    vha->d_id.b.area << 8 | vha->d_id.b.al_pa;
1474}
1475
1476static void
1477qla2x00_get_host_speed(struct Scsi_Host *shost)
1478{
1479	struct qla_hw_data *ha = ((struct scsi_qla_host *)
1480					(shost_priv(shost)))->hw;
1481	u32 speed = FC_PORTSPEED_UNKNOWN;
1482
1483	switch (ha->link_data_rate) {
1484	case PORT_SPEED_1GB:
1485		speed = FC_PORTSPEED_1GBIT;
1486		break;
1487	case PORT_SPEED_2GB:
1488		speed = FC_PORTSPEED_2GBIT;
1489		break;
1490	case PORT_SPEED_4GB:
1491		speed = FC_PORTSPEED_4GBIT;
1492		break;
1493	case PORT_SPEED_8GB:
1494		speed = FC_PORTSPEED_8GBIT;
1495		break;
1496	case PORT_SPEED_10GB:
1497		speed = FC_PORTSPEED_10GBIT;
1498		break;
1499	}
1500	fc_host_speed(shost) = speed;
1501}
1502
1503static void
1504qla2x00_get_host_port_type(struct Scsi_Host *shost)
1505{
1506	scsi_qla_host_t *vha = shost_priv(shost);
1507	uint32_t port_type = FC_PORTTYPE_UNKNOWN;
1508
1509	if (vha->vp_idx) {
1510		fc_host_port_type(shost) = FC_PORTTYPE_NPIV;
1511		return;
1512	}
1513	switch (vha->hw->current_topology) {
1514	case ISP_CFG_NL:
1515		port_type = FC_PORTTYPE_LPORT;
1516		break;
1517	case ISP_CFG_FL:
1518		port_type = FC_PORTTYPE_NLPORT;
1519		break;
1520	case ISP_CFG_N:
1521		port_type = FC_PORTTYPE_PTP;
1522		break;
1523	case ISP_CFG_F:
1524		port_type = FC_PORTTYPE_NPORT;
1525		break;
1526	}
1527	fc_host_port_type(shost) = port_type;
1528}
1529
1530static void
1531qla2x00_get_starget_node_name(struct scsi_target *starget)
1532{
1533	struct Scsi_Host *host = dev_to_shost(starget->dev.parent);
1534	scsi_qla_host_t *vha = shost_priv(host);
1535	fc_port_t *fcport;
1536	u64 node_name = 0;
1537
1538	list_for_each_entry(fcport, &vha->vp_fcports, list) {
1539		if (fcport->rport &&
1540		    starget->id == fcport->rport->scsi_target_id) {
1541			node_name = wwn_to_u64(fcport->node_name);
1542			break;
1543		}
1544	}
1545
1546	fc_starget_node_name(starget) = node_name;
1547}
1548
1549static void
1550qla2x00_get_starget_port_name(struct scsi_target *starget)
1551{
1552	struct Scsi_Host *host = dev_to_shost(starget->dev.parent);
1553	scsi_qla_host_t *vha = shost_priv(host);
1554	fc_port_t *fcport;
1555	u64 port_name = 0;
1556
1557	list_for_each_entry(fcport, &vha->vp_fcports, list) {
1558		if (fcport->rport &&
1559		    starget->id == fcport->rport->scsi_target_id) {
1560			port_name = wwn_to_u64(fcport->port_name);
1561			break;
1562		}
1563	}
1564
1565	fc_starget_port_name(starget) = port_name;
1566}
1567
1568static void
1569qla2x00_get_starget_port_id(struct scsi_target *starget)
1570{
1571	struct Scsi_Host *host = dev_to_shost(starget->dev.parent);
1572	scsi_qla_host_t *vha = shost_priv(host);
1573	fc_port_t *fcport;
1574	uint32_t port_id = ~0U;
1575
1576	list_for_each_entry(fcport, &vha->vp_fcports, list) {
1577		if (fcport->rport &&
1578		    starget->id == fcport->rport->scsi_target_id) {
1579			port_id = fcport->d_id.b.domain << 16 |
1580			    fcport->d_id.b.area << 8 | fcport->d_id.b.al_pa;
1581			break;
1582		}
1583	}
1584
1585	fc_starget_port_id(starget) = port_id;
1586}
1587
1588static void
1589qla2x00_set_rport_loss_tmo(struct fc_rport *rport, uint32_t timeout)
1590{
1591	if (timeout)
1592		rport->dev_loss_tmo = timeout;
1593	else
1594		rport->dev_loss_tmo = 1;
1595}
1596
1597static void
1598qla2x00_dev_loss_tmo_callbk(struct fc_rport *rport)
1599{
1600	struct Scsi_Host *host = rport_to_shost(rport);
1601	fc_port_t *fcport = *(fc_port_t **)rport->dd_data;
1602	unsigned long flags;
1603
1604	if (!fcport)
1605		return;
1606
1607	/* Now that the rport has been deleted, set the fcport state to
1608	   FCS_DEVICE_DEAD */
1609	qla2x00_set_fcport_state(fcport, FCS_DEVICE_DEAD);
1610
1611	/*
1612	 * Transport has effectively 'deleted' the rport, clear
1613	 * all local references.
1614	 */
1615	spin_lock_irqsave(host->host_lock, flags);
1616	fcport->rport = fcport->drport = NULL;
1617	*((fc_port_t **)rport->dd_data) = NULL;
1618	spin_unlock_irqrestore(host->host_lock, flags);
1619
1620	if (test_bit(ABORT_ISP_ACTIVE, &fcport->vha->dpc_flags))
1621		return;
1622
1623	if (unlikely(pci_channel_offline(fcport->vha->hw->pdev))) {
1624		qla2x00_abort_all_cmds(fcport->vha, DID_NO_CONNECT << 16);
1625		return;
1626	}
1627}
1628
1629static void
1630qla2x00_terminate_rport_io(struct fc_rport *rport)
1631{
1632	fc_port_t *fcport = *(fc_port_t **)rport->dd_data;
1633
1634	if (!fcport)
1635		return;
1636
1637	if (test_bit(ABORT_ISP_ACTIVE, &fcport->vha->dpc_flags))
1638		return;
1639
1640	if (unlikely(pci_channel_offline(fcport->vha->hw->pdev))) {
1641		qla2x00_abort_all_cmds(fcport->vha, DID_NO_CONNECT << 16);
1642		return;
1643	}
1644	/*
1645	 * At this point all fcport's software-states are cleared.  Perform any
1646	 * final cleanup of firmware resources (PCBs and XCBs).
1647	 */
1648	if (fcport->loop_id != FC_NO_LOOP_ID &&
1649	    !test_bit(UNLOADING, &fcport->vha->dpc_flags))
1650		fcport->vha->hw->isp_ops->fabric_logout(fcport->vha,
1651			fcport->loop_id, fcport->d_id.b.domain,
1652			fcport->d_id.b.area, fcport->d_id.b.al_pa);
1653}
1654
1655static int
1656qla2x00_issue_lip(struct Scsi_Host *shost)
1657{
1658	scsi_qla_host_t *vha = shost_priv(shost);
1659
1660	qla2x00_loop_reset(vha);
1661	return 0;
1662}
1663
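/*
 * FC transport statistics: counters are read from firmware into a DMA
 * buffer (ISP statistics for FWI2-capable adapters, Get Link Status
 * otherwise) and copied into the fc_host_statistics structure; fields the
 * firmware does not report are left at -1.
 */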
1664static struct fc_host_statistics *
1665qla2x00_get_fc_host_stats(struct Scsi_Host *shost)
1666{
1667	scsi_qla_host_t *vha = shost_priv(shost);
1668	struct qla_hw_data *ha = vha->hw;
1669	struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
1670	int rval;
1671	struct link_statistics *stats;
1672	dma_addr_t stats_dma;
1673	struct fc_host_statistics *pfc_host_stat;
1674
1675	pfc_host_stat = &ha->fc_host_stat;
1676	memset(pfc_host_stat, -1, sizeof(struct fc_host_statistics));
1677
1678	if (test_bit(UNLOADING, &vha->dpc_flags))
1679		goto done;
1680
1681	if (unlikely(pci_channel_offline(ha->pdev)))
1682		goto done;
1683
1684	stats = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &stats_dma);
1685	if (stats == NULL) {
1686		ql_log(ql_log_warn, vha, 0x707d,
1687		    "Failed to allocate memory for stats.\n");
1688		goto done;
1689	}
1690	memset(stats, 0, DMA_POOL_SIZE);
1691
1692	rval = QLA_FUNCTION_FAILED;
1693	if (IS_FWI2_CAPABLE(ha)) {
1694		rval = qla24xx_get_isp_stats(base_vha, stats, stats_dma);
1695	} else if (atomic_read(&base_vha->loop_state) == LOOP_READY &&
1696		    !test_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags) &&
1697		    !test_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags) &&
1698		    !ha->dpc_active) {
1699		/* Must be in a 'READY' state for statistics retrieval. */
1700		rval = qla2x00_get_link_status(base_vha, base_vha->loop_id,
1701						stats, stats_dma);
1702	}
1703
1704	if (rval != QLA_SUCCESS)
1705		goto done_free;
1706
1707	pfc_host_stat->link_failure_count = stats->link_fail_cnt;
1708	pfc_host_stat->loss_of_sync_count = stats->loss_sync_cnt;
1709	pfc_host_stat->loss_of_signal_count = stats->loss_sig_cnt;
1710	pfc_host_stat->prim_seq_protocol_err_count = stats->prim_seq_err_cnt;
1711	pfc_host_stat->invalid_tx_word_count = stats->inval_xmit_word_cnt;
1712	pfc_host_stat->invalid_crc_count = stats->inval_crc_cnt;
1713	if (IS_FWI2_CAPABLE(ha)) {
1714		pfc_host_stat->lip_count = stats->lip_cnt;
1715		pfc_host_stat->tx_frames = stats->tx_frames;
1716		pfc_host_stat->rx_frames = stats->rx_frames;
1717		pfc_host_stat->dumped_frames = stats->dumped_frames;
1718		pfc_host_stat->nos_count = stats->nos_rcvd;
1719	}
1720	pfc_host_stat->fcp_input_megabytes = ha->qla_stats.input_bytes >> 20;
1721	pfc_host_stat->fcp_output_megabytes = ha->qla_stats.output_bytes >> 20;
1722
1723done_free:
1724	dma_pool_free(ha->s_dma_pool, stats, stats_dma);
1725done:
1726	return pfc_host_stat;
1727}
1728
1729static void
1730qla2x00_get_host_symbolic_name(struct Scsi_Host *shost)
1731{
1732	scsi_qla_host_t *vha = shost_priv(shost);
1733
1734	qla2x00_get_sym_node_name(vha, fc_host_symbolic_name(shost));
1735}
1736
1737static void
1738qla2x00_set_host_system_hostname(struct Scsi_Host *shost)
1739{
1740	scsi_qla_host_t *vha = shost_priv(shost);
1741
1742	set_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags);
1743}
1744
1745static void
1746qla2x00_get_host_fabric_name(struct Scsi_Host *shost)
1747{
1748	scsi_qla_host_t *vha = shost_priv(shost);
1749	uint8_t node_name[WWN_SIZE] = { 0xFF, 0xFF, 0xFF, 0xFF,
1750		0xFF, 0xFF, 0xFF, 0xFF};
1751	u64 fabric_name = wwn_to_u64(node_name);
1752
1753	if (vha->device_flags & SWITCH_FOUND)
1754		fabric_name = wwn_to_u64(vha->fabric_node_name);
1755
1756	fc_host_fabric_name(shost) = fabric_name;
1757}
1758
1759static void
1760qla2x00_get_host_port_state(struct Scsi_Host *shost)
1761{
1762	scsi_qla_host_t *vha = shost_priv(shost);
1763	struct scsi_qla_host *base_vha = pci_get_drvdata(vha->hw->pdev);
1764
1765	if (!base_vha->flags.online) {
1766		fc_host_port_state(shost) = FC_PORTSTATE_OFFLINE;
1767		return;
1768	}
1769
1770	switch (atomic_read(&base_vha->loop_state)) {
1771	case LOOP_UPDATE:
1772		fc_host_port_state(shost) = FC_PORTSTATE_DIAGNOSTICS;
1773		break;
1774	case LOOP_DOWN:
1775		if (test_bit(LOOP_RESYNC_NEEDED, &base_vha->dpc_flags))
1776			fc_host_port_state(shost) = FC_PORTSTATE_DIAGNOSTICS;
1777		else
1778			fc_host_port_state(shost) = FC_PORTSTATE_LINKDOWN;
1779		break;
1780	case LOOP_DEAD:
1781		fc_host_port_state(shost) = FC_PORTSTATE_LINKDOWN;
1782		break;
1783	case LOOP_READY:
1784		fc_host_port_state(shost) = FC_PORTSTATE_ONLINE;
1785		break;
1786	default:
1787		fc_host_port_state(shost) = FC_PORTSTATE_UNKNOWN;
1788		break;
1789	}
1790}
1791
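/*
 * NPIV vport creation: sanity-check the request, allocate a new vhost,
 * inherit protection and transport attributes from the physical port,
 * register the SCSI host and, when QoS is configured in the NVRAM NPIV
 * table, give the vport its own request queue.
 */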
1792static int
1793qla24xx_vport_create(struct fc_vport *fc_vport, bool disable)
1794{
1795	int	ret = 0;
1796	uint8_t	qos = 0;
1797	scsi_qla_host_t *base_vha = shost_priv(fc_vport->shost);
1798	scsi_qla_host_t *vha = NULL;
1799	struct qla_hw_data *ha = base_vha->hw;
1800	uint16_t options = 0;
1801	int	cnt;
1802	struct req_que *req = ha->req_q_map[0];
1803
1804	ret = qla24xx_vport_create_req_sanity_check(fc_vport);
1805	if (ret) {
1806		ql_log(ql_log_warn, vha, 0x707e,
1807		    "Vport sanity check failed, status %x\n", ret);
1808		return (ret);
1809	}
1810
1811	vha = qla24xx_create_vhost(fc_vport);
1812	if (vha == NULL) {
1813		ql_log(ql_log_warn, vha, 0x707f, "Vport create host failed.\n");
1814		return FC_VPORT_FAILED;
1815	}
1816	if (disable) {
1817		atomic_set(&vha->vp_state, VP_OFFLINE);
1818		fc_vport_set_state(fc_vport, FC_VPORT_DISABLED);
1819	} else
1820		atomic_set(&vha->vp_state, VP_FAILED);
1821
1822	/* ready to create vport */
1823	ql_log(ql_log_info, vha, 0x7080,
1824	    "VP entry id %d assigned.\n", vha->vp_idx);
1825
1826	/* initialized vport states */
1827	atomic_set(&vha->loop_state, LOOP_DOWN);
1828	vha->vp_err_state = VP_ERR_PORTDWN;
1829	vha->vp_prev_err_state = VP_ERR_UNKWN;
1830	/* Check if physical ha port is Up */
1831	if (atomic_read(&base_vha->loop_state) == LOOP_DOWN ||
1832	    atomic_read(&base_vha->loop_state) == LOOP_DEAD) {
1833		/* Don't retry or attempt login of this virtual port */
1834		ql_dbg(ql_dbg_user, vha, 0x7081,
1835		    "Vport loop state is not UP.\n");
1836		atomic_set(&vha->loop_state, LOOP_DEAD);
1837		if (!disable)
1838			fc_vport_set_state(fc_vport, FC_VPORT_LINKDOWN);
1839	}
1840
1841	if (IS_T10_PI_CAPABLE(ha) && ql2xenabledif) {
1842		if (ha->fw_attributes & BIT_4) {
1843			int prot = 0;
1844			vha->flags.difdix_supported = 1;
1845			ql_dbg(ql_dbg_user, vha, 0x7082,
1846			    "Registered for DIF/DIX type 1 and 3 protection.\n");
1847			if (ql2xenabledif == 1)
1848				prot = SHOST_DIX_TYPE0_PROTECTION;
1849			scsi_host_set_prot(vha->host,
1850			    prot | SHOST_DIF_TYPE1_PROTECTION
1851			    | SHOST_DIF_TYPE2_PROTECTION
1852			    | SHOST_DIF_TYPE3_PROTECTION
1853			    | SHOST_DIX_TYPE1_PROTECTION
1854			    | SHOST_DIX_TYPE2_PROTECTION
1855			    | SHOST_DIX_TYPE3_PROTECTION);
1856			scsi_host_set_guard(vha->host, SHOST_DIX_GUARD_CRC);
1857		} else
1858			vha->flags.difdix_supported = 0;
1859	}
1860
1861	if (scsi_add_host_with_dma(vha->host, &fc_vport->dev,
1862				   &ha->pdev->dev)) {
1863		ql_dbg(ql_dbg_user, vha, 0x7083,
1864		    "scsi_add_host failure for VP[%d].\n", vha->vp_idx);
1865		goto vport_create_failed_2;
1866	}
1867
1868	/* initialize attributes */
1869	fc_host_dev_loss_tmo(vha->host) = ha->port_down_retry_count;
1870	fc_host_node_name(vha->host) = wwn_to_u64(vha->node_name);
1871	fc_host_port_name(vha->host) = wwn_to_u64(vha->port_name);
1872	fc_host_supported_classes(vha->host) =
1873		fc_host_supported_classes(base_vha->host);
1874	fc_host_supported_speeds(vha->host) =
1875		fc_host_supported_speeds(base_vha->host);
1876
1877	qla24xx_vport_disable(fc_vport, disable);
1878
1879	if (ha->flags.cpu_affinity_enabled) {
1880		req = ha->req_q_map[1];
1881		ql_dbg(ql_dbg_multiq, vha, 0xc000,
1882		    "Request queue %p attached with "
1883		    "VP[%d], cpu affinity =%d\n",
1884		    req, vha->vp_idx, ha->flags.cpu_affinity_enabled);
1885		goto vport_queue;
1886	} else if (ql2xmaxqueues == 1 || !ha->npiv_info)
1887		goto vport_queue;
1888	/* Create a request queue in QoS mode for the vport */
1889	for (cnt = 0; cnt < ha->nvram_npiv_size; cnt++) {
1890		if (memcmp(ha->npiv_info[cnt].port_name, vha->port_name, 8) == 0
1891			&& memcmp(ha->npiv_info[cnt].node_name, vha->node_name,
1892					8) == 0) {
1893			qos = ha->npiv_info[cnt].q_qos;
1894			break;
1895		}
1896	}
1897	if (qos) {
1898		ret = qla25xx_create_req_que(ha, options, vha->vp_idx, 0, 0,
1899			qos);
1900		if (!ret)
1901			ql_log(ql_log_warn, vha, 0x7084,
1902			    "Can't create request queue for VP[%d]\n",
1903			    vha->vp_idx);
1904		else {
1905			ql_dbg(ql_dbg_multiq, vha, 0xc001,
1906			    "Request Que:%d QoS:%d created for VP[%d]\n",
1907			    ret, qos, vha->vp_idx);
1908			ql_dbg(ql_dbg_user, vha, 0x7085,
1909			    "Request Que:%d QoS:%d created for VP[%d]\n",
1910			    ret, qos, vha->vp_idx);
1911			req = ha->req_q_map[ret];
1912		}
1913	}
1914
1915vport_queue:
1916	vha->req = req;
1917	return 0;
1918
1919vport_create_failed_2:
1920	qla24xx_disable_vp(vha);
1921	qla24xx_deallocate_vp_id(vha);
1922	scsi_host_put(vha->host);
1923	return FC_VPORT_FAILED;
1924}
1925
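/*
 * qla24xx_vport_delete() - FC transport "vport_delete" callback.
 *
 * Tears down a virtual port: waits for pending DPC work, disables the
 * VP, unregisters it from the FC transport and the SCSI midlayer, frees
 * its fcports and (unless CPU affinity is in use) its dedicated request
 * queue, then drops the host reference taken at creation time.
 */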
1926static int
1927qla24xx_vport_delete(struct fc_vport *fc_vport)
1928{
1929	scsi_qla_host_t *vha = fc_vport->dd_data;
1930	struct qla_hw_data *ha = vha->hw;
1931	uint16_t id = vha->vp_idx;
1932
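	/* Let any in-flight loop resync or fcport update finish before
	 * starting the teardown. */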
1933	while (test_bit(LOOP_RESYNC_ACTIVE, &vha->dpc_flags) ||
1934	    test_bit(FCPORT_UPDATE_NEEDED, &vha->dpc_flags))
1935		msleep(1000);
1936
1937	qla24xx_disable_vp(vha);
1938
1939	vha->flags.delete_progress = 1;
1940
1941	fc_remove_host(vha->host);
1942
1943	scsi_remove_host(vha->host);
1944
1945	/* Allow timer to run to drain queued items, when removing vp */
1946	qla24xx_deallocate_vp_id(vha);
1947
1948	if (vha->timer_active) {
1949		qla2x00_vp_stop_timer(vha);
1950		ql_dbg(ql_dbg_user, vha, 0x7086,
1951		    "Timer for the VP[%d] has stopped\n", vha->vp_idx);
1952	}
1953
1954	/* No pending activities shall be there on the vha now */
1955	if (ql2xextended_error_logging & ql_dbg_user)
1956		msleep(random32()%10);  /* Just to see if something falls on
1957					* the net we have placed below */
1958
1959	BUG_ON(atomic_read(&vha->vref_count));
1960
1961	qla2x00_free_fcports(vha);
1962
1963	mutex_lock(&ha->vport_lock);
1964	ha->cur_vport_count--;
1965	clear_bit(vha->vp_idx, ha->vp_idx_map);
1966	mutex_unlock(&ha->vport_lock);
1967
1968	if (vha->req->id && !ha->flags.cpu_affinity_enabled) {
1969		if (qla25xx_delete_req_que(vha, vha->req) != QLA_SUCCESS)
1970			ql_log(ql_log_warn, vha, 0x7087,
1971			    "Queue delete failed.\n");
1972	}
1973
1974	scsi_host_put(vha->host);
1975	ql_log(ql_log_info, vha, 0x7088, "VP[%d] deleted.\n", id);
1976	return 0;
1977}
1978
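/*
 * qla24xx_vport_disable() - FC transport "vport_disable" callback.
 *
 * Enables or disables an existing virtual port without destroying it.
 */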
1979static int
1980qla24xx_vport_disable(struct fc_vport *fc_vport, bool disable)
1981{
1982	scsi_qla_host_t *vha = fc_vport->dd_data;
1983
1984	if (disable)
1985		qla24xx_disable_vp(vha);
1986	else
1987		qla24xx_enable_vp(vha);
1988
1989	return 0;
1990}
1991
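/*
 * FC transport template for the physical port.  Attached via
 * fc_attach_transport() during driver initialization; the vport_*
 * callbacks below expose NPIV virtual port management on capable HBAs.
 */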
1992struct fc_function_template qla2xxx_transport_functions = {
1993
1994	.show_host_node_name = 1,
1995	.show_host_port_name = 1,
1996	.show_host_supported_classes = 1,
1997	.show_host_supported_speeds = 1,
1998
1999	.get_host_port_id = qla2x00_get_host_port_id,
2000	.show_host_port_id = 1,
2001	.get_host_speed = qla2x00_get_host_speed,
2002	.show_host_speed = 1,
2003	.get_host_port_type = qla2x00_get_host_port_type,
2004	.show_host_port_type = 1,
2005	.get_host_symbolic_name = qla2x00_get_host_symbolic_name,
2006	.show_host_symbolic_name = 1,
2007	.set_host_system_hostname = qla2x00_set_host_system_hostname,
2008	.show_host_system_hostname = 1,
2009	.get_host_fabric_name = qla2x00_get_host_fabric_name,
2010	.show_host_fabric_name = 1,
2011	.get_host_port_state = qla2x00_get_host_port_state,
2012	.show_host_port_state = 1,
2013
2014	.dd_fcrport_size = sizeof(struct fc_port *),
2015	.show_rport_supported_classes = 1,
2016
2017	.get_starget_node_name = qla2x00_get_starget_node_name,
2018	.show_starget_node_name = 1,
2019	.get_starget_port_name = qla2x00_get_starget_port_name,
2020	.show_starget_port_name = 1,
2021	.get_starget_port_id  = qla2x00_get_starget_port_id,
2022	.show_starget_port_id = 1,
2023
2024	.set_rport_dev_loss_tmo = qla2x00_set_rport_loss_tmo,
2025	.show_rport_dev_loss_tmo = 1,
2026
2027	.issue_fc_host_lip = qla2x00_issue_lip,
2028	.dev_loss_tmo_callbk = qla2x00_dev_loss_tmo_callbk,
2029	.terminate_rport_io = qla2x00_terminate_rport_io,
2030	.get_fc_host_stats = qla2x00_get_fc_host_stats,
2031
2032	.vport_create = qla24xx_vport_create,
2033	.vport_disable = qla24xx_vport_disable,
2034	.vport_delete = qla24xx_vport_delete,
2035	.bsg_request = qla24xx_bsg_request,
2036	.bsg_timeout = qla24xx_bsg_timeout,
2037};
2038
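/*
 * FC transport template for NPIV virtual ports.  Identical to the
 * physical-port template above except that supported speeds and the
 * vport management callbacks, which only apply to the physical port,
 * are not exposed.
 */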
2039struct fc_function_template qla2xxx_transport_vport_functions = {
2040
2041	.show_host_node_name = 1,
2042	.show_host_port_name = 1,
2043	.show_host_supported_classes = 1,
2044
2045	.get_host_port_id = qla2x00_get_host_port_id,
2046	.show_host_port_id = 1,
2047	.get_host_speed = qla2x00_get_host_speed,
2048	.show_host_speed = 1,
2049	.get_host_port_type = qla2x00_get_host_port_type,
2050	.show_host_port_type = 1,
2051	.get_host_symbolic_name = qla2x00_get_host_symbolic_name,
2052	.show_host_symbolic_name = 1,
2053	.set_host_system_hostname = qla2x00_set_host_system_hostname,
2054	.show_host_system_hostname = 1,
2055	.get_host_fabric_name = qla2x00_get_host_fabric_name,
2056	.show_host_fabric_name = 1,
2057	.get_host_port_state = qla2x00_get_host_port_state,
2058	.show_host_port_state = 1,
2059
2060	.dd_fcrport_size = sizeof(struct fc_port *),
2061	.show_rport_supported_classes = 1,
2062
2063	.get_starget_node_name = qla2x00_get_starget_node_name,
2064	.show_starget_node_name = 1,
2065	.get_starget_port_name = qla2x00_get_starget_port_name,
2066	.show_starget_port_name = 1,
2067	.get_starget_port_id  = qla2x00_get_starget_port_id,
2068	.show_starget_port_id = 1,
2069
2070	.set_rport_dev_loss_tmo = qla2x00_set_rport_loss_tmo,
2071	.show_rport_dev_loss_tmo = 1,
2072
2073	.issue_fc_host_lip = qla2x00_issue_lip,
2074	.dev_loss_tmo_callbk = qla2x00_dev_loss_tmo_callbk,
2075	.terminate_rport_io = qla2x00_terminate_rport_io,
2076	.get_fc_host_stats = qla2x00_get_fc_host_stats,
2077	.bsg_request = qla24xx_bsg_request,
2078	.bsg_timeout = qla24xx_bsg_timeout,
2079};
2080
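/*
 * qla2x00_init_host_attr() - seed fc_host attributes for a new host.
 *
 * Fills in the WWNs, dev_loss timeout, NPIV vport limits and a
 * supported-speed mask derived from the ISP type; called once when the
 * physical host is set up during adapter probe.
 */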
2081void
2082qla2x00_init_host_attr(scsi_qla_host_t *vha)
2083{
2084	struct qla_hw_data *ha = vha->hw;
2085	u32 speed = FC_PORTSPEED_UNKNOWN;
2086
2087	fc_host_dev_loss_tmo(vha->host) = ha->port_down_retry_count;
2088	fc_host_node_name(vha->host) = wwn_to_u64(vha->node_name);
2089	fc_host_port_name(vha->host) = wwn_to_u64(vha->port_name);
2090	fc_host_supported_classes(vha->host) = FC_COS_CLASS3;
2091	fc_host_max_npiv_vports(vha->host) = ha->max_npiv_vports;
2092	fc_host_npiv_vports_inuse(vha->host) = ha->cur_vport_count;
2093
2094	if (IS_QLA8XXX_TYPE(ha))
2095		speed = FC_PORTSPEED_10GBIT;
2096	else if (IS_QLA25XX(ha))
2097		speed = FC_PORTSPEED_8GBIT | FC_PORTSPEED_4GBIT |
2098		    FC_PORTSPEED_2GBIT | FC_PORTSPEED_1GBIT;
2099	else if (IS_QLA24XX_TYPE(ha))
2100		speed = FC_PORTSPEED_4GBIT | FC_PORTSPEED_2GBIT |
2101		    FC_PORTSPEED_1GBIT;
2102	else if (IS_QLA23XX(ha))
2103		speed = FC_PORTSPEED_2GBIT | FC_PORTSPEED_1GBIT;
2104	else
2105		speed = FC_PORTSPEED_1GBIT;
2106	fc_host_supported_speeds(vha->host) = speed;
2107}
2108