/*
 * Copyright (C) 2005 - 2011 Emulex
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation.  The full GNU General
 * Public License is included in this distribution in the file called COPYING.
 *
 * Contact Information:
 * linux-drivers@emulex.com
 *
 * Emulex
 * 3333 Susan Street
 * Costa Mesa, CA 92626
 */

#include "be.h"
#include "be_cmds.h"

/* Must be a power of 2 or else MODULO will BUG_ON */
static int be_get_temp_freq = 64;

static inline void *embedded_payload(struct be_mcc_wrb *wrb)
{
	return wrb->payload.embedded_payload;
}

static void be_mcc_notify(struct be_adapter *adapter)
{
	struct be_queue_info *mccq = &adapter->mcc_obj.q;
	u32 val = 0;

	if (be_error(adapter))
		return;

	val |= mccq->id & DB_MCCQ_RING_ID_MASK;
	val |= 1 << DB_MCCQ_NUM_POSTED_SHIFT;

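	/* Make sure the WRB contents written by the caller are visible
	 * to the device before the doorbell write hands the entry over.
	 */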
	wmb();
	iowrite32(val, adapter->db + DB_MCCQ_OFFSET);
}

/* To check if valid bit is set, check the entire word as we don't know
 * the endianness of the data (old entry is host endian while a new entry is
 * little endian) */
static inline bool be_mcc_compl_is_new(struct be_mcc_compl *compl)
{
	if (compl->flags != 0) {
		compl->flags = le32_to_cpu(compl->flags);
		BUG_ON((compl->flags & CQE_FLAGS_VALID_MASK) == 0);
		return true;
	} else {
		return false;
	}
}

/* Need to reset the entire word that houses the valid bit */
static inline void be_mcc_compl_use(struct be_mcc_compl *compl)
{
	compl->flags = 0;
}

static int be_mcc_compl_process(struct be_adapter *adapter,
	struct be_mcc_compl *compl)
{
	u16 compl_status, extd_status;

	/* Just swap the status to host endian; mcc tag is opaquely copied
	 * from mcc_wrb */
	be_dws_le_to_cpu(compl, 4);

	compl_status = (compl->status >> CQE_STATUS_COMPL_SHIFT) &
				CQE_STATUS_COMPL_MASK;

	if (((compl->tag0 == OPCODE_COMMON_WRITE_FLASHROM) ||
		(compl->tag0 == OPCODE_COMMON_WRITE_OBJECT)) &&
		(compl->tag1 == CMD_SUBSYSTEM_COMMON)) {
		adapter->flash_status = compl_status;
		complete(&adapter->flash_compl);
	}

	if (compl_status == MCC_STATUS_SUCCESS) {
		if (((compl->tag0 == OPCODE_ETH_GET_STATISTICS) ||
			 (compl->tag0 == OPCODE_ETH_GET_PPORT_STATS)) &&
			(compl->tag1 == CMD_SUBSYSTEM_ETH)) {
			be_parse_stats(adapter);
			adapter->stats_cmd_sent = false;
		}
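		/* GET_CNTL_ADDITIONAL_ATTRIBUTES is fired asynchronously;
		 * be_cmd_get_die_temperature() stashes the MCCQ index of
		 * its WRB in tag1 so the response payload can be located
		 * here, long after the command was posted.
		 */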
		if (compl->tag0 ==
				OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES) {
			struct be_mcc_wrb *mcc_wrb =
				queue_index_node(&adapter->mcc_obj.q,
						compl->tag1);
			struct be_cmd_resp_get_cntl_addnl_attribs *resp =
				embedded_payload(mcc_wrb);
			adapter->drv_stats.be_on_die_temperature =
				resp->on_die_temperature;
		}
	} else {
		if (compl->tag0 == OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES)
			be_get_temp_freq = 0;

		if (compl_status == MCC_STATUS_NOT_SUPPORTED ||
			compl_status == MCC_STATUS_ILLEGAL_REQUEST)
			goto done;

		if (compl_status == MCC_STATUS_UNAUTHORIZED_REQUEST) {
			dev_warn(&adapter->pdev->dev, "This domain (VM) is not "
				"permitted to execute this cmd (opcode %d)\n",
				compl->tag0);
		} else {
			extd_status = (compl->status >> CQE_STATUS_EXTD_SHIFT) &
					CQE_STATUS_EXTD_MASK;
			dev_err(&adapter->pdev->dev, "Cmd (opcode %d) failed: "
				"status %d, extd-status %d\n",
				compl->tag0, compl_status, extd_status);
		}
	}
done:
	return compl_status;
}

/* Link state evt is a string of bytes; no need for endian swapping */
static void be_async_link_state_process(struct be_adapter *adapter,
		struct be_async_event_link_state *evt)
{
	/* When link status changes, link speed must be re-queried from FW */
	adapter->link_speed = -1;

	/* For the initial link status do not rely on the ASYNC event as
	 * it may not be received in some cases.
	 */
	if (adapter->flags & BE_FLAGS_LINK_STATUS_INIT)
		be_link_status_update(adapter, evt->port_link_status);
}

/* Grp5 CoS Priority evt */
static void be_async_grp5_cos_priority_process(struct be_adapter *adapter,
		struct be_async_event_grp5_cos_priority *evt)
{
	if (evt->valid) {
		adapter->vlan_prio_bmap = evt->available_priority_bmap;
		adapter->recommended_prio &= ~VLAN_PRIO_MASK;
		adapter->recommended_prio |=
			evt->reco_default_priority << VLAN_PRIO_SHIFT;
	}
}

/* Grp5 QOS Speed evt */
static void be_async_grp5_qos_speed_process(struct be_adapter *adapter,
		struct be_async_event_grp5_qos_link_speed *evt)
{
	if (evt->physical_port == adapter->port_num) {
		/* qos_link_speed is in units of 10 Mbps */
		adapter->link_speed = evt->qos_link_speed * 10;
	}
}

/* Grp5 PVID evt */
static void be_async_grp5_pvid_state_process(struct be_adapter *adapter,
		struct be_async_event_grp5_pvid_state *evt)
{
	if (evt->enabled)
		adapter->pvid = le16_to_cpu(evt->tag) & VLAN_VID_MASK;
	else
		adapter->pvid = 0;
}

static void be_async_grp5_evt_process(struct be_adapter *adapter,
		u32 trailer, struct be_mcc_compl *evt)
{
	u8 event_type = 0;

	event_type = (trailer >> ASYNC_TRAILER_EVENT_TYPE_SHIFT) &
		ASYNC_TRAILER_EVENT_TYPE_MASK;

	switch (event_type) {
	case ASYNC_EVENT_COS_PRIORITY:
		be_async_grp5_cos_priority_process(adapter,
			(struct be_async_event_grp5_cos_priority *)evt);
		break;
	case ASYNC_EVENT_QOS_SPEED:
		be_async_grp5_qos_speed_process(adapter,
			(struct be_async_event_grp5_qos_link_speed *)evt);
		break;
	case ASYNC_EVENT_PVID_STATE:
		be_async_grp5_pvid_state_process(adapter,
			(struct be_async_event_grp5_pvid_state *)evt);
		break;
	default:
		dev_warn(&adapter->pdev->dev, "Unknown grp5 event!\n");
		break;
	}
}

static inline bool is_link_state_evt(u32 trailer)
{
	return ((trailer >> ASYNC_TRAILER_EVENT_CODE_SHIFT) &
		ASYNC_TRAILER_EVENT_CODE_MASK) ==
				ASYNC_EVENT_CODE_LINK_STATE;
}

static inline bool is_grp5_evt(u32 trailer)
{
	return (((trailer >> ASYNC_TRAILER_EVENT_CODE_SHIFT) &
		ASYNC_TRAILER_EVENT_CODE_MASK) ==
				ASYNC_EVENT_CODE_GRP_5);
}

static struct be_mcc_compl *be_mcc_compl_get(struct be_adapter *adapter)
{
	struct be_queue_info *mcc_cq = &adapter->mcc_obj.cq;
	struct be_mcc_compl *compl = queue_tail_node(mcc_cq);

	if (be_mcc_compl_is_new(compl)) {
		queue_tail_inc(mcc_cq);
		return compl;
	}
	return NULL;
}

void be_async_mcc_enable(struct be_adapter *adapter)
{
	spin_lock_bh(&adapter->mcc_cq_lock);

	be_cq_notify(adapter, adapter->mcc_obj.cq.id, true, 0);
	adapter->mcc_obj.rearm_cq = true;

	spin_unlock_bh(&adapter->mcc_cq_lock);
}

void be_async_mcc_disable(struct be_adapter *adapter)
{
	adapter->mcc_obj.rearm_cq = false;
}

int be_process_mcc(struct be_adapter *adapter)
{
	struct be_mcc_compl *compl;
	int num = 0, status = 0;
	struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;

	spin_lock_bh(&adapter->mcc_cq_lock);
	while ((compl = be_mcc_compl_get(adapter))) {
		if (compl->flags & CQE_FLAGS_ASYNC_MASK) {
			/* Interpret flags as an async trailer */
			if (is_link_state_evt(compl->flags))
				be_async_link_state_process(adapter,
				(struct be_async_event_link_state *) compl);
			else if (is_grp5_evt(compl->flags))
				be_async_grp5_evt_process(adapter,
				compl->flags, compl);
		} else if (compl->flags & CQE_FLAGS_COMPLETED_MASK) {
			status = be_mcc_compl_process(adapter, compl);
			atomic_dec(&mcc_obj->q.used);
		}
		be_mcc_compl_use(compl);
		num++;
	}

	if (num)
		be_cq_notify(adapter, mcc_obj->cq.id, mcc_obj->rearm_cq, num);

	spin_unlock_bh(&adapter->mcc_cq_lock);
	return status;
}

/* Wait till no more pending mcc requests are present */
static int be_mcc_wait_compl(struct be_adapter *adapter)
{
#define mcc_timeout		120000 /* 12s timeout */
	int i, status = 0;
	struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;

	for (i = 0; i < mcc_timeout; i++) {
		if (be_error(adapter))
			return -EIO;

		status = be_process_mcc(adapter);

		if (atomic_read(&mcc_obj->q.used) == 0)
			break;
		udelay(100);
	}
	if (i == mcc_timeout) {
		dev_err(&adapter->pdev->dev, "FW not responding\n");
		adapter->fw_timeout = true;
		return -1;
	}
	return status;
}

/* Notify MCC requests and wait for completion */
static int be_mcc_notify_wait(struct be_adapter *adapter)
{
	be_mcc_notify(adapter);
	return be_mcc_wait_compl(adapter);
}

static int be_mbox_db_ready_wait(struct be_adapter *adapter, void __iomem *db)
{
	int msecs = 0;
	u32 ready;

	do {
		if (be_error(adapter))
			return -EIO;

		ready = ioread32(db);
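		/* An all-ones readback typically means the PCI device is
		 * no longer responding; bail out instead of polling a
		 * dead BAR.
		 */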
		if (ready == 0xffffffff)
			return -1;

		ready &= MPU_MAILBOX_DB_RDY_MASK;
		if (ready)
			break;

		if (msecs > 4000) {
			dev_err(&adapter->pdev->dev, "FW not responding\n");
			adapter->fw_timeout = true;
			be_detect_dump_ue(adapter);
			return -1;
		}

		msleep(1);
		msecs++;
	} while (true);

	return 0;
}

/*
 * Insert the mailbox address into the doorbell in two steps
 * Polls on the mbox doorbell till a command completion (or a timeout) occurs
 */
static int be_mbox_notify_wait(struct be_adapter *adapter)
{
	int status;
	u32 val = 0;
	void __iomem *db = adapter->db + MPU_MAILBOX_DB_OFFSET;
	struct be_dma_mem *mbox_mem = &adapter->mbox_mem;
	struct be_mcc_mailbox *mbox = mbox_mem->va;
	struct be_mcc_compl *compl = &mbox->compl;

	/* wait for ready to be set */
	status = be_mbox_db_ready_wait(adapter, db);
	if (status != 0)
		return status;

	val |= MPU_MAILBOX_DB_HI_MASK;
	/* at bits 2 - 31 place mbox dma addr msb bits 34 - 63 */
	val |= (upper_32_bits(mbox_mem->dma) >> 2) << 2;
	iowrite32(val, db);

	/* wait for ready to be set */
	status = be_mbox_db_ready_wait(adapter, db);
	if (status != 0)
		return status;

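	/* Bits 0 - 3 of the DMA address are never programmed into the
	 * doorbell in either step, so the mailbox memory is assumed to
	 * be at least 16-byte aligned.
	 */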
	val = 0;
	/* at bits 2 - 31 place mbox dma addr lsb bits 4 - 33 */
	val |= (u32)(mbox_mem->dma >> 4) << 2;
	iowrite32(val, db);

	status = be_mbox_db_ready_wait(adapter, db);
	if (status != 0)
		return status;

	/* A cq entry has been made now */
	if (be_mcc_compl_is_new(compl)) {
		status = be_mcc_compl_process(adapter, &mbox->compl);
		be_mcc_compl_use(compl);
		if (status)
			return status;
	} else {
		dev_err(&adapter->pdev->dev, "invalid mailbox completion\n");
		return -1;
	}
	return 0;
}

static int be_POST_stage_get(struct be_adapter *adapter, u16 *stage)
{
	u32 sem;

	if (lancer_chip(adapter))
		sem = ioread32(adapter->db + MPU_EP_SEMAPHORE_IF_TYPE2_OFFSET);
	else
		sem = ioread32(adapter->csr + MPU_EP_SEMAPHORE_OFFSET);

	*stage = sem & EP_SEMAPHORE_POST_STAGE_MASK;
	if ((sem >> EP_SEMAPHORE_POST_ERR_SHIFT) & EP_SEMAPHORE_POST_ERR_MASK)
		return -1;
	else
		return 0;
}

int be_cmd_POST(struct be_adapter *adapter)
{
	u16 stage;
	int status, timeout = 0;
	struct device *dev = &adapter->pdev->dev;

	do {
		status = be_POST_stage_get(adapter, &stage);
		if (status) {
			dev_err(dev, "POST error; stage=0x%x\n", stage);
			return -1;
		} else if (stage != POST_STAGE_ARMFW_RDY) {
			if (msleep_interruptible(2000)) {
				dev_err(dev, "Waiting for POST aborted\n");
				return -EINTR;
			}
			timeout += 2;
		} else {
			return 0;
		}
	} while (timeout < 60);

	dev_err(dev, "POST timeout; stage=0x%x\n", stage);
	return -1;
}

static inline struct be_sge *nonembedded_sgl(struct be_mcc_wrb *wrb)
{
	return &wrb->payload.sgl[0];
}

/* Don't touch the hdr after it's prepared */
/* mem will be NULL for embedded commands */
static void be_wrb_cmd_hdr_prepare(struct be_cmd_req_hdr *req_hdr,
				u8 subsystem, u8 opcode, int cmd_len,
				struct be_mcc_wrb *wrb, struct be_dma_mem *mem)
{
	struct be_sge *sge;

	req_hdr->opcode = opcode;
	req_hdr->subsystem = subsystem;
	req_hdr->request_length = cpu_to_le32(cmd_len - sizeof(*req_hdr));
	req_hdr->version = 0;

	wrb->tag0 = opcode;
	wrb->tag1 = subsystem;
	wrb->payload_length = cmd_len;
	if (mem) {
		wrb->embedded |= (1 & MCC_WRB_SGE_CNT_MASK) <<
			MCC_WRB_SGE_CNT_SHIFT;
		sge = nonembedded_sgl(wrb);
		sge->pa_hi = cpu_to_le32(upper_32_bits(mem->dma));
		sge->pa_lo = cpu_to_le32(mem->dma & 0xFFFFFFFF);
		sge->len = cpu_to_le32(mem->size);
	} else
		wrb->embedded |= MCC_WRB_EMBEDDED_MASK;
	be_dws_cpu_to_le(wrb, 8);
}

static void be_cmd_page_addrs_prepare(struct phys_addr *pages, u32 max_pages,
			struct be_dma_mem *mem)
{
	int i, buf_pages = min(PAGES_4K_SPANNED(mem->va, mem->size), max_pages);
	u64 dma = (u64)mem->dma;

	for (i = 0; i < buf_pages; i++) {
		pages[i].lo = cpu_to_le32(dma & 0xFFFFFFFF);
		pages[i].hi = cpu_to_le32(upper_32_bits(dma));
		dma += PAGE_SIZE_4K;
	}
}

/* Converts interrupt delay in microseconds to multiplier value */
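/* e.g. usec_delay = 8us => interrupt_rate = 125000/sec and, with the
 * integer arithmetic below, multiplier = ((651042 - 125000) * 10
 * / 125000 + 5) / 10 = 4 (the +5 rounds to the nearest value).
 */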
static u32 eq_delay_to_mult(u32 usec_delay)
{
#define MAX_INTR_RATE			651042
	const u32 round = 10;
	u32 multiplier;

	if (usec_delay == 0)
		multiplier = 0;
	else {
		u32 interrupt_rate = 1000000 / usec_delay;
		/* Max delay, corresponding to the lowest interrupt rate */
		if (interrupt_rate == 0)
			multiplier = 1023;
		else {
			multiplier = (MAX_INTR_RATE - interrupt_rate) * round;
			multiplier /= interrupt_rate;
			/* Round the multiplier to the closest value. */
			multiplier = (multiplier + round/2) / round;
			multiplier = min(multiplier, (u32)1023);
		}
	}
	return multiplier;
}

static inline struct be_mcc_wrb *wrb_from_mbox(struct be_adapter *adapter)
{
	struct be_dma_mem *mbox_mem = &adapter->mbox_mem;
	struct be_mcc_wrb *wrb
		= &((struct be_mcc_mailbox *)(mbox_mem->va))->wrb;
	memset(wrb, 0, sizeof(*wrb));
	return wrb;
}

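/* Gets a free WRB slot from the MCCQ; the caller is expected to hold
 * adapter->mcc_lock, which serializes producers on the queue.
 */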
static struct be_mcc_wrb *wrb_from_mccq(struct be_adapter *adapter)
{
	struct be_queue_info *mccq = &adapter->mcc_obj.q;
	struct be_mcc_wrb *wrb;

	if (atomic_read(&mccq->used) >= mccq->len) {
		dev_err(&adapter->pdev->dev, "Out of MCCQ wrbs\n");
		return NULL;
	}

	wrb = queue_head_node(mccq);
	queue_head_inc(mccq);
	atomic_inc(&mccq->used);
	memset(wrb, 0, sizeof(*wrb));
	return wrb;
}

/* Tell fw we're about to start firing cmds by writing a
 * special pattern across the wrb hdr; uses mbox
 */
int be_cmd_fw_init(struct be_adapter *adapter)
{
	u8 *wrb;
	int status;

	if (mutex_lock_interruptible(&adapter->mbox_lock))
		return -1;

	wrb = (u8 *)wrb_from_mbox(adapter);
	*wrb++ = 0xFF;
	*wrb++ = 0x12;
	*wrb++ = 0x34;
	*wrb++ = 0xFF;
	*wrb++ = 0xFF;
	*wrb++ = 0x56;
	*wrb++ = 0x78;
	*wrb = 0xFF;

	status = be_mbox_notify_wait(adapter);

	mutex_unlock(&adapter->mbox_lock);
	return status;
}

/* Tell fw we're done with firing cmds by writing a
 * special pattern across the wrb hdr; uses mbox
 */
int be_cmd_fw_clean(struct be_adapter *adapter)
{
	u8 *wrb;
	int status;

	if (mutex_lock_interruptible(&adapter->mbox_lock))
		return -1;

	wrb = (u8 *)wrb_from_mbox(adapter);
	*wrb++ = 0xFF;
	*wrb++ = 0xAA;
	*wrb++ = 0xBB;
	*wrb++ = 0xFF;
	*wrb++ = 0xFF;
	*wrb++ = 0xCC;
	*wrb++ = 0xDD;
	*wrb = 0xFF;

	status = be_mbox_notify_wait(adapter);

	mutex_unlock(&adapter->mbox_lock);
	return status;
}

int be_cmd_eq_create(struct be_adapter *adapter,
		struct be_queue_info *eq, int eq_delay)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_eq_create *req;
	struct be_dma_mem *q_mem = &eq->dma_mem;
	int status;

	if (mutex_lock_interruptible(&adapter->mbox_lock))
		return -1;

	wrb = wrb_from_mbox(adapter);
	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_EQ_CREATE, sizeof(*req), wrb, NULL);

	req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));

	AMAP_SET_BITS(struct amap_eq_context, valid, req->context, 1);
	/* 4 byte eqe */
	AMAP_SET_BITS(struct amap_eq_context, size, req->context, 0);
	AMAP_SET_BITS(struct amap_eq_context, count, req->context,
			__ilog2_u32(eq->len/256));
	AMAP_SET_BITS(struct amap_eq_context, delaymult, req->context,
			eq_delay_to_mult(eq_delay));
	be_dws_cpu_to_le(req->context, sizeof(req->context));

	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);

	status = be_mbox_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_eq_create *resp = embedded_payload(wrb);
		eq->id = le16_to_cpu(resp->eq_id);
		eq->created = true;
	}

	mutex_unlock(&adapter->mbox_lock);
	return status;
}

/* Use MCC */
int be_cmd_mac_addr_query(struct be_adapter *adapter, u8 *mac_addr,
			u8 type, bool permanent, u32 if_handle, u32 pmac_id)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_mac_query *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_NTWK_MAC_QUERY, sizeof(*req), wrb, NULL);
	req->type = type;
	if (permanent) {
		req->permanent = 1;
	} else {
		req->if_id = cpu_to_le16((u16) if_handle);
		req->pmac_id = cpu_to_le32(pmac_id);
		req->permanent = 0;
	}

	status = be_mcc_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_mac_query *resp = embedded_payload(wrb);
		memcpy(mac_addr, resp->mac.addr, ETH_ALEN);
	}

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

/* Uses synchronous MCCQ */
int be_cmd_pmac_add(struct be_adapter *adapter, u8 *mac_addr,
		u32 if_id, u32 *pmac_id, u32 domain)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_pmac_add *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_NTWK_PMAC_ADD, sizeof(*req), wrb, NULL);

	req->hdr.domain = domain;
	req->if_id = cpu_to_le32(if_id);
	memcpy(req->mac_address, mac_addr, ETH_ALEN);

	status = be_mcc_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_pmac_add *resp = embedded_payload(wrb);
		*pmac_id = le32_to_cpu(resp->pmac_id);
	}

err:
	spin_unlock_bh(&adapter->mcc_lock);

	if (status == MCC_STATUS_UNAUTHORIZED_REQUEST)
		status = -EPERM;

	return status;
}

/* Uses synchronous MCCQ */
int be_cmd_pmac_del(struct be_adapter *adapter, u32 if_id, int pmac_id, u32 dom)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_pmac_del *req;
	int status;

	if (pmac_id == -1)
		return 0;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_NTWK_PMAC_DEL, sizeof(*req), wrb, NULL);

	req->hdr.domain = dom;
	req->if_id = cpu_to_le32(if_id);
	req->pmac_id = cpu_to_le32(pmac_id);

	status = be_mcc_notify_wait(adapter);

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

/* Uses Mbox */
int be_cmd_cq_create(struct be_adapter *adapter, struct be_queue_info *cq,
		struct be_queue_info *eq, bool no_delay, int coalesce_wm)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_cq_create *req;
	struct be_dma_mem *q_mem = &cq->dma_mem;
	void *ctxt;
	int status;

	if (mutex_lock_interruptible(&adapter->mbox_lock))
		return -1;

	wrb = wrb_from_mbox(adapter);
	req = embedded_payload(wrb);
	ctxt = &req->context;

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_CQ_CREATE, sizeof(*req), wrb, NULL);

	req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));
	if (lancer_chip(adapter)) {
		req->hdr.version = 2;
		req->page_size = 1; /* 1 for 4K */
		AMAP_SET_BITS(struct amap_cq_context_lancer, nodelay, ctxt,
								no_delay);
		AMAP_SET_BITS(struct amap_cq_context_lancer, count, ctxt,
						__ilog2_u32(cq->len/256));
		AMAP_SET_BITS(struct amap_cq_context_lancer, valid, ctxt, 1);
		AMAP_SET_BITS(struct amap_cq_context_lancer, eventable,
								ctxt, 1);
		AMAP_SET_BITS(struct amap_cq_context_lancer, eqid,
								ctxt, eq->id);
	} else {
		AMAP_SET_BITS(struct amap_cq_context_be, coalescwm, ctxt,
								coalesce_wm);
		AMAP_SET_BITS(struct amap_cq_context_be, nodelay,
								ctxt, no_delay);
		AMAP_SET_BITS(struct amap_cq_context_be, count, ctxt,
						__ilog2_u32(cq->len/256));
		AMAP_SET_BITS(struct amap_cq_context_be, valid, ctxt, 1);
		AMAP_SET_BITS(struct amap_cq_context_be, eventable, ctxt, 1);
		AMAP_SET_BITS(struct amap_cq_context_be, eqid, ctxt, eq->id);
	}

	be_dws_cpu_to_le(ctxt, sizeof(req->context));

	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);

	status = be_mbox_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_cq_create *resp = embedded_payload(wrb);
		cq->id = le16_to_cpu(resp->cq_id);
		cq->created = true;
	}

	mutex_unlock(&adapter->mbox_lock);

	return status;
}

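/* Encode a ring length for the hw ring-size context fields: for a
 * power-of-2 length, fls() yields log2(len) + 1, and the largest
 * supported ring (q_len = 32768, fls = 16) wraps to the encoding 0.
 */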
static u32 be_encoded_q_len(int q_len)
{
	u32 len_encoded = fls(q_len); /* log2(len) + 1 */
	if (len_encoded == 16)
		len_encoded = 0;
	return len_encoded;
}

int be_cmd_mccq_ext_create(struct be_adapter *adapter,
			struct be_queue_info *mccq,
			struct be_queue_info *cq)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_mcc_ext_create *req;
	struct be_dma_mem *q_mem = &mccq->dma_mem;
	void *ctxt;
	int status;

	if (mutex_lock_interruptible(&adapter->mbox_lock))
		return -1;

	wrb = wrb_from_mbox(adapter);
	req = embedded_payload(wrb);
	ctxt = &req->context;

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			OPCODE_COMMON_MCC_CREATE_EXT, sizeof(*req), wrb, NULL);

	req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));
	if (lancer_chip(adapter)) {
		req->hdr.version = 1;
		req->cq_id = cpu_to_le16(cq->id);

		AMAP_SET_BITS(struct amap_mcc_context_lancer, ring_size, ctxt,
						be_encoded_q_len(mccq->len));
		AMAP_SET_BITS(struct amap_mcc_context_lancer, valid, ctxt, 1);
		AMAP_SET_BITS(struct amap_mcc_context_lancer, async_cq_id,
								ctxt, cq->id);
		AMAP_SET_BITS(struct amap_mcc_context_lancer, async_cq_valid,
								ctxt, 1);
	} else {
		AMAP_SET_BITS(struct amap_mcc_context_be, valid, ctxt, 1);
		AMAP_SET_BITS(struct amap_mcc_context_be, ring_size, ctxt,
						be_encoded_q_len(mccq->len));
		AMAP_SET_BITS(struct amap_mcc_context_be, cq_id, ctxt, cq->id);
	}

	/* Subscribe to Link State and Group 5 Events (bits 1 and 5 set) */
	req->async_event_bitmap[0] = cpu_to_le32(0x00000022);
	be_dws_cpu_to_le(ctxt, sizeof(req->context));

	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);

	status = be_mbox_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_mcc_create *resp = embedded_payload(wrb);
		mccq->id = le16_to_cpu(resp->id);
		mccq->created = true;
	}
	mutex_unlock(&adapter->mbox_lock);

	return status;
}

int be_cmd_mccq_org_create(struct be_adapter *adapter,
			struct be_queue_info *mccq,
			struct be_queue_info *cq)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_mcc_create *req;
	struct be_dma_mem *q_mem = &mccq->dma_mem;
	void *ctxt;
	int status;

	if (mutex_lock_interruptible(&adapter->mbox_lock))
		return -1;

	wrb = wrb_from_mbox(adapter);
	req = embedded_payload(wrb);
	ctxt = &req->context;

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			OPCODE_COMMON_MCC_CREATE, sizeof(*req), wrb, NULL);

	req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));

	AMAP_SET_BITS(struct amap_mcc_context_be, valid, ctxt, 1);
	AMAP_SET_BITS(struct amap_mcc_context_be, ring_size, ctxt,
			be_encoded_q_len(mccq->len));
	AMAP_SET_BITS(struct amap_mcc_context_be, cq_id, ctxt, cq->id);

	be_dws_cpu_to_le(ctxt, sizeof(req->context));

	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);

	status = be_mbox_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_mcc_create *resp = embedded_payload(wrb);
		mccq->id = le16_to_cpu(resp->id);
		mccq->created = true;
	}

	mutex_unlock(&adapter->mbox_lock);
	return status;
}

int be_cmd_mccq_create(struct be_adapter *adapter,
			struct be_queue_info *mccq,
			struct be_queue_info *cq)
{
	int status;

	status = be_cmd_mccq_ext_create(adapter, mccq, cq);
	if (status && !lancer_chip(adapter)) {
		dev_warn(&adapter->pdev->dev, "Upgrade to F/W ver 2.102.235.0 "
			"or newer to avoid conflicting priorities between NIC "
			"and FCoE traffic\n");
		status = be_cmd_mccq_org_create(adapter, mccq, cq);
	}
	return status;
}

int be_cmd_txq_create(struct be_adapter *adapter,
			struct be_queue_info *txq,
			struct be_queue_info *cq)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_eth_tx_create *req;
	struct be_dma_mem *q_mem = &txq->dma_mem;
	void *ctxt;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}

	req = embedded_payload(wrb);
	ctxt = &req->context;

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
		OPCODE_ETH_TX_CREATE, sizeof(*req), wrb, NULL);

	if (lancer_chip(adapter)) {
		req->hdr.version = 1;
		AMAP_SET_BITS(struct amap_tx_context, if_id, ctxt,
					adapter->if_handle);
	}

	req->num_pages = PAGES_4K_SPANNED(q_mem->va, q_mem->size);
	req->ulp_num = BE_ULP1_NUM;
	req->type = BE_ETH_TX_RING_TYPE_STANDARD;

	AMAP_SET_BITS(struct amap_tx_context, tx_ring_size, ctxt,
		be_encoded_q_len(txq->len));
	AMAP_SET_BITS(struct amap_tx_context, ctx_valid, ctxt, 1);
	AMAP_SET_BITS(struct amap_tx_context, cq_id_send, ctxt, cq->id);

	be_dws_cpu_to_le(ctxt, sizeof(req->context));

	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);

	status = be_mcc_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_eth_tx_create *resp = embedded_payload(wrb);
		txq->id = le16_to_cpu(resp->cid);
		txq->created = true;
	}

err:
	spin_unlock_bh(&adapter->mcc_lock);

	return status;
}

/* Uses MCC */
int be_cmd_rxq_create(struct be_adapter *adapter,
		struct be_queue_info *rxq, u16 cq_id, u16 frag_size,
		u32 if_id, u32 rss, u8 *rss_id)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_eth_rx_create *req;
	struct be_dma_mem *q_mem = &rxq->dma_mem;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
				OPCODE_ETH_RX_CREATE, sizeof(*req), wrb, NULL);

	req->cq_id = cpu_to_le16(cq_id);
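	/* The field carries log2 of the fragment size (e.g. 2048 -> 11),
	 * assuming frag_size is a power of 2.
	 */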
	req->frag_size = fls(frag_size) - 1;
	req->num_pages = 2;
	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
	req->interface_id = cpu_to_le32(if_id);
	req->max_frame_size = cpu_to_le16(BE_MAX_JUMBO_FRAME_SIZE);
	req->rss_queue = cpu_to_le32(rss);

	status = be_mcc_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_eth_rx_create *resp = embedded_payload(wrb);
		rxq->id = le16_to_cpu(resp->id);
		rxq->created = true;
		*rss_id = resp->rss_id;
	}

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

/* Generic destroyer function for all types of queues
 * Uses Mbox
 */
int be_cmd_q_destroy(struct be_adapter *adapter, struct be_queue_info *q,
		int queue_type)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_q_destroy *req;
	u8 subsys = 0, opcode = 0;
	int status;

	if (mutex_lock_interruptible(&adapter->mbox_lock))
		return -1;

	wrb = wrb_from_mbox(adapter);
	req = embedded_payload(wrb);

	switch (queue_type) {
	case QTYPE_EQ:
		subsys = CMD_SUBSYSTEM_COMMON;
		opcode = OPCODE_COMMON_EQ_DESTROY;
		break;
	case QTYPE_CQ:
		subsys = CMD_SUBSYSTEM_COMMON;
		opcode = OPCODE_COMMON_CQ_DESTROY;
		break;
	case QTYPE_TXQ:
		subsys = CMD_SUBSYSTEM_ETH;
		opcode = OPCODE_ETH_TX_DESTROY;
		break;
	case QTYPE_RXQ:
		subsys = CMD_SUBSYSTEM_ETH;
		opcode = OPCODE_ETH_RX_DESTROY;
		break;
	case QTYPE_MCCQ:
		subsys = CMD_SUBSYSTEM_COMMON;
		opcode = OPCODE_COMMON_MCC_DESTROY;
		break;
	default:
		BUG();
	}

	be_wrb_cmd_hdr_prepare(&req->hdr, subsys, opcode, sizeof(*req), wrb,
				NULL);
	req->id = cpu_to_le16(q->id);

	status = be_mbox_notify_wait(adapter);
	if (!status)
		q->created = false;

	mutex_unlock(&adapter->mbox_lock);
	return status;
}

/* Uses MCC */
int be_cmd_rxq_destroy(struct be_adapter *adapter, struct be_queue_info *q)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_q_destroy *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
			OPCODE_ETH_RX_DESTROY, sizeof(*req), wrb, NULL);
	req->id = cpu_to_le16(q->id);

	status = be_mcc_notify_wait(adapter);
	if (!status)
		q->created = false;

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

/* Create an rx filtering policy configuration on an i/f
 * Uses MCCQ
 */
int be_cmd_if_create(struct be_adapter *adapter, u32 cap_flags, u32 en_flags,
		u8 *mac, u32 *if_handle, u32 *pmac_id, u32 domain)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_if_create *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_NTWK_INTERFACE_CREATE, sizeof(*req), wrb, NULL);
	req->hdr.domain = domain;
	req->capability_flags = cpu_to_le32(cap_flags);
	req->enable_flags = cpu_to_le32(en_flags);
	if (mac)
		memcpy(req->mac_addr, mac, ETH_ALEN);
	else
		req->pmac_invalid = true;

	status = be_mcc_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_if_create *resp = embedded_payload(wrb);
		*if_handle = le32_to_cpu(resp->interface_id);
		if (mac)
			*pmac_id = le32_to_cpu(resp->pmac_id);
	}

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

/* Uses MCCQ */
int be_cmd_if_destroy(struct be_adapter *adapter, int interface_id, u32 domain)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_if_destroy *req;
	int status;

	if (interface_id == -1)
		return 0;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_NTWK_INTERFACE_DESTROY, sizeof(*req), wrb, NULL);
	req->hdr.domain = domain;
	req->interface_id = cpu_to_le32(interface_id);

	status = be_mcc_notify_wait(adapter);
err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

/* Get stats is a non-embedded command: the request is not embedded inside
 * WRB but is a separate dma memory block
 * Uses asynchronous MCC
 */
int be_cmd_get_stats(struct be_adapter *adapter, struct be_dma_mem *nonemb_cmd)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_hdr *hdr;
	int status = 0;

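	/* Piggy-back a die-temperature query onto every 64th
	 * (be_get_temp_freq) stats request; the frequency is cleared in
	 * be_mcc_compl_process() if FW doesn't support the command.
	 */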
	if (MODULO(adapter->work_counter, be_get_temp_freq) == 0)
		be_cmd_get_die_temperature(adapter);

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	hdr = nonemb_cmd->va;

	be_wrb_cmd_hdr_prepare(hdr, CMD_SUBSYSTEM_ETH,
		OPCODE_ETH_GET_STATISTICS, nonemb_cmd->size, wrb, nonemb_cmd);

	if (adapter->generation == BE_GEN3)
		hdr->version = 1;

	be_mcc_notify(adapter);
	adapter->stats_cmd_sent = true;

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

/* Lancer Stats */
int lancer_cmd_get_pport_stats(struct be_adapter *adapter,
				struct be_dma_mem *nonemb_cmd)
{
	struct be_mcc_wrb *wrb;
	struct lancer_cmd_req_pport_stats *req;
	int status = 0;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = nonemb_cmd->va;

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
			OPCODE_ETH_GET_PPORT_STATS, nonemb_cmd->size, wrb,
			nonemb_cmd);

	req->cmd_params.params.pport_num = cpu_to_le16(adapter->port_num);
	req->cmd_params.params.reset_stats = 0;

	be_mcc_notify(adapter);
	adapter->stats_cmd_sent = true;

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

/* Uses synchronous mcc */
int be_cmd_link_status_query(struct be_adapter *adapter, u8 *mac_speed,
			     u16 *link_speed, u8 *link_status, u32 dom)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_link_status *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	if (link_status)
		*link_status = LINK_DOWN;

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_NTWK_LINK_STATUS_QUERY, sizeof(*req), wrb, NULL);

	if (adapter->generation == BE_GEN3 || lancer_chip(adapter))
		req->hdr.version = 1;

	req->hdr.domain = dom;

	status = be_mcc_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_link_status *resp = embedded_payload(wrb);
		if (resp->mac_speed != PHY_LINK_SPEED_ZERO) {
			if (link_speed)
				*link_speed = le16_to_cpu(resp->link_speed);
			if (mac_speed)
				*mac_speed = resp->mac_speed;
		}
		if (link_status)
			*link_status = resp->logical_link_status;
	}

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

/* Fires the cmd asynchronously; the response is picked up later in
 * be_mcc_compl_process()
 */
int be_cmd_get_die_temperature(struct be_adapter *adapter)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_get_cntl_addnl_attribs *req;
	u16 mccq_index;
	int status = 0;

	spin_lock_bh(&adapter->mcc_lock);

	mccq_index = adapter->mcc_obj.q.head;

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES, sizeof(*req),
		wrb, NULL);

	wrb->tag1 = mccq_index;

	be_mcc_notify(adapter);

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

/* Uses synchronous mcc */
int be_cmd_get_reg_len(struct be_adapter *adapter, u32 *log_size)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_get_fat *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_MANAGE_FAT, sizeof(*req), wrb, NULL);
	req->fat_operation = cpu_to_le32(QUERY_FAT);
	status = be_mcc_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_get_fat *resp = embedded_payload(wrb);
		if (log_size && resp->log_size)
			*log_size = le32_to_cpu(resp->log_size) -
					sizeof(u32);
	}
err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

void be_cmd_get_regs(struct be_adapter *adapter, u32 buf_len, void *buf)
{
	struct be_dma_mem get_fat_cmd;
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_get_fat *req;
	u32 offset = 0, total_size, buf_size,
				log_offset = sizeof(u32), payload_len;
	int status;

	if (buf_len == 0)
		return;

	total_size = buf_len;

	get_fat_cmd.size = sizeof(struct be_cmd_req_get_fat) + 60*1024;
	get_fat_cmd.va = pci_alloc_consistent(adapter->pdev,
			get_fat_cmd.size,
			&get_fat_cmd.dma);
	if (!get_fat_cmd.va) {
		status = -ENOMEM;
		dev_err(&adapter->pdev->dev,
		"Memory allocation failure while retrieving FAT data\n");
		return;
	}

	spin_lock_bh(&adapter->mcc_lock);

	while (total_size) {
		buf_size = min(total_size, (u32)60*1024);
		total_size -= buf_size;

		wrb = wrb_from_mccq(adapter);
		if (!wrb) {
			status = -EBUSY;
			goto err;
		}
		req = get_fat_cmd.va;

		payload_len = sizeof(struct be_cmd_req_get_fat) + buf_size;
		be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
				OPCODE_COMMON_MANAGE_FAT, payload_len, wrb,
				&get_fat_cmd);

		req->fat_operation = cpu_to_le32(RETRIEVE_FAT);
		req->read_log_offset = cpu_to_le32(log_offset);
		req->read_log_length = cpu_to_le32(buf_size);
		req->data_buffer_size = cpu_to_le32(buf_size);

		status = be_mcc_notify_wait(adapter);
		if (!status) {
			struct be_cmd_resp_get_fat *resp = get_fat_cmd.va;
			memcpy(buf + offset,
				resp->data_buffer,
				le32_to_cpu(resp->read_log_length));
		} else {
			dev_err(&adapter->pdev->dev, "FAT Table Retrieve error\n");
			goto err;
		}
		offset += buf_size;
		log_offset += buf_size;
	}
err:
	spin_unlock_bh(&adapter->mcc_lock);
	pci_free_consistent(adapter->pdev, get_fat_cmd.size,
			get_fat_cmd.va,
			get_fat_cmd.dma);
}

/* Uses synchronous mcc */
int be_cmd_get_fw_ver(struct be_adapter *adapter, char *fw_ver,
			char *fw_on_flash)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_get_fw_version *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}

	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_GET_FW_VERSION, sizeof(*req), wrb, NULL);
	status = be_mcc_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_get_fw_version *resp = embedded_payload(wrb);
		strcpy(fw_ver, resp->firmware_version_string);
		if (fw_on_flash)
			strcpy(fw_on_flash, resp->fw_on_flash_version_string);
	}
err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

/* Set the EQ delay interval of an EQ to the specified value
 * Uses async mcc
 */
int be_cmd_modify_eqd(struct be_adapter *adapter, u32 eq_id, u32 eqd)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_modify_eq_delay *req;
	int status = 0;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_MODIFY_EQ_DELAY, sizeof(*req), wrb, NULL);

	req->num_eq = cpu_to_le32(1);
	req->delay[0].eq_id = cpu_to_le32(eq_id);
	req->delay[0].phase = 0;
	req->delay[0].delay_multiplier = cpu_to_le32(eqd);

	be_mcc_notify(adapter);

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

/* Uses synchronous mcc */
int be_cmd_vlan_config(struct be_adapter *adapter, u32 if_id, u16 *vtag_array,
			u32 num, bool untagged, bool promiscuous)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_vlan_config *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_NTWK_VLAN_CONFIG, sizeof(*req), wrb, NULL);

	req->interface_id = if_id;
	req->promiscuous = promiscuous;
	req->untagged = untagged;
	req->num_vlan = num;
	if (!promiscuous) {
		memcpy(req->normal_vlan, vtag_array,
			req->num_vlan * sizeof(vtag_array[0]));
	}

	status = be_mcc_notify_wait(adapter);

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

int be_cmd_rx_filter(struct be_adapter *adapter, u32 flags, u32 value)
{
	struct be_mcc_wrb *wrb;
	struct be_dma_mem *mem = &adapter->rx_filter;
	struct be_cmd_req_rx_filter *req = mem->va;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	memset(req, 0, sizeof(*req));
	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
				OPCODE_COMMON_NTWK_RX_FILTER, sizeof(*req),
				wrb, mem);

	req->if_id = cpu_to_le32(adapter->if_handle);
	if (flags & IFF_PROMISC) {
		req->if_flags_mask = cpu_to_le32(BE_IF_FLAGS_PROMISCUOUS |
					BE_IF_FLAGS_VLAN_PROMISCUOUS);
		if (value == ON)
			req->if_flags = cpu_to_le32(BE_IF_FLAGS_PROMISCUOUS |
						BE_IF_FLAGS_VLAN_PROMISCUOUS);
	} else if (flags & IFF_ALLMULTI) {
		req->if_flags_mask = req->if_flags =
				cpu_to_le32(BE_IF_FLAGS_MCAST_PROMISCUOUS);
	} else {
		struct netdev_hw_addr *ha;
		int i = 0;

		req->if_flags_mask = req->if_flags =
				cpu_to_le32(BE_IF_FLAGS_MULTICAST);

		/* Reset mcast promisc mode if already set by setting mask
		 * and not setting flags field
		 */
		req->if_flags_mask |=
				cpu_to_le32(BE_IF_FLAGS_MCAST_PROMISCUOUS);

		req->mcast_num = cpu_to_le32(netdev_mc_count(adapter->netdev));
		netdev_for_each_mc_addr(ha, adapter->netdev)
			memcpy(req->mcast_mac[i++].byte, ha->addr, ETH_ALEN);
	}

	status = be_mcc_notify_wait(adapter);
err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

/* Uses synchronous mcc */
int be_cmd_set_flow_control(struct be_adapter *adapter, u32 tx_fc, u32 rx_fc)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_set_flow_control *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_SET_FLOW_CONTROL, sizeof(*req), wrb, NULL);

	req->tx_flow_control = cpu_to_le16((u16)tx_fc);
	req->rx_flow_control = cpu_to_le16((u16)rx_fc);

	status = be_mcc_notify_wait(adapter);

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

/* Uses sync mcc */
int be_cmd_get_flow_control(struct be_adapter *adapter, u32 *tx_fc, u32 *rx_fc)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_get_flow_control *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_GET_FLOW_CONTROL, sizeof(*req), wrb, NULL);

	status = be_mcc_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_get_flow_control *resp =
						embedded_payload(wrb);
		*tx_fc = le16_to_cpu(resp->tx_flow_control);
		*rx_fc = le16_to_cpu(resp->rx_flow_control);
	}

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

/* Uses mbox */
int be_cmd_query_fw_cfg(struct be_adapter *adapter, u32 *port_num,
		u32 *mode, u32 *caps)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_query_fw_cfg *req;
	int status;

	if (mutex_lock_interruptible(&adapter->mbox_lock))
		return -1;

	wrb = wrb_from_mbox(adapter);
	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_QUERY_FIRMWARE_CONFIG, sizeof(*req), wrb, NULL);

	status = be_mbox_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_query_fw_cfg *resp = embedded_payload(wrb);
		*port_num = le32_to_cpu(resp->phys_port);
		*mode = le32_to_cpu(resp->function_mode);
		*caps = le32_to_cpu(resp->function_caps);
	}

	mutex_unlock(&adapter->mbox_lock);
	return status;
}

/* Uses mbox */
int be_cmd_reset_function(struct be_adapter *adapter)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_hdr *req;
	int status;

	if (mutex_lock_interruptible(&adapter->mbox_lock))
		return -1;

	wrb = wrb_from_mbox(adapter);
	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(req, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_FUNCTION_RESET, sizeof(*req), wrb, NULL);

	status = be_mbox_notify_wait(adapter);

	mutex_unlock(&adapter->mbox_lock);
	return status;
}

int be_cmd_rss_config(struct be_adapter *adapter, u8 *rsstable, u16 table_size)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_rss_config *req;
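	/* Fixed (arbitrary) 40-byte RSS hash key; a constant key keeps
	 * the flow-to-queue mapping stable across driver reloads.
	 */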
	u32 myhash[10] = {0x15d43fa5, 0x2534685a, 0x5f87693a, 0x5668494e,
			0x33cf6a53, 0x383334c6, 0x76ac4257, 0x59b242b2,
			0x3ea83c02, 0x4a110304};
	int status;

	if (mutex_lock_interruptible(&adapter->mbox_lock))
		return -1;

	wrb = wrb_from_mbox(adapter);
	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
		OPCODE_ETH_RSS_CONFIG, sizeof(*req), wrb, NULL);

	req->if_id = cpu_to_le32(adapter->if_handle);
	req->enable_rss = cpu_to_le16(RSS_ENABLE_TCP_IPV4 | RSS_ENABLE_IPV4 |
				      RSS_ENABLE_TCP_IPV6 | RSS_ENABLE_IPV6);
	req->cpu_table_size_log2 = cpu_to_le16(fls(table_size) - 1);
	memcpy(req->cpu_table, rsstable, table_size);
	memcpy(req->hash, myhash, sizeof(myhash));
	be_dws_cpu_to_le(req->hash, sizeof(req->hash));

	status = be_mbox_notify_wait(adapter);

	mutex_unlock(&adapter->mbox_lock);
	return status;
}

/* Uses sync mcc */
int be_cmd_set_beacon_state(struct be_adapter *adapter, u8 port_num,
			u8 bcn, u8 sts, u8 state)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_enable_disable_beacon *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_ENABLE_DISABLE_BEACON, sizeof(*req), wrb, NULL);

	req->port_num = port_num;
	req->beacon_state = state;
	req->beacon_duration = bcn;
	req->status_duration = sts;

	status = be_mcc_notify_wait(adapter);

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

/* Uses sync mcc */
int be_cmd_get_beacon_state(struct be_adapter *adapter, u8 port_num, u32 *state)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_get_beacon_state *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_GET_BEACON_STATE, sizeof(*req), wrb, NULL);

	req->port_num = port_num;

	status = be_mcc_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_get_beacon_state *resp =
						embedded_payload(wrb);
		*state = resp->beacon_state;
	}

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

int lancer_cmd_write_object(struct be_adapter *adapter, struct be_dma_mem *cmd,
			u32 data_size, u32 data_offset, const char *obj_name,
			u32 *data_written, u8 *addn_status)
{
	struct be_mcc_wrb *wrb;
	struct lancer_cmd_req_write_object *req;
	struct lancer_cmd_resp_write_object *resp;
	void *ctxt = NULL;
	int status;

	spin_lock_bh(&adapter->mcc_lock);
	adapter->flash_status = 0;

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err_unlock;
	}

	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
				OPCODE_COMMON_WRITE_OBJECT,
				sizeof(struct lancer_cmd_req_write_object), wrb,
				NULL);

	ctxt = &req->context;
	AMAP_SET_BITS(struct amap_lancer_write_obj_context,
			write_length, ctxt, data_size);

	if (data_size == 0)
		AMAP_SET_BITS(struct amap_lancer_write_obj_context,
				eof, ctxt, 1);
	else
		AMAP_SET_BITS(struct amap_lancer_write_obj_context,
				eof, ctxt, 0);

	be_dws_cpu_to_le(ctxt, sizeof(req->context));
	req->write_offset = cpu_to_le32(data_offset);
	strcpy(req->object_name, obj_name);
	req->descriptor_count = cpu_to_le32(1);
	req->buf_len = cpu_to_le32(data_size);
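	/* The object data lives in the same DMA buffer, immediately
	 * after the request header, hence the sizeof() offset on the
	 * address programmed below.
	 */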
	req->addr_low = cpu_to_le32((cmd->dma +
				sizeof(struct lancer_cmd_req_write_object))
				& 0xFFFFFFFF);
	req->addr_high = cpu_to_le32(upper_32_bits(cmd->dma +
				sizeof(struct lancer_cmd_req_write_object)));

	be_mcc_notify(adapter);
	spin_unlock_bh(&adapter->mcc_lock);

	if (!wait_for_completion_timeout(&adapter->flash_compl,
			msecs_to_jiffies(12000)))
		status = -1;
	else
		status = adapter->flash_status;

	resp = embedded_payload(wrb);
	if (!status) {
		*data_written = le32_to_cpu(resp->actual_write_len);
	} else {
		*addn_status = resp->additional_status;
		status = resp->status;
	}

	return status;

err_unlock:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

int lancer_cmd_read_object(struct be_adapter *adapter, struct be_dma_mem *cmd,
		u32 data_size, u32 data_offset, const char *obj_name,
		u32 *data_read, u32 *eof, u8 *addn_status)
{
	struct be_mcc_wrb *wrb;
	struct lancer_cmd_req_read_object *req;
	struct lancer_cmd_resp_read_object *resp;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err_unlock;
	}

	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			OPCODE_COMMON_READ_OBJECT,
			sizeof(struct lancer_cmd_req_read_object), wrb,
			NULL);

	req->desired_read_len = cpu_to_le32(data_size);
	req->read_offset = cpu_to_le32(data_offset);
	strcpy(req->object_name, obj_name);
	req->descriptor_count = cpu_to_le32(1);
	req->buf_len = cpu_to_le32(data_size);
	req->addr_low = cpu_to_le32((cmd->dma & 0xFFFFFFFF));
	req->addr_high = cpu_to_le32(upper_32_bits(cmd->dma));

	status = be_mcc_notify_wait(adapter);

	resp = embedded_payload(wrb);
	if (!status) {
		*data_read = le32_to_cpu(resp->actual_read_len);
		*eof = le32_to_cpu(resp->eof);
	} else {
		*addn_status = resp->additional_status;
	}

err_unlock:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

int be_cmd_write_flashrom(struct be_adapter *adapter, struct be_dma_mem *cmd,
			u32 flash_type, u32 flash_opcode, u32 buf_size)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_write_flashrom *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);
	adapter->flash_status = 0;

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err_unlock;
	}
	req = cmd->va;

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_WRITE_FLASHROM, cmd->size, wrb, cmd);

	req->params.op_type = cpu_to_le32(flash_type);
	req->params.op_code = cpu_to_le32(flash_opcode);
	req->params.data_buf_size = cpu_to_le32(buf_size);

	be_mcc_notify(adapter);
	spin_unlock_bh(&adapter->mcc_lock);

	if (!wait_for_completion_timeout(&adapter->flash_compl,
			msecs_to_jiffies(40000)))
		status = -1;
	else
		status = adapter->flash_status;

	return status;

err_unlock:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

int be_cmd_get_flash_crc(struct be_adapter *adapter, u8 *flashed_crc,
			 int offset)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_write_flashrom *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_READ_FLASHROM, sizeof(*req)+4, wrb, NULL);

	req->params.op_type = cpu_to_le32(IMG_TYPE_REDBOOT);
	req->params.op_code = cpu_to_le32(FLASHROM_OPER_REPORT);
	req->params.offset = cpu_to_le32(offset);
	req->params.data_buf_size = cpu_to_le32(0x4);

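	/* For this embedded cmd the response lands in the same WRB
	 * payload, so the 4 CRC bytes are read back directly out of
	 * req->params.data_buf.
	 */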
	status = be_mcc_notify_wait(adapter);
	if (!status)
		memcpy(flashed_crc, req->params.data_buf, 4);

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

int be_cmd_enable_magic_wol(struct be_adapter *adapter, u8 *mac,
				struct be_dma_mem *nonemb_cmd)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_acpi_wol_magic_config *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = nonemb_cmd->va;

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
		OPCODE_ETH_ACPI_WOL_MAGIC_CONFIG, sizeof(*req), wrb,
		nonemb_cmd);
	memcpy(req->magic_mac, mac, ETH_ALEN);

	status = be_mcc_notify_wait(adapter);

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

int be_cmd_set_loopback(struct be_adapter *adapter, u8 port_num,
			u8 loopback_type, u8 enable)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_set_lmode *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}

	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_LOWLEVEL,
			OPCODE_LOWLEVEL_SET_LOOPBACK_MODE, sizeof(*req), wrb,
			NULL);

	req->src_port = port_num;
	req->dest_port = port_num;
	req->loopback_type = loopback_type;
	req->loopback_state = enable;

	status = be_mcc_notify_wait(adapter);
err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}
2026
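/* Uses synchronous MCCQ */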
int be_cmd_loopback_test(struct be_adapter *adapter, u32 port_num,
		u32 loopback_type, u32 pkt_size, u32 num_pkts, u64 pattern)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_loopback_test *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}

	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_LOWLEVEL,
			OPCODE_LOWLEVEL_LOOPBACK_TEST, sizeof(*req), wrb, NULL);
	req->hdr.timeout = cpu_to_le32(4);

	req->pattern = cpu_to_le64(pattern);
	req->src_port = cpu_to_le32(port_num);
	req->dest_port = cpu_to_le32(port_num);
	req->pkt_size = cpu_to_le32(pkt_size);
	req->num_pkts = cpu_to_le32(num_pkts);
	req->loopback_type = cpu_to_le32(loopback_type);

	status = be_mcc_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_loopback_test *resp = embedded_payload(wrb);
		status = le32_to_cpu(resp->status);
	}

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

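/* Uses synchronous MCCQ */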
int be_cmd_ddr_dma_test(struct be_adapter *adapter, u64 pattern,
				u32 byte_cnt, struct be_dma_mem *cmd)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_ddrdma_test *req;
	int status;
	int i;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = cmd->va;
	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_LOWLEVEL,
			OPCODE_LOWLEVEL_HOST_DDR_DMA, cmd->size, wrb, cmd);

	req->pattern = cpu_to_le64(pattern);
	req->byte_count = cpu_to_le32(byte_cnt);
	/* Replicate the 8-byte test pattern across the entire send buffer */
	for (i = 0; i < byte_cnt; i++)
		req->snd_buff[i] = (u8)(pattern >> ((i % 8) * 8));

	status = be_mcc_notify_wait(adapter);

	if (!status) {
		struct be_cmd_resp_ddrdma_test *resp;
		resp = cmd->va;
		if ((memcmp(resp->rcv_buff, req->snd_buff, byte_cnt) != 0) ||
				resp->snd_err) {
			status = -1;
		}
	}

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

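/* Uses synchronous MCCQ */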
int be_cmd_get_seeprom_data(struct be_adapter *adapter,
				struct be_dma_mem *nonemb_cmd)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_seeprom_read *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = nonemb_cmd->va;

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			OPCODE_COMMON_SEEPROM_READ, sizeof(*req), wrb,
			nonemb_cmd);

	status = be_mcc_notify_wait(adapter);

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

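/* Uses synchronous MCCQ */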
int be_cmd_get_phy_info(struct be_adapter *adapter,
				struct be_phy_info *phy_info)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_get_phy_info *req;
	struct be_dma_mem cmd;
	int status;

	/* Allocate and free the DMA buffer outside the mcc_lock critical
	 * section so that no coherent-DMA allocation or free runs with
	 * BHs disabled.
	 */
	cmd.size = sizeof(struct be_cmd_req_get_phy_info);
	cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size, &cmd.dma);
	if (!cmd.va) {
		dev_err(&adapter->pdev->dev, "Memory alloc failure\n");
		return -ENOMEM;
	}

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}

	req = cmd.va;

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			OPCODE_COMMON_GET_PHY_DETAILS, sizeof(*req),
			wrb, &cmd);

	status = be_mcc_notify_wait(adapter);
	if (!status) {
		struct be_phy_info *resp_phy_info =
				cmd.va + sizeof(struct be_cmd_req_hdr);
		phy_info->phy_type = le16_to_cpu(resp_phy_info->phy_type);
		phy_info->interface_type =
			le16_to_cpu(resp_phy_info->interface_type);
	}
err:
	spin_unlock_bh(&adapter->mcc_lock);
	pci_free_consistent(adapter->pdev, cmd.size, cmd.va, cmd.dma);
	return status;
}

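/* Uses synchronous MCCQ */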
int be_cmd_set_qos(struct be_adapter *adapter, u32 bps, u32 domain)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_set_qos *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}

	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			OPCODE_COMMON_SET_QOS, sizeof(*req), wrb, NULL);

	req->hdr.domain = domain;
	req->valid_bits = cpu_to_le32(BE_QOS_BITS_NIC);
	req->max_bps_nic = cpu_to_le32(bps);

	status = be_mcc_notify_wait(adapter);

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

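/* Uses mbox */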
int be_cmd_get_cntl_attributes(struct be_adapter *adapter)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_cntl_attribs *req;
	struct be_cmd_resp_cntl_attribs *resp;
	int status;
	int payload_len = max(sizeof(*req), sizeof(*resp));
	struct mgmt_controller_attrib *attribs;
	struct be_dma_mem attribs_cmd;

	memset(&attribs_cmd, 0, sizeof(struct be_dma_mem));
	attribs_cmd.size = sizeof(struct be_cmd_resp_cntl_attribs);
	attribs_cmd.va = pci_alloc_consistent(adapter->pdev, attribs_cmd.size,
						&attribs_cmd.dma);
	if (!attribs_cmd.va) {
		dev_err(&adapter->pdev->dev, "Memory allocation failure\n");
		return -ENOMEM;
	}

	if (mutex_lock_interruptible(&adapter->mbox_lock)) {
		/* Don't leak the DMA buffer if the lock wait is interrupted */
		status = -1;
		goto free_mem;
	}

	wrb = wrb_from_mbox(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = attribs_cmd.va;

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			 OPCODE_COMMON_GET_CNTL_ATTRIBUTES, payload_len, wrb,
			&attribs_cmd);

	status = be_mbox_notify_wait(adapter);
	if (!status) {
		attribs = attribs_cmd.va + sizeof(struct be_cmd_resp_hdr);
		adapter->hba_port_num = attribs->hba_attribs.phy_port;
	}

err:
	mutex_unlock(&adapter->mbox_lock);
free_mem:
	pci_free_consistent(adapter->pdev, attribs_cmd.size, attribs_cmd.va,
					attribs_cmd.dma);
	return status;
}

/* Uses mbox */
int be_cmd_req_native_mode(struct be_adapter *adapter)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_set_func_cap *req;
	int status;

	if (mutex_lock_interruptible(&adapter->mbox_lock))
		return -1;

	wrb = wrb_from_mbox(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}

	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_SET_DRIVER_FUNCTION_CAP, sizeof(*req), wrb, NULL);

	req->valid_cap_flags = cpu_to_le32(CAPABILITY_SW_TIMESTAMPS |
				CAPABILITY_BE3_NATIVE_ERX_API);
	req->cap_flags = cpu_to_le32(CAPABILITY_BE3_NATIVE_ERX_API);

	status = be_mbox_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_set_func_cap *resp = embedded_payload(wrb);
		adapter->be3_native = le32_to_cpu(resp->cap_flags) &
					CAPABILITY_BE3_NATIVE_ERX_API;
	}
err:
	mutex_unlock(&adapter->mbox_lock);
	return status;
}

/* Uses synchronous MCCQ */
int be_cmd_get_mac_from_list(struct be_adapter *adapter, u32 domain,
			bool *pmac_id_active, u32 *pmac_id, u8 *mac)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_get_mac_list *req;
	int status;
	int mac_count;
	struct be_dma_mem get_mac_list_cmd;
	int i;

	memset(&get_mac_list_cmd, 0, sizeof(struct be_dma_mem));
	get_mac_list_cmd.size = sizeof(struct be_cmd_resp_get_mac_list);
	get_mac_list_cmd.va = pci_alloc_consistent(adapter->pdev,
			get_mac_list_cmd.size,
			&get_mac_list_cmd.dma);

	if (!get_mac_list_cmd.va) {
		dev_err(&adapter->pdev->dev,
				"Memory allocation failure during GET_MAC_LIST\n");
		return -ENOMEM;
	}

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto out;
	}

	req = get_mac_list_cmd.va;

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
				OPCODE_COMMON_GET_MAC_LIST, sizeof(*req),
				wrb, &get_mac_list_cmd);

	req->hdr.domain = domain;
	req->mac_type = MAC_ADDRESS_TYPE_NETWORK;
	req->perm_override = 1;

	status = be_mcc_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_get_mac_list *resp =
						get_mac_list_cmd.va;
		mac_count = resp->true_mac_count + resp->pseudo_mac_count;
		/* The mac list returned could contain one or more active
		 * mac_ids and/or one or more pseudo permanent mac addresses.
		 * If an active mac_id is present, return the first one found.
		 */
		for (i = 0; i < mac_count; i++) {
			struct get_list_macaddr *mac_entry;
			u16 mac_addr_size;
			u32 mac_id;

			mac_entry = &resp->macaddr_list[i];
			mac_addr_size = le16_to_cpu(mac_entry->mac_addr_size);
			/* mac_id is a 32 bit value and mac_addr size
			 * is 6 bytes
			 */
			if (mac_addr_size == sizeof(u32)) {
				*pmac_id_active = true;
				mac_id = mac_entry->mac_addr_id.s_mac_id.mac_id;
				*pmac_id = le32_to_cpu(mac_id);
				goto out;
			}
		}
		/* If no active mac_id was found, return the first pseudo
		 * mac address */
		*pmac_id_active = false;
		memcpy(mac, resp->macaddr_list[0].mac_addr_id.macaddr,
								ETH_ALEN);
	}

out:
	spin_unlock_bh(&adapter->mcc_lock);
	pci_free_consistent(adapter->pdev, get_mac_list_cmd.size,
			get_mac_list_cmd.va, get_mac_list_cmd.dma);
	return status;
}

/* Uses synchronous MCCQ */
int be_cmd_set_mac_list(struct be_adapter *adapter, u8 *mac_array,
			u8 mac_count, u32 domain)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_set_mac_list *req;
	int status;
	struct be_dma_mem cmd;

	memset(&cmd, 0, sizeof(struct be_dma_mem));
	cmd.size = sizeof(struct be_cmd_req_set_mac_list);
	cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size,
			&cmd.dma, GFP_KERNEL);
	if (!cmd.va) {
		dev_err(&adapter->pdev->dev, "Memory alloc failure\n");
		return -ENOMEM;
	}

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}

	req = cmd.va;
	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
				OPCODE_COMMON_SET_MAC_LIST, sizeof(*req),
				wrb, &cmd);

	req->hdr.domain = domain;
	req->mac_count = mac_count;
	if (mac_count)
		memcpy(req->mac, mac_array, ETH_ALEN*mac_count);

	status = be_mcc_notify_wait(adapter);

err:
	spin_unlock_bh(&adapter->mcc_lock);
	/* Free the coherent buffer only after dropping mcc_lock, keeping
	 * the coherent-DMA free out of the BH-disabled critical section.
	 */
	dma_free_coherent(&adapter->pdev->dev, cmd.size,
				cmd.va, cmd.dma);
	return status;
}

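/* Set Hyper switch config */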
int be_cmd_set_hsw_config(struct be_adapter *adapter, u16 pvid,
			u32 domain, u16 intf_id)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_set_hsw_config *req;
	void *ctxt;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}

	req = embedded_payload(wrb);
	ctxt = &req->context;

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			OPCODE_COMMON_SET_HSW_CONFIG, sizeof(*req), wrb, NULL);

	req->hdr.domain = domain;
	AMAP_SET_BITS(struct amap_set_hsw_context, interface_id, ctxt, intf_id);
	if (pvid) {
		AMAP_SET_BITS(struct amap_set_hsw_context, pvid_valid, ctxt, 1);
		AMAP_SET_BITS(struct amap_set_hsw_context, pvid, ctxt, pvid);
	}

	be_dws_cpu_to_le(req->context, sizeof(req->context));
	status = be_mcc_notify_wait(adapter);

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

/* Get Hyper switch config */
int be_cmd_get_hsw_config(struct be_adapter *adapter, u16 *pvid,
			u32 domain, u16 intf_id)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_get_hsw_config *req;
	void *ctxt;
	int status;
	u16 vid;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}

	req = embedded_payload(wrb);
	ctxt = &req->context;

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			OPCODE_COMMON_GET_HSW_CONFIG, sizeof(*req), wrb, NULL);

	req->hdr.domain = domain;
	AMAP_SET_BITS(struct amap_get_hsw_req_context, interface_id, ctxt,
								intf_id);
	AMAP_SET_BITS(struct amap_get_hsw_req_context, pvid_valid, ctxt, 1);
	be_dws_cpu_to_le(req->context, sizeof(req->context));

	status = be_mcc_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_get_hsw_config *resp =
						embedded_payload(wrb);
		be_dws_le_to_cpu(&resp->context,
						sizeof(resp->context));
		vid = AMAP_GET_BITS(struct amap_get_hsw_resp_context,
							pvid, &resp->context);
		*pvid = le16_to_cpu(vid);
	}

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

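/* Uses mbox */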
int be_cmd_get_acpi_wol_cap(struct be_adapter *adapter)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_acpi_wol_magic_config_v1 *req;
	int status;
	int payload_len = sizeof(*req);
	struct be_dma_mem cmd;

	memset(&cmd, 0, sizeof(struct be_dma_mem));
	cmd.size = sizeof(struct be_cmd_resp_acpi_wol_magic_config_v1);
	cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size,
					       &cmd.dma);
	if (!cmd.va) {
		dev_err(&adapter->pdev->dev, "Memory allocation failure\n");
		return -ENOMEM;
	}

	if (mutex_lock_interruptible(&adapter->mbox_lock)) {
		/* Don't leak the DMA buffer if the lock wait is interrupted */
		status = -1;
		goto free_mem;
	}

	wrb = wrb_from_mbox(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}

	req = cmd.va;

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
			       OPCODE_ETH_ACPI_WOL_MAGIC_CONFIG,
			       payload_len, wrb, &cmd);

	req->hdr.version = 1;
	req->query_options = BE_GET_WOL_CAP;

	status = be_mbox_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_acpi_wol_magic_config_v1 *resp;
		resp = (struct be_cmd_resp_acpi_wol_magic_config_v1 *) cmd.va;

		/* the command could succeed misleadingly on old f/w
		 * which is not aware of the V1 version. fake an error. */
		if (resp->hdr.response_length < payload_len) {
			status = -1;
			goto err;
		}
		adapter->wol_cap = resp->wol_settings;
	}
err:
	mutex_unlock(&adapter->mbox_lock);
free_mem:
	pci_free_consistent(adapter->pdev, cmd.size, cmd.va, cmd.dma);
	return status;
}
