bnx2x_sp.c revision eb2afd4a622985eaccfa8c7fc83e890b8930e0ab
1/* bnx2x_sp.c: Broadcom Everest network driver.
2 *
3 * Copyright 2011 Broadcom Corporation
4 *
5 * Unless you and Broadcom execute a separate written software license
6 * agreement governing use of this software, this software is licensed to you
7 * under the terms of the GNU General Public License version 2, available
8 * at http://www.gnu.org/licenses/old-licenses/gpl-2.0.html (the "GPL").
9 *
10 * Notwithstanding the above, under no circumstances may you combine this
11 * software in any way with any other Broadcom software provided under a
12 * license other than the GPL, without Broadcom's express prior written
13 * consent.
14 *
15 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
16 * Written by: Vladislav Zolotarov
17 *
18 */
19
20#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
21
22#include <linux/module.h>
23#include <linux/crc32.h>
24#include <linux/netdevice.h>
25#include <linux/etherdevice.h>
26#include <linux/crc32c.h>
27#include "bnx2x.h"
28#include "bnx2x_cmn.h"
29#include "bnx2x_sp.h"
30
31#define BNX2X_MAX_EMUL_MULTI		16
32
33/**** Exe Queue interfaces ****/
34
35/**
36 * bnx2x_exe_queue_init - init the Exe Queue object
37 *
38 * @o:		pointer to the object
39 * @exe_len:	length of the execution chunk
40 * @owner:	pointer to the owner
41 * @validate:	validate function pointer
42 * @optimize:	optimize function pointer
43 * @exec:	execute function pointer
44 * @get:	get function pointer
45 */
46static inline void bnx2x_exe_queue_init(struct bnx2x *bp,
47					struct bnx2x_exe_queue_obj *o,
48					int exe_len,
49					union bnx2x_qable_obj *owner,
50					exe_q_validate validate,
51					exe_q_optimize optimize,
52					exe_q_execute exec,
53					exe_q_get get)
54{
55	memset(o, 0, sizeof(*o));
56
57	INIT_LIST_HEAD(&o->exe_queue);
58	INIT_LIST_HEAD(&o->pending_comp);
59
60	spin_lock_init(&o->lock);
61
62	o->exe_chunk_len = exe_len;
63	o->owner         = owner;
64
65	/* Owner specific callbacks */
66	o->validate      = validate;
67	o->optimize      = optimize;
68	o->execute       = exec;
69	o->get           = get;
70
71	DP(BNX2X_MSG_SP, "Setup the execution queue with the chunk "
72			 "length of %d\n", exe_len);
73}
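/*
 * Illustrative sketch (not part of this revision): an owner wires up the four
 * callbacks at init time, e.g. the MAC object setup later in this file does:
 *
 *	bnx2x_exe_queue_init(bp, &mac_obj->exe_queue, 1, qable_obj,
 *			     bnx2x_validate_vlan_mac,
 *			     bnx2x_optimize_vlan_mac,
 *			     bnx2x_execute_vlan_mac,
 *			     bnx2x_exeq_get_mac);
 */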
74
75static inline void bnx2x_exe_queue_free_elem(struct bnx2x *bp,
76					     struct bnx2x_exeq_elem *elem)
77{
78	DP(BNX2X_MSG_SP, "Deleting an exe_queue element\n");
79	kfree(elem);
80}
81
82static inline int bnx2x_exe_queue_length(struct bnx2x_exe_queue_obj *o)
83{
84	struct bnx2x_exeq_elem *elem;
85	int cnt = 0;
86
87	spin_lock_bh(&o->lock);
88
89	list_for_each_entry(elem, &o->exe_queue, link)
90		cnt++;
91
92	spin_unlock_bh(&o->lock);
93
94	return cnt;
95}
96
97/**
98 * bnx2x_exe_queue_add - add a new element to the execution queue
99 *
100 * @bp:		driver handle
101 * @o:		queue
102 * @elem:	new element to add
103 * @restore:	true - do not optimize the command
104 *
105 * If the element is optimized or is illegal, frees it.
106 */
107static inline int bnx2x_exe_queue_add(struct bnx2x *bp,
108				      struct bnx2x_exe_queue_obj *o,
109				      struct bnx2x_exeq_elem *elem,
110				      bool restore)
111{
112	int rc;
113
114	spin_lock_bh(&o->lock);
115
116	if (!restore) {
117		/* Try to cancel this element against the queued commands (optimize) */
118		rc = o->optimize(bp, o->owner, elem);
119		if (rc)
120			goto free_and_exit;
121
122		/* Check if this request is ok */
123		rc = o->validate(bp, o->owner, elem);
124		if (rc) {
125			BNX2X_ERR("Preamble failed: %d\n", rc);
126			goto free_and_exit;
127		}
128	}
129
130	/* The element is legal - add it to the execution queue */
131	list_add_tail(&elem->link, &o->exe_queue);
132
133	spin_unlock_bh(&o->lock);
134
135	return 0;
136
137free_and_exit:
138	bnx2x_exe_queue_free_elem(bp, elem);
139
140	spin_unlock_bh(&o->lock);
141
142	return rc;
143
144}
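/*
 * Usage sketch (mirrors bnx2x_vlan_mac_push_new_cmd() below; illustration
 * only): the caller allocates an element, fills it and hands ownership over -
 * on an optimize/validate failure the element is already freed here.
 *
 *	elem = bnx2x_exe_queue_alloc_elem(bp);
 *	if (!elem)
 *		return -ENOMEM;
 *	elem->cmd_len = 1;
 *	memcpy(&elem->cmd_data.vlan_mac, &p->user_req, sizeof(p->user_req));
 *	return bnx2x_exe_queue_add(bp, &o->exe_queue, elem, restore);
 */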
145
146static inline void __bnx2x_exe_queue_reset_pending(
147	struct bnx2x *bp,
148	struct bnx2x_exe_queue_obj *o)
149{
150	struct bnx2x_exeq_elem *elem;
151
152	while (!list_empty(&o->pending_comp)) {
153		elem = list_first_entry(&o->pending_comp,
154					struct bnx2x_exeq_elem, link);
155
156		list_del(&elem->link);
157		bnx2x_exe_queue_free_elem(bp, elem);
158	}
159}
160
161static inline void bnx2x_exe_queue_reset_pending(struct bnx2x *bp,
162						 struct bnx2x_exe_queue_obj *o)
163{
164
165	spin_lock_bh(&o->lock);
166
167	__bnx2x_exe_queue_reset_pending(bp, o);
168
169	spin_unlock_bh(&o->lock);
170
171}
172
173/**
174 * bnx2x_exe_queue_step - execute one execution chunk atomically
175 *
176 * @bp:			driver handle
177 * @o:			queue
178 * @ramrod_flags:	flags
179 *
180 * (Atomicity is ensured using the exe_queue->lock).
181 */
182static inline int bnx2x_exe_queue_step(struct bnx2x *bp,
183				       struct bnx2x_exe_queue_obj *o,
184				       unsigned long *ramrod_flags)
185{
186	struct bnx2x_exeq_elem *elem, spacer;
187	int cur_len = 0, rc;
188
189	memset(&spacer, 0, sizeof(spacer));
190
191	spin_lock_bh(&o->lock);
192
193	/*
194	 * Next step should not be performed until the current is finished,
195	 * unless a DRV_CLEAR_ONLY bit is set. In this case we just want to
196	 * properly clear object internals without sending any command to the FW
197	 * which also implies there won't be any completion to clear the
198	 * 'pending' list.
199	 */
200	if (!list_empty(&o->pending_comp)) {
201		if (test_bit(RAMROD_DRV_CLR_ONLY, ramrod_flags)) {
202			DP(BNX2X_MSG_SP, "RAMROD_DRV_CLR_ONLY requested: "
203					 "resetting pending_comp\n");
204			__bnx2x_exe_queue_reset_pending(bp, o);
205		} else {
206			spin_unlock_bh(&o->lock);
207			return 1;
208		}
209	}
210
211	/*
212	 * Run through the pending commands list and create a next
213	 * execution chunk.
214	 */
215	while (!list_empty(&o->exe_queue)) {
216		elem = list_first_entry(&o->exe_queue, struct bnx2x_exeq_elem,
217					link);
218		WARN_ON(!elem->cmd_len);
219
220		if (cur_len + elem->cmd_len <= o->exe_chunk_len) {
221			cur_len += elem->cmd_len;
222			/*
223			 * Prevent from both lists being empty when moving an
224			 * element. This will allow the call of
225			 * bnx2x_exe_queue_empty() without locking.
226			 */
227			list_add_tail(&spacer.link, &o->pending_comp);
228			mb();
229			list_del(&elem->link);
230			list_add_tail(&elem->link, &o->pending_comp);
231			list_del(&spacer.link);
232		} else
233			break;
234	}
235
236	/* Sanity check */
237	if (!cur_len) {
238		spin_unlock_bh(&o->lock);
239		return 0;
240	}
241
242	rc = o->execute(bp, o->owner, &o->pending_comp, ramrod_flags);
243	if (rc < 0)
244		/*
245		 *  In case of an error return the commands back to the queue
246		 *  and reset the pending_comp.
247		 */
248		list_splice_init(&o->pending_comp, &o->exe_queue);
249	else if (!rc)
250		/*
251		 * If zero is returned, means there are no outstanding pending
252		 * completions and we may dismiss the pending list.
253		 */
254		__bnx2x_exe_queue_reset_pending(bp, o);
255
256	spin_unlock_bh(&o->lock);
257	return rc;
258}
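/*
 * Return value convention of the step above, as relied upon by the callers in
 * this file: a negative value means the chunk failed and the commands were
 * returned to the queue; 0 means the chunk completed with nothing left
 * pending; a positive value means a completion is still outstanding.
 */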
259
260static inline bool bnx2x_exe_queue_empty(struct bnx2x_exe_queue_obj *o)
261{
262	bool empty = list_empty(&o->exe_queue);
263
264	/* Don't reorder!!! */
265	mb();
266
267	return empty && list_empty(&o->pending_comp);
268}
269
270static inline struct bnx2x_exeq_elem *bnx2x_exe_queue_alloc_elem(
271	struct bnx2x *bp)
272{
273	DP(BNX2X_MSG_SP, "Allocating a new exe_queue element\n");
274	return kzalloc(sizeof(struct bnx2x_exeq_elem), GFP_ATOMIC);
275}
276
277/************************ raw_obj functions ***********************************/
278static bool bnx2x_raw_check_pending(struct bnx2x_raw_obj *o)
279{
280	return !!test_bit(o->state, o->pstate);
281}
282
283static void bnx2x_raw_clear_pending(struct bnx2x_raw_obj *o)
284{
285	smp_mb__before_clear_bit();
286	clear_bit(o->state, o->pstate);
287	smp_mb__after_clear_bit();
288}
289
290static void bnx2x_raw_set_pending(struct bnx2x_raw_obj *o)
291{
292	smp_mb__before_clear_bit();
293	set_bit(o->state, o->pstate);
294	smp_mb__after_clear_bit();
295}
296
297/**
298 * bnx2x_state_wait - wait until the given bit (state) is cleared
299 *
300 * @bp:		device handle
301 * @state:	state which is to be cleared
302 * @pstate:	state buffer
303 *
304 */
305static inline int bnx2x_state_wait(struct bnx2x *bp, int state,
306				   unsigned long *pstate)
307{
308	/* can take a while if any port is running */
309	int cnt = 5000;
310
311
312	if (CHIP_REV_IS_EMUL(bp))
313		cnt *= 20;
314
315	DP(BNX2X_MSG_SP, "waiting for state to become %d\n", state);
316
317	might_sleep();
318	while (cnt--) {
319		if (!test_bit(state, pstate)) {
320#ifdef BNX2X_STOP_ON_ERROR
321			DP(BNX2X_MSG_SP, "exit  (cnt %d)\n", 5000 - cnt);
322#endif
323			return 0;
324		}
325
326		usleep_range(1000, 1000);
327
328		if (bp->panic)
329			return -EIO;
330	}
331
332	/* timeout! */
333	BNX2X_ERR("timeout waiting for state %d\n", state);
334#ifdef BNX2X_STOP_ON_ERROR
335	bnx2x_panic();
336#endif
337
338	return -EBUSY;
339}
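/*
 * With the ~1ms poll interval above, the 5000-iteration budget gives waiters
 * roughly 5 seconds per command (20 times that on emulation), which matches
 * the "passively wait for 5 seconds" contract of bnx2x_wait_vlan_mac() below.
 */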
340
341static int bnx2x_raw_wait(struct bnx2x *bp, struct bnx2x_raw_obj *raw)
342{
343	return bnx2x_state_wait(bp, raw->state, raw->pstate);
344}
345
346/***************** Classification verbs: Set/Del MAC/VLAN/VLAN-MAC ************/
347/* credit handling callbacks */
348static bool bnx2x_get_cam_offset_mac(struct bnx2x_vlan_mac_obj *o, int *offset)
349{
350	struct bnx2x_credit_pool_obj *mp = o->macs_pool;
351
352	WARN_ON(!mp);
353
354	return mp->get_entry(mp, offset);
355}
356
357static bool bnx2x_get_credit_mac(struct bnx2x_vlan_mac_obj *o)
358{
359	struct bnx2x_credit_pool_obj *mp = o->macs_pool;
360
361	WARN_ON(!mp);
362
363	return mp->get(mp, 1);
364}
365
366static bool bnx2x_get_cam_offset_vlan(struct bnx2x_vlan_mac_obj *o, int *offset)
367{
368	struct bnx2x_credit_pool_obj *vp = o->vlans_pool;
369
370	WARN_ON(!vp);
371
372	return vp->get_entry(vp, offset);
373}
374
375static bool bnx2x_get_credit_vlan(struct bnx2x_vlan_mac_obj *o)
376{
377	struct bnx2x_credit_pool_obj *vp = o->vlans_pool;
378
379	WARN_ON(!vp);
380
381	return vp->get(vp, 1);
382}
383
384static bool bnx2x_get_credit_vlan_mac(struct bnx2x_vlan_mac_obj *o)
385{
386	struct bnx2x_credit_pool_obj *mp = o->macs_pool;
387	struct bnx2x_credit_pool_obj *vp = o->vlans_pool;
388
389	if (!mp->get(mp, 1))
390		return false;
391
392	if (!vp->get(vp, 1)) {
393		mp->put(mp, 1);
394		return false;
395	}
396
397	return true;
398}
399
400static bool bnx2x_put_cam_offset_mac(struct bnx2x_vlan_mac_obj *o, int offset)
401{
402	struct bnx2x_credit_pool_obj *mp = o->macs_pool;
403
404	return mp->put_entry(mp, offset);
405}
406
407static bool bnx2x_put_credit_mac(struct bnx2x_vlan_mac_obj *o)
408{
409	struct bnx2x_credit_pool_obj *mp = o->macs_pool;
410
411	return mp->put(mp, 1);
412}
413
414static bool bnx2x_put_cam_offset_vlan(struct bnx2x_vlan_mac_obj *o, int offset)
415{
416	struct bnx2x_credit_pool_obj *vp = o->vlans_pool;
417
418	return vp->put_entry(vp, offset);
419}
420
421static bool bnx2x_put_credit_vlan(struct bnx2x_vlan_mac_obj *o)
422{
423	struct bnx2x_credit_pool_obj *vp = o->vlans_pool;
424
425	return vp->put(vp, 1);
426}
427
428static bool bnx2x_put_credit_vlan_mac(struct bnx2x_vlan_mac_obj *o)
429{
430	struct bnx2x_credit_pool_obj *mp = o->macs_pool;
431	struct bnx2x_credit_pool_obj *vp = o->vlans_pool;
432
433	if (!mp->put(mp, 1))
434		return false;
435
436	if (!vp->put(vp, 1)) {
437		mp->get(mp, 1);
438		return false;
439	}
440
441	return true;
442}
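/*
 * Note the rollback pattern in the two pair handlers above: a VLAN-MAC pair
 * takes (or returns) one credit from each pool, and a failure on the second
 * pool undoes the first operation so that the pools stay consistent. A caller
 * therefore only ever sees the combined result (sketch):
 *
 *	if (!o->get_credit(o))
 *		return -EINVAL;
 */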
443
444/* check_add() callbacks */
445static int bnx2x_check_mac_add(struct bnx2x_vlan_mac_obj *o,
446			       union bnx2x_classification_ramrod_data *data)
447{
448	struct bnx2x_vlan_mac_registry_elem *pos;
449
450	if (!is_valid_ether_addr(data->mac.mac))
451		return -EINVAL;
452
453	/* Check if a requested MAC already exists */
454	list_for_each_entry(pos, &o->head, link)
455		if (!memcmp(data->mac.mac, pos->u.mac.mac, ETH_ALEN))
456			return -EEXIST;
457
458	return 0;
459}
460
461static int bnx2x_check_vlan_add(struct bnx2x_vlan_mac_obj *o,
462				union bnx2x_classification_ramrod_data *data)
463{
464	struct bnx2x_vlan_mac_registry_elem *pos;
465
466	list_for_each_entry(pos, &o->head, link)
467		if (data->vlan.vlan == pos->u.vlan.vlan)
468			return -EEXIST;
469
470	return 0;
471}
472
473static int bnx2x_check_vlan_mac_add(struct bnx2x_vlan_mac_obj *o,
474				   union bnx2x_classification_ramrod_data *data)
475{
476	struct bnx2x_vlan_mac_registry_elem *pos;
477
478	list_for_each_entry(pos, &o->head, link)
479		if ((data->vlan_mac.vlan == pos->u.vlan_mac.vlan) &&
480		    (!memcmp(data->vlan_mac.mac, pos->u.vlan_mac.mac,
481			     ETH_ALEN)))
482			return -EEXIST;
483
484	return 0;
485}
486
487
488/* check_del() callbacks */
489static struct bnx2x_vlan_mac_registry_elem *
490	bnx2x_check_mac_del(struct bnx2x_vlan_mac_obj *o,
491			    union bnx2x_classification_ramrod_data *data)
492{
493	struct bnx2x_vlan_mac_registry_elem *pos;
494
495	list_for_each_entry(pos, &o->head, link)
496		if (!memcmp(data->mac.mac, pos->u.mac.mac, ETH_ALEN))
497			return pos;
498
499	return NULL;
500}
501
502static struct bnx2x_vlan_mac_registry_elem *
503	bnx2x_check_vlan_del(struct bnx2x_vlan_mac_obj *o,
504			     union bnx2x_classification_ramrod_data *data)
505{
506	struct bnx2x_vlan_mac_registry_elem *pos;
507
508	list_for_each_entry(pos, &o->head, link)
509		if (data->vlan.vlan == pos->u.vlan.vlan)
510			return pos;
511
512	return NULL;
513}
514
515static struct bnx2x_vlan_mac_registry_elem *
516	bnx2x_check_vlan_mac_del(struct bnx2x_vlan_mac_obj *o,
517				 union bnx2x_classification_ramrod_data *data)
518{
519	struct bnx2x_vlan_mac_registry_elem *pos;
520
521	list_for_each_entry(pos, &o->head, link)
522		if ((data->vlan_mac.vlan == pos->u.vlan_mac.vlan) &&
523		    (!memcmp(data->vlan_mac.mac, pos->u.vlan_mac.mac,
524			     ETH_ALEN)))
525			return pos;
526
527	return NULL;
528}
529
530/* check_move() callback */
531static bool bnx2x_check_move(struct bnx2x_vlan_mac_obj *src_o,
532			     struct bnx2x_vlan_mac_obj *dst_o,
533			     union bnx2x_classification_ramrod_data *data)
534{
535	struct bnx2x_vlan_mac_registry_elem *pos;
536	int rc;
537
538	/* Check if we can delete the requested configuration from the first
539	 * object.
540	 */
541	pos = src_o->check_del(src_o, data);
542
543	/* Check if the configuration can be added */
544	rc = dst_o->check_add(dst_o, data);
545
546	/* If this classification cannot be added (is already set)
547	 * or can't be deleted - return an error.
548	 */
549	if (rc || !pos)
550		return false;
551
552	return true;
553}
554
555static bool bnx2x_check_move_always_err(
556	struct bnx2x_vlan_mac_obj *src_o,
557	struct bnx2x_vlan_mac_obj *dst_o,
558	union bnx2x_classification_ramrod_data *data)
559{
560	return false;
561}
562
563
564static inline u8 bnx2x_vlan_mac_get_rx_tx_flag(struct bnx2x_vlan_mac_obj *o)
565{
566	struct bnx2x_raw_obj *raw = &o->raw;
567	u8 rx_tx_flag = 0;
568
569	if ((raw->obj_type == BNX2X_OBJ_TYPE_TX) ||
570	    (raw->obj_type == BNX2X_OBJ_TYPE_RX_TX))
571		rx_tx_flag |= ETH_CLASSIFY_CMD_HEADER_TX_CMD;
572
573	if ((raw->obj_type == BNX2X_OBJ_TYPE_RX) ||
574	    (raw->obj_type == BNX2X_OBJ_TYPE_RX_TX))
575		rx_tx_flag |= ETH_CLASSIFY_CMD_HEADER_RX_CMD;
576
577	return rx_tx_flag;
578}
579
580/* LLH CAM line allocations */
581enum {
582	LLH_CAM_ISCSI_ETH_LINE = 0,
583	LLH_CAM_ETH_LINE,
584	LLH_CAM_MAX_PF_LINE = NIG_REG_LLH1_FUNC_MEM_SIZE / 2
585};
586
587static inline void bnx2x_set_mac_in_nig(struct bnx2x *bp,
588				 bool add, unsigned char *dev_addr, int index)
589{
590	u32 wb_data[2];
591	u32 reg_offset = BP_PORT(bp) ? NIG_REG_LLH1_FUNC_MEM :
592			 NIG_REG_LLH0_FUNC_MEM;
593
594	if (!IS_MF_SI(bp) || index > LLH_CAM_MAX_PF_LINE)
595		return;
596
597	DP(BNX2X_MSG_SP, "Going to %s LLH configuration at entry %d\n",
598			 (add ? "ADD" : "DELETE"), index);
599
600	if (add) {
601		/* LLH_FUNC_MEM is a u64 WB register */
602		reg_offset += 8*index;
603
604		wb_data[0] = ((dev_addr[2] << 24) | (dev_addr[3] << 16) |
605			      (dev_addr[4] <<  8) |  dev_addr[5]);
606		wb_data[1] = ((dev_addr[0] <<  8) |  dev_addr[1]);
607
608		REG_WR_DMAE(bp, reg_offset, wb_data, 2);
609	}
610
611	REG_WR(bp, (BP_PORT(bp) ? NIG_REG_LLH1_FUNC_MEM_ENABLE :
612				  NIG_REG_LLH0_FUNC_MEM_ENABLE) + 4*index, add);
613}
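/*
 * Byte-packing illustration for the write-back pair above, using the
 * hypothetical address 00:11:22:33:44:55:
 *
 *	wb_data[0] = 0x22334455;	bytes 2..5 of the MAC
 *	wb_data[1] = 0x00000011;	bytes 0..1 of the MAC
 */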
614
615/**
616 * bnx2x_vlan_mac_set_cmd_hdr_e2 - set a header in a single classify ramrod
617 *
618 * @bp:		device handle
619 * @o:		queue for which we want to configure this rule
620 * @add:	if true the command is an ADD command, DEL otherwise
621 * @opcode:	CLASSIFY_RULE_OPCODE_XXX
622 * @hdr:	pointer to a header to setup
623 *
624 */
625static inline void bnx2x_vlan_mac_set_cmd_hdr_e2(struct bnx2x *bp,
626	struct bnx2x_vlan_mac_obj *o, bool add, int opcode,
627	struct eth_classify_cmd_header *hdr)
628{
629	struct bnx2x_raw_obj *raw = &o->raw;
630
631	hdr->client_id = raw->cl_id;
632	hdr->func_id = raw->func_id;
633
634	/* Rx and/or Tx (internal switching) configuration? */
635	hdr->cmd_general_data |=
636		bnx2x_vlan_mac_get_rx_tx_flag(o);
637
638	if (add)
639		hdr->cmd_general_data |= ETH_CLASSIFY_CMD_HEADER_IS_ADD;
640
641	hdr->cmd_general_data |=
642		(opcode << ETH_CLASSIFY_CMD_HEADER_OPCODE_SHIFT);
643}
644
645/**
646 * bnx2x_vlan_mac_set_rdata_hdr_e2 - set the classify ramrod data header
647 *
648 * @cid:	connection id
649 * @type:	BNX2X_FILTER_XXX_PENDING
650 * @hdr:	pointer to header to setup
651 * @rule_cnt:	number of rules in this ramrod data
652 *
653 * Currently we always configure one rule; the echo field is set to contain
654 * a CID and an opcode type.
655 */
656static inline void bnx2x_vlan_mac_set_rdata_hdr_e2(u32 cid, int type,
657				struct eth_classify_header *hdr, int rule_cnt)
658{
659	hdr->echo = (cid & BNX2X_SWCID_MASK) | (type << BNX2X_SWCID_SHIFT);
660	hdr->rule_cnt = (u8)rule_cnt;
661}
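/*
 * The echo encoding above is a driver-internal convention: the low bits carry
 * the SW connection id and the upper bits the BNX2X_FILTER_XXX_PENDING type,
 * so a completion handler can recover both (sketch):
 *
 *	cid  = echo & BNX2X_SWCID_MASK;
 *	type = echo >> BNX2X_SWCID_SHIFT;
 */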
662
663
664/* hw_config() callbacks */
665static void bnx2x_set_one_mac_e2(struct bnx2x *bp,
666				 struct bnx2x_vlan_mac_obj *o,
667				 struct bnx2x_exeq_elem *elem, int rule_idx,
668				 int cam_offset)
669{
670	struct bnx2x_raw_obj *raw = &o->raw;
671	struct eth_classify_rules_ramrod_data *data =
672		(struct eth_classify_rules_ramrod_data *)(raw->rdata);
673	int rule_cnt = rule_idx + 1, cmd = elem->cmd_data.vlan_mac.cmd;
674	union eth_classify_rule_cmd *rule_entry = &data->rules[rule_idx];
675	bool add = (cmd == BNX2X_VLAN_MAC_ADD) ? true : false;
676	unsigned long *vlan_mac_flags = &elem->cmd_data.vlan_mac.vlan_mac_flags;
677	u8 *mac = elem->cmd_data.vlan_mac.u.mac.mac;
678
679	/*
680	 * Set LLH CAM entry: currently only iSCSI and ETH macs are
681	 * relevant. In addition, current implementation is tuned for a
682	 * single ETH MAC.
683	 *
684 * When PF configuration of multiple unicast ETH MACs in
685 * switch-independent mode is required (NetQ, multiple netdev MACs,
686 * etc.), consider better utilisation of the 8 per-function MAC
687 * entries in the LLH register. There are also the
688 * NIG_REG_P[01]_LLH_FUNC_MEM2 registers, which bring the
689 * total number of CAM entries to 16.
690	 *
691	 * Currently we won't configure NIG for MACs other than a primary ETH
692	 * MAC and iSCSI L2 MAC.
693	 *
694	 * If this MAC is moving from one Queue to another, no need to change
695	 * NIG configuration.
696	 */
697	if (cmd != BNX2X_VLAN_MAC_MOVE) {
698		if (test_bit(BNX2X_ISCSI_ETH_MAC, vlan_mac_flags))
699			bnx2x_set_mac_in_nig(bp, add, mac,
700					     LLH_CAM_ISCSI_ETH_LINE);
701		else if (test_bit(BNX2X_ETH_MAC, vlan_mac_flags))
702			bnx2x_set_mac_in_nig(bp, add, mac, LLH_CAM_ETH_LINE);
703	}
704
705	/* Reset the ramrod data buffer for the first rule */
706	if (rule_idx == 0)
707		memset(data, 0, sizeof(*data));
708
709	/* Setup a command header */
710	bnx2x_vlan_mac_set_cmd_hdr_e2(bp, o, add, CLASSIFY_RULE_OPCODE_MAC,
711				      &rule_entry->mac.header);
712
713	DP(BNX2X_MSG_SP, "About to %s MAC %pM for Queue %d\n",
714			 add ? "add" : "delete", mac, raw->cl_id);
715
716	/* Set a MAC itself */
717	bnx2x_set_fw_mac_addr(&rule_entry->mac.mac_msb,
718			      &rule_entry->mac.mac_mid,
719			      &rule_entry->mac.mac_lsb, mac);
720
721	/* MOVE: Add a rule that will add this MAC to the target Queue */
722	if (cmd == BNX2X_VLAN_MAC_MOVE) {
723		rule_entry++;
724		rule_cnt++;
725
726		/* Setup ramrod data */
727		bnx2x_vlan_mac_set_cmd_hdr_e2(bp,
728					elem->cmd_data.vlan_mac.target_obj,
729					      true, CLASSIFY_RULE_OPCODE_MAC,
730					      &rule_entry->mac.header);
731
732		/* Set a MAC itself */
733		bnx2x_set_fw_mac_addr(&rule_entry->mac.mac_msb,
734				      &rule_entry->mac.mac_mid,
735				      &rule_entry->mac.mac_lsb, mac);
736	}
737
738	/* Set the ramrod data header */
739	/* TODO: take this to a higher level in order to prevent multiple
740		 writes */
741	bnx2x_vlan_mac_set_rdata_hdr_e2(raw->cid, raw->state, &data->header,
742					rule_cnt);
743}
744
745/**
746 * bnx2x_vlan_mac_set_rdata_hdr_e1x - set a header in a single classify ramrod
747 *
748 * @bp:		device handle
749 * @o:		queue
750 * @type:	BNX2X_FILTER_XXX_PENDING
751 * @cam_offset:	offset in cam memory
752 * @hdr:	pointer to a header to setup
753 *
754 * E1/E1H
755 */
756static inline void bnx2x_vlan_mac_set_rdata_hdr_e1x(struct bnx2x *bp,
757	struct bnx2x_vlan_mac_obj *o, int type, int cam_offset,
758	struct mac_configuration_hdr *hdr)
759{
760	struct bnx2x_raw_obj *r = &o->raw;
761
762	hdr->length = 1;
763	hdr->offset = (u8)cam_offset;
764	hdr->client_id = 0xff;
765	hdr->echo = ((r->cid & BNX2X_SWCID_MASK) | (type << BNX2X_SWCID_SHIFT));
766}
767
768static inline void bnx2x_vlan_mac_set_cfg_entry_e1x(struct bnx2x *bp,
769	struct bnx2x_vlan_mac_obj *o, bool add, int opcode, u8 *mac,
770	u16 vlan_id, struct mac_configuration_entry *cfg_entry)
771{
772	struct bnx2x_raw_obj *r = &o->raw;
773	u32 cl_bit_vec = (1 << r->cl_id);
774
775	cfg_entry->clients_bit_vector = cpu_to_le32(cl_bit_vec);
776	cfg_entry->pf_id = r->func_id;
777	cfg_entry->vlan_id = cpu_to_le16(vlan_id);
778
779	if (add) {
780		SET_FLAG(cfg_entry->flags, MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
781			 T_ETH_MAC_COMMAND_SET);
782		SET_FLAG(cfg_entry->flags,
783			 MAC_CONFIGURATION_ENTRY_VLAN_FILTERING_MODE, opcode);
784
785		/* Set a MAC in a ramrod data */
786		bnx2x_set_fw_mac_addr(&cfg_entry->msb_mac_addr,
787				      &cfg_entry->middle_mac_addr,
788				      &cfg_entry->lsb_mac_addr, mac);
789	} else
790		SET_FLAG(cfg_entry->flags, MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
791			 T_ETH_MAC_COMMAND_INVALIDATE);
792}
793
794static inline void bnx2x_vlan_mac_set_rdata_e1x(struct bnx2x *bp,
795	struct bnx2x_vlan_mac_obj *o, int type, int cam_offset, bool add,
796	u8 *mac, u16 vlan_id, int opcode, struct mac_configuration_cmd *config)
797{
798	struct mac_configuration_entry *cfg_entry = &config->config_table[0];
799	struct bnx2x_raw_obj *raw = &o->raw;
800
801	bnx2x_vlan_mac_set_rdata_hdr_e1x(bp, o, type, cam_offset,
802					 &config->hdr);
803	bnx2x_vlan_mac_set_cfg_entry_e1x(bp, o, add, opcode, mac, vlan_id,
804					 cfg_entry);
805
806	DP(BNX2X_MSG_SP, "%s MAC %pM CLID %d CAM offset %d\n",
807			 add ? "setting" : "clearing",
808			 mac, raw->cl_id, cam_offset);
809}
810
811/**
812 * bnx2x_set_one_mac_e1x - fill a single MAC rule ramrod data
813 *
814 * @bp:		device handle
815 * @o:		bnx2x_vlan_mac_obj
816 * @elem:	bnx2x_exeq_elem
817 * @rule_idx:	rule_idx
818 * @cam_offset: cam_offset
819 */
820static void bnx2x_set_one_mac_e1x(struct bnx2x *bp,
821				  struct bnx2x_vlan_mac_obj *o,
822				  struct bnx2x_exeq_elem *elem, int rule_idx,
823				  int cam_offset)
824{
825	struct bnx2x_raw_obj *raw = &o->raw;
826	struct mac_configuration_cmd *config =
827		(struct mac_configuration_cmd *)(raw->rdata);
828	/*
829	 * 57710 and 57711 do not support MOVE command,
830	 * so it's either ADD or DEL
831	 */
832	bool add = (elem->cmd_data.vlan_mac.cmd == BNX2X_VLAN_MAC_ADD) ?
833		true : false;
834
835	/* Reset the ramrod data buffer */
836	memset(config, 0, sizeof(*config));
837
838	bnx2x_vlan_mac_set_rdata_e1x(bp, o, BNX2X_FILTER_MAC_PENDING,
839				     cam_offset, add,
840				     elem->cmd_data.vlan_mac.u.mac.mac, 0,
841				     ETH_VLAN_FILTER_ANY_VLAN, config);
842}
843
844static void bnx2x_set_one_vlan_e2(struct bnx2x *bp,
845				  struct bnx2x_vlan_mac_obj *o,
846				  struct bnx2x_exeq_elem *elem, int rule_idx,
847				  int cam_offset)
848{
849	struct bnx2x_raw_obj *raw = &o->raw;
850	struct eth_classify_rules_ramrod_data *data =
851		(struct eth_classify_rules_ramrod_data *)(raw->rdata);
852	int rule_cnt = rule_idx + 1;
853	union eth_classify_rule_cmd *rule_entry = &data->rules[rule_idx];
854	int cmd = elem->cmd_data.vlan_mac.cmd;
855	bool add = (cmd == BNX2X_VLAN_MAC_ADD) ? true : false;
856	u16 vlan = elem->cmd_data.vlan_mac.u.vlan.vlan;
857
858	/* Reset the ramrod data buffer for the first rule */
859	if (rule_idx == 0)
860		memset(data, 0, sizeof(*data));
861
862	/* Set a rule header */
863	bnx2x_vlan_mac_set_cmd_hdr_e2(bp, o, add, CLASSIFY_RULE_OPCODE_VLAN,
864				      &rule_entry->vlan.header);
865
866	DP(BNX2X_MSG_SP, "About to %s VLAN %d\n", (add ? "add" : "delete"),
867			 vlan);
868
869	/* Set a VLAN itself */
870	rule_entry->vlan.vlan = cpu_to_le16(vlan);
871
872	/* MOVE: Add a rule that will add this VLAN to the target Queue */
873	if (cmd == BNX2X_VLAN_MAC_MOVE) {
874		rule_entry++;
875		rule_cnt++;
876
877		/* Setup ramrod data */
878		bnx2x_vlan_mac_set_cmd_hdr_e2(bp,
879					elem->cmd_data.vlan_mac.target_obj,
880					      true, CLASSIFY_RULE_OPCODE_VLAN,
881					      &rule_entry->vlan.header);
882
883		/* Set a VLAN itself */
884		rule_entry->vlan.vlan = cpu_to_le16(vlan);
885	}
886
887	/* Set the ramrod data header */
888	/* TODO: take this to a higher level in order to prevent multiple
889		 writes */
890	bnx2x_vlan_mac_set_rdata_hdr_e2(raw->cid, raw->state, &data->header,
891					rule_cnt);
892}
893
894static void bnx2x_set_one_vlan_mac_e2(struct bnx2x *bp,
895				      struct bnx2x_vlan_mac_obj *o,
896				      struct bnx2x_exeq_elem *elem,
897				      int rule_idx, int cam_offset)
898{
899	struct bnx2x_raw_obj *raw = &o->raw;
900	struct eth_classify_rules_ramrod_data *data =
901		(struct eth_classify_rules_ramrod_data *)(raw->rdata);
902	int rule_cnt = rule_idx + 1;
903	union eth_classify_rule_cmd *rule_entry = &data->rules[rule_idx];
904	int cmd = elem->cmd_data.vlan_mac.cmd;
905	bool add = (cmd == BNX2X_VLAN_MAC_ADD) ? true : false;
906	u16 vlan = elem->cmd_data.vlan_mac.u.vlan_mac.vlan;
907	u8 *mac = elem->cmd_data.vlan_mac.u.vlan_mac.mac;
908
909
910	/* Reset the ramrod data buffer for the first rule */
911	if (rule_idx == 0)
912		memset(data, 0, sizeof(*data));
913
914	/* Set a rule header */
915	bnx2x_vlan_mac_set_cmd_hdr_e2(bp, o, add, CLASSIFY_RULE_OPCODE_PAIR,
916				      &rule_entry->pair.header);
917
918	/* Set VLAN and MAC themselves */
919	rule_entry->pair.vlan = cpu_to_le16(vlan);
920	bnx2x_set_fw_mac_addr(&rule_entry->pair.mac_msb,
921			      &rule_entry->pair.mac_mid,
922			      &rule_entry->pair.mac_lsb, mac);
923
924	/* MOVE: Add a rule that will add this pair to the target Queue */
925	if (cmd == BNX2X_VLAN_MAC_MOVE) {
926		rule_entry++;
927		rule_cnt++;
928
929		/* Setup ramrod data */
930		bnx2x_vlan_mac_set_cmd_hdr_e2(bp,
931					elem->cmd_data.vlan_mac.target_obj,
932					      true, CLASSIFY_RULE_OPCODE_PAIR,
933					      &rule_entry->pair.header);
934
935		/* Set VLAN and MAC themselves */
936		rule_entry->pair.vlan = cpu_to_le16(vlan);
937		bnx2x_set_fw_mac_addr(&rule_entry->pair.mac_msb,
938				      &rule_entry->pair.mac_mid,
939				      &rule_entry->pair.mac_lsb, mac);
940	}
941
942	/* Set the ramrod data header */
943	/* TODO: take this to a higher level in order to prevent multiple
944		 writes */
945	bnx2x_vlan_mac_set_rdata_hdr_e2(raw->cid, raw->state, &data->header,
946					rule_cnt);
947}
948
949/**
950 * bnx2x_set_one_vlan_mac_e1h - fill a single VLAN-MAC rule ramrod data
951 *
952 * @bp:		device handle
953 * @o:		bnx2x_vlan_mac_obj
954 * @elem:	bnx2x_exeq_elem
955 * @rule_idx:	rule_idx
956 * @cam_offset:	cam_offset
957 */
958static void bnx2x_set_one_vlan_mac_e1h(struct bnx2x *bp,
959				       struct bnx2x_vlan_mac_obj *o,
960				       struct bnx2x_exeq_elem *elem,
961				       int rule_idx, int cam_offset)
962{
963	struct bnx2x_raw_obj *raw = &o->raw;
964	struct mac_configuration_cmd *config =
965		(struct mac_configuration_cmd *)(raw->rdata);
966	/*
967	 * 57710 and 57711 do not support MOVE command,
968	 * so it's either ADD or DEL
969	 */
970	bool add = (elem->cmd_data.vlan_mac.cmd == BNX2X_VLAN_MAC_ADD) ?
971		true : false;
972
973	/* Reset the ramrod data buffer */
974	memset(config, 0, sizeof(*config));
975
976	bnx2x_vlan_mac_set_rdata_e1x(bp, o, BNX2X_FILTER_VLAN_MAC_PENDING,
977				     cam_offset, add,
978				     elem->cmd_data.vlan_mac.u.vlan_mac.mac,
979				     elem->cmd_data.vlan_mac.u.vlan_mac.vlan,
980				     ETH_VLAN_FILTER_CLASSIFY, config);
981}
982
983#define list_next_entry(pos, member) \
984	list_entry((pos)->member.next, typeof(*(pos)), member)
985
986/**
987 * bnx2x_vlan_mac_restore - reconfigure next MAC/VLAN/VLAN-MAC element
988 *
989 * @bp:		device handle
990 * @p:		command parameters
991 * @ppos:	pointer to the cookie
992 *
993 * Reconfigures the next MAC/VLAN/VLAN-MAC element from the
994 * previously configured elements list.
995 *
996 * From the command parameters only the RAMROD_COMP_WAIT bit in ramrod_flags
997 * is taken into account.
998 *
999 * The cookie should be given back in the next call to make the function
1000 * handle the next element. If *ppos is set to NULL it will restart the
1001 * iterator. If *ppos == NULL on return, the last element has been
1002 * handled.
1003 *
1004 */
1005static int bnx2x_vlan_mac_restore(struct bnx2x *bp,
1006			   struct bnx2x_vlan_mac_ramrod_params *p,
1007			   struct bnx2x_vlan_mac_registry_elem **ppos)
1008{
1009	struct bnx2x_vlan_mac_registry_elem *pos;
1010	struct bnx2x_vlan_mac_obj *o = p->vlan_mac_obj;
1011
1012	/* If list is empty - there is nothing to do here */
1013	if (list_empty(&o->head)) {
1014		*ppos = NULL;
1015		return 0;
1016	}
1017
1018	/* make a step... */
1019	if (*ppos == NULL)
1020		*ppos = list_first_entry(&o->head,
1021					 struct bnx2x_vlan_mac_registry_elem,
1022					 link);
1023	else
1024		*ppos = list_next_entry(*ppos, link);
1025
1026	pos = *ppos;
1027
1028	/* If it's the last step - return NULL */
1029	if (list_is_last(&pos->link, &o->head))
1030		*ppos = NULL;
1031
1032	/* Prepare a 'user_req' */
1033	memcpy(&p->user_req.u, &pos->u, sizeof(pos->u));
1034
1035	/* Set the command */
1036	p->user_req.cmd = BNX2X_VLAN_MAC_ADD;
1037
1038	/* Set vlan_mac_flags */
1039	p->user_req.vlan_mac_flags = pos->vlan_mac_flags;
1040
1041	/* Set a restore bit */
1042	__set_bit(RAMROD_RESTORE, &p->ramrod_flags);
1043
1044	return bnx2x_config_vlan_mac(bp, p);
1045}
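/*
 * Illustrative restore loop (sketch only, error handling elided): start with
 * a NULL cookie and keep calling until the function hands back NULL.
 *
 *	struct bnx2x_vlan_mac_registry_elem *pos = NULL;
 *	int rc;
 *
 *	do {
 *		rc = o->restore(bp, p, &pos);
 *	} while (pos && rc >= 0);
 */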
1046
1047/*
1048 * bnx2x_exeq_get_mac/bnx2x_exeq_get_vlan/bnx2x_exeq_get_vlan_mac return a
1049 * pointer to an element with a specific criteria and NULL if such an element
1050 * hasn't been found.
1051 */
1052static struct bnx2x_exeq_elem *bnx2x_exeq_get_mac(
1053	struct bnx2x_exe_queue_obj *o,
1054	struct bnx2x_exeq_elem *elem)
1055{
1056	struct bnx2x_exeq_elem *pos;
1057	struct bnx2x_mac_ramrod_data *data = &elem->cmd_data.vlan_mac.u.mac;
1058
1059	/* Check pending for execution commands */
1060	list_for_each_entry(pos, &o->exe_queue, link)
1061		if (!memcmp(&pos->cmd_data.vlan_mac.u.mac, data,
1062			      sizeof(*data)) &&
1063		    (pos->cmd_data.vlan_mac.cmd == elem->cmd_data.vlan_mac.cmd))
1064			return pos;
1065
1066	return NULL;
1067}
1068
1069static struct bnx2x_exeq_elem *bnx2x_exeq_get_vlan(
1070	struct bnx2x_exe_queue_obj *o,
1071	struct bnx2x_exeq_elem *elem)
1072{
1073	struct bnx2x_exeq_elem *pos;
1074	struct bnx2x_vlan_ramrod_data *data = &elem->cmd_data.vlan_mac.u.vlan;
1075
1076	/* Check pending for execution commands */
1077	list_for_each_entry(pos, &o->exe_queue, link)
1078		if (!memcmp(&pos->cmd_data.vlan_mac.u.vlan, data,
1079			      sizeof(*data)) &&
1080		    (pos->cmd_data.vlan_mac.cmd == elem->cmd_data.vlan_mac.cmd))
1081			return pos;
1082
1083	return NULL;
1084}
1085
1086static struct bnx2x_exeq_elem *bnx2x_exeq_get_vlan_mac(
1087	struct bnx2x_exe_queue_obj *o,
1088	struct bnx2x_exeq_elem *elem)
1089{
1090	struct bnx2x_exeq_elem *pos;
1091	struct bnx2x_vlan_mac_ramrod_data *data =
1092		&elem->cmd_data.vlan_mac.u.vlan_mac;
1093
1094	/* Check pending for execution commands */
1095	list_for_each_entry(pos, &o->exe_queue, link)
1096		if (!memcmp(&pos->cmd_data.vlan_mac.u.vlan_mac, data,
1097			      sizeof(*data)) &&
1098		    (pos->cmd_data.vlan_mac.cmd == elem->cmd_data.vlan_mac.cmd))
1099			return pos;
1100
1101	return NULL;
1102}
1103
1104/**
1105 * bnx2x_validate_vlan_mac_add - check if an ADD command can be executed
1106 *
1107 * @bp:		device handle
1108 * @qo:		bnx2x_qable_obj
1109 * @elem:	bnx2x_exeq_elem
1110 *
1111 * Checks that the requested configuration can be added. If yes and if
1112 * requested, consume CAM credit.
1113 *
1114 * The 'validate' is run after the 'optimize'.
1115 *
1116 */
1117static inline int bnx2x_validate_vlan_mac_add(struct bnx2x *bp,
1118					      union bnx2x_qable_obj *qo,
1119					      struct bnx2x_exeq_elem *elem)
1120{
1121	struct bnx2x_vlan_mac_obj *o = &qo->vlan_mac;
1122	struct bnx2x_exe_queue_obj *exeq = &o->exe_queue;
1123	int rc;
1124
1125	/* Check the registry */
1126	rc = o->check_add(o, &elem->cmd_data.vlan_mac.u);
1127	if (rc) {
1128		DP(BNX2X_MSG_SP, "ADD command is not allowed considering "
1129				 "current registry state\n");
1130		return rc;
1131	}
1132
1133	/*
1134	 * Check if there is a pending ADD command for this
1135	 * MAC/VLAN/VLAN-MAC. Return an error if there is.
1136	 */
1137	if (exeq->get(exeq, elem)) {
1138		DP(BNX2X_MSG_SP, "There is a pending ADD command already\n");
1139		return -EEXIST;
1140	}
1141
1142	/*
1143	 * TODO: Check the pending MOVE from other objects where this
1144	 * object is a destination object.
1145	 */
1146
1147	/* Consume the credit, unless asked not to */
1148	if (!(test_bit(BNX2X_DONT_CONSUME_CAM_CREDIT,
1149		       &elem->cmd_data.vlan_mac.vlan_mac_flags) ||
1150	    o->get_credit(o)))
1151		return -EINVAL;
1152
1153	return 0;
1154}
1155
1156/**
1157 * bnx2x_validate_vlan_mac_del - check if the DEL command can be executed
1158 *
1159 * @bp:		device handle
1160 * @qo:		qable object to check
1161 * @elem:	element that needs to be deleted
1162 *
1163 * Checks that the requested configuration can be deleted. If yes and if
1164 * requested, returns a CAM credit.
1165 *
1166 * The 'validate' is run after the 'optimize'.
1167 */
1168static inline int bnx2x_validate_vlan_mac_del(struct bnx2x *bp,
1169					      union bnx2x_qable_obj *qo,
1170					      struct bnx2x_exeq_elem *elem)
1171{
1172	struct bnx2x_vlan_mac_obj *o = &qo->vlan_mac;
1173	struct bnx2x_vlan_mac_registry_elem *pos;
1174	struct bnx2x_exe_queue_obj *exeq = &o->exe_queue;
1175	struct bnx2x_exeq_elem query_elem;
1176
1177	/* If this classification cannot be deleted (doesn't exist)
1178	 * - return -EEXIST.
1179	 */
1180	pos = o->check_del(o, &elem->cmd_data.vlan_mac.u);
1181	if (!pos) {
1182		DP(BNX2X_MSG_SP, "DEL command is not allowed considering "
1183				 "current registry state\n");
1184		return -EEXIST;
1185	}
1186
1187	/*
1188	 * Check if there are pending DEL or MOVE commands for this
1189	 * MAC/VLAN/VLAN-MAC. Return an error if so.
1190	 */
1191	memcpy(&query_elem, elem, sizeof(query_elem));
1192
1193	/* Check for MOVE commands */
1194	query_elem.cmd_data.vlan_mac.cmd = BNX2X_VLAN_MAC_MOVE;
1195	if (exeq->get(exeq, &query_elem)) {
1196		BNX2X_ERR("There is a pending MOVE command already\n");
1197		return -EINVAL;
1198	}
1199
1200	/* Check for DEL commands */
1201	if (exeq->get(exeq, elem)) {
1202		DP(BNX2X_MSG_SP, "There is a pending DEL command already\n");
1203		return -EEXIST;
1204	}
1205
1206	/* Return the credit to the credit pool, unless asked not to */
1207	if (!(test_bit(BNX2X_DONT_CONSUME_CAM_CREDIT,
1208		       &elem->cmd_data.vlan_mac.vlan_mac_flags) ||
1209	    o->put_credit(o))) {
1210		BNX2X_ERR("Failed to return a credit\n");
1211		return -EINVAL;
1212	}
1213
1214	return 0;
1215}
1216
1217/**
1218 * bnx2x_validate_vlan_mac_move - check if the MOVE command can be executed
1219 *
1220 * @bp:		device handle
1221 * @qo:		qable object to check (source)
1222 * @elem:	element that needs to be moved
1223 *
1224 * Checks that the requested configuration can be moved. If yes and if
1225 * requested, returns a CAM credit.
1226 *
1227 * The 'validate' is run after the 'optimize'.
1228 */
1229static inline int bnx2x_validate_vlan_mac_move(struct bnx2x *bp,
1230					       union bnx2x_qable_obj *qo,
1231					       struct bnx2x_exeq_elem *elem)
1232{
1233	struct bnx2x_vlan_mac_obj *src_o = &qo->vlan_mac;
1234	struct bnx2x_vlan_mac_obj *dest_o = elem->cmd_data.vlan_mac.target_obj;
1235	struct bnx2x_exeq_elem query_elem;
1236	struct bnx2x_exe_queue_obj *src_exeq = &src_o->exe_queue;
1237	struct bnx2x_exe_queue_obj *dest_exeq = &dest_o->exe_queue;
1238
1239	/*
1240	 * Check if we can perform this operation based on the current registry
1241	 * state.
1242	 */
1243	if (!src_o->check_move(src_o, dest_o, &elem->cmd_data.vlan_mac.u)) {
1244		DP(BNX2X_MSG_SP, "MOVE command is not allowed considering "
1245				 "current registry state\n");
1246		return -EINVAL;
1247	}
1248
1249	/*
1250	 * Check if there is an already pending DEL or MOVE command for the
1251	 * source object or ADD command for a destination object. Return an
1252	 * error if so.
1253	 */
1254	memcpy(&query_elem, elem, sizeof(query_elem));
1255
1256	/* Check DEL on source */
1257	query_elem.cmd_data.vlan_mac.cmd = BNX2X_VLAN_MAC_DEL;
1258	if (src_exeq->get(src_exeq, &query_elem)) {
1259		BNX2X_ERR("There is a pending DEL command on the source "
1260			  "queue already\n");
1261		return -EINVAL;
1262	}
1263
1264	/* Check MOVE on source */
1265	if (src_exeq->get(src_exeq, elem)) {
1266		DP(BNX2X_MSG_SP, "There is a pending MOVE command already\n");
1267		return -EEXIST;
1268	}
1269
1270	/* Check ADD on destination */
1271	query_elem.cmd_data.vlan_mac.cmd = BNX2X_VLAN_MAC_ADD;
1272	if (dest_exeq->get(dest_exeq, &query_elem)) {
1273		BNX2X_ERR("There is a pending ADD command on the "
1274			  "destination queue already\n");
1275		return -EINVAL;
1276	}
1277
1278	/* Consume the credit, unless asked not to */
1279	if (!(test_bit(BNX2X_DONT_CONSUME_CAM_CREDIT_DEST,
1280		       &elem->cmd_data.vlan_mac.vlan_mac_flags) ||
1281	    dest_o->get_credit(dest_o)))
1282		return -EINVAL;
1283
1284	if (!(test_bit(BNX2X_DONT_CONSUME_CAM_CREDIT,
1285		       &elem->cmd_data.vlan_mac.vlan_mac_flags) ||
1286	    src_o->put_credit(src_o))) {
1287		/* return the credit taken from dest... */
1288		dest_o->put_credit(dest_o);
1289		return -EINVAL;
1290	}
1291
1292	return 0;
1293}
1294
1295static int bnx2x_validate_vlan_mac(struct bnx2x *bp,
1296				   union bnx2x_qable_obj *qo,
1297				   struct bnx2x_exeq_elem *elem)
1298{
1299	switch (elem->cmd_data.vlan_mac.cmd) {
1300	case BNX2X_VLAN_MAC_ADD:
1301		return bnx2x_validate_vlan_mac_add(bp, qo, elem);
1302	case BNX2X_VLAN_MAC_DEL:
1303		return bnx2x_validate_vlan_mac_del(bp, qo, elem);
1304	case BNX2X_VLAN_MAC_MOVE:
1305		return bnx2x_validate_vlan_mac_move(bp, qo, elem);
1306	default:
1307		return -EINVAL;
1308	}
1309}
1310
1311/**
1312 * bnx2x_wait_vlan_mac - passively wait for 5 seconds until all work completes.
1313 *
1314 * @bp:		device handle
1315 * @o:		bnx2x_vlan_mac_obj
1316 *
1317 */
1318static int bnx2x_wait_vlan_mac(struct bnx2x *bp,
1319			       struct bnx2x_vlan_mac_obj *o)
1320{
1321	int cnt = 5000, rc;
1322	struct bnx2x_exe_queue_obj *exeq = &o->exe_queue;
1323	struct bnx2x_raw_obj *raw = &o->raw;
1324
1325	while (cnt--) {
1326		/* Wait for the current command to complete */
1327		rc = raw->wait_comp(bp, raw);
1328		if (rc)
1329			return rc;
1330
1331		/* Wait until there are no pending commands */
1332		if (!bnx2x_exe_queue_empty(exeq))
1333			usleep_range(1000, 1000);
1334		else
1335			return 0;
1336	}
1337
1338	return -EBUSY;
1339}
1340
1341/**
1342 * bnx2x_complete_vlan_mac - complete one VLAN-MAC ramrod
1343 *
1344 * @bp:		device handle
1345 * @o:		bnx2x_vlan_mac_obj
1346 * @cqe:	completion queue element
1347 * @ramrod_flags:	if RAMROD_CONT is set, schedule the next execution chunk
1348 *
1349 */
1350static int bnx2x_complete_vlan_mac(struct bnx2x *bp,
1351				   struct bnx2x_vlan_mac_obj *o,
1352				   union event_ring_elem *cqe,
1353				   unsigned long *ramrod_flags)
1354{
1355	struct bnx2x_raw_obj *r = &o->raw;
1356	int rc;
1357
1358	/* Reset pending list */
1359	bnx2x_exe_queue_reset_pending(bp, &o->exe_queue);
1360
1361	/* Clear pending */
1362	r->clear_pending(r);
1363
1364	/* If ramrod failed this is most likely a SW bug */
1365	if (cqe->message.error)
1366		return -EINVAL;
1367
1368	/* Run the next bulk of pending commands if requested */
1369	if (test_bit(RAMROD_CONT, ramrod_flags)) {
1370		rc = bnx2x_exe_queue_step(bp, &o->exe_queue, ramrod_flags);
1371		if (rc < 0)
1372			return rc;
1373	}
1374
1375	/* If there is more work to do return PENDING */
1376	if (!bnx2x_exe_queue_empty(&o->exe_queue))
1377		return 1;
1378
1379	return 0;
1380}
1381
1382/**
1383 * bnx2x_optimize_vlan_mac - optimize ADD and DEL commands.
1384 *
1385 * @bp:		device handle
1386 * @qo:	bnx2x_qable_obj
1387 * @elem:	bnx2x_exeq_elem
1388 */
1389static int bnx2x_optimize_vlan_mac(struct bnx2x *bp,
1390				   union bnx2x_qable_obj *qo,
1391				   struct bnx2x_exeq_elem *elem)
1392{
1393	struct bnx2x_exeq_elem query, *pos;
1394	struct bnx2x_vlan_mac_obj *o = &qo->vlan_mac;
1395	struct bnx2x_exe_queue_obj *exeq = &o->exe_queue;
1396
1397	memcpy(&query, elem, sizeof(query));
1398
1399	switch (elem->cmd_data.vlan_mac.cmd) {
1400	case BNX2X_VLAN_MAC_ADD:
1401		query.cmd_data.vlan_mac.cmd = BNX2X_VLAN_MAC_DEL;
1402		break;
1403	case BNX2X_VLAN_MAC_DEL:
1404		query.cmd_data.vlan_mac.cmd = BNX2X_VLAN_MAC_ADD;
1405		break;
1406	default:
1407		/* Don't handle anything other than ADD or DEL */
1408		return 0;
1409	}
1410
1411	/* If we found the appropriate element - delete it */
1412	pos = exeq->get(exeq, &query);
1413	if (pos) {
1414
1415		/* Return the credit of the optimized command */
1416		if (!test_bit(BNX2X_DONT_CONSUME_CAM_CREDIT,
1417			      &pos->cmd_data.vlan_mac.vlan_mac_flags)) {
1418			if ((query.cmd_data.vlan_mac.cmd ==
1419			     BNX2X_VLAN_MAC_ADD) && !o->put_credit(o)) {
1420				BNX2X_ERR("Failed to return the credit for the "
1421					  "optimized ADD command\n");
1422				return -EINVAL;
1423			} else if (!o->get_credit(o)) { /* VLAN_MAC_DEL */
1424				BNX2X_ERR("Failed to recover the credit from "
1425					  "the optimized DEL command\n");
1426				return -EINVAL;
1427			}
1428		}
1429
1430		DP(BNX2X_MSG_SP, "Optimizing %s command\n",
1431			   (elem->cmd_data.vlan_mac.cmd == BNX2X_VLAN_MAC_ADD) ?
1432			   "ADD" : "DEL");
1433
1434		list_del(&pos->link);
1435		bnx2x_exe_queue_free_elem(bp, pos);
1436		return 1;
1437	}
1438
1439	return 0;
1440}
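/*
 * Example of the optimization above: if a DEL for some MAC is still queued
 * when an ADD for the same MAC arrives, the two cancel out - the pending
 * command is removed from the queue, its CAM credit accounting is undone and
 * a positive value is returned, so bnx2x_exe_queue_add() frees the new
 * element instead of queueing it. Nothing is ever sent to the FW.
 */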
1441
1442/**
1443 * bnx2x_vlan_mac_get_registry_elem - prepare a registry element
1444 *
1445 * @bp:	  device handle
1446 * @o:		vlan_mac object
1447 * @elem:	execution queue element holding the command
1448 * @restore:	true if this is a RESTORE flow
1449 * @re:		output: the prepared registry element
1450 *
1451 * prepare a registry element according to the current command request.
1452 */
1453static inline int bnx2x_vlan_mac_get_registry_elem(
1454	struct bnx2x *bp,
1455	struct bnx2x_vlan_mac_obj *o,
1456	struct bnx2x_exeq_elem *elem,
1457	bool restore,
1458	struct bnx2x_vlan_mac_registry_elem **re)
1459{
1460	int cmd = elem->cmd_data.vlan_mac.cmd;
1461	struct bnx2x_vlan_mac_registry_elem *reg_elem;
1462
1463	/* Allocate a new registry element if needed. */
1464	if (!restore &&
1465	    ((cmd == BNX2X_VLAN_MAC_ADD) || (cmd == BNX2X_VLAN_MAC_MOVE))) {
1466		reg_elem = kzalloc(sizeof(*reg_elem), GFP_ATOMIC);
1467		if (!reg_elem)
1468			return -ENOMEM;
1469
1470		/* Get a new CAM offset */
1471		if (!o->get_cam_offset(o, &reg_elem->cam_offset)) {
1472			/*
1473			 * This should never happen, because we have checked the
1474			 * CAM availability in the 'validate'.
1475			 */
1476			WARN_ON(1);
1477			kfree(reg_elem);
1478			return -EINVAL;
1479		}
1480
1481		DP(BNX2X_MSG_SP, "Got cam offset %d\n", reg_elem->cam_offset);
1482
1483		/* Set a VLAN-MAC data */
1484		memcpy(&reg_elem->u, &elem->cmd_data.vlan_mac.u,
1485			  sizeof(reg_elem->u));
1486
1487		/* Copy the flags (needed for DEL and RESTORE flows) */
1488		reg_elem->vlan_mac_flags =
1489			elem->cmd_data.vlan_mac.vlan_mac_flags;
1490	} else /* DEL, RESTORE */
1491		reg_elem = o->check_del(o, &elem->cmd_data.vlan_mac.u);
1492
1493	*re = reg_elem;
1494	return 0;
1495}
1496
1497/**
1498 * bnx2x_execute_vlan_mac - execute vlan mac command
1499 *
1500 * @bp:			device handle
1501 * @qo:		qable object (contains the vlan_mac object)
1502 * @exe_chunk:	list of commands to execute
1503 * @ramrod_flags:	execution flags
1504 *
1505 * go and send a ramrod!
1506 */
1507static int bnx2x_execute_vlan_mac(struct bnx2x *bp,
1508				  union bnx2x_qable_obj *qo,
1509				  struct list_head *exe_chunk,
1510				  unsigned long *ramrod_flags)
1511{
1512	struct bnx2x_exeq_elem *elem;
1513	struct bnx2x_vlan_mac_obj *o = &qo->vlan_mac, *cam_obj;
1514	struct bnx2x_raw_obj *r = &o->raw;
1515	int rc, idx = 0;
1516	bool restore = test_bit(RAMROD_RESTORE, ramrod_flags);
1517	bool drv_only = test_bit(RAMROD_DRV_CLR_ONLY, ramrod_flags);
1518	struct bnx2x_vlan_mac_registry_elem *reg_elem;
1519	int cmd;
1520
1521	/*
1522	 * If DRIVER_ONLY execution is requested, clean up the registry
1523	 * and exit. Otherwise send a ramrod to the FW.
1524	 */
1525	if (!drv_only) {
1526		WARN_ON(r->check_pending(r));
1527
1528		/* Set pending */
1529		r->set_pending(r);
1530
1531		/* Fill the ramrod data */
1532		list_for_each_entry(elem, exe_chunk, link) {
1533			cmd = elem->cmd_data.vlan_mac.cmd;
1534			/*
1535			 * In a MOVE command we add the entry to the target
1536			 * object, so use it for the CAM search.
1537			 */
1538			if (cmd == BNX2X_VLAN_MAC_MOVE)
1539				cam_obj = elem->cmd_data.vlan_mac.target_obj;
1540			else
1541				cam_obj = o;
1542
1543			rc = bnx2x_vlan_mac_get_registry_elem(bp, cam_obj,
1544							      elem, restore,
1545							      &reg_elem);
1546			if (rc)
1547				goto error_exit;
1548
1549			WARN_ON(!reg_elem);
1550
1551			/* Push a new entry into the registry */
1552			if (!restore &&
1553			    ((cmd == BNX2X_VLAN_MAC_ADD) ||
1554			    (cmd == BNX2X_VLAN_MAC_MOVE)))
1555				list_add(&reg_elem->link, &cam_obj->head);
1556
1557			/* Configure a single command in a ramrod data buffer */
1558			o->set_one_rule(bp, o, elem, idx,
1559					reg_elem->cam_offset);
1560
1561			/* MOVE command consumes 2 entries in the ramrod data */
1562			if (cmd == BNX2X_VLAN_MAC_MOVE)
1563				idx += 2;
1564			else
1565				idx++;
1566		}
1567
1568		/*
1569		 * No need for an explicit memory barrier here: we have to
1570		 * ensure the ordering of writing to the SPQ element and
1571		 * updating of the SPQ producer, which involves a memory read,
1572		 * and the required full memory barrier is already put there
1573		 * (inside bnx2x_sp_post()).
1574		 */
1575
1576		rc = bnx2x_sp_post(bp, o->ramrod_cmd, r->cid,
1577				   U64_HI(r->rdata_mapping),
1578				   U64_LO(r->rdata_mapping),
1579				   ETH_CONNECTION_TYPE);
1580		if (rc)
1581			goto error_exit;
1582	}
1583
1584	/* Now, when we are done with the ramrod - clean up the registry */
1585	list_for_each_entry(elem, exe_chunk, link) {
1586		cmd = elem->cmd_data.vlan_mac.cmd;
1587		if ((cmd == BNX2X_VLAN_MAC_DEL) ||
1588		    (cmd == BNX2X_VLAN_MAC_MOVE)) {
1589			reg_elem = o->check_del(o, &elem->cmd_data.vlan_mac.u);
1590
1591			WARN_ON(!reg_elem);
1592
1593			o->put_cam_offset(o, reg_elem->cam_offset);
1594			list_del(&reg_elem->link);
1595			kfree(reg_elem);
1596		}
1597	}
1598
1599	if (!drv_only)
1600		return 1;
1601	else
1602		return 0;
1603
1604error_exit:
1605	r->clear_pending(r);
1606
1607	/* Clean up the registry in case of a failure */
1608	list_for_each_entry(elem, exe_chunk, link) {
1609		cmd = elem->cmd_data.vlan_mac.cmd;
1610
1611		if (cmd == BNX2X_VLAN_MAC_MOVE)
1612			cam_obj = elem->cmd_data.vlan_mac.target_obj;
1613		else
1614			cam_obj = o;
1615
1616		/* Delete all entries that were newly added above */
1617		if (!restore &&
1618		    ((cmd == BNX2X_VLAN_MAC_ADD) ||
1619		    (cmd == BNX2X_VLAN_MAC_MOVE))) {
1620			reg_elem = o->check_del(cam_obj,
1621						&elem->cmd_data.vlan_mac.u);
1622			if (reg_elem) {
1623				list_del(&reg_elem->link);
1624				kfree(reg_elem);
1625			}
1626		}
1627	}
1628
1629	return rc;
1630}
1631
1632static inline int bnx2x_vlan_mac_push_new_cmd(
1633	struct bnx2x *bp,
1634	struct bnx2x_vlan_mac_ramrod_params *p)
1635{
1636	struct bnx2x_exeq_elem *elem;
1637	struct bnx2x_vlan_mac_obj *o = p->vlan_mac_obj;
1638	bool restore = test_bit(RAMROD_RESTORE, &p->ramrod_flags);
1639
1640	/* Allocate the execution queue element */
1641	elem = bnx2x_exe_queue_alloc_elem(bp);
1642	if (!elem)
1643		return -ENOMEM;
1644
1645	/* Set the command 'length' */
1646	switch (p->user_req.cmd) {
1647	case BNX2X_VLAN_MAC_MOVE:
1648		elem->cmd_len = 2;
1649		break;
1650	default:
1651		elem->cmd_len = 1;
1652	}
1653
1654	/* Fill the object specific info */
1655	memcpy(&elem->cmd_data.vlan_mac, &p->user_req, sizeof(p->user_req));
1656
1657	/* Try to add a new command to the pending list */
1658	return bnx2x_exe_queue_add(bp, &o->exe_queue, elem, restore);
1659}
1660
1661/**
1662 * bnx2x_config_vlan_mac - configure VLAN/MAC/VLAN_MAC filtering rules.
1663 *
1664 * @bp:	  device handle
1665 * @p:		command parameters
1666 *
1667 */
1668int bnx2x_config_vlan_mac(
1669	struct bnx2x *bp,
1670	struct bnx2x_vlan_mac_ramrod_params *p)
1671{
1672	int rc = 0;
1673	struct bnx2x_vlan_mac_obj *o = p->vlan_mac_obj;
1674	unsigned long *ramrod_flags = &p->ramrod_flags;
1675	bool cont = test_bit(RAMROD_CONT, ramrod_flags);
1676	struct bnx2x_raw_obj *raw = &o->raw;
1677
1678	/*
1679	 * Add new elements to the execution list for commands that require it.
1680	 */
1681	if (!cont) {
1682		rc = bnx2x_vlan_mac_push_new_cmd(bp, p);
1683		if (rc)
1684			return rc;
1685	}
1686
1687	/*
1688	 * If nothing will be executed further in this iteration we want to
1689	 * return PENDING if there are pending commands
1690	 */
1691	if (!bnx2x_exe_queue_empty(&o->exe_queue))
1692		rc = 1;
1693
1694	if (test_bit(RAMROD_DRV_CLR_ONLY, ramrod_flags))  {
1695		DP(BNX2X_MSG_SP, "RAMROD_DRV_CLR_ONLY requested: "
1696				 "clearing a pending bit.\n");
1697		raw->clear_pending(raw);
1698	}
1699
1700	/* Execute commands if required */
1701	if (cont || test_bit(RAMROD_EXEC, ramrod_flags) ||
1702	    test_bit(RAMROD_COMP_WAIT, ramrod_flags)) {
1703		rc = bnx2x_exe_queue_step(bp, &o->exe_queue, ramrod_flags);
1704		if (rc < 0)
1705			return rc;
1706	}
1707
1708	/*
1709	 * RAMROD_COMP_WAIT is a superset of RAMROD_EXEC. If it was set
1710 * then the user wants to wait until the last command is done.
1711	 */
1712	if (test_bit(RAMROD_COMP_WAIT, &p->ramrod_flags)) {
1713		/*
1714		 * Wait at most for the current exe_queue length iterations plus
1715		 * one (for the current pending command).
1716		 */
1717		int max_iterations = bnx2x_exe_queue_length(&o->exe_queue) + 1;
1718
1719		while (!bnx2x_exe_queue_empty(&o->exe_queue) &&
1720		       max_iterations--) {
1721
1722			/* Wait for the current command to complete */
1723			rc = raw->wait_comp(bp, raw);
1724			if (rc)
1725				return rc;
1726
1727			/* Make a next step */
1728			rc = bnx2x_exe_queue_step(bp, &o->exe_queue,
1729						  ramrod_flags);
1730			if (rc < 0)
1731				return rc;
1732		}
1733
1734		return 0;
1735	}
1736
1737	return rc;
1738}
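/*
 * Typical usage sketch (illustration only; mac_obj and addr are hypothetical
 * and flags vary per flow): add a MAC and wait for the configuration to
 * complete.
 *
 *	struct bnx2x_vlan_mac_ramrod_params p;
 *
 *	memset(&p, 0, sizeof(p));
 *	p.vlan_mac_obj = mac_obj;
 *	p.user_req.cmd = BNX2X_VLAN_MAC_ADD;
 *	memcpy(p.user_req.u.mac.mac, addr, ETH_ALEN);
 *	__set_bit(RAMROD_COMP_WAIT, &p.ramrod_flags);
 *	rc = bnx2x_config_vlan_mac(bp, &p);
 */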
1739
1740
1741
1742/**
1743 * bnx2x_vlan_mac_del_all - delete elements with given vlan_mac_flags spec
1744 *
1745 * @bp:			device handle
1746 * @o:			vlan_mac object
1747 * @vlan_mac_flags:	vlan_mac_flags of the elements to delete
1748 * @ramrod_flags:	execution flags to be used for this deletion
1749 *
1750 * Returns zero if the last operation has completed successfully and there
1751 * are no more elements left, a positive value if the last operation has
1752 * completed successfully and there are more previously configured elements,
1753 * and a negative value if the current operation has failed.
1754 */
1755static int bnx2x_vlan_mac_del_all(struct bnx2x *bp,
1756				  struct bnx2x_vlan_mac_obj *o,
1757				  unsigned long *vlan_mac_flags,
1758				  unsigned long *ramrod_flags)
1759{
1760	struct bnx2x_vlan_mac_registry_elem *pos = NULL;
1761	int rc = 0;
1762	struct bnx2x_vlan_mac_ramrod_params p;
1763	struct bnx2x_exe_queue_obj *exeq = &o->exe_queue;
1764	struct bnx2x_exeq_elem *exeq_pos, *exeq_pos_n;
1765
1766	/* Clear pending commands first */
1767
1768	spin_lock_bh(&exeq->lock);
1769
1770	list_for_each_entry_safe(exeq_pos, exeq_pos_n, &exeq->exe_queue, link) {
1771		if (exeq_pos->cmd_data.vlan_mac.vlan_mac_flags ==
1772		    *vlan_mac_flags)
1773			list_del(&exeq_pos->link);
1774	}
1775
1776	spin_unlock_bh(&exeq->lock);
1777
1778	/* Prepare a command request */
1779	memset(&p, 0, sizeof(p));
1780	p.vlan_mac_obj = o;
1781	p.ramrod_flags = *ramrod_flags;
1782	p.user_req.cmd = BNX2X_VLAN_MAC_DEL;
1783
1784	/*
1785	 * Add all but the last VLAN-MAC to the execution queue without actually
1786	 * executing anything.
1787	 */
1788	__clear_bit(RAMROD_COMP_WAIT, &p.ramrod_flags);
1789	__clear_bit(RAMROD_EXEC, &p.ramrod_flags);
1790	__clear_bit(RAMROD_CONT, &p.ramrod_flags);
1791
1792	list_for_each_entry(pos, &o->head, link) {
1793		if (pos->vlan_mac_flags == *vlan_mac_flags) {
1794			p.user_req.vlan_mac_flags = pos->vlan_mac_flags;
1795			memcpy(&p.user_req.u, &pos->u, sizeof(pos->u));
1796			rc = bnx2x_config_vlan_mac(bp, &p);
1797			if (rc < 0) {
1798				BNX2X_ERR("Failed to add a new DEL command\n");
1799				return rc;
1800			}
1801		}
1802	}
1803
1804	p.ramrod_flags = *ramrod_flags;
1805	__set_bit(RAMROD_CONT, &p.ramrod_flags);
1806
1807	return bnx2x_config_vlan_mac(bp, &p);
1808}
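/*
 * Illustrative call (sketch only): delete every previously configured ETH MAC
 * on an object and wait for completion.
 *
 *	unsigned long vlan_mac_flags = 0, ramrod_flags = 0;
 *
 *	__set_bit(BNX2X_ETH_MAC, &vlan_mac_flags);
 *	__set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
 *	rc = o->delete_all(bp, o, &vlan_mac_flags, &ramrod_flags);
 */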
1809
1810static inline void bnx2x_init_raw_obj(struct bnx2x_raw_obj *raw, u8 cl_id,
1811	u32 cid, u8 func_id, void *rdata, dma_addr_t rdata_mapping, int state,
1812	unsigned long *pstate, bnx2x_obj_type type)
1813{
1814	raw->func_id = func_id;
1815	raw->cid = cid;
1816	raw->cl_id = cl_id;
1817	raw->rdata = rdata;
1818	raw->rdata_mapping = rdata_mapping;
1819	raw->state = state;
1820	raw->pstate = pstate;
1821	raw->obj_type = type;
1822	raw->check_pending = bnx2x_raw_check_pending;
1823	raw->clear_pending = bnx2x_raw_clear_pending;
1824	raw->set_pending = bnx2x_raw_set_pending;
1825	raw->wait_comp = bnx2x_raw_wait;
1826}
1827
1828static inline void bnx2x_init_vlan_mac_common(struct bnx2x_vlan_mac_obj *o,
1829	u8 cl_id, u32 cid, u8 func_id, void *rdata, dma_addr_t rdata_mapping,
1830	int state, unsigned long *pstate, bnx2x_obj_type type,
1831	struct bnx2x_credit_pool_obj *macs_pool,
1832	struct bnx2x_credit_pool_obj *vlans_pool)
1833{
1834	INIT_LIST_HEAD(&o->head);
1835
1836	o->macs_pool = macs_pool;
1837	o->vlans_pool = vlans_pool;
1838
1839	o->delete_all = bnx2x_vlan_mac_del_all;
1840	o->restore = bnx2x_vlan_mac_restore;
1841	o->complete = bnx2x_complete_vlan_mac;
1842	o->wait = bnx2x_wait_vlan_mac;
1843
1844	bnx2x_init_raw_obj(&o->raw, cl_id, cid, func_id, rdata, rdata_mapping,
1845			   state, pstate, type);
1846}
1847
1848
1849void bnx2x_init_mac_obj(struct bnx2x *bp,
1850			struct bnx2x_vlan_mac_obj *mac_obj,
1851			u8 cl_id, u32 cid, u8 func_id, void *rdata,
1852			dma_addr_t rdata_mapping, int state,
1853			unsigned long *pstate, bnx2x_obj_type type,
1854			struct bnx2x_credit_pool_obj *macs_pool)
1855{
1856	union bnx2x_qable_obj *qable_obj = (union bnx2x_qable_obj *)mac_obj;
1857
1858	bnx2x_init_vlan_mac_common(mac_obj, cl_id, cid, func_id, rdata,
1859				   rdata_mapping, state, pstate, type,
1860				   macs_pool, NULL);
1861
1862	/* CAM credit pool handling */
1863	mac_obj->get_credit = bnx2x_get_credit_mac;
1864	mac_obj->put_credit = bnx2x_put_credit_mac;
1865	mac_obj->get_cam_offset = bnx2x_get_cam_offset_mac;
1866	mac_obj->put_cam_offset = bnx2x_put_cam_offset_mac;
1867
1868	if (CHIP_IS_E1x(bp)) {
1869		mac_obj->set_one_rule      = bnx2x_set_one_mac_e1x;
1870		mac_obj->check_del         = bnx2x_check_mac_del;
1871		mac_obj->check_add         = bnx2x_check_mac_add;
1872		mac_obj->check_move        = bnx2x_check_move_always_err;
1873		mac_obj->ramrod_cmd        = RAMROD_CMD_ID_ETH_SET_MAC;
1874
1875		/* Exe Queue */
1876		bnx2x_exe_queue_init(bp,
1877				     &mac_obj->exe_queue, 1, qable_obj,
1878				     bnx2x_validate_vlan_mac,
1879				     bnx2x_optimize_vlan_mac,
1880				     bnx2x_execute_vlan_mac,
1881				     bnx2x_exeq_get_mac);
1882	} else {
1883		mac_obj->set_one_rule      = bnx2x_set_one_mac_e2;
1884		mac_obj->check_del         = bnx2x_check_mac_del;
1885		mac_obj->check_add         = bnx2x_check_mac_add;
1886		mac_obj->check_move        = bnx2x_check_move;
1887		mac_obj->ramrod_cmd        =
1888			RAMROD_CMD_ID_ETH_CLASSIFICATION_RULES;
1889
1890		/* Exe Queue */
1891		bnx2x_exe_queue_init(bp,
1892				     &mac_obj->exe_queue, CLASSIFY_RULES_COUNT,
1893				     qable_obj, bnx2x_validate_vlan_mac,
1894				     bnx2x_optimize_vlan_mac,
1895				     bnx2x_execute_vlan_mac,
1896				     bnx2x_exeq_get_mac);
1897	}
1898}
1899
1900void bnx2x_init_vlan_obj(struct bnx2x *bp,
1901			 struct bnx2x_vlan_mac_obj *vlan_obj,
1902			 u8 cl_id, u32 cid, u8 func_id, void *rdata,
1903			 dma_addr_t rdata_mapping, int state,
1904			 unsigned long *pstate, bnx2x_obj_type type,
1905			 struct bnx2x_credit_pool_obj *vlans_pool)
1906{
1907	union bnx2x_qable_obj *qable_obj = (union bnx2x_qable_obj *)vlan_obj;
1908
1909	bnx2x_init_vlan_mac_common(vlan_obj, cl_id, cid, func_id, rdata,
1910				   rdata_mapping, state, pstate, type, NULL,
1911				   vlans_pool);
1912
1913	vlan_obj->get_credit = bnx2x_get_credit_vlan;
1914	vlan_obj->put_credit = bnx2x_put_credit_vlan;
1915	vlan_obj->get_cam_offset = bnx2x_get_cam_offset_vlan;
1916	vlan_obj->put_cam_offset = bnx2x_put_cam_offset_vlan;
1917
1918	if (CHIP_IS_E1x(bp)) {
1919		BNX2X_ERR("Do not support chips other than E2 and newer\n");
1920		BUG();
1921	} else {
1922		vlan_obj->set_one_rule      = bnx2x_set_one_vlan_e2;
1923		vlan_obj->check_del         = bnx2x_check_vlan_del;
1924		vlan_obj->check_add         = bnx2x_check_vlan_add;
1925		vlan_obj->check_move        = bnx2x_check_move;
1926		vlan_obj->ramrod_cmd        =
1927			RAMROD_CMD_ID_ETH_CLASSIFICATION_RULES;
1928
1929		/* Exe Queue */
1930		bnx2x_exe_queue_init(bp,
1931				     &vlan_obj->exe_queue, CLASSIFY_RULES_COUNT,
1932				     qable_obj, bnx2x_validate_vlan_mac,
1933				     bnx2x_optimize_vlan_mac,
1934				     bnx2x_execute_vlan_mac,
1935				     bnx2x_exeq_get_vlan);
1936	}
1937}
1938
1939void bnx2x_init_vlan_mac_obj(struct bnx2x *bp,
1940			     struct bnx2x_vlan_mac_obj *vlan_mac_obj,
1941			     u8 cl_id, u32 cid, u8 func_id, void *rdata,
1942			     dma_addr_t rdata_mapping, int state,
1943			     unsigned long *pstate, bnx2x_obj_type type,
1944			     struct bnx2x_credit_pool_obj *macs_pool,
1945			     struct bnx2x_credit_pool_obj *vlans_pool)
1946{
1947	union bnx2x_qable_obj *qable_obj =
1948		(union bnx2x_qable_obj *)vlan_mac_obj;
1949
1950	bnx2x_init_vlan_mac_common(vlan_mac_obj, cl_id, cid, func_id, rdata,
1951				   rdata_mapping, state, pstate, type,
1952				   macs_pool, vlans_pool);
1953
1954	/* CAM pool handling */
1955	vlan_mac_obj->get_credit = bnx2x_get_credit_vlan_mac;
1956	vlan_mac_obj->put_credit = bnx2x_put_credit_vlan_mac;
1957	/*
1958	 * CAM offset is relevant only for 57710 and 57711 chips, which have a
1959	 * single CAM for both MACs and VLAN-MAC pairs. So the offset
1960	 * will be taken from the MACs' pool object only.
1961	 */
1962	vlan_mac_obj->get_cam_offset = bnx2x_get_cam_offset_mac;
1963	vlan_mac_obj->put_cam_offset = bnx2x_put_cam_offset_mac;
1964
1965	if (CHIP_IS_E1(bp)) {
1966		BNX2X_ERR("Do not support chips older than E1H\n");
1967		BUG();
1968	} else if (CHIP_IS_E1H(bp)) {
1969		vlan_mac_obj->set_one_rule      = bnx2x_set_one_vlan_mac_e1h;
1970		vlan_mac_obj->check_del         = bnx2x_check_vlan_mac_del;
1971		vlan_mac_obj->check_add         = bnx2x_check_vlan_mac_add;
1972		vlan_mac_obj->check_move        = bnx2x_check_move_always_err;
1973		vlan_mac_obj->ramrod_cmd        = RAMROD_CMD_ID_ETH_SET_MAC;
1974
1975		/* Exe Queue */
1976		bnx2x_exe_queue_init(bp,
1977				     &vlan_mac_obj->exe_queue, 1, qable_obj,
1978				     bnx2x_validate_vlan_mac,
1979				     bnx2x_optimize_vlan_mac,
1980				     bnx2x_execute_vlan_mac,
1981				     bnx2x_exeq_get_vlan_mac);
1982	} else {
1983		vlan_mac_obj->set_one_rule      = bnx2x_set_one_vlan_mac_e2;
1984		vlan_mac_obj->check_del         = bnx2x_check_vlan_mac_del;
1985		vlan_mac_obj->check_add         = bnx2x_check_vlan_mac_add;
1986		vlan_mac_obj->check_move        = bnx2x_check_move;
1987		vlan_mac_obj->ramrod_cmd        =
1988			RAMROD_CMD_ID_ETH_CLASSIFICATION_RULES;
1989
1990		/* Exe Queue */
1991		bnx2x_exe_queue_init(bp,
1992				     &vlan_mac_obj->exe_queue,
1993				     CLASSIFY_RULES_COUNT,
1994				     qable_obj, bnx2x_validate_vlan_mac,
1995				     bnx2x_optimize_vlan_mac,
1996				     bnx2x_execute_vlan_mac,
1997				     bnx2x_exeq_get_vlan_mac);
1998	}
2000}
2001
2002/* RX_MODE verbs: DROP_ALL/ACCEPT_ALL/ACCEPT_ALL_MULTI/ACCEPT_ALL_VLAN/NORMAL */
2003static inline void __storm_memset_mac_filters(struct bnx2x *bp,
2004			struct tstorm_eth_mac_filter_config *mac_filters,
2005			u16 pf_id)
2006{
2007	size_t size = sizeof(struct tstorm_eth_mac_filter_config);
2008
2009	u32 addr = BAR_TSTRORM_INTMEM +
2010			TSTORM_MAC_FILTER_CONFIG_OFFSET(pf_id);
2011
2012	__storm_memset_struct(bp, addr, size, (u32 *)mac_filters);
2013}
2014
2015static int bnx2x_set_rx_mode_e1x(struct bnx2x *bp,
2016				 struct bnx2x_rx_mode_ramrod_params *p)
2017{
2018	/* update the bp MAC filter structure  */
2019	u32 mask = (1 << p->cl_id);
2020
2021	struct tstorm_eth_mac_filter_config *mac_filters =
2022		(struct tstorm_eth_mac_filter_config *)p->rdata;
2023
2024	/* initial setting is drop-all */
2025	u8 drop_all_ucast = 1, drop_all_mcast = 1;
2026	u8 accp_all_ucast = 0, accp_all_bcast = 0, accp_all_mcast = 0;
2027	u8 unmatched_unicast = 0;
2028
2029	/* In E1x we only take the Rx accept flags into account since Tx
2030	 * switching isn't enabled. */
2031	if (test_bit(BNX2X_ACCEPT_UNICAST, &p->rx_accept_flags))
2032		/* accept matched ucast */
2033		drop_all_ucast = 0;
2034
2035	if (test_bit(BNX2X_ACCEPT_MULTICAST, &p->rx_accept_flags))
2036		/* accept matched mcast */
2037		drop_all_mcast = 0;
2038
2039	if (test_bit(BNX2X_ACCEPT_ALL_UNICAST, &p->rx_accept_flags)) {
2040		/* accept all ucast */
2041		drop_all_ucast = 0;
2042		accp_all_ucast = 1;
2043	}
2044	if (test_bit(BNX2X_ACCEPT_ALL_MULTICAST, &p->rx_accept_flags)) {
2045		/* accept all mcast */
2046		drop_all_mcast = 0;
2047		accp_all_mcast = 1;
2048	}
2049	if (test_bit(BNX2X_ACCEPT_BROADCAST, &p->rx_accept_flags))
2050		/* accept (all) bcast */
2051		accp_all_bcast = 1;
2052	if (test_bit(BNX2X_ACCEPT_UNMATCHED, &p->rx_accept_flags))
2053		/* accept unmatched unicasts */
2054		unmatched_unicast = 1;
2055
2056	mac_filters->ucast_drop_all = drop_all_ucast ?
2057		mac_filters->ucast_drop_all | mask :
2058		mac_filters->ucast_drop_all & ~mask;
2059
2060	mac_filters->mcast_drop_all = drop_all_mcast ?
2061		mac_filters->mcast_drop_all | mask :
2062		mac_filters->mcast_drop_all & ~mask;
2063
2064	mac_filters->ucast_accept_all = accp_all_ucast ?
2065		mac_filters->ucast_accept_all | mask :
2066		mac_filters->ucast_accept_all & ~mask;
2067
2068	mac_filters->mcast_accept_all = accp_all_mcast ?
2069		mac_filters->mcast_accept_all | mask :
2070		mac_filters->mcast_accept_all & ~mask;
2071
2072	mac_filters->bcast_accept_all = accp_all_bcast ?
2073		mac_filters->bcast_accept_all | mask :
2074		mac_filters->bcast_accept_all & ~mask;
2075
2076	mac_filters->unmatched_unicast = unmatched_unicast ?
2077		mac_filters->unmatched_unicast | mask :
2078		mac_filters->unmatched_unicast & ~mask;
2079
2080	DP(BNX2X_MSG_SP, "drop_ucast 0x%x\ndrop_mcast 0x%x\naccp_ucast 0x%x\n"
2081			 "accp_mcast 0x%x\naccp_bcast 0x%x\n",
2082			 mac_filters->ucast_drop_all,
2083			 mac_filters->mcast_drop_all,
2084			 mac_filters->ucast_accept_all,
2085			 mac_filters->mcast_accept_all,
2086			 mac_filters->bcast_accept_all);
2087
2088	/* write the MAC filter structure*/
2089	__storm_memset_mac_filters(bp, mac_filters, p->func_id);
2090
2091	/* The operation is completed */
2092	clear_bit(p->state, p->pstate);
2093	smp_mb__after_clear_bit();
2094
2095	return 0;
2096}
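
/* Editor's sketch (not part of the driver): each tstorm filter field
 * above is a per-client bitmap, and the ternary pattern sets or clears
 * only this client's bit (mask == 1 << cl_id) while leaving the other
 * clients untouched:
 *
 *	field = enable ? (field | mask) : (field & ~mask);
 *
 * e.g. with cl_id == 3 (mask == 0x8), enabling drop-all turns 0x01
 * into 0x09, and disabling it turns 0x09 back into 0x01.
 */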
2097
2098/* Setup ramrod data */
2099static inline void bnx2x_rx_mode_set_rdata_hdr_e2(u32 cid,
2100				struct eth_classify_header *hdr,
2101				u8 rule_cnt)
2102{
2103	hdr->echo = cid;
2104	hdr->rule_cnt = rule_cnt;
2105}
2106
2107static inline void bnx2x_rx_mode_set_cmd_state_e2(struct bnx2x *bp,
2108				unsigned long accept_flags,
2109				struct eth_filter_rules_cmd *cmd,
2110				bool clear_accept_all)
2111{
2112	u16 state;
2113
2114	/* start with 'drop-all' */
2115	state = ETH_FILTER_RULES_CMD_UCAST_DROP_ALL |
2116		ETH_FILTER_RULES_CMD_MCAST_DROP_ALL;
2117
2118	if (accept_flags) {
2119		if (test_bit(BNX2X_ACCEPT_UNICAST, &accept_flags))
2120			state &= ~ETH_FILTER_RULES_CMD_UCAST_DROP_ALL;
2121
2122		if (test_bit(BNX2X_ACCEPT_MULTICAST, &accept_flags))
2123			state &= ~ETH_FILTER_RULES_CMD_MCAST_DROP_ALL;
2124
2125		if (test_bit(BNX2X_ACCEPT_ALL_UNICAST, &accept_flags)) {
2126			state &= ~ETH_FILTER_RULES_CMD_UCAST_DROP_ALL;
2127			state |= ETH_FILTER_RULES_CMD_UCAST_ACCEPT_ALL;
2128		}
2129
2130		if (test_bit(BNX2X_ACCEPT_ALL_MULTICAST, &accept_flags)) {
2131			state |= ETH_FILTER_RULES_CMD_MCAST_ACCEPT_ALL;
2132			state &= ~ETH_FILTER_RULES_CMD_MCAST_DROP_ALL;
2133		}
2134		if (test_bit(BNX2X_ACCEPT_BROADCAST, &accept_flags))
2135			state |= ETH_FILTER_RULES_CMD_BCAST_ACCEPT_ALL;
2136
2137		if (test_bit(BNX2X_ACCEPT_UNMATCHED, &accept_flags)) {
2138			state &= ~ETH_FILTER_RULES_CMD_UCAST_DROP_ALL;
2139			state |= ETH_FILTER_RULES_CMD_UCAST_ACCEPT_UNMATCHED;
2140		}
2141		if (test_bit(BNX2X_ACCEPT_ANY_VLAN, &accept_flags))
2142			state |= ETH_FILTER_RULES_CMD_ACCEPT_ANY_VLAN;
2143	}
2144
2145	/* Clear ACCEPT_ALL_XXX flags for FCoE L2 Queue */
2146	if (clear_accept_all) {
2147		state &= ~ETH_FILTER_RULES_CMD_MCAST_ACCEPT_ALL;
2148		state &= ~ETH_FILTER_RULES_CMD_BCAST_ACCEPT_ALL;
2149		state &= ~ETH_FILTER_RULES_CMD_UCAST_ACCEPT_ALL;
2150		state &= ~ETH_FILTER_RULES_CMD_UCAST_ACCEPT_UNMATCHED;
2151	}
2152
2153	cmd->state = cpu_to_le16(state);
2155}
2156
2157static int bnx2x_set_rx_mode_e2(struct bnx2x *bp,
2158				struct bnx2x_rx_mode_ramrod_params *p)
2159{
2160	struct eth_filter_rules_ramrod_data *data = p->rdata;
2161	int rc;
2162	u8 rule_idx = 0;
2163
2164	/* Reset the ramrod data buffer */
2165	memset(data, 0, sizeof(*data));
2166
2167	/* Setup ramrod data */
2168
2169	/* Tx (internal switching) */
2170	if (test_bit(RAMROD_TX, &p->ramrod_flags)) {
2171		data->rules[rule_idx].client_id = p->cl_id;
2172		data->rules[rule_idx].func_id = p->func_id;
2173
2174		data->rules[rule_idx].cmd_general_data =
2175			ETH_FILTER_RULES_CMD_TX_CMD;
2176
2177		bnx2x_rx_mode_set_cmd_state_e2(bp, p->tx_accept_flags,
2178			&(data->rules[rule_idx++]), false);
2179	}
2180
2181	/* Rx */
2182	if (test_bit(RAMROD_RX, &p->ramrod_flags)) {
2183		data->rules[rule_idx].client_id = p->cl_id;
2184		data->rules[rule_idx].func_id = p->func_id;
2185
2186		data->rules[rule_idx].cmd_general_data =
2187			ETH_FILTER_RULES_CMD_RX_CMD;
2188
2189		bnx2x_rx_mode_set_cmd_state_e2(bp, p->rx_accept_flags,
2190			&(data->rules[rule_idx++]), false);
2191	}
2192
2194	/*
2195	 * If FCoE Queue configuration has been requested, configure the Rx and
2196	 * internal switching modes for this queue in separate rules.
2197	 *
2198	 * FCoE queue shall never be set to ACCEPT_ALL packets of any sort:
2199	 * MCAST_ALL, UCAST_ALL, BCAST_ALL and UNMATCHED.
2200	 */
2201	if (test_bit(BNX2X_RX_MODE_FCOE_ETH, &p->rx_mode_flags)) {
2202		/* Tx (internal switching) */
2203		if (test_bit(RAMROD_TX, &p->ramrod_flags)) {
2204			data->rules[rule_idx].client_id = bnx2x_fcoe(bp, cl_id);
2205			data->rules[rule_idx].func_id = p->func_id;
2206
2207			data->rules[rule_idx].cmd_general_data =
2208						ETH_FILTER_RULES_CMD_TX_CMD;
2209
2210			bnx2x_rx_mode_set_cmd_state_e2(bp, p->tx_accept_flags,
2211						     &(data->rules[rule_idx++]),
2212						       true);
2213		}
2214
2215		/* Rx */
2216		if (test_bit(RAMROD_RX, &p->ramrod_flags)) {
2217			data->rules[rule_idx].client_id = bnx2x_fcoe(bp, cl_id);
2218			data->rules[rule_idx].func_id = p->func_id;
2219
2220			data->rules[rule_idx].cmd_general_data =
2221						ETH_FILTER_RULES_CMD_RX_CMD;
2222
2223			bnx2x_rx_mode_set_cmd_state_e2(bp, p->rx_accept_flags,
2224						     &(data->rules[rule_idx++]),
2225						       true);
2226		}
2227	}
2228
2229	/*
2230	 * Set the ramrod header (most importantly - number of rules to
2231	 * configure).
2232	 */
2233	bnx2x_rx_mode_set_rdata_hdr_e2(p->cid, &data->header, rule_idx);
2234
2235	DP(BNX2X_MSG_SP, "About to configure %d rules, rx_accept_flags 0x%lx, "
2236			 "tx_accept_flags 0x%lx\n",
2237			 data->header.rule_cnt, p->rx_accept_flags,
2238			 p->tx_accept_flags);
2239
2240	/*
2241	 * No need for an explicit memory barrier here as long as we
2242	 * ensure the ordering of writing to the SPQ element
2243	 * and updating of the SPQ producer, which involves a memory
2244	 * read. If the memory read is ever removed, a full memory
2245	 * barrier will be needed there (inside bnx2x_sp_post()).
2246	 */
2247
2248	/* Send a ramrod */
2249	rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_FILTER_RULES, p->cid,
2250			   U64_HI(p->rdata_mapping),
2251			   U64_LO(p->rdata_mapping),
2252			   ETH_CONNECTION_TYPE);
2253	if (rc)
2254		return rc;
2255
2256	/* Ramrod completion is pending */
2257	return 1;
2258}
2259
2260static int bnx2x_wait_rx_mode_comp_e2(struct bnx2x *bp,
2261				      struct bnx2x_rx_mode_ramrod_params *p)
2262{
2263	return bnx2x_state_wait(bp, p->state, p->pstate);
2264}
2265
2266static int bnx2x_empty_rx_mode_wait(struct bnx2x *bp,
2267				    struct bnx2x_rx_mode_ramrod_params *p)
2268{
2269	/* Do nothing */
2270	return 0;
2271}
2272
2273int bnx2x_config_rx_mode(struct bnx2x *bp,
2274			 struct bnx2x_rx_mode_ramrod_params *p)
2275{
2276	int rc;
2277
2278	/* Configure the new classification in the chip */
2279	rc = p->rx_mode_obj->config_rx_mode(bp, p);
2280	if (rc < 0)
2281		return rc;
2282
2283	/* Wait for a ramrod completion if was requested */
2284	if (test_bit(RAMROD_COMP_WAIT, &p->ramrod_flags)) {
2285		rc = p->rx_mode_obj->wait_comp(bp, p);
2286		if (rc)
2287			return rc;
2288	}
2289
2290	return rc;
2291}
2292
2293void bnx2x_init_rx_mode_obj(struct bnx2x *bp,
2294			    struct bnx2x_rx_mode_obj *o)
2295{
2296	if (CHIP_IS_E1x(bp)) {
2297		o->wait_comp      = bnx2x_empty_rx_mode_wait;
2298		o->config_rx_mode = bnx2x_set_rx_mode_e1x;
2299	} else {
2300		o->wait_comp      = bnx2x_wait_rx_mode_comp_e2;
2301		o->config_rx_mode = bnx2x_set_rx_mode_e2;
2302	}
2303}
2304
2305/********************* Multicast verbs: SET, CLEAR ****************************/
2306static inline u8 bnx2x_mcast_bin_from_mac(u8 *mac)
2307{
2308	return (crc32c_le(0, mac, ETH_ALEN) >> 24) & 0xff;
2309}
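
/* Editor's note (not part of the driver): a multicast MAC is mapped to
 * one of 256 approximate-match bins by taking bits 31:24 of the
 * little-endian CRC32C of its six bytes. Hypothetical usage:
 *
 *	u8 mc_mac[ETH_ALEN] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 };
 *	u8 bin = bnx2x_mcast_bin_from_mac(mc_mac);	- bin is 0..255
 */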
2310
2311struct bnx2x_mcast_mac_elem {
2312	struct list_head link;
2313	u8 mac[ETH_ALEN];
2314	u8 pad[2]; /* For a natural alignment of the following buffer */
2315};
2316
2317struct bnx2x_pending_mcast_cmd {
2318	struct list_head link;
2319	int type; /* BNX2X_MCAST_CMD_X */
2320	union {
2321		struct list_head macs_head;
2322		u32 macs_num; /* Needed for DEL command */
2323		int next_bin; /* Needed for RESTORE flow with aprox match */
2324	} data;
2325
2326	bool done; /* set to true when the command has been handled. In
2327		    * practice it is only used for 57712, where one pending
2328		    * command may be handled in a few operations. For other
2329		    * chips every operation is completed in a single ramrod,
2330		    * so there is no need for this field.
2331		    */
2332};
2333
2334static int bnx2x_mcast_wait(struct bnx2x *bp,
2335			    struct bnx2x_mcast_obj *o)
2336{
2337	if (bnx2x_state_wait(bp, o->sched_state, o->raw.pstate) ||
2338			o->raw.wait_comp(bp, &o->raw))
2339		return -EBUSY;
2340
2341	return 0;
2342}
2343
2344static int bnx2x_mcast_enqueue_cmd(struct bnx2x *bp,
2345				   struct bnx2x_mcast_obj *o,
2346				   struct bnx2x_mcast_ramrod_params *p,
2347				   int cmd)
2348{
2349	int total_sz;
2350	struct bnx2x_pending_mcast_cmd *new_cmd;
2351	struct bnx2x_mcast_mac_elem *cur_mac = NULL;
2352	struct bnx2x_mcast_list_elem *pos;
2353	int macs_list_len = ((cmd == BNX2X_MCAST_CMD_ADD) ?
2354			     p->mcast_list_len : 0);
2355
2356	/* If the command is empty ("handle pending commands only"), return */
2357	if (!p->mcast_list_len)
2358		return 0;
2359
2360	total_sz = sizeof(*new_cmd) +
2361		macs_list_len * sizeof(struct bnx2x_mcast_mac_elem);
2362
2363	/* Add mcast is called under spin_lock, thus calling with GFP_ATOMIC */
2364	new_cmd = kzalloc(total_sz, GFP_ATOMIC);
2365
2366	if (!new_cmd)
2367		return -ENOMEM;
2368
2369	DP(BNX2X_MSG_SP, "About to enqueue a new %d command. "
2370			 "macs_list_len=%d\n", cmd, macs_list_len);
2371
2372	INIT_LIST_HEAD(&new_cmd->data.macs_head);
2373
2374	new_cmd->type = cmd;
2375	new_cmd->done = false;
2376
2377	switch (cmd) {
2378	case BNX2X_MCAST_CMD_ADD:
2379		cur_mac = (struct bnx2x_mcast_mac_elem *)
2380			  ((u8 *)new_cmd + sizeof(*new_cmd));
2381
2382		/* Push the MACs of the current command into the pending command
2383		 * MACs list: FIFO
2384		 */
2385		list_for_each_entry(pos, &p->mcast_list, link) {
2386			memcpy(cur_mac->mac, pos->mac, ETH_ALEN);
2387			list_add_tail(&cur_mac->link, &new_cmd->data.macs_head);
2388			cur_mac++;
2389		}
2390
2391		break;
2392
2393	case BNX2X_MCAST_CMD_DEL:
2394		new_cmd->data.macs_num = p->mcast_list_len;
2395		break;
2396
2397	case BNX2X_MCAST_CMD_RESTORE:
2398		new_cmd->data.next_bin = 0;
2399		break;
2400
2401	default:
2402		BNX2X_ERR("Unknown command: %d\n", cmd);
2403		return -EINVAL;
2404	}
2405
2406	/* Push the new pending command to the tail of the pending list: FIFO */
2407	list_add_tail(&new_cmd->link, &o->pending_cmds_head);
2408
2409	o->set_sched(o);
2410
2411	return 1;
2412}
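
/* Editor's note (not part of the driver): for the ADD flow above, the
 * command header and its MAC elements live in one allocation, which is
 * why cur_mac starts right past the header:
 *
 *	+---------------------------------+--------+--------+-----+
 *	| struct bnx2x_pending_mcast_cmd  | elem 0 | elem 1 | ... |
 *	+---------------------------------+--------+--------+-----+
 *	^ new_cmd                         ^ (u8 *)new_cmd + sizeof(*new_cmd)
 */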
2413
2414/**
2415 * bnx2x_mcast_get_next_bin - get the next set bin (index)
2416 *
2417 * @o:		multicast object
2418 * @last:	index to start looking from (inclusive)
2419 *
2420 * returns the next found (set) bin or a negative value if none is found.
2421 */
2422static inline int bnx2x_mcast_get_next_bin(struct bnx2x_mcast_obj *o, int last)
2423{
2424	int i, j, inner_start = last % BIT_VEC64_ELEM_SZ;
2425
2426	for (i = last / BIT_VEC64_ELEM_SZ; i < BNX2X_MCAST_VEC_SZ; i++) {
2427		if (o->registry.aprox_match.vec[i])
2428			for (j = inner_start; j < BIT_VEC64_ELEM_SZ; j++) {
2429				int cur_bit = j + BIT_VEC64_ELEM_SZ * i;
2430				if (BIT_VEC64_TEST_BIT(o->registry.aprox_match.
2431						       vec, cur_bit)) {
2432					return cur_bit;
2433				}
2434			}
2435		inner_start = 0;
2436	}
2437
2438	/* None found */
2439	return -1;
2440}
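
/* Editor's note (not part of the driver): the scan above treats the
 * vector as one long bitmap. E.g. if only bin 70 is set, a call with
 * last == 0 skips the empty vec[0], scans vec[1] (70 / 64 == 1) and
 * returns 70, while a call with last == 71 starts past it and
 * returns -1.
 */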
2441
2442/**
2443 * bnx2x_mcast_clear_first_bin - find the first set bin and clear it
2444 *
2445 * @o:		multicast object
2446 *
2447 * returns the index of the found bin or -1 if none is found
2448 */
2449static inline int bnx2x_mcast_clear_first_bin(struct bnx2x_mcast_obj *o)
2450{
2451	int cur_bit = bnx2x_mcast_get_next_bin(o, 0);
2452
2453	if (cur_bit >= 0)
2454		BIT_VEC64_CLEAR_BIT(o->registry.aprox_match.vec, cur_bit);
2455
2456	return cur_bit;
2457}
2458
2459static inline u8 bnx2x_mcast_get_rx_tx_flag(struct bnx2x_mcast_obj *o)
2460{
2461	struct bnx2x_raw_obj *raw = &o->raw;
2462	u8 rx_tx_flag = 0;
2463
2464	if ((raw->obj_type == BNX2X_OBJ_TYPE_TX) ||
2465	    (raw->obj_type == BNX2X_OBJ_TYPE_RX_TX))
2466		rx_tx_flag |= ETH_MULTICAST_RULES_CMD_TX_CMD;
2467
2468	if ((raw->obj_type == BNX2X_OBJ_TYPE_RX) ||
2469	    (raw->obj_type == BNX2X_OBJ_TYPE_RX_TX))
2470		rx_tx_flag |= ETH_MULTICAST_RULES_CMD_RX_CMD;
2471
2472	return rx_tx_flag;
2473}
2474
2475static void bnx2x_mcast_set_one_rule_e2(struct bnx2x *bp,
2476					struct bnx2x_mcast_obj *o, int idx,
2477					union bnx2x_mcast_config_data *cfg_data,
2478					int cmd)
2479{
2480	struct bnx2x_raw_obj *r = &o->raw;
2481	struct eth_multicast_rules_ramrod_data *data =
2482		(struct eth_multicast_rules_ramrod_data *)(r->rdata);
2483	u8 func_id = r->func_id;
2484	u8 rx_tx_add_flag = bnx2x_mcast_get_rx_tx_flag(o);
2485	int bin;
2486
2487	if ((cmd == BNX2X_MCAST_CMD_ADD) || (cmd == BNX2X_MCAST_CMD_RESTORE))
2488		rx_tx_add_flag |= ETH_MULTICAST_RULES_CMD_IS_ADD;
2489
2490	data->rules[idx].cmd_general_data |= rx_tx_add_flag;
2491
2492	/* Get a bin and update the bins' vector */
2493	switch (cmd) {
2494	case BNX2X_MCAST_CMD_ADD:
2495		bin = bnx2x_mcast_bin_from_mac(cfg_data->mac);
2496		BIT_VEC64_SET_BIT(o->registry.aprox_match.vec, bin);
2497		break;
2498
2499	case BNX2X_MCAST_CMD_DEL:
2500		/* If there were no more bins to clear
2501		 * (bnx2x_mcast_clear_first_bin() returns -1) then the rule
2502		 * will carry a dummy (0xff) bin.
2503		 * See bnx2x_mcast_validate_e2() for an explanation of when
2504		 * this may happen.
2505		 */
2506		bin = bnx2x_mcast_clear_first_bin(o);
2507		break;
2508
2509	case BNX2X_MCAST_CMD_RESTORE:
2510		bin = cfg_data->bin;
2511		break;
2512
2513	default:
2514		BNX2X_ERR("Unknown command: %d\n", cmd);
2515		return;
2516	}
2517
2518	DP(BNX2X_MSG_SP, "%s bin %d\n",
2519			 ((rx_tx_add_flag & ETH_MULTICAST_RULES_CMD_IS_ADD) ?
2520			 "Setting"  : "Clearing"), bin);
2521
2522	data->rules[idx].bin_id    = (u8)bin;
2523	data->rules[idx].func_id   = func_id;
2524	data->rules[idx].engine_id = o->engine_id;
2525}
2526
2527/**
2528 * bnx2x_mcast_handle_restore_cmd_e2 - restore configuration from the registry
2529 *
2530 * @bp:		device handle
2531 * @o:		multicast object
2532 * @start_bin:	index in the registry to start from (inclusive)
2533 * @rdata_idx:	index in the ramrod data to start from
2534 *
2535 * returns last handled bin index or -1 if all bins have been handled
2536 */
2537static inline int bnx2x_mcast_handle_restore_cmd_e2(
2538	struct bnx2x *bp, struct bnx2x_mcast_obj *o , int start_bin,
2539	int *rdata_idx)
2540{
2541	int cur_bin, cnt = *rdata_idx;
2542	union bnx2x_mcast_config_data cfg_data = {0};
2543
2544	/* go through the registry and configure the bins from it */
2545	for (cur_bin = bnx2x_mcast_get_next_bin(o, start_bin); cur_bin >= 0;
2546	    cur_bin = bnx2x_mcast_get_next_bin(o, cur_bin + 1)) {
2547
2548		cfg_data.bin = (u8)cur_bin;
2549		o->set_one_rule(bp, o, cnt, &cfg_data,
2550				BNX2X_MCAST_CMD_RESTORE);
2551
2552		cnt++;
2553
2554		DP(BNX2X_MSG_SP, "About to configure a bin %d\n", cur_bin);
2555
2556		/* Break if we reached the maximum number
2557		 * of rules.
2558		 */
2559		if (cnt >= o->max_cmd_len)
2560			break;
2561	}
2562
2563	*rdata_idx = cnt;
2564
2565	return cur_bin;
2566}
2567
2568static inline void bnx2x_mcast_hdl_pending_add_e2(struct bnx2x *bp,
2569	struct bnx2x_mcast_obj *o, struct bnx2x_pending_mcast_cmd *cmd_pos,
2570	int *line_idx)
2571{
2572	struct bnx2x_mcast_mac_elem *pmac_pos, *pmac_pos_n;
2573	int cnt = *line_idx;
2574	union bnx2x_mcast_config_data cfg_data = {0};
2575
2576	list_for_each_entry_safe(pmac_pos, pmac_pos_n, &cmd_pos->data.macs_head,
2577				 link) {
2578
2579		cfg_data.mac = &pmac_pos->mac[0];
2580		o->set_one_rule(bp, o, cnt, &cfg_data, cmd_pos->type);
2581
2582		cnt++;
2583
2584		DP(BNX2X_MSG_SP, "About to configure %pM mcast MAC\n",
2585				 pmac_pos->mac);
2586
2587		list_del(&pmac_pos->link);
2588
2589		/* Break if we reached the maximum number
2590		 * of rules.
2591		 */
2592		if (cnt >= o->max_cmd_len)
2593			break;
2594	}
2595
2596	*line_idx = cnt;
2597
2598	/* if no more MACs to configure - we are done */
2599	if (list_empty(&cmd_pos->data.macs_head))
2600		cmd_pos->done = true;
2601}
2602
2603static inline void bnx2x_mcast_hdl_pending_del_e2(struct bnx2x *bp,
2604	struct bnx2x_mcast_obj *o, struct bnx2x_pending_mcast_cmd *cmd_pos,
2605	int *line_idx)
2606{
2607	int cnt = *line_idx;
2608
2609	while (cmd_pos->data.macs_num) {
2610		o->set_one_rule(bp, o, cnt, NULL, cmd_pos->type);
2611
2612		cnt++;
2613
2614		cmd_pos->data.macs_num--;
2615
2616		DP(BNX2X_MSG_SP, "Deleting MAC. %d left, cnt is %d\n",
2617				 cmd_pos->data.macs_num, cnt);
2618
2619		/* Break if we reached the maximum
2620		 * number of rules.
2621		 */
2622		if (cnt >= o->max_cmd_len)
2623			break;
2624	}
2625
2626	*line_idx = cnt;
2627
2628	/* If we cleared all bins - we are done */
2629	if (!cmd_pos->data.macs_num)
2630		cmd_pos->done = true;
2631}
2632
2633static inline void bnx2x_mcast_hdl_pending_restore_e2(struct bnx2x *bp,
2634	struct bnx2x_mcast_obj *o, struct bnx2x_pending_mcast_cmd *cmd_pos,
2635	int *line_idx)
2636{
2637	cmd_pos->data.next_bin = o->hdl_restore(bp, o, cmd_pos->data.next_bin,
2638						line_idx);
2639
2640	if (cmd_pos->data.next_bin < 0)
2641		/* If o->hdl_restore returned -1 we are done */
2642		cmd_pos->done = true;
2643	else
2644		/* Start from the next bin next time */
2645		cmd_pos->data.next_bin++;
2646}
2647
2648static inline int bnx2x_mcast_handle_pending_cmds_e2(struct bnx2x *bp,
2649				struct bnx2x_mcast_ramrod_params *p)
2650{
2651	struct bnx2x_pending_mcast_cmd *cmd_pos, *cmd_pos_n;
2652	int cnt = 0;
2653	struct bnx2x_mcast_obj *o = p->mcast_obj;
2654
2655	list_for_each_entry_safe(cmd_pos, cmd_pos_n, &o->pending_cmds_head,
2656				 link) {
2657		switch (cmd_pos->type) {
2658		case BNX2X_MCAST_CMD_ADD:
2659			bnx2x_mcast_hdl_pending_add_e2(bp, o, cmd_pos, &cnt);
2660			break;
2661
2662		case BNX2X_MCAST_CMD_DEL:
2663			bnx2x_mcast_hdl_pending_del_e2(bp, o, cmd_pos, &cnt);
2664			break;
2665
2666		case BNX2X_MCAST_CMD_RESTORE:
2667			bnx2x_mcast_hdl_pending_restore_e2(bp, o, cmd_pos,
2668							   &cnt);
2669			break;
2670
2671		default:
2672			BNX2X_ERR("Unknown command: %d\n", cmd_pos->type);
2673			return -EINVAL;
2674		}
2675
2676		/* If the command has been completed - remove it from the list
2677		 * and free the memory
2678		 */
2679		if (cmd_pos->done) {
2680			list_del(&cmd_pos->link);
2681			kfree(cmd_pos);
2682		}
2683
2684		/* Break if we reached the maximum number of rules */
2685		if (cnt >= o->max_cmd_len)
2686			break;
2687	}
2688
2689	return cnt;
2690}
2691
2692static inline void bnx2x_mcast_hdl_add(struct bnx2x *bp,
2693	struct bnx2x_mcast_obj *o, struct bnx2x_mcast_ramrod_params *p,
2694	int *line_idx)
2695{
2696	struct bnx2x_mcast_list_elem *mlist_pos;
2697	union bnx2x_mcast_config_data cfg_data = {0};
2698	int cnt = *line_idx;
2699
2700	list_for_each_entry(mlist_pos, &p->mcast_list, link) {
2701		cfg_data.mac = mlist_pos->mac;
2702		o->set_one_rule(bp, o, cnt, &cfg_data, BNX2X_MCAST_CMD_ADD);
2703
2704		cnt++;
2705
2706		DP(BNX2X_MSG_SP, "About to configure %pM mcast MAC\n",
2707				 mlist_pos->mac);
2708	}
2709
2710	*line_idx = cnt;
2711}
2712
2713static inline void bnx2x_mcast_hdl_del(struct bnx2x *bp,
2714	struct bnx2x_mcast_obj *o, struct bnx2x_mcast_ramrod_params *p,
2715	int *line_idx)
2716{
2717	int cnt = *line_idx, i;
2718
2719	for (i = 0; i < p->mcast_list_len; i++) {
2720		o->set_one_rule(bp, o, cnt, NULL, BNX2X_MCAST_CMD_DEL);
2721
2722		cnt++;
2723
2724		DP(BNX2X_MSG_SP, "Deleting MAC. %d left\n",
2725				 p->mcast_list_len - i - 1);
2726	}
2727
2728	*line_idx = cnt;
2729}
2730
2731/**
2732 * bnx2x_mcast_handle_current_cmd - handle the current command
2733 *
2734 * @bp:		device handle
2735 * @p:		multicast ramrod parameters
2736 * @cmd:	command to handle (BNX2X_MCAST_CMD_X)
2737 * @start_cnt:	first line in the ramrod data that may be used
2738 *
2739 * This function is called iff there is enough room for the current command in
2740 * the ramrod data.
2741 * Returns the total number of lines filled in the ramrod data.
2742 */
2743static inline int bnx2x_mcast_handle_current_cmd(struct bnx2x *bp,
2744			struct bnx2x_mcast_ramrod_params *p, int cmd,
2745			int start_cnt)
2746{
2747	struct bnx2x_mcast_obj *o = p->mcast_obj;
2748	int cnt = start_cnt;
2749
2750	DP(BNX2X_MSG_SP, "p->mcast_list_len=%d\n", p->mcast_list_len);
2751
2752	switch (cmd) {
2753	case BNX2X_MCAST_CMD_ADD:
2754		bnx2x_mcast_hdl_add(bp, o, p, &cnt);
2755		break;
2756
2757	case BNX2X_MCAST_CMD_DEL:
2758		bnx2x_mcast_hdl_del(bp, o, p, &cnt);
2759		break;
2760
2761	case BNX2X_MCAST_CMD_RESTORE:
2762		o->hdl_restore(bp, o, 0, &cnt);
2763		break;
2764
2765	default:
2766		BNX2X_ERR("Unknown command: %d\n", cmd);
2767		return -EINVAL;
2768	}
2769
2770	/* The current command has been handled */
2771	p->mcast_list_len = 0;
2772
2773	return cnt;
2774}
2775
2776static int bnx2x_mcast_validate_e2(struct bnx2x *bp,
2777				   struct bnx2x_mcast_ramrod_params *p,
2778				   int cmd)
2779{
2780	struct bnx2x_mcast_obj *o = p->mcast_obj;
2781	int reg_sz = o->get_registry_size(o);
2782
2783	switch (cmd) {
2784	/* DEL command deletes all currently configured MACs */
2785	case BNX2X_MCAST_CMD_DEL:
2786		o->set_registry_size(o, 0);
2787		/* Don't break */
2788
2789	/* RESTORE command will restore the entire multicast configuration */
2790	case BNX2X_MCAST_CMD_RESTORE:
2791		/* Here we set the approximate amount of work to do, which in
2792		 * fact may be less, as some MACs in postponed ADD
2793		 * command(s) scheduled before this command may fall into
2794		 * the same bin, making the actual number of bins set in the
2795		 * registry less than estimated here. See
2796		 * bnx2x_mcast_set_one_rule_e2() for further details.
2797		 */
2798		p->mcast_list_len = reg_sz;
2799		break;
2800
2801	case BNX2X_MCAST_CMD_ADD:
2802	case BNX2X_MCAST_CMD_CONT:
2803		/* Here we assume that all new MACs will fall into new bins.
2804		 * However we will correct the real registry size after we
2805		 * handle all pending commands.
2806		 */
2807		o->set_registry_size(o, reg_sz + p->mcast_list_len);
2808		break;
2809
2810	default:
2811		BNX2X_ERR("Unknown command: %d\n", cmd);
2812		return -EINVAL;
2813
2814	}
2815
2816	/* Increase the total number of MACs pending to be configured */
2817	o->total_pending_num += p->mcast_list_len;
2818
2819	return 0;
2820}
2821
2822static void bnx2x_mcast_revert_e2(struct bnx2x *bp,
2823				      struct bnx2x_mcast_ramrod_params *p,
2824				      int old_num_bins)
2825{
2826	struct bnx2x_mcast_obj *o = p->mcast_obj;
2827
2828	o->set_registry_size(o, old_num_bins);
2829	o->total_pending_num -= p->mcast_list_len;
2830}
2831
2832/**
2833 * bnx2x_mcast_set_rdata_hdr_e2 - sets the header values
2834 *
2835 * @bp:		device handle
2836 * @p:		multicast ramrod parameters
2837 * @len:	number of rules to handle
2838 */
2839static inline void bnx2x_mcast_set_rdata_hdr_e2(struct bnx2x *bp,
2840					struct bnx2x_mcast_ramrod_params *p,
2841					u8 len)
2842{
2843	struct bnx2x_raw_obj *r = &p->mcast_obj->raw;
2844	struct eth_multicast_rules_ramrod_data *data =
2845		(struct eth_multicast_rules_ramrod_data *)(r->rdata);
2846
2847	data->header.echo = ((r->cid & BNX2X_SWCID_MASK) |
2848			  (BNX2X_FILTER_MCAST_PENDING << BNX2X_SWCID_SHIFT));
2849	data->header.rule_cnt = len;
2850}
2851
2852/**
2853 * bnx2x_mcast_refresh_registry_e2 - recalculate the actual number of set bins
2854 *
2855 * @bp:		device handle
2856 * @o:		multicast object
2857 *
2858 * Recalculate the actual number of set bins in the registry using Brian
2859 * Kernighan's algorithm: its execution complexity is proportional to the
2860 * number of set bins.
2861 *
2862 * returns 0 to comply with bnx2x_mcast_refresh_registry_e1().
2862 */
2863static inline int bnx2x_mcast_refresh_registry_e2(struct bnx2x *bp,
2864						  struct bnx2x_mcast_obj *o)
2865{
2866	int i, cnt = 0;
2867	u64 elem;
2868
2869	for (i = 0; i < BNX2X_MCAST_VEC_SZ; i++) {
2870		elem = o->registry.aprox_match.vec[i];
2871		for (; elem; cnt++)
2872			elem &= elem - 1;
2873	}
2874
2875	o->set_registry_size(o, cnt);
2876
2877	return 0;
2878}
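
/* Editor's sketch (not part of the driver): elem &= elem - 1 clears
 * the least significant set bit, so the loop above runs once per set
 * bit rather than once per bit position:
 *
 *	u64 elem = 0xb0;	- binary 10110000, three bits set
 *	elem &= elem - 1;	- 0xa0
 *	elem &= elem - 1;	- 0x80
 *	elem &= elem - 1;	- 0x00, the loop exits after 3 rounds
 */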
2879
2880static int bnx2x_mcast_setup_e2(struct bnx2x *bp,
2881				struct bnx2x_mcast_ramrod_params *p,
2882				int cmd)
2883{
2884	struct bnx2x_raw_obj *raw = &p->mcast_obj->raw;
2885	struct bnx2x_mcast_obj *o = p->mcast_obj;
2886	struct eth_multicast_rules_ramrod_data *data =
2887		(struct eth_multicast_rules_ramrod_data *)(raw->rdata);
2888	int cnt = 0, rc;
2889
2890	/* Reset the ramrod data buffer */
2891	memset(data, 0, sizeof(*data));
2892
2893	cnt = bnx2x_mcast_handle_pending_cmds_e2(bp, p);
2894
2895	/* If there are no more pending commands - clear SCHEDULED state */
2896	if (list_empty(&o->pending_cmds_head))
2897		o->clear_sched(o);
2898
2899	/* The below may be true iff there was enough room in ramrod
2900	 * data for all pending commands and for the current
2901	 * command. Otherwise the current command would have been added
2902	 * to the pending commands and p->mcast_list_len would have been
2903	 * zeroed.
2904	 */
2905	if (p->mcast_list_len > 0)
2906		cnt = bnx2x_mcast_handle_current_cmd(bp, p, cmd, cnt);
2907
2908	/* We've pulled out some MACs - update the total number of
2909	 * outstanding.
2910	 */
2911	o->total_pending_num -= cnt;
2912
2913	/* send a ramrod */
2914	WARN_ON(o->total_pending_num < 0);
2915	WARN_ON(cnt > o->max_cmd_len);
2916
2917	bnx2x_mcast_set_rdata_hdr_e2(bp, p, (u8)cnt);
2918
2919	/* Update a registry size if there are no more pending operations.
2920	 *
2921	 * We don't want to change the value of the registry size if there are
2922	 * pending operations because we want it to always be equal to the
2923	 * exact or the approximate number (see bnx2x_mcast_validate_e2()) of
2924	 * set bins after the last requested operation in order to properly
2925	 * evaluate the size of the next DEL/RESTORE operation.
2926	 *
2927	 * Note that we update the registry itself during command(s) handling
2928	 * - see bnx2x_mcast_set_one_rule_e2(). That's because for 57712 we
2929	 * aggregate multiple commands (ADD/DEL/RESTORE) into one ramrod but
2930	 * with a limited amount of update commands (per MAC/bin) and we don't
2931	 * know in this scope what the actual state of bins configuration is
2932	 * going to be after this ramrod.
2933	 */
2934	if (!o->total_pending_num)
2935		bnx2x_mcast_refresh_registry_e2(bp, o);
2936
2937	/*
2938	 * If CLEAR_ONLY was requested - don't send a ramrod and clear
2939	 * RAMROD_PENDING status immediately.
2940	 */
2941	if (test_bit(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags)) {
2942		raw->clear_pending(raw);
2943		return 0;
2944	} else {
2945		/*
2946		 * No need for an explicit memory barrier here as long as we
2947		 * ensure the ordering of writing to the SPQ element
2948		 * and updating of the SPQ producer, which involves a memory
2949		 * read. If the memory read is ever removed, a full memory
2950		 * barrier will be needed there (inside bnx2x_sp_post()).
2951		 */
2952
2953		/* Send a ramrod */
2954		rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_MULTICAST_RULES,
2955				   raw->cid, U64_HI(raw->rdata_mapping),
2956				   U64_LO(raw->rdata_mapping),
2957				   ETH_CONNECTION_TYPE);
2958		if (rc)
2959			return rc;
2960
2961		/* Ramrod completion is pending */
2962		return 1;
2963	}
2964}
2965
2966static int bnx2x_mcast_validate_e1h(struct bnx2x *bp,
2967				    struct bnx2x_mcast_ramrod_params *p,
2968				    int cmd)
2969{
2970	/* Mark, that there is a work to do */
2971	if ((cmd == BNX2X_MCAST_CMD_DEL) || (cmd == BNX2X_MCAST_CMD_RESTORE))
2972		p->mcast_list_len = 1;
2973
2974	return 0;
2975}
2976
2977static void bnx2x_mcast_revert_e1h(struct bnx2x *bp,
2978				       struct bnx2x_mcast_ramrod_params *p,
2979				       int old_num_bins)
2980{
2981	/* Do nothing */
2982}
2983
2984#define BNX2X_57711_SET_MC_FILTER(filter, bit) \
2985do { \
2986	(filter)[(bit) >> 5] |= (1 << ((bit) & 0x1f)); \
2987} while (0)
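
/* Editor's note (not part of the driver): the macro maps a global bin
 * index onto an array of 32-bit words - (bit) >> 5 selects the word
 * and (bit) & 0x1f the bit within it. For example, bin 37 sets bit 5
 * of the second word:
 *
 *	u32 mc_filter[MC_HASH_SIZE] = { 0 };
 *	BNX2X_57711_SET_MC_FILTER(mc_filter, 37);	- mc_filter[1] == 0x20
 */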
2988
2989static inline void bnx2x_mcast_hdl_add_e1h(struct bnx2x *bp,
2990					   struct bnx2x_mcast_obj *o,
2991					   struct bnx2x_mcast_ramrod_params *p,
2992					   u32 *mc_filter)
2993{
2994	struct bnx2x_mcast_list_elem *mlist_pos;
2995	int bit;
2996
2997	list_for_each_entry(mlist_pos, &p->mcast_list, link) {
2998		bit = bnx2x_mcast_bin_from_mac(mlist_pos->mac);
2999		BNX2X_57711_SET_MC_FILTER(mc_filter, bit);
3000
3001		DP(BNX2X_MSG_SP, "About to configure %pM mcast MAC, bin %d\n",
3002				 mlist_pos->mac, bit);
3003
3004		/* bookkeeping... */
3005		BIT_VEC64_SET_BIT(o->registry.aprox_match.vec,
3006				  bit);
3007	}
3008}
3009
3010static inline void bnx2x_mcast_hdl_restore_e1h(struct bnx2x *bp,
3011	struct bnx2x_mcast_obj *o, struct bnx2x_mcast_ramrod_params *p,
3012	u32 *mc_filter)
3013{
3014	int bit;
3015
3016	for (bit = bnx2x_mcast_get_next_bin(o, 0);
3017	     bit >= 0;
3018	     bit = bnx2x_mcast_get_next_bin(o, bit + 1)) {
3019		BNX2X_57711_SET_MC_FILTER(mc_filter, bit);
3020		DP(BNX2X_MSG_SP, "About to set bin %d\n", bit);
3021	}
3022}
3023
3024/* On 57711 we write the multicast MACs' approximate match
3025 * table directly into the TSTORM's internal RAM, so no special
3026 * tricks are needed to make it work.
3027 */
3028static int bnx2x_mcast_setup_e1h(struct bnx2x *bp,
3029				 struct bnx2x_mcast_ramrod_params *p,
3030				 int cmd)
3031{
3032	int i;
3033	struct bnx2x_mcast_obj *o = p->mcast_obj;
3034	struct bnx2x_raw_obj *r = &o->raw;
3035
3036	/* If CLEAR_ONLY has been requested - only clear the registry
3037	 * and the pending bit, without writing a new filter.
3038	 */
3039	if (!test_bit(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags)) {
3040		u32 mc_filter[MC_HASH_SIZE] = {0};
3041
3042		/* Set the multicast filter bits before writing it into
3043		 * the internal memory.
3044		 */
3045		switch (cmd) {
3046		case BNX2X_MCAST_CMD_ADD:
3047			bnx2x_mcast_hdl_add_e1h(bp, o, p, mc_filter);
3048			break;
3049
3050		case BNX2X_MCAST_CMD_DEL:
3051			DP(BNX2X_MSG_SP,
3052			   "Invalidating multicast MACs configuration\n");
3053
3054			/* clear the registry */
3055			memset(o->registry.aprox_match.vec, 0,
3056			       sizeof(o->registry.aprox_match.vec));
3057			break;
3058
3059		case BNX2X_MCAST_CMD_RESTORE:
3060			bnx2x_mcast_hdl_restore_e1h(bp, o, p, mc_filter);
3061			break;
3062
3063		default:
3064			BNX2X_ERR("Unknown command: %d\n", cmd);
3065			return -EINVAL;
3066		}
3067
3068		/* Set the mcast filter in the internal memory */
3069		for (i = 0; i < MC_HASH_SIZE; i++)
3070			REG_WR(bp, MC_HASH_OFFSET(bp, i), mc_filter[i]);
3071	} else
3072		/* clear the registry */
3073		memset(o->registry.aprox_match.vec, 0,
3074		       sizeof(o->registry.aprox_match.vec));
3075
3076	/* We are done */
3077	r->clear_pending(r);
3078
3079	return 0;
3080}
3081
3082static int bnx2x_mcast_validate_e1(struct bnx2x *bp,
3083				   struct bnx2x_mcast_ramrod_params *p,
3084				   int cmd)
3085{
3086	struct bnx2x_mcast_obj *o = p->mcast_obj;
3087	int reg_sz = o->get_registry_size(o);
3088
3089	switch (cmd) {
3090	/* DEL command deletes all currently configured MACs */
3091	case BNX2X_MCAST_CMD_DEL:
3092		o->set_registry_size(o, 0);
3093		/* Don't break */
3094
3095	/* RESTORE command will restore the entire multicast configuration */
3096	case BNX2X_MCAST_CMD_RESTORE:
3097		p->mcast_list_len = reg_sz;
3098		DP(BNX2X_MSG_SP, "Command %d, p->mcast_list_len=%d\n",
3099		   cmd, p->mcast_list_len);
3100		break;
3101
3102	case BNX2X_MCAST_CMD_ADD:
3103	case BNX2X_MCAST_CMD_CONT:
3104		/* Multicast MACs on 57710 are configured as unicast MACs and
3105		 * there is only a limited number of CAM entries for that
3106		 * matter.
3107		 */
3108		if (p->mcast_list_len > o->max_cmd_len) {
3109			BNX2X_ERR("Can't configure more than %d multicast MACs "
3110				  "on 57710\n", o->max_cmd_len);
3111			return -EINVAL;
3112		}
3113		/* Every configured MAC should be cleared if DEL command is
3114		 * called. Only the last ADD command is relevant, since every
3115		 * ADD command overrides the previous configuration.
3116		 */
3117		DP(BNX2X_MSG_SP, "p->mcast_list_len=%d\n", p->mcast_list_len);
3118		if (p->mcast_list_len > 0)
3119			o->set_registry_size(o, p->mcast_list_len);
3120
3121		break;
3122
3123	default:
3124		BNX2X_ERR("Unknown command: %d\n", cmd);
3125		return -EINVAL;
3126
3127	}
3128
3129	/* We want to ensure that commands are executed one by one for 57710.
3130	 * Therefore each non-empty command will consume o->max_cmd_len.
3131	 */
3132	if (p->mcast_list_len)
3133		o->total_pending_num += o->max_cmd_len;
3134
3135	return 0;
3136}
3137
3138static void bnx2x_mcast_revert_e1(struct bnx2x *bp,
3139				      struct bnx2x_mcast_ramrod_params *p,
3140				      int old_num_macs)
3141{
3142	struct bnx2x_mcast_obj *o = p->mcast_obj;
3143
3144	o->set_registry_size(o, old_num_macs);
3145
3146	/* If the current command hasn't been handled yet, being here
3147	 * means that it's meant to be dropped and we have to
3148	 * update the number of outstanding MACs accordingly.
3149	 */
3150	if (p->mcast_list_len)
3151		o->total_pending_num -= o->max_cmd_len;
3152}
3153
3154static void bnx2x_mcast_set_one_rule_e1(struct bnx2x *bp,
3155					struct bnx2x_mcast_obj *o, int idx,
3156					union bnx2x_mcast_config_data *cfg_data,
3157					int cmd)
3158{
3159	struct bnx2x_raw_obj *r = &o->raw;
3160	struct mac_configuration_cmd *data =
3161		(struct mac_configuration_cmd *)(r->rdata);
3162
3163	/* copy mac */
3164	if ((cmd == BNX2X_MCAST_CMD_ADD) || (cmd == BNX2X_MCAST_CMD_RESTORE)) {
3165		bnx2x_set_fw_mac_addr(&data->config_table[idx].msb_mac_addr,
3166				      &data->config_table[idx].middle_mac_addr,
3167				      &data->config_table[idx].lsb_mac_addr,
3168				      cfg_data->mac);
3169
3170		data->config_table[idx].vlan_id = 0;
3171		data->config_table[idx].pf_id = r->func_id;
3172		data->config_table[idx].clients_bit_vector =
3173			cpu_to_le32(1 << r->cl_id);
3174
3175		SET_FLAG(data->config_table[idx].flags,
3176			 MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
3177			 T_ETH_MAC_COMMAND_SET);
3178	}
3179}
3180
3181/**
3182 * bnx2x_mcast_set_rdata_hdr_e1  - set header values in mac_configuration_cmd
3183 *
3184 * @bp:		device handle
3185 * @p:		multicast ramrod parameters
3186 * @len:	number of rules to handle
3187 */
3188static inline void bnx2x_mcast_set_rdata_hdr_e1(struct bnx2x *bp,
3189					struct bnx2x_mcast_ramrod_params *p,
3190					u8 len)
3191{
3192	struct bnx2x_raw_obj *r = &p->mcast_obj->raw;
3193	struct mac_configuration_cmd *data =
3194		(struct mac_configuration_cmd *)(r->rdata);
3195
3196	u8 offset = (CHIP_REV_IS_SLOW(bp) ?
3197		     BNX2X_MAX_EMUL_MULTI*(1 + r->func_id) :
3198		     BNX2X_MAX_MULTICAST*(1 + r->func_id));
3199
3200	data->hdr.offset = offset;
3201	data->hdr.client_id = 0xff;
3202	data->hdr.echo = ((r->cid & BNX2X_SWCID_MASK) |
3203			  (BNX2X_FILTER_MCAST_PENDING << BNX2X_SWCID_SHIFT));
3204	data->hdr.length = len;
3205}
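
/* Editor's note (not part of the driver): each function gets its own
 * window in the E1 configuration table. Assuming BNX2X_MAX_MULTICAST
 * is 64, func_id 2 on an ASIC chip writes its entries starting at
 * offset 64 * (1 + 2) == 192.
 */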
3206
3207/**
3208 * bnx2x_mcast_handle_restore_cmd_e1 - restore command for 57710
3209 *
3210 * @bp:		device handle
3211 * @o:		multicast object
3212 * @start_idx:	index in the registry to start from
3213 * @rdata_idx:	index in the ramrod data to start from
3214 *
3215 * restore command for 57710 is like all other commands - always a standalone
3216 * command - start_idx and rdata_idx will always be 0. This function will always
3217 * succeed.
3218 * returns -1 to comply with the 57712 variant.
3219 */
3220static inline int bnx2x_mcast_handle_restore_cmd_e1(
3221	struct bnx2x *bp, struct bnx2x_mcast_obj *o , int start_idx,
3222	int *rdata_idx)
3223{
3224	struct bnx2x_mcast_mac_elem *elem;
3225	int i = 0;
3226	union bnx2x_mcast_config_data cfg_data = {0};
3227
3228	/* go through the registry and configure the MACs from it. */
3229	list_for_each_entry(elem, &o->registry.exact_match.macs, link) {
3230		cfg_data.mac = &elem->mac[0];
3231		o->set_one_rule(bp, o, i, &cfg_data, BNX2X_MCAST_CMD_RESTORE);
3232
3233		i++;
3234
3235		DP(BNX2X_MSG_SP, "About to configure %pM mcast MAC\n",
3236				 cfg_data.mac);
3237	}
3238
3239	*rdata_idx = i;
3240
3241	return -1;
3242}
3243
3245static inline int bnx2x_mcast_handle_pending_cmds_e1(
3246	struct bnx2x *bp, struct bnx2x_mcast_ramrod_params *p)
3247{
3248	struct bnx2x_pending_mcast_cmd *cmd_pos;
3249	struct bnx2x_mcast_mac_elem *pmac_pos;
3250	struct bnx2x_mcast_obj *o = p->mcast_obj;
3251	union bnx2x_mcast_config_data cfg_data = {0};
3252	int cnt = 0;
3253
3255	/* If nothing to be done - return */
3256	if (list_empty(&o->pending_cmds_head))
3257		return 0;
3258
3259	/* Handle the first command */
3260	cmd_pos = list_first_entry(&o->pending_cmds_head,
3261				   struct bnx2x_pending_mcast_cmd, link);
3262
3263	switch (cmd_pos->type) {
3264	case BNX2X_MCAST_CMD_ADD:
3265		list_for_each_entry(pmac_pos, &cmd_pos->data.macs_head, link) {
3266			cfg_data.mac = &pmac_pos->mac[0];
3267			o->set_one_rule(bp, o, cnt, &cfg_data, cmd_pos->type);
3268
3269			cnt++;
3270
3271			DP(BNX2X_MSG_SP, "About to configure %pM mcast MAC\n",
3272					 pmac_pos->mac);
3273		}
3274		break;
3275
3276	case BNX2X_MCAST_CMD_DEL:
3277		cnt = cmd_pos->data.macs_num;
3278		DP(BNX2X_MSG_SP, "About to delete %d multicast MACs\n", cnt);
3279		break;
3280
3281	case BNX2X_MCAST_CMD_RESTORE:
3282		o->hdl_restore(bp, o, 0, &cnt);
3283		break;
3284
3285	default:
3286		BNX2X_ERR("Unknown command: %d\n", cmd_pos->type);
3287		return -EINVAL;
3288	}
3289
3290	list_del(&cmd_pos->link);
3291	kfree(cmd_pos);
3292
3293	return cnt;
3294}
3295
3296/**
3297 * bnx2x_get_fw_mac_addr - revert the bnx2x_set_fw_mac_addr().
3298 *
3299 * @fw_hi:	upper two bytes of the MAC in FW format
3300 * @fw_mid:	middle two bytes of the MAC in FW format
3301 * @fw_lo:	lower two bytes of the MAC in FW format
3302 * @mac:	output buffer for the MAC address
3303 */
3304static inline void bnx2x_get_fw_mac_addr(__le16 *fw_hi, __le16 *fw_mid,
3305					 __le16 *fw_lo, u8 *mac)
3306{
3307	mac[1] = ((u8 *)fw_hi)[0];
3308	mac[0] = ((u8 *)fw_hi)[1];
3309	mac[3] = ((u8 *)fw_mid)[0];
3310	mac[2] = ((u8 *)fw_mid)[1];
3311	mac[5] = ((u8 *)fw_lo)[0];
3312	mac[4] = ((u8 *)fw_lo)[1];
3313}
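
/* Editor's note (not part of the driver): the FW keeps the MAC as
 * three 16-bit words with the two bytes of each word swapped, so for
 * a hypothetical 00:11:22:33:44:55 the words hold { 0x11, 0x00 },
 * { 0x33, 0x22 } and { 0x55, 0x44 }, and the helper above restores
 * mac[] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 }.
 */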
3314
3315/**
3316 * bnx2x_mcast_refresh_registry_e1 - update the registry from the ramrod data
3317 *
3318 * @bp:		device handle
3319 * @o:		multicast object
3320 *
3321 * Check the first entry flag in the ramrod data to see whether the command was
3322 * a DELETE or an ADD, and update the registry correspondingly: if ADD - allocate
3323 * memory and add the entries to the registry (list), if DELETE - clear the
3324 * registry and free the memory.
3325 */
3326static inline int bnx2x_mcast_refresh_registry_e1(struct bnx2x *bp,
3327						  struct bnx2x_mcast_obj *o)
3328{
3329	struct bnx2x_raw_obj *raw = &o->raw;
3330	struct bnx2x_mcast_mac_elem *elem;
3331	struct mac_configuration_cmd *data =
3332			(struct mac_configuration_cmd *)(raw->rdata);
3333
3334	/* If first entry contains a SET bit - the command was ADD,
3335	 * otherwise - DEL_ALL
3336	 */
3337	if (GET_FLAG(data->config_table[0].flags,
3338			MAC_CONFIGURATION_ENTRY_ACTION_TYPE)) {
3339		int i, len = data->hdr.length;
3340
3341		/* Return if it was a RESTORE command */
3342		if (!list_empty(&o->registry.exact_match.macs))
3343			return 0;
3344
3345		elem = kzalloc(sizeof(*elem)*len, GFP_ATOMIC);
3346		if (!elem) {
3347			BNX2X_ERR("Failed to allocate registry memory\n");
3348			return -ENOMEM;
3349		}
3350
3351		for (i = 0; i < len; i++, elem++) {
3352			bnx2x_get_fw_mac_addr(
3353				&data->config_table[i].msb_mac_addr,
3354				&data->config_table[i].middle_mac_addr,
3355				&data->config_table[i].lsb_mac_addr,
3356				elem->mac);
3357			DP(BNX2X_MSG_SP, "Adding registry entry for [%pM]\n",
3358					 elem->mac);
3359			list_add_tail(&elem->link,
3360				      &o->registry.exact_match.macs);
3361		}
3362	} else {
3363		elem = list_first_entry(&o->registry.exact_match.macs,
3364					struct bnx2x_mcast_mac_elem, link);
3365		DP(BNX2X_MSG_SP, "Deleting a registry\n");
3366		kfree(elem);
3367		INIT_LIST_HEAD(&o->registry.exact_match.macs);
3368	}
3369
3370	return 0;
3371}
3372
3373static int bnx2x_mcast_setup_e1(struct bnx2x *bp,
3374				struct bnx2x_mcast_ramrod_params *p,
3375				int cmd)
3376{
3377	struct bnx2x_mcast_obj *o = p->mcast_obj;
3378	struct bnx2x_raw_obj *raw = &o->raw;
3379	struct mac_configuration_cmd *data =
3380		(struct mac_configuration_cmd *)(raw->rdata);
3381	int cnt = 0, i, rc;
3382
3383	/* Reset the ramrod data buffer */
3384	memset(data, 0, sizeof(*data));
3385
3386	/* First set all entries as invalid */
3387	for (i = 0; i < o->max_cmd_len; i++)
3388		SET_FLAG(data->config_table[i].flags,
3389			 MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
3390			 T_ETH_MAC_COMMAND_INVALIDATE);
3391
3392	/* Handle pending commands first */
3393	cnt = bnx2x_mcast_handle_pending_cmds_e1(bp, p);
3394
3395	/* If there are no more pending commands - clear SCHEDULED state */
3396	if (list_empty(&o->pending_cmds_head))
3397		o->clear_sched(o);
3398
3399	/* The below may be true iff there were no pending commands */
3400	if (!cnt)
3401		cnt = bnx2x_mcast_handle_current_cmd(bp, p, cmd, 0);
3402
3403	/* For 57710 every command has o->max_cmd_len length to ensure that
3404	 * commands are done one at a time.
3405	 */
3406	o->total_pending_num -= o->max_cmd_len;
3407
3408	/* send a ramrod */
3409
3410	WARN_ON(cnt > o->max_cmd_len);
3411
3412	/* Set ramrod header (in particular, a number of entries to update) */
3413	bnx2x_mcast_set_rdata_hdr_e1(bp, p, (u8)cnt);
3414
3415	/* Update the registry: we need the registry contents to be always up
3416	 * to date in order to be able to execute a RESTORE opcode. Here
3417	 * we use the fact that for 57710 we send one command at a time,
3418	 * hence we may take the registry update out of the command handling
3419	 * and do it in a simpler way here.
3420	 */
3421	rc = bnx2x_mcast_refresh_registry_e1(bp, o);
3422	if (rc)
3423		return rc;
3424
3425	/*
3426	 * If CLEAR_ONLY was requested - don't send a ramrod and clear
3427	 * RAMROD_PENDING status immediately.
3428	 */
3429	if (test_bit(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags)) {
3430		raw->clear_pending(raw);
3431		return 0;
3432	} else {
3433		/*
3434		 * No need for an explicit memory barrier here as long as we
3435		 * ensure the ordering of writing to the SPQ element
3436		 * and updating of the SPQ producer, which involves a memory
3437		 * read. If the memory read is ever removed, a full memory
3438		 * barrier will be needed there (inside bnx2x_sp_post()).
3439		 */
3440
3441		/* Send a ramrod */
3442		rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, raw->cid,
3443				   U64_HI(raw->rdata_mapping),
3444				   U64_LO(raw->rdata_mapping),
3445				   ETH_CONNECTION_TYPE);
3446		if (rc)
3447			return rc;
3448
3449		/* Ramrod completion is pending */
3450		return 1;
3451	}
3453}
3454
3455static int bnx2x_mcast_get_registry_size_exact(struct bnx2x_mcast_obj *o)
3456{
3457	return o->registry.exact_match.num_macs_set;
3458}
3459
3460static int bnx2x_mcast_get_registry_size_aprox(struct bnx2x_mcast_obj *o)
3461{
3462	return o->registry.aprox_match.num_bins_set;
3463}
3464
3465static void bnx2x_mcast_set_registry_size_exact(struct bnx2x_mcast_obj *o,
3466						int n)
3467{
3468	o->registry.exact_match.num_macs_set = n;
3469}
3470
3471static void bnx2x_mcast_set_registry_size_aprox(struct bnx2x_mcast_obj *o,
3472						int n)
3473{
3474	o->registry.aprox_match.num_bins_set = n;
3475}
3476
3477int bnx2x_config_mcast(struct bnx2x *bp,
3478		       struct bnx2x_mcast_ramrod_params *p,
3479		       int cmd)
3480{
3481	struct bnx2x_mcast_obj *o = p->mcast_obj;
3482	struct bnx2x_raw_obj *r = &o->raw;
3483	int rc = 0, old_reg_size;
3484
3485	/* This is needed to recover number of currently configured mcast macs
3486	 * in case of failure.
3487	 */
3488	old_reg_size = o->get_registry_size(o);
3489
3490	/* Do some calculations and checks */
3491	rc = o->validate(bp, p, cmd);
3492	if (rc)
3493		return rc;
3494
3495	/* Return if there is no work to do */
3496	if ((!p->mcast_list_len) && (!o->check_sched(o)))
3497		return 0;
3498
3499	DP(BNX2X_MSG_SP, "o->total_pending_num=%d p->mcast_list_len=%d "
3500			 "o->max_cmd_len=%d\n", o->total_pending_num,
3501			 p->mcast_list_len, o->max_cmd_len);
3502
3503	/* Enqueue the current command to the pending list if we can't complete
3504	 * it in the current iteration
3505	 */
3506	if (r->check_pending(r) ||
3507	    ((o->max_cmd_len > 0) && (o->total_pending_num > o->max_cmd_len))) {
3508		rc = o->enqueue_cmd(bp, p->mcast_obj, p, cmd);
3509		if (rc < 0)
3510			goto error_exit1;
3511
3512		/* As long as the current command is in a command list we
3513		 * don't need to handle it separately.
3514		 */
3515		p->mcast_list_len = 0;
3516	}
3517
3518	if (!r->check_pending(r)) {
3519
3520		/* Set 'pending' state */
3521		r->set_pending(r);
3522
3523		/* Configure the new classification in the chip */
3524		rc = o->config_mcast(bp, p, cmd);
3525		if (rc < 0)
3526			goto error_exit2;
3527
3528		/* Wait for a ramrod completion if was requested */
3529		if (test_bit(RAMROD_COMP_WAIT, &p->ramrod_flags))
3530			rc = o->wait_comp(bp, o);
3531	}
3532
3533	return rc;
3534
3535error_exit2:
3536	r->clear_pending(r);
3537
3538error_exit1:
3539	o->revert(bp, p, old_reg_size);
3540
3541	return rc;
3542}
3543
3544static void bnx2x_mcast_clear_sched(struct bnx2x_mcast_obj *o)
3545{
3546	smp_mb__before_clear_bit();
3547	clear_bit(o->sched_state, o->raw.pstate);
3548	smp_mb__after_clear_bit();
3549}
3550
3551static void bnx2x_mcast_set_sched(struct bnx2x_mcast_obj *o)
3552{
3553	smp_mb__before_clear_bit();
3554	set_bit(o->sched_state, o->raw.pstate);
3555	smp_mb__after_clear_bit();
3556}
3557
3558static bool bnx2x_mcast_check_sched(struct bnx2x_mcast_obj *o)
3559{
3560	return !!test_bit(o->sched_state, o->raw.pstate);
3561}
3562
3563static bool bnx2x_mcast_check_pending(struct bnx2x_mcast_obj *o)
3564{
3565	return o->raw.check_pending(&o->raw) || o->check_sched(o);
3566}
3567
3568void bnx2x_init_mcast_obj(struct bnx2x *bp,
3569			  struct bnx2x_mcast_obj *mcast_obj,
3570			  u8 mcast_cl_id, u32 mcast_cid, u8 func_id,
3571			  u8 engine_id, void *rdata, dma_addr_t rdata_mapping,
3572			  int state, unsigned long *pstate, bnx2x_obj_type type)
3573{
3574	memset(mcast_obj, 0, sizeof(*mcast_obj));
3575
3576	bnx2x_init_raw_obj(&mcast_obj->raw, mcast_cl_id, mcast_cid, func_id,
3577			   rdata, rdata_mapping, state, pstate, type);
3578
3579	mcast_obj->engine_id = engine_id;
3580
3581	INIT_LIST_HEAD(&mcast_obj->pending_cmds_head);
3582
3583	mcast_obj->sched_state = BNX2X_FILTER_MCAST_SCHED;
3584	mcast_obj->check_sched = bnx2x_mcast_check_sched;
3585	mcast_obj->set_sched = bnx2x_mcast_set_sched;
3586	mcast_obj->clear_sched = bnx2x_mcast_clear_sched;
3587
3588	if (CHIP_IS_E1(bp)) {
3589		mcast_obj->config_mcast      = bnx2x_mcast_setup_e1;
3590		mcast_obj->enqueue_cmd       = bnx2x_mcast_enqueue_cmd;
3591		mcast_obj->hdl_restore       =
3592			bnx2x_mcast_handle_restore_cmd_e1;
3593		mcast_obj->check_pending     = bnx2x_mcast_check_pending;
3594
3595		if (CHIP_REV_IS_SLOW(bp))
3596			mcast_obj->max_cmd_len = BNX2X_MAX_EMUL_MULTI;
3597		else
3598			mcast_obj->max_cmd_len = BNX2X_MAX_MULTICAST;
3599
3600		mcast_obj->wait_comp         = bnx2x_mcast_wait;
3601		mcast_obj->set_one_rule      = bnx2x_mcast_set_one_rule_e1;
3602		mcast_obj->validate          = bnx2x_mcast_validate_e1;
3603		mcast_obj->revert            = bnx2x_mcast_revert_e1;
3604		mcast_obj->get_registry_size =
3605			bnx2x_mcast_get_registry_size_exact;
3606		mcast_obj->set_registry_size =
3607			bnx2x_mcast_set_registry_size_exact;
3608
3609		/* 57710 is the only chip that uses the exact match for mcast
3610		 * at the moment.
3611		 */
3612		INIT_LIST_HEAD(&mcast_obj->registry.exact_match.macs);
3613
3614	} else if (CHIP_IS_E1H(bp)) {
3615		mcast_obj->config_mcast  = bnx2x_mcast_setup_e1h;
3616		mcast_obj->enqueue_cmd   = NULL;
3617		mcast_obj->hdl_restore   = NULL;
3618		mcast_obj->check_pending = bnx2x_mcast_check_pending;
3619
3620		/* 57711 doesn't send a ramrod, so it has unlimited credit
3621		 * for one command.
3622		 */
3623		mcast_obj->max_cmd_len       = -1;
3624		mcast_obj->wait_comp         = bnx2x_mcast_wait;
3625		mcast_obj->set_one_rule      = NULL;
3626		mcast_obj->validate          = bnx2x_mcast_validate_e1h;
3627		mcast_obj->revert            = bnx2x_mcast_revert_e1h;
3628		mcast_obj->get_registry_size =
3629			bnx2x_mcast_get_registry_size_aprox;
3630		mcast_obj->set_registry_size =
3631			bnx2x_mcast_set_registry_size_aprox;
3632	} else {
3633		mcast_obj->config_mcast      = bnx2x_mcast_setup_e2;
3634		mcast_obj->enqueue_cmd       = bnx2x_mcast_enqueue_cmd;
3635		mcast_obj->hdl_restore       =
3636			bnx2x_mcast_handle_restore_cmd_e2;
3637		mcast_obj->check_pending     = bnx2x_mcast_check_pending;
3638		/* TODO: There should be a proper HSI define for this number!!!
3639		 */
3640		mcast_obj->max_cmd_len       = 16;
3641		mcast_obj->wait_comp         = bnx2x_mcast_wait;
3642		mcast_obj->set_one_rule      = bnx2x_mcast_set_one_rule_e2;
3643		mcast_obj->validate          = bnx2x_mcast_validate_e2;
3644		mcast_obj->revert            = bnx2x_mcast_revert_e2;
3645		mcast_obj->get_registry_size =
3646			bnx2x_mcast_get_registry_size_aprox;
3647		mcast_obj->set_registry_size =
3648			bnx2x_mcast_set_registry_size_aprox;
3649	}
3650}
3651
3652/*************************** Credit handling **********************************/
3653
3654/**
3655 * __atomic_add_ifless - add if the result is less than a given value.
3656 *
3657 * @v:	pointer of type atomic_t
3658 * @a:	the amount to add to v...
3659 * @u:	...if (v + a) is less than u.
3660 *
3661 * returns true if (v + a) was less than u, and false otherwise.
3662 *
3663 */
3664static inline bool __atomic_add_ifless(atomic_t *v, int a, int u)
3665{
3666	int c, old;
3667
3668	c = atomic_read(v);
3669	for (;;) {
3670		if (unlikely(c + a >= u))
3671			return false;
3672
3673		old = atomic_cmpxchg((v), c, c + a);
3674		if (likely(old == c))
3675			break;
3676		c = old;
3677	}
3678
3679	return true;
3680}
3681
3682/**
3683 * __atomic_dec_ifmoe - dec if the result is greater than or equal to a given value.
3684 *
3685 * @v:	pointer of type atomic_t
3686 * @a:	the amount to dec from v...
3687 * @u:	...if (v - a) is greater than or equal to u.
3688 *
3689 * returns true if (v - a) was greater than or equal to u, and false
3690 * otherwise.
3691 */
3692static inline bool __atomic_dec_ifmoe(atomic_t *v, int a, int u)
3693{
3694	int c, old;
3695
3696	c = atomic_read(v);
3697	for (;;) {
3698		if (unlikely(c - a < u))
3699			return false;
3700
3701		old = atomic_cmpxchg((v), c, c - a);
3702		if (likely(old == c))
3703			break;
3704		c = old;
3705	}
3706
3707	return true;
3708}
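
/*
 * Editorial example (not part of the driver): the two helpers above
 * implement the classic lock-free cmpxchg retry loop.  A caller that
 * temporarily consumes @n units of a counter bounded to [0, max] could
 * pair them as follows; the function name and @max parameter are
 * hypothetical.
 */
static inline bool __maybe_unused example_borrow_units(atomic_t *v, int n,
						       int max)
{
	/* Take @n units; fails if the counter would drop below 0 */
	if (!__atomic_dec_ifmoe(v, n, 0))
		return false;

	/* ... a step that may fail would go here ... */

	/* Give @n units back; fails only if the result would exceed max */
	return __atomic_add_ifless(v, n, max + 1);
}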
3709
3710static bool bnx2x_credit_pool_get(struct bnx2x_credit_pool_obj *o, int cnt)
3711{
3712	bool rc;
3713
3714	smp_mb();
3715	rc = __atomic_dec_ifmoe(&o->credit, cnt, 0);
3716	smp_mb();
3717
3718	return rc;
3719}
3720
3721static bool bnx2x_credit_pool_put(struct bnx2x_credit_pool_obj *o, int cnt)
3722{
3723	bool rc;
3724
3725	smp_mb();
3726
3727	/* Don't allow a refill if credit + cnt > pool_sz */
3728	rc = __atomic_add_ifless(&o->credit, cnt, o->pool_sz + 1);
3729
3730	smp_mb();
3731
3732	return rc;
3733}
3734
3735static int bnx2x_credit_pool_check(struct bnx2x_credit_pool_obj *o)
3736{
3737	int cur_credit;
3738
3739	smp_mb();
3740	cur_credit = atomic_read(&o->credit);
3741
3742	return cur_credit;
3743}
3744
3745static bool bnx2x_credit_pool_always_true(struct bnx2x_credit_pool_obj *o,
3746					  int cnt)
3747{
3748	return true;
3749}
3750
3751
3752static bool bnx2x_credit_pool_get_entry(
3753	struct bnx2x_credit_pool_obj *o,
3754	int *offset)
3755{
3756	int idx, vec, i;
3757
3758	*offset = -1;
3759
3760	/* Find "internal cam-offset" then add to base for this object... */
3761	for (vec = 0; vec < BNX2X_POOL_VEC_SIZE; vec++) {
3762
3763		/* Skip the current vector if there are no free entries in it */
3764		if (!o->pool_mirror[vec])
3765			continue;
3766
3767		/* If we've got here we are going to find a free entry */
3768		for (idx = vec * BIT_VEC64_ELEM_SZ, i = 0;
3769		      i < BIT_VEC64_ELEM_SZ; idx++, i++)
3770
3771			if (BIT_VEC64_TEST_BIT(o->pool_mirror, idx)) {
3772				/* Got one!! */
3773				BIT_VEC64_CLEAR_BIT(o->pool_mirror, idx);
3774				*offset = o->base_pool_offset + idx;
3775				return true;
3776			}
3777	}
3778
3779	return false;
3780}
3781
3782static bool bnx2x_credit_pool_put_entry(
3783	struct bnx2x_credit_pool_obj *o,
3784	int offset)
3785{
3786	if (offset < o->base_pool_offset)
3787		return false;
3788
3789	offset -= o->base_pool_offset;
3790
3791	if (offset >= o->pool_sz)
3792		return false;
3793
3794	/* Return the entry to the pool */
3795	BIT_VEC64_SET_BIT(o->pool_mirror, offset);
3796
3797	return true;
3798}
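
/*
 * Editorial sketch (not part of the driver): a typical CAM entry round
 * trip through the two helpers above, via the pool's function pointers.
 * The function and variable names are hypothetical.
 */
static void __maybe_unused example_cam_entry_round_trip(
	struct bnx2x_credit_pool_obj *p)
{
	int cam_offset;

	/* Claim a free line: clears its bit in pool_mirror and returns
	 * base_pool_offset + the internal index.
	 */
	if (!p->get_entry(p, &cam_offset))
		return;

	/* ... program a MAC at cam_offset here ... */

	/* Release it: put_entry() range-checks the offset and sets the
	 * mirror bit back.
	 */
	p->put_entry(p, cam_offset);
}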
3799
3800static bool bnx2x_credit_pool_put_entry_always_true(
3801	struct bnx2x_credit_pool_obj *o,
3802	int offset)
3803{
3804	return true;
3805}
3806
3807static bool bnx2x_credit_pool_get_entry_always_true(
3808	struct bnx2x_credit_pool_obj *o,
3809	int *offset)
3810{
3811	*offset = -1;
3812	return true;
3813}
3814/**
3815 * bnx2x_init_credit_pool - initialize credit pool internals.
3816 *
3817 * @p:		credit pool object
3818 * @base:	Base entry in the CAM to use.
3819 * @credit:	pool size.
3820 *
3821 * If base is negative no CAM entries handling will be performed.
3822 * If credit is negative pool operations will always succeed (unlimited pool).
3823 *
3824 */
3825static inline void bnx2x_init_credit_pool(struct bnx2x_credit_pool_obj *p,
3826					  int base, int credit)
3827{
3828	/* Zero the object first */
3829	memset(p, 0, sizeof(*p));
3830
3831	/* Set the table to all 1s */
3832	memset(&p->pool_mirror, 0xff, sizeof(p->pool_mirror));
3833
3834	/* Init a pool as full */
3835	atomic_set(&p->credit, credit);
3836
3837	/* The total pool size */
3838	p->pool_sz = credit;
3839
3840	p->base_pool_offset = base;
3841
3842	/* Commit the change */
3843	smp_mb();
3844
3845	p->check = bnx2x_credit_pool_check;
3846
3847	/* if pool credit is negative - disable the checks */
3848	if (credit >= 0) {
3849		p->put      = bnx2x_credit_pool_put;
3850		p->get      = bnx2x_credit_pool_get;
3851		p->put_entry = bnx2x_credit_pool_put_entry;
3852		p->get_entry = bnx2x_credit_pool_get_entry;
3853	} else {
3854		p->put      = bnx2x_credit_pool_always_true;
3855		p->get      = bnx2x_credit_pool_always_true;
3856		p->put_entry = bnx2x_credit_pool_put_entry_always_true;
3857		p->get_entry = bnx2x_credit_pool_get_entry_always_true;
3858	}
3859
3860	/* If base is negative - disable entries handling */
3861	if (base < 0) {
3862		p->put_entry = bnx2x_credit_pool_put_entry_always_true;
3863		p->get_entry = bnx2x_credit_pool_get_entry_always_true;
3864	}
3865}
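
/*
 * Editorial examples (not part of the driver) of the three
 * initialization modes described above:
 *
 *	bnx2x_init_credit_pool(p, 16, 8);    exact pool: 8 credits,
 *					     CAM entries 16..23
 *	bnx2x_init_credit_pool(p, -1, 8);    8 credits, no CAM entry
 *					     bookkeeping (base < 0)
 *	bnx2x_init_credit_pool(p, 0, -1);    unlimited: get/put always
 *					     succeed (credit < 0)
 */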
3866
3867void bnx2x_init_mac_credit_pool(struct bnx2x *bp,
3868				struct bnx2x_credit_pool_obj *p, u8 func_id,
3869				u8 func_num)
3870{
3871/* TODO: this will be defined in consts as well... */
3872#define BNX2X_CAM_SIZE_EMUL 5
3873
3874	int cam_sz;
3875
3876	if (CHIP_IS_E1(bp)) {
3877		/* In E1, multicast MACs are saved in the CAM as well... */
3878		if (!CHIP_REV_IS_SLOW(bp))
3879			cam_sz = (MAX_MAC_CREDIT_E1 / 2) - BNX2X_MAX_MULTICAST;
3880		else
3881			cam_sz = BNX2X_CAM_SIZE_EMUL - BNX2X_MAX_EMUL_MULTI;
3882
3883		bnx2x_init_credit_pool(p, func_id * cam_sz, cam_sz);
3884
3885	} else if (CHIP_IS_E1H(bp)) {
3886		/* CAM credit is equally divided among all active functions
3887		 * on the PORT.
3888		 */
3889		if (func_num > 0) {
3890			if (!CHIP_REV_IS_SLOW(bp))
3891				cam_sz = (MAX_MAC_CREDIT_E1H / (2*func_num));
3892			else
3893				cam_sz = BNX2X_CAM_SIZE_EMUL;
3894			bnx2x_init_credit_pool(p, func_id * cam_sz, cam_sz);
3895		} else {
3896			/* this should never happen! Block MAC operations. */
3897			bnx2x_init_credit_pool(p, 0, 0);
3898		}
3899
3900	} else {
3901
3902		/*
3903		 * CAM credit is equally divided among all active functions
3904		 * on the PATH.
3905		 */
3906		if (func_num > 0) {
3907			if (!CHIP_REV_IS_SLOW(bp))
3908				cam_sz = (MAX_MAC_CREDIT_E2 / func_num);
3909			else
3910				cam_sz = BNX2X_CAM_SIZE_EMUL;
3911
3912			/*
3913			 * No need for CAM entries handling for 57712 and
3914			 * newer.
3915			 */
3916			bnx2x_init_credit_pool(p, -1, cam_sz);
3917		} else {
3918			/* this should never happen! Block MAC operations. */
3919			bnx2x_init_credit_pool(p, 0, 0);
3920		}
3921
3922	}
3923}
3924
3925void bnx2x_init_vlan_credit_pool(struct bnx2x *bp,
3926				 struct bnx2x_credit_pool_obj *p,
3927				 u8 func_id,
3928				 u8 func_num)
3929{
3930	if (CHIP_IS_E1x(bp)) {
3931		/*
3932		 * There is no VLAN credit in HW on 57710 and 57711; only
3933		 * MAC / MAC-VLAN pairs can be set.
3934		 */
3935		bnx2x_init_credit_pool(p, 0, -1);
3936	} else {
3937		/*
3938		 * CAM credit is equally divided among all active functions
3939		 * on the PATH.
3940		 */
3941		if (func_num > 0) {
3942			int credit = MAX_VLAN_CREDIT_E2 / func_num;
3943			bnx2x_init_credit_pool(p, func_id * credit, credit);
3944		} else
3945			/* this should never happen! Block VLAN operations. */
3946			bnx2x_init_credit_pool(p, 0, 0);
3947	}
3948}
3949
3950/****************** RSS Configuration ******************/
3951/**
3952 * bnx2x_debug_print_ind_table - prints the indirection table configuration.
3953 *
3954 * @bp:		driver handle
3955 * @p:		pointer to rss configuration
3956 *
3957 * Prints it when NETIF_MSG_IFUP debug level is configured.
3958 */
3959static inline void bnx2x_debug_print_ind_table(struct bnx2x *bp,
3960					struct bnx2x_config_rss_params *p)
3961{
3962	int i;
3963
3964	DP(BNX2X_MSG_SP, "Setting indirection table to:\n");
3965	DP(BNX2X_MSG_SP, "0x0000: ");
3966	for (i = 0; i < T_ETH_INDIRECTION_TABLE_SIZE; i++) {
3967		DP_CONT(BNX2X_MSG_SP, "0x%02x ", p->ind_table[i]);
3968
3969		/* Print 4 bytes per line */
3970		if ((i + 1 < T_ETH_INDIRECTION_TABLE_SIZE) &&
3971		    (((i + 1) & 0x3) == 0)) {
3972			DP_CONT(BNX2X_MSG_SP, "\n");
3973			DP(BNX2X_MSG_SP, "0x%04x: ", i + 1);
3974		}
3975	}
3976
3977	DP_CONT(BNX2X_MSG_SP, "\n");
3978}
3979
3980/**
3981 * bnx2x_setup_rss - configure RSS
3982 *
3983 * @bp:		device handle
3984 * @p:		rss configuration
3985 *
3986 * Sends an UPDATE ramrod to apply the configuration.
3987 */
3988static int bnx2x_setup_rss(struct bnx2x *bp,
3989			   struct bnx2x_config_rss_params *p)
3990{
3991	struct bnx2x_rss_config_obj *o = p->rss_obj;
3992	struct bnx2x_raw_obj *r = &o->raw;
3993	struct eth_rss_update_ramrod_data *data =
3994		(struct eth_rss_update_ramrod_data *)(r->rdata);
3995	u8 rss_mode = 0;
3996	int rc;
3997
3998	memset(data, 0, sizeof(*data));
3999
4000	DP(BNX2X_MSG_SP, "Configuring RSS\n");
4001
4002	/* Set an echo field */
4003	data->echo = (r->cid & BNX2X_SWCID_MASK) |
4004		     (r->state << BNX2X_SWCID_SHIFT);
4005
4006	/* RSS mode */
4007	if (test_bit(BNX2X_RSS_MODE_DISABLED, &p->rss_flags))
4008		rss_mode = ETH_RSS_MODE_DISABLED;
4009	else if (test_bit(BNX2X_RSS_MODE_REGULAR, &p->rss_flags))
4010		rss_mode = ETH_RSS_MODE_REGULAR;
4011	else if (test_bit(BNX2X_RSS_MODE_VLAN_PRI, &p->rss_flags))
4012		rss_mode = ETH_RSS_MODE_VLAN_PRI;
4013	else if (test_bit(BNX2X_RSS_MODE_E1HOV_PRI, &p->rss_flags))
4014		rss_mode = ETH_RSS_MODE_E1HOV_PRI;
4015	else if (test_bit(BNX2X_RSS_MODE_IP_DSCP, &p->rss_flags))
4016		rss_mode = ETH_RSS_MODE_IP_DSCP;
4017
4018	data->rss_mode = rss_mode;
4019
4020	DP(BNX2X_MSG_SP, "rss_mode=%d\n", rss_mode);
4021
4022	/* RSS capabilities */
4023	if (test_bit(BNX2X_RSS_IPV4, &p->rss_flags))
4024		data->capabilities |=
4025			ETH_RSS_UPDATE_RAMROD_DATA_IPV4_CAPABILITY;
4026
4027	if (test_bit(BNX2X_RSS_IPV4_TCP, &p->rss_flags))
4028		data->capabilities |=
4029			ETH_RSS_UPDATE_RAMROD_DATA_IPV4_TCP_CAPABILITY;
4030
4031	if (test_bit(BNX2X_RSS_IPV6, &p->rss_flags))
4032		data->capabilities |=
4033			ETH_RSS_UPDATE_RAMROD_DATA_IPV6_CAPABILITY;
4034
4035	if (test_bit(BNX2X_RSS_IPV6_TCP, &p->rss_flags))
4036		data->capabilities |=
4037			ETH_RSS_UPDATE_RAMROD_DATA_IPV6_TCP_CAPABILITY;
4038
4039	/* Hashing mask */
4040	data->rss_result_mask = p->rss_result_mask;
4041
4042	/* RSS engine ID */
4043	data->rss_engine_id = o->engine_id;
4044
4045	DP(BNX2X_MSG_SP, "rss_engine_id=%d\n", data->rss_engine_id);
4046
4047	/* Indirection table */
4048	memcpy(data->indirection_table, p->ind_table,
4049		  T_ETH_INDIRECTION_TABLE_SIZE);
4050
4051	/* Remember the last configuration */
4052	memcpy(o->ind_table, p->ind_table, T_ETH_INDIRECTION_TABLE_SIZE);
4053
4054	/* Print the indirection table */
4055	if (netif_msg_ifup(bp))
4056		bnx2x_debug_print_ind_table(bp, p);
4057
4058	/* RSS keys */
4059	if (test_bit(BNX2X_RSS_SET_SRCH, &p->rss_flags)) {
4060		memcpy(&data->rss_key[0], &p->rss_key[0],
4061		       sizeof(data->rss_key));
4062		data->capabilities |= ETH_RSS_UPDATE_RAMROD_DATA_UPDATE_RSS_KEY;
4063	}
4064
4065	/*
4066	 *  No need for an explicit memory barrier here as long as we
4067	 *  ensure the ordering of writing to the SPQ element and
4068	 *  updating of the SPQ producer, which involves a memory read;
4069	 *  if that read were removed, a full memory barrier would be
4070	 *  needed there (inside bnx2x_sp_post()).
4071	 */
4072
4073	/* Send a ramrod */
4074	rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_RSS_UPDATE, r->cid,
4075			   U64_HI(r->rdata_mapping),
4076			   U64_LO(r->rdata_mapping),
4077			   ETH_CONNECTION_TYPE);
4078
4079	if (rc < 0)
4080		return rc;
4081
4082	return 1;
4083}
4084
4085void bnx2x_get_rss_ind_table(struct bnx2x_rss_config_obj *rss_obj,
4086			     u8 *ind_table)
4087{
4088	memcpy(ind_table, rss_obj->ind_table, sizeof(rss_obj->ind_table));
4089}
4090
4091int bnx2x_config_rss(struct bnx2x *bp,
4092		     struct bnx2x_config_rss_params *p)
4093{
4094	int rc;
4095	struct bnx2x_rss_config_obj *o = p->rss_obj;
4096	struct bnx2x_raw_obj *r = &o->raw;
4097
4098	/* Do nothing if only driver cleanup was requested */
4099	if (test_bit(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags))
4100		return 0;
4101
4102	r->set_pending(r);
4103
4104	rc = o->config_rss(bp, p);
4105	if (rc < 0) {
4106		r->clear_pending(r);
4107		return rc;
4108	}
4109
4110	if (test_bit(RAMROD_COMP_WAIT, &p->ramrod_flags))
4111		rc = r->wait_comp(bp, r);
4112
4113	return rc;
4114}
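
/*
 * Editorial usage sketch (not part of the driver): a caller typically
 * fills bnx2x_config_rss_params on the stack and requests a synchronous
 * completion.  The function name is hypothetical; the flags and fields
 * are the ones tested in bnx2x_setup_rss() above.
 */
static int __maybe_unused example_config_rss(struct bnx2x *bp,
					     struct bnx2x_rss_config_obj *obj,
					     const u8 *ind_table)
{
	struct bnx2x_config_rss_params params;

	memset(&params, 0, sizeof(params));

	params.rss_obj = obj;

	/* Wait for the RSS_UPDATE ramrod completion */
	__set_bit(RAMROD_COMP_WAIT, &params.ramrod_flags);

	/* Regular hashing on IPv4 / TCP-IPv4 */
	__set_bit(BNX2X_RSS_MODE_REGULAR, &params.rss_flags);
	__set_bit(BNX2X_RSS_IPV4, &params.rss_flags);
	__set_bit(BNX2X_RSS_IPV4_TCP, &params.rss_flags);

	memcpy(params.ind_table, ind_table, T_ETH_INDIRECTION_TABLE_SIZE);

	return bnx2x_config_rss(bp, &params);
}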
4115
4116
4117void bnx2x_init_rss_config_obj(struct bnx2x *bp,
4118			       struct bnx2x_rss_config_obj *rss_obj,
4119			       u8 cl_id, u32 cid, u8 func_id, u8 engine_id,
4120			       void *rdata, dma_addr_t rdata_mapping,
4121			       int state, unsigned long *pstate,
4122			       bnx2x_obj_type type)
4123{
4124	bnx2x_init_raw_obj(&rss_obj->raw, cl_id, cid, func_id, rdata,
4125			   rdata_mapping, state, pstate, type);
4126
4127	rss_obj->engine_id  = engine_id;
4128	rss_obj->config_rss = bnx2x_setup_rss;
4129}
4130
4131/********************** Queue state object ***********************************/
4132
4133/**
4134 * bnx2x_queue_state_change - perform Queue state change transition
4135 *
4136 * @bp:		device handle
4137 * @params:	parameters to perform the transition
4138 *
4139 * returns 0 in case of successfully completed transition, negative error
4140 * code in case of failure, positive (EBUSY) value if there is a completion
4141 * that is still pending (possible only if RAMROD_COMP_WAIT is
4142 * not set in params->ramrod_flags for asynchronous commands).
4143 *
4144 */
4145int bnx2x_queue_state_change(struct bnx2x *bp,
4146			     struct bnx2x_queue_state_params *params)
4147{
4148	struct bnx2x_queue_sp_obj *o = params->q_obj;
4149	int rc, pending_bit;
4150	unsigned long *pending = &o->pending;
4151
4152	/* Check that the requested transition is legal */
4153	if (o->check_transition(bp, o, params))
4154		return -EINVAL;
4155
4156	/* Set "pending" bit */
4157	pending_bit = o->set_pending(o, params);
4158
4159	/* Don't send a command if only driver cleanup was requested */
4160	if (test_bit(RAMROD_DRV_CLR_ONLY, &params->ramrod_flags))
4161		o->complete_cmd(bp, o, pending_bit);
4162	else {
4163		/* Send a ramrod */
4164		rc = o->send_cmd(bp, params);
4165		if (rc) {
4166			o->next_state = BNX2X_Q_STATE_MAX;
4167			clear_bit(pending_bit, pending);
4168			smp_mb__after_clear_bit();
4169			return rc;
4170		}
4171
4172		if (test_bit(RAMROD_COMP_WAIT, &params->ramrod_flags)) {
4173			rc = o->wait_comp(bp, o, pending_bit);
4174			if (rc)
4175				return rc;
4176
4177			return 0;
4178		}
4179	}
4180
4181	return !!test_bit(pending_bit, pending);
4182}
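
/*
 * Editorial usage sketch (not part of the driver): halting a queue and
 * waiting for the completion.  The function name is hypothetical.
 */
static int __maybe_unused example_halt_queue(struct bnx2x *bp,
					     struct bnx2x_queue_sp_obj *q_obj)
{
	struct bnx2x_queue_state_params params;

	memset(&params, 0, sizeof(params));

	params.q_obj = q_obj;
	params.cmd = BNX2X_Q_CMD_HALT;

	/* Block until the HALT ramrod completion arrives */
	__set_bit(RAMROD_COMP_WAIT, &params.ramrod_flags);

	/* 0 on success, negative on error (-EBUSY, -EINVAL, ...) */
	return bnx2x_queue_state_change(bp, &params);
}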
4183
4184
4185static int bnx2x_queue_set_pending(struct bnx2x_queue_sp_obj *obj,
4186				   struct bnx2x_queue_state_params *params)
4187{
4188	enum bnx2x_queue_cmd cmd = params->cmd, bit;
4189
4190	/* ACTIVATE and DEACTIVATE commands are implemented on top of
4191	 * UPDATE command.
4192	 */
4193	if ((cmd == BNX2X_Q_CMD_ACTIVATE) ||
4194	    (cmd == BNX2X_Q_CMD_DEACTIVATE))
4195		bit = BNX2X_Q_CMD_UPDATE;
4196	else
4197		bit = cmd;
4198
4199	set_bit(bit, &obj->pending);
4200	return bit;
4201}
4202
4203static int bnx2x_queue_wait_comp(struct bnx2x *bp,
4204				 struct bnx2x_queue_sp_obj *o,
4205				 enum bnx2x_queue_cmd cmd)
4206{
4207	return bnx2x_state_wait(bp, cmd, &o->pending);
4208}
4209
4210/**
4211 * bnx2x_queue_comp_cmd - complete the state change command.
4212 *
4213 * @bp:		device handle
4214 * @o:		queue state object
4215 * @cmd:	command that has completed
4216 *
4217 * Checks that the arrived completion is expected.
4218 */
4219static int bnx2x_queue_comp_cmd(struct bnx2x *bp,
4220				struct bnx2x_queue_sp_obj *o,
4221				enum bnx2x_queue_cmd cmd)
4222{
4223	unsigned long cur_pending = o->pending;
4224
4225	if (!test_and_clear_bit(cmd, &cur_pending)) {
4226		BNX2X_ERR("Bad MC reply %d for queue %d in state %d "
4227			  "pending 0x%lx, next_state %d\n", cmd,
4228			  o->cids[BNX2X_PRIMARY_CID_INDEX],
4229			  o->state, cur_pending, o->next_state);
4230		return -EINVAL;
4231	}
4232
4233	if (o->next_tx_only >= o->max_cos)
4234		/* >= because tx-only count must always be smaller than max_cos
4235		 * since the primary connection supports CoS 0
4236		 */
4237		BNX2X_ERR("illegal value for next tx_only: %d. max cos was %d",
4238			   o->next_tx_only, o->max_cos);
4239
4240	DP(BNX2X_MSG_SP, "Completing command %d for queue %d, "
4241			 "setting state to %d\n", cmd,
4242			 o->cids[BNX2X_PRIMARY_CID_INDEX], o->next_state);
4243
4244	if (o->next_tx_only)  /* print num tx-only if any exist */
4245		DP(BNX2X_MSG_SP, "primary cid %d: num tx-only cons %d\n",
4246			   o->cids[BNX2X_PRIMARY_CID_INDEX], o->next_tx_only);
4247
4248	o->state = o->next_state;
4249	o->num_tx_only = o->next_tx_only;
4250	o->next_state = BNX2X_Q_STATE_MAX;
4251
4252	/* It's important that o->state and o->next_state are
4253	 * updated before o->pending.
4254	 */
4255	wmb();
4256
4257	clear_bit(cmd, &o->pending);
4258	smp_mb__after_clear_bit();
4259
4260	return 0;
4261}
4262
4263static void bnx2x_q_fill_setup_data_e2(struct bnx2x *bp,
4264				struct bnx2x_queue_state_params *cmd_params,
4265				struct client_init_ramrod_data *data)
4266{
4267	struct bnx2x_queue_setup_params *params = &cmd_params->params.setup;
4268
4269	/* Rx data */
4270
4271	/* IPv6 TPA supported for E2 and above only */
4272	data->rx.tpa_en |= test_bit(BNX2X_Q_FLG_TPA_IPV6, &params->flags) *
4273				CLIENT_INIT_RX_DATA_TPA_EN_IPV6;
4274}
4275
4276static void bnx2x_q_fill_init_general_data(struct bnx2x *bp,
4277				struct bnx2x_queue_sp_obj *o,
4278				struct bnx2x_general_setup_params *params,
4279				struct client_init_general_data *gen_data,
4280				unsigned long *flags)
4281{
4282	gen_data->client_id = o->cl_id;
4283
4284	if (test_bit(BNX2X_Q_FLG_STATS, flags)) {
4285		gen_data->statistics_counter_id =
4286					params->stat_id;
4287		gen_data->statistics_en_flg = 1;
4288		gen_data->statistics_zero_flg =
4289			test_bit(BNX2X_Q_FLG_ZERO_STATS, flags);
4290	} else
4291		gen_data->statistics_counter_id =
4292					DISABLE_STATISTIC_COUNTER_ID_VALUE;
4293
4294	gen_data->is_fcoe_flg = test_bit(BNX2X_Q_FLG_FCOE, flags);
4295	gen_data->activate_flg = test_bit(BNX2X_Q_FLG_ACTIVE, flags);
4296	gen_data->sp_client_id = params->spcl_id;
4297	gen_data->mtu = cpu_to_le16(params->mtu);
4298	gen_data->func_id = o->func_id;
4299
4300
4301	gen_data->cos = params->cos;
4302
4303	gen_data->traffic_type =
4304		test_bit(BNX2X_Q_FLG_FCOE, flags) ?
4305		LLFC_TRAFFIC_TYPE_FCOE : LLFC_TRAFFIC_TYPE_NW;
4306
4307	DP(BNX2X_MSG_SP, "flags: active %d, cos %d, stats en %d\n",
4308	   gen_data->activate_flg, gen_data->cos, gen_data->statistics_en_flg);
4309}
4310
4311static void bnx2x_q_fill_init_tx_data(struct bnx2x_queue_sp_obj *o,
4312				struct bnx2x_txq_setup_params *params,
4313				struct client_init_tx_data *tx_data,
4314				unsigned long *flags)
4315{
4316	tx_data->enforce_security_flg =
4317		test_bit(BNX2X_Q_FLG_TX_SEC, flags);
4318	tx_data->default_vlan =
4319		cpu_to_le16(params->default_vlan);
4320	tx_data->default_vlan_flg =
4321		test_bit(BNX2X_Q_FLG_DEF_VLAN, flags);
4322	tx_data->tx_switching_flg =
4323		test_bit(BNX2X_Q_FLG_TX_SWITCH, flags);
4324	tx_data->anti_spoofing_flg =
4325		test_bit(BNX2X_Q_FLG_ANTI_SPOOF, flags);
4326	tx_data->tx_status_block_id = params->fw_sb_id;
4327	tx_data->tx_sb_index_number = params->sb_cq_index;
4328	tx_data->tss_leading_client_id = params->tss_leading_cl_id;
4329
4330	tx_data->tx_bd_page_base.lo =
4331		cpu_to_le32(U64_LO(params->dscr_map));
4332	tx_data->tx_bd_page_base.hi =
4333		cpu_to_le32(U64_HI(params->dscr_map));
4334
4335	/* Don't configure any Tx switching mode during queue SETUP */
4336	tx_data->state = 0;
4337}
4338
4339static void bnx2x_q_fill_init_pause_data(struct bnx2x_queue_sp_obj *o,
4340				struct rxq_pause_params *params,
4341				struct client_init_rx_data *rx_data)
4342{
4343	/* flow control data */
4344	rx_data->cqe_pause_thr_low = cpu_to_le16(params->rcq_th_lo);
4345	rx_data->cqe_pause_thr_high = cpu_to_le16(params->rcq_th_hi);
4346	rx_data->bd_pause_thr_low = cpu_to_le16(params->bd_th_lo);
4347	rx_data->bd_pause_thr_high = cpu_to_le16(params->bd_th_hi);
4348	rx_data->sge_pause_thr_low = cpu_to_le16(params->sge_th_lo);
4349	rx_data->sge_pause_thr_high = cpu_to_le16(params->sge_th_hi);
4350	rx_data->rx_cos_mask = cpu_to_le16(params->pri_map);
4351}
4352
4353static void bnx2x_q_fill_init_rx_data(struct bnx2x_queue_sp_obj *o,
4354				struct bnx2x_rxq_setup_params *params,
4355				struct client_init_rx_data *rx_data,
4356				unsigned long *flags)
4357{
4358	/* Rx data */
4359	rx_data->tpa_en = test_bit(BNX2X_Q_FLG_TPA, flags) *
4360				CLIENT_INIT_RX_DATA_TPA_EN_IPV4;
4361	rx_data->vmqueue_mode_en_flg = 0;
4362
4363	rx_data->cache_line_alignment_log_size =
4364		params->cache_line_log;
4365	rx_data->enable_dynamic_hc =
4366		test_bit(BNX2X_Q_FLG_DHC, flags);
4367	rx_data->max_sges_for_packet = params->max_sges_pkt;
4368	rx_data->client_qzone_id = params->cl_qzone_id;
4369	rx_data->max_agg_size = cpu_to_le16(params->tpa_agg_sz);
4370
4371	/* Always start in DROP_ALL mode */
4372	rx_data->state = cpu_to_le16(CLIENT_INIT_RX_DATA_UCAST_DROP_ALL |
4373				     CLIENT_INIT_RX_DATA_MCAST_DROP_ALL);
4374
4375	/* We don't set drop flags */
4376	rx_data->drop_ip_cs_err_flg = 0;
4377	rx_data->drop_tcp_cs_err_flg = 0;
4378	rx_data->drop_ttl0_flg = 0;
4379	rx_data->drop_udp_cs_err_flg = 0;
4380	rx_data->inner_vlan_removal_enable_flg =
4381		test_bit(BNX2X_Q_FLG_VLAN, flags);
4382	rx_data->outer_vlan_removal_enable_flg =
4383		test_bit(BNX2X_Q_FLG_OV, flags);
4384	rx_data->status_block_id = params->fw_sb_id;
4385	rx_data->rx_sb_index_number = params->sb_cq_index;
4386	rx_data->max_tpa_queues = params->max_tpa_queues;
4387	rx_data->max_bytes_on_bd = cpu_to_le16(params->buf_sz);
4388	rx_data->sge_buff_size = cpu_to_le16(params->sge_buf_sz);
4389	rx_data->bd_page_base.lo =
4390		cpu_to_le32(U64_LO(params->dscr_map));
4391	rx_data->bd_page_base.hi =
4392		cpu_to_le32(U64_HI(params->dscr_map));
4393	rx_data->sge_page_base.lo =
4394		cpu_to_le32(U64_LO(params->sge_map));
4395	rx_data->sge_page_base.hi =
4396		cpu_to_le32(U64_HI(params->sge_map));
4397	rx_data->cqe_page_base.lo =
4398		cpu_to_le32(U64_LO(params->rcq_map));
4399	rx_data->cqe_page_base.hi =
4400		cpu_to_le32(U64_HI(params->rcq_map));
4401	rx_data->is_leading_rss = test_bit(BNX2X_Q_FLG_LEADING_RSS, flags);
4402
4403	if (test_bit(BNX2X_Q_FLG_MCAST, flags)) {
4404		rx_data->approx_mcast_engine_id = o->func_id;
4405		rx_data->is_approx_mcast = 1;
4406	}
4407
4408	rx_data->rss_engine_id = params->rss_engine_id;
4409
4410	/* silent vlan removal */
4411	rx_data->silent_vlan_removal_flg =
4412		test_bit(BNX2X_Q_FLG_SILENT_VLAN_REM, flags);
4413	rx_data->silent_vlan_value =
4414		cpu_to_le16(params->silent_removal_value);
4415	rx_data->silent_vlan_mask =
4416		cpu_to_le16(params->silent_removal_mask);
4417
4418}
4419
4420/* initialize the general, tx and rx parts of a queue object */
4421static void bnx2x_q_fill_setup_data_cmn(struct bnx2x *bp,
4422				struct bnx2x_queue_state_params *cmd_params,
4423				struct client_init_ramrod_data *data)
4424{
4425	bnx2x_q_fill_init_general_data(bp, cmd_params->q_obj,
4426				       &cmd_params->params.setup.gen_params,
4427				       &data->general,
4428				       &cmd_params->params.setup.flags);
4429
4430	bnx2x_q_fill_init_tx_data(cmd_params->q_obj,
4431				  &cmd_params->params.setup.txq_params,
4432				  &data->tx,
4433				  &cmd_params->params.setup.flags);
4434
4435	bnx2x_q_fill_init_rx_data(cmd_params->q_obj,
4436				  &cmd_params->params.setup.rxq_params,
4437				  &data->rx,
4438				  &cmd_params->params.setup.flags);
4439
4440	bnx2x_q_fill_init_pause_data(cmd_params->q_obj,
4441				     &cmd_params->params.setup.pause_params,
4442				     &data->rx);
4443}
4444
4445/* initialize the general and tx parts of a tx-only queue object */
4446static void bnx2x_q_fill_setup_tx_only(struct bnx2x *bp,
4447				struct bnx2x_queue_state_params *cmd_params,
4448				struct tx_queue_init_ramrod_data *data)
4449{
4450	bnx2x_q_fill_init_general_data(bp, cmd_params->q_obj,
4451				       &cmd_params->params.tx_only.gen_params,
4452				       &data->general,
4453				       &cmd_params->params.tx_only.flags);
4454
4455	bnx2x_q_fill_init_tx_data(cmd_params->q_obj,
4456				  &cmd_params->params.tx_only.txq_params,
4457				  &data->tx,
4458				  &cmd_params->params.tx_only.flags);
4459
4460	DP(BNX2X_MSG_SP, "cid %d, tx bd page lo %x hi %x\n", cmd_params->q_obj->cids[0],
4461	   data->tx.tx_bd_page_base.lo, data->tx.tx_bd_page_base.hi);
4462}
4463
4464/**
4465 * bnx2x_q_init - init HW/FW queue
4466 *
4467 * @bp:		device handle
4468 * @params:	queue state parameters
4469 *
4470 * HW/FW initial Queue configuration:
4471 *      - HC: Rx and Tx
4472 *      - CDU context validation
4473 *
4474 */
4475static inline int bnx2x_q_init(struct bnx2x *bp,
4476			       struct bnx2x_queue_state_params *params)
4477{
4478	struct bnx2x_queue_sp_obj *o = params->q_obj;
4479	struct bnx2x_queue_init_params *init = &params->params.init;
4480	u16 hc_usec;
4481	u8 cos;
4482
4483	/* Tx HC configuration */
4484	if (test_bit(BNX2X_Q_TYPE_HAS_TX, &o->type) &&
4485	    test_bit(BNX2X_Q_FLG_HC, &init->tx.flags)) {
4486		hc_usec = init->tx.hc_rate ? 1000000 / init->tx.hc_rate : 0;
4487
4488		bnx2x_update_coalesce_sb_index(bp, init->tx.fw_sb_id,
4489			init->tx.sb_cq_index,
4490			!test_bit(BNX2X_Q_FLG_HC_EN, &init->tx.flags),
4491			hc_usec);
4492	}
4493
4494	/* Rx HC configuration */
4495	if (test_bit(BNX2X_Q_TYPE_HAS_RX, &o->type) &&
4496	    test_bit(BNX2X_Q_FLG_HC, &init->rx.flags)) {
4497		hc_usec = init->rx.hc_rate ? 1000000 / init->rx.hc_rate : 0;
4498
4499		bnx2x_update_coalesce_sb_index(bp, init->rx.fw_sb_id,
4500			init->rx.sb_cq_index,
4501			!test_bit(BNX2X_Q_FLG_HC_EN, &init->rx.flags),
4502			hc_usec);
4503	}
4504
4505	/* Set CDU context validation values */
4506	for (cos = 0; cos < o->max_cos; cos++) {
4507		DP(BNX2X_MSG_SP, "setting context validation. cid %d, cos %d\n",
4508				 o->cids[cos], cos);
4509		DP(BNX2X_MSG_SP, "context pointer %p\n", init->cxts[cos]);
4510		bnx2x_set_ctx_validation(bp, init->cxts[cos], o->cids[cos]);
4511	}
4512
4513	/* As no ramrod is sent, complete the command immediately  */
4514	o->complete_cmd(bp, o, BNX2X_Q_CMD_INIT);
4515
4516	mmiowb();
4517	smp_mb();
4518
4519	return 0;
4520}
4521
4522static inline int bnx2x_q_send_setup_e1x(struct bnx2x *bp,
4523					struct bnx2x_queue_state_params *params)
4524{
4525	struct bnx2x_queue_sp_obj *o = params->q_obj;
4526	struct client_init_ramrod_data *rdata =
4527		(struct client_init_ramrod_data *)o->rdata;
4528	dma_addr_t data_mapping = o->rdata_mapping;
4529	int ramrod = RAMROD_CMD_ID_ETH_CLIENT_SETUP;
4530
4531	/* Clear the ramrod data */
4532	memset(rdata, 0, sizeof(*rdata));
4533
4534	/* Fill the ramrod data */
4535	bnx2x_q_fill_setup_data_cmn(bp, params, rdata);
4536
4537	/*
4538	 *  No need for an explicit memory barrier here as long as we
4539	 *  ensure the ordering of writing to the SPQ element and
4540	 *  updating of the SPQ producer, which involves a memory read;
4541	 *  if that read were removed, a full memory barrier would be
4542	 *  needed there (inside bnx2x_sp_post()).
4543	 */
4544
4545	return bnx2x_sp_post(bp, ramrod, o->cids[BNX2X_PRIMARY_CID_INDEX],
4546			     U64_HI(data_mapping),
4547			     U64_LO(data_mapping), ETH_CONNECTION_TYPE);
4548}
4549
4550static inline int bnx2x_q_send_setup_e2(struct bnx2x *bp,
4551					struct bnx2x_queue_state_params *params)
4552{
4553	struct bnx2x_queue_sp_obj *o = params->q_obj;
4554	struct client_init_ramrod_data *rdata =
4555		(struct client_init_ramrod_data *)o->rdata;
4556	dma_addr_t data_mapping = o->rdata_mapping;
4557	int ramrod = RAMROD_CMD_ID_ETH_CLIENT_SETUP;
4558
4559	/* Clear the ramrod data */
4560	memset(rdata, 0, sizeof(*rdata));
4561
4562	/* Fill the ramrod data */
4563	bnx2x_q_fill_setup_data_cmn(bp, params, rdata);
4564	bnx2x_q_fill_setup_data_e2(bp, params, rdata);
4565
4566	/*
4567	 *  No need for an explicit memory barrier here as long as we
4568	 *  ensure the ordering of writing to the SPQ element and
4569	 *  updating of the SPQ producer, which involves a memory read;
4570	 *  if that read were removed, a full memory barrier would be
4571	 *  needed there (inside bnx2x_sp_post()).
4572	 */
4573
4574	return bnx2x_sp_post(bp, ramrod, o->cids[BNX2X_PRIMARY_CID_INDEX],
4575			     U64_HI(data_mapping),
4576			     U64_LO(data_mapping), ETH_CONNECTION_TYPE);
4577}
4578
4579static inline int bnx2x_q_send_setup_tx_only(struct bnx2x *bp,
4580				  struct bnx2x_queue_state_params *params)
4581{
4582	struct bnx2x_queue_sp_obj *o = params->q_obj;
4583	struct tx_queue_init_ramrod_data *rdata =
4584		(struct tx_queue_init_ramrod_data *)o->rdata;
4585	dma_addr_t data_mapping = o->rdata_mapping;
4586	int ramrod = RAMROD_CMD_ID_ETH_TX_QUEUE_SETUP;
4587	struct bnx2x_queue_setup_tx_only_params *tx_only_params =
4588		&params->params.tx_only;
4589	u8 cid_index = tx_only_params->cid_index;
4590
4591
4592	if (cid_index >= o->max_cos) {
4593		BNX2X_ERR("queue[%d]: cid_index (%d) is out of range\n",
4594			  o->cl_id, cid_index);
4595		return -EINVAL;
4596	}
4597
4598	DP(BNX2X_MSG_SP, "parameters received: cos: %d sp-id: %d\n",
4599			 tx_only_params->gen_params.cos,
4600			 tx_only_params->gen_params.spcl_id);
4601
4602	/* Clear the ramrod data */
4603	memset(rdata, 0, sizeof(*rdata));
4604
4605	/* Fill the ramrod data */
4606	bnx2x_q_fill_setup_tx_only(bp, params, rdata);
4607
4608	DP(BNX2X_MSG_SP, "sending tx-only ramrod: cid %d, client-id %d, "
4609			 "sp-client id %d, cos %d\n",
4610			 o->cids[cid_index],
4611			 rdata->general.client_id,
4612			 rdata->general.sp_client_id, rdata->general.cos);
4613
4614	/*
4615	 *  No need for an explicit memory barrier here as long as we
4616	 *  ensure the ordering of writing to the SPQ element and
4617	 *  updating of the SPQ producer, which involves a memory read;
4618	 *  if that read were removed, a full memory barrier would be
4619	 *  needed there (inside bnx2x_sp_post()).
4620	 */
4621
4622	return bnx2x_sp_post(bp, ramrod, o->cids[cid_index],
4623			     U64_HI(data_mapping),
4624			     U64_LO(data_mapping), ETH_CONNECTION_TYPE);
4625}
4626
4627static void bnx2x_q_fill_update_data(struct bnx2x *bp,
4628				     struct bnx2x_queue_sp_obj *obj,
4629				     struct bnx2x_queue_update_params *params,
4630				     struct client_update_ramrod_data *data)
4631{
4632	/* Client ID of the client to update */
4633	data->client_id = obj->cl_id;
4634
4635	/* Function ID of the client to update */
4636	data->func_id = obj->func_id;
4637
4638	/* Default VLAN value */
4639	data->default_vlan = cpu_to_le16(params->def_vlan);
4640
4641	/* Inner VLAN stripping */
4642	data->inner_vlan_removal_enable_flg =
4643		test_bit(BNX2X_Q_UPDATE_IN_VLAN_REM, &params->update_flags);
4644	data->inner_vlan_removal_change_flg =
4645		test_bit(BNX2X_Q_UPDATE_IN_VLAN_REM_CHNG,
4646			 &params->update_flags);
4647
4648	/* Outer VLAN stripping */
4649	data->outer_vlan_removal_enable_flg =
4650		test_bit(BNX2X_Q_UPDATE_OUT_VLAN_REM, &params->update_flags);
4651	data->outer_vlan_removal_change_flg =
4652		test_bit(BNX2X_Q_UPDATE_OUT_VLAN_REM_CHNG,
4653			 &params->update_flags);
4654
4655	/* Drop packets that have a source MAC that doesn't belong to this
4656	 * Queue.
4657	 */
4658	data->anti_spoofing_enable_flg =
4659		test_bit(BNX2X_Q_UPDATE_ANTI_SPOOF, &params->update_flags);
4660	data->anti_spoofing_change_flg =
4661		test_bit(BNX2X_Q_UPDATE_ANTI_SPOOF_CHNG, &params->update_flags);
4662
4663	/* Activate/Deactivate */
4664	data->activate_flg =
4665		test_bit(BNX2X_Q_UPDATE_ACTIVATE, &params->update_flags);
4666	data->activate_change_flg =
4667		test_bit(BNX2X_Q_UPDATE_ACTIVATE_CHNG, &params->update_flags);
4668
4669	/* Enable default VLAN */
4670	data->default_vlan_enable_flg =
4671		test_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN, &params->update_flags);
4672	data->default_vlan_change_flg =
4673		test_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN_CHNG,
4674			 &params->update_flags);
4675
4676	/* silent vlan removal */
4677	data->silent_vlan_change_flg =
4678		test_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM_CHNG,
4679			 &params->update_flags);
4680	data->silent_vlan_removal_flg =
4681		test_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM, &params->update_flags);
4682	data->silent_vlan_value = cpu_to_le16(params->silent_removal_value);
4683	data->silent_vlan_mask = cpu_to_le16(params->silent_removal_mask);
4684}
4685
4686static inline int bnx2x_q_send_update(struct bnx2x *bp,
4687				      struct bnx2x_queue_state_params *params)
4688{
4689	struct bnx2x_queue_sp_obj *o = params->q_obj;
4690	struct client_update_ramrod_data *rdata =
4691		(struct client_update_ramrod_data *)o->rdata;
4692	dma_addr_t data_mapping = o->rdata_mapping;
4693	struct bnx2x_queue_update_params *update_params =
4694		&params->params.update;
4695	u8 cid_index = update_params->cid_index;
4696
4697	if (cid_index >= o->max_cos) {
4698		BNX2X_ERR("queue[%d]: cid_index (%d) is out of range\n",
4699			  o->cl_id, cid_index);
4700		return -EINVAL;
4701	}
4702
4703
4704	/* Clear the ramrod data */
4705	memset(rdata, 0, sizeof(*rdata));
4706
4707	/* Fill the ramrod data */
4708	bnx2x_q_fill_update_data(bp, o, update_params, rdata);
4709
4710	/*
4711	 *  No need for an explicit memory barrier here as long as we
4712	 *  ensure the ordering of writing to the SPQ element and
4713	 *  updating of the SPQ producer, which involves a memory read;
4714	 *  if that read were removed, a full memory barrier would be
4715	 *  needed there (inside bnx2x_sp_post()).
4716	 */
4717
4718	return bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CLIENT_UPDATE,
4719			     o->cids[cid_index], U64_HI(data_mapping),
4720			     U64_LO(data_mapping), ETH_CONNECTION_TYPE);
4721}
4722
4723/**
4724 * bnx2x_q_send_deactivate - send DEACTIVATE command
4725 *
4726 * @bp:		device handle
4727 * @params:	queue state parameters
4728 *
4729 * implemented using the UPDATE command.
4730 */
4731static inline int bnx2x_q_send_deactivate(struct bnx2x *bp,
4732					struct bnx2x_queue_state_params *params)
4733{
4734	struct bnx2x_queue_update_params *update = &params->params.update;
4735
4736	memset(update, 0, sizeof(*update));
4737
4738	__set_bit(BNX2X_Q_UPDATE_ACTIVATE_CHNG, &update->update_flags);
4739
4740	return bnx2x_q_send_update(bp, params);
4741}
4742
4743/**
4744 * bnx2x_q_send_activate - send ACTIVATE command
4745 *
4746 * @bp:		device handle
4747 * @params:	queue state parameters
4748 *
4749 * implemented using the UPDATE command.
4750 */
4751static inline int bnx2x_q_send_activate(struct bnx2x *bp,
4752					struct bnx2x_queue_state_params *params)
4753{
4754	struct bnx2x_queue_update_params *update = &params->params.update;
4755
4756	memset(update, 0, sizeof(*update));
4757
4758	__set_bit(BNX2X_Q_UPDATE_ACTIVATE, &update->update_flags);
4759	__set_bit(BNX2X_Q_UPDATE_ACTIVATE_CHNG, &update->update_flags);
4760
4761	return bnx2x_q_send_update(bp, params);
4762}
4763
4764static inline int bnx2x_q_send_update_tpa(struct bnx2x *bp,
4765					struct bnx2x_queue_state_params *params)
4766{
4767	/* TODO: Not implemented yet. */
4768	return -1;
4769}
4770
4771static inline int bnx2x_q_send_halt(struct bnx2x *bp,
4772				    struct bnx2x_queue_state_params *params)
4773{
4774	struct bnx2x_queue_sp_obj *o = params->q_obj;
4775
4776	return bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT,
4777			     o->cids[BNX2X_PRIMARY_CID_INDEX], 0, o->cl_id,
4778			     ETH_CONNECTION_TYPE);
4779}
4780
4781static inline int bnx2x_q_send_cfc_del(struct bnx2x *bp,
4782				       struct bnx2x_queue_state_params *params)
4783{
4784	struct bnx2x_queue_sp_obj *o = params->q_obj;
4785	u8 cid_idx = params->params.cfc_del.cid_index;
4786
4787	if (cid_idx >= o->max_cos) {
4788		BNX2X_ERR("queue[%d]: cid_index (%d) is out of range\n",
4789			  o->cl_id, cid_idx);
4790		return -EINVAL;
4791	}
4792
4793	return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_CFC_DEL,
4794			     o->cids[cid_idx], 0, 0, NONE_CONNECTION_TYPE);
4795}
4796
4797static inline int bnx2x_q_send_terminate(struct bnx2x *bp,
4798					struct bnx2x_queue_state_params *params)
4799{
4800	struct bnx2x_queue_sp_obj *o = params->q_obj;
4801	u8 cid_index = params->params.terminate.cid_index;
4802
4803	if (cid_index >= o->max_cos) {
4804		BNX2X_ERR("queue[%d]: cid_index (%d) is out of range\n",
4805			  o->cl_id, cid_index);
4806		return -EINVAL;
4807	}
4808
4809	return bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_TERMINATE,
4810			     o->cids[cid_index], 0, 0, ETH_CONNECTION_TYPE);
4811}
4812
4813static inline int bnx2x_q_send_empty(struct bnx2x *bp,
4814				     struct bnx2x_queue_state_params *params)
4815{
4816	struct bnx2x_queue_sp_obj *o = params->q_obj;
4817
4818	return bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_EMPTY,
4819			     o->cids[BNX2X_PRIMARY_CID_INDEX], 0, 0,
4820			     ETH_CONNECTION_TYPE);
4821}
4822
4823static inline int bnx2x_queue_send_cmd_cmn(struct bnx2x *bp,
4824					struct bnx2x_queue_state_params *params)
4825{
4826	switch (params->cmd) {
4827	case BNX2X_Q_CMD_INIT:
4828		return bnx2x_q_init(bp, params);
4829	case BNX2X_Q_CMD_SETUP_TX_ONLY:
4830		return bnx2x_q_send_setup_tx_only(bp, params);
4831	case BNX2X_Q_CMD_DEACTIVATE:
4832		return bnx2x_q_send_deactivate(bp, params);
4833	case BNX2X_Q_CMD_ACTIVATE:
4834		return bnx2x_q_send_activate(bp, params);
4835	case BNX2X_Q_CMD_UPDATE:
4836		return bnx2x_q_send_update(bp, params);
4837	case BNX2X_Q_CMD_UPDATE_TPA:
4838		return bnx2x_q_send_update_tpa(bp, params);
4839	case BNX2X_Q_CMD_HALT:
4840		return bnx2x_q_send_halt(bp, params);
4841	case BNX2X_Q_CMD_CFC_DEL:
4842		return bnx2x_q_send_cfc_del(bp, params);
4843	case BNX2X_Q_CMD_TERMINATE:
4844		return bnx2x_q_send_terminate(bp, params);
4845	case BNX2X_Q_CMD_EMPTY:
4846		return bnx2x_q_send_empty(bp, params);
4847	default:
4848		BNX2X_ERR("Unknown command: %d\n", params->cmd);
4849		return -EINVAL;
4850	}
4851}
4852
4853static int bnx2x_queue_send_cmd_e1x(struct bnx2x *bp,
4854				    struct bnx2x_queue_state_params *params)
4855{
4856	switch (params->cmd) {
4857	case BNX2X_Q_CMD_SETUP:
4858		return bnx2x_q_send_setup_e1x(bp, params);
4859	case BNX2X_Q_CMD_INIT:
4860	case BNX2X_Q_CMD_SETUP_TX_ONLY:
4861	case BNX2X_Q_CMD_DEACTIVATE:
4862	case BNX2X_Q_CMD_ACTIVATE:
4863	case BNX2X_Q_CMD_UPDATE:
4864	case BNX2X_Q_CMD_UPDATE_TPA:
4865	case BNX2X_Q_CMD_HALT:
4866	case BNX2X_Q_CMD_CFC_DEL:
4867	case BNX2X_Q_CMD_TERMINATE:
4868	case BNX2X_Q_CMD_EMPTY:
4869		return bnx2x_queue_send_cmd_cmn(bp, params);
4870	default:
4871		BNX2X_ERR("Unknown command: %d\n", params->cmd);
4872		return -EINVAL;
4873	}
4874}
4875
4876static int bnx2x_queue_send_cmd_e2(struct bnx2x *bp,
4877				   struct bnx2x_queue_state_params *params)
4878{
4879	switch (params->cmd) {
4880	case BNX2X_Q_CMD_SETUP:
4881		return bnx2x_q_send_setup_e2(bp, params);
4882	case BNX2X_Q_CMD_INIT:
4883	case BNX2X_Q_CMD_SETUP_TX_ONLY:
4884	case BNX2X_Q_CMD_DEACTIVATE:
4885	case BNX2X_Q_CMD_ACTIVATE:
4886	case BNX2X_Q_CMD_UPDATE:
4887	case BNX2X_Q_CMD_UPDATE_TPA:
4888	case BNX2X_Q_CMD_HALT:
4889	case BNX2X_Q_CMD_CFC_DEL:
4890	case BNX2X_Q_CMD_TERMINATE:
4891	case BNX2X_Q_CMD_EMPTY:
4892		return bnx2x_queue_send_cmd_cmn(bp, params);
4893	default:
4894		BNX2X_ERR("Unknown command: %d\n", params->cmd);
4895		return -EINVAL;
4896	}
4897}
4898
4899/**
4900 * bnx2x_queue_chk_transition - check state machine of a regular Queue
4901 *
4902 * @bp:		device handle
4903 * @o:		queue state object
4904 * @params:	queue state parameters
4905 *
4906 * (not a Forwarding queue)
4907 * It both checks if the requested command is legal in a current
4908 * state and, if it's legal, sets a `next_state' in the object
4909 * that will be used in the completion flow to set the `state'
4910 * of the object.
4911 *
4912 * returns 0 if a requested command is a legal transition,
4913 *         -EINVAL otherwise.
4914 */
4915static int bnx2x_queue_chk_transition(struct bnx2x *bp,
4916				      struct bnx2x_queue_sp_obj *o,
4917				      struct bnx2x_queue_state_params *params)
4918{
4919	enum bnx2x_q_state state = o->state, next_state = BNX2X_Q_STATE_MAX;
4920	enum bnx2x_queue_cmd cmd = params->cmd;
4921	struct bnx2x_queue_update_params *update_params =
4922		 &params->params.update;
4923	u8 next_tx_only = o->num_tx_only;
4924
4925	/*
4926	 * Forget all pending for completion commands if a driver only state
4927	 * transition has been requested.
4928	 */
4929	if (test_bit(RAMROD_DRV_CLR_ONLY, &params->ramrod_flags)) {
4930		o->pending = 0;
4931		o->next_state = BNX2X_Q_STATE_MAX;
4932	}
4933
4934	/*
4935	 * Don't allow a next state transition if we are in the middle of
4936	 * the previous one.
4937	 */
4938	if (o->pending)
4939		return -EBUSY;
4940
4941	switch (state) {
4942	case BNX2X_Q_STATE_RESET:
4943		if (cmd == BNX2X_Q_CMD_INIT)
4944			next_state = BNX2X_Q_STATE_INITIALIZED;
4945
4946		break;
4947	case BNX2X_Q_STATE_INITIALIZED:
4948		if (cmd == BNX2X_Q_CMD_SETUP) {
4949			if (test_bit(BNX2X_Q_FLG_ACTIVE,
4950				     &params->params.setup.flags))
4951				next_state = BNX2X_Q_STATE_ACTIVE;
4952			else
4953				next_state = BNX2X_Q_STATE_INACTIVE;
4954		}
4955
4956		break;
4957	case BNX2X_Q_STATE_ACTIVE:
4958		if (cmd == BNX2X_Q_CMD_DEACTIVATE)
4959			next_state = BNX2X_Q_STATE_INACTIVE;
4960
4961		else if ((cmd == BNX2X_Q_CMD_EMPTY) ||
4962			 (cmd == BNX2X_Q_CMD_UPDATE_TPA))
4963			next_state = BNX2X_Q_STATE_ACTIVE;
4964
4965		else if (cmd == BNX2X_Q_CMD_SETUP_TX_ONLY) {
4966			next_state = BNX2X_Q_STATE_MULTI_COS;
4967			next_tx_only = 1;
4968		}
4969
4970		else if (cmd == BNX2X_Q_CMD_HALT)
4971			next_state = BNX2X_Q_STATE_STOPPED;
4972
4973		else if (cmd == BNX2X_Q_CMD_UPDATE) {
4974			/* If "active" state change is requested, update the
4975			 *  state accordingly.
4976			 */
4977			if (test_bit(BNX2X_Q_UPDATE_ACTIVATE_CHNG,
4978				     &update_params->update_flags) &&
4979			    !test_bit(BNX2X_Q_UPDATE_ACTIVATE,
4980				      &update_params->update_flags))
4981				next_state = BNX2X_Q_STATE_INACTIVE;
4982			else
4983				next_state = BNX2X_Q_STATE_ACTIVE;
4984		}
4985
4986		break;
4987	case BNX2X_Q_STATE_MULTI_COS:
4988		if (cmd == BNX2X_Q_CMD_TERMINATE)
4989			next_state = BNX2X_Q_STATE_MCOS_TERMINATED;
4990
4991		else if (cmd == BNX2X_Q_CMD_SETUP_TX_ONLY) {
4992			next_state = BNX2X_Q_STATE_MULTI_COS;
4993			next_tx_only = o->num_tx_only + 1;
4994		}
4995
4996		else if ((cmd == BNX2X_Q_CMD_EMPTY) ||
4997			 (cmd == BNX2X_Q_CMD_UPDATE_TPA))
4998			next_state = BNX2X_Q_STATE_MULTI_COS;
4999
5000		else if (cmd == BNX2X_Q_CMD_UPDATE) {
5001			/* If "active" state change is requested, update the
5002			 *  state accordingly.
5003			 */
5004			if (test_bit(BNX2X_Q_UPDATE_ACTIVATE_CHNG,
5005				     &update_params->update_flags) &&
5006			    !test_bit(BNX2X_Q_UPDATE_ACTIVATE,
5007				      &update_params->update_flags))
5008				next_state = BNX2X_Q_STATE_INACTIVE;
5009			else
5010				next_state = BNX2X_Q_STATE_MULTI_COS;
5011		}
5012
5013		break;
5014	case BNX2X_Q_STATE_MCOS_TERMINATED:
5015		if (cmd == BNX2X_Q_CMD_CFC_DEL) {
5016			next_tx_only = o->num_tx_only - 1;
5017			if (next_tx_only == 0)
5018				next_state = BNX2X_Q_STATE_ACTIVE;
5019			else
5020				next_state = BNX2X_Q_STATE_MULTI_COS;
5021		}
5022
5023		break;
5024	case BNX2X_Q_STATE_INACTIVE:
5025		if (cmd == BNX2X_Q_CMD_ACTIVATE)
5026			next_state = BNX2X_Q_STATE_ACTIVE;
5027
5028		else if ((cmd == BNX2X_Q_CMD_EMPTY) ||
5029			 (cmd == BNX2X_Q_CMD_UPDATE_TPA))
5030			next_state = BNX2X_Q_STATE_INACTIVE;
5031
5032		else if (cmd == BNX2X_Q_CMD_HALT)
5033			next_state = BNX2X_Q_STATE_STOPPED;
5034
5035		else if (cmd == BNX2X_Q_CMD_UPDATE) {
5036			/* If "active" state change is requested, update the
5037			 * state accordingly.
5038			 */
5039			if (test_bit(BNX2X_Q_UPDATE_ACTIVATE_CHNG,
5040				     &update_params->update_flags) &&
5041			    test_bit(BNX2X_Q_UPDATE_ACTIVATE,
5042				     &update_params->update_flags)) {
5043				if (o->num_tx_only == 0)
5044					next_state = BNX2X_Q_STATE_ACTIVE;
5045				else /* tx only queues exist for this queue */
5046					next_state = BNX2X_Q_STATE_MULTI_COS;
5047			} else
5048				next_state = BNX2X_Q_STATE_INACTIVE;
5049		}
5050
5051		break;
5052	case BNX2X_Q_STATE_STOPPED:
5053		if (cmd == BNX2X_Q_CMD_TERMINATE)
5054			next_state = BNX2X_Q_STATE_TERMINATED;
5055
5056		break;
5057	case BNX2X_Q_STATE_TERMINATED:
5058		if (cmd == BNX2X_Q_CMD_CFC_DEL)
5059			next_state = BNX2X_Q_STATE_RESET;
5060
5061		break;
5062	default:
5063		BNX2X_ERR("Illegal state: %d\n", state);
5064	}
5065
5066	/* Transition is assured */
5067	if (next_state != BNX2X_Q_STATE_MAX) {
5068		DP(BNX2X_MSG_SP, "Good state transition: %d(%d)->%d\n",
5069				 state, cmd, next_state);
5070		o->next_state = next_state;
5071		o->next_tx_only = next_tx_only;
5072		return 0;
5073	}
5074
5075	DP(BNX2X_MSG_SP, "Bad state transition request: %d %d\n", state, cmd);
5076
5077	return -EINVAL;
5078}
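
/*
 * Editorial summary of the regular Queue state machine implemented
 * above (UPDATE, EMPTY and UPDATE_TPA keep the current state unless an
 * activity change is requested via the UPDATE flags):
 *
 *	RESET --INIT--> INITIALIZED --SETUP--> ACTIVE | INACTIVE
 *	ACTIVE --DEACTIVATE--> INACTIVE;  INACTIVE --ACTIVATE--> ACTIVE
 *	ACTIVE --SETUP_TX_ONLY--> MULTI_COS --SETUP_TX_ONLY--> MULTI_COS
 *	MULTI_COS --TERMINATE--> MCOS_TERMINATED --CFC_DEL-->
 *				 ACTIVE | MULTI_COS (by remaining tx-only count)
 *	ACTIVE | INACTIVE --HALT--> STOPPED --TERMINATE--> TERMINATED
 *	TERMINATED --CFC_DEL--> RESET
 */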
5079
5080void bnx2x_init_queue_obj(struct bnx2x *bp,
5081			  struct bnx2x_queue_sp_obj *obj,
5082			  u8 cl_id, u32 *cids, u8 cid_cnt, u8 func_id,
5083			  void *rdata,
5084			  dma_addr_t rdata_mapping, unsigned long type)
5085{
5086	memset(obj, 0, sizeof(*obj));
5087
5088	/* We support only BNX2X_MULTI_TX_COS Tx CoS at the moment */
5089	BUG_ON(BNX2X_MULTI_TX_COS < cid_cnt);
5090
5091	memcpy(obj->cids, cids, sizeof(obj->cids[0]) * cid_cnt);
5092	obj->max_cos = cid_cnt;
5093	obj->cl_id = cl_id;
5094	obj->func_id = func_id;
5095	obj->rdata = rdata;
5096	obj->rdata_mapping = rdata_mapping;
5097	obj->type = type;
5098	obj->next_state = BNX2X_Q_STATE_MAX;
5099
5100	if (CHIP_IS_E1x(bp))
5101		obj->send_cmd = bnx2x_queue_send_cmd_e1x;
5102	else
5103		obj->send_cmd = bnx2x_queue_send_cmd_e2;
5104
5105	obj->check_transition = bnx2x_queue_chk_transition;
5106
5107	obj->complete_cmd = bnx2x_queue_comp_cmd;
5108	obj->wait_comp = bnx2x_queue_wait_comp;
5109	obj->set_pending = bnx2x_queue_set_pending;
5110}
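
/*
 * Editorial usage sketch (not part of the driver): initializing a queue
 * object for a single-CoS Rx/Tx Ethernet queue.  The function name is
 * hypothetical; the type bits are the ones tested in bnx2x_q_init().
 */
static void __maybe_unused example_init_queue_obj(struct bnx2x *bp,
					struct bnx2x_queue_sp_obj *q_obj,
					u8 cl_id, u32 cid, u8 func_id,
					void *rdata, dma_addr_t mapping)
{
	unsigned long q_type = 0;

	__set_bit(BNX2X_Q_TYPE_HAS_RX, &q_type);
	__set_bit(BNX2X_Q_TYPE_HAS_TX, &q_type);

	/* One CID: only the primary connection, so max_cos becomes 1 */
	bnx2x_init_queue_obj(bp, q_obj, cl_id, &cid, 1, func_id,
			     rdata, mapping, q_type);
}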
5111
5112void bnx2x_queue_set_cos_cid(struct bnx2x *bp,
5113			     struct bnx2x_queue_sp_obj *obj,
5114			     u32 cid, u8 index)
5115{
5116	obj->cids[index] = cid;
5117}
5118
5119/********************** Function state object *********************************/
5120enum bnx2x_func_state bnx2x_func_get_state(struct bnx2x *bp,
5121					   struct bnx2x_func_sp_obj *o)
5122{
5123	/* in the middle of a transaction - return INVALID state */
5124	if (o->pending)
5125		return BNX2X_F_STATE_MAX;
5126
5127	/*
5128	 * Ensure the ordering of reading o->pending and o->state:
5129	 * o->pending should be read first.
5130	 */
5131	rmb();
5132
5133	return o->state;
5134}
5135
5136static int bnx2x_func_wait_comp(struct bnx2x *bp,
5137				struct bnx2x_func_sp_obj *o,
5138				enum bnx2x_func_cmd cmd)
5139{
5140	return bnx2x_state_wait(bp, cmd, &o->pending);
5141}
5142
5143/**
5144 * bnx2x_func_state_change_comp - complete the state machine transition
5145 *
5146 * @bp:		device handle
5147 * @o:		function state object
5148 * @cmd:	command that has completed
5149 *
5150 * Called on state change transition. Completes the state
5151 * machine transition only - no HW interaction.
5152 */
5153static inline int bnx2x_func_state_change_comp(struct bnx2x *bp,
5154					       struct bnx2x_func_sp_obj *o,
5155					       enum bnx2x_func_cmd cmd)
5156{
5157	unsigned long cur_pending = o->pending;
5158
5159	if (!test_and_clear_bit(cmd, &cur_pending)) {
5160		BNX2X_ERR("Bad MC reply %d for func %d in state %d "
5161			  "pending 0x%lx, next_state %d\n", cmd, BP_FUNC(bp),
5162			  o->state, cur_pending, o->next_state);
5163		return -EINVAL;
5164	}
5165
5166	DP(BNX2X_MSG_SP,
5167	   "Completing command %d for func %d, setting state to %d\n",
5168	   cmd, BP_FUNC(bp), o->next_state);
5169
5170	o->state = o->next_state;
5171	o->next_state = BNX2X_F_STATE_MAX;
5172
5173	/* It's important that o->state and o->next_state are
5174	 * updated before o->pending.
5175	 */
5176	wmb();
5177
5178	clear_bit(cmd, &o->pending);
5179	smp_mb__after_clear_bit();
5180
5181	return 0;
5182}
5183
5184/**
5185 * bnx2x_func_comp_cmd - complete the state change command
5186 *
5187 * @bp:		device handle
5188 * @o:		function state object
5189 * @cmd:	command that has completed
5190 *
5191 * Checks that the arrived completion is expected.
5192 */
5193static int bnx2x_func_comp_cmd(struct bnx2x *bp,
5194			       struct bnx2x_func_sp_obj *o,
5195			       enum bnx2x_func_cmd cmd)
5196{
5197	/* Complete the state machine part first, check if it's a
5198	 * legal completion.
5199	 */
5200	int rc = bnx2x_func_state_change_comp(bp, o, cmd);
5201	return rc;
5202}
5203
5204/**
5205 * bnx2x_func_chk_transition - perform function state machine transition
5206 *
5207 * @bp:		device handle
5208 * @o:		function state object
5209 * @params:	function state parameters
5210 *
5211 * It both checks if the requested command is legal in a current
5212 * state and, if it's legal, sets a `next_state' in the object
5213 * that will be used in the completion flow to set the `state'
5214 * of the object.
5215 *
5216 * returns 0 if a requested command is a legal transition,
5217 *         -EINVAL otherwise.
5218 */
5219static int bnx2x_func_chk_transition(struct bnx2x *bp,
5220				     struct bnx2x_func_sp_obj *o,
5221				     struct bnx2x_func_state_params *params)
5222{
5223	enum bnx2x_func_state state = o->state, next_state = BNX2X_F_STATE_MAX;
5224	enum bnx2x_func_cmd cmd = params->cmd;
5225
5226	/*
5227	 * Forget all pending for completion commands if a driver only state
5228	 * transition has been requested.
5229	 */
5230	if (test_bit(RAMROD_DRV_CLR_ONLY, &params->ramrod_flags)) {
5231		o->pending = 0;
5232		o->next_state = BNX2X_F_STATE_MAX;
5233	}
5234
5235	/*
5236	 * Don't allow a next state transition if we are in the middle of
5237	 * the previous one.
5238	 */
5239	if (o->pending)
5240		return -EBUSY;
5241
5242	switch (state) {
5243	case BNX2X_F_STATE_RESET:
5244		if (cmd == BNX2X_F_CMD_HW_INIT)
5245			next_state = BNX2X_F_STATE_INITIALIZED;
5246
5247		break;
5248	case BNX2X_F_STATE_INITIALIZED:
5249		if (cmd == BNX2X_F_CMD_START)
5250			next_state = BNX2X_F_STATE_STARTED;
5251
5252		else if (cmd == BNX2X_F_CMD_HW_RESET)
5253			next_state = BNX2X_F_STATE_RESET;
5254
5255		break;
5256	case BNX2X_F_STATE_STARTED:
5257		if (cmd == BNX2X_F_CMD_STOP)
5258			next_state = BNX2X_F_STATE_INITIALIZED;
5259		else if (cmd == BNX2X_F_CMD_TX_STOP)
5260			next_state = BNX2X_F_STATE_TX_STOPPED;
5261
5262		break;
5263	case BNX2X_F_STATE_TX_STOPPED:
5264		if (cmd == BNX2X_F_CMD_TX_START)
5265			next_state = BNX2X_F_STATE_STARTED;
5266
5267		break;
5268	default:
5269		BNX2X_ERR("Unknown state: %d\n", state);
5270	}
5271
5272	/* Transition is assured */
5273	if (next_state != BNX2X_F_STATE_MAX) {
5274		DP(BNX2X_MSG_SP, "Good function state transition: %d(%d)->%d\n",
5275				 state, cmd, next_state);
5276		o->next_state = next_state;
5277		return 0;
5278	}
5279
5280	DP(BNX2X_MSG_SP, "Bad function state transition request: %d %d\n",
5281			 state, cmd);
5282
5283	return -EINVAL;
5284}
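
/*
 * Editorial summary of the function state machine implemented above:
 *
 *	RESET --HW_INIT--> INITIALIZED --START--> STARTED
 *	INITIALIZED --HW_RESET--> RESET
 *	STARTED --STOP--> INITIALIZED
 *	STARTED --TX_STOP--> TX_STOPPED --TX_START--> STARTED
 */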
5285
5286/**
5287 * bnx2x_func_init_func - performs HW init at function stage
5288 *
5289 * @bp:		device handle
5290 * @drv:	driver-specific HW init/reset callbacks
5291 *
5292 * Init HW when the current phase is
5293 * FW_MSG_CODE_DRV_LOAD_FUNCTION: initialize FUNCTION-only
5294 * HW blocks.
5295 */
5296static inline int bnx2x_func_init_func(struct bnx2x *bp,
5297				       const struct bnx2x_func_sp_drv_ops *drv)
5298{
5299	return drv->init_hw_func(bp);
5300}
5301
5302/**
5303 * bnx2x_func_init_port - performs HW init at port stage
5304 *
5305 * @bp:		device handle
5306 * @drv:	driver-specific HW init/reset callbacks
5307 *
5308 * Init HW when the current phase is
5309 * FW_MSG_CODE_DRV_LOAD_PORT: initialize PORT-only and
5310 * FUNCTION-only HW blocks.
5311 *
5312 */
5313static inline int bnx2x_func_init_port(struct bnx2x *bp,
5314				       const struct bnx2x_func_sp_drv_ops *drv)
5315{
5316	int rc = drv->init_hw_port(bp);
5317	if (rc)
5318		return rc;
5319
5320	return bnx2x_func_init_func(bp, drv);
5321}
5322
5323/**
5324 * bnx2x_func_init_cmn_chip - performs HW init at chip-common stage
5325 *
5326 * @bp:		device handle
5327 * @drv:	driver-specific HW init/reset callbacks
5328 *
5329 * Init HW when the current phase is
5330 * FW_MSG_CODE_DRV_LOAD_COMMON_CHIP: initialize COMMON_CHIP,
5331 * PORT-only and FUNCTION-only HW blocks.
5332 */
5333static inline int bnx2x_func_init_cmn_chip(struct bnx2x *bp,
5334					const struct bnx2x_func_sp_drv_ops *drv)
5335{
5336	int rc = drv->init_hw_cmn_chip(bp);
5337	if (rc)
5338		return rc;
5339
5340	return bnx2x_func_init_port(bp, drv);
5341}
5342
5343/**
5344 * bnx2x_func_init_cmn - performs HW init at common stage
5345 *
5346 * @bp:		device handle
5347 * @drv:	driver-specific HW init/reset callbacks
5348 *
5349 * Init HW when the current phase is
5350 * FW_MSG_CODE_DRV_LOAD_COMMON: initialize COMMON,
5351 * PORT-only and FUNCTION-only HW blocks.
5352 */
5353static inline int bnx2x_func_init_cmn(struct bnx2x *bp,
5354				      const struct bnx2x_func_sp_drv_ops *drv)
5355{
5356	int rc = drv->init_hw_cmn(bp);
5357	if (rc)
5358		return rc;
5359
5360	return bnx2x_func_init_port(bp, drv);
5361}
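
/*
 * Editorial note: the init helpers above nest, so each load phase also
 * performs every narrower phase:
 *
 *	LOAD_COMMON_CHIP: cmn_chip -> port -> func
 *	LOAD_COMMON:      cmn      -> port -> func
 *	LOAD_PORT:                     port -> func
 *	LOAD_FUNCTION:                         func
 */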
5362
5363static int bnx2x_func_hw_init(struct bnx2x *bp,
5364			      struct bnx2x_func_state_params *params)
5365{
5366	u32 load_code = params->params.hw_init.load_phase;
5367	struct bnx2x_func_sp_obj *o = params->f_obj;
5368	const struct bnx2x_func_sp_drv_ops *drv = o->drv;
5369	int rc = 0;
5370
5371	DP(BNX2X_MSG_SP, "function %d  load_code %x\n",
5372			 BP_ABS_FUNC(bp), load_code);
5373
5374	/* Prepare buffers for unzipping the FW */
5375	rc = drv->gunzip_init(bp);
5376	if (rc)
5377		return rc;
5378
5379	/* Prepare FW */
5380	rc = drv->init_fw(bp);
5381	if (rc) {
5382		BNX2X_ERR("Error loading firmware\n");
5383		goto init_err;
5384	}
5385
5386	/* Handle the beginning of COMMON_XXX phases separately... */
5387	switch (load_code) {
5388	case FW_MSG_CODE_DRV_LOAD_COMMON_CHIP:
5389		rc = bnx2x_func_init_cmn_chip(bp, drv);
5390		if (rc)
5391			goto init_err;
5392
5393		break;
5394	case FW_MSG_CODE_DRV_LOAD_COMMON:
5395		rc = bnx2x_func_init_cmn(bp, drv);
5396		if (rc)
5397			goto init_err;
5398
5399		break;
5400	case FW_MSG_CODE_DRV_LOAD_PORT:
5401		rc = bnx2x_func_init_port(bp, drv);
5402		if (rc)
5403			goto init_err;
5404
5405		break;
5406	case FW_MSG_CODE_DRV_LOAD_FUNCTION:
5407		rc = bnx2x_func_init_func(bp, drv);
5408		if (rc)
5409			goto init_err;
5410
5411		break;
5412	default:
5413		BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
5414		rc = -EINVAL;
5415	}
5416
5417init_err:
5418	drv->gunzip_end(bp);
5419
5420	/* In case of success, complete the command immediately: no ramrods
5421	 * have been sent.
5422	 */
5423	if (!rc)
5424		o->complete_cmd(bp, o, BNX2X_F_CMD_HW_INIT);
5425
5426	return rc;
5427}
5428
5429/**
5430 * bnx2x_func_reset_func - reset HW at function stage
5431 *
5432 * @bp:		device handle
5433 * @drv:	driver-specific HW init/reset callbacks
5434 *
5435 * Reset HW at FW_MSG_CODE_DRV_UNLOAD_FUNCTION stage: reset only
5436 * FUNCTION-only HW blocks.
5437 */
5438static inline void bnx2x_func_reset_func(struct bnx2x *bp,
5439					const struct bnx2x_func_sp_drv_ops *drv)
5440{
5441	drv->reset_hw_func(bp);
5442}
5443
5444/**
5445 * bnx2x_func_reset_port - reset HW at port stage
5446 *
5447 * @bp:		device handle
5448 * @drv:	driver-specific HW init/reset callbacks
5449 *
5450 * Reset HW at FW_MSG_CODE_DRV_UNLOAD_PORT stage: reset
5451 * FUNCTION-only and PORT-only HW blocks.
5452 *
5453 *                 !!!IMPORTANT!!!
5454 *
5455 * It's important to call reset_port before reset_func() as the last thing
5456 * reset_func() does is pf_disable(), thus disabling PGLUE_B, which
5457 * makes any further DMAE transactions impossible.
5458 */
5459static inline void bnx2x_func_reset_port(struct bnx2x *bp,
5460					const struct bnx2x_func_sp_drv_ops *drv)
5461{
5462	drv->reset_hw_port(bp);
5463	bnx2x_func_reset_func(bp, drv);
5464}
5465
5466/**
5467 * bnx2x_func_reset_cmn - reset HW at common stage
5468 *
5469 * @bp:		device handle
5470 * @drv:	driver-specific HW init/reset callbacks
5471 *
5472 * Reset HW at FW_MSG_CODE_DRV_UNLOAD_COMMON and
5473 * FW_MSG_CODE_DRV_UNLOAD_COMMON_CHIP stages: reset COMMON,
5474 * COMMON_CHIP, FUNCTION-only and PORT-only HW blocks.
5475 */
5476static inline void bnx2x_func_reset_cmn(struct bnx2x *bp,
5477					const struct bnx2x_func_sp_drv_ops *drv)
5478{
5479	bnx2x_func_reset_port(bp, drv);
5480	drv->reset_hw_cmn(bp);
5481}
5482
5483
5484static inline int bnx2x_func_hw_reset(struct bnx2x *bp,
5485				      struct bnx2x_func_state_params *params)
5486{
5487	u32 reset_phase = params->params.hw_reset.reset_phase;
5488	struct bnx2x_func_sp_obj *o = params->f_obj;
5489	const struct bnx2x_func_sp_drv_ops *drv = o->drv;
5490
5491	DP(BNX2X_MSG_SP, "function %d  reset_phase %x\n", BP_ABS_FUNC(bp),
5492			 reset_phase);
5493
5494	switch (reset_phase) {
5495	case FW_MSG_CODE_DRV_UNLOAD_COMMON:
5496		bnx2x_func_reset_cmn(bp, drv);
5497		break;
5498	case FW_MSG_CODE_DRV_UNLOAD_PORT:
5499		bnx2x_func_reset_port(bp, drv);
5500		break;
5501	case FW_MSG_CODE_DRV_UNLOAD_FUNCTION:
5502		bnx2x_func_reset_func(bp, drv);
5503		break;
5504	default:
5505		BNX2X_ERR("Unknown reset_phase (0x%x) from MCP\n",
5506			   reset_phase);
5507		break;
5508	}
5509
5510	/* Complete the command immediately: no ramrods have been sent. */
5511	o->complete_cmd(bp, o, BNX2X_F_CMD_HW_RESET);
5512
5513	return 0;
5514}
5515
5516static inline int bnx2x_func_send_start(struct bnx2x *bp,
5517					struct bnx2x_func_state_params *params)
5518{
5519	struct bnx2x_func_sp_obj *o = params->f_obj;
5520	struct function_start_data *rdata =
5521		(struct function_start_data *)o->rdata;
5522	dma_addr_t data_mapping = o->rdata_mapping;
5523	struct bnx2x_func_start_params *start_params = &params->params.start;
5524
5525	memset(rdata, 0, sizeof(*rdata));
5526
5527	/* Fill the ramrod data with provided parameters */
5528	rdata->function_mode = cpu_to_le16(start_params->mf_mode);
5529	rdata->sd_vlan_tag   = start_params->sd_vlan_tag;
5530	rdata->path_id       = BP_PATH(bp);
5531	rdata->network_cos_mode = start_params->network_cos_mode;
5532
5533	/*
5534	 *  No need for an explicit memory barrier here as long as we
5535	 *  ensure the ordering of writing to the SPQ element and
5536	 *  updating of the SPQ producer, which involves a memory read;
5537	 *  if that read were removed, a full memory barrier would be
5538	 *  needed there (inside bnx2x_sp_post()).
5539	 */
5540
5541	return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_FUNCTION_START, 0,
5542			     U64_HI(data_mapping),
5543			     U64_LO(data_mapping), NONE_CONNECTION_TYPE);
5544}
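
/*
 * Example (a minimal sketch; the values mirror what a caller would
 * plausibly pass, and bp->mf_mode/bp->mf_ov are driver-handle fields
 * assumed to exist): filling struct bnx2x_func_start_params before
 * issuing BNX2X_F_CMD_START, which lands in bnx2x_func_send_start()
 * above:
 *
 *	struct bnx2x_func_state_params func_params = {NULL};
 *	struct bnx2x_func_start_params *start_params =
 *		&func_params.params.start;
 *
 *	func_params.f_obj = &bp->func_obj;
 *	func_params.cmd = BNX2X_F_CMD_START;
 *
 *	start_params->mf_mode = bp->mf_mode;
 *	start_params->sd_vlan_tag = bp->mf_ov;
 *	start_params->network_cos_mode = 0;
 *
 *	bnx2x_func_state_change(bp, &func_params);
 */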

static inline int bnx2x_func_send_stop(struct bnx2x *bp,
				       struct bnx2x_func_state_params *params)
{
	return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_FUNCTION_STOP, 0, 0, 0,
			     NONE_CONNECTION_TYPE);
}

static inline int bnx2x_func_send_tx_stop(struct bnx2x *bp,
				       struct bnx2x_func_state_params *params)
{
	return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_STOP_TRAFFIC, 0, 0, 0,
			     NONE_CONNECTION_TYPE);
}

static inline int bnx2x_func_send_tx_start(struct bnx2x *bp,
				       struct bnx2x_func_state_params *params)
{
	struct bnx2x_func_sp_obj *o = params->f_obj;
	struct flow_control_configuration *rdata =
		(struct flow_control_configuration *)o->rdata;
	dma_addr_t data_mapping = o->rdata_mapping;
	struct bnx2x_func_tx_start_params *tx_start_params =
		&params->params.tx_start;
	int i;

	memset(rdata, 0, sizeof(*rdata));

	rdata->dcb_enabled = tx_start_params->dcb_enabled;
	rdata->dcb_version = tx_start_params->dcb_version;
	rdata->dont_add_pri_0_en = tx_start_params->dont_add_pri_0_en;

	for (i = 0; i < ARRAY_SIZE(rdata->traffic_type_to_priority_cos); i++)
		rdata->traffic_type_to_priority_cos[i] =
			tx_start_params->traffic_type_to_priority_cos[i];

	return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_START_TRAFFIC, 0,
			     U64_HI(data_mapping),
			     U64_LO(data_mapping), NONE_CONNECTION_TYPE);
}
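
/*
 * Example (a minimal sketch; the DCB values are assumptions for
 * illustration): preparing a TX_START request that maps every traffic
 * type to COS 0. The array copy in bnx2x_func_send_tx_start() above
 * carries this mapping to the FW:
 *
 *	struct bnx2x_func_state_params func_params = {NULL};
 *	struct bnx2x_func_tx_start_params *tx_params =
 *		&func_params.params.tx_start;
 *	int i;
 *
 *	func_params.f_obj = &bp->func_obj;
 *	func_params.cmd = BNX2X_F_CMD_TX_START;
 *
 *	tx_params->dcb_enabled = 1;
 *	tx_params->dcb_version = 0;
 *	tx_params->dont_add_pri_0_en = 0;
 *	for (i = 0;
 *	     i < ARRAY_SIZE(tx_params->traffic_type_to_priority_cos); i++)
 *		tx_params->traffic_type_to_priority_cos[i] = 0;
 *
 *	bnx2x_func_state_change(bp, &func_params);
 */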

static int bnx2x_func_send_cmd(struct bnx2x *bp,
			       struct bnx2x_func_state_params *params)
{
	switch (params->cmd) {
	case BNX2X_F_CMD_HW_INIT:
		return bnx2x_func_hw_init(bp, params);
	case BNX2X_F_CMD_START:
		return bnx2x_func_send_start(bp, params);
	case BNX2X_F_CMD_STOP:
		return bnx2x_func_send_stop(bp, params);
	case BNX2X_F_CMD_HW_RESET:
		return bnx2x_func_hw_reset(bp, params);
	case BNX2X_F_CMD_TX_STOP:
		return bnx2x_func_send_tx_stop(bp, params);
	case BNX2X_F_CMD_TX_START:
		return bnx2x_func_send_tx_start(bp, params);
	default:
		BNX2X_ERR("Unknown command: %d\n", params->cmd);
		return -EINVAL;
	}
}

void bnx2x_init_func_obj(struct bnx2x *bp,
			 struct bnx2x_func_sp_obj *obj,
			 void *rdata, dma_addr_t rdata_mapping,
			 struct bnx2x_func_sp_drv_ops *drv_iface)
{
	memset(obj, 0, sizeof(*obj));

	mutex_init(&obj->one_pending_mutex);

	obj->rdata = rdata;
	obj->rdata_mapping = rdata_mapping;

	obj->send_cmd = bnx2x_func_send_cmd;
	obj->check_transition = bnx2x_func_chk_transition;
	obj->complete_cmd = bnx2x_func_comp_cmd;
	obj->wait_comp = bnx2x_func_wait_comp;

	obj->drv = drv_iface;
}
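
/*
 * Example (a sketch of the expected wiring; bnx2x_sp(), bnx2x_sp_mapping(),
 * func_rdata and bnx2x_func_sp_drv are assumed to be provided elsewhere in
 * the driver): the function object is set up once at init time with a
 * DMA-coherent ramrod data buffer and the driver's bnx2x_func_sp_drv_ops
 * instance:
 *
 *	bnx2x_init_func_obj(bp, &bp->func_obj,
 *			    bnx2x_sp(bp, func_rdata),
 *			    bnx2x_sp_mapping(bp, func_rdata),
 *			    &bnx2x_func_sp_drv);
 */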

/**
 * bnx2x_func_state_change - perform Function state change transition
 *
 * @bp:		device handle
 * @params:	parameters to perform the transition
 *
 * Returns 0 in case of a successfully completed transition,
 * a negative error code in case of failure, or a positive
 * (EBUSY) value if there is a completion that is still
 * pending (possible only if RAMROD_COMP_WAIT is not set in
 * params->ramrod_flags for asynchronous commands).
 */
int bnx2x_func_state_change(struct bnx2x *bp,
			    struct bnx2x_func_state_params *params)
{
	struct bnx2x_func_sp_obj *o = params->f_obj;
	int rc;
	enum bnx2x_func_cmd cmd = params->cmd;
	unsigned long *pending = &o->pending;

	mutex_lock(&o->one_pending_mutex);

	/* Check that the requested transition is legal */
	if (o->check_transition(bp, o, params)) {
		mutex_unlock(&o->one_pending_mutex);
		return -EINVAL;
	}

	/* Set "pending" bit */
	set_bit(cmd, pending);

	/* Don't send a command if only driver cleanup was requested */
	if (test_bit(RAMROD_DRV_CLR_ONLY, &params->ramrod_flags)) {
		bnx2x_func_state_change_comp(bp, o, cmd);
		mutex_unlock(&o->one_pending_mutex);
	} else {
		/* Send a ramrod */
		rc = o->send_cmd(bp, params);

		mutex_unlock(&o->one_pending_mutex);

		if (rc) {
			o->next_state = BNX2X_F_STATE_MAX;
			clear_bit(cmd, pending);
			smp_mb__after_clear_bit();
			return rc;
		}

		if (test_bit(RAMROD_COMP_WAIT, &params->ramrod_flags)) {
			rc = o->wait_comp(bp, o, cmd);
			if (rc)
				return rc;

			return 0;
		}
	}

	return !!test_bit(cmd, pending);
}

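/*
 * Example (a minimal sketch): a synchronous FUNCTION_STOP. With
 * RAMROD_COMP_WAIT set, bnx2x_func_state_change() blocks in wait_comp()
 * until the ramrod completion arrives, so a zero return means the
 * function is really stopped:
 *
 *	struct bnx2x_func_state_params func_params = {NULL};
 *	int rc;
 *
 *	__set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
 *	func_params.f_obj = &bp->func_obj;
 *	func_params.cmd = BNX2X_F_CMD_STOP;
 *
 *	rc = bnx2x_func_state_change(bp, &func_params);
 *	if (rc)
 *		BNX2X_ERR("FUNCTION_STOP failed: %d\n", rc);
 */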