1/*
2 * Copyright (c) 2008-2011, Intel Corporation.
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License,
6 * version 2, as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
11 * more details.
12 *
13 * You should have received a copy of the GNU General Public License along with
14 * this program; if not, see <http://www.gnu.org/licenses/>.
15 *
16 * Author: Lucy Liu <lucy.liu@intel.com>
17 */
18
19#include <linux/netdevice.h>
20#include <linux/netlink.h>
21#include <linux/slab.h>
22#include <net/netlink.h>
23#include <net/rtnetlink.h>
24#include <linux/dcbnl.h>
25#include <net/dcbevent.h>
26#include <linux/rtnetlink.h>
27#include <linux/module.h>
28#include <net/sock.h>
29
30/* Data Center Bridging (DCB) is a collection of Ethernet enhancements
31 * intended to allow network traffic with differing requirements
32 * (highly reliable, no drops vs. best effort vs. low latency) to operate
33 * and co-exist on Ethernet.  Current DCB features are:
34 *
35 * Enhanced Transmission Selection (aka Priority Grouping [PG]) - provides a
36 *   framework for assigning bandwidth guarantees to traffic classes.
37 *
38 * Priority-based Flow Control (PFC) - provides a flow control mechanism which
39 *   can work independently for each 802.1p priority.
40 *
41 * Congestion Notification - provides a mechanism for end-to-end congestion
42 *   control for protocols which do not have built-in congestion management.
43 *
44 * More information about the emerging standards for these Ethernet features
45 * can be found at: http://www.ieee802.org/1/pages/dcbridges.html
46 *
47 * This file implements an rtnetlink interface to allow configuration of DCB
48 * features for capable devices.
49 */
50
51MODULE_AUTHOR("Lucy Liu, <lucy.liu@intel.com>");
52MODULE_DESCRIPTION("Data Center Bridging netlink interface");
53MODULE_LICENSE("GPL");
54
55/**************** DCB attribute policies *************************************/
56
/* DCB netlink attributes policy.
 * Validates the top-level DCB_ATTR_* attributes of every DCB request;
 * nested attributes are re-validated by their own per-table policies below.
 * IFNAME must be a NUL-terminated string of at most IFNAMSIZ - 1 chars.
 */
static const struct nla_policy dcbnl_rtnl_policy[DCB_ATTR_MAX + 1] = {
	[DCB_ATTR_IFNAME]      = {.type = NLA_NUL_STRING, .len = IFNAMSIZ - 1},
	[DCB_ATTR_STATE]       = {.type = NLA_U8},
	[DCB_ATTR_PFC_CFG]     = {.type = NLA_NESTED},
	[DCB_ATTR_PG_CFG]      = {.type = NLA_NESTED},
	[DCB_ATTR_SET_ALL]     = {.type = NLA_U8},
	[DCB_ATTR_PERM_HWADDR] = {.type = NLA_FLAG},
	[DCB_ATTR_CAP]         = {.type = NLA_NESTED},
	[DCB_ATTR_PFC_STATE]   = {.type = NLA_U8},
	[DCB_ATTR_BCN]         = {.type = NLA_NESTED},
	[DCB_ATTR_APP]         = {.type = NLA_NESTED},
	[DCB_ATTR_IEEE]	       = {.type = NLA_NESTED},
	[DCB_ATTR_DCBX]        = {.type = NLA_U8},
	[DCB_ATTR_FEATCFG]     = {.type = NLA_NESTED},
};
73
/* DCB priority flow control to User Priority nested attributes.
 * One u8 per 802.1p user priority; the ALL flag requests every entry.
 */
static const struct nla_policy dcbnl_pfc_up_nest[DCB_PFC_UP_ATTR_MAX + 1] = {
	[DCB_PFC_UP_ATTR_0]   = {.type = NLA_U8},
	[DCB_PFC_UP_ATTR_1]   = {.type = NLA_U8},
	[DCB_PFC_UP_ATTR_2]   = {.type = NLA_U8},
	[DCB_PFC_UP_ATTR_3]   = {.type = NLA_U8},
	[DCB_PFC_UP_ATTR_4]   = {.type = NLA_U8},
	[DCB_PFC_UP_ATTR_5]   = {.type = NLA_U8},
	[DCB_PFC_UP_ATTR_6]   = {.type = NLA_U8},
	[DCB_PFC_UP_ATTR_7]   = {.type = NLA_U8},
	[DCB_PFC_UP_ATTR_ALL] = {.type = NLA_FLAG},
};
86
/* DCB priority grouping nested attributes.
 * TC_n entries nest per-traffic-class parameters (dcbnl_tc_param_nest);
 * BW_ID_n entries carry per-bandwidth-group percentages as u8.
 */
static const struct nla_policy dcbnl_pg_nest[DCB_PG_ATTR_MAX + 1] = {
	[DCB_PG_ATTR_TC_0]      = {.type = NLA_NESTED},
	[DCB_PG_ATTR_TC_1]      = {.type = NLA_NESTED},
	[DCB_PG_ATTR_TC_2]      = {.type = NLA_NESTED},
	[DCB_PG_ATTR_TC_3]      = {.type = NLA_NESTED},
	[DCB_PG_ATTR_TC_4]      = {.type = NLA_NESTED},
	[DCB_PG_ATTR_TC_5]      = {.type = NLA_NESTED},
	[DCB_PG_ATTR_TC_6]      = {.type = NLA_NESTED},
	[DCB_PG_ATTR_TC_7]      = {.type = NLA_NESTED},
	[DCB_PG_ATTR_TC_ALL]    = {.type = NLA_NESTED},
	[DCB_PG_ATTR_BW_ID_0]   = {.type = NLA_U8},
	[DCB_PG_ATTR_BW_ID_1]   = {.type = NLA_U8},
	[DCB_PG_ATTR_BW_ID_2]   = {.type = NLA_U8},
	[DCB_PG_ATTR_BW_ID_3]   = {.type = NLA_U8},
	[DCB_PG_ATTR_BW_ID_4]   = {.type = NLA_U8},
	[DCB_PG_ATTR_BW_ID_5]   = {.type = NLA_U8},
	[DCB_PG_ATTR_BW_ID_6]   = {.type = NLA_U8},
	[DCB_PG_ATTR_BW_ID_7]   = {.type = NLA_U8},
	[DCB_PG_ATTR_BW_ID_ALL] = {.type = NLA_FLAG},
};
108
/* DCB traffic class nested attributes (parameters of one TC inside a
 * DCB_PG_ATTR_TC_n nest).
 */
static const struct nla_policy dcbnl_tc_param_nest[DCB_TC_ATTR_PARAM_MAX + 1] = {
	[DCB_TC_ATTR_PARAM_PGID]            = {.type = NLA_U8},
	[DCB_TC_ATTR_PARAM_UP_MAPPING]      = {.type = NLA_U8},
	[DCB_TC_ATTR_PARAM_STRICT_PRIO]     = {.type = NLA_U8},
	[DCB_TC_ATTR_PARAM_BW_PCT]          = {.type = NLA_U8},
	[DCB_TC_ATTR_PARAM_ALL]             = {.type = NLA_FLAG},
};
117
/* DCB capabilities nested attributes. */
static const struct nla_policy dcbnl_cap_nest[DCB_CAP_ATTR_MAX + 1] = {
	[DCB_CAP_ATTR_ALL]     = {.type = NLA_FLAG},
	[DCB_CAP_ATTR_PG]      = {.type = NLA_U8},
	[DCB_CAP_ATTR_PFC]     = {.type = NLA_U8},
	[DCB_CAP_ATTR_UP2TC]   = {.type = NLA_U8},
	[DCB_CAP_ATTR_PG_TCS]  = {.type = NLA_U8},
	[DCB_CAP_ATTR_PFC_TCS] = {.type = NLA_U8},
	[DCB_CAP_ATTR_GSP]     = {.type = NLA_U8},
	[DCB_CAP_ATTR_BCN]     = {.type = NLA_U8},
	[DCB_CAP_ATTR_DCBX]    = {.type = NLA_U8},
};
130
/* DCB number of traffic classes nested attributes. */
static const struct nla_policy dcbnl_numtcs_nest[DCB_NUMTCS_ATTR_MAX + 1] = {
	[DCB_NUMTCS_ATTR_ALL]     = {.type = NLA_FLAG},
	[DCB_NUMTCS_ATTR_PG]      = {.type = NLA_U8},
	[DCB_NUMTCS_ATTR_PFC]     = {.type = NLA_U8},
};
137
/* DCB BCN (Backward Congestion Notification) nested attributes.
 * RP_n entries are per-priority u8 values; the remaining entries are
 * u32 BCN configuration parameters.
 */
static const struct nla_policy dcbnl_bcn_nest[DCB_BCN_ATTR_MAX + 1] = {
	[DCB_BCN_ATTR_RP_0]         = {.type = NLA_U8},
	[DCB_BCN_ATTR_RP_1]         = {.type = NLA_U8},
	[DCB_BCN_ATTR_RP_2]         = {.type = NLA_U8},
	[DCB_BCN_ATTR_RP_3]         = {.type = NLA_U8},
	[DCB_BCN_ATTR_RP_4]         = {.type = NLA_U8},
	[DCB_BCN_ATTR_RP_5]         = {.type = NLA_U8},
	[DCB_BCN_ATTR_RP_6]         = {.type = NLA_U8},
	[DCB_BCN_ATTR_RP_7]         = {.type = NLA_U8},
	[DCB_BCN_ATTR_RP_ALL]       = {.type = NLA_FLAG},
	[DCB_BCN_ATTR_BCNA_0]       = {.type = NLA_U32},
	[DCB_BCN_ATTR_BCNA_1]       = {.type = NLA_U32},
	[DCB_BCN_ATTR_ALPHA]        = {.type = NLA_U32},
	[DCB_BCN_ATTR_BETA]         = {.type = NLA_U32},
	[DCB_BCN_ATTR_GD]           = {.type = NLA_U32},
	[DCB_BCN_ATTR_GI]           = {.type = NLA_U32},
	[DCB_BCN_ATTR_TMAX]         = {.type = NLA_U32},
	[DCB_BCN_ATTR_TD]           = {.type = NLA_U32},
	[DCB_BCN_ATTR_RMIN]         = {.type = NLA_U32},
	[DCB_BCN_ATTR_W]            = {.type = NLA_U32},
	[DCB_BCN_ATTR_RD]           = {.type = NLA_U32},
	[DCB_BCN_ATTR_RU]           = {.type = NLA_U32},
	[DCB_BCN_ATTR_WRTT]         = {.type = NLA_U32},
	[DCB_BCN_ATTR_RI]           = {.type = NLA_U32},
	[DCB_BCN_ATTR_C]            = {.type = NLA_U32},
	[DCB_BCN_ATTR_ALL]          = {.type = NLA_FLAG},
};
166
/* DCB APP nested attributes (CEE application priority table entry). */
static const struct nla_policy dcbnl_app_nest[DCB_APP_ATTR_MAX + 1] = {
	[DCB_APP_ATTR_IDTYPE]       = {.type = NLA_U8},
	[DCB_APP_ATTR_ID]           = {.type = NLA_U16},
	[DCB_APP_ATTR_PRIORITY]     = {.type = NLA_U8},
};
173
/* IEEE 802.1Qaz nested attributes.
 * ETS/PFC/MAXRATE are passed as raw binary structs, validated by length.
 */
static const struct nla_policy dcbnl_ieee_policy[DCB_ATTR_IEEE_MAX + 1] = {
	[DCB_ATTR_IEEE_ETS]	    = {.len = sizeof(struct ieee_ets)},
	[DCB_ATTR_IEEE_PFC]	    = {.len = sizeof(struct ieee_pfc)},
	[DCB_ATTR_IEEE_APP_TABLE]   = {.type = NLA_NESTED},
	[DCB_ATTR_IEEE_MAXRATE]   = {.len = sizeof(struct ieee_maxrate)},
};
181
/* Entries inside DCB_ATTR_IEEE_APP_TABLE: raw struct dcb_app blobs. */
static const struct nla_policy dcbnl_ieee_app[DCB_ATTR_IEEE_APP_MAX + 1] = {
	[DCB_ATTR_IEEE_APP]	    = {.len = sizeof(struct dcb_app)},
};
185
/* DCB feature configuration nested attributes. */
static const struct nla_policy dcbnl_featcfg_nest[DCB_FEATCFG_ATTR_MAX + 1] = {
	[DCB_FEATCFG_ATTR_ALL]      = {.type = NLA_FLAG},
	[DCB_FEATCFG_ATTR_PG]       = {.type = NLA_U8},
	[DCB_FEATCFG_ATTR_PFC]      = {.type = NLA_U8},
	[DCB_FEATCFG_ATTR_APP]      = {.type = NLA_U8},
};
193
/* Global list of stored application priority entries (struct dcb_app_type),
 * shared across devices and protected by dcb_lock.
 */
static LIST_HEAD(dcb_app_list);
static DEFINE_SPINLOCK(dcb_lock);
196
/* Allocate a new netlink reply skb and initialize its dcbmsg header.
 * @type:  netlink message type (RTM_GETDCB/RTM_SETDCB)
 * @cmd:   DCB_CMD_* command echoed back to userspace
 * @port:  destination netlink port id
 * @seq:   sequence number to echo
 * @flags: netlink message flags
 * @nlhp:  optional out-pointer receiving the message header
 *
 * Returns the skb, or NULL on allocation failure.
 */
static struct sk_buff *dcbnl_newmsg(int type, u8 cmd, u32 port, u32 seq,
				    u32 flags, struct nlmsghdr **nlhp)
{
	struct sk_buff *skb;
	struct dcbmsg *dcb;
	struct nlmsghdr *nlh;

	skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!skb)
		return NULL;

	/* Cannot fail: a freshly allocated default-sized skb always has
	 * room for the header plus a dcbmsg payload.
	 */
	nlh = nlmsg_put(skb, port, seq, type, sizeof(*dcb), flags);
	BUG_ON(!nlh);

	dcb = nlmsg_data(nlh);
	dcb->dcb_family = AF_UNSPEC;
	dcb->cmd = cmd;
	dcb->dcb_pad = 0;

	if (nlhp)
		*nlhp = nlh;

	return skb;
}
221
222static int dcbnl_getstate(struct net_device *netdev, struct nlmsghdr *nlh,
223			  u32 seq, struct nlattr **tb, struct sk_buff *skb)
224{
225	/* if (!tb[DCB_ATTR_STATE] || !netdev->dcbnl_ops->getstate) */
226	if (!netdev->dcbnl_ops->getstate)
227		return -EOPNOTSUPP;
228
229	return nla_put_u8(skb, DCB_ATTR_STATE,
230			  netdev->dcbnl_ops->getstate(netdev));
231}
232
/* DCB_CMD_PFC_GCFG: report the PFC configuration of the user priorities
 * requested in the DCB_ATTR_PFC_CFG nest (or all of them if the ALL flag
 * is present). Replies with a matching nest of u8 values.
 */
static int dcbnl_getpfccfg(struct net_device *netdev, struct nlmsghdr *nlh,
			   u32 seq, struct nlattr **tb, struct sk_buff *skb)
{
	struct nlattr *data[DCB_PFC_UP_ATTR_MAX + 1], *nest;
	u8 value;
	int ret;
	int i;
	int getall = 0;

	if (!tb[DCB_ATTR_PFC_CFG])
		return -EINVAL;

	if (!netdev->dcbnl_ops->getpfccfg)
		return -EOPNOTSUPP;

	ret = nla_parse_nested(data, DCB_PFC_UP_ATTR_MAX,
	                       tb[DCB_ATTR_PFC_CFG],
	                       dcbnl_pfc_up_nest);
	if (ret)
		return ret;

	nest = nla_nest_start(skb, DCB_ATTR_PFC_CFG);
	if (!nest)
		return -EMSGSIZE;

	if (data[DCB_PFC_UP_ATTR_ALL])
		getall = 1;

	for (i = DCB_PFC_UP_ATTR_0; i <= DCB_PFC_UP_ATTR_7; i++) {
		if (!getall && !data[i])
			continue;

		/* i - DCB_PFC_UP_ATTR_0 is the 802.1p user priority index */
		netdev->dcbnl_ops->getpfccfg(netdev, i - DCB_PFC_UP_ATTR_0,
		                             &value);
		ret = nla_put_u8(skb, i, value);
		if (ret) {
			nla_nest_cancel(skb, nest);
			return ret;
		}
	}
	nla_nest_end(skb, nest);

	return 0;
}
277
278static int dcbnl_getperm_hwaddr(struct net_device *netdev, struct nlmsghdr *nlh,
279				u32 seq, struct nlattr **tb, struct sk_buff *skb)
280{
281	u8 perm_addr[MAX_ADDR_LEN];
282
283	if (!netdev->dcbnl_ops->getpermhwaddr)
284		return -EOPNOTSUPP;
285
286	memset(perm_addr, 0, sizeof(perm_addr));
287	netdev->dcbnl_ops->getpermhwaddr(netdev, perm_addr);
288
289	return nla_put(skb, DCB_ATTR_PERM_HWADDR, sizeof(perm_addr), perm_addr);
290}
291
/* DCB_CMD_GCAP: report the device DCB capabilities requested in the
 * DCB_ATTR_CAP nest (or all of them if DCB_CAP_ATTR_ALL is present).
 * Capabilities the driver rejects are silently omitted from the reply.
 */
static int dcbnl_getcap(struct net_device *netdev, struct nlmsghdr *nlh,
			u32 seq, struct nlattr **tb, struct sk_buff *skb)
{
	struct nlattr *data[DCB_CAP_ATTR_MAX + 1], *nest;
	u8 value;
	int ret;
	int i;
	int getall = 0;

	if (!tb[DCB_ATTR_CAP])
		return -EINVAL;

	if (!netdev->dcbnl_ops->getcap)
		return -EOPNOTSUPP;

	ret = nla_parse_nested(data, DCB_CAP_ATTR_MAX, tb[DCB_ATTR_CAP],
	                       dcbnl_cap_nest);
	if (ret)
		return ret;

	nest = nla_nest_start(skb, DCB_ATTR_CAP);
	if (!nest)
		return -EMSGSIZE;

	if (data[DCB_CAP_ATTR_ALL])
		getall = 1;

	for (i = DCB_CAP_ATTR_ALL+1; i <= DCB_CAP_ATTR_MAX; i++) {
		if (!getall && !data[i])
			continue;

		/* getcap returning non-zero means "capability not supported";
		 * skip it rather than failing the whole request.
		 */
		if (!netdev->dcbnl_ops->getcap(netdev, i, &value)) {
			ret = nla_put_u8(skb, i, value);
			if (ret) {
				nla_nest_cancel(skb, nest);
				return ret;
			}
		}
	}
	nla_nest_end(skb, nest);

	return 0;
}
335
336static int dcbnl_getnumtcs(struct net_device *netdev, struct nlmsghdr *nlh,
337			   u32 seq, struct nlattr **tb, struct sk_buff *skb)
338{
339	struct nlattr *data[DCB_NUMTCS_ATTR_MAX + 1], *nest;
340	u8 value;
341	int ret;
342	int i;
343	int getall = 0;
344
345	if (!tb[DCB_ATTR_NUMTCS])
346		return -EINVAL;
347
348	if (!netdev->dcbnl_ops->getnumtcs)
349		return -EOPNOTSUPP;
350
351	ret = nla_parse_nested(data, DCB_NUMTCS_ATTR_MAX, tb[DCB_ATTR_NUMTCS],
352	                       dcbnl_numtcs_nest);
353	if (ret)
354		return ret;
355
356	nest = nla_nest_start(skb, DCB_ATTR_NUMTCS);
357	if (!nest)
358		return -EMSGSIZE;
359
360	if (data[DCB_NUMTCS_ATTR_ALL])
361		getall = 1;
362
363	for (i = DCB_NUMTCS_ATTR_ALL+1; i <= DCB_NUMTCS_ATTR_MAX; i++) {
364		if (!getall && !data[i])
365			continue;
366
367		ret = netdev->dcbnl_ops->getnumtcs(netdev, i, &value);
368		if (!ret) {
369			ret = nla_put_u8(skb, i, value);
370			if (ret) {
371				nla_nest_cancel(skb, nest);
372				return ret;
373			}
374		} else
375			return -EINVAL;
376	}
377	nla_nest_end(skb, nest);
378
379	return 0;
380}
381
/* DCB_CMD_SNUMTCS: set the number of traffic classes for each feature
 * present in the DCB_ATTR_NUMTCS nest. Stops at the first driver failure
 * and replies with a u8 status (0 = success, 1 = failure).
 */
static int dcbnl_setnumtcs(struct net_device *netdev, struct nlmsghdr *nlh,
			   u32 seq, struct nlattr **tb, struct sk_buff *skb)
{
	struct nlattr *data[DCB_NUMTCS_ATTR_MAX + 1];
	int ret;
	u8 value;
	int i;

	if (!tb[DCB_ATTR_NUMTCS])
		return -EINVAL;

	if (!netdev->dcbnl_ops->setnumtcs)
		return -EOPNOTSUPP;

	ret = nla_parse_nested(data, DCB_NUMTCS_ATTR_MAX, tb[DCB_ATTR_NUMTCS],
	                       dcbnl_numtcs_nest);
	if (ret)
		return ret;

	for (i = DCB_NUMTCS_ATTR_ALL+1; i <= DCB_NUMTCS_ATTR_MAX; i++) {
		if (data[i] == NULL)
			continue;

		value = nla_get_u8(data[i]);

		ret = netdev->dcbnl_ops->setnumtcs(netdev, i, value);
		if (ret)
			break;
	}

	/* !!ret collapses any driver error to the 1-byte status code */
	return nla_put_u8(skb, DCB_ATTR_NUMTCS, !!ret);
}
414
415static int dcbnl_getpfcstate(struct net_device *netdev, struct nlmsghdr *nlh,
416			     u32 seq, struct nlattr **tb, struct sk_buff *skb)
417{
418	if (!netdev->dcbnl_ops->getpfcstate)
419		return -EOPNOTSUPP;
420
421	return nla_put_u8(skb, DCB_ATTR_PFC_STATE,
422			  netdev->dcbnl_ops->getpfcstate(netdev));
423}
424
425static int dcbnl_setpfcstate(struct net_device *netdev, struct nlmsghdr *nlh,
426			     u32 seq, struct nlattr **tb, struct sk_buff *skb)
427{
428	u8 value;
429
430	if (!tb[DCB_ATTR_PFC_STATE])
431		return -EINVAL;
432
433	if (!netdev->dcbnl_ops->setpfcstate)
434		return -EOPNOTSUPP;
435
436	value = nla_get_u8(tb[DCB_ATTR_PFC_STATE]);
437
438	netdev->dcbnl_ops->setpfcstate(netdev, value);
439
440	return nla_put_u8(skb, DCB_ATTR_PFC_STATE, 0);
441}
442
/* DCB_CMD_GAPP: look up the priority for an application identified by
 * (idtype, id) — either an ethertype or a TCP/UDP port number. Uses the
 * driver's getapp callback when available, otherwise falls back to the
 * core dcb_getapp() table. Replies with a DCB_ATTR_APP nest echoing the
 * id plus the resolved priority.
 */
static int dcbnl_getapp(struct net_device *netdev, struct nlmsghdr *nlh,
			u32 seq, struct nlattr **tb, struct sk_buff *skb)
{
	struct nlattr *app_nest;
	struct nlattr *app_tb[DCB_APP_ATTR_MAX + 1];
	u16 id;
	u8 up, idtype;
	int ret;

	if (!tb[DCB_ATTR_APP])
		return -EINVAL;

	ret = nla_parse_nested(app_tb, DCB_APP_ATTR_MAX, tb[DCB_ATTR_APP],
	                       dcbnl_app_nest);
	if (ret)
		return ret;

	/* all must be non-null */
	if ((!app_tb[DCB_APP_ATTR_IDTYPE]) ||
	    (!app_tb[DCB_APP_ATTR_ID]))
		return -EINVAL;

	/* either by eth type or by socket number */
	idtype = nla_get_u8(app_tb[DCB_APP_ATTR_IDTYPE]);
	if ((idtype != DCB_APP_IDTYPE_ETHTYPE) &&
	    (idtype != DCB_APP_IDTYPE_PORTNUM))
		return -EINVAL;

	id = nla_get_u16(app_tb[DCB_APP_ATTR_ID]);

	if (netdev->dcbnl_ops->getapp) {
		/* driver returns the priority, or a negative errno */
		ret = netdev->dcbnl_ops->getapp(netdev, idtype, id);
		if (ret < 0)
			return ret;
		else
			up = ret;
	} else {
		struct dcb_app app = {
					.selector = idtype,
					.protocol = id,
				     };
		up = dcb_getapp(netdev, &app);
	}

	app_nest = nla_nest_start(skb, DCB_ATTR_APP);
	if (!app_nest)
		return -EMSGSIZE;

	ret = nla_put_u8(skb, DCB_APP_ATTR_IDTYPE, idtype);
	if (ret)
		goto out_cancel;

	ret = nla_put_u16(skb, DCB_APP_ATTR_ID, id);
	if (ret)
		goto out_cancel;

	ret = nla_put_u8(skb, DCB_APP_ATTR_PRIORITY, up);
	if (ret)
		goto out_cancel;

	nla_nest_end(skb, app_nest);

	return 0;

out_cancel:
	nla_nest_cancel(skb, app_nest);
	return ret;
}
511
/* DCB_CMD_SAPP: set the priority for an application identified by
 * (idtype, id). Uses the driver's setapp callback when available,
 * otherwise stores the mapping in the core dcb_setapp() table. Replies
 * with a status byte and emits a CEE notification.
 */
static int dcbnl_setapp(struct net_device *netdev, struct nlmsghdr *nlh,
			u32 seq, struct nlattr **tb, struct sk_buff *skb)
{
	int ret;
	u16 id;
	u8 up, idtype;
	struct nlattr *app_tb[DCB_APP_ATTR_MAX + 1];

	if (!tb[DCB_ATTR_APP])
		return -EINVAL;

	ret = nla_parse_nested(app_tb, DCB_APP_ATTR_MAX, tb[DCB_ATTR_APP],
	                       dcbnl_app_nest);
	if (ret)
		return ret;

	/* all must be non-null */
	if ((!app_tb[DCB_APP_ATTR_IDTYPE]) ||
	    (!app_tb[DCB_APP_ATTR_ID]) ||
	    (!app_tb[DCB_APP_ATTR_PRIORITY]))
		return -EINVAL;

	/* either by eth type or by socket number */
	idtype = nla_get_u8(app_tb[DCB_APP_ATTR_IDTYPE]);
	if ((idtype != DCB_APP_IDTYPE_ETHTYPE) &&
	    (idtype != DCB_APP_IDTYPE_PORTNUM))
		return -EINVAL;

	id = nla_get_u16(app_tb[DCB_APP_ATTR_ID]);
	up = nla_get_u8(app_tb[DCB_APP_ATTR_PRIORITY]);

	if (netdev->dcbnl_ops->setapp) {
		ret = netdev->dcbnl_ops->setapp(netdev, idtype, id, up);
		if (ret < 0)
			return ret;
	} else {
		struct dcb_app app;
		app.selector = idtype;
		app.protocol = id;
		app.priority = up;
		ret = dcb_setapp(netdev, &app);
	}

	/* echo the driver status back, then notify CEE listeners */
	ret = nla_put_u8(skb, DCB_ATTR_APP, ret);
	dcbnl_cee_notify(netdev, RTM_SETDCB, DCB_CMD_SAPP, seq, 0);

	return ret;
}
560
/* Common worker for DCB_CMD_PGTX_GCFG / DCB_CMD_PGRX_GCFG.
 * Builds a DCB_ATTR_PG_CFG nest containing, for each requested traffic
 * class, the per-TC parameters (PGID, UP mapping, strict priority,
 * bandwidth percent), followed by the requested per-group bandwidth
 * percentages.
 * @dir: 0 = Tx configuration, non-zero = Rx configuration.
 */
static int __dcbnl_pg_getcfg(struct net_device *netdev, struct nlmsghdr *nlh,
			     struct nlattr **tb, struct sk_buff *skb, int dir)
{
	struct nlattr *pg_nest, *param_nest, *data;
	struct nlattr *pg_tb[DCB_PG_ATTR_MAX + 1];
	struct nlattr *param_tb[DCB_TC_ATTR_PARAM_MAX + 1];
	u8 prio, pgid, tc_pct, up_map;
	int ret;
	int getall = 0;
	int i;

	if (!tb[DCB_ATTR_PG_CFG])
		return -EINVAL;

	if (!netdev->dcbnl_ops->getpgtccfgtx ||
	    !netdev->dcbnl_ops->getpgtccfgrx ||
	    !netdev->dcbnl_ops->getpgbwgcfgtx ||
	    !netdev->dcbnl_ops->getpgbwgcfgrx)
		return -EOPNOTSUPP;

	ret = nla_parse_nested(pg_tb, DCB_PG_ATTR_MAX,
	                       tb[DCB_ATTR_PG_CFG], dcbnl_pg_nest);
	if (ret)
		return ret;

	pg_nest = nla_nest_start(skb, DCB_ATTR_PG_CFG);
	if (!pg_nest)
		return -EMSGSIZE;

	if (pg_tb[DCB_PG_ATTR_TC_ALL])
		getall = 1;

	for (i = DCB_PG_ATTR_TC_0; i <= DCB_PG_ATTR_TC_7; i++) {
		if (!getall && !pg_tb[i])
			continue;

		/* When TC_ALL was given, its nested parameter selection
		 * applies to every traffic class.
		 */
		if (pg_tb[DCB_PG_ATTR_TC_ALL])
			data = pg_tb[DCB_PG_ATTR_TC_ALL];
		else
			data = pg_tb[i];
		ret = nla_parse_nested(param_tb, DCB_TC_ATTR_PARAM_MAX,
				       data, dcbnl_tc_param_nest);
		if (ret)
			goto err_pg;

		param_nest = nla_nest_start(skb, i);
		if (!param_nest)
			goto err_pg;

		/* Values the driver doesn't fill in stay "undefined" */
		pgid = DCB_ATTR_VALUE_UNDEFINED;
		prio = DCB_ATTR_VALUE_UNDEFINED;
		tc_pct = DCB_ATTR_VALUE_UNDEFINED;
		up_map = DCB_ATTR_VALUE_UNDEFINED;

		if (dir) {
			/* Rx */
			netdev->dcbnl_ops->getpgtccfgrx(netdev,
						i - DCB_PG_ATTR_TC_0, &prio,
						&pgid, &tc_pct, &up_map);
		} else {
			/* Tx */
			netdev->dcbnl_ops->getpgtccfgtx(netdev,
						i - DCB_PG_ATTR_TC_0, &prio,
						&pgid, &tc_pct, &up_map);
		}

		/* Emit only the parameters the request asked for */
		if (param_tb[DCB_TC_ATTR_PARAM_PGID] ||
		    param_tb[DCB_TC_ATTR_PARAM_ALL]) {
			ret = nla_put_u8(skb,
			                 DCB_TC_ATTR_PARAM_PGID, pgid);
			if (ret)
				goto err_param;
		}
		if (param_tb[DCB_TC_ATTR_PARAM_UP_MAPPING] ||
		    param_tb[DCB_TC_ATTR_PARAM_ALL]) {
			ret = nla_put_u8(skb,
			                 DCB_TC_ATTR_PARAM_UP_MAPPING, up_map);
			if (ret)
				goto err_param;
		}
		if (param_tb[DCB_TC_ATTR_PARAM_STRICT_PRIO] ||
		    param_tb[DCB_TC_ATTR_PARAM_ALL]) {
			ret = nla_put_u8(skb,
			                 DCB_TC_ATTR_PARAM_STRICT_PRIO, prio);
			if (ret)
				goto err_param;
		}
		if (param_tb[DCB_TC_ATTR_PARAM_BW_PCT] ||
		    param_tb[DCB_TC_ATTR_PARAM_ALL]) {
			ret = nla_put_u8(skb, DCB_TC_ATTR_PARAM_BW_PCT,
			                 tc_pct);
			if (ret)
				goto err_param;
		}
		nla_nest_end(skb, param_nest);
	}

	/* Second pass: per-bandwidth-group percentages */
	if (pg_tb[DCB_PG_ATTR_BW_ID_ALL])
		getall = 1;
	else
		getall = 0;

	for (i = DCB_PG_ATTR_BW_ID_0; i <= DCB_PG_ATTR_BW_ID_7; i++) {
		if (!getall && !pg_tb[i])
			continue;

		tc_pct = DCB_ATTR_VALUE_UNDEFINED;

		if (dir) {
			/* Rx */
			netdev->dcbnl_ops->getpgbwgcfgrx(netdev,
					i - DCB_PG_ATTR_BW_ID_0, &tc_pct);
		} else {
			/* Tx */
			netdev->dcbnl_ops->getpgbwgcfgtx(netdev,
					i - DCB_PG_ATTR_BW_ID_0, &tc_pct);
		}
		ret = nla_put_u8(skb, i, tc_pct);
		if (ret)
			goto err_pg;
	}

	nla_nest_end(skb, pg_nest);

	return 0;

err_param:
	nla_nest_cancel(skb, param_nest);
err_pg:
	nla_nest_cancel(skb, pg_nest);

	/* NOTE(review): parse errors also land here and are reported as
	 * -EMSGSIZE rather than the parse error code.
	 */
	return -EMSGSIZE;
}
694
/* DCB_CMD_PGTX_GCFG: get the Tx priority-group configuration. */
static int dcbnl_pgtx_getcfg(struct net_device *netdev, struct nlmsghdr *nlh,
			     u32 seq, struct nlattr **tb, struct sk_buff *skb)
{
	return __dcbnl_pg_getcfg(netdev, nlh, tb, skb, 0);
}
700
/* DCB_CMD_PGRX_GCFG: get the Rx priority-group configuration. */
static int dcbnl_pgrx_getcfg(struct net_device *netdev, struct nlmsghdr *nlh,
			     u32 seq, struct nlattr **tb, struct sk_buff *skb)
{
	return __dcbnl_pg_getcfg(netdev, nlh, tb, skb, 1);
}
706
707static int dcbnl_setstate(struct net_device *netdev, struct nlmsghdr *nlh,
708			  u32 seq, struct nlattr **tb, struct sk_buff *skb)
709{
710	u8 value;
711
712	if (!tb[DCB_ATTR_STATE])
713		return -EINVAL;
714
715	if (!netdev->dcbnl_ops->setstate)
716		return -EOPNOTSUPP;
717
718	value = nla_get_u8(tb[DCB_ATTR_STATE]);
719
720	return nla_put_u8(skb, DCB_ATTR_STATE,
721			  netdev->dcbnl_ops->setstate(netdev, value));
722}
723
724static int dcbnl_setpfccfg(struct net_device *netdev, struct nlmsghdr *nlh,
725			   u32 seq, struct nlattr **tb, struct sk_buff *skb)
726{
727	struct nlattr *data[DCB_PFC_UP_ATTR_MAX + 1];
728	int i;
729	int ret;
730	u8 value;
731
732	if (!tb[DCB_ATTR_PFC_CFG])
733		return -EINVAL;
734
735	if (!netdev->dcbnl_ops->setpfccfg)
736		return -EOPNOTSUPP;
737
738	ret = nla_parse_nested(data, DCB_PFC_UP_ATTR_MAX,
739	                       tb[DCB_ATTR_PFC_CFG],
740	                       dcbnl_pfc_up_nest);
741	if (ret)
742		return ret;
743
744	for (i = DCB_PFC_UP_ATTR_0; i <= DCB_PFC_UP_ATTR_7; i++) {
745		if (data[i] == NULL)
746			continue;
747		value = nla_get_u8(data[i]);
748		netdev->dcbnl_ops->setpfccfg(netdev,
749			data[i]->nla_type - DCB_PFC_UP_ATTR_0, value);
750	}
751
752	return nla_put_u8(skb, DCB_ATTR_PFC_CFG, 0);
753}
754
755static int dcbnl_setall(struct net_device *netdev, struct nlmsghdr *nlh,
756			u32 seq, struct nlattr **tb, struct sk_buff *skb)
757{
758	int ret;
759
760	if (!tb[DCB_ATTR_SET_ALL])
761		return -EINVAL;
762
763	if (!netdev->dcbnl_ops->setall)
764		return -EOPNOTSUPP;
765
766	ret = nla_put_u8(skb, DCB_ATTR_SET_ALL,
767			 netdev->dcbnl_ops->setall(netdev));
768	dcbnl_cee_notify(netdev, RTM_SETDCB, DCB_CMD_SET_ALL, seq, 0);
769
770	return ret;
771}
772
/* Common worker for DCB_CMD_PGTX_SCFG / DCB_CMD_PGRX_SCFG.
 * Applies the per-traffic-class parameters and per-group bandwidth
 * percentages present in the DCB_ATTR_PG_CFG nest, then replies with a
 * zero status byte.
 * @dir: 0 = Tx configuration, non-zero = Rx configuration.
 */
static int __dcbnl_pg_setcfg(struct net_device *netdev, struct nlmsghdr *nlh,
			     u32 seq, struct nlattr **tb, struct sk_buff *skb,
			     int dir)
{
	struct nlattr *pg_tb[DCB_PG_ATTR_MAX + 1];
	struct nlattr *param_tb[DCB_TC_ATTR_PARAM_MAX + 1];
	int ret;
	int i;
	u8 pgid;
	u8 up_map;
	u8 prio;
	u8 tc_pct;

	if (!tb[DCB_ATTR_PG_CFG])
		return -EINVAL;

	if (!netdev->dcbnl_ops->setpgtccfgtx ||
	    !netdev->dcbnl_ops->setpgtccfgrx ||
	    !netdev->dcbnl_ops->setpgbwgcfgtx ||
	    !netdev->dcbnl_ops->setpgbwgcfgrx)
		return -EOPNOTSUPP;

	ret = nla_parse_nested(pg_tb, DCB_PG_ATTR_MAX,
	                       tb[DCB_ATTR_PG_CFG], dcbnl_pg_nest);
	if (ret)
		return ret;

	/* First pass: per-traffic-class parameter nests */
	for (i = DCB_PG_ATTR_TC_0; i <= DCB_PG_ATTR_TC_7; i++) {
		if (!pg_tb[i])
			continue;

		ret = nla_parse_nested(param_tb, DCB_TC_ATTR_PARAM_MAX,
		                       pg_tb[i], dcbnl_tc_param_nest);
		if (ret)
			return ret;

		/* Parameters the request omits are passed down as
		 * "undefined" so the driver leaves them unchanged.
		 */
		pgid = DCB_ATTR_VALUE_UNDEFINED;
		prio = DCB_ATTR_VALUE_UNDEFINED;
		tc_pct = DCB_ATTR_VALUE_UNDEFINED;
		up_map = DCB_ATTR_VALUE_UNDEFINED;

		if (param_tb[DCB_TC_ATTR_PARAM_STRICT_PRIO])
			prio =
			    nla_get_u8(param_tb[DCB_TC_ATTR_PARAM_STRICT_PRIO]);

		if (param_tb[DCB_TC_ATTR_PARAM_PGID])
			pgid = nla_get_u8(param_tb[DCB_TC_ATTR_PARAM_PGID]);

		if (param_tb[DCB_TC_ATTR_PARAM_BW_PCT])
			tc_pct = nla_get_u8(param_tb[DCB_TC_ATTR_PARAM_BW_PCT]);

		if (param_tb[DCB_TC_ATTR_PARAM_UP_MAPPING])
			up_map =
			     nla_get_u8(param_tb[DCB_TC_ATTR_PARAM_UP_MAPPING]);

		/* dir: Tx = 0, Rx = 1 */
		if (dir) {
			/* Rx */
			netdev->dcbnl_ops->setpgtccfgrx(netdev,
				i - DCB_PG_ATTR_TC_0,
				prio, pgid, tc_pct, up_map);
		} else {
			/* Tx */
			netdev->dcbnl_ops->setpgtccfgtx(netdev,
				i - DCB_PG_ATTR_TC_0,
				prio, pgid, tc_pct, up_map);
		}
	}

	/* Second pass: per-bandwidth-group percentages */
	for (i = DCB_PG_ATTR_BW_ID_0; i <= DCB_PG_ATTR_BW_ID_7; i++) {
		if (!pg_tb[i])
			continue;

		tc_pct = nla_get_u8(pg_tb[i]);

		/* dir: Tx = 0, Rx = 1 */
		if (dir) {
			/* Rx */
			netdev->dcbnl_ops->setpgbwgcfgrx(netdev,
					 i - DCB_PG_ATTR_BW_ID_0, tc_pct);
		} else {
			/* Tx */
			netdev->dcbnl_ops->setpgbwgcfgtx(netdev,
					 i - DCB_PG_ATTR_BW_ID_0, tc_pct);
		}
	}

	return nla_put_u8(skb, DCB_ATTR_PG_CFG, 0);
}
862
/* DCB_CMD_PGTX_SCFG: set the Tx priority-group configuration. */
static int dcbnl_pgtx_setcfg(struct net_device *netdev, struct nlmsghdr *nlh,
			     u32 seq, struct nlattr **tb, struct sk_buff *skb)
{
	return __dcbnl_pg_setcfg(netdev, nlh, seq, tb, skb, 0);
}
868
/* DCB_CMD_PGRX_SCFG: set the Rx priority-group configuration. */
static int dcbnl_pgrx_setcfg(struct net_device *netdev, struct nlmsghdr *nlh,
			     u32 seq, struct nlattr **tb, struct sk_buff *skb)
{
	return __dcbnl_pg_setcfg(netdev, nlh, seq, tb, skb, 1);
}
874
/* DCB_CMD_BCN_GCFG: report the requested BCN settings — per-priority
 * rate-protocol bytes (RP_0..RP_7) and the u32 configuration parameters
 * — in a DCB_ATTR_BCN nest. The ALL flag requests everything.
 */
static int dcbnl_bcn_getcfg(struct net_device *netdev, struct nlmsghdr *nlh,
			    u32 seq, struct nlattr **tb, struct sk_buff *skb)
{
	struct nlattr *bcn_nest;
	struct nlattr *bcn_tb[DCB_BCN_ATTR_MAX + 1];
	u8 value_byte;
	u32 value_integer;
	int ret;
	bool getall = false;
	int i;

	if (!tb[DCB_ATTR_BCN])
		return -EINVAL;

	if (!netdev->dcbnl_ops->getbcnrp ||
	    !netdev->dcbnl_ops->getbcncfg)
		return -EOPNOTSUPP;

	ret = nla_parse_nested(bcn_tb, DCB_BCN_ATTR_MAX,
	                       tb[DCB_ATTR_BCN], dcbnl_bcn_nest);
	if (ret)
		return ret;

	bcn_nest = nla_nest_start(skb, DCB_ATTR_BCN);
	if (!bcn_nest)
		return -EMSGSIZE;

	if (bcn_tb[DCB_BCN_ATTR_ALL])
		getall = true;

	/* Per-priority u8 values */
	for (i = DCB_BCN_ATTR_RP_0; i <= DCB_BCN_ATTR_RP_7; i++) {
		if (!getall && !bcn_tb[i])
			continue;

		netdev->dcbnl_ops->getbcnrp(netdev, i - DCB_BCN_ATTR_RP_0,
		                            &value_byte);
		ret = nla_put_u8(skb, i, value_byte);
		if (ret)
			goto err_bcn;
	}

	/* u32 configuration parameters, indexed by attribute id */
	for (i = DCB_BCN_ATTR_BCNA_0; i <= DCB_BCN_ATTR_RI; i++) {
		if (!getall && !bcn_tb[i])
			continue;

		netdev->dcbnl_ops->getbcncfg(netdev, i,
		                             &value_integer);
		ret = nla_put_u32(skb, i, value_integer);
		if (ret)
			goto err_bcn;
	}

	nla_nest_end(skb, bcn_nest);

	return 0;

err_bcn:
	nla_nest_cancel(skb, bcn_nest);
	return ret;
}
935
936static int dcbnl_bcn_setcfg(struct net_device *netdev, struct nlmsghdr *nlh,
937			    u32 seq, struct nlattr **tb, struct sk_buff *skb)
938{
939	struct nlattr *data[DCB_BCN_ATTR_MAX + 1];
940	int i;
941	int ret;
942	u8 value_byte;
943	u32 value_int;
944
945	if (!tb[DCB_ATTR_BCN])
946		return -EINVAL;
947
948	if (!netdev->dcbnl_ops->setbcncfg ||
949	    !netdev->dcbnl_ops->setbcnrp)
950		return -EOPNOTSUPP;
951
952	ret = nla_parse_nested(data, DCB_BCN_ATTR_MAX,
953	                       tb[DCB_ATTR_BCN],
954	                       dcbnl_pfc_up_nest);
955	if (ret)
956		return ret;
957
958	for (i = DCB_BCN_ATTR_RP_0; i <= DCB_BCN_ATTR_RP_7; i++) {
959		if (data[i] == NULL)
960			continue;
961		value_byte = nla_get_u8(data[i]);
962		netdev->dcbnl_ops->setbcnrp(netdev,
963			data[i]->nla_type - DCB_BCN_ATTR_RP_0, value_byte);
964	}
965
966	for (i = DCB_BCN_ATTR_BCNA_0; i <= DCB_BCN_ATTR_RI; i++) {
967		if (data[i] == NULL)
968			continue;
969		value_int = nla_get_u32(data[i]);
970		netdev->dcbnl_ops->setbcncfg(netdev,
971	                                     i, value_int);
972	}
973
974	return nla_put_u8(skb, DCB_ATTR_BCN, 0);
975}
976
977static int dcbnl_build_peer_app(struct net_device *netdev, struct sk_buff* skb,
978				int app_nested_type, int app_info_type,
979				int app_entry_type)
980{
981	struct dcb_peer_app_info info;
982	struct dcb_app *table = NULL;
983	const struct dcbnl_rtnl_ops *ops = netdev->dcbnl_ops;
984	u16 app_count;
985	int err;
986
987
988	/**
989	 * retrieve the peer app configuration form the driver. If the driver
990	 * handlers fail exit without doing anything
991	 */
992	err = ops->peer_getappinfo(netdev, &info, &app_count);
993	if (!err && app_count) {
994		table = kmalloc(sizeof(struct dcb_app) * app_count, GFP_KERNEL);
995		if (!table)
996			return -ENOMEM;
997
998		err = ops->peer_getapptable(netdev, table);
999	}
1000
1001	if (!err) {
1002		u16 i;
1003		struct nlattr *app;
1004
1005		/**
1006		 * build the message, from here on the only possible failure
1007		 * is due to the skb size
1008		 */
1009		err = -EMSGSIZE;
1010
1011		app = nla_nest_start(skb, app_nested_type);
1012		if (!app)
1013			goto nla_put_failure;
1014
1015		if (app_info_type &&
1016		    nla_put(skb, app_info_type, sizeof(info), &info))
1017			goto nla_put_failure;
1018
1019		for (i = 0; i < app_count; i++) {
1020			if (nla_put(skb, app_entry_type, sizeof(struct dcb_app),
1021				    &table[i]))
1022				goto nla_put_failure;
1023		}
1024		nla_nest_end(skb, app);
1025	}
1026	err = 0;
1027
1028nla_put_failure:
1029	kfree(table);
1030	return err;
1031}
1032
1033/* Handle IEEE 802.1Qaz GET commands. */
/* Handle IEEE 802.1Qaz GET commands.
 * Fills @skb with the device name, a DCB_ATTR_IEEE nest (ETS, MAXRATE,
 * PFC, the stored app table, and peer info when the driver provides it)
 * and, if available, the DCBX mode. Returns 0 or -EMSGSIZE.
 */
static int dcbnl_ieee_fill(struct sk_buff *skb, struct net_device *netdev)
{
	struct nlattr *ieee, *app;
	struct dcb_app_type *itr;
	const struct dcbnl_rtnl_ops *ops = netdev->dcbnl_ops;
	int dcbx;
	int err;

	if (nla_put_string(skb, DCB_ATTR_IFNAME, netdev->name))
		return -EMSGSIZE;

	ieee = nla_nest_start(skb, DCB_ATTR_IEEE);
	if (!ieee)
		return -EMSGSIZE;

	if (ops->ieee_getets) {
		struct ieee_ets ets;
		memset(&ets, 0, sizeof(ets));
		err = ops->ieee_getets(netdev, &ets);
		if (!err &&
		    nla_put(skb, DCB_ATTR_IEEE_ETS, sizeof(ets), &ets))
			return -EMSGSIZE;
	}

	if (ops->ieee_getmaxrate) {
		struct ieee_maxrate maxrate;
		memset(&maxrate, 0, sizeof(maxrate));
		err = ops->ieee_getmaxrate(netdev, &maxrate);
		if (!err) {
			err = nla_put(skb, DCB_ATTR_IEEE_MAXRATE,
				      sizeof(maxrate), &maxrate);
			if (err)
				return -EMSGSIZE;
		}
	}

	if (ops->ieee_getpfc) {
		struct ieee_pfc pfc;
		memset(&pfc, 0, sizeof(pfc));
		err = ops->ieee_getpfc(netdev, &pfc);
		if (!err &&
		    nla_put(skb, DCB_ATTR_IEEE_PFC, sizeof(pfc), &pfc))
			return -EMSGSIZE;
	}

	app = nla_nest_start(skb, DCB_ATTR_IEEE_APP_TABLE);
	if (!app)
		return -EMSGSIZE;

	/* Walk the shared app table under dcb_lock; entries for other
	 * devices are filtered by ifindex.
	 */
	spin_lock_bh(&dcb_lock);
	list_for_each_entry(itr, &dcb_app_list, list) {
		if (itr->ifindex == netdev->ifindex) {
			err = nla_put(skb, DCB_ATTR_IEEE_APP, sizeof(itr->app),
					 &itr->app);
			if (err) {
				spin_unlock_bh(&dcb_lock);
				return -EMSGSIZE;
			}
		}
	}

	/* Read the DCBX mode while still holding the lock */
	if (netdev->dcbnl_ops->getdcbx)
		dcbx = netdev->dcbnl_ops->getdcbx(netdev);
	else
		dcbx = -EOPNOTSUPP;

	spin_unlock_bh(&dcb_lock);
	nla_nest_end(skb, app);

	/* get peer info if available */
	if (ops->ieee_peer_getets) {
		struct ieee_ets ets;
		memset(&ets, 0, sizeof(ets));
		err = ops->ieee_peer_getets(netdev, &ets);
		if (!err &&
		    nla_put(skb, DCB_ATTR_IEEE_PEER_ETS, sizeof(ets), &ets))
			return -EMSGSIZE;
	}

	if (ops->ieee_peer_getpfc) {
		struct ieee_pfc pfc;
		memset(&pfc, 0, sizeof(pfc));
		err = ops->ieee_peer_getpfc(netdev, &pfc);
		if (!err &&
		    nla_put(skb, DCB_ATTR_IEEE_PEER_PFC, sizeof(pfc), &pfc))
			return -EMSGSIZE;
	}

	if (ops->peer_getappinfo && ops->peer_getapptable) {
		err = dcbnl_build_peer_app(netdev, skb,
					   DCB_ATTR_IEEE_PEER_APP,
					   DCB_ATTR_IEEE_APP_UNSPEC,
					   DCB_ATTR_IEEE_APP);
		if (err)
			return -EMSGSIZE;
	}

	nla_nest_end(skb, ieee);
	if (dcbx >= 0) {
		err = nla_put_u8(skb, DCB_ATTR_DCBX, dcbx);
		if (err)
			return -EMSGSIZE;
	}

	return 0;
}
1140
/* Fill one CEE priority-group nest: TX parameters when @dir is non-zero,
 * RX parameters otherwise.  Callers guarantee that the corresponding
 * getpgtccfg{tx,rx}/getpgbwgcfg{tx,rx} callbacks are non-NULL (see
 * dcbnl_cee_fill()).  Returns 0 on success or -EMSGSIZE.
 */
static int dcbnl_cee_pg_fill(struct sk_buff *skb, struct net_device *dev,
			     int dir)
{
	u8 pgid, up_map, prio, tc_pct;
	const struct dcbnl_rtnl_ops *ops = dev->dcbnl_ops;
	/* i first selects the outer nest type, then is reused below as the
	 * per-TC and per-bandwidth-group attribute index.
	 */
	int i = dir ? DCB_ATTR_CEE_TX_PG : DCB_ATTR_CEE_RX_PG;
	struct nlattr *pg = nla_nest_start(skb, i);

	if (!pg)
		return -EMSGSIZE;

	/* One nested attribute per traffic class (TC 0..7). */
	for (i = DCB_PG_ATTR_TC_0; i <= DCB_PG_ATTR_TC_7; i++) {
		struct nlattr *tc_nest = nla_nest_start(skb, i);

		if (!tc_nest)
			return -EMSGSIZE;

		/* Defaults in case the driver leaves an output untouched. */
		pgid = DCB_ATTR_VALUE_UNDEFINED;
		prio = DCB_ATTR_VALUE_UNDEFINED;
		tc_pct = DCB_ATTR_VALUE_UNDEFINED;
		up_map = DCB_ATTR_VALUE_UNDEFINED;

		if (!dir)
			ops->getpgtccfgrx(dev, i - DCB_PG_ATTR_TC_0,
					  &prio, &pgid, &tc_pct, &up_map);
		else
			ops->getpgtccfgtx(dev, i - DCB_PG_ATTR_TC_0,
					  &prio, &pgid, &tc_pct, &up_map);

		if (nla_put_u8(skb, DCB_TC_ATTR_PARAM_PGID, pgid) ||
		    nla_put_u8(skb, DCB_TC_ATTR_PARAM_UP_MAPPING, up_map) ||
		    nla_put_u8(skb, DCB_TC_ATTR_PARAM_STRICT_PRIO, prio) ||
		    nla_put_u8(skb, DCB_TC_ATTR_PARAM_BW_PCT, tc_pct))
			return -EMSGSIZE;
		nla_nest_end(skb, tc_nest);
	}

	/* One flat attribute per bandwidth-group percentage (BWG 0..7). */
	for (i = DCB_PG_ATTR_BW_ID_0; i <= DCB_PG_ATTR_BW_ID_7; i++) {
		tc_pct = DCB_ATTR_VALUE_UNDEFINED;

		if (!dir)
			ops->getpgbwgcfgrx(dev, i - DCB_PG_ATTR_BW_ID_0,
					   &tc_pct);
		else
			ops->getpgbwgcfgtx(dev, i - DCB_PG_ATTR_BW_ID_0,
					   &tc_pct);
		if (nla_put_u8(skb, i, tc_pct))
			return -EMSGSIZE;
	}
	nla_nest_end(skb, pg);
	return 0;
}
1193
/* Dump the device's CEE DCBX state into @skb: priority groups (TX/RX),
 * PFC, the locally stored APP table, feature flags, peer info (when the
 * driver exposes it) and the DCBX mode.  Returns 0 on success or a
 * negative errno (typically -EMSGSIZE); on error the caller discards
 * @skb, so open nests are not cancelled here.
 */
static int dcbnl_cee_fill(struct sk_buff *skb, struct net_device *netdev)
{
	struct nlattr *cee, *app;
	struct dcb_app_type *itr;
	const struct dcbnl_rtnl_ops *ops = netdev->dcbnl_ops;
	int dcbx, i, err = -EMSGSIZE;
	u8 value;

	if (nla_put_string(skb, DCB_ATTR_IFNAME, netdev->name))
		goto nla_put_failure;
	cee = nla_nest_start(skb, DCB_ATTR_CEE);
	if (!cee)
		goto nla_put_failure;

	/* local pg */
	if (ops->getpgtccfgtx && ops->getpgbwgcfgtx) {
		err = dcbnl_cee_pg_fill(skb, netdev, 1);
		if (err)
			goto nla_put_failure;
	}

	if (ops->getpgtccfgrx && ops->getpgbwgcfgrx) {
		err = dcbnl_cee_pg_fill(skb, netdev, 0);
		if (err)
			goto nla_put_failure;
	}

	/* local pfc */
	if (ops->getpfccfg) {
		struct nlattr *pfc_nest = nla_nest_start(skb, DCB_ATTR_CEE_PFC);

		if (!pfc_nest)
			goto nla_put_failure;

		/* One attribute per 802.1p user priority (0..7). */
		for (i = DCB_PFC_UP_ATTR_0; i <= DCB_PFC_UP_ATTR_7; i++) {
			ops->getpfccfg(netdev, i - DCB_PFC_UP_ATTR_0, &value);
			if (nla_put_u8(skb, i, value))
				goto nla_put_failure;
		}
		nla_nest_end(skb, pfc_nest);
	}

	/* local app
	 * Failures inside this section jump to dcb_unlock so that
	 * dcb_lock is released before returning.
	 */
	spin_lock_bh(&dcb_lock);
	app = nla_nest_start(skb, DCB_ATTR_CEE_APP_TABLE);
	if (!app)
		goto dcb_unlock;

	list_for_each_entry(itr, &dcb_app_list, list) {
		if (itr->ifindex == netdev->ifindex) {
			struct nlattr *app_nest = nla_nest_start(skb,
								 DCB_ATTR_APP);
			if (!app_nest)
				goto dcb_unlock;

			err = nla_put_u8(skb, DCB_APP_ATTR_IDTYPE,
					 itr->app.selector);
			if (err)
				goto dcb_unlock;

			err = nla_put_u16(skb, DCB_APP_ATTR_ID,
					  itr->app.protocol);
			if (err)
				goto dcb_unlock;

			err = nla_put_u8(skb, DCB_APP_ATTR_PRIORITY,
					 itr->app.priority);
			if (err)
				goto dcb_unlock;

			nla_nest_end(skb, app_nest);
		}
	}
	nla_nest_end(skb, app);

	/* getdcbx() runs under dcb_lock (spin_lock_bh) here, so the
	 * driver callback must not sleep.  The value is emitted at the
	 * end of the message, outside the CEE nest.
	 */
	if (netdev->dcbnl_ops->getdcbx)
		dcbx = netdev->dcbnl_ops->getdcbx(netdev);
	else
		dcbx = -EOPNOTSUPP;

	spin_unlock_bh(&dcb_lock);

	/* features flags */
	if (ops->getfeatcfg) {
		struct nlattr *feat = nla_nest_start(skb, DCB_ATTR_CEE_FEAT);
		if (!feat)
			goto nla_put_failure;

		/* Only features the driver reports successfully are sent. */
		for (i = DCB_FEATCFG_ATTR_ALL + 1; i <= DCB_FEATCFG_ATTR_MAX;
		     i++)
			if (!ops->getfeatcfg(netdev, i, &value) &&
			    nla_put_u8(skb, i, value))
				goto nla_put_failure;

		nla_nest_end(skb, feat);
	}

	/* peer info if available */
	if (ops->cee_peer_getpg) {
		struct cee_pg pg;
		memset(&pg, 0, sizeof(pg));
		err = ops->cee_peer_getpg(netdev, &pg);
		if (!err &&
		    nla_put(skb, DCB_ATTR_CEE_PEER_PG, sizeof(pg), &pg))
			goto nla_put_failure;
	}

	if (ops->cee_peer_getpfc) {
		struct cee_pfc pfc;
		memset(&pfc, 0, sizeof(pfc));
		err = ops->cee_peer_getpfc(netdev, &pfc);
		if (!err &&
		    nla_put(skb, DCB_ATTR_CEE_PEER_PFC, sizeof(pfc), &pfc))
			goto nla_put_failure;
	}

	if (ops->peer_getappinfo && ops->peer_getapptable) {
		err = dcbnl_build_peer_app(netdev, skb,
					   DCB_ATTR_CEE_PEER_APP_TABLE,
					   DCB_ATTR_CEE_PEER_APP_INFO,
					   DCB_ATTR_CEE_PEER_APP);
		if (err)
			goto nla_put_failure;
	}
	nla_nest_end(skb, cee);

	/* DCBX state */
	if (dcbx >= 0) {
		err = nla_put_u8(skb, DCB_ATTR_DCBX, dcbx);
		if (err)
			goto nla_put_failure;
	}
	return 0;

dcb_unlock:
	spin_unlock_bh(&dcb_lock);
nla_put_failure:
	return err;
}
1333
/* Build a DCB state dump for @dev and multicast it to RTNLGRP_DCB
 * listeners.  @dcbx_ver selects the message contents: IEEE 802.1Qaz
 * (DCB_CAP_DCBX_VER_IEEE) or CEE.  If the fill fails the error is
 * reported to the multicast group via rtnl_set_sk_err() rather than
 * sending a truncated message.  Returns the fill result.
 */
static int dcbnl_notify(struct net_device *dev, int event, int cmd,
			u32 seq, u32 portid, int dcbx_ver)
{
	struct net *net = dev_net(dev);
	struct sk_buff *skb;
	struct nlmsghdr *nlh;
	const struct dcbnl_rtnl_ops *ops = dev->dcbnl_ops;
	int err;

	if (!ops)
		return -EOPNOTSUPP;

	skb = dcbnl_newmsg(event, cmd, portid, seq, 0, &nlh);
	if (!skb)
		return -ENOBUFS;

	if (dcbx_ver == DCB_CAP_DCBX_VER_IEEE)
		err = dcbnl_ieee_fill(skb, dev);
	else
		err = dcbnl_cee_fill(skb, dev);

	if (err < 0) {
		/* Report error to broadcast listeners */
		nlmsg_free(skb);
		rtnl_set_sk_err(net, RTNLGRP_DCB, err);
	} else {
		/* End nlmsg and notify broadcast listeners */
		nlmsg_end(skb, nlh);
		rtnl_notify(skb, net, 0, RTNLGRP_DCB, NULL, GFP_KERNEL);
	}

	return err;
}
1367
/* Notify RTNLGRP_DCB listeners of an IEEE 802.1Qaz state change. */
int dcbnl_ieee_notify(struct net_device *dev, int event, int cmd,
		      u32 seq, u32 portid)
{
	return dcbnl_notify(dev, event, cmd, seq, portid, DCB_CAP_DCBX_VER_IEEE);
}
EXPORT_SYMBOL(dcbnl_ieee_notify);
1374
/* Notify RTNLGRP_DCB listeners of a CEE DCBX state change. */
int dcbnl_cee_notify(struct net_device *dev, int event, int cmd,
		     u32 seq, u32 portid)
{
	return dcbnl_notify(dev, event, cmd, seq, portid, DCB_CAP_DCBX_VER_CEE);
}
EXPORT_SYMBOL(dcbnl_cee_notify);
1381
1382/* Handle IEEE 802.1Qaz SET commands. If any requested operation can not
1383 * be completed the entire msg is aborted and error value is returned.
1384 * No attempt is made to reconcile the case where only part of the
1385 * cmd can be completed.
1386 */
1387static int dcbnl_ieee_set(struct net_device *netdev, struct nlmsghdr *nlh,
1388			  u32 seq, struct nlattr **tb, struct sk_buff *skb)
1389{
1390	const struct dcbnl_rtnl_ops *ops = netdev->dcbnl_ops;
1391	struct nlattr *ieee[DCB_ATTR_IEEE_MAX + 1];
1392	int err;
1393
1394	if (!ops)
1395		return -EOPNOTSUPP;
1396
1397	if (!tb[DCB_ATTR_IEEE])
1398		return -EINVAL;
1399
1400	err = nla_parse_nested(ieee, DCB_ATTR_IEEE_MAX,
1401			       tb[DCB_ATTR_IEEE], dcbnl_ieee_policy);
1402	if (err)
1403		return err;
1404
1405	if (ieee[DCB_ATTR_IEEE_ETS] && ops->ieee_setets) {
1406		struct ieee_ets *ets = nla_data(ieee[DCB_ATTR_IEEE_ETS]);
1407		err = ops->ieee_setets(netdev, ets);
1408		if (err)
1409			goto err;
1410	}
1411
1412	if (ieee[DCB_ATTR_IEEE_MAXRATE] && ops->ieee_setmaxrate) {
1413		struct ieee_maxrate *maxrate =
1414			nla_data(ieee[DCB_ATTR_IEEE_MAXRATE]);
1415		err = ops->ieee_setmaxrate(netdev, maxrate);
1416		if (err)
1417			goto err;
1418	}
1419
1420	if (ieee[DCB_ATTR_IEEE_PFC] && ops->ieee_setpfc) {
1421		struct ieee_pfc *pfc = nla_data(ieee[DCB_ATTR_IEEE_PFC]);
1422		err = ops->ieee_setpfc(netdev, pfc);
1423		if (err)
1424			goto err;
1425	}
1426
1427	if (ieee[DCB_ATTR_IEEE_APP_TABLE]) {
1428		struct nlattr *attr;
1429		int rem;
1430
1431		nla_for_each_nested(attr, ieee[DCB_ATTR_IEEE_APP_TABLE], rem) {
1432			struct dcb_app *app_data;
1433			if (nla_type(attr) != DCB_ATTR_IEEE_APP)
1434				continue;
1435			app_data = nla_data(attr);
1436			if (ops->ieee_setapp)
1437				err = ops->ieee_setapp(netdev, app_data);
1438			else
1439				err = dcb_ieee_setapp(netdev, app_data);
1440			if (err)
1441				goto err;
1442		}
1443	}
1444
1445err:
1446	err = nla_put_u8(skb, DCB_ATTR_IEEE, err);
1447	dcbnl_ieee_notify(netdev, RTM_SETDCB, DCB_CMD_IEEE_SET, seq, 0);
1448	return err;
1449}
1450
/* Handle DCB_CMD_IEEE_GET: fill the reply with the device's complete
 * IEEE 802.1Qaz state via dcbnl_ieee_fill().
 */
static int dcbnl_ieee_get(struct net_device *netdev, struct nlmsghdr *nlh,
			  u32 seq, struct nlattr **tb, struct sk_buff *skb)
{
	const struct dcbnl_rtnl_ops *ops = netdev->dcbnl_ops;

	if (!ops)
		return -EOPNOTSUPP;

	return dcbnl_ieee_fill(skb, netdev);
}
1461
1462static int dcbnl_ieee_del(struct net_device *netdev, struct nlmsghdr *nlh,
1463			  u32 seq, struct nlattr **tb, struct sk_buff *skb)
1464{
1465	const struct dcbnl_rtnl_ops *ops = netdev->dcbnl_ops;
1466	struct nlattr *ieee[DCB_ATTR_IEEE_MAX + 1];
1467	int err;
1468
1469	if (!ops)
1470		return -EOPNOTSUPP;
1471
1472	if (!tb[DCB_ATTR_IEEE])
1473		return -EINVAL;
1474
1475	err = nla_parse_nested(ieee, DCB_ATTR_IEEE_MAX,
1476			       tb[DCB_ATTR_IEEE], dcbnl_ieee_policy);
1477	if (err)
1478		return err;
1479
1480	if (ieee[DCB_ATTR_IEEE_APP_TABLE]) {
1481		struct nlattr *attr;
1482		int rem;
1483
1484		nla_for_each_nested(attr, ieee[DCB_ATTR_IEEE_APP_TABLE], rem) {
1485			struct dcb_app *app_data;
1486
1487			if (nla_type(attr) != DCB_ATTR_IEEE_APP)
1488				continue;
1489			app_data = nla_data(attr);
1490			if (ops->ieee_delapp)
1491				err = ops->ieee_delapp(netdev, app_data);
1492			else
1493				err = dcb_ieee_delapp(netdev, app_data);
1494			if (err)
1495				goto err;
1496		}
1497	}
1498
1499err:
1500	err = nla_put_u8(skb, DCB_ATTR_IEEE, err);
1501	dcbnl_ieee_notify(netdev, RTM_SETDCB, DCB_CMD_IEEE_DEL, seq, 0);
1502	return err;
1503}
1504
1505
1506/* DCBX configuration */
/* Handle DCB_CMD_GDCBX: report the driver's current DCBX mode as a
 * single DCB_ATTR_DCBX u8 attribute.
 */
static int dcbnl_getdcbx(struct net_device *netdev, struct nlmsghdr *nlh,
			 u32 seq, struct nlattr **tb, struct sk_buff *skb)
{
	if (!netdev->dcbnl_ops->getdcbx)
		return -EOPNOTSUPP;

	return nla_put_u8(skb, DCB_ATTR_DCBX,
			  netdev->dcbnl_ops->getdcbx(netdev));
}
1516
/* Handle DCB_CMD_SDCBX: hand the requested DCBX mode to the driver and
 * echo the driver's return code back as a DCB_ATTR_DCBX u8 attribute.
 */
static int dcbnl_setdcbx(struct net_device *netdev, struct nlmsghdr *nlh,
			 u32 seq, struct nlattr **tb, struct sk_buff *skb)
{
	u8 value;

	if (!netdev->dcbnl_ops->setdcbx)
		return -EOPNOTSUPP;

	if (!tb[DCB_ATTR_DCBX])
		return -EINVAL;

	value = nla_get_u8(tb[DCB_ATTR_DCBX]);

	return nla_put_u8(skb, DCB_ATTR_DCBX,
			  netdev->dcbnl_ops->setdcbx(netdev, value));
}
1533
/* Handle DCB_CMD_GFEATCFG: report feature-configuration flags.  With
 * DCB_FEATCFG_ATTR_ALL every feature is queried, otherwise only those
 * the request listed.  Despite its name, the nla_put_failure label is
 * also reached on success, with ret == 0.
 */
static int dcbnl_getfeatcfg(struct net_device *netdev, struct nlmsghdr *nlh,
			    u32 seq, struct nlattr **tb, struct sk_buff *skb)
{
	struct nlattr *data[DCB_FEATCFG_ATTR_MAX + 1], *nest;
	u8 value;
	int ret, i;
	int getall = 0;

	if (!netdev->dcbnl_ops->getfeatcfg)
		return -EOPNOTSUPP;

	if (!tb[DCB_ATTR_FEATCFG])
		return -EINVAL;

	ret = nla_parse_nested(data, DCB_FEATCFG_ATTR_MAX, tb[DCB_ATTR_FEATCFG],
			       dcbnl_featcfg_nest);
	if (ret)
		return ret;

	nest = nla_nest_start(skb, DCB_ATTR_FEATCFG);
	if (!nest)
		return -EMSGSIZE;

	if (data[DCB_FEATCFG_ATTR_ALL])
		getall = 1;

	for (i = DCB_FEATCFG_ATTR_ALL+1; i <= DCB_FEATCFG_ATTR_MAX; i++) {
		if (!getall && !data[i])
			continue;

		ret = netdev->dcbnl_ops->getfeatcfg(netdev, i, &value);
		if (!ret)
			ret = nla_put_u8(skb, i, value);

		/* Any driver or message-size failure cancels the whole
		 * nest so no partial reply is sent.
		 */
		if (ret) {
			nla_nest_cancel(skb, nest);
			goto nla_put_failure;
		}
	}
	nla_nest_end(skb, nest);

nla_put_failure:
	return ret;
}
1578
1579static int dcbnl_setfeatcfg(struct net_device *netdev, struct nlmsghdr *nlh,
1580			    u32 seq, struct nlattr **tb, struct sk_buff *skb)
1581{
1582	struct nlattr *data[DCB_FEATCFG_ATTR_MAX + 1];
1583	int ret, i;
1584	u8 value;
1585
1586	if (!netdev->dcbnl_ops->setfeatcfg)
1587		return -ENOTSUPP;
1588
1589	if (!tb[DCB_ATTR_FEATCFG])
1590		return -EINVAL;
1591
1592	ret = nla_parse_nested(data, DCB_FEATCFG_ATTR_MAX, tb[DCB_ATTR_FEATCFG],
1593			       dcbnl_featcfg_nest);
1594
1595	if (ret)
1596		goto err;
1597
1598	for (i = DCB_FEATCFG_ATTR_ALL+1; i <= DCB_FEATCFG_ATTR_MAX; i++) {
1599		if (data[i] == NULL)
1600			continue;
1601
1602		value = nla_get_u8(data[i]);
1603
1604		ret = netdev->dcbnl_ops->setfeatcfg(netdev, i, value);
1605
1606		if (ret)
1607			goto err;
1608	}
1609err:
1610	ret = nla_put_u8(skb, DCB_ATTR_FEATCFG, ret);
1611
1612	return ret;
1613}
1614
/* Handle CEE DCBX GET commands: fill the reply with the device's
 * complete CEE state via dcbnl_cee_fill().
 */
static int dcbnl_cee_get(struct net_device *netdev, struct nlmsghdr *nlh,
			 u32 seq, struct nlattr **tb, struct sk_buff *skb)
{
	const struct dcbnl_rtnl_ops *ops = netdev->dcbnl_ops;

	if (!ops)
		return -EOPNOTSUPP;

	return dcbnl_cee_fill(skb, netdev);
}
1626
/* Dispatch entry for one DCB_CMD_* command: the rtnetlink reply message
 * type plus the handler that fills the reply (see reply_funcs[]).
 */
struct reply_func {
	/* reply netlink message type */
	int	type;

	/* function to fill message contents */
	int   (*cb)(struct net_device *, struct nlmsghdr *, u32,
		    struct nlattr **, struct sk_buff *);
};
1635
/* Command dispatch table, indexed by DCB_CMD_*.  Commands without an
 * entry have a NULL cb and are rejected with -EOPNOTSUPP in dcb_doit().
 */
static const struct reply_func reply_funcs[DCB_CMD_MAX+1] = {
	[DCB_CMD_GSTATE]	= { RTM_GETDCB, dcbnl_getstate },
	[DCB_CMD_SSTATE]	= { RTM_SETDCB, dcbnl_setstate },
	[DCB_CMD_PFC_GCFG]	= { RTM_GETDCB, dcbnl_getpfccfg },
	[DCB_CMD_PFC_SCFG]	= { RTM_SETDCB, dcbnl_setpfccfg },
	[DCB_CMD_GPERM_HWADDR]	= { RTM_GETDCB, dcbnl_getperm_hwaddr },
	[DCB_CMD_GCAP]		= { RTM_GETDCB, dcbnl_getcap },
	[DCB_CMD_GNUMTCS]	= { RTM_GETDCB, dcbnl_getnumtcs },
	[DCB_CMD_SNUMTCS]	= { RTM_SETDCB, dcbnl_setnumtcs },
	[DCB_CMD_PFC_GSTATE]	= { RTM_GETDCB, dcbnl_getpfcstate },
	[DCB_CMD_PFC_SSTATE]	= { RTM_SETDCB, dcbnl_setpfcstate },
	[DCB_CMD_GAPP]		= { RTM_GETDCB, dcbnl_getapp },
	[DCB_CMD_SAPP]		= { RTM_SETDCB, dcbnl_setapp },
	[DCB_CMD_PGTX_GCFG]	= { RTM_GETDCB, dcbnl_pgtx_getcfg },
	[DCB_CMD_PGTX_SCFG]	= { RTM_SETDCB, dcbnl_pgtx_setcfg },
	[DCB_CMD_PGRX_GCFG]	= { RTM_GETDCB, dcbnl_pgrx_getcfg },
	[DCB_CMD_PGRX_SCFG]	= { RTM_SETDCB, dcbnl_pgrx_setcfg },
	[DCB_CMD_SET_ALL]	= { RTM_SETDCB, dcbnl_setall },
	[DCB_CMD_BCN_GCFG]	= { RTM_GETDCB, dcbnl_bcn_getcfg },
	[DCB_CMD_BCN_SCFG]	= { RTM_SETDCB, dcbnl_bcn_setcfg },
	[DCB_CMD_IEEE_GET]	= { RTM_GETDCB, dcbnl_ieee_get },
	[DCB_CMD_IEEE_SET]	= { RTM_SETDCB, dcbnl_ieee_set },
	[DCB_CMD_IEEE_DEL]	= { RTM_SETDCB, dcbnl_ieee_del },
	[DCB_CMD_GDCBX]		= { RTM_GETDCB, dcbnl_getdcbx },
	[DCB_CMD_SDCBX]		= { RTM_SETDCB, dcbnl_setdcbx },
	[DCB_CMD_GFEATCFG]	= { RTM_GETDCB, dcbnl_getfeatcfg },
	[DCB_CMD_SFEATCFG]	= { RTM_SETDCB, dcbnl_setfeatcfg },
	[DCB_CMD_CEE_GET]	= { RTM_GETDCB, dcbnl_cee_get },
};
1665
/* Top-level RTM_GETDCB/RTM_SETDCB handler: validate the request, look up
 * the target device and command handler, run the handler into a freshly
 * allocated reply skb, and unicast the reply to the requester.
 */
static int dcb_doit(struct sk_buff *skb, struct nlmsghdr *nlh)
{
	struct net *net = sock_net(skb->sk);
	struct net_device *netdev;
	struct dcbmsg *dcb = nlmsg_data(nlh);
	struct nlattr *tb[DCB_ATTR_MAX + 1];
	u32 portid = skb ? NETLINK_CB(skb).portid : 0;
	int ret = -EINVAL;
	struct sk_buff *reply_skb;
	struct nlmsghdr *reply_nlh = NULL;
	const struct reply_func *fn;

	/* Configuration changes require CAP_NET_ADMIN; reads do not. */
	if ((nlh->nlmsg_type == RTM_SETDCB) && !netlink_capable(skb, CAP_NET_ADMIN))
		return -EPERM;

	ret = nlmsg_parse(nlh, sizeof(*dcb), tb, DCB_ATTR_MAX,
			  dcbnl_rtnl_policy);
	if (ret < 0)
		return ret;

	if (dcb->cmd > DCB_CMD_MAX)
		return -EINVAL;

	/* check if a reply function has been defined for the command */
	fn = &reply_funcs[dcb->cmd];
	if (!fn->cb)
		return -EOPNOTSUPP;

	if (!tb[DCB_ATTR_IFNAME])
		return -EINVAL;

	/* __dev_get_by_name: no refcount taken; callers run under the
	 * rtnetlink lock, which keeps the device from disappearing.
	 */
	netdev = __dev_get_by_name(net, nla_data(tb[DCB_ATTR_IFNAME]));
	if (!netdev)
		return -ENODEV;

	if (!netdev->dcbnl_ops)
		return -EOPNOTSUPP;

	reply_skb = dcbnl_newmsg(fn->type, dcb->cmd, portid, nlh->nlmsg_seq,
				 nlh->nlmsg_flags, &reply_nlh);
	if (!reply_skb)
		return -ENOBUFS;

	ret = fn->cb(netdev, nlh, nlh->nlmsg_seq, tb, reply_skb);
	if (ret < 0) {
		nlmsg_free(reply_skb);
		goto out;
	}

	nlmsg_end(reply_skb, reply_nlh);

	/* rtnl_unicast consumes reply_skb in all cases. */
	ret = rtnl_unicast(reply_skb, net, portid);
out:
	return ret;
}
1721
1722static struct dcb_app_type *dcb_app_lookup(const struct dcb_app *app,
1723					   int ifindex, int prio)
1724{
1725	struct dcb_app_type *itr;
1726
1727	list_for_each_entry(itr, &dcb_app_list, list) {
1728		if (itr->app.selector == app->selector &&
1729		    itr->app.protocol == app->protocol &&
1730		    itr->ifindex == ifindex &&
1731		    (!prio || itr->app.priority == prio))
1732			return itr;
1733	}
1734
1735	return NULL;
1736}
1737
1738static int dcb_app_add(const struct dcb_app *app, int ifindex)
1739{
1740	struct dcb_app_type *entry;
1741
1742	entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
1743	if (!entry)
1744		return -ENOMEM;
1745
1746	memcpy(&entry->app, app, sizeof(*app));
1747	entry->ifindex = ifindex;
1748	list_add(&entry->list, &dcb_app_list);
1749
1750	return 0;
1751}
1752
1753/**
1754 * dcb_getapp - retrieve the DCBX application user priority
1755 *
1756 * On success returns a non-zero 802.1p user priority bitmap
1757 * otherwise returns 0 as the invalid user priority bitmap to
1758 * indicate an error.
1759 */
1760u8 dcb_getapp(struct net_device *dev, struct dcb_app *app)
1761{
1762	struct dcb_app_type *itr;
1763	u8 prio = 0;
1764
1765	spin_lock_bh(&dcb_lock);
1766	if ((itr = dcb_app_lookup(app, dev->ifindex, 0)))
1767		prio = itr->app.priority;
1768	spin_unlock_bh(&dcb_lock);
1769
1770	return prio;
1771}
1772EXPORT_SYMBOL(dcb_getapp);
1773
1774/**
1775 * dcb_setapp - add CEE dcb application data to app list
1776 *
1777 * Priority 0 is an invalid priority in CEE spec. This routine
1778 * removes applications from the app list if the priority is
1779 * set to zero. Priority is expected to be 8-bit 802.1p user priority bitmap
1780 */
1781int dcb_setapp(struct net_device *dev, struct dcb_app *new)
1782{
1783	struct dcb_app_type *itr;
1784	struct dcb_app_type event;
1785	int err = 0;
1786
1787	event.ifindex = dev->ifindex;
1788	memcpy(&event.app, new, sizeof(event.app));
1789	if (dev->dcbnl_ops->getdcbx)
1790		event.dcbx = dev->dcbnl_ops->getdcbx(dev);
1791
1792	spin_lock_bh(&dcb_lock);
1793	/* Search for existing match and replace */
1794	if ((itr = dcb_app_lookup(new, dev->ifindex, 0))) {
1795		if (new->priority)
1796			itr->app.priority = new->priority;
1797		else {
1798			list_del(&itr->list);
1799			kfree(itr);
1800		}
1801		goto out;
1802	}
1803	/* App type does not exist add new application type */
1804	if (new->priority)
1805		err = dcb_app_add(new, dev->ifindex);
1806out:
1807	spin_unlock_bh(&dcb_lock);
1808	if (!err)
1809		call_dcbevent_notifiers(DCB_APP_EVENT, &event);
1810	return err;
1811}
1812EXPORT_SYMBOL(dcb_setapp);
1813
1814/**
1815 * dcb_ieee_getapp_mask - retrieve the IEEE DCB application priority
1816 *
1817 * Helper routine which on success returns a non-zero 802.1Qaz user
1818 * priority bitmap otherwise returns 0 to indicate the dcb_app was
1819 * not found in APP list.
1820 */
1821u8 dcb_ieee_getapp_mask(struct net_device *dev, struct dcb_app *app)
1822{
1823	struct dcb_app_type *itr;
1824	u8 prio = 0;
1825
1826	spin_lock_bh(&dcb_lock);
1827	if ((itr = dcb_app_lookup(app, dev->ifindex, 0)))
1828		prio |= 1 << itr->app.priority;
1829	spin_unlock_bh(&dcb_lock);
1830
1831	return prio;
1832}
1833EXPORT_SYMBOL(dcb_ieee_getapp_mask);
1834
1835/**
1836 * dcb_ieee_setapp - add IEEE dcb application data to app list
1837 *
1838 * This adds Application data to the list. Multiple application
1839 * entries may exists for the same selector and protocol as long
1840 * as the priorities are different. Priority is expected to be a
1841 * 3-bit unsigned integer
1842 */
1843int dcb_ieee_setapp(struct net_device *dev, struct dcb_app *new)
1844{
1845	struct dcb_app_type event;
1846	int err = 0;
1847
1848	event.ifindex = dev->ifindex;
1849	memcpy(&event.app, new, sizeof(event.app));
1850	if (dev->dcbnl_ops->getdcbx)
1851		event.dcbx = dev->dcbnl_ops->getdcbx(dev);
1852
1853	spin_lock_bh(&dcb_lock);
1854	/* Search for existing match and abort if found */
1855	if (dcb_app_lookup(new, dev->ifindex, new->priority)) {
1856		err = -EEXIST;
1857		goto out;
1858	}
1859
1860	err = dcb_app_add(new, dev->ifindex);
1861out:
1862	spin_unlock_bh(&dcb_lock);
1863	if (!err)
1864		call_dcbevent_notifiers(DCB_APP_EVENT, &event);
1865	return err;
1866}
1867EXPORT_SYMBOL(dcb_ieee_setapp);
1868
1869/**
1870 * dcb_ieee_delapp - delete IEEE dcb application data from list
1871 *
1872 * This removes a matching APP data from the APP list
1873 */
1874int dcb_ieee_delapp(struct net_device *dev, struct dcb_app *del)
1875{
1876	struct dcb_app_type *itr;
1877	struct dcb_app_type event;
1878	int err = -ENOENT;
1879
1880	event.ifindex = dev->ifindex;
1881	memcpy(&event.app, del, sizeof(event.app));
1882	if (dev->dcbnl_ops->getdcbx)
1883		event.dcbx = dev->dcbnl_ops->getdcbx(dev);
1884
1885	spin_lock_bh(&dcb_lock);
1886	/* Search for existing match and remove it. */
1887	if ((itr = dcb_app_lookup(del, dev->ifindex, del->priority))) {
1888		list_del(&itr->list);
1889		kfree(itr);
1890		err = 0;
1891	}
1892
1893	spin_unlock_bh(&dcb_lock);
1894	if (!err)
1895		call_dcbevent_notifiers(DCB_APP_EVENT, &event);
1896	return err;
1897}
1898EXPORT_SYMBOL(dcb_ieee_delapp);
1899
1900static void dcb_flushapp(void)
1901{
1902	struct dcb_app_type *app;
1903	struct dcb_app_type *tmp;
1904
1905	spin_lock_bh(&dcb_lock);
1906	list_for_each_entry_safe(app, tmp, &dcb_app_list, list) {
1907		list_del(&app->list);
1908		kfree(app);
1909	}
1910	spin_unlock_bh(&dcb_lock);
1911}
1912
/* Module init: prepare the APP table and register the RTM_GETDCB and
 * RTM_SETDCB rtnetlink handlers (both dispatched through dcb_doit).
 */
static int __init dcbnl_init(void)
{
	INIT_LIST_HEAD(&dcb_app_list);

	rtnl_register(PF_UNSPEC, RTM_GETDCB, dcb_doit, NULL, NULL);
	rtnl_register(PF_UNSPEC, RTM_SETDCB, dcb_doit, NULL, NULL);

	return 0;
}
module_init(dcbnl_init);
1923
/* Module exit: unregister the rtnetlink handlers, then free the APP
 * table once no new requests can arrive.
 */
static void __exit dcbnl_exit(void)
{
	rtnl_unregister(PF_UNSPEC, RTM_GETDCB);
	rtnl_unregister(PF_UNSPEC, RTM_SETDCB);
	dcb_flushapp();
}
module_exit(dcbnl_exit);
1931