/*
 * Copyright (c) 2008, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Author: Lucy Liu <lucy.liu@intel.com>
 */

#include <linux/netdevice.h>
#include <linux/netlink.h>
#include <linux/slab.h>
#include <net/netlink.h>
#include <net/rtnetlink.h>
#include <linux/dcbnl.h>
#include <linux/rtnetlink.h>
#include <net/sock.h>

/**
 * Data Center Bridging (DCB) is a collection of Ethernet enhancements
 * intended to allow network traffic with differing requirements
 * (highly reliable, no drops vs. best effort vs. low latency) to operate
 * and co-exist on Ethernet. Current DCB features are:
 *
 * Enhanced Transmission Selection (aka Priority Grouping [PG]) - provides a
 *   framework for assigning bandwidth guarantees to traffic classes.
 *
 * Priority-based Flow Control (PFC) - provides a flow control mechanism which
 *   can work independently for each 802.1p priority.
 *
 * Congestion Notification - provides a mechanism for end-to-end congestion
 *   control for protocols which do not have built-in congestion management.
 *
 * More information about the emerging standards for these Ethernet features
 * can be found at: http://www.ieee802.org/1/pages/dcbridges.html
 *
 * This file implements an rtnetlink interface to allow configuration of DCB
 * features for capable devices.
 */

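/*
 * A minimal sketch of how a driver might hook into this interface. The
 * foo_* names and the dcb_enabled field are hypothetical; only the
 * getstate/setstate pair is shown, and the remaining dcbnl_rtnl_ops
 * callbacks used below follow the same pattern:
 *
 *	static u8 foo_getstate(struct net_device *netdev)
 *	{
 *		struct foo_adapter *adapter = netdev_priv(netdev);
 *
 *		return adapter->dcb_enabled;
 *	}
 *
 *	static u8 foo_setstate(struct net_device *netdev, u8 state)
 *	{
 *		struct foo_adapter *adapter = netdev_priv(netdev);
 *
 *		adapter->dcb_enabled = !!state;
 *		return 0;
 *	}
 *
 *	static const struct dcbnl_rtnl_ops foo_dcbnl_ops = {
 *		.getstate	= foo_getstate,
 *		.setstate	= foo_setstate,
 *	};
 *
 * and, in the driver's probe/setup path:
 *
 *	netdev->dcbnl_ops = &foo_dcbnl_ops;
 */
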
MODULE_AUTHOR("Lucy Liu, <lucy.liu@intel.com>");
MODULE_DESCRIPTION("Data Center Bridging netlink interface");
MODULE_LICENSE("GPL");

/**************** DCB attribute policies *************************************/

/* DCB netlink attributes policy */
static const struct nla_policy dcbnl_rtnl_policy[DCB_ATTR_MAX + 1] = {
	[DCB_ATTR_IFNAME] = {.type = NLA_NUL_STRING, .len = IFNAMSIZ - 1},
	[DCB_ATTR_STATE] = {.type = NLA_U8},
	[DCB_ATTR_PFC_CFG] = {.type = NLA_NESTED},
	[DCB_ATTR_PG_CFG] = {.type = NLA_NESTED},
	[DCB_ATTR_SET_ALL] = {.type = NLA_U8},
	[DCB_ATTR_PERM_HWADDR] = {.type = NLA_FLAG},
	[DCB_ATTR_CAP] = {.type = NLA_NESTED},
	[DCB_ATTR_PFC_STATE] = {.type = NLA_U8},
	[DCB_ATTR_BCN] = {.type = NLA_NESTED},
	[DCB_ATTR_APP] = {.type = NLA_NESTED},
	[DCB_ATTR_IEEE] = {.type = NLA_NESTED},
};

/* DCB priority flow control to User Priority nested attributes */
static const struct nla_policy dcbnl_pfc_up_nest[DCB_PFC_UP_ATTR_MAX + 1] = {
	[DCB_PFC_UP_ATTR_0] = {.type = NLA_U8},
	[DCB_PFC_UP_ATTR_1] = {.type = NLA_U8},
	[DCB_PFC_UP_ATTR_2] = {.type = NLA_U8},
	[DCB_PFC_UP_ATTR_3] = {.type = NLA_U8},
	[DCB_PFC_UP_ATTR_4] = {.type = NLA_U8},
	[DCB_PFC_UP_ATTR_5] = {.type = NLA_U8},
	[DCB_PFC_UP_ATTR_6] = {.type = NLA_U8},
	[DCB_PFC_UP_ATTR_7] = {.type = NLA_U8},
	[DCB_PFC_UP_ATTR_ALL] = {.type = NLA_FLAG},
};

/* DCB priority grouping nested attributes */
static const struct nla_policy dcbnl_pg_nest[DCB_PG_ATTR_MAX + 1] = {
	[DCB_PG_ATTR_TC_0] = {.type = NLA_NESTED},
	[DCB_PG_ATTR_TC_1] = {.type = NLA_NESTED},
	[DCB_PG_ATTR_TC_2] = {.type = NLA_NESTED},
	[DCB_PG_ATTR_TC_3] = {.type = NLA_NESTED},
	[DCB_PG_ATTR_TC_4] = {.type = NLA_NESTED},
	[DCB_PG_ATTR_TC_5] = {.type = NLA_NESTED},
	[DCB_PG_ATTR_TC_6] = {.type = NLA_NESTED},
	[DCB_PG_ATTR_TC_7] = {.type = NLA_NESTED},
	[DCB_PG_ATTR_TC_ALL] = {.type = NLA_NESTED},
	[DCB_PG_ATTR_BW_ID_0] = {.type = NLA_U8},
	[DCB_PG_ATTR_BW_ID_1] = {.type = NLA_U8},
	[DCB_PG_ATTR_BW_ID_2] = {.type = NLA_U8},
	[DCB_PG_ATTR_BW_ID_3] = {.type = NLA_U8},
	[DCB_PG_ATTR_BW_ID_4] = {.type = NLA_U8},
	[DCB_PG_ATTR_BW_ID_5] = {.type = NLA_U8},
	[DCB_PG_ATTR_BW_ID_6] = {.type = NLA_U8},
	[DCB_PG_ATTR_BW_ID_7] = {.type = NLA_U8},
	[DCB_PG_ATTR_BW_ID_ALL] = {.type = NLA_FLAG},
};

/* DCB traffic class nested attributes. */
static const struct nla_policy dcbnl_tc_param_nest[DCB_TC_ATTR_PARAM_MAX + 1] = {
	[DCB_TC_ATTR_PARAM_PGID] = {.type = NLA_U8},
	[DCB_TC_ATTR_PARAM_UP_MAPPING] = {.type = NLA_U8},
	[DCB_TC_ATTR_PARAM_STRICT_PRIO] = {.type = NLA_U8},
	[DCB_TC_ATTR_PARAM_BW_PCT] = {.type = NLA_U8},
	[DCB_TC_ATTR_PARAM_ALL] = {.type = NLA_FLAG},
};

/* DCB capabilities nested attributes. */
static const struct nla_policy dcbnl_cap_nest[DCB_CAP_ATTR_MAX + 1] = {
	[DCB_CAP_ATTR_ALL] = {.type = NLA_FLAG},
	[DCB_CAP_ATTR_PG] = {.type = NLA_U8},
	[DCB_CAP_ATTR_PFC] = {.type = NLA_U8},
	[DCB_CAP_ATTR_UP2TC] = {.type = NLA_U8},
	[DCB_CAP_ATTR_PG_TCS] = {.type = NLA_U8},
	[DCB_CAP_ATTR_PFC_TCS] = {.type = NLA_U8},
	[DCB_CAP_ATTR_GSP] = {.type = NLA_U8},
	[DCB_CAP_ATTR_BCN] = {.type = NLA_U8},
};

/* DCB number of traffic classes nested attributes. */
static const struct nla_policy dcbnl_numtcs_nest[DCB_NUMTCS_ATTR_MAX + 1] = {
	[DCB_NUMTCS_ATTR_ALL] = {.type = NLA_FLAG},
	[DCB_NUMTCS_ATTR_PG] = {.type = NLA_U8},
	[DCB_NUMTCS_ATTR_PFC] = {.type = NLA_U8},
};

/* DCB BCN nested attributes. */
static const struct nla_policy dcbnl_bcn_nest[DCB_BCN_ATTR_MAX + 1] = {
	[DCB_BCN_ATTR_RP_0] = {.type = NLA_U8},
	[DCB_BCN_ATTR_RP_1] = {.type = NLA_U8},
	[DCB_BCN_ATTR_RP_2] = {.type = NLA_U8},
	[DCB_BCN_ATTR_RP_3] = {.type = NLA_U8},
	[DCB_BCN_ATTR_RP_4] = {.type = NLA_U8},
	[DCB_BCN_ATTR_RP_5] = {.type = NLA_U8},
	[DCB_BCN_ATTR_RP_6] = {.type = NLA_U8},
	[DCB_BCN_ATTR_RP_7] = {.type = NLA_U8},
	[DCB_BCN_ATTR_RP_ALL] = {.type = NLA_FLAG},
	[DCB_BCN_ATTR_BCNA_0] = {.type = NLA_U32},
	[DCB_BCN_ATTR_BCNA_1] = {.type = NLA_U32},
	[DCB_BCN_ATTR_ALPHA] = {.type = NLA_U32},
	[DCB_BCN_ATTR_BETA] = {.type = NLA_U32},
	[DCB_BCN_ATTR_GD] = {.type = NLA_U32},
	[DCB_BCN_ATTR_GI] = {.type = NLA_U32},
	[DCB_BCN_ATTR_TMAX] = {.type = NLA_U32},
	[DCB_BCN_ATTR_TD] = {.type = NLA_U32},
	[DCB_BCN_ATTR_RMIN] = {.type = NLA_U32},
	[DCB_BCN_ATTR_W] = {.type = NLA_U32},
	[DCB_BCN_ATTR_RD] = {.type = NLA_U32},
	[DCB_BCN_ATTR_RU] = {.type = NLA_U32},
	[DCB_BCN_ATTR_WRTT] = {.type = NLA_U32},
	[DCB_BCN_ATTR_RI] = {.type = NLA_U32},
	[DCB_BCN_ATTR_C] = {.type = NLA_U32},
	[DCB_BCN_ATTR_ALL] = {.type = NLA_FLAG},
};

/* DCB APP nested attributes. */
static const struct nla_policy dcbnl_app_nest[DCB_APP_ATTR_MAX + 1] = {
	[DCB_APP_ATTR_IDTYPE] = {.type = NLA_U8},
	[DCB_APP_ATTR_ID] = {.type = NLA_U16},
	[DCB_APP_ATTR_PRIORITY] = {.type = NLA_U8},
};

/* IEEE 802.1Qaz nested attributes. */
static const struct nla_policy dcbnl_ieee_policy[DCB_ATTR_IEEE_MAX + 1] = {
	[DCB_ATTR_IEEE_ETS] = {.len = sizeof(struct ieee_ets)},
	[DCB_ATTR_IEEE_PFC] = {.len = sizeof(struct ieee_pfc)},
	[DCB_ATTR_IEEE_APP_TABLE] = {.type = NLA_NESTED},
};

static const struct nla_policy dcbnl_ieee_app[DCB_ATTR_IEEE_APP_MAX + 1] = {
	[DCB_ATTR_IEEE_APP] = {.len = sizeof(struct dcb_app)},
};

/* standard netlink reply call */
static int dcbnl_reply(u8 value, u8 event, u8 cmd, u8 attr, u32 pid,
		       u32 seq, u16 flags)
{
	struct sk_buff *dcbnl_skb;
	struct dcbmsg *dcb;
	struct nlmsghdr *nlh;
	int ret = -EINVAL;

	dcbnl_skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!dcbnl_skb)
		return ret;

	nlh = NLMSG_NEW(dcbnl_skb, pid, seq, event, sizeof(*dcb), flags);

	dcb = NLMSG_DATA(nlh);
	dcb->dcb_family = AF_UNSPEC;
	dcb->cmd = cmd;
	dcb->dcb_pad = 0;

	ret = nla_put_u8(dcbnl_skb, attr, value);
	if (ret)
		goto err;

	/* end the message, assign the nlmsg_len. */
	nlmsg_end(dcbnl_skb, nlh);
	ret = rtnl_unicast(dcbnl_skb, &init_net, pid);
	if (ret)
		return -EINVAL;

	return 0;
nlmsg_failure:
err:
	kfree_skb(dcbnl_skb);
	return ret;
}

static int dcbnl_getstate(struct net_device *netdev, struct nlattr **tb,
			  u32 pid, u32 seq, u16 flags)
{
	int ret = -EINVAL;

	/* if (!tb[DCB_ATTR_STATE] || !netdev->dcbnl_ops->getstate) */
	if (!netdev->dcbnl_ops->getstate)
		return ret;

	ret = dcbnl_reply(netdev->dcbnl_ops->getstate(netdev), RTM_GETDCB,
			  DCB_CMD_GSTATE, DCB_ATTR_STATE, pid, seq, flags);

	return ret;
}

static int dcbnl_getpfccfg(struct net_device *netdev, struct nlattr **tb,
			   u32 pid, u32 seq, u16 flags)
{
	struct sk_buff *dcbnl_skb;
	struct nlmsghdr *nlh;
	struct dcbmsg *dcb;
	struct nlattr *data[DCB_PFC_UP_ATTR_MAX + 1], *nest;
	u8 value;
	int ret = -EINVAL;
	int i;
	int getall = 0;

	if (!tb[DCB_ATTR_PFC_CFG] || !netdev->dcbnl_ops->getpfccfg)
		return ret;

	ret = nla_parse_nested(data, DCB_PFC_UP_ATTR_MAX,
			       tb[DCB_ATTR_PFC_CFG],
			       dcbnl_pfc_up_nest);
	if (ret)
		goto err_out;

	dcbnl_skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!dcbnl_skb)
		goto err_out;

	nlh = NLMSG_NEW(dcbnl_skb, pid, seq, RTM_GETDCB, sizeof(*dcb), flags);

	dcb = NLMSG_DATA(nlh);
	dcb->dcb_family = AF_UNSPEC;
	dcb->cmd = DCB_CMD_PFC_GCFG;

	nest = nla_nest_start(dcbnl_skb, DCB_ATTR_PFC_CFG);
	if (!nest)
		goto err;

	if (data[DCB_PFC_UP_ATTR_ALL])
		getall = 1;

	for (i = DCB_PFC_UP_ATTR_0; i <= DCB_PFC_UP_ATTR_7; i++) {
		if (!getall && !data[i])
			continue;

		netdev->dcbnl_ops->getpfccfg(netdev, i - DCB_PFC_UP_ATTR_0,
					     &value);
		ret = nla_put_u8(dcbnl_skb, i, value);
		if (ret) {
			nla_nest_cancel(dcbnl_skb, nest);
			goto err;
		}
	}
	nla_nest_end(dcbnl_skb, nest);

	nlmsg_end(dcbnl_skb, nlh);

	ret = rtnl_unicast(dcbnl_skb, &init_net, pid);
	if (ret)
		goto err_out;

	return 0;
nlmsg_failure:
err:
	kfree_skb(dcbnl_skb);
err_out:
	return -EINVAL;
}

static int dcbnl_getperm_hwaddr(struct net_device *netdev, struct nlattr **tb,
				u32 pid, u32 seq, u16 flags)
{
	struct sk_buff *dcbnl_skb;
	struct nlmsghdr *nlh;
	struct dcbmsg *dcb;
	u8 perm_addr[MAX_ADDR_LEN];
	int ret = -EINVAL;

	if (!netdev->dcbnl_ops->getpermhwaddr)
		return ret;

	dcbnl_skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!dcbnl_skb)
		goto err_out;

	nlh = NLMSG_NEW(dcbnl_skb, pid, seq, RTM_GETDCB, sizeof(*dcb), flags);

	dcb = NLMSG_DATA(nlh);
	dcb->dcb_family = AF_UNSPEC;
	dcb->cmd = DCB_CMD_GPERM_HWADDR;

	netdev->dcbnl_ops->getpermhwaddr(netdev, perm_addr);

	ret = nla_put(dcbnl_skb, DCB_ATTR_PERM_HWADDR, sizeof(perm_addr),
		      perm_addr);

	nlmsg_end(dcbnl_skb, nlh);

	ret = rtnl_unicast(dcbnl_skb, &init_net, pid);
	if (ret)
		goto err_out;

	return 0;

nlmsg_failure:
	kfree_skb(dcbnl_skb);
err_out:
	return -EINVAL;
}

static int dcbnl_getcap(struct net_device *netdev, struct nlattr **tb,
			u32 pid, u32 seq, u16 flags)
{
	struct sk_buff *dcbnl_skb;
	struct nlmsghdr *nlh;
	struct dcbmsg *dcb;
	struct nlattr *data[DCB_CAP_ATTR_MAX + 1], *nest;
	u8 value;
	int ret = -EINVAL;
	int i;
	int getall = 0;

	if (!tb[DCB_ATTR_CAP] || !netdev->dcbnl_ops->getcap)
		return ret;

	ret = nla_parse_nested(data, DCB_CAP_ATTR_MAX, tb[DCB_ATTR_CAP],
			       dcbnl_cap_nest);
	if (ret)
		goto err_out;

	dcbnl_skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!dcbnl_skb)
		goto err_out;

	nlh = NLMSG_NEW(dcbnl_skb, pid, seq, RTM_GETDCB, sizeof(*dcb), flags);

	dcb = NLMSG_DATA(nlh);
	dcb->dcb_family = AF_UNSPEC;
	dcb->cmd = DCB_CMD_GCAP;

	nest = nla_nest_start(dcbnl_skb, DCB_ATTR_CAP);
	if (!nest)
		goto err;

	if (data[DCB_CAP_ATTR_ALL])
		getall = 1;

	for (i = DCB_CAP_ATTR_ALL+1; i <= DCB_CAP_ATTR_MAX; i++) {
		if (!getall && !data[i])
			continue;

		if (!netdev->dcbnl_ops->getcap(netdev, i, &value)) {
			ret = nla_put_u8(dcbnl_skb, i, value);
			if (ret) {
				nla_nest_cancel(dcbnl_skb, nest);
				goto err;
			}
		}
	}
	nla_nest_end(dcbnl_skb, nest);

	nlmsg_end(dcbnl_skb, nlh);

	ret = rtnl_unicast(dcbnl_skb, &init_net, pid);
	if (ret)
		goto err_out;

	return 0;
nlmsg_failure:
err:
	kfree_skb(dcbnl_skb);
err_out:
	return -EINVAL;
}

static int dcbnl_getnumtcs(struct net_device *netdev, struct nlattr **tb,
			   u32 pid, u32 seq, u16 flags)
{
	struct sk_buff *dcbnl_skb;
	struct nlmsghdr *nlh;
	struct dcbmsg *dcb;
	struct nlattr *data[DCB_NUMTCS_ATTR_MAX + 1], *nest;
	u8 value;
	int ret = -EINVAL;
	int i;
	int getall = 0;

	if (!tb[DCB_ATTR_NUMTCS] || !netdev->dcbnl_ops->getnumtcs)
		return ret;

	ret = nla_parse_nested(data, DCB_NUMTCS_ATTR_MAX, tb[DCB_ATTR_NUMTCS],
			       dcbnl_numtcs_nest);
	if (ret) {
		ret = -EINVAL;
		goto err_out;
	}

	dcbnl_skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!dcbnl_skb) {
		ret = -EINVAL;
		goto err_out;
	}

	nlh = NLMSG_NEW(dcbnl_skb, pid, seq, RTM_GETDCB, sizeof(*dcb), flags);

	dcb = NLMSG_DATA(nlh);
	dcb->dcb_family = AF_UNSPEC;
	dcb->cmd = DCB_CMD_GNUMTCS;

	nest = nla_nest_start(dcbnl_skb, DCB_ATTR_NUMTCS);
	if (!nest) {
		ret = -EINVAL;
		goto err;
	}

	if (data[DCB_NUMTCS_ATTR_ALL])
		getall = 1;

	for (i = DCB_NUMTCS_ATTR_ALL+1; i <= DCB_NUMTCS_ATTR_MAX; i++) {
		if (!getall && !data[i])
			continue;

		ret = netdev->dcbnl_ops->getnumtcs(netdev, i, &value);
		if (!ret) {
			ret = nla_put_u8(dcbnl_skb, i, value);
			if (ret) {
				nla_nest_cancel(dcbnl_skb, nest);
				ret = -EINVAL;
				goto err;
			}
		} else {
			goto err;
		}
	}
	nla_nest_end(dcbnl_skb, nest);

	nlmsg_end(dcbnl_skb, nlh);

	ret = rtnl_unicast(dcbnl_skb, &init_net, pid);
	if (ret) {
		ret = -EINVAL;
		goto err_out;
	}

	return 0;
nlmsg_failure:
err:
	kfree_skb(dcbnl_skb);
err_out:
	return ret;
}

static int dcbnl_setnumtcs(struct net_device *netdev, struct nlattr **tb,
			   u32 pid, u32 seq, u16 flags)
{
	struct nlattr *data[DCB_NUMTCS_ATTR_MAX + 1];
	int ret = -EINVAL;
	u8 value;
	int i;

	if (!tb[DCB_ATTR_NUMTCS] || !netdev->dcbnl_ops->setnumtcs)
		return ret;

	ret = nla_parse_nested(data, DCB_NUMTCS_ATTR_MAX, tb[DCB_ATTR_NUMTCS],
			       dcbnl_numtcs_nest);
	if (ret) {
		ret = -EINVAL;
		goto err;
	}

	for (i = DCB_NUMTCS_ATTR_ALL+1; i <= DCB_NUMTCS_ATTR_MAX; i++) {
		if (data[i] == NULL)
			continue;

		value = nla_get_u8(data[i]);

		ret = netdev->dcbnl_ops->setnumtcs(netdev, i, value);
		if (ret)
			goto operr;
	}

operr:
	ret = dcbnl_reply(!!ret, RTM_SETDCB, DCB_CMD_SNUMTCS,
			  DCB_ATTR_NUMTCS, pid, seq, flags);

err:
	return ret;
}

static int dcbnl_getpfcstate(struct net_device *netdev, struct nlattr **tb,
			     u32 pid, u32 seq, u16 flags)
{
	int ret = -EINVAL;

	if (!netdev->dcbnl_ops->getpfcstate)
		return ret;

	ret = dcbnl_reply(netdev->dcbnl_ops->getpfcstate(netdev), RTM_GETDCB,
			  DCB_CMD_PFC_GSTATE, DCB_ATTR_PFC_STATE,
			  pid, seq, flags);

	return ret;
}

static int dcbnl_setpfcstate(struct net_device *netdev, struct nlattr **tb,
			     u32 pid, u32 seq, u16 flags)
{
	int ret = -EINVAL;
	u8 value;

	if (!tb[DCB_ATTR_PFC_STATE] || !netdev->dcbnl_ops->setpfcstate)
		return ret;

	value = nla_get_u8(tb[DCB_ATTR_PFC_STATE]);

	netdev->dcbnl_ops->setpfcstate(netdev, value);

	ret = dcbnl_reply(0, RTM_SETDCB, DCB_CMD_PFC_SSTATE, DCB_ATTR_PFC_STATE,
			  pid, seq, flags);

	return ret;
}

static int dcbnl_getapp(struct net_device *netdev, struct nlattr **tb,
			u32 pid, u32 seq, u16 flags)
{
	struct sk_buff *dcbnl_skb;
	struct nlmsghdr *nlh;
	struct dcbmsg *dcb;
	struct nlattr *app_nest;
	struct nlattr *app_tb[DCB_APP_ATTR_MAX + 1];
	u16 id;
	u8 up, idtype;
	int ret = -EINVAL;

	if (!tb[DCB_ATTR_APP] || !netdev->dcbnl_ops->getapp)
		goto out;

	ret = nla_parse_nested(app_tb, DCB_APP_ATTR_MAX, tb[DCB_ATTR_APP],
			       dcbnl_app_nest);
	if (ret)
		goto out;

	ret = -EINVAL;
	/* all must be non-null */
	if ((!app_tb[DCB_APP_ATTR_IDTYPE]) ||
	    (!app_tb[DCB_APP_ATTR_ID]))
		goto out;

	/* either by eth type or by socket number */
	idtype = nla_get_u8(app_tb[DCB_APP_ATTR_IDTYPE]);
	if ((idtype != DCB_APP_IDTYPE_ETHTYPE) &&
	    (idtype != DCB_APP_IDTYPE_PORTNUM))
		goto out;

	id = nla_get_u16(app_tb[DCB_APP_ATTR_ID]);
	up = netdev->dcbnl_ops->getapp(netdev, idtype, id);

	/* send this back */
	dcbnl_skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!dcbnl_skb)
		goto out;

	nlh = NLMSG_NEW(dcbnl_skb, pid, seq, RTM_GETDCB, sizeof(*dcb), flags);

	dcb = NLMSG_DATA(nlh);
	dcb->dcb_family = AF_UNSPEC;
	dcb->cmd = DCB_CMD_GAPP;

	app_nest = nla_nest_start(dcbnl_skb, DCB_ATTR_APP);

	ret = nla_put_u8(dcbnl_skb, DCB_APP_ATTR_IDTYPE, idtype);
	if (ret)
		goto out_cancel;

	ret = nla_put_u16(dcbnl_skb, DCB_APP_ATTR_ID, id);
	if (ret)
		goto out_cancel;

	ret = nla_put_u8(dcbnl_skb, DCB_APP_ATTR_PRIORITY, up);
	if (ret)
		goto out_cancel;

	nla_nest_end(dcbnl_skb, app_nest);
	nlmsg_end(dcbnl_skb, nlh);

	ret = rtnl_unicast(dcbnl_skb, &init_net, pid);
	if (ret)
		goto nlmsg_failure;

	goto out;

out_cancel:
	nla_nest_cancel(dcbnl_skb, app_nest);
nlmsg_failure:
	kfree_skb(dcbnl_skb);
out:
	return ret;
}

static int dcbnl_setapp(struct net_device *netdev, struct nlattr **tb,
			u32 pid, u32 seq, u16 flags)
{
	int ret = -EINVAL;
	u16 id;
	u8 up, idtype;
	struct nlattr *app_tb[DCB_APP_ATTR_MAX + 1];

	if (!tb[DCB_ATTR_APP] || !netdev->dcbnl_ops->setapp)
		goto out;

	ret = nla_parse_nested(app_tb, DCB_APP_ATTR_MAX, tb[DCB_ATTR_APP],
			       dcbnl_app_nest);
	if (ret)
		goto out;

	ret = -EINVAL;
	/* all must be non-null */
	if ((!app_tb[DCB_APP_ATTR_IDTYPE]) ||
	    (!app_tb[DCB_APP_ATTR_ID]) ||
	    (!app_tb[DCB_APP_ATTR_PRIORITY]))
		goto out;

	/* either by eth type or by socket number */
	idtype = nla_get_u8(app_tb[DCB_APP_ATTR_IDTYPE]);
	if ((idtype != DCB_APP_IDTYPE_ETHTYPE) &&
	    (idtype != DCB_APP_IDTYPE_PORTNUM))
		goto out;

	id = nla_get_u16(app_tb[DCB_APP_ATTR_ID]);
	up = nla_get_u8(app_tb[DCB_APP_ATTR_PRIORITY]);

	ret = dcbnl_reply(netdev->dcbnl_ops->setapp(netdev, idtype, id, up),
			  RTM_SETDCB, DCB_CMD_SAPP, DCB_ATTR_APP,
			  pid, seq, flags);

out:
	return ret;
}

static int __dcbnl_pg_getcfg(struct net_device *netdev, struct nlattr **tb,
			     u32 pid, u32 seq, u16 flags, int dir)
{
	struct sk_buff *dcbnl_skb;
	struct nlmsghdr *nlh;
	struct dcbmsg *dcb;
	struct nlattr *pg_nest, *param_nest, *data;
	struct nlattr *pg_tb[DCB_PG_ATTR_MAX + 1];
	struct nlattr *param_tb[DCB_TC_ATTR_PARAM_MAX + 1];
	u8 prio, pgid, tc_pct, up_map;
	int ret = -EINVAL;
	int getall = 0;
	int i;

	if (!tb[DCB_ATTR_PG_CFG] ||
	    !netdev->dcbnl_ops->getpgtccfgtx ||
	    !netdev->dcbnl_ops->getpgtccfgrx ||
	    !netdev->dcbnl_ops->getpgbwgcfgtx ||
	    !netdev->dcbnl_ops->getpgbwgcfgrx)
		return ret;

	ret = nla_parse_nested(pg_tb, DCB_PG_ATTR_MAX,
			       tb[DCB_ATTR_PG_CFG], dcbnl_pg_nest);
	if (ret)
		goto err_out;

	dcbnl_skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!dcbnl_skb)
		goto err_out;

	nlh = NLMSG_NEW(dcbnl_skb, pid, seq, RTM_GETDCB, sizeof(*dcb), flags);

	dcb = NLMSG_DATA(nlh);
	dcb->dcb_family = AF_UNSPEC;
	dcb->cmd = (dir) ? DCB_CMD_PGRX_GCFG : DCB_CMD_PGTX_GCFG;

	pg_nest = nla_nest_start(dcbnl_skb, DCB_ATTR_PG_CFG);
	if (!pg_nest)
		goto err;

	if (pg_tb[DCB_PG_ATTR_TC_ALL])
		getall = 1;

	for (i = DCB_PG_ATTR_TC_0; i <= DCB_PG_ATTR_TC_7; i++) {
		if (!getall && !pg_tb[i])
			continue;

		if (pg_tb[DCB_PG_ATTR_TC_ALL])
			data = pg_tb[DCB_PG_ATTR_TC_ALL];
		else
			data = pg_tb[i];
		ret = nla_parse_nested(param_tb, DCB_TC_ATTR_PARAM_MAX,
				       data, dcbnl_tc_param_nest);
		if (ret)
			goto err_pg;

		param_nest = nla_nest_start(dcbnl_skb, i);
		if (!param_nest)
			goto err_pg;

		pgid = DCB_ATTR_VALUE_UNDEFINED;
		prio = DCB_ATTR_VALUE_UNDEFINED;
		tc_pct = DCB_ATTR_VALUE_UNDEFINED;
		up_map = DCB_ATTR_VALUE_UNDEFINED;

		if (dir) {
			/* Rx */
			netdev->dcbnl_ops->getpgtccfgrx(netdev,
					i - DCB_PG_ATTR_TC_0, &prio,
					&pgid, &tc_pct, &up_map);
		} else {
			/* Tx */
			netdev->dcbnl_ops->getpgtccfgtx(netdev,
					i - DCB_PG_ATTR_TC_0, &prio,
					&pgid, &tc_pct, &up_map);
		}

		if (param_tb[DCB_TC_ATTR_PARAM_PGID] ||
		    param_tb[DCB_TC_ATTR_PARAM_ALL]) {
			ret = nla_put_u8(dcbnl_skb,
					 DCB_TC_ATTR_PARAM_PGID, pgid);
			if (ret)
				goto err_param;
		}
		if (param_tb[DCB_TC_ATTR_PARAM_UP_MAPPING] ||
		    param_tb[DCB_TC_ATTR_PARAM_ALL]) {
			ret = nla_put_u8(dcbnl_skb,
					 DCB_TC_ATTR_PARAM_UP_MAPPING, up_map);
			if (ret)
				goto err_param;
		}
		if (param_tb[DCB_TC_ATTR_PARAM_STRICT_PRIO] ||
		    param_tb[DCB_TC_ATTR_PARAM_ALL]) {
			ret = nla_put_u8(dcbnl_skb,
					 DCB_TC_ATTR_PARAM_STRICT_PRIO, prio);
			if (ret)
				goto err_param;
		}
		if (param_tb[DCB_TC_ATTR_PARAM_BW_PCT] ||
		    param_tb[DCB_TC_ATTR_PARAM_ALL]) {
			ret = nla_put_u8(dcbnl_skb, DCB_TC_ATTR_PARAM_BW_PCT,
					 tc_pct);
			if (ret)
				goto err_param;
		}
		nla_nest_end(dcbnl_skb, param_nest);
	}

	if (pg_tb[DCB_PG_ATTR_BW_ID_ALL])
		getall = 1;
	else
		getall = 0;

	for (i = DCB_PG_ATTR_BW_ID_0; i <= DCB_PG_ATTR_BW_ID_7; i++) {
		if (!getall && !pg_tb[i])
			continue;

		tc_pct = DCB_ATTR_VALUE_UNDEFINED;

		if (dir) {
			/* Rx */
			netdev->dcbnl_ops->getpgbwgcfgrx(netdev,
					i - DCB_PG_ATTR_BW_ID_0, &tc_pct);
		} else {
			/* Tx */
			netdev->dcbnl_ops->getpgbwgcfgtx(netdev,
					i - DCB_PG_ATTR_BW_ID_0, &tc_pct);
		}
		ret = nla_put_u8(dcbnl_skb, i, tc_pct);
		if (ret)
			goto err_pg;
	}

	nla_nest_end(dcbnl_skb, pg_nest);

	nlmsg_end(dcbnl_skb, nlh);

	ret = rtnl_unicast(dcbnl_skb, &init_net, pid);
	if (ret)
		goto err_out;

	return 0;

err_param:
	nla_nest_cancel(dcbnl_skb, param_nest);
err_pg:
	nla_nest_cancel(dcbnl_skb, pg_nest);
nlmsg_failure:
err:
	kfree_skb(dcbnl_skb);
err_out:
	ret = -EINVAL;
	return ret;
}

static int dcbnl_pgtx_getcfg(struct net_device *netdev, struct nlattr **tb,
			     u32 pid, u32 seq, u16 flags)
{
	return __dcbnl_pg_getcfg(netdev, tb, pid, seq, flags, 0);
}

static int dcbnl_pgrx_getcfg(struct net_device *netdev, struct nlattr **tb,
			     u32 pid, u32 seq, u16 flags)
{
	return __dcbnl_pg_getcfg(netdev, tb, pid, seq, flags, 1);
}

static int dcbnl_setstate(struct net_device *netdev, struct nlattr **tb,
			  u32 pid, u32 seq, u16 flags)
{
	int ret = -EINVAL;
	u8 value;

	if (!tb[DCB_ATTR_STATE] || !netdev->dcbnl_ops->setstate)
		return ret;

	value = nla_get_u8(tb[DCB_ATTR_STATE]);

	ret = dcbnl_reply(netdev->dcbnl_ops->setstate(netdev, value),
			  RTM_SETDCB, DCB_CMD_SSTATE, DCB_ATTR_STATE,
			  pid, seq, flags);

	return ret;
}

static int dcbnl_setpfccfg(struct net_device *netdev, struct nlattr **tb,
			   u32 pid, u32 seq, u16 flags)
{
	struct nlattr *data[DCB_PFC_UP_ATTR_MAX + 1];
	int i;
	int ret = -EINVAL;
	u8 value;

	if (!tb[DCB_ATTR_PFC_CFG] || !netdev->dcbnl_ops->setpfccfg)
		return ret;

	ret = nla_parse_nested(data, DCB_PFC_UP_ATTR_MAX,
			       tb[DCB_ATTR_PFC_CFG],
			       dcbnl_pfc_up_nest);
	if (ret)
		goto err;

	for (i = DCB_PFC_UP_ATTR_0; i <= DCB_PFC_UP_ATTR_7; i++) {
		if (data[i] == NULL)
			continue;
		value = nla_get_u8(data[i]);
		netdev->dcbnl_ops->setpfccfg(netdev,
			data[i]->nla_type - DCB_PFC_UP_ATTR_0, value);
	}

	ret = dcbnl_reply(0, RTM_SETDCB, DCB_CMD_PFC_SCFG, DCB_ATTR_PFC_CFG,
			  pid, seq, flags);
err:
	return ret;
}

static int dcbnl_setall(struct net_device *netdev, struct nlattr **tb,
			u32 pid, u32 seq, u16 flags)
{
	int ret = -EINVAL;

	if (!tb[DCB_ATTR_SET_ALL] || !netdev->dcbnl_ops->setall)
		return ret;

	ret = dcbnl_reply(netdev->dcbnl_ops->setall(netdev), RTM_SETDCB,
			  DCB_CMD_SET_ALL, DCB_ATTR_SET_ALL, pid, seq, flags);

	return ret;
}

static int __dcbnl_pg_setcfg(struct net_device *netdev, struct nlattr **tb,
			     u32 pid, u32 seq, u16 flags, int dir)
{
	struct nlattr *pg_tb[DCB_PG_ATTR_MAX + 1];
	struct nlattr *param_tb[DCB_TC_ATTR_PARAM_MAX + 1];
	int ret = -EINVAL;
	int i;
	u8 pgid;
	u8 up_map;
	u8 prio;
	u8 tc_pct;

	if (!tb[DCB_ATTR_PG_CFG] ||
	    !netdev->dcbnl_ops->setpgtccfgtx ||
	    !netdev->dcbnl_ops->setpgtccfgrx ||
	    !netdev->dcbnl_ops->setpgbwgcfgtx ||
	    !netdev->dcbnl_ops->setpgbwgcfgrx)
		return ret;

	ret = nla_parse_nested(pg_tb, DCB_PG_ATTR_MAX,
			       tb[DCB_ATTR_PG_CFG], dcbnl_pg_nest);
	if (ret)
		goto err;

	for (i = DCB_PG_ATTR_TC_0; i <= DCB_PG_ATTR_TC_7; i++) {
		if (!pg_tb[i])
			continue;

		ret = nla_parse_nested(param_tb, DCB_TC_ATTR_PARAM_MAX,
				       pg_tb[i], dcbnl_tc_param_nest);
		if (ret)
			goto err;

		pgid = DCB_ATTR_VALUE_UNDEFINED;
		prio = DCB_ATTR_VALUE_UNDEFINED;
		tc_pct = DCB_ATTR_VALUE_UNDEFINED;
		up_map = DCB_ATTR_VALUE_UNDEFINED;

		if (param_tb[DCB_TC_ATTR_PARAM_STRICT_PRIO])
			prio =
			    nla_get_u8(param_tb[DCB_TC_ATTR_PARAM_STRICT_PRIO]);

		if (param_tb[DCB_TC_ATTR_PARAM_PGID])
			pgid = nla_get_u8(param_tb[DCB_TC_ATTR_PARAM_PGID]);

		if (param_tb[DCB_TC_ATTR_PARAM_BW_PCT])
			tc_pct = nla_get_u8(param_tb[DCB_TC_ATTR_PARAM_BW_PCT]);

		if (param_tb[DCB_TC_ATTR_PARAM_UP_MAPPING])
			up_map =
			    nla_get_u8(param_tb[DCB_TC_ATTR_PARAM_UP_MAPPING]);

		/* dir: Tx = 0, Rx = 1 */
		if (dir) {
			/* Rx */
			netdev->dcbnl_ops->setpgtccfgrx(netdev,
				i - DCB_PG_ATTR_TC_0,
				prio, pgid, tc_pct, up_map);
		} else {
			/* Tx */
			netdev->dcbnl_ops->setpgtccfgtx(netdev,
				i - DCB_PG_ATTR_TC_0,
				prio, pgid, tc_pct, up_map);
		}
	}

	for (i = DCB_PG_ATTR_BW_ID_0; i <= DCB_PG_ATTR_BW_ID_7; i++) {
		if (!pg_tb[i])
			continue;

		tc_pct = nla_get_u8(pg_tb[i]);

		/* dir: Tx = 0, Rx = 1 */
		if (dir) {
			/* Rx */
			netdev->dcbnl_ops->setpgbwgcfgrx(netdev,
				i - DCB_PG_ATTR_BW_ID_0, tc_pct);
		} else {
			/* Tx */
			netdev->dcbnl_ops->setpgbwgcfgtx(netdev,
				i - DCB_PG_ATTR_BW_ID_0, tc_pct);
		}
	}

	ret = dcbnl_reply(0, RTM_SETDCB,
			  (dir ? DCB_CMD_PGRX_SCFG : DCB_CMD_PGTX_SCFG),
			  DCB_ATTR_PG_CFG, pid, seq, flags);

err:
	return ret;
}

static int dcbnl_pgtx_setcfg(struct net_device *netdev, struct nlattr **tb,
			     u32 pid, u32 seq, u16 flags)
{
	return __dcbnl_pg_setcfg(netdev, tb, pid, seq, flags, 0);
}

static int dcbnl_pgrx_setcfg(struct net_device *netdev, struct nlattr **tb,
			     u32 pid, u32 seq, u16 flags)
{
	return __dcbnl_pg_setcfg(netdev, tb, pid, seq, flags, 1);
}

static int dcbnl_bcn_getcfg(struct net_device *netdev, struct nlattr **tb,
			    u32 pid, u32 seq, u16 flags)
{
	struct sk_buff *dcbnl_skb;
	struct nlmsghdr *nlh;
	struct dcbmsg *dcb;
	struct nlattr *bcn_nest;
	struct nlattr *bcn_tb[DCB_BCN_ATTR_MAX + 1];
	u8 value_byte;
	u32 value_integer;
	int ret = -EINVAL;
	bool getall = false;
	int i;

	if (!tb[DCB_ATTR_BCN] || !netdev->dcbnl_ops->getbcnrp ||
	    !netdev->dcbnl_ops->getbcncfg)
		return ret;

	ret = nla_parse_nested(bcn_tb, DCB_BCN_ATTR_MAX,
			       tb[DCB_ATTR_BCN], dcbnl_bcn_nest);
	if (ret)
		goto err_out;

	dcbnl_skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!dcbnl_skb)
		goto err_out;

	nlh = NLMSG_NEW(dcbnl_skb, pid, seq, RTM_GETDCB, sizeof(*dcb), flags);

	dcb = NLMSG_DATA(nlh);
	dcb->dcb_family = AF_UNSPEC;
	dcb->cmd = DCB_CMD_BCN_GCFG;

	bcn_nest = nla_nest_start(dcbnl_skb, DCB_ATTR_BCN);
	if (!bcn_nest)
		goto err;

	if (bcn_tb[DCB_BCN_ATTR_ALL])
		getall = true;

	for (i = DCB_BCN_ATTR_RP_0; i <= DCB_BCN_ATTR_RP_7; i++) {
		if (!getall && !bcn_tb[i])
			continue;

		netdev->dcbnl_ops->getbcnrp(netdev, i - DCB_BCN_ATTR_RP_0,
					    &value_byte);
		ret = nla_put_u8(dcbnl_skb, i, value_byte);
		if (ret)
			goto err_bcn;
	}

	for (i = DCB_BCN_ATTR_BCNA_0; i <= DCB_BCN_ATTR_RI; i++) {
		if (!getall && !bcn_tb[i])
			continue;

		netdev->dcbnl_ops->getbcncfg(netdev, i,
					     &value_integer);
		ret = nla_put_u32(dcbnl_skb, i, value_integer);
		if (ret)
			goto err_bcn;
	}

	nla_nest_end(dcbnl_skb, bcn_nest);

	nlmsg_end(dcbnl_skb, nlh);

	ret = rtnl_unicast(dcbnl_skb, &init_net, pid);
	if (ret)
		goto err_out;

	return 0;

err_bcn:
	nla_nest_cancel(dcbnl_skb, bcn_nest);
nlmsg_failure:
err:
	kfree_skb(dcbnl_skb);
err_out:
	ret = -EINVAL;
	return ret;
}

static int dcbnl_bcn_setcfg(struct net_device *netdev, struct nlattr **tb,
			    u32 pid, u32 seq, u16 flags)
{
	struct nlattr *data[DCB_BCN_ATTR_MAX + 1];
	int i;
	int ret = -EINVAL;
	u8 value_byte;
	u32 value_int;

	if (!tb[DCB_ATTR_BCN] || !netdev->dcbnl_ops->setbcncfg ||
	    !netdev->dcbnl_ops->setbcnrp)
		return ret;

	ret = nla_parse_nested(data, DCB_BCN_ATTR_MAX,
			       tb[DCB_ATTR_BCN],
			       dcbnl_bcn_nest);
	if (ret)
		goto err;

	for (i = DCB_BCN_ATTR_RP_0; i <= DCB_BCN_ATTR_RP_7; i++) {
		if (data[i] == NULL)
			continue;
		value_byte = nla_get_u8(data[i]);
		netdev->dcbnl_ops->setbcnrp(netdev,
			data[i]->nla_type - DCB_BCN_ATTR_RP_0, value_byte);
	}

	for (i = DCB_BCN_ATTR_BCNA_0; i <= DCB_BCN_ATTR_RI; i++) {
		if (data[i] == NULL)
			continue;
		value_int = nla_get_u32(data[i]);
		netdev->dcbnl_ops->setbcncfg(netdev,
			i, value_int);
	}

	ret = dcbnl_reply(0, RTM_SETDCB, DCB_CMD_BCN_SCFG, DCB_ATTR_BCN,
			  pid, seq, flags);
err:
	return ret;
}

/* Handle IEEE 802.1Qaz SET commands. If any requested operation cannot
 * be completed, the entire message is aborted and an error is returned.
 * No attempt is made to reconcile the case where only part of the
 * command could be completed. (See the attribute-layout sketch after
 * this function for the nesting that the command expects.)
 */
static int dcbnl_ieee_set(struct net_device *netdev, struct nlattr **tb,
			  u32 pid, u32 seq, u16 flags)
{
	const struct dcbnl_rtnl_ops *ops = netdev->dcbnl_ops;
	struct nlattr *ieee[DCB_ATTR_IEEE_MAX + 1];
	int err = -EOPNOTSUPP;

	if (!ops)
		goto err;

	err = -EINVAL;
	if (!tb[DCB_ATTR_IEEE])
		goto err;

	err = nla_parse_nested(ieee, DCB_ATTR_IEEE_MAX,
			       tb[DCB_ATTR_IEEE], dcbnl_ieee_policy);
	if (err)
		goto err;

	if (ieee[DCB_ATTR_IEEE_ETS] && ops->ieee_setets) {
		struct ieee_ets *ets = nla_data(ieee[DCB_ATTR_IEEE_ETS]);
		err = ops->ieee_setets(netdev, ets);
		if (err)
			goto err;
	}

	if (ieee[DCB_ATTR_IEEE_PFC] && ops->ieee_setpfc) {
		struct ieee_pfc *pfc = nla_data(ieee[DCB_ATTR_IEEE_PFC]);
		err = ops->ieee_setpfc(netdev, pfc);
		if (err)
			goto err;
	}

	if (ieee[DCB_ATTR_IEEE_APP_TABLE] && ops->ieee_setapp) {
		struct nlattr *attr;
		int rem;

		nla_for_each_nested(attr, ieee[DCB_ATTR_IEEE_APP_TABLE], rem) {
			struct dcb_app *app_data;

			if (nla_type(attr) != DCB_ATTR_IEEE_APP)
				continue;
			app_data = nla_data(attr);
			err = ops->ieee_setapp(netdev, app_data);
			if (err)
				goto err;
		}
	}

err:
	dcbnl_reply(err, RTM_SETDCB, DCB_CMD_IEEE_SET, DCB_ATTR_IEEE,
		    pid, seq, flags);
	return err;
}

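/*
 * For reference, the nesting dcbnl_ieee_set() expects inside DCB_ATTR_IEEE,
 * sketched with the kernel's nla_* helpers (skb is a struct sk_buff being
 * filled; ets, pfc and app are placeholder struct ieee_ets, struct ieee_pfc
 * and struct dcb_app instances; a userspace sender would build the same
 * layout with its own netlink library). Every DCB_ATTR_IEEE_* member is
 * optional, and only the callbacks whose attribute is present are invoked:
 *
 *	struct nlattr *ieee, *app_table;
 *
 *	ieee = nla_nest_start(skb, DCB_ATTR_IEEE);
 *	nla_put(skb, DCB_ATTR_IEEE_ETS, sizeof(ets), &ets);
 *	nla_put(skb, DCB_ATTR_IEEE_PFC, sizeof(pfc), &pfc);
 *	app_table = nla_nest_start(skb, DCB_ATTR_IEEE_APP_TABLE);
 *	nla_put(skb, DCB_ATTR_IEEE_APP, sizeof(app), &app);
 *	nla_nest_end(skb, app_table);
 *	nla_nest_end(skb, ieee);
 */
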
/* Handle IEEE 802.1Qaz GET commands. */
static int dcbnl_ieee_get(struct net_device *netdev, struct nlattr **tb,
			  u32 pid, u32 seq, u16 flags)
{
	struct sk_buff *skb;
	struct nlmsghdr *nlh;
	struct dcbmsg *dcb;
	struct nlattr *ieee;
	const struct dcbnl_rtnl_ops *ops = netdev->dcbnl_ops;
	int err;

	if (!ops)
		return -EOPNOTSUPP;

	skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!skb)
		return -ENOBUFS;

	nlh = NLMSG_NEW(skb, pid, seq, RTM_GETDCB, sizeof(*dcb), flags);

	dcb = NLMSG_DATA(nlh);
	dcb->dcb_family = AF_UNSPEC;
	dcb->cmd = DCB_CMD_IEEE_GET;

	NLA_PUT_STRING(skb, DCB_ATTR_IFNAME, netdev->name);

	ieee = nla_nest_start(skb, DCB_ATTR_IEEE);
	if (!ieee)
		goto nla_put_failure;

	if (ops->ieee_getets) {
		struct ieee_ets ets;
		err = ops->ieee_getets(netdev, &ets);
		if (!err)
			NLA_PUT(skb, DCB_ATTR_IEEE_ETS, sizeof(ets), &ets);
	}

	if (ops->ieee_getpfc) {
		struct ieee_pfc pfc;
		err = ops->ieee_getpfc(netdev, &pfc);
		if (!err)
			NLA_PUT(skb, DCB_ATTR_IEEE_PFC, sizeof(pfc), &pfc);
	}

	nla_nest_end(skb, ieee);
	nlmsg_end(skb, nlh);

	return rtnl_unicast(skb, &init_net, pid);

nla_put_failure:
	nlmsg_cancel(skb, nlh);
nlmsg_failure:
	kfree_skb(skb);
	return -1;
}

static int dcb_doit(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
{
	struct net *net = sock_net(skb->sk);
	struct net_device *netdev;
	struct dcbmsg *dcb = (struct dcbmsg *)NLMSG_DATA(nlh);
	struct nlattr *tb[DCB_ATTR_MAX + 1];
	u32 pid = skb ? NETLINK_CB(skb).pid : 0;
	int ret = -EINVAL;

	if (!net_eq(net, &init_net))
		return -EINVAL;

	ret = nlmsg_parse(nlh, sizeof(*dcb), tb, DCB_ATTR_MAX,
			  dcbnl_rtnl_policy);
	if (ret < 0)
		return ret;

	if (!tb[DCB_ATTR_IFNAME])
		return -EINVAL;

	netdev = dev_get_by_name(&init_net, nla_data(tb[DCB_ATTR_IFNAME]));
	if (!netdev)
		return -EINVAL;

	if (!netdev->dcbnl_ops)
		goto errout;

	switch (dcb->cmd) {
	case DCB_CMD_GSTATE:
		ret = dcbnl_getstate(netdev, tb, pid, nlh->nlmsg_seq,
				     nlh->nlmsg_flags);
		goto out;
	case DCB_CMD_PFC_GCFG:
		ret = dcbnl_getpfccfg(netdev, tb, pid, nlh->nlmsg_seq,
				      nlh->nlmsg_flags);
		goto out;
	case DCB_CMD_GPERM_HWADDR:
		ret = dcbnl_getperm_hwaddr(netdev, tb, pid, nlh->nlmsg_seq,
					   nlh->nlmsg_flags);
		goto out;
	case DCB_CMD_PGTX_GCFG:
		ret = dcbnl_pgtx_getcfg(netdev, tb, pid, nlh->nlmsg_seq,
					nlh->nlmsg_flags);
		goto out;
	case DCB_CMD_PGRX_GCFG:
		ret = dcbnl_pgrx_getcfg(netdev, tb, pid, nlh->nlmsg_seq,
					nlh->nlmsg_flags);
		goto out;
	case DCB_CMD_BCN_GCFG:
		ret = dcbnl_bcn_getcfg(netdev, tb, pid, nlh->nlmsg_seq,
				       nlh->nlmsg_flags);
		goto out;
	case DCB_CMD_SSTATE:
		ret = dcbnl_setstate(netdev, tb, pid, nlh->nlmsg_seq,
				     nlh->nlmsg_flags);
		goto out;
	case DCB_CMD_PFC_SCFG:
		ret = dcbnl_setpfccfg(netdev, tb, pid, nlh->nlmsg_seq,
				      nlh->nlmsg_flags);
		goto out;
	case DCB_CMD_SET_ALL:
		ret = dcbnl_setall(netdev, tb, pid, nlh->nlmsg_seq,
				   nlh->nlmsg_flags);
		goto out;
	case DCB_CMD_PGTX_SCFG:
		ret = dcbnl_pgtx_setcfg(netdev, tb, pid, nlh->nlmsg_seq,
					nlh->nlmsg_flags);
		goto out;
	case DCB_CMD_PGRX_SCFG:
		ret = dcbnl_pgrx_setcfg(netdev, tb, pid, nlh->nlmsg_seq,
					nlh->nlmsg_flags);
		goto out;
	case DCB_CMD_GCAP:
		ret = dcbnl_getcap(netdev, tb, pid, nlh->nlmsg_seq,
				   nlh->nlmsg_flags);
		goto out;
	case DCB_CMD_GNUMTCS:
		ret = dcbnl_getnumtcs(netdev, tb, pid, nlh->nlmsg_seq,
				      nlh->nlmsg_flags);
		goto out;
	case DCB_CMD_SNUMTCS:
		ret = dcbnl_setnumtcs(netdev, tb, pid, nlh->nlmsg_seq,
				      nlh->nlmsg_flags);
		goto out;
	case DCB_CMD_PFC_GSTATE:
		ret = dcbnl_getpfcstate(netdev, tb, pid, nlh->nlmsg_seq,
					nlh->nlmsg_flags);
		goto out;
	case DCB_CMD_PFC_SSTATE:
		ret = dcbnl_setpfcstate(netdev, tb, pid, nlh->nlmsg_seq,
					nlh->nlmsg_flags);
		goto out;
	case DCB_CMD_BCN_SCFG:
		ret = dcbnl_bcn_setcfg(netdev, tb, pid, nlh->nlmsg_seq,
				       nlh->nlmsg_flags);
		goto out;
	case DCB_CMD_GAPP:
		ret = dcbnl_getapp(netdev, tb, pid, nlh->nlmsg_seq,
				   nlh->nlmsg_flags);
		goto out;
	case DCB_CMD_SAPP:
		ret = dcbnl_setapp(netdev, tb, pid, nlh->nlmsg_seq,
				   nlh->nlmsg_flags);
		goto out;
	case DCB_CMD_IEEE_SET:
		ret = dcbnl_ieee_set(netdev, tb, pid, nlh->nlmsg_seq,
				     nlh->nlmsg_flags);
		goto out;
	case DCB_CMD_IEEE_GET:
		ret = dcbnl_ieee_get(netdev, tb, pid, nlh->nlmsg_seq,
				     nlh->nlmsg_flags);
		goto out;
	default:
		goto errout;
	}
errout:
	ret = -EINVAL;
out:
	dev_put(netdev);
	return ret;
}

static int __init dcbnl_init(void)
{
	rtnl_register(PF_UNSPEC, RTM_GETDCB, dcb_doit, NULL);
	rtnl_register(PF_UNSPEC, RTM_SETDCB, dcb_doit, NULL);

	return 0;
}
module_init(dcbnl_init);

static void __exit dcbnl_exit(void)
{
	rtnl_unregister(PF_UNSPEC, RTM_GETDCB);
	rtnl_unregister(PF_UNSPEC, RTM_SETDCB);
}
module_exit(dcbnl_exit);
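
/*
 * A rough userspace sketch (not part of this module) of querying the state
 * handler registered above over a raw NETLINK_ROUTE socket. "eth0" is a
 * placeholder, DCB_ATTR_IFNAME is mandatory (see dcb_doit()), and a real
 * tool would normally use libnl and check errors. It assumes
 * <sys/socket.h>, <linux/netlink.h>, <linux/rtnetlink.h>, <linux/dcbnl.h>
 * and <string.h>:
 *
 *	struct {
 *		struct nlmsghdr nlh;
 *		struct dcbmsg dcb;
 *		char buf[64];
 *	} req;
 *	struct sockaddr_nl sa = { .nl_family = AF_NETLINK };
 *	struct rtattr *rta;
 *	int fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_ROUTE);
 *
 *	memset(&req, 0, sizeof(req));
 *	req.nlh.nlmsg_len = NLMSG_LENGTH(sizeof(struct dcbmsg));
 *	req.nlh.nlmsg_type = RTM_GETDCB;
 *	req.nlh.nlmsg_flags = NLM_F_REQUEST;
 *	req.dcb.dcb_family = AF_UNSPEC;
 *	req.dcb.cmd = DCB_CMD_GSTATE;
 *
 *	rta = (struct rtattr *)((char *)&req + NLMSG_ALIGN(req.nlh.nlmsg_len));
 *	rta->rta_type = DCB_ATTR_IFNAME;
 *	rta->rta_len = RTA_LENGTH(strlen("eth0") + 1);
 *	strcpy(RTA_DATA(rta), "eth0");
 *	req.nlh.nlmsg_len = NLMSG_ALIGN(req.nlh.nlmsg_len) +
 *			    RTA_ALIGN(rta->rta_len);
 *
 *	sendto(fd, &req, req.nlh.nlmsg_len, 0,
 *	       (struct sockaddr *)&sa, sizeof(sa));
 *
 * The reply carries DCB_ATTR_STATE as a u8 (see dcbnl_getstate() and
 * dcbnl_reply() above).
 */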