/*
 * Copyright (c) 2008-2011, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Author: Lucy Liu <lucy.liu@intel.com>
 */

#include <linux/netdevice.h>
#include <linux/netlink.h>
#include <linux/slab.h>
#include <net/netlink.h>
#include <net/rtnetlink.h>
#include <linux/dcbnl.h>
#include <net/dcbevent.h>
#include <linux/rtnetlink.h>
#include <linux/module.h>
#include <net/sock.h>

/*
 * Data Center Bridging (DCB) is a collection of Ethernet enhancements
 * intended to allow network traffic with differing requirements
 * (highly reliable, no drops vs. best effort vs. low latency) to operate
 * and co-exist on Ethernet. Current DCB features are:
 *
 * Enhanced Transmission Selection (aka Priority Grouping [PG]) - provides a
 * framework for assigning bandwidth guarantees to traffic classes.
 *
 * Priority-based Flow Control (PFC) - provides a flow control mechanism which
 * can work independently for each 802.1p priority.
 *
 * Congestion Notification - provides a mechanism for end-to-end congestion
 * control for protocols which do not have built-in congestion management.
 *
 * More information about the emerging standards for these Ethernet features
 * can be found at: http://www.ieee802.org/1/pages/dcbridges.html
 *
 * This file implements an rtnetlink interface to allow configuration of DCB
 * features for capable devices.
 */

MODULE_AUTHOR("Lucy Liu, <lucy.liu@intel.com>");
MODULE_DESCRIPTION("Data Center Bridging netlink interface");
MODULE_LICENSE("GPL");
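
/*
 * Illustrative userspace sketch (not part of this file): a minimal
 * RTM_GETDCB request for DCB_CMD_GSTATE sent over a NETLINK_ROUTE socket.
 * The helper name dcb_query_state() and the 64-byte attribute buffer are
 * this example's own choices; reply parsing is omitted.
 *
 *	#include <linux/dcbnl.h>
 *	#include <linux/netlink.h>
 *	#include <linux/rtnetlink.h>
 *	#include <string.h>
 *	#include <sys/socket.h>
 *	#include <unistd.h>
 *
 *	static int dcb_query_state(const char *ifname)
 *	{
 *		struct {
 *			struct nlmsghdr nlh;
 *			struct dcbmsg dcb;
 *			char attrbuf[64];
 *		} req;
 *		struct rtattr *rta;
 *		int fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_ROUTE);
 *
 *		if (fd < 0)
 *			return -1;
 *		memset(&req, 0, sizeof(req));
 *		req.nlh.nlmsg_len = NLMSG_LENGTH(sizeof(struct dcbmsg));
 *		req.nlh.nlmsg_type = RTM_GETDCB;
 *		req.nlh.nlmsg_flags = NLM_F_REQUEST;
 *		req.dcb.dcb_family = AF_UNSPEC;
 *		req.dcb.cmd = DCB_CMD_GSTATE;
 *		rta = (struct rtattr *)((char *)&req +
 *					NLMSG_ALIGN(req.nlh.nlmsg_len));
 *		rta->rta_type = DCB_ATTR_IFNAME;
 *		rta->rta_len = RTA_LENGTH(strlen(ifname) + 1);
 *		strcpy(RTA_DATA(rta), ifname);
 *		req.nlh.nlmsg_len = NLMSG_ALIGN(req.nlh.nlmsg_len) +
 *				    rta->rta_len;
 *		if (send(fd, &req, req.nlh.nlmsg_len, 0) < 0) {
 *			close(fd);
 *			return -1;
 *		}
 *		close(fd);
 *		return 0;
 *	}
 */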

/**************** DCB attribute policies *************************************/

/* DCB netlink attributes policy */
static const struct nla_policy dcbnl_rtnl_policy[DCB_ATTR_MAX + 1] = {
	[DCB_ATTR_IFNAME] = {.type = NLA_NUL_STRING, .len = IFNAMSIZ - 1},
	[DCB_ATTR_STATE] = {.type = NLA_U8},
	[DCB_ATTR_PFC_CFG] = {.type = NLA_NESTED},
	[DCB_ATTR_PG_CFG] = {.type = NLA_NESTED},
	[DCB_ATTR_SET_ALL] = {.type = NLA_U8},
	[DCB_ATTR_PERM_HWADDR] = {.type = NLA_FLAG},
	[DCB_ATTR_CAP] = {.type = NLA_NESTED},
	[DCB_ATTR_PFC_STATE] = {.type = NLA_U8},
	[DCB_ATTR_BCN] = {.type = NLA_NESTED},
	[DCB_ATTR_APP] = {.type = NLA_NESTED},
	[DCB_ATTR_IEEE] = {.type = NLA_NESTED},
	[DCB_ATTR_DCBX] = {.type = NLA_U8},
	[DCB_ATTR_FEATCFG] = {.type = NLA_NESTED},
};

/* DCB priority flow control to User Priority nested attributes */
static const struct nla_policy dcbnl_pfc_up_nest[DCB_PFC_UP_ATTR_MAX + 1] = {
	[DCB_PFC_UP_ATTR_0] = {.type = NLA_U8},
	[DCB_PFC_UP_ATTR_1] = {.type = NLA_U8},
	[DCB_PFC_UP_ATTR_2] = {.type = NLA_U8},
	[DCB_PFC_UP_ATTR_3] = {.type = NLA_U8},
	[DCB_PFC_UP_ATTR_4] = {.type = NLA_U8},
	[DCB_PFC_UP_ATTR_5] = {.type = NLA_U8},
	[DCB_PFC_UP_ATTR_6] = {.type = NLA_U8},
	[DCB_PFC_UP_ATTR_7] = {.type = NLA_U8},
	[DCB_PFC_UP_ATTR_ALL] = {.type = NLA_FLAG},
};

/* DCB priority grouping nested attributes */
static const struct nla_policy dcbnl_pg_nest[DCB_PG_ATTR_MAX + 1] = {
	[DCB_PG_ATTR_TC_0] = {.type = NLA_NESTED},
	[DCB_PG_ATTR_TC_1] = {.type = NLA_NESTED},
	[DCB_PG_ATTR_TC_2] = {.type = NLA_NESTED},
	[DCB_PG_ATTR_TC_3] = {.type = NLA_NESTED},
	[DCB_PG_ATTR_TC_4] = {.type = NLA_NESTED},
	[DCB_PG_ATTR_TC_5] = {.type = NLA_NESTED},
	[DCB_PG_ATTR_TC_6] = {.type = NLA_NESTED},
	[DCB_PG_ATTR_TC_7] = {.type = NLA_NESTED},
	[DCB_PG_ATTR_TC_ALL] = {.type = NLA_NESTED},
	[DCB_PG_ATTR_BW_ID_0] = {.type = NLA_U8},
	[DCB_PG_ATTR_BW_ID_1] = {.type = NLA_U8},
	[DCB_PG_ATTR_BW_ID_2] = {.type = NLA_U8},
	[DCB_PG_ATTR_BW_ID_3] = {.type = NLA_U8},
	[DCB_PG_ATTR_BW_ID_4] = {.type = NLA_U8},
	[DCB_PG_ATTR_BW_ID_5] = {.type = NLA_U8},
	[DCB_PG_ATTR_BW_ID_6] = {.type = NLA_U8},
	[DCB_PG_ATTR_BW_ID_7] = {.type = NLA_U8},
	[DCB_PG_ATTR_BW_ID_ALL] = {.type = NLA_FLAG},
};

/* DCB traffic class nested attributes. */
static const struct nla_policy dcbnl_tc_param_nest[DCB_TC_ATTR_PARAM_MAX + 1] = {
	[DCB_TC_ATTR_PARAM_PGID] = {.type = NLA_U8},
	[DCB_TC_ATTR_PARAM_UP_MAPPING] = {.type = NLA_U8},
	[DCB_TC_ATTR_PARAM_STRICT_PRIO] = {.type = NLA_U8},
	[DCB_TC_ATTR_PARAM_BW_PCT] = {.type = NLA_U8},
	[DCB_TC_ATTR_PARAM_ALL] = {.type = NLA_FLAG},
};

/* DCB capabilities nested attributes. */
static const struct nla_policy dcbnl_cap_nest[DCB_CAP_ATTR_MAX + 1] = {
	[DCB_CAP_ATTR_ALL] = {.type = NLA_FLAG},
	[DCB_CAP_ATTR_PG] = {.type = NLA_U8},
	[DCB_CAP_ATTR_PFC] = {.type = NLA_U8},
	[DCB_CAP_ATTR_UP2TC] = {.type = NLA_U8},
	[DCB_CAP_ATTR_PG_TCS] = {.type = NLA_U8},
	[DCB_CAP_ATTR_PFC_TCS] = {.type = NLA_U8},
	[DCB_CAP_ATTR_GSP] = {.type = NLA_U8},
	[DCB_CAP_ATTR_BCN] = {.type = NLA_U8},
	[DCB_CAP_ATTR_DCBX] = {.type = NLA_U8},
};

/* DCB number of traffic classes nested attributes. */
static const struct nla_policy dcbnl_numtcs_nest[DCB_NUMTCS_ATTR_MAX + 1] = {
	[DCB_NUMTCS_ATTR_ALL] = {.type = NLA_FLAG},
	[DCB_NUMTCS_ATTR_PG] = {.type = NLA_U8},
	[DCB_NUMTCS_ATTR_PFC] = {.type = NLA_U8},
};

/* DCB BCN nested attributes. */
static const struct nla_policy dcbnl_bcn_nest[DCB_BCN_ATTR_MAX + 1] = {
	[DCB_BCN_ATTR_RP_0] = {.type = NLA_U8},
	[DCB_BCN_ATTR_RP_1] = {.type = NLA_U8},
	[DCB_BCN_ATTR_RP_2] = {.type = NLA_U8},
	[DCB_BCN_ATTR_RP_3] = {.type = NLA_U8},
	[DCB_BCN_ATTR_RP_4] = {.type = NLA_U8},
	[DCB_BCN_ATTR_RP_5] = {.type = NLA_U8},
	[DCB_BCN_ATTR_RP_6] = {.type = NLA_U8},
	[DCB_BCN_ATTR_RP_7] = {.type = NLA_U8},
	[DCB_BCN_ATTR_RP_ALL] = {.type = NLA_FLAG},
	[DCB_BCN_ATTR_BCNA_0] = {.type = NLA_U32},
	[DCB_BCN_ATTR_BCNA_1] = {.type = NLA_U32},
	[DCB_BCN_ATTR_ALPHA] = {.type = NLA_U32},
	[DCB_BCN_ATTR_BETA] = {.type = NLA_U32},
	[DCB_BCN_ATTR_GD] = {.type = NLA_U32},
	[DCB_BCN_ATTR_GI] = {.type = NLA_U32},
	[DCB_BCN_ATTR_TMAX] = {.type = NLA_U32},
	[DCB_BCN_ATTR_TD] = {.type = NLA_U32},
	[DCB_BCN_ATTR_RMIN] = {.type = NLA_U32},
	[DCB_BCN_ATTR_W] = {.type = NLA_U32},
	[DCB_BCN_ATTR_RD] = {.type = NLA_U32},
	[DCB_BCN_ATTR_RU] = {.type = NLA_U32},
	[DCB_BCN_ATTR_WRTT] = {.type = NLA_U32},
	[DCB_BCN_ATTR_RI] = {.type = NLA_U32},
	[DCB_BCN_ATTR_C] = {.type = NLA_U32},
	[DCB_BCN_ATTR_ALL] = {.type = NLA_FLAG},
};

/* DCB APP nested attributes. */
static const struct nla_policy dcbnl_app_nest[DCB_APP_ATTR_MAX + 1] = {
	[DCB_APP_ATTR_IDTYPE] = {.type = NLA_U8},
	[DCB_APP_ATTR_ID] = {.type = NLA_U16},
	[DCB_APP_ATTR_PRIORITY] = {.type = NLA_U8},
};

/* IEEE 802.1Qaz nested attributes. */
static const struct nla_policy dcbnl_ieee_policy[DCB_ATTR_IEEE_MAX + 1] = {
	[DCB_ATTR_IEEE_ETS] = {.len = sizeof(struct ieee_ets)},
	[DCB_ATTR_IEEE_PFC] = {.len = sizeof(struct ieee_pfc)},
	[DCB_ATTR_IEEE_APP_TABLE] = {.type = NLA_NESTED},
	[DCB_ATTR_IEEE_MAXRATE] = {.len = sizeof(struct ieee_maxrate)},
};

static const struct nla_policy dcbnl_ieee_app[DCB_ATTR_IEEE_APP_MAX + 1] = {
	[DCB_ATTR_IEEE_APP] = {.len = sizeof(struct dcb_app)},
};

/* DCB feature configuration nested attributes. */
static const struct nla_policy dcbnl_featcfg_nest[DCB_FEATCFG_ATTR_MAX + 1] = {
	[DCB_FEATCFG_ATTR_ALL] = {.type = NLA_FLAG},
	[DCB_FEATCFG_ATTR_PG] = {.type = NLA_U8},
	[DCB_FEATCFG_ATTR_PFC] = {.type = NLA_U8},
	[DCB_FEATCFG_ATTR_APP] = {.type = NLA_U8},
};

static LIST_HEAD(dcb_app_list);
static DEFINE_SPINLOCK(dcb_lock);
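
/*
 * For reference (declared in include/net/dcbnl.h), the entries stored on
 * dcb_app_list look roughly like the sketch below; the exact layout may
 * differ by kernel version. dcb_lock serializes all additions, deletions,
 * and lookups on that list.
 *
 *	struct dcb_app_type {
 *		int			ifindex;
 *		struct dcb_app		app;
 *		struct list_head	list;
 *		u8			dcbx;
 *	};
 */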

static struct sk_buff *dcbnl_newmsg(int type, u8 cmd, u32 port, u32 seq,
				    u32 flags, struct nlmsghdr **nlhp)
{
	struct sk_buff *skb;
	struct dcbmsg *dcb;
	struct nlmsghdr *nlh;

	skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!skb)
		return NULL;

	nlh = nlmsg_put(skb, port, seq, type, sizeof(*dcb), flags);
	if (!nlh) {
		/* header should always fit, allocation must be buggy */
		BUG();
	}

	dcb = nlmsg_data(nlh);
	dcb->dcb_family = AF_UNSPEC;
	dcb->cmd = cmd;
	dcb->dcb_pad = 0;

	if (nlhp)
		*nlhp = nlh;

	return skb;
}
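
/*
 * The buffer returned by dcbnl_newmsg() carries a netlink header followed
 * by the 4-byte struct dcbmsg; the command handlers below then append an
 * attribute stream:
 *
 *	+-----------------+----------------+------------------------+
 *	| struct nlmsghdr | struct dcbmsg  | nlattr stream (TLVs)   |
 *	+-----------------+----------------+------------------------+
 */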

static int dcbnl_getstate(struct net_device *netdev, struct nlmsghdr *nlh,
			  u32 seq, struct nlattr **tb, struct sk_buff *skb)
{
	/* if (!tb[DCB_ATTR_STATE] || !netdev->dcbnl_ops->getstate) */
	if (!netdev->dcbnl_ops->getstate)
		return -EOPNOTSUPP;

	return nla_put_u8(skb, DCB_ATTR_STATE,
			  netdev->dcbnl_ops->getstate(netdev));
}

static int dcbnl_getpfccfg(struct net_device *netdev, struct nlmsghdr *nlh,
			   u32 seq, struct nlattr **tb, struct sk_buff *skb)
{
	struct nlattr *data[DCB_PFC_UP_ATTR_MAX + 1], *nest;
	u8 value;
	int ret;
	int i;
	int getall = 0;

	if (!tb[DCB_ATTR_PFC_CFG])
		return -EINVAL;

	if (!netdev->dcbnl_ops->getpfccfg)
		return -EOPNOTSUPP;

	ret = nla_parse_nested(data, DCB_PFC_UP_ATTR_MAX,
			       tb[DCB_ATTR_PFC_CFG],
			       dcbnl_pfc_up_nest);
	if (ret)
		return ret;

	nest = nla_nest_start(skb, DCB_ATTR_PFC_CFG);
	if (!nest)
		return -EMSGSIZE;

	if (data[DCB_PFC_UP_ATTR_ALL])
		getall = 1;

	for (i = DCB_PFC_UP_ATTR_0; i <= DCB_PFC_UP_ATTR_7; i++) {
		if (!getall && !data[i])
			continue;

		netdev->dcbnl_ops->getpfccfg(netdev, i - DCB_PFC_UP_ATTR_0,
					     &value);
		ret = nla_put_u8(skb, i, value);
		if (ret) {
			nla_nest_cancel(skb, nest);
			return ret;
		}
	}
	nla_nest_end(skb, nest);

	return 0;
}
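
/*
 * A DCB_CMD_PFC_GCFG reply thus nests one u8 per requested priority:
 *
 *	DCB_ATTR_PFC_CFG (nested)
 *	    DCB_PFC_UP_ATTR_0 = <enable/disable for priority 0>
 *	    ...
 *	    DCB_PFC_UP_ATTR_7 = <enable/disable for priority 7>
 *
 * Passing DCB_PFC_UP_ATTR_ALL in the request selects all eight.
 */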

static int dcbnl_getperm_hwaddr(struct net_device *netdev, struct nlmsghdr *nlh,
				u32 seq, struct nlattr **tb, struct sk_buff *skb)
{
	u8 perm_addr[MAX_ADDR_LEN];

	if (!netdev->dcbnl_ops->getpermhwaddr)
		return -EOPNOTSUPP;

	netdev->dcbnl_ops->getpermhwaddr(netdev, perm_addr);

	return nla_put(skb, DCB_ATTR_PERM_HWADDR, sizeof(perm_addr), perm_addr);
}

static int dcbnl_getcap(struct net_device *netdev, struct nlmsghdr *nlh,
			u32 seq, struct nlattr **tb, struct sk_buff *skb)
{
	struct nlattr *data[DCB_CAP_ATTR_MAX + 1], *nest;
	u8 value;
	int ret;
	int i;
	int getall = 0;

	if (!tb[DCB_ATTR_CAP])
		return -EINVAL;

	if (!netdev->dcbnl_ops->getcap)
		return -EOPNOTSUPP;

	ret = nla_parse_nested(data, DCB_CAP_ATTR_MAX, tb[DCB_ATTR_CAP],
			       dcbnl_cap_nest);
	if (ret)
		return ret;

	nest = nla_nest_start(skb, DCB_ATTR_CAP);
	if (!nest)
		return -EMSGSIZE;

	if (data[DCB_CAP_ATTR_ALL])
		getall = 1;

	for (i = DCB_CAP_ATTR_ALL + 1; i <= DCB_CAP_ATTR_MAX; i++) {
		if (!getall && !data[i])
			continue;

		if (!netdev->dcbnl_ops->getcap(netdev, i, &value)) {
			ret = nla_put_u8(skb, i, value);
			if (ret) {
				nla_nest_cancel(skb, nest);
				return ret;
			}
		}
	}
	nla_nest_end(skb, nest);

	return 0;
}

static int dcbnl_getnumtcs(struct net_device *netdev, struct nlmsghdr *nlh,
			   u32 seq, struct nlattr **tb, struct sk_buff *skb)
{
	struct nlattr *data[DCB_NUMTCS_ATTR_MAX + 1], *nest;
	u8 value;
	int ret;
	int i;
	int getall = 0;

	if (!tb[DCB_ATTR_NUMTCS])
		return -EINVAL;

	if (!netdev->dcbnl_ops->getnumtcs)
		return -EOPNOTSUPP;

	ret = nla_parse_nested(data, DCB_NUMTCS_ATTR_MAX, tb[DCB_ATTR_NUMTCS],
			       dcbnl_numtcs_nest);
	if (ret)
		return ret;

	nest = nla_nest_start(skb, DCB_ATTR_NUMTCS);
	if (!nest)
		return -EMSGSIZE;

	if (data[DCB_NUMTCS_ATTR_ALL])
		getall = 1;

	for (i = DCB_NUMTCS_ATTR_ALL + 1; i <= DCB_NUMTCS_ATTR_MAX; i++) {
		if (!getall && !data[i])
			continue;

		ret = netdev->dcbnl_ops->getnumtcs(netdev, i, &value);
		if (!ret) {
			ret = nla_put_u8(skb, i, value);
			if (ret) {
				nla_nest_cancel(skb, nest);
				return ret;
			}
		} else {
			return -EINVAL;
		}
	}
	nla_nest_end(skb, nest);

	return 0;
}

static int dcbnl_setnumtcs(struct net_device *netdev, struct nlmsghdr *nlh,
			   u32 seq, struct nlattr **tb, struct sk_buff *skb)
{
	struct nlattr *data[DCB_NUMTCS_ATTR_MAX + 1];
	int ret;
	u8 value;
	int i;

	if (!tb[DCB_ATTR_NUMTCS])
		return -EINVAL;

	if (!netdev->dcbnl_ops->setnumtcs)
		return -EOPNOTSUPP;

	ret = nla_parse_nested(data, DCB_NUMTCS_ATTR_MAX, tb[DCB_ATTR_NUMTCS],
			       dcbnl_numtcs_nest);
	if (ret)
		return ret;

	for (i = DCB_NUMTCS_ATTR_ALL + 1; i <= DCB_NUMTCS_ATTR_MAX; i++) {
		if (data[i] == NULL)
			continue;

		value = nla_get_u8(data[i]);

		ret = netdev->dcbnl_ops->setnumtcs(netdev, i, value);
		if (ret)
			break;
	}

	return nla_put_u8(skb, DCB_ATTR_NUMTCS, !!ret);
}

static int dcbnl_getpfcstate(struct net_device *netdev, struct nlmsghdr *nlh,
			     u32 seq, struct nlattr **tb, struct sk_buff *skb)
{
	if (!netdev->dcbnl_ops->getpfcstate)
		return -EOPNOTSUPP;

	return nla_put_u8(skb, DCB_ATTR_PFC_STATE,
			  netdev->dcbnl_ops->getpfcstate(netdev));
}

static int dcbnl_setpfcstate(struct net_device *netdev, struct nlmsghdr *nlh,
			     u32 seq, struct nlattr **tb, struct sk_buff *skb)
{
	u8 value;

	if (!tb[DCB_ATTR_PFC_STATE])
		return -EINVAL;

	if (!netdev->dcbnl_ops->setpfcstate)
		return -EOPNOTSUPP;

	value = nla_get_u8(tb[DCB_ATTR_PFC_STATE]);

	netdev->dcbnl_ops->setpfcstate(netdev, value);

	return nla_put_u8(skb, DCB_ATTR_PFC_STATE, 0);
}

static int dcbnl_getapp(struct net_device *netdev, struct nlmsghdr *nlh,
			u32 seq, struct nlattr **tb, struct sk_buff *skb)
{
	struct nlattr *app_nest;
	struct nlattr *app_tb[DCB_APP_ATTR_MAX + 1];
	u16 id;
	u8 up, idtype;
	int ret;

	if (!tb[DCB_ATTR_APP])
		return -EINVAL;

	ret = nla_parse_nested(app_tb, DCB_APP_ATTR_MAX, tb[DCB_ATTR_APP],
			       dcbnl_app_nest);
	if (ret)
		return ret;

	/* all must be non-null */
	if ((!app_tb[DCB_APP_ATTR_IDTYPE]) ||
	    (!app_tb[DCB_APP_ATTR_ID]))
		return -EINVAL;

	/* either by eth type or by socket number */
	idtype = nla_get_u8(app_tb[DCB_APP_ATTR_IDTYPE]);
	if ((idtype != DCB_APP_IDTYPE_ETHTYPE) &&
	    (idtype != DCB_APP_IDTYPE_PORTNUM))
		return -EINVAL;

	id = nla_get_u16(app_tb[DCB_APP_ATTR_ID]);

	if (netdev->dcbnl_ops->getapp) {
		up = netdev->dcbnl_ops->getapp(netdev, idtype, id);
	} else {
		struct dcb_app app = {
			.selector = idtype,
			.protocol = id,
		};
		up = dcb_getapp(netdev, &app);
	}

	app_nest = nla_nest_start(skb, DCB_ATTR_APP);
	if (!app_nest)
		return -EMSGSIZE;

	ret = nla_put_u8(skb, DCB_APP_ATTR_IDTYPE, idtype);
	if (ret)
		goto out_cancel;

	ret = nla_put_u16(skb, DCB_APP_ATTR_ID, id);
	if (ret)
		goto out_cancel;

	ret = nla_put_u8(skb, DCB_APP_ATTR_PRIORITY, up);
	if (ret)
		goto out_cancel;

	nla_nest_end(skb, app_nest);

	return 0;

out_cancel:
	nla_nest_cancel(skb, app_nest);
	return ret;
}

static int dcbnl_setapp(struct net_device *netdev, struct nlmsghdr *nlh,
			u32 seq, struct nlattr **tb, struct sk_buff *skb)
{
	int ret;
	u16 id;
	u8 up, idtype;
	struct nlattr *app_tb[DCB_APP_ATTR_MAX + 1];

	if (!tb[DCB_ATTR_APP])
		return -EINVAL;

	ret = nla_parse_nested(app_tb, DCB_APP_ATTR_MAX, tb[DCB_ATTR_APP],
			       dcbnl_app_nest);
	if (ret)
		return ret;

	/* all must be non-null */
	if ((!app_tb[DCB_APP_ATTR_IDTYPE]) ||
	    (!app_tb[DCB_APP_ATTR_ID]) ||
	    (!app_tb[DCB_APP_ATTR_PRIORITY]))
		return -EINVAL;

	/* either by eth type or by socket number */
	idtype = nla_get_u8(app_tb[DCB_APP_ATTR_IDTYPE]);
	if ((idtype != DCB_APP_IDTYPE_ETHTYPE) &&
	    (idtype != DCB_APP_IDTYPE_PORTNUM))
		return -EINVAL;

	id = nla_get_u16(app_tb[DCB_APP_ATTR_ID]);
	up = nla_get_u8(app_tb[DCB_APP_ATTR_PRIORITY]);

	if (netdev->dcbnl_ops->setapp) {
		ret = netdev->dcbnl_ops->setapp(netdev, idtype, id, up);
	} else {
		struct dcb_app app;
		app.selector = idtype;
		app.protocol = id;
		app.priority = up;
		ret = dcb_setapp(netdev, &app);
	}

	ret = nla_put_u8(skb, DCB_ATTR_APP, ret);
	dcbnl_cee_notify(netdev, RTM_SETDCB, DCB_CMD_SAPP, seq, 0);

	return ret;
}

static int __dcbnl_pg_getcfg(struct net_device *netdev, struct nlmsghdr *nlh,
			     struct nlattr **tb, struct sk_buff *skb, int dir)
{
	struct nlattr *pg_nest, *param_nest, *data;
	struct nlattr *pg_tb[DCB_PG_ATTR_MAX + 1];
	struct nlattr *param_tb[DCB_TC_ATTR_PARAM_MAX + 1];
	u8 prio, pgid, tc_pct, up_map;
	int ret;
	int getall = 0;
	int i;

	if (!tb[DCB_ATTR_PG_CFG])
		return -EINVAL;

	if (!netdev->dcbnl_ops->getpgtccfgtx ||
	    !netdev->dcbnl_ops->getpgtccfgrx ||
	    !netdev->dcbnl_ops->getpgbwgcfgtx ||
	    !netdev->dcbnl_ops->getpgbwgcfgrx)
		return -EOPNOTSUPP;

	ret = nla_parse_nested(pg_tb, DCB_PG_ATTR_MAX,
			       tb[DCB_ATTR_PG_CFG], dcbnl_pg_nest);
	if (ret)
		return ret;

	pg_nest = nla_nest_start(skb, DCB_ATTR_PG_CFG);
	if (!pg_nest)
		return -EMSGSIZE;

	if (pg_tb[DCB_PG_ATTR_TC_ALL])
		getall = 1;

	for (i = DCB_PG_ATTR_TC_0; i <= DCB_PG_ATTR_TC_7; i++) {
		if (!getall && !pg_tb[i])
			continue;

		if (pg_tb[DCB_PG_ATTR_TC_ALL])
			data = pg_tb[DCB_PG_ATTR_TC_ALL];
		else
			data = pg_tb[i];
		ret = nla_parse_nested(param_tb, DCB_TC_ATTR_PARAM_MAX,
				       data, dcbnl_tc_param_nest);
		if (ret)
			goto err_pg;

		param_nest = nla_nest_start(skb, i);
		if (!param_nest)
			goto err_pg;

		pgid = DCB_ATTR_VALUE_UNDEFINED;
		prio = DCB_ATTR_VALUE_UNDEFINED;
		tc_pct = DCB_ATTR_VALUE_UNDEFINED;
		up_map = DCB_ATTR_VALUE_UNDEFINED;

		if (dir) {
			/* Rx */
			netdev->dcbnl_ops->getpgtccfgrx(netdev,
					i - DCB_PG_ATTR_TC_0, &prio,
					&pgid, &tc_pct, &up_map);
		} else {
			/* Tx */
			netdev->dcbnl_ops->getpgtccfgtx(netdev,
					i - DCB_PG_ATTR_TC_0, &prio,
					&pgid, &tc_pct, &up_map);
		}

		if (param_tb[DCB_TC_ATTR_PARAM_PGID] ||
		    param_tb[DCB_TC_ATTR_PARAM_ALL]) {
			ret = nla_put_u8(skb,
					 DCB_TC_ATTR_PARAM_PGID, pgid);
			if (ret)
				goto err_param;
		}
		if (param_tb[DCB_TC_ATTR_PARAM_UP_MAPPING] ||
		    param_tb[DCB_TC_ATTR_PARAM_ALL]) {
			ret = nla_put_u8(skb,
					 DCB_TC_ATTR_PARAM_UP_MAPPING, up_map);
			if (ret)
				goto err_param;
		}
		if (param_tb[DCB_TC_ATTR_PARAM_STRICT_PRIO] ||
		    param_tb[DCB_TC_ATTR_PARAM_ALL]) {
			ret = nla_put_u8(skb,
					 DCB_TC_ATTR_PARAM_STRICT_PRIO, prio);
			if (ret)
				goto err_param;
		}
		if (param_tb[DCB_TC_ATTR_PARAM_BW_PCT] ||
		    param_tb[DCB_TC_ATTR_PARAM_ALL]) {
			ret = nla_put_u8(skb, DCB_TC_ATTR_PARAM_BW_PCT,
					 tc_pct);
			if (ret)
				goto err_param;
		}
		nla_nest_end(skb, param_nest);
	}

	if (pg_tb[DCB_PG_ATTR_BW_ID_ALL])
		getall = 1;
	else
		getall = 0;

	for (i = DCB_PG_ATTR_BW_ID_0; i <= DCB_PG_ATTR_BW_ID_7; i++) {
		if (!getall && !pg_tb[i])
			continue;

		tc_pct = DCB_ATTR_VALUE_UNDEFINED;

		if (dir) {
			/* Rx */
			netdev->dcbnl_ops->getpgbwgcfgrx(netdev,
					i - DCB_PG_ATTR_BW_ID_0, &tc_pct);
		} else {
			/* Tx */
			netdev->dcbnl_ops->getpgbwgcfgtx(netdev,
					i - DCB_PG_ATTR_BW_ID_0, &tc_pct);
		}
		ret = nla_put_u8(skb, i, tc_pct);
		if (ret)
			goto err_pg;
	}

	nla_nest_end(skb, pg_nest);

	return 0;

err_param:
	nla_nest_cancel(skb, param_nest);
err_pg:
	nla_nest_cancel(skb, pg_nest);

	return -EMSGSIZE;
}

static int dcbnl_pgtx_getcfg(struct net_device *netdev, struct nlmsghdr *nlh,
			     u32 seq, struct nlattr **tb, struct sk_buff *skb)
{
	return __dcbnl_pg_getcfg(netdev, nlh, tb, skb, 0);
}

static int dcbnl_pgrx_getcfg(struct net_device *netdev, struct nlmsghdr *nlh,
			     u32 seq, struct nlattr **tb, struct sk_buff *skb)
{
	return __dcbnl_pg_getcfg(netdev, nlh, tb, skb, 1);
}

static int dcbnl_setstate(struct net_device *netdev, struct nlmsghdr *nlh,
			  u32 seq, struct nlattr **tb, struct sk_buff *skb)
{
	u8 value;

	if (!tb[DCB_ATTR_STATE])
		return -EINVAL;

	if (!netdev->dcbnl_ops->setstate)
		return -EOPNOTSUPP;

	value = nla_get_u8(tb[DCB_ATTR_STATE]);

	return nla_put_u8(skb, DCB_ATTR_STATE,
			  netdev->dcbnl_ops->setstate(netdev, value));
}

static int dcbnl_setpfccfg(struct net_device *netdev, struct nlmsghdr *nlh,
			   u32 seq, struct nlattr **tb, struct sk_buff *skb)
{
	struct nlattr *data[DCB_PFC_UP_ATTR_MAX + 1];
	int i;
	int ret;
	u8 value;

	if (!tb[DCB_ATTR_PFC_CFG])
		return -EINVAL;

	if (!netdev->dcbnl_ops->setpfccfg)
		return -EOPNOTSUPP;

	ret = nla_parse_nested(data, DCB_PFC_UP_ATTR_MAX,
			       tb[DCB_ATTR_PFC_CFG],
			       dcbnl_pfc_up_nest);
	if (ret)
		return ret;

	for (i = DCB_PFC_UP_ATTR_0; i <= DCB_PFC_UP_ATTR_7; i++) {
		if (data[i] == NULL)
			continue;
		value = nla_get_u8(data[i]);
		netdev->dcbnl_ops->setpfccfg(netdev,
			data[i]->nla_type - DCB_PFC_UP_ATTR_0, value);
	}

	return nla_put_u8(skb, DCB_ATTR_PFC_CFG, 0);
}

static int dcbnl_setall(struct net_device *netdev, struct nlmsghdr *nlh,
			u32 seq, struct nlattr **tb, struct sk_buff *skb)
{
	int ret;

	if (!tb[DCB_ATTR_SET_ALL])
		return -EINVAL;

	if (!netdev->dcbnl_ops->setall)
		return -EOPNOTSUPP;

	ret = nla_put_u8(skb, DCB_ATTR_SET_ALL,
			 netdev->dcbnl_ops->setall(netdev));
	dcbnl_cee_notify(netdev, RTM_SETDCB, DCB_CMD_SET_ALL, seq, 0);

	return ret;
}

static int __dcbnl_pg_setcfg(struct net_device *netdev, struct nlmsghdr *nlh,
			     u32 seq, struct nlattr **tb, struct sk_buff *skb,
			     int dir)
{
	struct nlattr *pg_tb[DCB_PG_ATTR_MAX + 1];
	struct nlattr *param_tb[DCB_TC_ATTR_PARAM_MAX + 1];
	int ret;
	int i;
	u8 pgid;
	u8 up_map;
	u8 prio;
	u8 tc_pct;

	if (!tb[DCB_ATTR_PG_CFG])
		return -EINVAL;

	if (!netdev->dcbnl_ops->setpgtccfgtx ||
	    !netdev->dcbnl_ops->setpgtccfgrx ||
	    !netdev->dcbnl_ops->setpgbwgcfgtx ||
	    !netdev->dcbnl_ops->setpgbwgcfgrx)
		return -EOPNOTSUPP;

	ret = nla_parse_nested(pg_tb, DCB_PG_ATTR_MAX,
			       tb[DCB_ATTR_PG_CFG], dcbnl_pg_nest);
	if (ret)
		return ret;

	for (i = DCB_PG_ATTR_TC_0; i <= DCB_PG_ATTR_TC_7; i++) {
		if (!pg_tb[i])
			continue;

		ret = nla_parse_nested(param_tb, DCB_TC_ATTR_PARAM_MAX,
				       pg_tb[i], dcbnl_tc_param_nest);
		if (ret)
			return ret;

		pgid = DCB_ATTR_VALUE_UNDEFINED;
		prio = DCB_ATTR_VALUE_UNDEFINED;
		tc_pct = DCB_ATTR_VALUE_UNDEFINED;
		up_map = DCB_ATTR_VALUE_UNDEFINED;

		if (param_tb[DCB_TC_ATTR_PARAM_STRICT_PRIO])
			prio =
			    nla_get_u8(param_tb[DCB_TC_ATTR_PARAM_STRICT_PRIO]);

		if (param_tb[DCB_TC_ATTR_PARAM_PGID])
			pgid = nla_get_u8(param_tb[DCB_TC_ATTR_PARAM_PGID]);

		if (param_tb[DCB_TC_ATTR_PARAM_BW_PCT])
			tc_pct = nla_get_u8(param_tb[DCB_TC_ATTR_PARAM_BW_PCT]);

		if (param_tb[DCB_TC_ATTR_PARAM_UP_MAPPING])
			up_map =
			     nla_get_u8(param_tb[DCB_TC_ATTR_PARAM_UP_MAPPING]);

		/* dir: Tx = 0, Rx = 1 */
		if (dir) {
			/* Rx */
			netdev->dcbnl_ops->setpgtccfgrx(netdev,
				i - DCB_PG_ATTR_TC_0,
				prio, pgid, tc_pct, up_map);
		} else {
			/* Tx */
			netdev->dcbnl_ops->setpgtccfgtx(netdev,
				i - DCB_PG_ATTR_TC_0,
				prio, pgid, tc_pct, up_map);
		}
	}

	for (i = DCB_PG_ATTR_BW_ID_0; i <= DCB_PG_ATTR_BW_ID_7; i++) {
		if (!pg_tb[i])
			continue;

		tc_pct = nla_get_u8(pg_tb[i]);

		/* dir: Tx = 0, Rx = 1 */
		if (dir) {
			/* Rx */
			netdev->dcbnl_ops->setpgbwgcfgrx(netdev,
					 i - DCB_PG_ATTR_BW_ID_0, tc_pct);
		} else {
			/* Tx */
			netdev->dcbnl_ops->setpgbwgcfgtx(netdev,
					 i - DCB_PG_ATTR_BW_ID_0, tc_pct);
		}
	}

	return nla_put_u8(skb,
			  (dir ? DCB_CMD_PGRX_SCFG : DCB_CMD_PGTX_SCFG), 0);
}

static int dcbnl_pgtx_setcfg(struct net_device *netdev, struct nlmsghdr *nlh,
			     u32 seq, struct nlattr **tb, struct sk_buff *skb)
{
	return __dcbnl_pg_setcfg(netdev, nlh, seq, tb, skb, 0);
}

static int dcbnl_pgrx_setcfg(struct net_device *netdev, struct nlmsghdr *nlh,
			     u32 seq, struct nlattr **tb, struct sk_buff *skb)
{
	return __dcbnl_pg_setcfg(netdev, nlh, seq, tb, skb, 1);
}

static int dcbnl_bcn_getcfg(struct net_device *netdev, struct nlmsghdr *nlh,
			    u32 seq, struct nlattr **tb, struct sk_buff *skb)
{
	struct nlattr *bcn_nest;
	struct nlattr *bcn_tb[DCB_BCN_ATTR_MAX + 1];
	u8 value_byte;
	u32 value_integer;
	int ret;
	bool getall = false;
	int i;

	if (!tb[DCB_ATTR_BCN])
		return -EINVAL;

	if (!netdev->dcbnl_ops->getbcnrp ||
	    !netdev->dcbnl_ops->getbcncfg)
		return -EOPNOTSUPP;

	ret = nla_parse_nested(bcn_tb, DCB_BCN_ATTR_MAX,
			       tb[DCB_ATTR_BCN], dcbnl_bcn_nest);
	if (ret)
		return ret;

	bcn_nest = nla_nest_start(skb, DCB_ATTR_BCN);
	if (!bcn_nest)
		return -EMSGSIZE;

	if (bcn_tb[DCB_BCN_ATTR_ALL])
		getall = true;

	for (i = DCB_BCN_ATTR_RP_0; i <= DCB_BCN_ATTR_RP_7; i++) {
		if (!getall && !bcn_tb[i])
			continue;

		netdev->dcbnl_ops->getbcnrp(netdev, i - DCB_BCN_ATTR_RP_0,
					    &value_byte);
		ret = nla_put_u8(skb, i, value_byte);
		if (ret)
			goto err_bcn;
	}

	for (i = DCB_BCN_ATTR_BCNA_0; i <= DCB_BCN_ATTR_RI; i++) {
		if (!getall && !bcn_tb[i])
			continue;

		netdev->dcbnl_ops->getbcncfg(netdev, i,
					     &value_integer);
		ret = nla_put_u32(skb, i, value_integer);
		if (ret)
			goto err_bcn;
	}

	nla_nest_end(skb, bcn_nest);

	return 0;

err_bcn:
	nla_nest_cancel(skb, bcn_nest);
	return ret;
}

static int dcbnl_bcn_setcfg(struct net_device *netdev, struct nlmsghdr *nlh,
			    u32 seq, struct nlattr **tb, struct sk_buff *skb)
{
	struct nlattr *data[DCB_BCN_ATTR_MAX + 1];
	int i;
	int ret;
	u8 value_byte;
	u32 value_int;

	if (!tb[DCB_ATTR_BCN])
		return -EINVAL;

	if (!netdev->dcbnl_ops->setbcncfg ||
	    !netdev->dcbnl_ops->setbcnrp)
		return -EOPNOTSUPP;

	/* parse with the BCN policy (not the PFC one) so the u32 attributes
	 * beyond the per-priority bytes are validated correctly
	 */
	ret = nla_parse_nested(data, DCB_BCN_ATTR_MAX,
			       tb[DCB_ATTR_BCN],
			       dcbnl_bcn_nest);
	if (ret)
		return ret;

	for (i = DCB_BCN_ATTR_RP_0; i <= DCB_BCN_ATTR_RP_7; i++) {
		if (data[i] == NULL)
			continue;
		value_byte = nla_get_u8(data[i]);
		netdev->dcbnl_ops->setbcnrp(netdev,
			data[i]->nla_type - DCB_BCN_ATTR_RP_0, value_byte);
	}

	for (i = DCB_BCN_ATTR_BCNA_0; i <= DCB_BCN_ATTR_RI; i++) {
		if (data[i] == NULL)
			continue;
		value_int = nla_get_u32(data[i]);
		netdev->dcbnl_ops->setbcncfg(netdev,
					     i, value_int);
	}

	return nla_put_u8(skb, DCB_ATTR_BCN, 0);
}

static int dcbnl_build_peer_app(struct net_device *netdev, struct sk_buff *skb,
				int app_nested_type, int app_info_type,
				int app_entry_type)
{
	struct dcb_peer_app_info info;
	struct dcb_app *table = NULL;
	const struct dcbnl_rtnl_ops *ops = netdev->dcbnl_ops;
	u16 app_count;
	int err;

	/*
	 * retrieve the peer app configuration from the driver. If the driver
	 * handlers fail, exit without doing anything
	 */
	err = ops->peer_getappinfo(netdev, &info, &app_count);
	if (!err && app_count) {
		table = kmalloc(sizeof(struct dcb_app) * app_count, GFP_KERNEL);
		if (!table)
			return -ENOMEM;

		err = ops->peer_getapptable(netdev, table);
	}

	if (!err) {
		u16 i;
		struct nlattr *app;

		/*
		 * build the message, from here on the only possible failure
		 * is due to the skb size
		 */
		err = -EMSGSIZE;

		app = nla_nest_start(skb, app_nested_type);
		if (!app)
			goto nla_put_failure;

		if (app_info_type &&
		    nla_put(skb, app_info_type, sizeof(info), &info))
			goto nla_put_failure;

		for (i = 0; i < app_count; i++) {
			if (nla_put(skb, app_entry_type, sizeof(struct dcb_app),
				    &table[i]))
				goto nla_put_failure;
		}
		nla_nest_end(skb, app);
	}
	err = 0;

nla_put_failure:
	kfree(table);
	return err;
}
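
/*
 * A minimal sketch of the two-call driver contract used above, assuming a
 * hypothetical driver that caches its peer's APP table in priv->peer_app[]
 * with priv->peer_app_count entries (foo_*, struct foo_priv, and the field
 * names are invented for this example):
 *
 *	static int foo_peer_getappinfo(struct net_device *dev,
 *				       struct dcb_peer_app_info *info,
 *				       u16 *app_count)
 *	{
 *		struct foo_priv *priv = netdev_priv(dev);
 *
 *		info->willing = priv->peer_willing;
 *		info->error = 0;
 *		*app_count = priv->peer_app_count;
 *		return 0;
 *	}
 *
 *	static int foo_peer_getapptable(struct net_device *dev,
 *					struct dcb_app *table)
 *	{
 *		struct foo_priv *priv = netdev_priv(dev);
 *
 *		memcpy(table, priv->peer_app,
 *		       priv->peer_app_count * sizeof(*table));
 *		return 0;
 *	}
 */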

/* Handle IEEE 802.1Qaz GET commands. */
static int dcbnl_ieee_fill(struct sk_buff *skb, struct net_device *netdev)
{
	struct nlattr *ieee, *app;
	struct dcb_app_type *itr;
	const struct dcbnl_rtnl_ops *ops = netdev->dcbnl_ops;
	int dcbx;
	int err;

	if (nla_put_string(skb, DCB_ATTR_IFNAME, netdev->name))
		return -EMSGSIZE;

	ieee = nla_nest_start(skb, DCB_ATTR_IEEE);
	if (!ieee)
		return -EMSGSIZE;

	if (ops->ieee_getets) {
		struct ieee_ets ets;
		err = ops->ieee_getets(netdev, &ets);
		if (!err &&
		    nla_put(skb, DCB_ATTR_IEEE_ETS, sizeof(ets), &ets))
			return -EMSGSIZE;
	}

	if (ops->ieee_getmaxrate) {
		struct ieee_maxrate maxrate;
		err = ops->ieee_getmaxrate(netdev, &maxrate);
		if (!err) {
			err = nla_put(skb, DCB_ATTR_IEEE_MAXRATE,
				      sizeof(maxrate), &maxrate);
			if (err)
				return -EMSGSIZE;
		}
	}

	if (ops->ieee_getpfc) {
		struct ieee_pfc pfc;
		err = ops->ieee_getpfc(netdev, &pfc);
		if (!err &&
		    nla_put(skb, DCB_ATTR_IEEE_PFC, sizeof(pfc), &pfc))
			return -EMSGSIZE;
	}

	app = nla_nest_start(skb, DCB_ATTR_IEEE_APP_TABLE);
	if (!app)
		return -EMSGSIZE;

	spin_lock(&dcb_lock);
	list_for_each_entry(itr, &dcb_app_list, list) {
		if (itr->ifindex == netdev->ifindex) {
			err = nla_put(skb, DCB_ATTR_IEEE_APP, sizeof(itr->app),
				      &itr->app);
			if (err) {
				spin_unlock(&dcb_lock);
				return -EMSGSIZE;
			}
		}
	}

	if (netdev->dcbnl_ops->getdcbx)
		dcbx = netdev->dcbnl_ops->getdcbx(netdev);
	else
		dcbx = -EOPNOTSUPP;

	spin_unlock(&dcb_lock);
	nla_nest_end(skb, app);

	/* get peer info if available */
	if (ops->ieee_peer_getets) {
		struct ieee_ets ets;
		err = ops->ieee_peer_getets(netdev, &ets);
		if (!err &&
		    nla_put(skb, DCB_ATTR_IEEE_PEER_ETS, sizeof(ets), &ets))
			return -EMSGSIZE;
	}

	if (ops->ieee_peer_getpfc) {
		struct ieee_pfc pfc;
		err = ops->ieee_peer_getpfc(netdev, &pfc);
		if (!err &&
		    nla_put(skb, DCB_ATTR_IEEE_PEER_PFC, sizeof(pfc), &pfc))
			return -EMSGSIZE;
	}

	if (ops->peer_getappinfo && ops->peer_getapptable) {
		err = dcbnl_build_peer_app(netdev, skb,
					   DCB_ATTR_IEEE_PEER_APP,
					   DCB_ATTR_IEEE_APP_UNSPEC,
					   DCB_ATTR_IEEE_APP);
		if (err)
			return -EMSGSIZE;
	}

	nla_nest_end(skb, ieee);
	if (dcbx >= 0) {
		err = nla_put_u8(skb, DCB_ATTR_DCBX, dcbx);
		if (err)
			return -EMSGSIZE;
	}

	return 0;
}
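
/*
 * A hedged sketch of the driver side consumed by dcbnl_ieee_fill(): a
 * hypothetical ieee_getets callback reporting two traffic classes, ETS
 * with all bandwidth on TC0 and strict priority on TC1, priority 3 mapped
 * to TC1 (foo_ieee_getets and the chosen values are illustrative only):
 *
 *	static int foo_ieee_getets(struct net_device *dev,
 *				   struct ieee_ets *ets)
 *	{
 *		memset(ets, 0, sizeof(*ets));
 *		ets->ets_cap = 2;
 *		ets->tc_tx_bw[0] = 100;
 *		ets->tc_tsa[0] = IEEE_8021QAZ_TSA_ETS;
 *		ets->tc_tsa[1] = IEEE_8021QAZ_TSA_STRICT;
 *		ets->prio_tc[3] = 1;
 *		return 0;
 *	}
 */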

static int dcbnl_cee_pg_fill(struct sk_buff *skb, struct net_device *dev,
			     int dir)
{
	u8 pgid, up_map, prio, tc_pct;
	const struct dcbnl_rtnl_ops *ops = dev->dcbnl_ops;
	int i = dir ? DCB_ATTR_CEE_TX_PG : DCB_ATTR_CEE_RX_PG;
	struct nlattr *pg = nla_nest_start(skb, i);

	if (!pg)
		return -EMSGSIZE;

	for (i = DCB_PG_ATTR_TC_0; i <= DCB_PG_ATTR_TC_7; i++) {
		struct nlattr *tc_nest = nla_nest_start(skb, i);

		if (!tc_nest)
			return -EMSGSIZE;

		pgid = DCB_ATTR_VALUE_UNDEFINED;
		prio = DCB_ATTR_VALUE_UNDEFINED;
		tc_pct = DCB_ATTR_VALUE_UNDEFINED;
		up_map = DCB_ATTR_VALUE_UNDEFINED;

		if (!dir)
			ops->getpgtccfgrx(dev, i - DCB_PG_ATTR_TC_0,
					  &prio, &pgid, &tc_pct, &up_map);
		else
			ops->getpgtccfgtx(dev, i - DCB_PG_ATTR_TC_0,
					  &prio, &pgid, &tc_pct, &up_map);

		if (nla_put_u8(skb, DCB_TC_ATTR_PARAM_PGID, pgid) ||
		    nla_put_u8(skb, DCB_TC_ATTR_PARAM_UP_MAPPING, up_map) ||
		    nla_put_u8(skb, DCB_TC_ATTR_PARAM_STRICT_PRIO, prio) ||
		    nla_put_u8(skb, DCB_TC_ATTR_PARAM_BW_PCT, tc_pct))
			return -EMSGSIZE;
		nla_nest_end(skb, tc_nest);
	}

	for (i = DCB_PG_ATTR_BW_ID_0; i <= DCB_PG_ATTR_BW_ID_7; i++) {
		tc_pct = DCB_ATTR_VALUE_UNDEFINED;

		if (!dir)
			ops->getpgbwgcfgrx(dev, i - DCB_PG_ATTR_BW_ID_0,
					   &tc_pct);
		else
			ops->getpgbwgcfgtx(dev, i - DCB_PG_ATTR_BW_ID_0,
					   &tc_pct);

		if (nla_put_u8(skb, i, tc_pct))
			return -EMSGSIZE;
	}
	nla_nest_end(skb, pg);
	return 0;
}

static int dcbnl_cee_fill(struct sk_buff *skb, struct net_device *netdev)
{
	struct nlattr *cee, *app;
	struct dcb_app_type *itr;
	const struct dcbnl_rtnl_ops *ops = netdev->dcbnl_ops;
	int dcbx, i, err = -EMSGSIZE;
	u8 value;

	if (nla_put_string(skb, DCB_ATTR_IFNAME, netdev->name))
		goto nla_put_failure;

	cee = nla_nest_start(skb, DCB_ATTR_CEE);
	if (!cee)
		goto nla_put_failure;

	/* local pg */
	if (ops->getpgtccfgtx && ops->getpgbwgcfgtx) {
		err = dcbnl_cee_pg_fill(skb, netdev, 1);
		if (err)
			goto nla_put_failure;
	}

	if (ops->getpgtccfgrx && ops->getpgbwgcfgrx) {
		err = dcbnl_cee_pg_fill(skb, netdev, 0);
		if (err)
			goto nla_put_failure;
	}

	/* local pfc */
	if (ops->getpfccfg) {
		struct nlattr *pfc_nest = nla_nest_start(skb, DCB_ATTR_CEE_PFC);

		if (!pfc_nest)
			goto nla_put_failure;

		for (i = DCB_PFC_UP_ATTR_0; i <= DCB_PFC_UP_ATTR_7; i++) {
			ops->getpfccfg(netdev, i - DCB_PFC_UP_ATTR_0, &value);
			if (nla_put_u8(skb, i, value))
				goto nla_put_failure;
		}
		nla_nest_end(skb, pfc_nest);
	}

	/* local app */
	spin_lock(&dcb_lock);
	app = nla_nest_start(skb, DCB_ATTR_CEE_APP_TABLE);
	if (!app)
		goto dcb_unlock;

	list_for_each_entry(itr, &dcb_app_list, list) {
		if (itr->ifindex == netdev->ifindex) {
			struct nlattr *app_nest = nla_nest_start(skb,
								 DCB_ATTR_APP);
			if (!app_nest)
				goto dcb_unlock;

			err = nla_put_u8(skb, DCB_APP_ATTR_IDTYPE,
					 itr->app.selector);
			if (err)
				goto dcb_unlock;

			err = nla_put_u16(skb, DCB_APP_ATTR_ID,
					  itr->app.protocol);
			if (err)
				goto dcb_unlock;

			err = nla_put_u8(skb, DCB_APP_ATTR_PRIORITY,
					 itr->app.priority);
			if (err)
				goto dcb_unlock;

			nla_nest_end(skb, app_nest);
		}
	}
	nla_nest_end(skb, app);

	if (netdev->dcbnl_ops->getdcbx)
		dcbx = netdev->dcbnl_ops->getdcbx(netdev);
	else
		dcbx = -EOPNOTSUPP;

	spin_unlock(&dcb_lock);

	/* features flags */
	if (ops->getfeatcfg) {
		struct nlattr *feat = nla_nest_start(skb, DCB_ATTR_CEE_FEAT);

		if (!feat)
			goto nla_put_failure;

		for (i = DCB_FEATCFG_ATTR_ALL + 1; i <= DCB_FEATCFG_ATTR_MAX;
		     i++)
			if (!ops->getfeatcfg(netdev, i, &value) &&
			    nla_put_u8(skb, i, value))
				goto nla_put_failure;

		nla_nest_end(skb, feat);
	}

	/* peer info if available */
	if (ops->cee_peer_getpg) {
		struct cee_pg pg;
		err = ops->cee_peer_getpg(netdev, &pg);
		if (!err &&
		    nla_put(skb, DCB_ATTR_CEE_PEER_PG, sizeof(pg), &pg))
			goto nla_put_failure;
	}

	if (ops->cee_peer_getpfc) {
		struct cee_pfc pfc;
		err = ops->cee_peer_getpfc(netdev, &pfc);
		if (!err &&
		    nla_put(skb, DCB_ATTR_CEE_PEER_PFC, sizeof(pfc), &pfc))
			goto nla_put_failure;
	}

	if (ops->peer_getappinfo && ops->peer_getapptable) {
		err = dcbnl_build_peer_app(netdev, skb,
					   DCB_ATTR_CEE_PEER_APP_TABLE,
					   DCB_ATTR_CEE_PEER_APP_INFO,
					   DCB_ATTR_CEE_PEER_APP);
		if (err)
			goto nla_put_failure;
	}
	nla_nest_end(skb, cee);

	/* DCBX state */
	if (dcbx >= 0) {
		err = nla_put_u8(skb, DCB_ATTR_DCBX, dcbx);
		if (err)
			goto nla_put_failure;
	}
	return 0;

dcb_unlock:
	spin_unlock(&dcb_lock);
nla_put_failure:
	return err;
}

static int dcbnl_notify(struct net_device *dev, int event, int cmd,
			u32 seq, u32 pid, int dcbx_ver)
{
	struct net *net = dev_net(dev);
	struct sk_buff *skb;
	struct nlmsghdr *nlh;
	const struct dcbnl_rtnl_ops *ops = dev->dcbnl_ops;
	int err;

	if (!ops)
		return -EOPNOTSUPP;

	skb = dcbnl_newmsg(event, cmd, pid, seq, 0, &nlh);
	if (!skb)
		return -ENOBUFS;

	if (dcbx_ver == DCB_CAP_DCBX_VER_IEEE)
		err = dcbnl_ieee_fill(skb, dev);
	else
		err = dcbnl_cee_fill(skb, dev);

	if (err < 0) {
		/* Report error to broadcast listeners */
		nlmsg_free(skb);
		rtnl_set_sk_err(net, RTNLGRP_DCB, err);
	} else {
		/* End nlmsg and notify broadcast listeners */
		nlmsg_end(skb, nlh);
		rtnl_notify(skb, net, 0, RTNLGRP_DCB, NULL, GFP_KERNEL);
	}

	return err;
}

int dcbnl_ieee_notify(struct net_device *dev, int event, int cmd,
		      u32 seq, u32 pid)
{
	return dcbnl_notify(dev, event, cmd, seq, pid, DCB_CAP_DCBX_VER_IEEE);
}
EXPORT_SYMBOL(dcbnl_ieee_notify);

int dcbnl_cee_notify(struct net_device *dev, int event, int cmd,
		     u32 seq, u32 pid)
{
	return dcbnl_notify(dev, event, cmd, seq, pid, DCB_CAP_DCBX_VER_CEE);
}
EXPORT_SYMBOL(dcbnl_cee_notify);
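
/*
 * Drivers whose firmware or embedded LLDP agent changes the operational
 * configuration asynchronously can use these exports to inform listeners,
 * e.g. (a sketch; the event/cmd values mirror the handlers in this file):
 *
 *	dcbnl_ieee_notify(dev, RTM_SETDCB, DCB_CMD_IEEE_SET, 0, 0);
 */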

/* Handle IEEE 802.1Qaz SET commands. If any requested operation cannot
 * be completed, the entire message is aborted and an error value is
 * returned. No attempt is made to reconcile the case where only part of
 * the command can be completed.
 */
static int dcbnl_ieee_set(struct net_device *netdev, struct nlmsghdr *nlh,
			  u32 seq, struct nlattr **tb, struct sk_buff *skb)
{
	const struct dcbnl_rtnl_ops *ops = netdev->dcbnl_ops;
	struct nlattr *ieee[DCB_ATTR_IEEE_MAX + 1];
	int err;

	if (!ops)
		return -EOPNOTSUPP;

	if (!tb[DCB_ATTR_IEEE])
		return -EINVAL;

	err = nla_parse_nested(ieee, DCB_ATTR_IEEE_MAX,
			       tb[DCB_ATTR_IEEE], dcbnl_ieee_policy);
	if (err)
		return err;

	if (ieee[DCB_ATTR_IEEE_ETS] && ops->ieee_setets) {
		struct ieee_ets *ets = nla_data(ieee[DCB_ATTR_IEEE_ETS]);
		err = ops->ieee_setets(netdev, ets);
		if (err)
			goto err;
	}

	if (ieee[DCB_ATTR_IEEE_MAXRATE] && ops->ieee_setmaxrate) {
		struct ieee_maxrate *maxrate =
			nla_data(ieee[DCB_ATTR_IEEE_MAXRATE]);
		err = ops->ieee_setmaxrate(netdev, maxrate);
		if (err)
			goto err;
	}

	if (ieee[DCB_ATTR_IEEE_PFC] && ops->ieee_setpfc) {
		struct ieee_pfc *pfc = nla_data(ieee[DCB_ATTR_IEEE_PFC]);
		err = ops->ieee_setpfc(netdev, pfc);
		if (err)
			goto err;
	}

	if (ieee[DCB_ATTR_IEEE_APP_TABLE]) {
		struct nlattr *attr;
		int rem;

		nla_for_each_nested(attr, ieee[DCB_ATTR_IEEE_APP_TABLE], rem) {
			struct dcb_app *app_data;
			if (nla_type(attr) != DCB_ATTR_IEEE_APP)
				continue;
			app_data = nla_data(attr);
			if (ops->ieee_setapp)
				err = ops->ieee_setapp(netdev, app_data);
			else
				err = dcb_ieee_setapp(netdev, app_data);
			if (err)
				goto err;
		}
	}

err:
	err = nla_put_u8(skb, DCB_ATTR_IEEE, err);
	dcbnl_ieee_notify(netdev, RTM_SETDCB, DCB_CMD_IEEE_SET, seq, 0);
	return err;
}

static int dcbnl_ieee_get(struct net_device *netdev, struct nlmsghdr *nlh,
			  u32 seq, struct nlattr **tb, struct sk_buff *skb)
{
	const struct dcbnl_rtnl_ops *ops = netdev->dcbnl_ops;

	if (!ops)
		return -EOPNOTSUPP;

	return dcbnl_ieee_fill(skb, netdev);
}

static int dcbnl_ieee_del(struct net_device *netdev, struct nlmsghdr *nlh,
			  u32 seq, struct nlattr **tb, struct sk_buff *skb)
{
	const struct dcbnl_rtnl_ops *ops = netdev->dcbnl_ops;
	struct nlattr *ieee[DCB_ATTR_IEEE_MAX + 1];
	int err;

	if (!ops)
		return -EOPNOTSUPP;

	if (!tb[DCB_ATTR_IEEE])
		return -EINVAL;

	err = nla_parse_nested(ieee, DCB_ATTR_IEEE_MAX,
			       tb[DCB_ATTR_IEEE], dcbnl_ieee_policy);
	if (err)
		return err;

	if (ieee[DCB_ATTR_IEEE_APP_TABLE]) {
		struct nlattr *attr;
		int rem;

		nla_for_each_nested(attr, ieee[DCB_ATTR_IEEE_APP_TABLE], rem) {
			struct dcb_app *app_data;
			if (nla_type(attr) != DCB_ATTR_IEEE_APP)
				continue;
			app_data = nla_data(attr);
			if (ops->ieee_delapp)
				err = ops->ieee_delapp(netdev, app_data);
			else
				err = dcb_ieee_delapp(netdev, app_data);
			if (err)
				goto err;
		}
	}

err:
	err = nla_put_u8(skb, DCB_ATTR_IEEE, err);
	dcbnl_ieee_notify(netdev, RTM_SETDCB, DCB_CMD_IEEE_DEL, seq, 0);
	return err;
}

/* DCBX configuration */
static int dcbnl_getdcbx(struct net_device *netdev, struct nlmsghdr *nlh,
			 u32 seq, struct nlattr **tb, struct sk_buff *skb)
{
	if (!netdev->dcbnl_ops->getdcbx)
		return -EOPNOTSUPP;

	return nla_put_u8(skb, DCB_ATTR_DCBX,
			  netdev->dcbnl_ops->getdcbx(netdev));
}

static int dcbnl_setdcbx(struct net_device *netdev, struct nlmsghdr *nlh,
			 u32 seq, struct nlattr **tb, struct sk_buff *skb)
{
	u8 value;

	if (!netdev->dcbnl_ops->setdcbx)
		return -EOPNOTSUPP;

	if (!tb[DCB_ATTR_DCBX])
		return -EINVAL;

	value = nla_get_u8(tb[DCB_ATTR_DCBX]);

	return nla_put_u8(skb, DCB_ATTR_DCBX,
			  netdev->dcbnl_ops->setdcbx(netdev, value));
}
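
/*
 * The u8 exchanged here is a DCB_CAP_DCBX_* bitmask. A host LLDP agent
 * such as lldpad would typically request host-managed IEEE mode with
 * something like:
 *
 *	value = DCB_CAP_DCBX_HOST | DCB_CAP_DCBX_VER_IEEE;
 */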

static int dcbnl_getfeatcfg(struct net_device *netdev, struct nlmsghdr *nlh,
			    u32 seq, struct nlattr **tb, struct sk_buff *skb)
{
	struct nlattr *data[DCB_FEATCFG_ATTR_MAX + 1], *nest;
	u8 value;
	int ret, i;
	int getall = 0;

	if (!netdev->dcbnl_ops->getfeatcfg)
		return -EOPNOTSUPP;

	if (!tb[DCB_ATTR_FEATCFG])
		return -EINVAL;

	ret = nla_parse_nested(data, DCB_FEATCFG_ATTR_MAX, tb[DCB_ATTR_FEATCFG],
			       dcbnl_featcfg_nest);
	if (ret)
		return ret;

	nest = nla_nest_start(skb, DCB_ATTR_FEATCFG);
	if (!nest)
		return -EMSGSIZE;

	if (data[DCB_FEATCFG_ATTR_ALL])
		getall = 1;

	for (i = DCB_FEATCFG_ATTR_ALL + 1; i <= DCB_FEATCFG_ATTR_MAX; i++) {
		if (!getall && !data[i])
			continue;

		ret = netdev->dcbnl_ops->getfeatcfg(netdev, i, &value);
		if (!ret)
			ret = nla_put_u8(skb, i, value);

		if (ret) {
			nla_nest_cancel(skb, nest);
			goto nla_put_failure;
		}
	}
	nla_nest_end(skb, nest);

nla_put_failure:
	return ret;
}

static int dcbnl_setfeatcfg(struct net_device *netdev, struct nlmsghdr *nlh,
			    u32 seq, struct nlattr **tb, struct sk_buff *skb)
{
	struct nlattr *data[DCB_FEATCFG_ATTR_MAX + 1];
	int ret, i;
	u8 value;

	if (!netdev->dcbnl_ops->setfeatcfg)
		return -EOPNOTSUPP;

	if (!tb[DCB_ATTR_FEATCFG])
		return -EINVAL;

	ret = nla_parse_nested(data, DCB_FEATCFG_ATTR_MAX, tb[DCB_ATTR_FEATCFG],
			       dcbnl_featcfg_nest);
	if (ret)
		goto err;

	for (i = DCB_FEATCFG_ATTR_ALL + 1; i <= DCB_FEATCFG_ATTR_MAX; i++) {
		if (data[i] == NULL)
			continue;

		value = nla_get_u8(data[i]);

		ret = netdev->dcbnl_ops->setfeatcfg(netdev, i, value);
		if (ret)
			goto err;
	}
err:
	ret = nla_put_u8(skb, DCB_ATTR_FEATCFG, ret);

	return ret;
}

/* Handle CEE DCBX GET commands. */
static int dcbnl_cee_get(struct net_device *netdev, struct nlmsghdr *nlh,
			 u32 seq, struct nlattr **tb, struct sk_buff *skb)
{
	const struct dcbnl_rtnl_ops *ops = netdev->dcbnl_ops;

	if (!ops)
		return -EOPNOTSUPP;

	return dcbnl_cee_fill(skb, netdev);
}

struct reply_func {
	/* reply netlink message type */
	int	type;

	/* function to fill message contents */
	int   (*cb)(struct net_device *, struct nlmsghdr *, u32,
		    struct nlattr **, struct sk_buff *);
};

static const struct reply_func reply_funcs[DCB_CMD_MAX + 1] = {
	[DCB_CMD_GSTATE]	= { RTM_GETDCB, dcbnl_getstate },
	[DCB_CMD_SSTATE]	= { RTM_SETDCB, dcbnl_setstate },
	[DCB_CMD_PFC_GCFG]	= { RTM_GETDCB, dcbnl_getpfccfg },
	[DCB_CMD_PFC_SCFG]	= { RTM_SETDCB, dcbnl_setpfccfg },
	[DCB_CMD_GPERM_HWADDR]	= { RTM_GETDCB, dcbnl_getperm_hwaddr },
	[DCB_CMD_GCAP]		= { RTM_GETDCB, dcbnl_getcap },
	[DCB_CMD_GNUMTCS]	= { RTM_GETDCB, dcbnl_getnumtcs },
	[DCB_CMD_SNUMTCS]	= { RTM_SETDCB, dcbnl_setnumtcs },
	[DCB_CMD_PFC_GSTATE]	= { RTM_GETDCB, dcbnl_getpfcstate },
	[DCB_CMD_PFC_SSTATE]	= { RTM_SETDCB, dcbnl_setpfcstate },
	[DCB_CMD_GAPP]		= { RTM_GETDCB, dcbnl_getapp },
	[DCB_CMD_SAPP]		= { RTM_SETDCB, dcbnl_setapp },
	[DCB_CMD_PGTX_GCFG]	= { RTM_GETDCB, dcbnl_pgtx_getcfg },
	[DCB_CMD_PGTX_SCFG]	= { RTM_SETDCB, dcbnl_pgtx_setcfg },
	[DCB_CMD_PGRX_GCFG]	= { RTM_GETDCB, dcbnl_pgrx_getcfg },
	[DCB_CMD_PGRX_SCFG]	= { RTM_SETDCB, dcbnl_pgrx_setcfg },
	[DCB_CMD_SET_ALL]	= { RTM_SETDCB, dcbnl_setall },
	[DCB_CMD_BCN_GCFG]	= { RTM_GETDCB, dcbnl_bcn_getcfg },
	[DCB_CMD_BCN_SCFG]	= { RTM_SETDCB, dcbnl_bcn_setcfg },
	[DCB_CMD_IEEE_GET]	= { RTM_GETDCB, dcbnl_ieee_get },
	[DCB_CMD_IEEE_SET]	= { RTM_SETDCB, dcbnl_ieee_set },
	[DCB_CMD_IEEE_DEL]	= { RTM_SETDCB, dcbnl_ieee_del },
	[DCB_CMD_GDCBX]		= { RTM_GETDCB, dcbnl_getdcbx },
	[DCB_CMD_SDCBX]		= { RTM_SETDCB, dcbnl_setdcbx },
	[DCB_CMD_GFEATCFG]	= { RTM_GETDCB, dcbnl_getfeatcfg },
	[DCB_CMD_SFEATCFG]	= { RTM_SETDCB, dcbnl_setfeatcfg },
	[DCB_CMD_CEE_GET]	= { RTM_GETDCB, dcbnl_cee_get },
};

static int dcb_doit(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
{
	struct net *net = sock_net(skb->sk);
	struct net_device *netdev;
	struct dcbmsg *dcb = (struct dcbmsg *)NLMSG_DATA(nlh);
	struct nlattr *tb[DCB_ATTR_MAX + 1];
	u32 pid = skb ? NETLINK_CB(skb).pid : 0;
	int ret = -EINVAL;
	struct sk_buff *reply_skb;
	struct nlmsghdr *reply_nlh;
	const struct reply_func *fn;

	if (!net_eq(net, &init_net))
		return -EINVAL;

	ret = nlmsg_parse(nlh, sizeof(*dcb), tb, DCB_ATTR_MAX,
			  dcbnl_rtnl_policy);
	if (ret < 0)
		return ret;

	if (dcb->cmd > DCB_CMD_MAX)
		return -EINVAL;

	/* check if a reply function has been defined for the command */
	fn = &reply_funcs[dcb->cmd];
	if (!fn->cb)
		return -EOPNOTSUPP;

	if (!tb[DCB_ATTR_IFNAME])
		return -EINVAL;

	netdev = dev_get_by_name(&init_net, nla_data(tb[DCB_ATTR_IFNAME]));
	if (!netdev)
		return -ENODEV;

	if (!netdev->dcbnl_ops) {
		ret = -EOPNOTSUPP;
		goto out;
	}

	reply_skb = dcbnl_newmsg(fn->type, dcb->cmd, pid, nlh->nlmsg_seq,
				 nlh->nlmsg_flags, &reply_nlh);
	if (!reply_skb) {
		ret = -ENOBUFS;
		goto out;
	}

	ret = fn->cb(netdev, nlh, nlh->nlmsg_seq, tb, reply_skb);
	if (ret < 0) {
		nlmsg_free(reply_skb);
		goto out;
	}

	nlmsg_end(reply_skb, reply_nlh);

	ret = rtnl_unicast(reply_skb, &init_net, pid);
out:
	dev_put(netdev);
	return ret;
}

/* Must be called with dcb_lock held; the list is only stable under it. */
static struct dcb_app_type *dcb_app_lookup(const struct dcb_app *app,
					   int ifindex, int prio)
{
	struct dcb_app_type *itr;

	list_for_each_entry(itr, &dcb_app_list, list) {
		if (itr->app.selector == app->selector &&
		    itr->app.protocol == app->protocol &&
		    itr->ifindex == ifindex &&
		    (!prio || itr->app.priority == prio))
			return itr;
	}

	return NULL;
}

/* Must be called with dcb_lock held. */
static int dcb_app_add(const struct dcb_app *app, int ifindex)
{
	struct dcb_app_type *entry;

	entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
	if (!entry)
		return -ENOMEM;

	memcpy(&entry->app, app, sizeof(*app));
	entry->ifindex = ifindex;
	list_add(&entry->list, &dcb_app_list);

	return 0;
}

/**
 * dcb_getapp - retrieve the DCBX application user priority
 *
 * On success returns a non-zero 802.1p user priority bitmap
 * otherwise returns 0 as the invalid user priority bitmap to
 * indicate an error.
 */
u8 dcb_getapp(struct net_device *dev, struct dcb_app *app)
{
	struct dcb_app_type *itr;
	u8 prio = 0;

	spin_lock(&dcb_lock);
	if ((itr = dcb_app_lookup(app, dev->ifindex, 0)))
		prio = itr->app.priority;
	spin_unlock(&dcb_lock);

	return prio;
}
EXPORT_SYMBOL(dcb_getapp);
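
/*
 * Example of the lookup above from a driver's perspective, mapping the
 * FCoE ethertype to its configured user priority (a sketch; error
 * handling omitted):
 *
 *	struct dcb_app app = {
 *		.selector = DCB_APP_IDTYPE_ETHTYPE,
 *		.protocol = ETH_P_FCOE,
 *	};
 *	u8 up = dcb_getapp(dev, &app);
 */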

/**
 * dcb_setapp - add CEE dcb application data to app list
 *
 * Priority 0 is an invalid priority in CEE spec. This routine
 * removes applications from the app list if the priority is
 * set to zero.
 */
int dcb_setapp(struct net_device *dev, struct dcb_app *new)
{
	struct dcb_app_type *itr;
	struct dcb_app_type event;
	int err = 0;

	event.ifindex = dev->ifindex;
	memcpy(&event.app, new, sizeof(event.app));
	if (dev->dcbnl_ops->getdcbx)
		event.dcbx = dev->dcbnl_ops->getdcbx(dev);

	spin_lock(&dcb_lock);
	/* Search for existing match and replace */
	if ((itr = dcb_app_lookup(new, dev->ifindex, 0))) {
		if (new->priority)
			itr->app.priority = new->priority;
		else {
			list_del(&itr->list);
			kfree(itr);
		}
		goto out;
	}
	/* App type does not exist, add new application type */
	if (new->priority)
		err = dcb_app_add(new, dev->ifindex);
out:
	spin_unlock(&dcb_lock);
	if (!err)
		call_dcbevent_notifiers(DCB_APP_EVENT, &event);
	return err;
}
EXPORT_SYMBOL(dcb_setapp);

/**
 * dcb_ieee_getapp_mask - retrieve the IEEE DCB application priority
 *
 * Helper routine which on success returns a non-zero 802.1Qaz user
 * priority bitmap otherwise returns 0 to indicate the dcb_app was
 * not found in APP list.
 */
u8 dcb_ieee_getapp_mask(struct net_device *dev, struct dcb_app *app)
{
	struct dcb_app_type *itr;
	u8 prio = 0;

	spin_lock(&dcb_lock);
	if ((itr = dcb_app_lookup(app, dev->ifindex, 0)))
		prio |= 1 << itr->app.priority;
	spin_unlock(&dcb_lock);

	return prio;
}
EXPORT_SYMBOL(dcb_ieee_getapp_mask);

/**
 * dcb_ieee_setapp - add IEEE dcb application data to app list
 *
 * This adds Application data to the list. Multiple application
 * entries may exist for the same selector and protocol as long
 * as the priorities are different.
 */
int dcb_ieee_setapp(struct net_device *dev, struct dcb_app *new)
{
	struct dcb_app_type event;
	int err = 0;

	event.ifindex = dev->ifindex;
	memcpy(&event.app, new, sizeof(event.app));
	if (dev->dcbnl_ops->getdcbx)
		event.dcbx = dev->dcbnl_ops->getdcbx(dev);

	spin_lock(&dcb_lock);
	/* Search for existing match and abort if found */
	if (dcb_app_lookup(new, dev->ifindex, new->priority)) {
		err = -EEXIST;
		goto out;
	}

	err = dcb_app_add(new, dev->ifindex);
out:
	spin_unlock(&dcb_lock);
	if (!err)
		call_dcbevent_notifiers(DCB_APP_EVENT, &event);
	return err;
}
EXPORT_SYMBOL(dcb_ieee_setapp);
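
/*
 * Sketch of adding an IEEE APP entry, e.g. steering the FCoE ethertype to
 * priority 3 (the chosen priority is illustrative):
 *
 *	struct dcb_app app = {
 *		.selector = IEEE_8021QAZ_APP_SEL_ETHERTYPE,
 *		.protocol = ETH_P_FCOE,
 *		.priority = 3,
 *	};
 *	int err = dcb_ieee_setapp(dev, &app);
 */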

/**
 * dcb_ieee_delapp - delete IEEE dcb application data from list
 *
 * This removes a matching APP data from the APP list
 */
int dcb_ieee_delapp(struct net_device *dev, struct dcb_app *del)
{
	struct dcb_app_type *itr;
	struct dcb_app_type event;
	int err = -ENOENT;

	event.ifindex = dev->ifindex;
	memcpy(&event.app, del, sizeof(event.app));
	if (dev->dcbnl_ops->getdcbx)
		event.dcbx = dev->dcbnl_ops->getdcbx(dev);

	spin_lock(&dcb_lock);
	/* Search for existing match and remove it. */
	if ((itr = dcb_app_lookup(del, dev->ifindex, del->priority))) {
		list_del(&itr->list);
		kfree(itr);
		err = 0;
	}

	spin_unlock(&dcb_lock);
	if (!err)
		call_dcbevent_notifiers(DCB_APP_EVENT, &event);
	return err;
}
EXPORT_SYMBOL(dcb_ieee_delapp);

static void dcb_flushapp(void)
{
	struct dcb_app_type *app;
	struct dcb_app_type *tmp;

	spin_lock(&dcb_lock);
	list_for_each_entry_safe(app, tmp, &dcb_app_list, list) {
		list_del(&app->list);
		kfree(app);
	}
	spin_unlock(&dcb_lock);
}

static int __init dcbnl_init(void)
{
	INIT_LIST_HEAD(&dcb_app_list);

	rtnl_register(PF_UNSPEC, RTM_GETDCB, dcb_doit, NULL, NULL);
	rtnl_register(PF_UNSPEC, RTM_SETDCB, dcb_doit, NULL, NULL);

	return 0;
}
module_init(dcbnl_init);

static void __exit dcbnl_exit(void)
{
	rtnl_unregister(PF_UNSPEC, RTM_GETDCB);
	rtnl_unregister(PF_UNSPEC, RTM_SETDCB);
	dcb_flushapp();
}
module_exit(dcbnl_exit);