dcbnl.c 57 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547548549550551552553554555556557558559560561562563564565566567568569570571572573574575576577578579580581582583584585586587588589590591592593594595596597598599600601602603604605606607608609610611612613614615616617618619620621622623624625626627628629630631632633634635636637638639640641642643644645646647648649650651652653654655656657658659660661662663664665666667668669670671672673674675676677678679680681682683684685686687688689690691692693694695696697698699700701702703704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893894895896897898899900901902903904905906907908909910911912913914915916917918919920921922923924925926927928929930931932933934935936937938939940941942943944945946947948949950951952953954955956957958959960961962963964965966967968969970971972973974975976977978979980981982983984985986987988989990991992993994995996997998999100010011002100310041005100610071008100910101011101210131014101510161017101810191020102110221023102410251026102710281029103010311032103310341035103610371038103910401041104210431044104510461047104810491050105110521053105410551056105710581059106010611062106310641065106610671068106910701071107210731074107510761077107810791080108110821083108410851086108710881089109010911092109310941095109610971098109911001101110211031104110511061107110811091110111111121113111411151116111711181119112011211122112311241125112611271128112911301131113211331134113511361137113811391140114111421143114411451146114711481149115011511152115311541155115611571158115911601161116211631164116511
661167116811691170117111721173117411751176117711781179118011811182118311841185118611871188118911901191119211931194119511961197119811991200120112021203120412051206120712081209121012111212121312141215121612171218121912201221122212231224122512261227122812291230123112321233123412351236123712381239124012411242124312441245124612471248124912501251125212531254125512561257125812591260126112621263126412651266126712681269127012711272127312741275127612771278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177717781779178017811782178317841785178617871788178917901791179217931794179517961797179817991800180118021803180418051806180718081809181018111812181318141815181618171818181918201821182218231824182518261827182818291830183118321833183418351836183718381839184018411842184318441845184618471848184918501851185218531854185518561857185818591860186118621863186418651866186718681869187018711872187318741875187618771878187918801881188218831884188518861887188818891890189118921893189418951896189718981899190019011902190319041905190619071908190919101911191219131914191519161917191819191920192119221923192419251926192719281929193019311932193319341935193619371938193919401941194219431944194519461947194819491950195119521953195419551956195719581959196019611962196319641965196619671968196919701971197219731974197519761977197819791980198119821983198419851986198719881989199019911992199319941995199619971998199920002001200220032004200520062007200820092010201120122013201420152016201720182019202020212022202320242025202620272028202920302031203220332034203520362037203820392040204120422043204420452046204720482049205020512052205320542
0552056205720582059206020612062206320642065206620672068206920702071207220732074207520762077207820792080208120822083208420852086208720882089209020912092209320942095209620972098209921002101210221032104210521062107210821092110211121122113211421152116211721182119212021212122212321242125212621272128212921302131213221332134213521362137213821392140214121422143214421452146214721482149215021512152215321542155215621572158215921602161216221632164216521662167216821692170217121722173217421752176217721782179218021812182218321842185218621872188218921902191219221932194219521962197219821992200220122022203220422052206220722082209221022112212221322142215221622172218221922202221222222232224222522262227222822292230223122322233223422352236223722382239224022412242224322442245224622472248224922502251225222532254225522562257225822592260226122622263226422652266226722682269227022712272227322742275227622772278227922802281228222832284228522862287228822892290229122922293
  1. /*
  2. * Copyright (c) 2008-2011, Intel Corporation.
  3. *
  4. * This program is free software; you can redistribute it and/or modify it
  5. * under the terms and conditions of the GNU General Public License,
  6. * version 2, as published by the Free Software Foundation.
  7. *
  8. * This program is distributed in the hope it will be useful, but WITHOUT
  9. * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  10. * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
  11. * more details.
  12. *
  13. * You should have received a copy of the GNU General Public License along with
  14. * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
  15. * Place - Suite 330, Boston, MA 02111-1307 USA.
  16. *
  17. * Author: Lucy Liu <lucy.liu@intel.com>
  18. */
  19. #include <linux/netdevice.h>
  20. #include <linux/netlink.h>
  21. #include <linux/slab.h>
  22. #include <net/netlink.h>
  23. #include <net/rtnetlink.h>
  24. #include <linux/dcbnl.h>
  25. #include <net/dcbevent.h>
  26. #include <linux/rtnetlink.h>
  27. #include <linux/module.h>
  28. #include <net/sock.h>
  29. /**
  30. * Data Center Bridging (DCB) is a collection of Ethernet enhancements
  31. * intended to allow network traffic with differing requirements
  32. * (highly reliable, no drops vs. best effort vs. low latency) to operate
  33. * and co-exist on Ethernet. Current DCB features are:
  34. *
  35. * Enhanced Transmission Selection (aka Priority Grouping [PG]) - provides a
  36. * framework for assigning bandwidth guarantees to traffic classes.
  37. *
  38. * Priority-based Flow Control (PFC) - provides a flow control mechanism which
  39. * can work independently for each 802.1p priority.
  40. *
  41. * Congestion Notification - provides a mechanism for end-to-end congestion
  42. * control for protocols which do not have built-in congestion management.
  43. *
  44. * More information about the emerging standards for these Ethernet features
  45. * can be found at: http://www.ieee802.org/1/pages/dcbridges.html
  46. *
  47. * This file implements an rtnetlink interface to allow configuration of DCB
  48. * features for capable devices.
  49. */
  50. MODULE_AUTHOR("Lucy Liu, <lucy.liu@intel.com>");
  51. MODULE_DESCRIPTION("Data Center Bridging netlink interface");
  52. MODULE_LICENSE("GPL");
  53. /**************** DCB attribute policies *************************************/
  54. /* DCB netlink attributes policy */
  55. static const struct nla_policy dcbnl_rtnl_policy[DCB_ATTR_MAX + 1] = {
  56. [DCB_ATTR_IFNAME] = {.type = NLA_NUL_STRING, .len = IFNAMSIZ - 1},
  57. [DCB_ATTR_STATE] = {.type = NLA_U8},
  58. [DCB_ATTR_PFC_CFG] = {.type = NLA_NESTED},
  59. [DCB_ATTR_PG_CFG] = {.type = NLA_NESTED},
  60. [DCB_ATTR_SET_ALL] = {.type = NLA_U8},
  61. [DCB_ATTR_PERM_HWADDR] = {.type = NLA_FLAG},
  62. [DCB_ATTR_CAP] = {.type = NLA_NESTED},
  63. [DCB_ATTR_PFC_STATE] = {.type = NLA_U8},
  64. [DCB_ATTR_BCN] = {.type = NLA_NESTED},
  65. [DCB_ATTR_APP] = {.type = NLA_NESTED},
  66. [DCB_ATTR_IEEE] = {.type = NLA_NESTED},
  67. [DCB_ATTR_DCBX] = {.type = NLA_U8},
  68. [DCB_ATTR_FEATCFG] = {.type = NLA_NESTED},
  69. };
  70. /* DCB priority flow control to User Priority nested attributes */
  71. static const struct nla_policy dcbnl_pfc_up_nest[DCB_PFC_UP_ATTR_MAX + 1] = {
  72. [DCB_PFC_UP_ATTR_0] = {.type = NLA_U8},
  73. [DCB_PFC_UP_ATTR_1] = {.type = NLA_U8},
  74. [DCB_PFC_UP_ATTR_2] = {.type = NLA_U8},
  75. [DCB_PFC_UP_ATTR_3] = {.type = NLA_U8},
  76. [DCB_PFC_UP_ATTR_4] = {.type = NLA_U8},
  77. [DCB_PFC_UP_ATTR_5] = {.type = NLA_U8},
  78. [DCB_PFC_UP_ATTR_6] = {.type = NLA_U8},
  79. [DCB_PFC_UP_ATTR_7] = {.type = NLA_U8},
  80. [DCB_PFC_UP_ATTR_ALL] = {.type = NLA_FLAG},
  81. };
  82. /* DCB priority grouping nested attributes */
  83. static const struct nla_policy dcbnl_pg_nest[DCB_PG_ATTR_MAX + 1] = {
  84. [DCB_PG_ATTR_TC_0] = {.type = NLA_NESTED},
  85. [DCB_PG_ATTR_TC_1] = {.type = NLA_NESTED},
  86. [DCB_PG_ATTR_TC_2] = {.type = NLA_NESTED},
  87. [DCB_PG_ATTR_TC_3] = {.type = NLA_NESTED},
  88. [DCB_PG_ATTR_TC_4] = {.type = NLA_NESTED},
  89. [DCB_PG_ATTR_TC_5] = {.type = NLA_NESTED},
  90. [DCB_PG_ATTR_TC_6] = {.type = NLA_NESTED},
  91. [DCB_PG_ATTR_TC_7] = {.type = NLA_NESTED},
  92. [DCB_PG_ATTR_TC_ALL] = {.type = NLA_NESTED},
  93. [DCB_PG_ATTR_BW_ID_0] = {.type = NLA_U8},
  94. [DCB_PG_ATTR_BW_ID_1] = {.type = NLA_U8},
  95. [DCB_PG_ATTR_BW_ID_2] = {.type = NLA_U8},
  96. [DCB_PG_ATTR_BW_ID_3] = {.type = NLA_U8},
  97. [DCB_PG_ATTR_BW_ID_4] = {.type = NLA_U8},
  98. [DCB_PG_ATTR_BW_ID_5] = {.type = NLA_U8},
  99. [DCB_PG_ATTR_BW_ID_6] = {.type = NLA_U8},
  100. [DCB_PG_ATTR_BW_ID_7] = {.type = NLA_U8},
  101. [DCB_PG_ATTR_BW_ID_ALL] = {.type = NLA_FLAG},
  102. };
  103. /* DCB traffic class nested attributes. */
  104. static const struct nla_policy dcbnl_tc_param_nest[DCB_TC_ATTR_PARAM_MAX + 1] = {
  105. [DCB_TC_ATTR_PARAM_PGID] = {.type = NLA_U8},
  106. [DCB_TC_ATTR_PARAM_UP_MAPPING] = {.type = NLA_U8},
  107. [DCB_TC_ATTR_PARAM_STRICT_PRIO] = {.type = NLA_U8},
  108. [DCB_TC_ATTR_PARAM_BW_PCT] = {.type = NLA_U8},
  109. [DCB_TC_ATTR_PARAM_ALL] = {.type = NLA_FLAG},
  110. };
  111. /* DCB capabilities nested attributes. */
  112. static const struct nla_policy dcbnl_cap_nest[DCB_CAP_ATTR_MAX + 1] = {
  113. [DCB_CAP_ATTR_ALL] = {.type = NLA_FLAG},
  114. [DCB_CAP_ATTR_PG] = {.type = NLA_U8},
  115. [DCB_CAP_ATTR_PFC] = {.type = NLA_U8},
  116. [DCB_CAP_ATTR_UP2TC] = {.type = NLA_U8},
  117. [DCB_CAP_ATTR_PG_TCS] = {.type = NLA_U8},
  118. [DCB_CAP_ATTR_PFC_TCS] = {.type = NLA_U8},
  119. [DCB_CAP_ATTR_GSP] = {.type = NLA_U8},
  120. [DCB_CAP_ATTR_BCN] = {.type = NLA_U8},
  121. [DCB_CAP_ATTR_DCBX] = {.type = NLA_U8},
  122. };
  123. /* DCB capabilities nested attributes. */
  124. static const struct nla_policy dcbnl_numtcs_nest[DCB_NUMTCS_ATTR_MAX + 1] = {
  125. [DCB_NUMTCS_ATTR_ALL] = {.type = NLA_FLAG},
  126. [DCB_NUMTCS_ATTR_PG] = {.type = NLA_U8},
  127. [DCB_NUMTCS_ATTR_PFC] = {.type = NLA_U8},
  128. };
  129. /* DCB BCN nested attributes. */
  130. static const struct nla_policy dcbnl_bcn_nest[DCB_BCN_ATTR_MAX + 1] = {
  131. [DCB_BCN_ATTR_RP_0] = {.type = NLA_U8},
  132. [DCB_BCN_ATTR_RP_1] = {.type = NLA_U8},
  133. [DCB_BCN_ATTR_RP_2] = {.type = NLA_U8},
  134. [DCB_BCN_ATTR_RP_3] = {.type = NLA_U8},
  135. [DCB_BCN_ATTR_RP_4] = {.type = NLA_U8},
  136. [DCB_BCN_ATTR_RP_5] = {.type = NLA_U8},
  137. [DCB_BCN_ATTR_RP_6] = {.type = NLA_U8},
  138. [DCB_BCN_ATTR_RP_7] = {.type = NLA_U8},
  139. [DCB_BCN_ATTR_RP_ALL] = {.type = NLA_FLAG},
  140. [DCB_BCN_ATTR_BCNA_0] = {.type = NLA_U32},
  141. [DCB_BCN_ATTR_BCNA_1] = {.type = NLA_U32},
  142. [DCB_BCN_ATTR_ALPHA] = {.type = NLA_U32},
  143. [DCB_BCN_ATTR_BETA] = {.type = NLA_U32},
  144. [DCB_BCN_ATTR_GD] = {.type = NLA_U32},
  145. [DCB_BCN_ATTR_GI] = {.type = NLA_U32},
  146. [DCB_BCN_ATTR_TMAX] = {.type = NLA_U32},
  147. [DCB_BCN_ATTR_TD] = {.type = NLA_U32},
  148. [DCB_BCN_ATTR_RMIN] = {.type = NLA_U32},
  149. [DCB_BCN_ATTR_W] = {.type = NLA_U32},
  150. [DCB_BCN_ATTR_RD] = {.type = NLA_U32},
  151. [DCB_BCN_ATTR_RU] = {.type = NLA_U32},
  152. [DCB_BCN_ATTR_WRTT] = {.type = NLA_U32},
  153. [DCB_BCN_ATTR_RI] = {.type = NLA_U32},
  154. [DCB_BCN_ATTR_C] = {.type = NLA_U32},
  155. [DCB_BCN_ATTR_ALL] = {.type = NLA_FLAG},
  156. };
  157. /* DCB APP nested attributes. */
  158. static const struct nla_policy dcbnl_app_nest[DCB_APP_ATTR_MAX + 1] = {
  159. [DCB_APP_ATTR_IDTYPE] = {.type = NLA_U8},
  160. [DCB_APP_ATTR_ID] = {.type = NLA_U16},
  161. [DCB_APP_ATTR_PRIORITY] = {.type = NLA_U8},
  162. };
  163. /* IEEE 802.1Qaz nested attributes. */
  164. static const struct nla_policy dcbnl_ieee_policy[DCB_ATTR_IEEE_MAX + 1] = {
  165. [DCB_ATTR_IEEE_ETS] = {.len = sizeof(struct ieee_ets)},
  166. [DCB_ATTR_IEEE_PFC] = {.len = sizeof(struct ieee_pfc)},
  167. [DCB_ATTR_IEEE_APP_TABLE] = {.type = NLA_NESTED},
  168. [DCB_ATTR_IEEE_MAXRATE] = {.len = sizeof(struct ieee_maxrate)},
  169. };
  170. static const struct nla_policy dcbnl_ieee_app[DCB_ATTR_IEEE_APP_MAX + 1] = {
  171. [DCB_ATTR_IEEE_APP] = {.len = sizeof(struct dcb_app)},
  172. };
  173. /* DCB number of traffic classes nested attributes. */
  174. static const struct nla_policy dcbnl_featcfg_nest[DCB_FEATCFG_ATTR_MAX + 1] = {
  175. [DCB_FEATCFG_ATTR_ALL] = {.type = NLA_FLAG},
  176. [DCB_FEATCFG_ATTR_PG] = {.type = NLA_U8},
  177. [DCB_FEATCFG_ATTR_PFC] = {.type = NLA_U8},
  178. [DCB_FEATCFG_ATTR_APP] = {.type = NLA_U8},
  179. };
  180. static LIST_HEAD(dcb_app_list);
  181. static DEFINE_SPINLOCK(dcb_lock);
  182. /* standard netlink reply call */
  183. static int dcbnl_reply(u8 value, u8 event, u8 cmd, u8 attr, u32 pid,
  184. u32 seq, u16 flags)
  185. {
  186. struct sk_buff *dcbnl_skb;
  187. struct dcbmsg *dcb;
  188. struct nlmsghdr *nlh;
  189. int ret = -EINVAL;
  190. dcbnl_skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
  191. if (!dcbnl_skb)
  192. return ret;
  193. nlh = NLMSG_NEW(dcbnl_skb, pid, seq, event, sizeof(*dcb), flags);
  194. dcb = NLMSG_DATA(nlh);
  195. dcb->dcb_family = AF_UNSPEC;
  196. dcb->cmd = cmd;
  197. dcb->dcb_pad = 0;
  198. ret = nla_put_u8(dcbnl_skb, attr, value);
  199. if (ret)
  200. goto err;
  201. /* end the message, assign the nlmsg_len. */
  202. nlmsg_end(dcbnl_skb, nlh);
  203. ret = rtnl_unicast(dcbnl_skb, &init_net, pid);
  204. if (ret)
  205. return -EINVAL;
  206. return 0;
  207. nlmsg_failure:
  208. err:
  209. kfree_skb(dcbnl_skb);
  210. return ret;
  211. }
  212. static int dcbnl_getstate(struct net_device *netdev, struct nlattr **tb,
  213. u32 pid, u32 seq, u16 flags)
  214. {
  215. int ret = -EINVAL;
  216. /* if (!tb[DCB_ATTR_STATE] || !netdev->dcbnl_ops->getstate) */
  217. if (!netdev->dcbnl_ops->getstate)
  218. return ret;
  219. ret = dcbnl_reply(netdev->dcbnl_ops->getstate(netdev), RTM_GETDCB,
  220. DCB_CMD_GSTATE, DCB_ATTR_STATE, pid, seq, flags);
  221. return ret;
  222. }
  223. static int dcbnl_getpfccfg(struct net_device *netdev, struct nlattr **tb,
  224. u32 pid, u32 seq, u16 flags)
  225. {
  226. struct sk_buff *dcbnl_skb;
  227. struct nlmsghdr *nlh;
  228. struct dcbmsg *dcb;
  229. struct nlattr *data[DCB_PFC_UP_ATTR_MAX + 1], *nest;
  230. u8 value;
  231. int ret = -EINVAL;
  232. int i;
  233. int getall = 0;
  234. if (!tb[DCB_ATTR_PFC_CFG] || !netdev->dcbnl_ops->getpfccfg)
  235. return ret;
  236. ret = nla_parse_nested(data, DCB_PFC_UP_ATTR_MAX,
  237. tb[DCB_ATTR_PFC_CFG],
  238. dcbnl_pfc_up_nest);
  239. if (ret)
  240. goto err_out;
  241. dcbnl_skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
  242. if (!dcbnl_skb)
  243. goto err_out;
  244. nlh = NLMSG_NEW(dcbnl_skb, pid, seq, RTM_GETDCB, sizeof(*dcb), flags);
  245. dcb = NLMSG_DATA(nlh);
  246. dcb->dcb_family = AF_UNSPEC;
  247. dcb->cmd = DCB_CMD_PFC_GCFG;
  248. nest = nla_nest_start(dcbnl_skb, DCB_ATTR_PFC_CFG);
  249. if (!nest)
  250. goto err;
  251. if (data[DCB_PFC_UP_ATTR_ALL])
  252. getall = 1;
  253. for (i = DCB_PFC_UP_ATTR_0; i <= DCB_PFC_UP_ATTR_7; i++) {
  254. if (!getall && !data[i])
  255. continue;
  256. netdev->dcbnl_ops->getpfccfg(netdev, i - DCB_PFC_UP_ATTR_0,
  257. &value);
  258. ret = nla_put_u8(dcbnl_skb, i, value);
  259. if (ret) {
  260. nla_nest_cancel(dcbnl_skb, nest);
  261. goto err;
  262. }
  263. }
  264. nla_nest_end(dcbnl_skb, nest);
  265. nlmsg_end(dcbnl_skb, nlh);
  266. ret = rtnl_unicast(dcbnl_skb, &init_net, pid);
  267. if (ret)
  268. goto err_out;
  269. return 0;
  270. nlmsg_failure:
  271. err:
  272. kfree_skb(dcbnl_skb);
  273. err_out:
  274. return -EINVAL;
  275. }
  276. static int dcbnl_getperm_hwaddr(struct net_device *netdev, struct nlattr **tb,
  277. u32 pid, u32 seq, u16 flags)
  278. {
  279. struct sk_buff *dcbnl_skb;
  280. struct nlmsghdr *nlh;
  281. struct dcbmsg *dcb;
  282. u8 perm_addr[MAX_ADDR_LEN];
  283. int ret = -EINVAL;
  284. if (!netdev->dcbnl_ops->getpermhwaddr)
  285. return ret;
  286. dcbnl_skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
  287. if (!dcbnl_skb)
  288. goto err_out;
  289. nlh = NLMSG_NEW(dcbnl_skb, pid, seq, RTM_GETDCB, sizeof(*dcb), flags);
  290. dcb = NLMSG_DATA(nlh);
  291. dcb->dcb_family = AF_UNSPEC;
  292. dcb->cmd = DCB_CMD_GPERM_HWADDR;
  293. netdev->dcbnl_ops->getpermhwaddr(netdev, perm_addr);
  294. ret = nla_put(dcbnl_skb, DCB_ATTR_PERM_HWADDR, sizeof(perm_addr),
  295. perm_addr);
  296. nlmsg_end(dcbnl_skb, nlh);
  297. ret = rtnl_unicast(dcbnl_skb, &init_net, pid);
  298. if (ret)
  299. goto err_out;
  300. return 0;
  301. nlmsg_failure:
  302. kfree_skb(dcbnl_skb);
  303. err_out:
  304. return -EINVAL;
  305. }
  306. static int dcbnl_getcap(struct net_device *netdev, struct nlattr **tb,
  307. u32 pid, u32 seq, u16 flags)
  308. {
  309. struct sk_buff *dcbnl_skb;
  310. struct nlmsghdr *nlh;
  311. struct dcbmsg *dcb;
  312. struct nlattr *data[DCB_CAP_ATTR_MAX + 1], *nest;
  313. u8 value;
  314. int ret = -EINVAL;
  315. int i;
  316. int getall = 0;
  317. if (!tb[DCB_ATTR_CAP] || !netdev->dcbnl_ops->getcap)
  318. return ret;
  319. ret = nla_parse_nested(data, DCB_CAP_ATTR_MAX, tb[DCB_ATTR_CAP],
  320. dcbnl_cap_nest);
  321. if (ret)
  322. goto err_out;
  323. dcbnl_skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
  324. if (!dcbnl_skb)
  325. goto err_out;
  326. nlh = NLMSG_NEW(dcbnl_skb, pid, seq, RTM_GETDCB, sizeof(*dcb), flags);
  327. dcb = NLMSG_DATA(nlh);
  328. dcb->dcb_family = AF_UNSPEC;
  329. dcb->cmd = DCB_CMD_GCAP;
  330. nest = nla_nest_start(dcbnl_skb, DCB_ATTR_CAP);
  331. if (!nest)
  332. goto err;
  333. if (data[DCB_CAP_ATTR_ALL])
  334. getall = 1;
  335. for (i = DCB_CAP_ATTR_ALL+1; i <= DCB_CAP_ATTR_MAX; i++) {
  336. if (!getall && !data[i])
  337. continue;
  338. if (!netdev->dcbnl_ops->getcap(netdev, i, &value)) {
  339. ret = nla_put_u8(dcbnl_skb, i, value);
  340. if (ret) {
  341. nla_nest_cancel(dcbnl_skb, nest);
  342. goto err;
  343. }
  344. }
  345. }
  346. nla_nest_end(dcbnl_skb, nest);
  347. nlmsg_end(dcbnl_skb, nlh);
  348. ret = rtnl_unicast(dcbnl_skb, &init_net, pid);
  349. if (ret)
  350. goto err_out;
  351. return 0;
  352. nlmsg_failure:
  353. err:
  354. kfree_skb(dcbnl_skb);
  355. err_out:
  356. return -EINVAL;
  357. }
  358. static int dcbnl_getnumtcs(struct net_device *netdev, struct nlattr **tb,
  359. u32 pid, u32 seq, u16 flags)
  360. {
  361. struct sk_buff *dcbnl_skb;
  362. struct nlmsghdr *nlh;
  363. struct dcbmsg *dcb;
  364. struct nlattr *data[DCB_NUMTCS_ATTR_MAX + 1], *nest;
  365. u8 value;
  366. int ret = -EINVAL;
  367. int i;
  368. int getall = 0;
  369. if (!tb[DCB_ATTR_NUMTCS] || !netdev->dcbnl_ops->getnumtcs)
  370. return ret;
  371. ret = nla_parse_nested(data, DCB_NUMTCS_ATTR_MAX, tb[DCB_ATTR_NUMTCS],
  372. dcbnl_numtcs_nest);
  373. if (ret) {
  374. ret = -EINVAL;
  375. goto err_out;
  376. }
  377. dcbnl_skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
  378. if (!dcbnl_skb) {
  379. ret = -EINVAL;
  380. goto err_out;
  381. }
  382. nlh = NLMSG_NEW(dcbnl_skb, pid, seq, RTM_GETDCB, sizeof(*dcb), flags);
  383. dcb = NLMSG_DATA(nlh);
  384. dcb->dcb_family = AF_UNSPEC;
  385. dcb->cmd = DCB_CMD_GNUMTCS;
  386. nest = nla_nest_start(dcbnl_skb, DCB_ATTR_NUMTCS);
  387. if (!nest) {
  388. ret = -EINVAL;
  389. goto err;
  390. }
  391. if (data[DCB_NUMTCS_ATTR_ALL])
  392. getall = 1;
  393. for (i = DCB_NUMTCS_ATTR_ALL+1; i <= DCB_NUMTCS_ATTR_MAX; i++) {
  394. if (!getall && !data[i])
  395. continue;
  396. ret = netdev->dcbnl_ops->getnumtcs(netdev, i, &value);
  397. if (!ret) {
  398. ret = nla_put_u8(dcbnl_skb, i, value);
  399. if (ret) {
  400. nla_nest_cancel(dcbnl_skb, nest);
  401. ret = -EINVAL;
  402. goto err;
  403. }
  404. } else {
  405. goto err;
  406. }
  407. }
  408. nla_nest_end(dcbnl_skb, nest);
  409. nlmsg_end(dcbnl_skb, nlh);
  410. ret = rtnl_unicast(dcbnl_skb, &init_net, pid);
  411. if (ret) {
  412. ret = -EINVAL;
  413. goto err_out;
  414. }
  415. return 0;
  416. nlmsg_failure:
  417. err:
  418. kfree_skb(dcbnl_skb);
  419. err_out:
  420. return ret;
  421. }
  422. static int dcbnl_setnumtcs(struct net_device *netdev, struct nlattr **tb,
  423. u32 pid, u32 seq, u16 flags)
  424. {
  425. struct nlattr *data[DCB_NUMTCS_ATTR_MAX + 1];
  426. int ret = -EINVAL;
  427. u8 value;
  428. int i;
  429. if (!tb[DCB_ATTR_NUMTCS] || !netdev->dcbnl_ops->setnumtcs)
  430. return ret;
  431. ret = nla_parse_nested(data, DCB_NUMTCS_ATTR_MAX, tb[DCB_ATTR_NUMTCS],
  432. dcbnl_numtcs_nest);
  433. if (ret) {
  434. ret = -EINVAL;
  435. goto err;
  436. }
  437. for (i = DCB_NUMTCS_ATTR_ALL+1; i <= DCB_NUMTCS_ATTR_MAX; i++) {
  438. if (data[i] == NULL)
  439. continue;
  440. value = nla_get_u8(data[i]);
  441. ret = netdev->dcbnl_ops->setnumtcs(netdev, i, value);
  442. if (ret)
  443. goto operr;
  444. }
  445. operr:
  446. ret = dcbnl_reply(!!ret, RTM_SETDCB, DCB_CMD_SNUMTCS,
  447. DCB_ATTR_NUMTCS, pid, seq, flags);
  448. err:
  449. return ret;
  450. }
  451. static int dcbnl_getpfcstate(struct net_device *netdev, struct nlattr **tb,
  452. u32 pid, u32 seq, u16 flags)
  453. {
  454. int ret = -EINVAL;
  455. if (!netdev->dcbnl_ops->getpfcstate)
  456. return ret;
  457. ret = dcbnl_reply(netdev->dcbnl_ops->getpfcstate(netdev), RTM_GETDCB,
  458. DCB_CMD_PFC_GSTATE, DCB_ATTR_PFC_STATE,
  459. pid, seq, flags);
  460. return ret;
  461. }
  462. static int dcbnl_setpfcstate(struct net_device *netdev, struct nlattr **tb,
  463. u32 pid, u32 seq, u16 flags)
  464. {
  465. int ret = -EINVAL;
  466. u8 value;
  467. if (!tb[DCB_ATTR_PFC_STATE] || !netdev->dcbnl_ops->setpfcstate)
  468. return ret;
  469. value = nla_get_u8(tb[DCB_ATTR_PFC_STATE]);
  470. netdev->dcbnl_ops->setpfcstate(netdev, value);
  471. ret = dcbnl_reply(0, RTM_SETDCB, DCB_CMD_PFC_SSTATE, DCB_ATTR_PFC_STATE,
  472. pid, seq, flags);
  473. return ret;
  474. }
  475. static int dcbnl_getapp(struct net_device *netdev, struct nlattr **tb,
  476. u32 pid, u32 seq, u16 flags)
  477. {
  478. struct sk_buff *dcbnl_skb;
  479. struct nlmsghdr *nlh;
  480. struct dcbmsg *dcb;
  481. struct nlattr *app_nest;
  482. struct nlattr *app_tb[DCB_APP_ATTR_MAX + 1];
  483. u16 id;
  484. u8 up, idtype;
  485. int ret = -EINVAL;
  486. if (!tb[DCB_ATTR_APP])
  487. goto out;
  488. ret = nla_parse_nested(app_tb, DCB_APP_ATTR_MAX, tb[DCB_ATTR_APP],
  489. dcbnl_app_nest);
  490. if (ret)
  491. goto out;
  492. ret = -EINVAL;
  493. /* all must be non-null */
  494. if ((!app_tb[DCB_APP_ATTR_IDTYPE]) ||
  495. (!app_tb[DCB_APP_ATTR_ID]))
  496. goto out;
  497. /* either by eth type or by socket number */
  498. idtype = nla_get_u8(app_tb[DCB_APP_ATTR_IDTYPE]);
  499. if ((idtype != DCB_APP_IDTYPE_ETHTYPE) &&
  500. (idtype != DCB_APP_IDTYPE_PORTNUM))
  501. goto out;
  502. id = nla_get_u16(app_tb[DCB_APP_ATTR_ID]);
  503. if (netdev->dcbnl_ops->getapp) {
  504. up = netdev->dcbnl_ops->getapp(netdev, idtype, id);
  505. } else {
  506. struct dcb_app app = {
  507. .selector = idtype,
  508. .protocol = id,
  509. };
  510. up = dcb_getapp(netdev, &app);
  511. }
  512. /* send this back */
  513. dcbnl_skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
  514. if (!dcbnl_skb)
  515. goto out;
  516. nlh = NLMSG_NEW(dcbnl_skb, pid, seq, RTM_GETDCB, sizeof(*dcb), flags);
  517. dcb = NLMSG_DATA(nlh);
  518. dcb->dcb_family = AF_UNSPEC;
  519. dcb->cmd = DCB_CMD_GAPP;
  520. app_nest = nla_nest_start(dcbnl_skb, DCB_ATTR_APP);
  521. if (!app_nest)
  522. goto out_cancel;
  523. ret = nla_put_u8(dcbnl_skb, DCB_APP_ATTR_IDTYPE, idtype);
  524. if (ret)
  525. goto out_cancel;
  526. ret = nla_put_u16(dcbnl_skb, DCB_APP_ATTR_ID, id);
  527. if (ret)
  528. goto out_cancel;
  529. ret = nla_put_u8(dcbnl_skb, DCB_APP_ATTR_PRIORITY, up);
  530. if (ret)
  531. goto out_cancel;
  532. nla_nest_end(dcbnl_skb, app_nest);
  533. nlmsg_end(dcbnl_skb, nlh);
  534. ret = rtnl_unicast(dcbnl_skb, &init_net, pid);
  535. if (ret)
  536. goto nlmsg_failure;
  537. goto out;
  538. out_cancel:
  539. nla_nest_cancel(dcbnl_skb, app_nest);
  540. nlmsg_failure:
  541. kfree_skb(dcbnl_skb);
  542. out:
  543. return ret;
  544. }
  545. static int dcbnl_setapp(struct net_device *netdev, struct nlattr **tb,
  546. u32 pid, u32 seq, u16 flags)
  547. {
  548. int err, ret = -EINVAL;
  549. u16 id;
  550. u8 up, idtype;
  551. struct nlattr *app_tb[DCB_APP_ATTR_MAX + 1];
  552. if (!tb[DCB_ATTR_APP])
  553. goto out;
  554. ret = nla_parse_nested(app_tb, DCB_APP_ATTR_MAX, tb[DCB_ATTR_APP],
  555. dcbnl_app_nest);
  556. if (ret)
  557. goto out;
  558. ret = -EINVAL;
  559. /* all must be non-null */
  560. if ((!app_tb[DCB_APP_ATTR_IDTYPE]) ||
  561. (!app_tb[DCB_APP_ATTR_ID]) ||
  562. (!app_tb[DCB_APP_ATTR_PRIORITY]))
  563. goto out;
  564. /* either by eth type or by socket number */
  565. idtype = nla_get_u8(app_tb[DCB_APP_ATTR_IDTYPE]);
  566. if ((idtype != DCB_APP_IDTYPE_ETHTYPE) &&
  567. (idtype != DCB_APP_IDTYPE_PORTNUM))
  568. goto out;
  569. id = nla_get_u16(app_tb[DCB_APP_ATTR_ID]);
  570. up = nla_get_u8(app_tb[DCB_APP_ATTR_PRIORITY]);
  571. if (netdev->dcbnl_ops->setapp) {
  572. err = netdev->dcbnl_ops->setapp(netdev, idtype, id, up);
  573. } else {
  574. struct dcb_app app;
  575. app.selector = idtype;
  576. app.protocol = id;
  577. app.priority = up;
  578. err = dcb_setapp(netdev, &app);
  579. }
  580. ret = dcbnl_reply(err, RTM_SETDCB, DCB_CMD_SAPP, DCB_ATTR_APP,
  581. pid, seq, flags);
  582. dcbnl_cee_notify(netdev, RTM_SETDCB, DCB_CMD_SAPP, seq, 0);
  583. out:
  584. return ret;
  585. }
  586. static int __dcbnl_pg_getcfg(struct net_device *netdev, struct nlattr **tb,
  587. u32 pid, u32 seq, u16 flags, int dir)
  588. {
  589. struct sk_buff *dcbnl_skb;
  590. struct nlmsghdr *nlh;
  591. struct dcbmsg *dcb;
  592. struct nlattr *pg_nest, *param_nest, *data;
  593. struct nlattr *pg_tb[DCB_PG_ATTR_MAX + 1];
  594. struct nlattr *param_tb[DCB_TC_ATTR_PARAM_MAX + 1];
  595. u8 prio, pgid, tc_pct, up_map;
  596. int ret = -EINVAL;
  597. int getall = 0;
  598. int i;
  599. if (!tb[DCB_ATTR_PG_CFG] ||
  600. !netdev->dcbnl_ops->getpgtccfgtx ||
  601. !netdev->dcbnl_ops->getpgtccfgrx ||
  602. !netdev->dcbnl_ops->getpgbwgcfgtx ||
  603. !netdev->dcbnl_ops->getpgbwgcfgrx)
  604. return ret;
  605. ret = nla_parse_nested(pg_tb, DCB_PG_ATTR_MAX,
  606. tb[DCB_ATTR_PG_CFG], dcbnl_pg_nest);
  607. if (ret)
  608. goto err_out;
  609. dcbnl_skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
  610. if (!dcbnl_skb)
  611. goto err_out;
  612. nlh = NLMSG_NEW(dcbnl_skb, pid, seq, RTM_GETDCB, sizeof(*dcb), flags);
  613. dcb = NLMSG_DATA(nlh);
  614. dcb->dcb_family = AF_UNSPEC;
  615. dcb->cmd = (dir) ? DCB_CMD_PGRX_GCFG : DCB_CMD_PGTX_GCFG;
  616. pg_nest = nla_nest_start(dcbnl_skb, DCB_ATTR_PG_CFG);
  617. if (!pg_nest)
  618. goto err;
  619. if (pg_tb[DCB_PG_ATTR_TC_ALL])
  620. getall = 1;
  621. for (i = DCB_PG_ATTR_TC_0; i <= DCB_PG_ATTR_TC_7; i++) {
  622. if (!getall && !pg_tb[i])
  623. continue;
  624. if (pg_tb[DCB_PG_ATTR_TC_ALL])
  625. data = pg_tb[DCB_PG_ATTR_TC_ALL];
  626. else
  627. data = pg_tb[i];
  628. ret = nla_parse_nested(param_tb, DCB_TC_ATTR_PARAM_MAX,
  629. data, dcbnl_tc_param_nest);
  630. if (ret)
  631. goto err_pg;
  632. param_nest = nla_nest_start(dcbnl_skb, i);
  633. if (!param_nest)
  634. goto err_pg;
  635. pgid = DCB_ATTR_VALUE_UNDEFINED;
  636. prio = DCB_ATTR_VALUE_UNDEFINED;
  637. tc_pct = DCB_ATTR_VALUE_UNDEFINED;
  638. up_map = DCB_ATTR_VALUE_UNDEFINED;
  639. if (dir) {
  640. /* Rx */
  641. netdev->dcbnl_ops->getpgtccfgrx(netdev,
  642. i - DCB_PG_ATTR_TC_0, &prio,
  643. &pgid, &tc_pct, &up_map);
  644. } else {
  645. /* Tx */
  646. netdev->dcbnl_ops->getpgtccfgtx(netdev,
  647. i - DCB_PG_ATTR_TC_0, &prio,
  648. &pgid, &tc_pct, &up_map);
  649. }
  650. if (param_tb[DCB_TC_ATTR_PARAM_PGID] ||
  651. param_tb[DCB_TC_ATTR_PARAM_ALL]) {
  652. ret = nla_put_u8(dcbnl_skb,
  653. DCB_TC_ATTR_PARAM_PGID, pgid);
  654. if (ret)
  655. goto err_param;
  656. }
  657. if (param_tb[DCB_TC_ATTR_PARAM_UP_MAPPING] ||
  658. param_tb[DCB_TC_ATTR_PARAM_ALL]) {
  659. ret = nla_put_u8(dcbnl_skb,
  660. DCB_TC_ATTR_PARAM_UP_MAPPING, up_map);
  661. if (ret)
  662. goto err_param;
  663. }
  664. if (param_tb[DCB_TC_ATTR_PARAM_STRICT_PRIO] ||
  665. param_tb[DCB_TC_ATTR_PARAM_ALL]) {
  666. ret = nla_put_u8(dcbnl_skb,
  667. DCB_TC_ATTR_PARAM_STRICT_PRIO, prio);
  668. if (ret)
  669. goto err_param;
  670. }
  671. if (param_tb[DCB_TC_ATTR_PARAM_BW_PCT] ||
  672. param_tb[DCB_TC_ATTR_PARAM_ALL]) {
  673. ret = nla_put_u8(dcbnl_skb, DCB_TC_ATTR_PARAM_BW_PCT,
  674. tc_pct);
  675. if (ret)
  676. goto err_param;
  677. }
  678. nla_nest_end(dcbnl_skb, param_nest);
  679. }
  680. if (pg_tb[DCB_PG_ATTR_BW_ID_ALL])
  681. getall = 1;
  682. else
  683. getall = 0;
  684. for (i = DCB_PG_ATTR_BW_ID_0; i <= DCB_PG_ATTR_BW_ID_7; i++) {
  685. if (!getall && !pg_tb[i])
  686. continue;
  687. tc_pct = DCB_ATTR_VALUE_UNDEFINED;
  688. if (dir) {
  689. /* Rx */
  690. netdev->dcbnl_ops->getpgbwgcfgrx(netdev,
  691. i - DCB_PG_ATTR_BW_ID_0, &tc_pct);
  692. } else {
  693. /* Tx */
  694. netdev->dcbnl_ops->getpgbwgcfgtx(netdev,
  695. i - DCB_PG_ATTR_BW_ID_0, &tc_pct);
  696. }
  697. ret = nla_put_u8(dcbnl_skb, i, tc_pct);
  698. if (ret)
  699. goto err_pg;
  700. }
  701. nla_nest_end(dcbnl_skb, pg_nest);
  702. nlmsg_end(dcbnl_skb, nlh);
  703. ret = rtnl_unicast(dcbnl_skb, &init_net, pid);
  704. if (ret)
  705. goto err_out;
  706. return 0;
  707. err_param:
  708. nla_nest_cancel(dcbnl_skb, param_nest);
  709. err_pg:
  710. nla_nest_cancel(dcbnl_skb, pg_nest);
  711. nlmsg_failure:
  712. err:
  713. kfree_skb(dcbnl_skb);
  714. err_out:
  715. ret = -EINVAL;
  716. return ret;
  717. }
  718. static int dcbnl_pgtx_getcfg(struct net_device *netdev, struct nlattr **tb,
  719. u32 pid, u32 seq, u16 flags)
  720. {
  721. return __dcbnl_pg_getcfg(netdev, tb, pid, seq, flags, 0);
  722. }
  723. static int dcbnl_pgrx_getcfg(struct net_device *netdev, struct nlattr **tb,
  724. u32 pid, u32 seq, u16 flags)
  725. {
  726. return __dcbnl_pg_getcfg(netdev, tb, pid, seq, flags, 1);
  727. }
  728. static int dcbnl_setstate(struct net_device *netdev, struct nlattr **tb,
  729. u32 pid, u32 seq, u16 flags)
  730. {
  731. int ret = -EINVAL;
  732. u8 value;
  733. if (!tb[DCB_ATTR_STATE] || !netdev->dcbnl_ops->setstate)
  734. return ret;
  735. value = nla_get_u8(tb[DCB_ATTR_STATE]);
  736. ret = dcbnl_reply(netdev->dcbnl_ops->setstate(netdev, value),
  737. RTM_SETDCB, DCB_CMD_SSTATE, DCB_ATTR_STATE,
  738. pid, seq, flags);
  739. return ret;
  740. }
  741. static int dcbnl_setpfccfg(struct net_device *netdev, struct nlattr **tb,
  742. u32 pid, u32 seq, u16 flags)
  743. {
  744. struct nlattr *data[DCB_PFC_UP_ATTR_MAX + 1];
  745. int i;
  746. int ret = -EINVAL;
  747. u8 value;
  748. if (!tb[DCB_ATTR_PFC_CFG] || !netdev->dcbnl_ops->setpfccfg)
  749. return ret;
  750. ret = nla_parse_nested(data, DCB_PFC_UP_ATTR_MAX,
  751. tb[DCB_ATTR_PFC_CFG],
  752. dcbnl_pfc_up_nest);
  753. if (ret)
  754. goto err;
  755. for (i = DCB_PFC_UP_ATTR_0; i <= DCB_PFC_UP_ATTR_7; i++) {
  756. if (data[i] == NULL)
  757. continue;
  758. value = nla_get_u8(data[i]);
  759. netdev->dcbnl_ops->setpfccfg(netdev,
  760. data[i]->nla_type - DCB_PFC_UP_ATTR_0, value);
  761. }
  762. ret = dcbnl_reply(0, RTM_SETDCB, DCB_CMD_PFC_SCFG, DCB_ATTR_PFC_CFG,
  763. pid, seq, flags);
  764. err:
  765. return ret;
  766. }
  767. static int dcbnl_setall(struct net_device *netdev, struct nlattr **tb,
  768. u32 pid, u32 seq, u16 flags)
  769. {
  770. int ret = -EINVAL;
  771. if (!tb[DCB_ATTR_SET_ALL] || !netdev->dcbnl_ops->setall)
  772. return ret;
  773. ret = dcbnl_reply(netdev->dcbnl_ops->setall(netdev), RTM_SETDCB,
  774. DCB_CMD_SET_ALL, DCB_ATTR_SET_ALL, pid, seq, flags);
  775. dcbnl_cee_notify(netdev, RTM_SETDCB, DCB_CMD_SET_ALL, seq, 0);
  776. return ret;
  777. }
  778. static int __dcbnl_pg_setcfg(struct net_device *netdev, struct nlattr **tb,
  779. u32 pid, u32 seq, u16 flags, int dir)
  780. {
  781. struct nlattr *pg_tb[DCB_PG_ATTR_MAX + 1];
  782. struct nlattr *param_tb[DCB_TC_ATTR_PARAM_MAX + 1];
  783. int ret = -EINVAL;
  784. int i;
  785. u8 pgid;
  786. u8 up_map;
  787. u8 prio;
  788. u8 tc_pct;
  789. if (!tb[DCB_ATTR_PG_CFG] ||
  790. !netdev->dcbnl_ops->setpgtccfgtx ||
  791. !netdev->dcbnl_ops->setpgtccfgrx ||
  792. !netdev->dcbnl_ops->setpgbwgcfgtx ||
  793. !netdev->dcbnl_ops->setpgbwgcfgrx)
  794. return ret;
  795. ret = nla_parse_nested(pg_tb, DCB_PG_ATTR_MAX,
  796. tb[DCB_ATTR_PG_CFG], dcbnl_pg_nest);
  797. if (ret)
  798. goto err;
  799. for (i = DCB_PG_ATTR_TC_0; i <= DCB_PG_ATTR_TC_7; i++) {
  800. if (!pg_tb[i])
  801. continue;
  802. ret = nla_parse_nested(param_tb, DCB_TC_ATTR_PARAM_MAX,
  803. pg_tb[i], dcbnl_tc_param_nest);
  804. if (ret)
  805. goto err;
  806. pgid = DCB_ATTR_VALUE_UNDEFINED;
  807. prio = DCB_ATTR_VALUE_UNDEFINED;
  808. tc_pct = DCB_ATTR_VALUE_UNDEFINED;
  809. up_map = DCB_ATTR_VALUE_UNDEFINED;
  810. if (param_tb[DCB_TC_ATTR_PARAM_STRICT_PRIO])
  811. prio =
  812. nla_get_u8(param_tb[DCB_TC_ATTR_PARAM_STRICT_PRIO]);
  813. if (param_tb[DCB_TC_ATTR_PARAM_PGID])
  814. pgid = nla_get_u8(param_tb[DCB_TC_ATTR_PARAM_PGID]);
  815. if (param_tb[DCB_TC_ATTR_PARAM_BW_PCT])
  816. tc_pct = nla_get_u8(param_tb[DCB_TC_ATTR_PARAM_BW_PCT]);
  817. if (param_tb[DCB_TC_ATTR_PARAM_UP_MAPPING])
  818. up_map =
  819. nla_get_u8(param_tb[DCB_TC_ATTR_PARAM_UP_MAPPING]);
  820. /* dir: Tx = 0, Rx = 1 */
  821. if (dir) {
  822. /* Rx */
  823. netdev->dcbnl_ops->setpgtccfgrx(netdev,
  824. i - DCB_PG_ATTR_TC_0,
  825. prio, pgid, tc_pct, up_map);
  826. } else {
  827. /* Tx */
  828. netdev->dcbnl_ops->setpgtccfgtx(netdev,
  829. i - DCB_PG_ATTR_TC_0,
  830. prio, pgid, tc_pct, up_map);
  831. }
  832. }
  833. for (i = DCB_PG_ATTR_BW_ID_0; i <= DCB_PG_ATTR_BW_ID_7; i++) {
  834. if (!pg_tb[i])
  835. continue;
  836. tc_pct = nla_get_u8(pg_tb[i]);
  837. /* dir: Tx = 0, Rx = 1 */
  838. if (dir) {
  839. /* Rx */
  840. netdev->dcbnl_ops->setpgbwgcfgrx(netdev,
  841. i - DCB_PG_ATTR_BW_ID_0, tc_pct);
  842. } else {
  843. /* Tx */
  844. netdev->dcbnl_ops->setpgbwgcfgtx(netdev,
  845. i - DCB_PG_ATTR_BW_ID_0, tc_pct);
  846. }
  847. }
  848. ret = dcbnl_reply(0, RTM_SETDCB,
  849. (dir ? DCB_CMD_PGRX_SCFG : DCB_CMD_PGTX_SCFG),
  850. DCB_ATTR_PG_CFG, pid, seq, flags);
  851. err:
  852. return ret;
  853. }
  854. static int dcbnl_pgtx_setcfg(struct net_device *netdev, struct nlattr **tb,
  855. u32 pid, u32 seq, u16 flags)
  856. {
  857. return __dcbnl_pg_setcfg(netdev, tb, pid, seq, flags, 0);
  858. }
  859. static int dcbnl_pgrx_setcfg(struct net_device *netdev, struct nlattr **tb,
  860. u32 pid, u32 seq, u16 flags)
  861. {
  862. return __dcbnl_pg_setcfg(netdev, tb, pid, seq, flags, 1);
  863. }
  864. static int dcbnl_bcn_getcfg(struct net_device *netdev, struct nlattr **tb,
  865. u32 pid, u32 seq, u16 flags)
  866. {
  867. struct sk_buff *dcbnl_skb;
  868. struct nlmsghdr *nlh;
  869. struct dcbmsg *dcb;
  870. struct nlattr *bcn_nest;
  871. struct nlattr *bcn_tb[DCB_BCN_ATTR_MAX + 1];
  872. u8 value_byte;
  873. u32 value_integer;
  874. int ret = -EINVAL;
  875. bool getall = false;
  876. int i;
  877. if (!tb[DCB_ATTR_BCN] || !netdev->dcbnl_ops->getbcnrp ||
  878. !netdev->dcbnl_ops->getbcncfg)
  879. return ret;
  880. ret = nla_parse_nested(bcn_tb, DCB_BCN_ATTR_MAX,
  881. tb[DCB_ATTR_BCN], dcbnl_bcn_nest);
  882. if (ret)
  883. goto err_out;
  884. dcbnl_skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
  885. if (!dcbnl_skb)
  886. goto err_out;
  887. nlh = NLMSG_NEW(dcbnl_skb, pid, seq, RTM_GETDCB, sizeof(*dcb), flags);
  888. dcb = NLMSG_DATA(nlh);
  889. dcb->dcb_family = AF_UNSPEC;
  890. dcb->cmd = DCB_CMD_BCN_GCFG;
  891. bcn_nest = nla_nest_start(dcbnl_skb, DCB_ATTR_BCN);
  892. if (!bcn_nest)
  893. goto err;
  894. if (bcn_tb[DCB_BCN_ATTR_ALL])
  895. getall = true;
  896. for (i = DCB_BCN_ATTR_RP_0; i <= DCB_BCN_ATTR_RP_7; i++) {
  897. if (!getall && !bcn_tb[i])
  898. continue;
  899. netdev->dcbnl_ops->getbcnrp(netdev, i - DCB_BCN_ATTR_RP_0,
  900. &value_byte);
  901. ret = nla_put_u8(dcbnl_skb, i, value_byte);
  902. if (ret)
  903. goto err_bcn;
  904. }
  905. for (i = DCB_BCN_ATTR_BCNA_0; i <= DCB_BCN_ATTR_RI; i++) {
  906. if (!getall && !bcn_tb[i])
  907. continue;
  908. netdev->dcbnl_ops->getbcncfg(netdev, i,
  909. &value_integer);
  910. ret = nla_put_u32(dcbnl_skb, i, value_integer);
  911. if (ret)
  912. goto err_bcn;
  913. }
  914. nla_nest_end(dcbnl_skb, bcn_nest);
  915. nlmsg_end(dcbnl_skb, nlh);
  916. ret = rtnl_unicast(dcbnl_skb, &init_net, pid);
  917. if (ret)
  918. goto err_out;
  919. return 0;
  920. err_bcn:
  921. nla_nest_cancel(dcbnl_skb, bcn_nest);
  922. nlmsg_failure:
  923. err:
  924. kfree_skb(dcbnl_skb);
  925. err_out:
  926. ret = -EINVAL;
  927. return ret;
  928. }
  929. static int dcbnl_bcn_setcfg(struct net_device *netdev, struct nlattr **tb,
  930. u32 pid, u32 seq, u16 flags)
  931. {
  932. struct nlattr *data[DCB_BCN_ATTR_MAX + 1];
  933. int i;
  934. int ret = -EINVAL;
  935. u8 value_byte;
  936. u32 value_int;
  937. if (!tb[DCB_ATTR_BCN] || !netdev->dcbnl_ops->setbcncfg ||
  938. !netdev->dcbnl_ops->setbcnrp)
  939. return ret;
  940. ret = nla_parse_nested(data, DCB_BCN_ATTR_MAX,
  941. tb[DCB_ATTR_BCN],
  942. dcbnl_pfc_up_nest);
  943. if (ret)
  944. goto err;
  945. for (i = DCB_BCN_ATTR_RP_0; i <= DCB_BCN_ATTR_RP_7; i++) {
  946. if (data[i] == NULL)
  947. continue;
  948. value_byte = nla_get_u8(data[i]);
  949. netdev->dcbnl_ops->setbcnrp(netdev,
  950. data[i]->nla_type - DCB_BCN_ATTR_RP_0, value_byte);
  951. }
  952. for (i = DCB_BCN_ATTR_BCNA_0; i <= DCB_BCN_ATTR_RI; i++) {
  953. if (data[i] == NULL)
  954. continue;
  955. value_int = nla_get_u32(data[i]);
  956. netdev->dcbnl_ops->setbcncfg(netdev,
  957. i, value_int);
  958. }
  959. ret = dcbnl_reply(0, RTM_SETDCB, DCB_CMD_BCN_SCFG, DCB_ATTR_BCN,
  960. pid, seq, flags);
  961. err:
  962. return ret;
  963. }
  964. static int dcbnl_build_peer_app(struct net_device *netdev, struct sk_buff* skb,
  965. int app_nested_type, int app_info_type,
  966. int app_entry_type)
  967. {
  968. struct dcb_peer_app_info info;
  969. struct dcb_app *table = NULL;
  970. const struct dcbnl_rtnl_ops *ops = netdev->dcbnl_ops;
  971. u16 app_count;
  972. int err;
  973. /**
  974. * retrieve the peer app configuration form the driver. If the driver
  975. * handlers fail exit without doing anything
  976. */
  977. err = ops->peer_getappinfo(netdev, &info, &app_count);
  978. if (!err && app_count) {
  979. table = kmalloc(sizeof(struct dcb_app) * app_count, GFP_KERNEL);
  980. if (!table)
  981. return -ENOMEM;
  982. err = ops->peer_getapptable(netdev, table);
  983. }
  984. if (!err) {
  985. u16 i;
  986. struct nlattr *app;
  987. /**
  988. * build the message, from here on the only possible failure
  989. * is due to the skb size
  990. */
  991. err = -EMSGSIZE;
  992. app = nla_nest_start(skb, app_nested_type);
  993. if (!app)
  994. goto nla_put_failure;
  995. if (app_info_type &&
  996. nla_put(skb, app_info_type, sizeof(info), &info))
  997. goto nla_put_failure;
  998. for (i = 0; i < app_count; i++) {
  999. if (nla_put(skb, app_entry_type, sizeof(struct dcb_app),
  1000. &table[i]))
  1001. goto nla_put_failure;
  1002. }
  1003. nla_nest_end(skb, app);
  1004. }
  1005. err = 0;
  1006. nla_put_failure:
  1007. kfree(table);
  1008. return err;
  1009. }
  1010. /* Handle IEEE 802.1Qaz GET commands. */
  1011. static int dcbnl_ieee_fill(struct sk_buff *skb, struct net_device *netdev)
  1012. {
  1013. struct nlattr *ieee, *app;
  1014. struct dcb_app_type *itr;
  1015. const struct dcbnl_rtnl_ops *ops = netdev->dcbnl_ops;
  1016. int dcbx;
  1017. int err = -EMSGSIZE;
  1018. if (nla_put_string(skb, DCB_ATTR_IFNAME, netdev->name))
  1019. goto nla_put_failure;
  1020. ieee = nla_nest_start(skb, DCB_ATTR_IEEE);
  1021. if (!ieee)
  1022. goto nla_put_failure;
  1023. if (ops->ieee_getets) {
  1024. struct ieee_ets ets;
  1025. err = ops->ieee_getets(netdev, &ets);
  1026. if (!err &&
  1027. nla_put(skb, DCB_ATTR_IEEE_ETS, sizeof(ets), &ets))
  1028. goto nla_put_failure;
  1029. }
  1030. if (ops->ieee_getmaxrate) {
  1031. struct ieee_maxrate maxrate;
  1032. err = ops->ieee_getmaxrate(netdev, &maxrate);
  1033. if (!err) {
  1034. err = nla_put(skb, DCB_ATTR_IEEE_MAXRATE,
  1035. sizeof(maxrate), &maxrate);
  1036. if (err)
  1037. goto nla_put_failure;
  1038. }
  1039. }
  1040. if (ops->ieee_getpfc) {
  1041. struct ieee_pfc pfc;
  1042. err = ops->ieee_getpfc(netdev, &pfc);
  1043. if (!err &&
  1044. nla_put(skb, DCB_ATTR_IEEE_PFC, sizeof(pfc), &pfc))
  1045. goto nla_put_failure;
  1046. }
  1047. app = nla_nest_start(skb, DCB_ATTR_IEEE_APP_TABLE);
  1048. if (!app)
  1049. goto nla_put_failure;
  1050. spin_lock(&dcb_lock);
  1051. list_for_each_entry(itr, &dcb_app_list, list) {
  1052. if (itr->ifindex == netdev->ifindex) {
  1053. err = nla_put(skb, DCB_ATTR_IEEE_APP, sizeof(itr->app),
  1054. &itr->app);
  1055. if (err) {
  1056. spin_unlock(&dcb_lock);
  1057. goto nla_put_failure;
  1058. }
  1059. }
  1060. }
  1061. if (netdev->dcbnl_ops->getdcbx)
  1062. dcbx = netdev->dcbnl_ops->getdcbx(netdev);
  1063. else
  1064. dcbx = -EOPNOTSUPP;
  1065. spin_unlock(&dcb_lock);
  1066. nla_nest_end(skb, app);
  1067. /* get peer info if available */
  1068. if (ops->ieee_peer_getets) {
  1069. struct ieee_ets ets;
  1070. err = ops->ieee_peer_getets(netdev, &ets);
  1071. if (!err &&
  1072. nla_put(skb, DCB_ATTR_IEEE_PEER_ETS, sizeof(ets), &ets))
  1073. goto nla_put_failure;
  1074. }
  1075. if (ops->ieee_peer_getpfc) {
  1076. struct ieee_pfc pfc;
  1077. err = ops->ieee_peer_getpfc(netdev, &pfc);
  1078. if (!err &&
  1079. nla_put(skb, DCB_ATTR_IEEE_PEER_PFC, sizeof(pfc), &pfc))
  1080. goto nla_put_failure;
  1081. }
  1082. if (ops->peer_getappinfo && ops->peer_getapptable) {
  1083. err = dcbnl_build_peer_app(netdev, skb,
  1084. DCB_ATTR_IEEE_PEER_APP,
  1085. DCB_ATTR_IEEE_APP_UNSPEC,
  1086. DCB_ATTR_IEEE_APP);
  1087. if (err)
  1088. goto nla_put_failure;
  1089. }
  1090. nla_nest_end(skb, ieee);
  1091. if (dcbx >= 0) {
  1092. err = nla_put_u8(skb, DCB_ATTR_DCBX, dcbx);
  1093. if (err)
  1094. goto nla_put_failure;
  1095. }
  1096. return 0;
  1097. nla_put_failure:
  1098. return err;
  1099. }
  1100. static int dcbnl_cee_pg_fill(struct sk_buff *skb, struct net_device *dev,
  1101. int dir)
  1102. {
  1103. u8 pgid, up_map, prio, tc_pct;
  1104. const struct dcbnl_rtnl_ops *ops = dev->dcbnl_ops;
  1105. int i = dir ? DCB_ATTR_CEE_TX_PG : DCB_ATTR_CEE_RX_PG;
  1106. struct nlattr *pg = nla_nest_start(skb, i);
  1107. if (!pg)
  1108. goto nla_put_failure;
  1109. for (i = DCB_PG_ATTR_TC_0; i <= DCB_PG_ATTR_TC_7; i++) {
  1110. struct nlattr *tc_nest = nla_nest_start(skb, i);
  1111. if (!tc_nest)
  1112. goto nla_put_failure;
  1113. pgid = DCB_ATTR_VALUE_UNDEFINED;
  1114. prio = DCB_ATTR_VALUE_UNDEFINED;
  1115. tc_pct = DCB_ATTR_VALUE_UNDEFINED;
  1116. up_map = DCB_ATTR_VALUE_UNDEFINED;
  1117. if (!dir)
  1118. ops->getpgtccfgrx(dev, i - DCB_PG_ATTR_TC_0,
  1119. &prio, &pgid, &tc_pct, &up_map);
  1120. else
  1121. ops->getpgtccfgtx(dev, i - DCB_PG_ATTR_TC_0,
  1122. &prio, &pgid, &tc_pct, &up_map);
  1123. if (nla_put_u8(skb, DCB_TC_ATTR_PARAM_PGID, pgid) ||
  1124. nla_put_u8(skb, DCB_TC_ATTR_PARAM_UP_MAPPING, up_map) ||
  1125. nla_put_u8(skb, DCB_TC_ATTR_PARAM_STRICT_PRIO, prio) ||
  1126. nla_put_u8(skb, DCB_TC_ATTR_PARAM_BW_PCT, tc_pct))
  1127. goto nla_put_failure;
  1128. nla_nest_end(skb, tc_nest);
  1129. }
  1130. for (i = DCB_PG_ATTR_BW_ID_0; i <= DCB_PG_ATTR_BW_ID_7; i++) {
  1131. tc_pct = DCB_ATTR_VALUE_UNDEFINED;
  1132. if (!dir)
  1133. ops->getpgbwgcfgrx(dev, i - DCB_PG_ATTR_BW_ID_0,
  1134. &tc_pct);
  1135. else
  1136. ops->getpgbwgcfgtx(dev, i - DCB_PG_ATTR_BW_ID_0,
  1137. &tc_pct);
  1138. if (nla_put_u8(skb, i, tc_pct))
  1139. goto nla_put_failure;
  1140. }
  1141. nla_nest_end(skb, pg);
  1142. return 0;
  1143. nla_put_failure:
  1144. return -EMSGSIZE;
  1145. }
  1146. static int dcbnl_cee_fill(struct sk_buff *skb, struct net_device *netdev)
  1147. {
  1148. struct nlattr *cee, *app;
  1149. struct dcb_app_type *itr;
  1150. const struct dcbnl_rtnl_ops *ops = netdev->dcbnl_ops;
  1151. int dcbx, i, err = -EMSGSIZE;
  1152. u8 value;
  1153. if (nla_put_string(skb, DCB_ATTR_IFNAME, netdev->name))
  1154. goto nla_put_failure;
  1155. cee = nla_nest_start(skb, DCB_ATTR_CEE);
  1156. if (!cee)
  1157. goto nla_put_failure;
  1158. /* local pg */
  1159. if (ops->getpgtccfgtx && ops->getpgbwgcfgtx) {
  1160. err = dcbnl_cee_pg_fill(skb, netdev, 1);
  1161. if (err)
  1162. goto nla_put_failure;
  1163. }
  1164. if (ops->getpgtccfgrx && ops->getpgbwgcfgrx) {
  1165. err = dcbnl_cee_pg_fill(skb, netdev, 0);
  1166. if (err)
  1167. goto nla_put_failure;
  1168. }
  1169. /* local pfc */
  1170. if (ops->getpfccfg) {
  1171. struct nlattr *pfc_nest = nla_nest_start(skb, DCB_ATTR_CEE_PFC);
  1172. if (!pfc_nest)
  1173. goto nla_put_failure;
  1174. for (i = DCB_PFC_UP_ATTR_0; i <= DCB_PFC_UP_ATTR_7; i++) {
  1175. ops->getpfccfg(netdev, i - DCB_PFC_UP_ATTR_0, &value);
  1176. if (nla_put_u8(skb, i, value))
  1177. goto nla_put_failure;
  1178. }
  1179. nla_nest_end(skb, pfc_nest);
  1180. }
  1181. /* local app */
  1182. spin_lock(&dcb_lock);
  1183. app = nla_nest_start(skb, DCB_ATTR_CEE_APP_TABLE);
  1184. if (!app)
  1185. goto dcb_unlock;
  1186. list_for_each_entry(itr, &dcb_app_list, list) {
  1187. if (itr->ifindex == netdev->ifindex) {
  1188. struct nlattr *app_nest = nla_nest_start(skb,
  1189. DCB_ATTR_APP);
  1190. if (!app_nest)
  1191. goto dcb_unlock;
  1192. err = nla_put_u8(skb, DCB_APP_ATTR_IDTYPE,
  1193. itr->app.selector);
  1194. if (err)
  1195. goto dcb_unlock;
  1196. err = nla_put_u16(skb, DCB_APP_ATTR_ID,
  1197. itr->app.protocol);
  1198. if (err)
  1199. goto dcb_unlock;
  1200. err = nla_put_u8(skb, DCB_APP_ATTR_PRIORITY,
  1201. itr->app.priority);
  1202. if (err)
  1203. goto dcb_unlock;
  1204. nla_nest_end(skb, app_nest);
  1205. }
  1206. }
  1207. nla_nest_end(skb, app);
  1208. if (netdev->dcbnl_ops->getdcbx)
  1209. dcbx = netdev->dcbnl_ops->getdcbx(netdev);
  1210. else
  1211. dcbx = -EOPNOTSUPP;
  1212. spin_unlock(&dcb_lock);
  1213. /* features flags */
  1214. if (ops->getfeatcfg) {
  1215. struct nlattr *feat = nla_nest_start(skb, DCB_ATTR_CEE_FEAT);
  1216. if (!feat)
  1217. goto nla_put_failure;
  1218. for (i = DCB_FEATCFG_ATTR_ALL + 1; i <= DCB_FEATCFG_ATTR_MAX;
  1219. i++)
  1220. if (!ops->getfeatcfg(netdev, i, &value) &&
  1221. nla_put_u8(skb, i, value))
  1222. goto nla_put_failure;
  1223. nla_nest_end(skb, feat);
  1224. }
  1225. /* peer info if available */
  1226. if (ops->cee_peer_getpg) {
  1227. struct cee_pg pg;
  1228. err = ops->cee_peer_getpg(netdev, &pg);
  1229. if (!err &&
  1230. nla_put(skb, DCB_ATTR_CEE_PEER_PG, sizeof(pg), &pg))
  1231. goto nla_put_failure;
  1232. }
  1233. if (ops->cee_peer_getpfc) {
  1234. struct cee_pfc pfc;
  1235. err = ops->cee_peer_getpfc(netdev, &pfc);
  1236. if (!err &&
  1237. nla_put(skb, DCB_ATTR_CEE_PEER_PFC, sizeof(pfc), &pfc))
  1238. goto nla_put_failure;
  1239. }
  1240. if (ops->peer_getappinfo && ops->peer_getapptable) {
  1241. err = dcbnl_build_peer_app(netdev, skb,
  1242. DCB_ATTR_CEE_PEER_APP_TABLE,
  1243. DCB_ATTR_CEE_PEER_APP_INFO,
  1244. DCB_ATTR_CEE_PEER_APP);
  1245. if (err)
  1246. goto nla_put_failure;
  1247. }
  1248. nla_nest_end(skb, cee);
  1249. /* DCBX state */
  1250. if (dcbx >= 0) {
  1251. err = nla_put_u8(skb, DCB_ATTR_DCBX, dcbx);
  1252. if (err)
  1253. goto nla_put_failure;
  1254. }
  1255. return 0;
  1256. dcb_unlock:
  1257. spin_unlock(&dcb_lock);
  1258. nla_put_failure:
  1259. return err;
  1260. }
  1261. static int dcbnl_notify(struct net_device *dev, int event, int cmd,
  1262. u32 seq, u32 pid, int dcbx_ver)
  1263. {
  1264. struct net *net = dev_net(dev);
  1265. struct sk_buff *skb;
  1266. struct nlmsghdr *nlh;
  1267. struct dcbmsg *dcb;
  1268. const struct dcbnl_rtnl_ops *ops = dev->dcbnl_ops;
  1269. int err;
  1270. if (!ops)
  1271. return -EOPNOTSUPP;
  1272. skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
  1273. if (!skb)
  1274. return -ENOBUFS;
  1275. nlh = nlmsg_put(skb, pid, 0, event, sizeof(*dcb), 0);
  1276. if (nlh == NULL) {
  1277. nlmsg_free(skb);
  1278. return -EMSGSIZE;
  1279. }
  1280. dcb = NLMSG_DATA(nlh);
  1281. dcb->dcb_family = AF_UNSPEC;
  1282. dcb->cmd = cmd;
  1283. if (dcbx_ver == DCB_CAP_DCBX_VER_IEEE)
  1284. err = dcbnl_ieee_fill(skb, dev);
  1285. else
  1286. err = dcbnl_cee_fill(skb, dev);
  1287. if (err < 0) {
  1288. /* Report error to broadcast listeners */
  1289. nlmsg_cancel(skb, nlh);
  1290. kfree_skb(skb);
  1291. rtnl_set_sk_err(net, RTNLGRP_DCB, err);
  1292. } else {
  1293. /* End nlmsg and notify broadcast listeners */
  1294. nlmsg_end(skb, nlh);
  1295. rtnl_notify(skb, net, 0, RTNLGRP_DCB, NULL, GFP_KERNEL);
  1296. }
  1297. return err;
  1298. }
  1299. int dcbnl_ieee_notify(struct net_device *dev, int event, int cmd,
  1300. u32 seq, u32 pid)
  1301. {
  1302. return dcbnl_notify(dev, event, cmd, seq, pid, DCB_CAP_DCBX_VER_IEEE);
  1303. }
  1304. EXPORT_SYMBOL(dcbnl_ieee_notify);
  1305. int dcbnl_cee_notify(struct net_device *dev, int event, int cmd,
  1306. u32 seq, u32 pid)
  1307. {
  1308. return dcbnl_notify(dev, event, cmd, seq, pid, DCB_CAP_DCBX_VER_CEE);
  1309. }
  1310. EXPORT_SYMBOL(dcbnl_cee_notify);
  1311. /* Handle IEEE 802.1Qaz SET commands. If any requested operation can not
  1312. * be completed the entire msg is aborted and error value is returned.
  1313. * No attempt is made to reconcile the case where only part of the
  1314. * cmd can be completed.
  1315. */
  1316. static int dcbnl_ieee_set(struct net_device *netdev, struct nlattr **tb,
  1317. u32 pid, u32 seq, u16 flags)
  1318. {
  1319. const struct dcbnl_rtnl_ops *ops = netdev->dcbnl_ops;
  1320. struct nlattr *ieee[DCB_ATTR_IEEE_MAX + 1];
  1321. int err = -EOPNOTSUPP;
  1322. if (!ops)
  1323. return err;
  1324. if (!tb[DCB_ATTR_IEEE])
  1325. return -EINVAL;
  1326. err = nla_parse_nested(ieee, DCB_ATTR_IEEE_MAX,
  1327. tb[DCB_ATTR_IEEE], dcbnl_ieee_policy);
  1328. if (err)
  1329. return err;
  1330. if (ieee[DCB_ATTR_IEEE_ETS] && ops->ieee_setets) {
  1331. struct ieee_ets *ets = nla_data(ieee[DCB_ATTR_IEEE_ETS]);
  1332. err = ops->ieee_setets(netdev, ets);
  1333. if (err)
  1334. goto err;
  1335. }
  1336. if (ieee[DCB_ATTR_IEEE_MAXRATE] && ops->ieee_setmaxrate) {
  1337. struct ieee_maxrate *maxrate =
  1338. nla_data(ieee[DCB_ATTR_IEEE_MAXRATE]);
  1339. err = ops->ieee_setmaxrate(netdev, maxrate);
  1340. if (err)
  1341. goto err;
  1342. }
  1343. if (ieee[DCB_ATTR_IEEE_PFC] && ops->ieee_setpfc) {
  1344. struct ieee_pfc *pfc = nla_data(ieee[DCB_ATTR_IEEE_PFC]);
  1345. err = ops->ieee_setpfc(netdev, pfc);
  1346. if (err)
  1347. goto err;
  1348. }
  1349. if (ieee[DCB_ATTR_IEEE_APP_TABLE]) {
  1350. struct nlattr *attr;
  1351. int rem;
  1352. nla_for_each_nested(attr, ieee[DCB_ATTR_IEEE_APP_TABLE], rem) {
  1353. struct dcb_app *app_data;
  1354. if (nla_type(attr) != DCB_ATTR_IEEE_APP)
  1355. continue;
  1356. app_data = nla_data(attr);
  1357. if (ops->ieee_setapp)
  1358. err = ops->ieee_setapp(netdev, app_data);
  1359. else
  1360. err = dcb_ieee_setapp(netdev, app_data);
  1361. if (err)
  1362. goto err;
  1363. }
  1364. }
  1365. err:
  1366. dcbnl_reply(err, RTM_SETDCB, DCB_CMD_IEEE_SET, DCB_ATTR_IEEE,
  1367. pid, seq, flags);
  1368. dcbnl_ieee_notify(netdev, RTM_SETDCB, DCB_CMD_IEEE_SET, seq, 0);
  1369. return err;
  1370. }

static int dcbnl_ieee_get(struct net_device *netdev, struct nlattr **tb,
			  u32 pid, u32 seq, u16 flags)
{
	struct net *net = dev_net(netdev);
	struct sk_buff *skb;
	struct nlmsghdr *nlh;
	struct dcbmsg *dcb;
	const struct dcbnl_rtnl_ops *ops = netdev->dcbnl_ops;
	int err;

	if (!ops)
		return -EOPNOTSUPP;

	skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!skb)
		return -ENOBUFS;

	nlh = nlmsg_put(skb, pid, seq, RTM_GETDCB, sizeof(*dcb), flags);
	if (nlh == NULL) {
		nlmsg_free(skb);
		return -EMSGSIZE;
	}

	dcb = NLMSG_DATA(nlh);
	dcb->dcb_family = AF_UNSPEC;
	dcb->cmd = DCB_CMD_IEEE_GET;

	err = dcbnl_ieee_fill(skb, netdev);
	if (err < 0) {
		nlmsg_cancel(skb, nlh);
		kfree_skb(skb);
	} else {
		nlmsg_end(skb, nlh);
		err = rtnl_unicast(skb, net, pid);
	}

	return err;
}

static int dcbnl_ieee_del(struct net_device *netdev, struct nlattr **tb,
			  u32 pid, u32 seq, u16 flags)
{
	const struct dcbnl_rtnl_ops *ops = netdev->dcbnl_ops;
	struct nlattr *ieee[DCB_ATTR_IEEE_MAX + 1];
	int err = -EOPNOTSUPP;

	if (!ops)
		return -EOPNOTSUPP;

	if (!tb[DCB_ATTR_IEEE])
		return -EINVAL;

	err = nla_parse_nested(ieee, DCB_ATTR_IEEE_MAX,
			       tb[DCB_ATTR_IEEE], dcbnl_ieee_policy);
	if (err)
		return err;

	if (ieee[DCB_ATTR_IEEE_APP_TABLE]) {
		struct nlattr *attr;
		int rem;

		nla_for_each_nested(attr, ieee[DCB_ATTR_IEEE_APP_TABLE], rem) {
			struct dcb_app *app_data;

			if (nla_type(attr) != DCB_ATTR_IEEE_APP)
				continue;
			app_data = nla_data(attr);
			if (ops->ieee_delapp)
				err = ops->ieee_delapp(netdev, app_data);
			else
				err = dcb_ieee_delapp(netdev, app_data);
			if (err)
				goto err;
		}
	}

err:
	dcbnl_reply(err, RTM_SETDCB, DCB_CMD_IEEE_DEL, DCB_ATTR_IEEE,
		    pid, seq, flags);
	dcbnl_ieee_notify(netdev, RTM_SETDCB, DCB_CMD_IEEE_DEL, seq, 0);
	return err;
}

/* DCBX configuration */
static int dcbnl_getdcbx(struct net_device *netdev, struct nlattr **tb,
			 u32 pid, u32 seq, u16 flags)
{
	int ret;

	if (!netdev->dcbnl_ops->getdcbx)
		return -EOPNOTSUPP;

	ret = dcbnl_reply(netdev->dcbnl_ops->getdcbx(netdev), RTM_GETDCB,
			  DCB_CMD_GDCBX, DCB_ATTR_DCBX, pid, seq, flags);

	return ret;
}

static int dcbnl_setdcbx(struct net_device *netdev, struct nlattr **tb,
			 u32 pid, u32 seq, u16 flags)
{
	int ret;
	u8 value;

	if (!netdev->dcbnl_ops->setdcbx)
		return -EOPNOTSUPP;

	if (!tb[DCB_ATTR_DCBX])
		return -EINVAL;

	value = nla_get_u8(tb[DCB_ATTR_DCBX]);

	ret = dcbnl_reply(netdev->dcbnl_ops->setdcbx(netdev, value),
			  RTM_SETDCB, DCB_CMD_SDCBX, DCB_ATTR_DCBX,
			  pid, seq, flags);

	return ret;
}
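
/*
 * Illustrative sketch (hypothetical "foo_" driver): a setdcbx callback
 * returns non-zero to reject a mode it cannot support.  A host-managed
 * driver typically refuses DCB_CAP_DCBX_LLD_MANAGED and requires at
 * least one protocol version bit.
 *
 *	static u8 foo_setdcbx(struct net_device *dev, u8 mode)
 *	{
 *		if ((mode & DCB_CAP_DCBX_LLD_MANAGED) ||
 *		    !(mode & (DCB_CAP_DCBX_VER_CEE | DCB_CAP_DCBX_VER_IEEE)))
 *			return 1;	// non-zero signals failure
 *		// ...store mode and reprogram the device...
 *		return 0;
 *	}
 */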

static int dcbnl_getfeatcfg(struct net_device *netdev, struct nlattr **tb,
			    u32 pid, u32 seq, u16 flags)
{
	struct sk_buff *dcbnl_skb;
	struct nlmsghdr *nlh;
	struct dcbmsg *dcb;
	struct nlattr *data[DCB_FEATCFG_ATTR_MAX + 1], *nest;
	u8 value;
	int ret, i;
	int getall = 0;

	if (!netdev->dcbnl_ops->getfeatcfg)
		return -EOPNOTSUPP;

	if (!tb[DCB_ATTR_FEATCFG])
		return -EINVAL;

	ret = nla_parse_nested(data, DCB_FEATCFG_ATTR_MAX, tb[DCB_ATTR_FEATCFG],
			       dcbnl_featcfg_nest);
	if (ret)
		goto err_out;

	dcbnl_skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!dcbnl_skb) {
		ret = -ENOBUFS;
		goto err_out;
	}

	nlh = NLMSG_NEW(dcbnl_skb, pid, seq, RTM_GETDCB, sizeof(*dcb), flags);

	dcb = NLMSG_DATA(nlh);
	dcb->dcb_family = AF_UNSPEC;
	dcb->cmd = DCB_CMD_GFEATCFG;

	nest = nla_nest_start(dcbnl_skb, DCB_ATTR_FEATCFG);
	if (!nest) {
		ret = -EMSGSIZE;
		goto nla_put_failure;
	}

	if (data[DCB_FEATCFG_ATTR_ALL])
		getall = 1;

	for (i = DCB_FEATCFG_ATTR_ALL + 1; i <= DCB_FEATCFG_ATTR_MAX; i++) {
		if (!getall && !data[i])
			continue;

		ret = netdev->dcbnl_ops->getfeatcfg(netdev, i, &value);
		if (!ret)
			ret = nla_put_u8(dcbnl_skb, i, value);

		if (ret) {
			nla_nest_cancel(dcbnl_skb, nest);
			goto nla_put_failure;
		}
	}
	nla_nest_end(dcbnl_skb, nest);

	nlmsg_end(dcbnl_skb, nlh);

	return rtnl_unicast(dcbnl_skb, &init_net, pid);

nla_put_failure:
	nlmsg_cancel(dcbnl_skb, nlh);
nlmsg_failure:
	kfree_skb(dcbnl_skb);
err_out:
	return ret;
}

static int dcbnl_setfeatcfg(struct net_device *netdev, struct nlattr **tb,
			    u32 pid, u32 seq, u16 flags)
{
	struct nlattr *data[DCB_FEATCFG_ATTR_MAX + 1];
	int ret, i;
	u8 value;

	if (!netdev->dcbnl_ops->setfeatcfg)
		return -EOPNOTSUPP;

	if (!tb[DCB_ATTR_FEATCFG])
		return -EINVAL;

	ret = nla_parse_nested(data, DCB_FEATCFG_ATTR_MAX, tb[DCB_ATTR_FEATCFG],
			       dcbnl_featcfg_nest);
	if (ret)
		goto err;

	for (i = DCB_FEATCFG_ATTR_ALL + 1; i <= DCB_FEATCFG_ATTR_MAX; i++) {
		if (data[i] == NULL)
			continue;

		value = nla_get_u8(data[i]);

		ret = netdev->dcbnl_ops->setfeatcfg(netdev, i, value);
		if (ret)
			goto err;
	}
err:
	dcbnl_reply(ret, RTM_SETDCB, DCB_CMD_SFEATCFG, DCB_ATTR_FEATCFG,
		    pid, seq, flags);

	return ret;
}
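
/*
 * Illustrative sketch (hypothetical "foo_" driver): the featcfg
 * callbacks are indexed by DCB_FEATCFG_ATTR_{PG,PFC,APP} and exchange a
 * flag byte built from bits such as DCB_FEATCFG_ENABLE and
 * DCB_FEATCFG_WILLING.
 *
 *	static int foo_getfeatcfg(struct net_device *dev, int featid,
 *				  u8 *flags)
 *	{
 *		switch (featid) {
 *		case DCB_FEATCFG_ATTR_PFC:
 *			*flags = DCB_FEATCFG_ENABLE;	// PFC on, not willing
 *			return 0;
 *		default:
 *			return -EINVAL;
 *		}
 *	}
 */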

/* Handle CEE DCBX GET commands. */
static int dcbnl_cee_get(struct net_device *netdev, struct nlattr **tb,
			 u32 pid, u32 seq, u16 flags)
{
	struct net *net = dev_net(netdev);
	struct sk_buff *skb;
	struct nlmsghdr *nlh;
	struct dcbmsg *dcb;
	const struct dcbnl_rtnl_ops *ops = netdev->dcbnl_ops;
	int err;

	if (!ops)
		return -EOPNOTSUPP;

	skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!skb)
		return -ENOBUFS;

	nlh = nlmsg_put(skb, pid, seq, RTM_GETDCB, sizeof(*dcb), flags);
	if (nlh == NULL) {
		nlmsg_free(skb);
		return -EMSGSIZE;
	}

	dcb = NLMSG_DATA(nlh);
	dcb->dcb_family = AF_UNSPEC;
	dcb->cmd = DCB_CMD_CEE_GET;

	err = dcbnl_cee_fill(skb, netdev);
	if (err < 0) {
		nlmsg_cancel(skb, nlh);
		nlmsg_free(skb);
	} else {
		nlmsg_end(skb, nlh);
		err = rtnl_unicast(skb, net, pid);
	}

	return err;
}

static int dcb_doit(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
{
	struct net *net = sock_net(skb->sk);
	struct net_device *netdev;
	struct dcbmsg *dcb = (struct dcbmsg *)NLMSG_DATA(nlh);
	struct nlattr *tb[DCB_ATTR_MAX + 1];
	u32 pid = skb ? NETLINK_CB(skb).pid : 0;
	int ret = -EINVAL;

	if (!net_eq(net, &init_net))
		return -EINVAL;

	ret = nlmsg_parse(nlh, sizeof(*dcb), tb, DCB_ATTR_MAX,
			  dcbnl_rtnl_policy);
	if (ret < 0)
		return ret;

	if (!tb[DCB_ATTR_IFNAME])
		return -EINVAL;

	netdev = dev_get_by_name(&init_net, nla_data(tb[DCB_ATTR_IFNAME]));
	if (!netdev)
		return -EINVAL;

	if (!netdev->dcbnl_ops)
		goto errout;

	switch (dcb->cmd) {
	case DCB_CMD_GSTATE:
		ret = dcbnl_getstate(netdev, tb, pid, nlh->nlmsg_seq,
				     nlh->nlmsg_flags);
		goto out;
	case DCB_CMD_PFC_GCFG:
		ret = dcbnl_getpfccfg(netdev, tb, pid, nlh->nlmsg_seq,
				      nlh->nlmsg_flags);
		goto out;
	case DCB_CMD_GPERM_HWADDR:
		ret = dcbnl_getperm_hwaddr(netdev, tb, pid, nlh->nlmsg_seq,
					   nlh->nlmsg_flags);
		goto out;
	case DCB_CMD_PGTX_GCFG:
		ret = dcbnl_pgtx_getcfg(netdev, tb, pid, nlh->nlmsg_seq,
					nlh->nlmsg_flags);
		goto out;
	case DCB_CMD_PGRX_GCFG:
		ret = dcbnl_pgrx_getcfg(netdev, tb, pid, nlh->nlmsg_seq,
					nlh->nlmsg_flags);
		goto out;
	case DCB_CMD_BCN_GCFG:
		ret = dcbnl_bcn_getcfg(netdev, tb, pid, nlh->nlmsg_seq,
				       nlh->nlmsg_flags);
		goto out;
	case DCB_CMD_SSTATE:
		ret = dcbnl_setstate(netdev, tb, pid, nlh->nlmsg_seq,
				     nlh->nlmsg_flags);
		goto out;
	case DCB_CMD_PFC_SCFG:
		ret = dcbnl_setpfccfg(netdev, tb, pid, nlh->nlmsg_seq,
				      nlh->nlmsg_flags);
		goto out;
	case DCB_CMD_SET_ALL:
		ret = dcbnl_setall(netdev, tb, pid, nlh->nlmsg_seq,
				   nlh->nlmsg_flags);
		goto out;
	case DCB_CMD_PGTX_SCFG:
		ret = dcbnl_pgtx_setcfg(netdev, tb, pid, nlh->nlmsg_seq,
					nlh->nlmsg_flags);
		goto out;
	case DCB_CMD_PGRX_SCFG:
		ret = dcbnl_pgrx_setcfg(netdev, tb, pid, nlh->nlmsg_seq,
					nlh->nlmsg_flags);
		goto out;
	case DCB_CMD_GCAP:
		ret = dcbnl_getcap(netdev, tb, pid, nlh->nlmsg_seq,
				   nlh->nlmsg_flags);
		goto out;
	case DCB_CMD_GNUMTCS:
		ret = dcbnl_getnumtcs(netdev, tb, pid, nlh->nlmsg_seq,
				      nlh->nlmsg_flags);
		goto out;
	case DCB_CMD_SNUMTCS:
		ret = dcbnl_setnumtcs(netdev, tb, pid, nlh->nlmsg_seq,
				      nlh->nlmsg_flags);
		goto out;
	case DCB_CMD_PFC_GSTATE:
		ret = dcbnl_getpfcstate(netdev, tb, pid, nlh->nlmsg_seq,
					nlh->nlmsg_flags);
		goto out;
	case DCB_CMD_PFC_SSTATE:
		ret = dcbnl_setpfcstate(netdev, tb, pid, nlh->nlmsg_seq,
					nlh->nlmsg_flags);
		goto out;
	case DCB_CMD_BCN_SCFG:
		ret = dcbnl_bcn_setcfg(netdev, tb, pid, nlh->nlmsg_seq,
				       nlh->nlmsg_flags);
		goto out;
	case DCB_CMD_GAPP:
		ret = dcbnl_getapp(netdev, tb, pid, nlh->nlmsg_seq,
				   nlh->nlmsg_flags);
		goto out;
	case DCB_CMD_SAPP:
		ret = dcbnl_setapp(netdev, tb, pid, nlh->nlmsg_seq,
				   nlh->nlmsg_flags);
		goto out;
	case DCB_CMD_IEEE_SET:
		ret = dcbnl_ieee_set(netdev, tb, pid, nlh->nlmsg_seq,
				     nlh->nlmsg_flags);
		goto out;
	case DCB_CMD_IEEE_GET:
		ret = dcbnl_ieee_get(netdev, tb, pid, nlh->nlmsg_seq,
				     nlh->nlmsg_flags);
		goto out;
	case DCB_CMD_IEEE_DEL:
		ret = dcbnl_ieee_del(netdev, tb, pid, nlh->nlmsg_seq,
				     nlh->nlmsg_flags);
		goto out;
	case DCB_CMD_GDCBX:
		ret = dcbnl_getdcbx(netdev, tb, pid, nlh->nlmsg_seq,
				    nlh->nlmsg_flags);
		goto out;
	case DCB_CMD_SDCBX:
		ret = dcbnl_setdcbx(netdev, tb, pid, nlh->nlmsg_seq,
				    nlh->nlmsg_flags);
		goto out;
	case DCB_CMD_GFEATCFG:
		ret = dcbnl_getfeatcfg(netdev, tb, pid, nlh->nlmsg_seq,
				       nlh->nlmsg_flags);
		goto out;
	case DCB_CMD_SFEATCFG:
		ret = dcbnl_setfeatcfg(netdev, tb, pid, nlh->nlmsg_seq,
				       nlh->nlmsg_flags);
		goto out;
	case DCB_CMD_CEE_GET:
		ret = dcbnl_cee_get(netdev, tb, pid, nlh->nlmsg_seq,
				    nlh->nlmsg_flags);
		goto out;
	default:
		goto errout;
	}
errout:
	ret = -EINVAL;
out:
	dev_put(netdev);
	return ret;
}
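
/*
 * Wire-format note (illustrative): every command dispatched above
 * arrives as an RTM_GETDCB or RTM_SETDCB rtnetlink message whose
 * payload begins with struct dcbmsg and must include DCB_ATTR_IFNAME.
 * Roughly:
 *
 *	struct nlmsghdr nlh;	// nlmsg_type = RTM_GETDCB or RTM_SETDCB
 *	struct dcbmsg dcb;	// dcb_family = AF_UNSPEC, cmd = DCB_CMD_*
 *	// nlattrs: DCB_ATTR_IFNAME ("eth0") plus any per-command attribute
 */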

/**
 * dcb_getapp - retrieve the DCBX application user priority
 *
 * On success, returns a non-zero 802.1p user priority bitmap;
 * otherwise returns 0, the invalid user priority bitmap, to
 * indicate an error.
 */
u8 dcb_getapp(struct net_device *dev, struct dcb_app *app)
{
	struct dcb_app_type *itr;
	u8 prio = 0;

	spin_lock(&dcb_lock);
	list_for_each_entry(itr, &dcb_app_list, list) {
		if (itr->app.selector == app->selector &&
		    itr->app.protocol == app->protocol &&
		    itr->ifindex == dev->ifindex) {
			prio = itr->app.priority;
			break;
		}
	}
	spin_unlock(&dcb_lock);

	return prio;
}
EXPORT_SYMBOL(dcb_getapp);
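
/*
 * Illustrative usage sketch: an FCoE consumer can look up the user
 * priority configured for the FCoE ethertype like this ("netdev" is
 * whatever device the caller holds).
 *
 *	struct dcb_app app = {
 *		.selector = DCB_APP_IDTYPE_ETHTYPE,
 *		.protocol = ETH_P_FCOE,
 *	};
 *	u8 up = dcb_getapp(netdev, &app);	// 0 means "not configured"
 */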

/**
 * dcb_setapp - add CEE dcb application data to app list
 *
 * Priority 0 is an invalid priority in the CEE spec.  This routine
 * removes applications from the app list if the priority is set
 * to zero.
 */
int dcb_setapp(struct net_device *dev, struct dcb_app *new)
{
	struct dcb_app_type *itr;
	struct dcb_app_type event;

	event.ifindex = dev->ifindex;
	memcpy(&event.app, new, sizeof(event.app));
	if (dev->dcbnl_ops->getdcbx)
		event.dcbx = dev->dcbnl_ops->getdcbx(dev);

	spin_lock(&dcb_lock);
	/* Search for existing match and replace */
	list_for_each_entry(itr, &dcb_app_list, list) {
		if (itr->app.selector == new->selector &&
		    itr->app.protocol == new->protocol &&
		    itr->ifindex == dev->ifindex) {
			if (new->priority)
				itr->app.priority = new->priority;
			else {
				list_del(&itr->list);
				kfree(itr);
			}
			goto out;
		}
	}
	/* App type does not exist; add a new application type */
	if (new->priority) {
		struct dcb_app_type *entry;

		entry = kmalloc(sizeof(struct dcb_app_type), GFP_ATOMIC);
		if (!entry) {
			spin_unlock(&dcb_lock);
			return -ENOMEM;
		}

		memcpy(&entry->app, new, sizeof(*new));
		entry->ifindex = dev->ifindex;
		list_add(&entry->list, &dcb_app_list);
	}
out:
	spin_unlock(&dcb_lock);
	call_dcbevent_notifiers(DCB_APP_EVENT, &event);
	return 0;
}
EXPORT_SYMBOL(dcb_setapp);
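
/*
 * Illustrative usage sketch: given the priority-zero semantics above,
 * the same call both installs and removes a CEE app entry.
 *
 *	struct dcb_app app = {
 *		.selector = DCB_APP_IDTYPE_ETHTYPE,
 *		.protocol = ETH_P_FCOE,
 *		.priority = 3,
 *	};
 *
 *	dcb_setapp(netdev, &app);	// map FCoE to priority 3
 *	app.priority = 0;
 *	dcb_setapp(netdev, &app);	// priority 0 deletes the entry
 */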

/**
 * dcb_ieee_getapp_mask - retrieve the IEEE DCB application priority
 *
 * Helper routine which on success returns a non-zero 802.1Qaz user
 * priority bitmap; otherwise returns 0 to indicate that the dcb_app
 * was not found in the APP list.
 */
u8 dcb_ieee_getapp_mask(struct net_device *dev, struct dcb_app *app)
{
	struct dcb_app_type *itr;
	u8 prio = 0;

	spin_lock(&dcb_lock);
	list_for_each_entry(itr, &dcb_app_list, list) {
		if (itr->app.selector == app->selector &&
		    itr->app.protocol == app->protocol &&
		    itr->ifindex == dev->ifindex)
			prio |= 1 << itr->app.priority;
	}
	spin_unlock(&dcb_lock);

	return prio;
}
EXPORT_SYMBOL(dcb_ieee_getapp_mask);
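
/*
 * Illustrative usage sketch: the return value has one bit set per
 * configured priority, so a caller needing a single priority commonly
 * takes the highest set bit.
 *
 *	u8 mask = dcb_ieee_getapp_mask(netdev, &app);
 *	int prio = mask ? fls(mask) - 1 : -1;	// highest prio, or none
 */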

/**
 * dcb_ieee_setapp - add IEEE dcb application data to app list
 *
 * This adds application data to the list.  Multiple application
 * entries may exist for the same selector and protocol as long
 * as the priorities are different.
 */
int dcb_ieee_setapp(struct net_device *dev, struct dcb_app *new)
{
	struct dcb_app_type *itr, *entry;
	struct dcb_app_type event;
	int err = 0;

	event.ifindex = dev->ifindex;
	memcpy(&event.app, new, sizeof(event.app));
	if (dev->dcbnl_ops->getdcbx)
		event.dcbx = dev->dcbnl_ops->getdcbx(dev);

	spin_lock(&dcb_lock);
	/* Search for existing match and abort if found */
	list_for_each_entry(itr, &dcb_app_list, list) {
		if (itr->app.selector == new->selector &&
		    itr->app.protocol == new->protocol &&
		    itr->app.priority == new->priority &&
		    itr->ifindex == dev->ifindex) {
			err = -EEXIST;
			goto out;
		}
	}

	/* App entry does not exist; add a new entry */
	entry = kmalloc(sizeof(struct dcb_app_type), GFP_ATOMIC);
	if (!entry) {
		err = -ENOMEM;
		goto out;
	}

	memcpy(&entry->app, new, sizeof(*new));
	entry->ifindex = dev->ifindex;
	list_add(&entry->list, &dcb_app_list);
out:
	spin_unlock(&dcb_lock);
	if (!err)
		call_dcbevent_notifiers(DCB_APP_EVENT, &event);
	return err;
}
EXPORT_SYMBOL(dcb_ieee_setapp);
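
/*
 * Illustrative usage sketch: IEEE entries are keyed by the full
 * (selector, protocol, priority) tuple, so adding the same tuple twice
 * fails with -EEXIST, while a second priority for the same protocol is
 * accepted.
 *
 *	struct dcb_app app = {
 *		.selector = IEEE_8021QAZ_APP_SEL_ETHERTYPE,
 *		.protocol = ETH_P_FCOE,
 *		.priority = 3,
 *	};
 *
 *	err = dcb_ieee_setapp(netdev, &app);	// ok
 *	err = dcb_ieee_setapp(netdev, &app);	// -EEXIST
 */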

/**
 * dcb_ieee_delapp - delete IEEE dcb application data from list
 *
 * This removes a matching APP data entry from the APP list.
 */
int dcb_ieee_delapp(struct net_device *dev, struct dcb_app *del)
{
	struct dcb_app_type *itr;
	struct dcb_app_type event;
	int err = -ENOENT;

	event.ifindex = dev->ifindex;
	memcpy(&event.app, del, sizeof(event.app));
	if (dev->dcbnl_ops->getdcbx)
		event.dcbx = dev->dcbnl_ops->getdcbx(dev);

	spin_lock(&dcb_lock);
	/* Search for existing match and remove it. */
	list_for_each_entry(itr, &dcb_app_list, list) {
		if (itr->app.selector == del->selector &&
		    itr->app.protocol == del->protocol &&
		    itr->app.priority == del->priority &&
		    itr->ifindex == dev->ifindex) {
			list_del(&itr->list);
			kfree(itr);
			err = 0;
			goto out;
		}
	}

out:
	spin_unlock(&dcb_lock);
	if (!err)
		call_dcbevent_notifiers(DCB_APP_EVENT, &event);
	return err;
}
EXPORT_SYMBOL(dcb_ieee_delapp);
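
/*
 * Illustrative usage sketch: deletion must name the exact tuple that
 * was added; removing an absent entry reports -ENOENT.
 *
 *	err = dcb_ieee_delapp(netdev, &app);	// 0 on first call
 *	err = dcb_ieee_delapp(netdev, &app);	// -ENOENT afterwards
 */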

static void dcb_flushapp(void)
{
	struct dcb_app_type *app;
	struct dcb_app_type *tmp;

	spin_lock(&dcb_lock);
	list_for_each_entry_safe(app, tmp, &dcb_app_list, list) {
		list_del(&app->list);
		kfree(app);
	}
	spin_unlock(&dcb_lock);
}

static int __init dcbnl_init(void)
{
	INIT_LIST_HEAD(&dcb_app_list);

	rtnl_register(PF_UNSPEC, RTM_GETDCB, dcb_doit, NULL, NULL);
	rtnl_register(PF_UNSPEC, RTM_SETDCB, dcb_doit, NULL, NULL);

	return 0;
}
module_init(dcbnl_init);

static void __exit dcbnl_exit(void)
{
	rtnl_unregister(PF_UNSPEC, RTM_GETDCB);
	rtnl_unregister(PF_UNSPEC, RTM_SETDCB);
	dcb_flushapp();
}
module_exit(dcbnl_exit);