/* net/dcb/dcbnl.c — Data Center Bridging netlink interface (see header below). */
  1. /*
  2. * Copyright (c) 2008-2011, Intel Corporation.
  3. *
  4. * This program is free software; you can redistribute it and/or modify it
  5. * under the terms and conditions of the GNU General Public License,
  6. * version 2, as published by the Free Software Foundation.
  7. *
  8. * This program is distributed in the hope it will be useful, but WITHOUT
  9. * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  10. * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
  11. * more details.
  12. *
  13. * You should have received a copy of the GNU General Public License along with
  14. * this program; if not, see <http://www.gnu.org/licenses/>.
  15. *
  16. * Description: Data Center Bridging netlink interface
  17. * Author: Lucy Liu <lucy.liu@intel.com>
  18. */
  19. #include <linux/netdevice.h>
  20. #include <linux/netlink.h>
  21. #include <linux/slab.h>
  22. #include <net/netlink.h>
  23. #include <net/rtnetlink.h>
  24. #include <linux/dcbnl.h>
  25. #include <net/dcbevent.h>
  26. #include <linux/rtnetlink.h>
  27. #include <linux/init.h>
  28. #include <net/sock.h>
  29. /* Data Center Bridging (DCB) is a collection of Ethernet enhancements
  30. * intended to allow network traffic with differing requirements
  31. * (highly reliable, no drops vs. best effort vs. low latency) to operate
  32. * and co-exist on Ethernet. Current DCB features are:
  33. *
  34. * Enhanced Transmission Selection (aka Priority Grouping [PG]) - provides a
  35. * framework for assigning bandwidth guarantees to traffic classes.
  36. *
  37. * Priority-based Flow Control (PFC) - provides a flow control mechanism which
  38. * can work independently for each 802.1p priority.
  39. *
  40. * Congestion Notification - provides a mechanism for end-to-end congestion
  41. * control for protocols which do not have built-in congestion management.
  42. *
  43. * More information about the emerging standards for these Ethernet features
  44. * can be found at: http://www.ieee802.org/1/pages/dcbridges.html
  45. *
  46. * This file implements an rtnetlink interface to allow configuration of DCB
  47. * features for capable devices.
  48. */
  49. /**************** DCB attribute policies *************************************/
/* DCB netlink attributes policy: top-level attribute types/lengths accepted
 * from userspace on RTM_GETDCB/RTM_SETDCB messages. */
static const struct nla_policy dcbnl_rtnl_policy[DCB_ATTR_MAX + 1] = {
	[DCB_ATTR_IFNAME] = {.type = NLA_NUL_STRING, .len = IFNAMSIZ - 1},
	[DCB_ATTR_STATE] = {.type = NLA_U8},
	[DCB_ATTR_PFC_CFG] = {.type = NLA_NESTED},
	[DCB_ATTR_PG_CFG] = {.type = NLA_NESTED},
	[DCB_ATTR_SET_ALL] = {.type = NLA_U8},
	[DCB_ATTR_PERM_HWADDR] = {.type = NLA_FLAG},
	[DCB_ATTR_CAP] = {.type = NLA_NESTED},
	[DCB_ATTR_PFC_STATE] = {.type = NLA_U8},
	[DCB_ATTR_BCN] = {.type = NLA_NESTED},
	[DCB_ATTR_APP] = {.type = NLA_NESTED},
	[DCB_ATTR_IEEE] = {.type = NLA_NESTED},
	[DCB_ATTR_DCBX] = {.type = NLA_U8},
	[DCB_ATTR_FEATCFG] = {.type = NLA_NESTED},
};
/* DCB priority flow control to User Priority nested attributes: one u8 per
 * 802.1p priority, plus an ALL flag meaning "report every priority". */
static const struct nla_policy dcbnl_pfc_up_nest[DCB_PFC_UP_ATTR_MAX + 1] = {
	[DCB_PFC_UP_ATTR_0] = {.type = NLA_U8},
	[DCB_PFC_UP_ATTR_1] = {.type = NLA_U8},
	[DCB_PFC_UP_ATTR_2] = {.type = NLA_U8},
	[DCB_PFC_UP_ATTR_3] = {.type = NLA_U8},
	[DCB_PFC_UP_ATTR_4] = {.type = NLA_U8},
	[DCB_PFC_UP_ATTR_5] = {.type = NLA_U8},
	[DCB_PFC_UP_ATTR_6] = {.type = NLA_U8},
	[DCB_PFC_UP_ATTR_7] = {.type = NLA_U8},
	[DCB_PFC_UP_ATTR_ALL] = {.type = NLA_FLAG},
};
/* DCB priority grouping nested attributes: per-TC nested parameter sets and
 * per-bandwidth-group u8 percentages, each with an ALL selector. */
static const struct nla_policy dcbnl_pg_nest[DCB_PG_ATTR_MAX + 1] = {
	[DCB_PG_ATTR_TC_0] = {.type = NLA_NESTED},
	[DCB_PG_ATTR_TC_1] = {.type = NLA_NESTED},
	[DCB_PG_ATTR_TC_2] = {.type = NLA_NESTED},
	[DCB_PG_ATTR_TC_3] = {.type = NLA_NESTED},
	[DCB_PG_ATTR_TC_4] = {.type = NLA_NESTED},
	[DCB_PG_ATTR_TC_5] = {.type = NLA_NESTED},
	[DCB_PG_ATTR_TC_6] = {.type = NLA_NESTED},
	[DCB_PG_ATTR_TC_7] = {.type = NLA_NESTED},
	[DCB_PG_ATTR_TC_ALL] = {.type = NLA_NESTED},
	[DCB_PG_ATTR_BW_ID_0] = {.type = NLA_U8},
	[DCB_PG_ATTR_BW_ID_1] = {.type = NLA_U8},
	[DCB_PG_ATTR_BW_ID_2] = {.type = NLA_U8},
	[DCB_PG_ATTR_BW_ID_3] = {.type = NLA_U8},
	[DCB_PG_ATTR_BW_ID_4] = {.type = NLA_U8},
	[DCB_PG_ATTR_BW_ID_5] = {.type = NLA_U8},
	[DCB_PG_ATTR_BW_ID_6] = {.type = NLA_U8},
	[DCB_PG_ATTR_BW_ID_7] = {.type = NLA_U8},
	[DCB_PG_ATTR_BW_ID_ALL] = {.type = NLA_FLAG},
};
/* DCB traffic class nested attributes: the per-TC parameters carried inside
 * each DCB_PG_ATTR_TC_* nest above. */
static const struct nla_policy dcbnl_tc_param_nest[DCB_TC_ATTR_PARAM_MAX + 1] = {
	[DCB_TC_ATTR_PARAM_PGID] = {.type = NLA_U8},
	[DCB_TC_ATTR_PARAM_UP_MAPPING] = {.type = NLA_U8},
	[DCB_TC_ATTR_PARAM_STRICT_PRIO] = {.type = NLA_U8},
	[DCB_TC_ATTR_PARAM_BW_PCT] = {.type = NLA_U8},
	[DCB_TC_ATTR_PARAM_ALL] = {.type = NLA_FLAG},
};
/* DCB capabilities nested attributes. */
static const struct nla_policy dcbnl_cap_nest[DCB_CAP_ATTR_MAX + 1] = {
	[DCB_CAP_ATTR_ALL] = {.type = NLA_FLAG},
	[DCB_CAP_ATTR_PG] = {.type = NLA_U8},
	[DCB_CAP_ATTR_PFC] = {.type = NLA_U8},
	[DCB_CAP_ATTR_UP2TC] = {.type = NLA_U8},
	[DCB_CAP_ATTR_PG_TCS] = {.type = NLA_U8},
	[DCB_CAP_ATTR_PFC_TCS] = {.type = NLA_U8},
	[DCB_CAP_ATTR_GSP] = {.type = NLA_U8},
	[DCB_CAP_ATTR_BCN] = {.type = NLA_U8},
	[DCB_CAP_ATTR_DCBX] = {.type = NLA_U8},
};
/* DCB number-of-traffic-classes nested attributes (the original comment was
 * a copy-paste of the capabilities one). */
static const struct nla_policy dcbnl_numtcs_nest[DCB_NUMTCS_ATTR_MAX + 1] = {
	[DCB_NUMTCS_ATTR_ALL] = {.type = NLA_FLAG},
	[DCB_NUMTCS_ATTR_PG] = {.type = NLA_U8},
	[DCB_NUMTCS_ATTR_PFC] = {.type = NLA_U8},
};
/* DCB BCN (Backward Congestion Notification) nested attributes: per-priority
 * RP enables plus the BCN configuration registers. */
static const struct nla_policy dcbnl_bcn_nest[DCB_BCN_ATTR_MAX + 1] = {
	[DCB_BCN_ATTR_RP_0] = {.type = NLA_U8},
	[DCB_BCN_ATTR_RP_1] = {.type = NLA_U8},
	[DCB_BCN_ATTR_RP_2] = {.type = NLA_U8},
	[DCB_BCN_ATTR_RP_3] = {.type = NLA_U8},
	[DCB_BCN_ATTR_RP_4] = {.type = NLA_U8},
	[DCB_BCN_ATTR_RP_5] = {.type = NLA_U8},
	[DCB_BCN_ATTR_RP_6] = {.type = NLA_U8},
	[DCB_BCN_ATTR_RP_7] = {.type = NLA_U8},
	[DCB_BCN_ATTR_RP_ALL] = {.type = NLA_FLAG},
	[DCB_BCN_ATTR_BCNA_0] = {.type = NLA_U32},
	[DCB_BCN_ATTR_BCNA_1] = {.type = NLA_U32},
	[DCB_BCN_ATTR_ALPHA] = {.type = NLA_U32},
	[DCB_BCN_ATTR_BETA] = {.type = NLA_U32},
	[DCB_BCN_ATTR_GD] = {.type = NLA_U32},
	[DCB_BCN_ATTR_GI] = {.type = NLA_U32},
	[DCB_BCN_ATTR_TMAX] = {.type = NLA_U32},
	[DCB_BCN_ATTR_TD] = {.type = NLA_U32},
	[DCB_BCN_ATTR_RMIN] = {.type = NLA_U32},
	[DCB_BCN_ATTR_W] = {.type = NLA_U32},
	[DCB_BCN_ATTR_RD] = {.type = NLA_U32},
	[DCB_BCN_ATTR_RU] = {.type = NLA_U32},
	[DCB_BCN_ATTR_WRTT] = {.type = NLA_U32},
	[DCB_BCN_ATTR_RI] = {.type = NLA_U32},
	[DCB_BCN_ATTR_C] = {.type = NLA_U32},
	[DCB_BCN_ATTR_ALL] = {.type = NLA_FLAG},
};
/* DCB APP nested attributes: (selector, protocol id) -> priority entries. */
static const struct nla_policy dcbnl_app_nest[DCB_APP_ATTR_MAX + 1] = {
	[DCB_APP_ATTR_IDTYPE] = {.type = NLA_U8},
	[DCB_APP_ATTR_ID] = {.type = NLA_U16},
	[DCB_APP_ATTR_PRIORITY] = {.type = NLA_U8},
};
/* IEEE 802.1Qaz nested attributes; fixed-size binary structs except the
 * variable-length APP table. */
static const struct nla_policy dcbnl_ieee_policy[DCB_ATTR_IEEE_MAX + 1] = {
	[DCB_ATTR_IEEE_ETS] = {.len = sizeof(struct ieee_ets)},
	[DCB_ATTR_IEEE_PFC] = {.len = sizeof(struct ieee_pfc)},
	[DCB_ATTR_IEEE_APP_TABLE] = {.type = NLA_NESTED},
	[DCB_ATTR_IEEE_MAXRATE] = {.len = sizeof(struct ieee_maxrate)},
	[DCB_ATTR_IEEE_QCN] = {.len = sizeof(struct ieee_qcn)},
	[DCB_ATTR_IEEE_QCN_STATS] = {.len = sizeof(struct ieee_qcn_stats)},
};
/* DCB feature state nested attributes (the original comment was a copy-paste
 * of the number-of-TCs one). */
static const struct nla_policy dcbnl_featcfg_nest[DCB_FEATCFG_ATTR_MAX + 1] = {
	[DCB_FEATCFG_ATTR_ALL] = {.type = NLA_FLAG},
	[DCB_FEATCFG_ATTR_PG] = {.type = NLA_U8},
	[DCB_FEATCFG_ATTR_PFC] = {.type = NLA_U8},
	[DCB_FEATCFG_ATTR_APP] = {.type = NLA_U8},
};
/* Global registry of dcb_app entries consulted by dcb_getapp()/dcb_setapp();
 * NOTE(review): dcb_lock presumably serializes access to it — confirm in
 * those helpers' definitions (not visible in this chunk). */
static LIST_HEAD(dcb_app_list);
static DEFINE_SPINLOCK(dcb_lock);
/* Allocate an skb and start a DCB netlink message of the given @type/@cmd.
 * Returns the skb (or NULL on allocation failure); when @nlhp is non-NULL,
 * *nlhp is set to the message header so the caller can nlmsg_end() it.
 */
static struct sk_buff *dcbnl_newmsg(int type, u8 cmd, u32 port, u32 seq,
				    u32 flags, struct nlmsghdr **nlhp)
{
	struct sk_buff *skb;
	struct dcbmsg *dcb;
	struct nlmsghdr *nlh;

	skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!skb)
		return NULL;

	nlh = nlmsg_put(skb, port, seq, type, sizeof(*dcb), flags);
	/* Cannot fail: the skb was just sized to hold at least one header. */
	BUG_ON(!nlh);

	dcb = nlmsg_data(nlh);
	dcb->dcb_family = AF_UNSPEC;
	dcb->cmd = cmd;
	dcb->dcb_pad = 0;

	if (nlhp)
		*nlhp = nlh;

	return skb;
}
  196. static int dcbnl_getstate(struct net_device *netdev, struct nlmsghdr *nlh,
  197. u32 seq, struct nlattr **tb, struct sk_buff *skb)
  198. {
  199. /* if (!tb[DCB_ATTR_STATE] || !netdev->dcbnl_ops->getstate) */
  200. if (!netdev->dcbnl_ops->getstate)
  201. return -EOPNOTSUPP;
  202. return nla_put_u8(skb, DCB_ATTR_STATE,
  203. netdev->dcbnl_ops->getstate(netdev));
  204. }
/* DCB_CMD_PFC_GCFG: report per-priority PFC configuration.  Userspace selects
 * priorities via DCB_PFC_UP_ATTR_* (or _ALL); the reply mirrors the request
 * as a DCB_ATTR_PFC_CFG nest of u8 values. */
static int dcbnl_getpfccfg(struct net_device *netdev, struct nlmsghdr *nlh,
			   u32 seq, struct nlattr **tb, struct sk_buff *skb)
{
	struct nlattr *data[DCB_PFC_UP_ATTR_MAX + 1], *nest;
	u8 value;
	int ret;
	int i;
	int getall = 0;

	if (!tb[DCB_ATTR_PFC_CFG])
		return -EINVAL;
	if (!netdev->dcbnl_ops->getpfccfg)
		return -EOPNOTSUPP;

	ret = nla_parse_nested(data, DCB_PFC_UP_ATTR_MAX,
			       tb[DCB_ATTR_PFC_CFG], dcbnl_pfc_up_nest, NULL);
	if (ret)
		return ret;

	nest = nla_nest_start(skb, DCB_ATTR_PFC_CFG);
	if (!nest)
		return -EMSGSIZE;

	if (data[DCB_PFC_UP_ATTR_ALL])
		getall = 1;

	for (i = DCB_PFC_UP_ATTR_0; i <= DCB_PFC_UP_ATTR_7; i++) {
		/* Only report priorities the request asked for. */
		if (!getall && !data[i])
			continue;

		netdev->dcbnl_ops->getpfccfg(netdev, i - DCB_PFC_UP_ATTR_0,
					     &value);
		ret = nla_put_u8(skb, i, value);
		if (ret) {
			/* Roll back the whole nest on a partial message. */
			nla_nest_cancel(skb, nest);
			return ret;
		}
	}
	nla_nest_end(skb, nest);

	return 0;
}
  240. static int dcbnl_getperm_hwaddr(struct net_device *netdev, struct nlmsghdr *nlh,
  241. u32 seq, struct nlattr **tb, struct sk_buff *skb)
  242. {
  243. u8 perm_addr[MAX_ADDR_LEN];
  244. if (!netdev->dcbnl_ops->getpermhwaddr)
  245. return -EOPNOTSUPP;
  246. memset(perm_addr, 0, sizeof(perm_addr));
  247. netdev->dcbnl_ops->getpermhwaddr(netdev, perm_addr);
  248. return nla_put(skb, DCB_ATTR_PERM_HWADDR, sizeof(perm_addr), perm_addr);
  249. }
/* DCB_CMD_GCAP: report device DCB capabilities.  Capabilities the driver
 * declines to report (getcap returns non-zero) are silently omitted. */
static int dcbnl_getcap(struct net_device *netdev, struct nlmsghdr *nlh,
			u32 seq, struct nlattr **tb, struct sk_buff *skb)
{
	struct nlattr *data[DCB_CAP_ATTR_MAX + 1], *nest;
	u8 value;
	int ret;
	int i;
	int getall = 0;

	if (!tb[DCB_ATTR_CAP])
		return -EINVAL;
	if (!netdev->dcbnl_ops->getcap)
		return -EOPNOTSUPP;

	ret = nla_parse_nested(data, DCB_CAP_ATTR_MAX, tb[DCB_ATTR_CAP],
			       dcbnl_cap_nest, NULL);
	if (ret)
		return ret;

	nest = nla_nest_start(skb, DCB_ATTR_CAP);
	if (!nest)
		return -EMSGSIZE;

	if (data[DCB_CAP_ATTR_ALL])
		getall = 1;

	/* DCB_CAP_ATTR_ALL is the first attribute; iterate the rest. */
	for (i = DCB_CAP_ATTR_ALL+1; i <= DCB_CAP_ATTR_MAX; i++) {
		if (!getall && !data[i])
			continue;

		if (!netdev->dcbnl_ops->getcap(netdev, i, &value)) {
			ret = nla_put_u8(skb, i, value);
			if (ret) {
				nla_nest_cancel(skb, nest);
				return ret;
			}
		}
	}
	nla_nest_end(skb, nest);

	return 0;
}
  285. static int dcbnl_getnumtcs(struct net_device *netdev, struct nlmsghdr *nlh,
  286. u32 seq, struct nlattr **tb, struct sk_buff *skb)
  287. {
  288. struct nlattr *data[DCB_NUMTCS_ATTR_MAX + 1], *nest;
  289. u8 value;
  290. int ret;
  291. int i;
  292. int getall = 0;
  293. if (!tb[DCB_ATTR_NUMTCS])
  294. return -EINVAL;
  295. if (!netdev->dcbnl_ops->getnumtcs)
  296. return -EOPNOTSUPP;
  297. ret = nla_parse_nested(data, DCB_NUMTCS_ATTR_MAX, tb[DCB_ATTR_NUMTCS],
  298. dcbnl_numtcs_nest, NULL);
  299. if (ret)
  300. return ret;
  301. nest = nla_nest_start(skb, DCB_ATTR_NUMTCS);
  302. if (!nest)
  303. return -EMSGSIZE;
  304. if (data[DCB_NUMTCS_ATTR_ALL])
  305. getall = 1;
  306. for (i = DCB_NUMTCS_ATTR_ALL+1; i <= DCB_NUMTCS_ATTR_MAX; i++) {
  307. if (!getall && !data[i])
  308. continue;
  309. ret = netdev->dcbnl_ops->getnumtcs(netdev, i, &value);
  310. if (!ret) {
  311. ret = nla_put_u8(skb, i, value);
  312. if (ret) {
  313. nla_nest_cancel(skb, nest);
  314. return ret;
  315. }
  316. } else
  317. return -EINVAL;
  318. }
  319. nla_nest_end(skb, nest);
  320. return 0;
  321. }
/* DCB_CMD_SNUMTCS: program the number of traffic classes for each feature
 * supplied in the request.  The reply is a single u8 status attribute:
 * 0 on success, 1 if any driver call failed (note the !!ret). */
static int dcbnl_setnumtcs(struct net_device *netdev, struct nlmsghdr *nlh,
			   u32 seq, struct nlattr **tb, struct sk_buff *skb)
{
	struct nlattr *data[DCB_NUMTCS_ATTR_MAX + 1];
	int ret;
	u8 value;
	int i;

	if (!tb[DCB_ATTR_NUMTCS])
		return -EINVAL;
	if (!netdev->dcbnl_ops->setnumtcs)
		return -EOPNOTSUPP;

	ret = nla_parse_nested(data, DCB_NUMTCS_ATTR_MAX, tb[DCB_ATTR_NUMTCS],
			       dcbnl_numtcs_nest, NULL);
	if (ret)
		return ret;

	for (i = DCB_NUMTCS_ATTR_ALL+1; i <= DCB_NUMTCS_ATTR_MAX; i++) {
		if (data[i] == NULL)
			continue;

		value = nla_get_u8(data[i]);

		ret = netdev->dcbnl_ops->setnumtcs(netdev, i, value);
		if (ret)
			break;	/* stop at the first failure; reported below */
	}

	return nla_put_u8(skb, DCB_ATTR_NUMTCS, !!ret);
}
  347. static int dcbnl_getpfcstate(struct net_device *netdev, struct nlmsghdr *nlh,
  348. u32 seq, struct nlattr **tb, struct sk_buff *skb)
  349. {
  350. if (!netdev->dcbnl_ops->getpfcstate)
  351. return -EOPNOTSUPP;
  352. return nla_put_u8(skb, DCB_ATTR_PFC_STATE,
  353. netdev->dcbnl_ops->getpfcstate(netdev));
  354. }
  355. static int dcbnl_setpfcstate(struct net_device *netdev, struct nlmsghdr *nlh,
  356. u32 seq, struct nlattr **tb, struct sk_buff *skb)
  357. {
  358. u8 value;
  359. if (!tb[DCB_ATTR_PFC_STATE])
  360. return -EINVAL;
  361. if (!netdev->dcbnl_ops->setpfcstate)
  362. return -EOPNOTSUPP;
  363. value = nla_get_u8(tb[DCB_ATTR_PFC_STATE]);
  364. netdev->dcbnl_ops->setpfcstate(netdev, value);
  365. return nla_put_u8(skb, DCB_ATTR_PFC_STATE, 0);
  366. }
/* DCB_CMD_GAPP: look up the priority mapped to an (idtype, protocol id) pair,
 * preferring the driver's getapp op and falling back to the core dcb_app
 * table.  Replies with a DCB_ATTR_APP nest echoing idtype/id plus the
 * resolved priority. */
static int dcbnl_getapp(struct net_device *netdev, struct nlmsghdr *nlh,
			u32 seq, struct nlattr **tb, struct sk_buff *skb)
{
	struct nlattr *app_nest;
	struct nlattr *app_tb[DCB_APP_ATTR_MAX + 1];
	u16 id;
	u8 up, idtype;
	int ret;

	if (!tb[DCB_ATTR_APP])
		return -EINVAL;

	ret = nla_parse_nested(app_tb, DCB_APP_ATTR_MAX, tb[DCB_ATTR_APP],
			       dcbnl_app_nest, NULL);
	if (ret)
		return ret;

	/* all must be non-null */
	if ((!app_tb[DCB_APP_ATTR_IDTYPE]) ||
	    (!app_tb[DCB_APP_ATTR_ID]))
		return -EINVAL;

	/* either by eth type or by socket number */
	idtype = nla_get_u8(app_tb[DCB_APP_ATTR_IDTYPE]);
	if ((idtype != DCB_APP_IDTYPE_ETHTYPE) &&
	    (idtype != DCB_APP_IDTYPE_PORTNUM))
		return -EINVAL;

	id = nla_get_u16(app_tb[DCB_APP_ATTR_ID]);

	if (netdev->dcbnl_ops->getapp) {
		/* Driver returns priority (>= 0) or a negative errno. */
		ret = netdev->dcbnl_ops->getapp(netdev, idtype, id);
		if (ret < 0)
			return ret;
		else
			up = ret;
	} else {
		struct dcb_app app = {
					.selector = idtype,
					.protocol = id,
				     };
		up = dcb_getapp(netdev, &app);
	}

	app_nest = nla_nest_start(skb, DCB_ATTR_APP);
	if (!app_nest)
		return -EMSGSIZE;

	ret = nla_put_u8(skb, DCB_APP_ATTR_IDTYPE, idtype);
	if (ret)
		goto out_cancel;

	ret = nla_put_u16(skb, DCB_APP_ATTR_ID, id);
	if (ret)
		goto out_cancel;

	ret = nla_put_u8(skb, DCB_APP_ATTR_PRIORITY, up);
	if (ret)
		goto out_cancel;

	nla_nest_end(skb, app_nest);

	return 0;

out_cancel:
	nla_nest_cancel(skb, app_nest);
	return ret;
}
/* DCB_CMD_SAPP: set the priority for an (idtype, protocol id) pair via the
 * driver's setapp op, or the core dcb_setapp() table when absent.  Acks with
 * the operation status in DCB_ATTR_APP and fires a CEE notification. */
static int dcbnl_setapp(struct net_device *netdev, struct nlmsghdr *nlh,
			u32 seq, struct nlattr **tb, struct sk_buff *skb)
{
	int ret;
	u16 id;
	u8 up, idtype;
	struct nlattr *app_tb[DCB_APP_ATTR_MAX + 1];

	if (!tb[DCB_ATTR_APP])
		return -EINVAL;

	ret = nla_parse_nested(app_tb, DCB_APP_ATTR_MAX, tb[DCB_ATTR_APP],
			       dcbnl_app_nest, NULL);
	if (ret)
		return ret;

	/* all must be non-null */
	if ((!app_tb[DCB_APP_ATTR_IDTYPE]) ||
	    (!app_tb[DCB_APP_ATTR_ID]) ||
	    (!app_tb[DCB_APP_ATTR_PRIORITY]))
		return -EINVAL;

	/* either by eth type or by socket number */
	idtype = nla_get_u8(app_tb[DCB_APP_ATTR_IDTYPE]);
	if ((idtype != DCB_APP_IDTYPE_ETHTYPE) &&
	    (idtype != DCB_APP_IDTYPE_PORTNUM))
		return -EINVAL;

	id = nla_get_u16(app_tb[DCB_APP_ATTR_ID]);
	up = nla_get_u8(app_tb[DCB_APP_ATTR_PRIORITY]);

	if (netdev->dcbnl_ops->setapp) {
		ret = netdev->dcbnl_ops->setapp(netdev, idtype, id, up);
		if (ret < 0)
			return ret;
	} else {
		struct dcb_app app;
		app.selector = idtype;
		app.protocol = id;
		app.priority = up;
		ret = dcb_setapp(netdev, &app);
	}

	/* Echo the (0/err) status back to the requester. */
	ret = nla_put_u8(skb, DCB_ATTR_APP, ret);
	dcbnl_cee_notify(netdev, RTM_SETDCB, DCB_CMD_SAPP, seq, 0);

	return ret;
}
/* Common worker for DCB_CMD_PGTX_GCFG / DCB_CMD_PGRX_GCFG.
 * @dir: 0 = Tx, 1 = Rx.
 * Builds a DCB_ATTR_PG_CFG nest containing, for each requested traffic
 * class, the per-TC parameters (pgid, up-mapping, strict prio, bw pct) and,
 * for each requested bandwidth group, its percentage.  Only parameters the
 * request named (or *_ALL) are emitted.  Note both error labels report
 * -EMSGSIZE, even when the failure was a nested parse error. */
static int __dcbnl_pg_getcfg(struct net_device *netdev, struct nlmsghdr *nlh,
			     struct nlattr **tb, struct sk_buff *skb, int dir)
{
	struct nlattr *pg_nest, *param_nest, *data;
	struct nlattr *pg_tb[DCB_PG_ATTR_MAX + 1];
	struct nlattr *param_tb[DCB_TC_ATTR_PARAM_MAX + 1];
	u8 prio, pgid, tc_pct, up_map;
	int ret;
	int getall = 0;
	int i;

	if (!tb[DCB_ATTR_PG_CFG])
		return -EINVAL;
	if (!netdev->dcbnl_ops->getpgtccfgtx ||
	    !netdev->dcbnl_ops->getpgtccfgrx ||
	    !netdev->dcbnl_ops->getpgbwgcfgtx ||
	    !netdev->dcbnl_ops->getpgbwgcfgrx)
		return -EOPNOTSUPP;

	ret = nla_parse_nested(pg_tb, DCB_PG_ATTR_MAX, tb[DCB_ATTR_PG_CFG],
			       dcbnl_pg_nest, NULL);
	if (ret)
		return ret;

	pg_nest = nla_nest_start(skb, DCB_ATTR_PG_CFG);
	if (!pg_nest)
		return -EMSGSIZE;

	if (pg_tb[DCB_PG_ATTR_TC_ALL])
		getall = 1;

	for (i = DCB_PG_ATTR_TC_0; i <= DCB_PG_ATTR_TC_7; i++) {
		if (!getall && !pg_tb[i])
			continue;

		/* With TC_ALL, its parameter nest selects which fields to
		 * report for every TC; otherwise use the per-TC nest. */
		if (pg_tb[DCB_PG_ATTR_TC_ALL])
			data = pg_tb[DCB_PG_ATTR_TC_ALL];
		else
			data = pg_tb[i];
		ret = nla_parse_nested(param_tb, DCB_TC_ATTR_PARAM_MAX, data,
				       dcbnl_tc_param_nest, NULL);
		if (ret)
			goto err_pg;

		param_nest = nla_nest_start(skb, i);
		if (!param_nest)
			goto err_pg;

		/* Defaults in case the driver fills only some outputs. */
		pgid = DCB_ATTR_VALUE_UNDEFINED;
		prio = DCB_ATTR_VALUE_UNDEFINED;
		tc_pct = DCB_ATTR_VALUE_UNDEFINED;
		up_map = DCB_ATTR_VALUE_UNDEFINED;

		if (dir) {
			/* Rx */
			netdev->dcbnl_ops->getpgtccfgrx(netdev,
						i - DCB_PG_ATTR_TC_0, &prio,
						&pgid, &tc_pct, &up_map);
		} else {
			/* Tx */
			netdev->dcbnl_ops->getpgtccfgtx(netdev,
						i - DCB_PG_ATTR_TC_0, &prio,
						&pgid, &tc_pct, &up_map);
		}

		if (param_tb[DCB_TC_ATTR_PARAM_PGID] ||
		    param_tb[DCB_TC_ATTR_PARAM_ALL]) {
			ret = nla_put_u8(skb,
					 DCB_TC_ATTR_PARAM_PGID, pgid);
			if (ret)
				goto err_param;
		}
		if (param_tb[DCB_TC_ATTR_PARAM_UP_MAPPING] ||
		    param_tb[DCB_TC_ATTR_PARAM_ALL]) {
			ret = nla_put_u8(skb,
					 DCB_TC_ATTR_PARAM_UP_MAPPING, up_map);
			if (ret)
				goto err_param;
		}
		if (param_tb[DCB_TC_ATTR_PARAM_STRICT_PRIO] ||
		    param_tb[DCB_TC_ATTR_PARAM_ALL]) {
			ret = nla_put_u8(skb,
					 DCB_TC_ATTR_PARAM_STRICT_PRIO, prio);
			if (ret)
				goto err_param;
		}
		if (param_tb[DCB_TC_ATTR_PARAM_BW_PCT] ||
		    param_tb[DCB_TC_ATTR_PARAM_ALL]) {
			ret = nla_put_u8(skb, DCB_TC_ATTR_PARAM_BW_PCT,
					 tc_pct);
			if (ret)
				goto err_param;
		}
		nla_nest_end(skb, param_nest);
	}

	/* Bandwidth-group percentages use their own ALL selector. */
	if (pg_tb[DCB_PG_ATTR_BW_ID_ALL])
		getall = 1;
	else
		getall = 0;

	for (i = DCB_PG_ATTR_BW_ID_0; i <= DCB_PG_ATTR_BW_ID_7; i++) {
		if (!getall && !pg_tb[i])
			continue;

		tc_pct = DCB_ATTR_VALUE_UNDEFINED;

		if (dir) {
			/* Rx */
			netdev->dcbnl_ops->getpgbwgcfgrx(netdev,
					i - DCB_PG_ATTR_BW_ID_0, &tc_pct);
		} else {
			/* Tx */
			netdev->dcbnl_ops->getpgbwgcfgtx(netdev,
					i - DCB_PG_ATTR_BW_ID_0, &tc_pct);
		}
		ret = nla_put_u8(skb, i, tc_pct);
		if (ret)
			goto err_pg;
	}

	nla_nest_end(skb, pg_nest);

	return 0;

err_param:
	nla_nest_cancel(skb, param_nest);
err_pg:
	nla_nest_cancel(skb, pg_nest);

	return -EMSGSIZE;
}
/* DCB_CMD_PGTX_GCFG: Tx-direction wrapper (dir = 0). */
static int dcbnl_pgtx_getcfg(struct net_device *netdev, struct nlmsghdr *nlh,
			     u32 seq, struct nlattr **tb, struct sk_buff *skb)
{
	return __dcbnl_pg_getcfg(netdev, nlh, tb, skb, 0);
}
/* DCB_CMD_PGRX_GCFG: Rx-direction wrapper (dir = 1). */
static int dcbnl_pgrx_getcfg(struct net_device *netdev, struct nlmsghdr *nlh,
			     u32 seq, struct nlattr **tb, struct sk_buff *skb)
{
	return __dcbnl_pg_getcfg(netdev, nlh, tb, skb, 1);
}
  586. static int dcbnl_setstate(struct net_device *netdev, struct nlmsghdr *nlh,
  587. u32 seq, struct nlattr **tb, struct sk_buff *skb)
  588. {
  589. u8 value;
  590. if (!tb[DCB_ATTR_STATE])
  591. return -EINVAL;
  592. if (!netdev->dcbnl_ops->setstate)
  593. return -EOPNOTSUPP;
  594. value = nla_get_u8(tb[DCB_ATTR_STATE]);
  595. return nla_put_u8(skb, DCB_ATTR_STATE,
  596. netdev->dcbnl_ops->setstate(netdev, value));
  597. }
/* DCB_CMD_PFC_SCFG: program PFC enable per priority for every attribute
 * present in the request; always acks with a 0 u8. */
static int dcbnl_setpfccfg(struct net_device *netdev, struct nlmsghdr *nlh,
			   u32 seq, struct nlattr **tb, struct sk_buff *skb)
{
	struct nlattr *data[DCB_PFC_UP_ATTR_MAX + 1];
	int i;
	int ret;
	u8 value;

	if (!tb[DCB_ATTR_PFC_CFG])
		return -EINVAL;
	if (!netdev->dcbnl_ops->setpfccfg)
		return -EOPNOTSUPP;

	ret = nla_parse_nested(data, DCB_PFC_UP_ATTR_MAX,
			       tb[DCB_ATTR_PFC_CFG], dcbnl_pfc_up_nest, NULL);
	if (ret)
		return ret;

	for (i = DCB_PFC_UP_ATTR_0; i <= DCB_PFC_UP_ATTR_7; i++) {
		if (data[i] == NULL)
			continue;
		value = nla_get_u8(data[i]);
		/* Priority index derived from the attribute's own type. */
		netdev->dcbnl_ops->setpfccfg(netdev,
			data[i]->nla_type - DCB_PFC_UP_ATTR_0, value);
	}

	return nla_put_u8(skb, DCB_ATTR_PFC_CFG, 0);
}
/* DCB_CMD_SET_ALL: commit all pending DCB configuration to hardware via the
 * driver's setall op; acks with its return value and sends a CEE notify. */
static int dcbnl_setall(struct net_device *netdev, struct nlmsghdr *nlh,
			u32 seq, struct nlattr **tb, struct sk_buff *skb)
{
	int ret;

	if (!tb[DCB_ATTR_SET_ALL])
		return -EINVAL;
	if (!netdev->dcbnl_ops->setall)
		return -EOPNOTSUPP;

	ret = nla_put_u8(skb, DCB_ATTR_SET_ALL,
			 netdev->dcbnl_ops->setall(netdev));
	dcbnl_cee_notify(netdev, RTM_SETDCB, DCB_CMD_SET_ALL, seq, 0);

	return ret;
}
/* Common worker for DCB_CMD_PGTX_SCFG / DCB_CMD_PGRX_SCFG.
 * @dir: 0 = Tx, 1 = Rx.
 * Programs per-TC parameters and per-bandwidth-group percentages for every
 * attribute present in the request; unsupplied fields are passed to the
 * driver as DCB_ATTR_VALUE_UNDEFINED.  Always acks with a 0 u8. */
static int __dcbnl_pg_setcfg(struct net_device *netdev, struct nlmsghdr *nlh,
			     u32 seq, struct nlattr **tb, struct sk_buff *skb,
			     int dir)
{
	struct nlattr *pg_tb[DCB_PG_ATTR_MAX + 1];
	struct nlattr *param_tb[DCB_TC_ATTR_PARAM_MAX + 1];
	int ret;
	int i;
	u8 pgid;
	u8 up_map;
	u8 prio;
	u8 tc_pct;

	if (!tb[DCB_ATTR_PG_CFG])
		return -EINVAL;
	if (!netdev->dcbnl_ops->setpgtccfgtx ||
	    !netdev->dcbnl_ops->setpgtccfgrx ||
	    !netdev->dcbnl_ops->setpgbwgcfgtx ||
	    !netdev->dcbnl_ops->setpgbwgcfgrx)
		return -EOPNOTSUPP;

	ret = nla_parse_nested(pg_tb, DCB_PG_ATTR_MAX, tb[DCB_ATTR_PG_CFG],
			       dcbnl_pg_nest, NULL);
	if (ret)
		return ret;

	for (i = DCB_PG_ATTR_TC_0; i <= DCB_PG_ATTR_TC_7; i++) {
		if (!pg_tb[i])
			continue;

		ret = nla_parse_nested(param_tb, DCB_TC_ATTR_PARAM_MAX,
				       pg_tb[i], dcbnl_tc_param_nest, NULL);
		if (ret)
			return ret;

		pgid = DCB_ATTR_VALUE_UNDEFINED;
		prio = DCB_ATTR_VALUE_UNDEFINED;
		tc_pct = DCB_ATTR_VALUE_UNDEFINED;
		up_map = DCB_ATTR_VALUE_UNDEFINED;

		if (param_tb[DCB_TC_ATTR_PARAM_STRICT_PRIO])
			prio =
			    nla_get_u8(param_tb[DCB_TC_ATTR_PARAM_STRICT_PRIO]);

		if (param_tb[DCB_TC_ATTR_PARAM_PGID])
			pgid = nla_get_u8(param_tb[DCB_TC_ATTR_PARAM_PGID]);

		if (param_tb[DCB_TC_ATTR_PARAM_BW_PCT])
			tc_pct = nla_get_u8(param_tb[DCB_TC_ATTR_PARAM_BW_PCT]);

		if (param_tb[DCB_TC_ATTR_PARAM_UP_MAPPING])
			up_map =
			     nla_get_u8(param_tb[DCB_TC_ATTR_PARAM_UP_MAPPING]);

		/* dir: Tx = 0, Rx = 1 */
		if (dir) {
			/* Rx */
			netdev->dcbnl_ops->setpgtccfgrx(netdev,
				i - DCB_PG_ATTR_TC_0,
				prio, pgid, tc_pct, up_map);
		} else {
			/* Tx */
			netdev->dcbnl_ops->setpgtccfgtx(netdev,
				i - DCB_PG_ATTR_TC_0,
				prio, pgid, tc_pct, up_map);
		}
	}

	for (i = DCB_PG_ATTR_BW_ID_0; i <= DCB_PG_ATTR_BW_ID_7; i++) {
		if (!pg_tb[i])
			continue;

		tc_pct = nla_get_u8(pg_tb[i]);

		/* dir: Tx = 0, Rx = 1 */
		if (dir) {
			/* Rx */
			netdev->dcbnl_ops->setpgbwgcfgrx(netdev,
					 i - DCB_PG_ATTR_BW_ID_0, tc_pct);
		} else {
			/* Tx */
			netdev->dcbnl_ops->setpgbwgcfgtx(netdev,
					 i - DCB_PG_ATTR_BW_ID_0, tc_pct);
		}
	}

	return nla_put_u8(skb, DCB_ATTR_PG_CFG, 0);
}
  709. static int dcbnl_pgtx_setcfg(struct net_device *netdev, struct nlmsghdr *nlh,
  710. u32 seq, struct nlattr **tb, struct sk_buff *skb)
  711. {
  712. return __dcbnl_pg_setcfg(netdev, nlh, seq, tb, skb, 0);
  713. }
  714. static int dcbnl_pgrx_setcfg(struct net_device *netdev, struct nlmsghdr *nlh,
  715. u32 seq, struct nlattr **tb, struct sk_buff *skb)
  716. {
  717. return __dcbnl_pg_setcfg(netdev, nlh, seq, tb, skb, 1);
  718. }
/* DCB_CMD_BCN_GCFG handler: report backward congestion notification (BCN)
 * configuration.  Per-priority RP enable flags are returned as u8
 * attributes, the remaining BCN parameters as u32.  DCB_BCN_ATTR_ALL
 * selects every attribute; otherwise only those present in the request
 * are filled in.
 */
static int dcbnl_bcn_getcfg(struct net_device *netdev, struct nlmsghdr *nlh,
			    u32 seq, struct nlattr **tb, struct sk_buff *skb)
{
	struct nlattr *bcn_nest;
	struct nlattr *bcn_tb[DCB_BCN_ATTR_MAX + 1];
	u8 value_byte;
	u32 value_integer;
	int ret;
	bool getall = false;
	int i;

	if (!tb[DCB_ATTR_BCN])
		return -EINVAL;

	if (!netdev->dcbnl_ops->getbcnrp ||
	    !netdev->dcbnl_ops->getbcncfg)
		return -EOPNOTSUPP;

	ret = nla_parse_nested(bcn_tb, DCB_BCN_ATTR_MAX, tb[DCB_ATTR_BCN],
			       dcbnl_bcn_nest, NULL);
	if (ret)
		return ret;

	bcn_nest = nla_nest_start(skb, DCB_ATTR_BCN);
	if (!bcn_nest)
		return -EMSGSIZE;

	if (bcn_tb[DCB_BCN_ATTR_ALL])
		getall = true;

	/* Per-priority rate-protocol enable flags (u8) */
	for (i = DCB_BCN_ATTR_RP_0; i <= DCB_BCN_ATTR_RP_7; i++) {
		if (!getall && !bcn_tb[i])
			continue;

		netdev->dcbnl_ops->getbcnrp(netdev, i - DCB_BCN_ATTR_RP_0,
					    &value_byte);
		ret = nla_put_u8(skb, i, value_byte);
		if (ret)
			goto err_bcn;
	}

	/* Remaining BCN parameters (u32), keyed by attribute id */
	for (i = DCB_BCN_ATTR_BCNA_0; i <= DCB_BCN_ATTR_RI; i++) {
		if (!getall && !bcn_tb[i])
			continue;

		netdev->dcbnl_ops->getbcncfg(netdev, i,
					     &value_integer);
		ret = nla_put_u32(skb, i, value_integer);
		if (ret)
			goto err_bcn;
	}

	nla_nest_end(skb, bcn_nest);

	return 0;

err_bcn:
	/* Roll back the partially-built nest before reporting the error */
	nla_nest_cancel(skb, bcn_nest);
	return ret;
}
  767. static int dcbnl_bcn_setcfg(struct net_device *netdev, struct nlmsghdr *nlh,
  768. u32 seq, struct nlattr **tb, struct sk_buff *skb)
  769. {
  770. struct nlattr *data[DCB_BCN_ATTR_MAX + 1];
  771. int i;
  772. int ret;
  773. u8 value_byte;
  774. u32 value_int;
  775. if (!tb[DCB_ATTR_BCN])
  776. return -EINVAL;
  777. if (!netdev->dcbnl_ops->setbcncfg ||
  778. !netdev->dcbnl_ops->setbcnrp)
  779. return -EOPNOTSUPP;
  780. ret = nla_parse_nested(data, DCB_BCN_ATTR_MAX, tb[DCB_ATTR_BCN],
  781. dcbnl_pfc_up_nest, NULL);
  782. if (ret)
  783. return ret;
  784. for (i = DCB_BCN_ATTR_RP_0; i <= DCB_BCN_ATTR_RP_7; i++) {
  785. if (data[i] == NULL)
  786. continue;
  787. value_byte = nla_get_u8(data[i]);
  788. netdev->dcbnl_ops->setbcnrp(netdev,
  789. data[i]->nla_type - DCB_BCN_ATTR_RP_0, value_byte);
  790. }
  791. for (i = DCB_BCN_ATTR_BCNA_0; i <= DCB_BCN_ATTR_RI; i++) {
  792. if (data[i] == NULL)
  793. continue;
  794. value_int = nla_get_u32(data[i]);
  795. netdev->dcbnl_ops->setbcncfg(netdev,
  796. i, value_int);
  797. }
  798. return nla_put_u8(skb, DCB_ATTR_BCN, 0);
  799. }
  800. static int dcbnl_build_peer_app(struct net_device *netdev, struct sk_buff* skb,
  801. int app_nested_type, int app_info_type,
  802. int app_entry_type)
  803. {
  804. struct dcb_peer_app_info info;
  805. struct dcb_app *table = NULL;
  806. const struct dcbnl_rtnl_ops *ops = netdev->dcbnl_ops;
  807. u16 app_count;
  808. int err;
  809. /**
  810. * retrieve the peer app configuration form the driver. If the driver
  811. * handlers fail exit without doing anything
  812. */
  813. err = ops->peer_getappinfo(netdev, &info, &app_count);
  814. if (!err && app_count) {
  815. table = kmalloc(sizeof(struct dcb_app) * app_count, GFP_KERNEL);
  816. if (!table)
  817. return -ENOMEM;
  818. err = ops->peer_getapptable(netdev, table);
  819. }
  820. if (!err) {
  821. u16 i;
  822. struct nlattr *app;
  823. /**
  824. * build the message, from here on the only possible failure
  825. * is due to the skb size
  826. */
  827. err = -EMSGSIZE;
  828. app = nla_nest_start(skb, app_nested_type);
  829. if (!app)
  830. goto nla_put_failure;
  831. if (app_info_type &&
  832. nla_put(skb, app_info_type, sizeof(info), &info))
  833. goto nla_put_failure;
  834. for (i = 0; i < app_count; i++) {
  835. if (nla_put(skb, app_entry_type, sizeof(struct dcb_app),
  836. &table[i]))
  837. goto nla_put_failure;
  838. }
  839. nla_nest_end(skb, app);
  840. }
  841. err = 0;
  842. nla_put_failure:
  843. kfree(table);
  844. return err;
  845. }
/* Handle IEEE 802.1Qaz/802.1Qau/802.1Qbb GET commands. */
static int dcbnl_ieee_fill(struct sk_buff *skb, struct net_device *netdev)
{
	struct nlattr *ieee, *app;
	struct dcb_app_type *itr;
	const struct dcbnl_rtnl_ops *ops = netdev->dcbnl_ops;
	int dcbx;
	int err;

	/* On any failure the whole skb is discarded by the caller, so the
	 * open nests are not cancelled individually here.
	 */
	if (nla_put_string(skb, DCB_ATTR_IFNAME, netdev->name))
		return -EMSGSIZE;

	ieee = nla_nest_start(skb, DCB_ATTR_IEEE);
	if (!ieee)
		return -EMSGSIZE;

	/* Each section is optional: emitted only if the driver provides
	 * the corresponding op and it succeeds.
	 */
	if (ops->ieee_getets) {
		struct ieee_ets ets;
		memset(&ets, 0, sizeof(ets));
		err = ops->ieee_getets(netdev, &ets);
		if (!err &&
		    nla_put(skb, DCB_ATTR_IEEE_ETS, sizeof(ets), &ets))
			return -EMSGSIZE;
	}

	if (ops->ieee_getmaxrate) {
		struct ieee_maxrate maxrate;
		memset(&maxrate, 0, sizeof(maxrate));
		err = ops->ieee_getmaxrate(netdev, &maxrate);
		if (!err) {
			err = nla_put(skb, DCB_ATTR_IEEE_MAXRATE,
				      sizeof(maxrate), &maxrate);
			if (err)
				return -EMSGSIZE;
		}
	}

	if (ops->ieee_getqcn) {
		struct ieee_qcn qcn;

		memset(&qcn, 0, sizeof(qcn));
		err = ops->ieee_getqcn(netdev, &qcn);
		if (!err) {
			err = nla_put(skb, DCB_ATTR_IEEE_QCN,
				      sizeof(qcn), &qcn);
			if (err)
				return -EMSGSIZE;
		}
	}

	if (ops->ieee_getqcnstats) {
		struct ieee_qcn_stats qcn_stats;

		memset(&qcn_stats, 0, sizeof(qcn_stats));
		err = ops->ieee_getqcnstats(netdev, &qcn_stats);
		if (!err) {
			err = nla_put(skb, DCB_ATTR_IEEE_QCN_STATS,
				      sizeof(qcn_stats), &qcn_stats);
			if (err)
				return -EMSGSIZE;
		}
	}

	if (ops->ieee_getpfc) {
		struct ieee_pfc pfc;
		memset(&pfc, 0, sizeof(pfc));
		err = ops->ieee_getpfc(netdev, &pfc);
		if (!err &&
		    nla_put(skb, DCB_ATTR_IEEE_PFC, sizeof(pfc), &pfc))
			return -EMSGSIZE;
	}

	app = nla_nest_start(skb, DCB_ATTR_IEEE_APP_TABLE);
	if (!app)
		return -EMSGSIZE;

	/* dcb_lock protects the shared dcb_app_list */
	spin_lock_bh(&dcb_lock);
	list_for_each_entry(itr, &dcb_app_list, list) {
		if (itr->ifindex == netdev->ifindex) {
			err = nla_put(skb, DCB_ATTR_IEEE_APP, sizeof(itr->app),
					 &itr->app);
			if (err) {
				spin_unlock_bh(&dcb_lock);
				return -EMSGSIZE;
			}
		}
	}

	/* dcbx is sampled inside the same critical section */
	if (netdev->dcbnl_ops->getdcbx)
		dcbx = netdev->dcbnl_ops->getdcbx(netdev);
	else
		dcbx = -EOPNOTSUPP;

	spin_unlock_bh(&dcb_lock);
	nla_nest_end(skb, app);

	/* get peer info if available */
	if (ops->ieee_peer_getets) {
		struct ieee_ets ets;
		memset(&ets, 0, sizeof(ets));
		err = ops->ieee_peer_getets(netdev, &ets);
		if (!err &&
		    nla_put(skb, DCB_ATTR_IEEE_PEER_ETS, sizeof(ets), &ets))
			return -EMSGSIZE;
	}

	if (ops->ieee_peer_getpfc) {
		struct ieee_pfc pfc;
		memset(&pfc, 0, sizeof(pfc));
		err = ops->ieee_peer_getpfc(netdev, &pfc);
		if (!err &&
		    nla_put(skb, DCB_ATTR_IEEE_PEER_PFC, sizeof(pfc), &pfc))
			return -EMSGSIZE;
	}

	if (ops->peer_getappinfo && ops->peer_getapptable) {
		err = dcbnl_build_peer_app(netdev, skb,
					   DCB_ATTR_IEEE_PEER_APP,
					   DCB_ATTR_IEEE_APP_UNSPEC,
					   DCB_ATTR_IEEE_APP);
		if (err)
			return -EMSGSIZE;
	}

	nla_nest_end(skb, ieee);

	/* Report the DCBX mode only when the driver supports getdcbx */
	if (dcbx >= 0) {
		err = nla_put_u8(skb, DCB_ATTR_DCBX, dcbx);
		if (err)
			return -EMSGSIZE;
	}

	return 0;
}
/* Fill one priority-group section of a CEE GET reply.
 * dir selects the direction: non-zero = Tx (DCB_ATTR_CEE_TX_PG and the
 * *tx driver ops), zero = Rx.  On -EMSGSIZE the caller discards the
 * whole skb, so open nests are not cancelled here.
 */
static int dcbnl_cee_pg_fill(struct sk_buff *skb, struct net_device *dev,
			     int dir)
{
	u8 pgid, up_map, prio, tc_pct;
	const struct dcbnl_rtnl_ops *ops = dev->dcbnl_ops;
	/* i is reused below as the loop counter */
	int i = dir ? DCB_ATTR_CEE_TX_PG : DCB_ATTR_CEE_RX_PG;
	struct nlattr *pg = nla_nest_start(skb, i);

	if (!pg)
		return -EMSGSIZE;

	/* Per-traffic-class parameters, one nested attribute per TC */
	for (i = DCB_PG_ATTR_TC_0; i <= DCB_PG_ATTR_TC_7; i++) {
		struct nlattr *tc_nest = nla_nest_start(skb, i);

		if (!tc_nest)
			return -EMSGSIZE;

		/* Defaults reported when the driver leaves a field alone */
		pgid = DCB_ATTR_VALUE_UNDEFINED;
		prio = DCB_ATTR_VALUE_UNDEFINED;
		tc_pct = DCB_ATTR_VALUE_UNDEFINED;
		up_map = DCB_ATTR_VALUE_UNDEFINED;

		if (!dir)
			ops->getpgtccfgrx(dev, i - DCB_PG_ATTR_TC_0,
					  &prio, &pgid, &tc_pct, &up_map);
		else
			ops->getpgtccfgtx(dev, i - DCB_PG_ATTR_TC_0,
					  &prio, &pgid, &tc_pct, &up_map);

		if (nla_put_u8(skb, DCB_TC_ATTR_PARAM_PGID, pgid) ||
		    nla_put_u8(skb, DCB_TC_ATTR_PARAM_UP_MAPPING, up_map) ||
		    nla_put_u8(skb, DCB_TC_ATTR_PARAM_STRICT_PRIO, prio) ||
		    nla_put_u8(skb, DCB_TC_ATTR_PARAM_BW_PCT, tc_pct))
			return -EMSGSIZE;
		nla_nest_end(skb, tc_nest);
	}

	/* Per-bandwidth-group percentages (plain u8 attributes) */
	for (i = DCB_PG_ATTR_BW_ID_0; i <= DCB_PG_ATTR_BW_ID_7; i++) {
		tc_pct = DCB_ATTR_VALUE_UNDEFINED;

		if (!dir)
			ops->getpgbwgcfgrx(dev, i - DCB_PG_ATTR_BW_ID_0,
					   &tc_pct);
		else
			ops->getpgbwgcfgtx(dev, i - DCB_PG_ATTR_BW_ID_0,
					   &tc_pct);

		if (nla_put_u8(skb, i, tc_pct))
			return -EMSGSIZE;
	}
	nla_nest_end(skb, pg);
	return 0;
}
/* Build the full CEE DCBX state of @netdev into @skb: local PG/PFC/app
 * configuration, feature flags, DCBX mode, and peer info when available.
 * Returns 0 on success or -EMSGSIZE; on failure the caller discards the
 * skb, so nests opened here are not individually cancelled.
 */
static int dcbnl_cee_fill(struct sk_buff *skb, struct net_device *netdev)
{
	struct nlattr *cee, *app;
	struct dcb_app_type *itr;
	const struct dcbnl_rtnl_ops *ops = netdev->dcbnl_ops;
	int dcbx, i, err = -EMSGSIZE;
	u8 value;

	if (nla_put_string(skb, DCB_ATTR_IFNAME, netdev->name))
		goto nla_put_failure;

	cee = nla_nest_start(skb, DCB_ATTR_CEE);
	if (!cee)
		goto nla_put_failure;

	/* local pg */
	if (ops->getpgtccfgtx && ops->getpgbwgcfgtx) {
		err = dcbnl_cee_pg_fill(skb, netdev, 1);
		if (err)
			goto nla_put_failure;
	}

	if (ops->getpgtccfgrx && ops->getpgbwgcfgrx) {
		err = dcbnl_cee_pg_fill(skb, netdev, 0);
		if (err)
			goto nla_put_failure;
	}

	/* local pfc */
	if (ops->getpfccfg) {
		struct nlattr *pfc_nest = nla_nest_start(skb, DCB_ATTR_CEE_PFC);

		if (!pfc_nest)
			goto nla_put_failure;

		for (i = DCB_PFC_UP_ATTR_0; i <= DCB_PFC_UP_ATTR_7; i++) {
			ops->getpfccfg(netdev, i - DCB_PFC_UP_ATTR_0, &value);
			if (nla_put_u8(skb, i, value))
				goto nla_put_failure;
		}
		nla_nest_end(skb, pfc_nest);
	}

	/* local app — dcb_lock protects the shared dcb_app_list; failures
	 * inside the critical section exit through dcb_unlock so the lock
	 * is always released.
	 */
	spin_lock_bh(&dcb_lock);
	app = nla_nest_start(skb, DCB_ATTR_CEE_APP_TABLE);
	if (!app)
		goto dcb_unlock;

	list_for_each_entry(itr, &dcb_app_list, list) {
		if (itr->ifindex == netdev->ifindex) {
			struct nlattr *app_nest = nla_nest_start(skb,
								 DCB_ATTR_APP);
			if (!app_nest)
				goto dcb_unlock;

			err = nla_put_u8(skb, DCB_APP_ATTR_IDTYPE,
					 itr->app.selector);
			if (err)
				goto dcb_unlock;

			err = nla_put_u16(skb, DCB_APP_ATTR_ID,
					  itr->app.protocol);
			if (err)
				goto dcb_unlock;

			err = nla_put_u8(skb, DCB_APP_ATTR_PRIORITY,
					 itr->app.priority);
			if (err)
				goto dcb_unlock;

			nla_nest_end(skb, app_nest);
		}
	}
	nla_nest_end(skb, app);

	/* dcbx is sampled inside the same critical section */
	if (netdev->dcbnl_ops->getdcbx)
		dcbx = netdev->dcbnl_ops->getdcbx(netdev);
	else
		dcbx = -EOPNOTSUPP;

	spin_unlock_bh(&dcb_lock);

	/* features flags */
	if (ops->getfeatcfg) {
		struct nlattr *feat = nla_nest_start(skb, DCB_ATTR_CEE_FEAT);

		if (!feat)
			goto nla_put_failure;

		/* Only features the driver reports successfully are emitted */
		for (i = DCB_FEATCFG_ATTR_ALL + 1; i <= DCB_FEATCFG_ATTR_MAX;
		     i++)
			if (!ops->getfeatcfg(netdev, i, &value) &&
			    nla_put_u8(skb, i, value))
				goto nla_put_failure;

		nla_nest_end(skb, feat);
	}

	/* peer info if available */
	if (ops->cee_peer_getpg) {
		struct cee_pg pg;
		memset(&pg, 0, sizeof(pg));
		err = ops->cee_peer_getpg(netdev, &pg);
		if (!err &&
		    nla_put(skb, DCB_ATTR_CEE_PEER_PG, sizeof(pg), &pg))
			goto nla_put_failure;
	}

	if (ops->cee_peer_getpfc) {
		struct cee_pfc pfc;
		memset(&pfc, 0, sizeof(pfc));
		err = ops->cee_peer_getpfc(netdev, &pfc);
		if (!err &&
		    nla_put(skb, DCB_ATTR_CEE_PEER_PFC, sizeof(pfc), &pfc))
			goto nla_put_failure;
	}

	if (ops->peer_getappinfo && ops->peer_getapptable) {
		err = dcbnl_build_peer_app(netdev, skb,
					   DCB_ATTR_CEE_PEER_APP_TABLE,
					   DCB_ATTR_CEE_PEER_APP_INFO,
					   DCB_ATTR_CEE_PEER_APP);
		if (err)
			goto nla_put_failure;
	}
	nla_nest_end(skb, cee);

	/* DCBX state */
	if (dcbx >= 0) {
		err = nla_put_u8(skb, DCB_ATTR_DCBX, dcbx);
		if (err)
			goto nla_put_failure;
	}
	return 0;

dcb_unlock:
	spin_unlock_bh(&dcb_lock);
nla_put_failure:
	/* All failures are reported uniformly as -EMSGSIZE */
	err = -EMSGSIZE;
	return err;
}
/* Build and broadcast a DCB state notification for @dev on the
 * RTNLGRP_DCB group.  dcbx_ver selects the message format: IEEE
 * (dcbnl_ieee_fill) or CEE (dcbnl_cee_fill).  Returns the fill result
 * (0 on success, negative errno on failure).
 */
static int dcbnl_notify(struct net_device *dev, int event, int cmd,
			u32 seq, u32 portid, int dcbx_ver)
{
	struct net *net = dev_net(dev);
	struct sk_buff *skb;
	struct nlmsghdr *nlh;
	const struct dcbnl_rtnl_ops *ops = dev->dcbnl_ops;
	int err;

	if (!ops)
		return -EOPNOTSUPP;

	skb = dcbnl_newmsg(event, cmd, portid, seq, 0, &nlh);
	if (!skb)
		return -ENOBUFS;

	if (dcbx_ver == DCB_CAP_DCBX_VER_IEEE)
		err = dcbnl_ieee_fill(skb, dev);
	else
		err = dcbnl_cee_fill(skb, dev);

	if (err < 0) {
		/* Report error to broadcast listeners */
		nlmsg_free(skb);
		rtnl_set_sk_err(net, RTNLGRP_DCB, err);
	} else {
		/* End nlmsg and notify broadcast listeners */
		nlmsg_end(skb, nlh);
		rtnl_notify(skb, net, 0, RTNLGRP_DCB, NULL, GFP_KERNEL);
	}

	return err;
}
  1151. int dcbnl_ieee_notify(struct net_device *dev, int event, int cmd,
  1152. u32 seq, u32 portid)
  1153. {
  1154. return dcbnl_notify(dev, event, cmd, seq, portid, DCB_CAP_DCBX_VER_IEEE);
  1155. }
  1156. EXPORT_SYMBOL(dcbnl_ieee_notify);
  1157. int dcbnl_cee_notify(struct net_device *dev, int event, int cmd,
  1158. u32 seq, u32 portid)
  1159. {
  1160. return dcbnl_notify(dev, event, cmd, seq, portid, DCB_CAP_DCBX_VER_CEE);
  1161. }
  1162. EXPORT_SYMBOL(dcbnl_cee_notify);
/* Handle IEEE 802.1Qaz/802.1Qau/802.1Qbb SET commands.
 * If any requested operation can not be completed
 * the entire msg is aborted and error value is returned.
 * No attempt is made to reconcile the case where only part of the
 * cmd can be completed.
 */
static int dcbnl_ieee_set(struct net_device *netdev, struct nlmsghdr *nlh,
			  u32 seq, struct nlattr **tb, struct sk_buff *skb)
{
	const struct dcbnl_rtnl_ops *ops = netdev->dcbnl_ops;
	struct nlattr *ieee[DCB_ATTR_IEEE_MAX + 1];
	int err;

	if (!ops)
		return -EOPNOTSUPP;

	if (!tb[DCB_ATTR_IEEE])
		return -EINVAL;

	err = nla_parse_nested(ieee, DCB_ATTR_IEEE_MAX, tb[DCB_ATTR_IEEE],
			       dcbnl_ieee_policy, NULL);
	if (err)
		return err;

	/* Each section is applied only when both the attribute is present
	 * and the driver implements the matching op; first failure jumps
	 * to err, where the status is reported back.
	 */
	if (ieee[DCB_ATTR_IEEE_ETS] && ops->ieee_setets) {
		struct ieee_ets *ets = nla_data(ieee[DCB_ATTR_IEEE_ETS]);
		err = ops->ieee_setets(netdev, ets);
		if (err)
			goto err;
	}

	if (ieee[DCB_ATTR_IEEE_MAXRATE] && ops->ieee_setmaxrate) {
		struct ieee_maxrate *maxrate =
			nla_data(ieee[DCB_ATTR_IEEE_MAXRATE]);
		err = ops->ieee_setmaxrate(netdev, maxrate);
		if (err)
			goto err;
	}

	if (ieee[DCB_ATTR_IEEE_QCN] && ops->ieee_setqcn) {
		struct ieee_qcn *qcn =
			nla_data(ieee[DCB_ATTR_IEEE_QCN]);

		err = ops->ieee_setqcn(netdev, qcn);
		if (err)
			goto err;
	}

	if (ieee[DCB_ATTR_IEEE_PFC] && ops->ieee_setpfc) {
		struct ieee_pfc *pfc = nla_data(ieee[DCB_ATTR_IEEE_PFC]);
		err = ops->ieee_setpfc(netdev, pfc);
		if (err)
			goto err;
	}

	if (ieee[DCB_ATTR_IEEE_APP_TABLE]) {
		struct nlattr *attr;
		int rem;

		nla_for_each_nested(attr, ieee[DCB_ATTR_IEEE_APP_TABLE], rem) {
			struct dcb_app *app_data;

			if (nla_type(attr) != DCB_ATTR_IEEE_APP)
				continue;

			/* Reject short payloads before dereferencing */
			if (nla_len(attr) < sizeof(struct dcb_app)) {
				err = -ERANGE;
				goto err;
			}

			app_data = nla_data(attr);
			/* Drivers without ieee_setapp fall back to the
			 * generic in-kernel app table.
			 */
			if (ops->ieee_setapp)
				err = ops->ieee_setapp(netdev, app_data);
			else
				err = dcb_ieee_setapp(netdev, app_data);
			if (err)
				goto err;
		}
	}

err:
	/* The operation status (0 or errno) is echoed in the reply attr;
	 * the notification is sent regardless of the outcome.
	 */
	err = nla_put_u8(skb, DCB_ATTR_IEEE, err);
	dcbnl_ieee_notify(netdev, RTM_SETDCB, DCB_CMD_IEEE_SET, seq, 0);
	return err;
}
  1234. static int dcbnl_ieee_get(struct net_device *netdev, struct nlmsghdr *nlh,
  1235. u32 seq, struct nlattr **tb, struct sk_buff *skb)
  1236. {
  1237. const struct dcbnl_rtnl_ops *ops = netdev->dcbnl_ops;
  1238. if (!ops)
  1239. return -EOPNOTSUPP;
  1240. return dcbnl_ieee_fill(skb, netdev);
  1241. }
/* DCB_CMD_IEEE_DEL handler: remove IEEE app-table entries.  Each
 * DCB_ATTR_IEEE_APP entry is deleted via the driver's ieee_delapp op or,
 * if absent, the generic dcb_ieee_delapp().  The operation status is
 * echoed in the reply attribute and a notification is always sent.
 */
static int dcbnl_ieee_del(struct net_device *netdev, struct nlmsghdr *nlh,
			  u32 seq, struct nlattr **tb, struct sk_buff *skb)
{
	const struct dcbnl_rtnl_ops *ops = netdev->dcbnl_ops;
	struct nlattr *ieee[DCB_ATTR_IEEE_MAX + 1];
	int err;

	if (!ops)
		return -EOPNOTSUPP;

	if (!tb[DCB_ATTR_IEEE])
		return -EINVAL;

	err = nla_parse_nested(ieee, DCB_ATTR_IEEE_MAX, tb[DCB_ATTR_IEEE],
			       dcbnl_ieee_policy, NULL);
	if (err)
		return err;

	if (ieee[DCB_ATTR_IEEE_APP_TABLE]) {
		struct nlattr *attr;
		int rem;

		nla_for_each_nested(attr, ieee[DCB_ATTR_IEEE_APP_TABLE], rem) {
			struct dcb_app *app_data;

			if (nla_type(attr) != DCB_ATTR_IEEE_APP)
				continue;
			app_data = nla_data(attr);
			if (ops->ieee_delapp)
				err = ops->ieee_delapp(netdev, app_data);
			else
				err = dcb_ieee_delapp(netdev, app_data);
			if (err)
				goto err;
		}
	}

err:
	/* Status (0 or errno) goes in the reply; notify unconditionally */
	err = nla_put_u8(skb, DCB_ATTR_IEEE, err);
	dcbnl_ieee_notify(netdev, RTM_SETDCB, DCB_CMD_IEEE_DEL, seq, 0);
	return err;
}
  1277. /* DCBX configuration */
  1278. static int dcbnl_getdcbx(struct net_device *netdev, struct nlmsghdr *nlh,
  1279. u32 seq, struct nlattr **tb, struct sk_buff *skb)
  1280. {
  1281. if (!netdev->dcbnl_ops->getdcbx)
  1282. return -EOPNOTSUPP;
  1283. return nla_put_u8(skb, DCB_ATTR_DCBX,
  1284. netdev->dcbnl_ops->getdcbx(netdev));
  1285. }
  1286. static int dcbnl_setdcbx(struct net_device *netdev, struct nlmsghdr *nlh,
  1287. u32 seq, struct nlattr **tb, struct sk_buff *skb)
  1288. {
  1289. u8 value;
  1290. if (!netdev->dcbnl_ops->setdcbx)
  1291. return -EOPNOTSUPP;
  1292. if (!tb[DCB_ATTR_DCBX])
  1293. return -EINVAL;
  1294. value = nla_get_u8(tb[DCB_ATTR_DCBX]);
  1295. return nla_put_u8(skb, DCB_ATTR_DCBX,
  1296. netdev->dcbnl_ops->setdcbx(netdev, value));
  1297. }
/* DCB_CMD_GFEATCFG handler: report feature configuration flags.
 * DCB_FEATCFG_ATTR_ALL selects every feature; otherwise only those
 * requested are queried and emitted as u8 attributes.
 */
static int dcbnl_getfeatcfg(struct net_device *netdev, struct nlmsghdr *nlh,
			    u32 seq, struct nlattr **tb, struct sk_buff *skb)
{
	struct nlattr *data[DCB_FEATCFG_ATTR_MAX + 1], *nest;
	u8 value;
	int ret, i;
	int getall = 0;

	if (!netdev->dcbnl_ops->getfeatcfg)
		return -EOPNOTSUPP;

	if (!tb[DCB_ATTR_FEATCFG])
		return -EINVAL;

	ret = nla_parse_nested(data, DCB_FEATCFG_ATTR_MAX,
			       tb[DCB_ATTR_FEATCFG], dcbnl_featcfg_nest, NULL);
	if (ret)
		return ret;

	nest = nla_nest_start(skb, DCB_ATTR_FEATCFG);
	if (!nest)
		return -EMSGSIZE;

	if (data[DCB_FEATCFG_ATTR_ALL])
		getall = 1;

	for (i = DCB_FEATCFG_ATTR_ALL+1; i <= DCB_FEATCFG_ATTR_MAX; i++) {
		if (!getall && !data[i])
			continue;

		ret = netdev->dcbnl_ops->getfeatcfg(netdev, i, &value);
		if (!ret)
			ret = nla_put_u8(skb, i, value);

		if (ret) {
			/* Roll back the partial nest; ret carries the error */
			nla_nest_cancel(skb, nest);
			goto nla_put_failure;
		}
	}
	nla_nest_end(skb, nest);

nla_put_failure:
	/* ret is 0 on success (or from the last successful nla_put_u8) */
	return ret;
}
  1333. static int dcbnl_setfeatcfg(struct net_device *netdev, struct nlmsghdr *nlh,
  1334. u32 seq, struct nlattr **tb, struct sk_buff *skb)
  1335. {
  1336. struct nlattr *data[DCB_FEATCFG_ATTR_MAX + 1];
  1337. int ret, i;
  1338. u8 value;
  1339. if (!netdev->dcbnl_ops->setfeatcfg)
  1340. return -ENOTSUPP;
  1341. if (!tb[DCB_ATTR_FEATCFG])
  1342. return -EINVAL;
  1343. ret = nla_parse_nested(data, DCB_FEATCFG_ATTR_MAX,
  1344. tb[DCB_ATTR_FEATCFG], dcbnl_featcfg_nest, NULL);
  1345. if (ret)
  1346. goto err;
  1347. for (i = DCB_FEATCFG_ATTR_ALL+1; i <= DCB_FEATCFG_ATTR_MAX; i++) {
  1348. if (data[i] == NULL)
  1349. continue;
  1350. value = nla_get_u8(data[i]);
  1351. ret = netdev->dcbnl_ops->setfeatcfg(netdev, i, value);
  1352. if (ret)
  1353. goto err;
  1354. }
  1355. err:
  1356. ret = nla_put_u8(skb, DCB_ATTR_FEATCFG, ret);
  1357. return ret;
  1358. }
  1359. /* Handle CEE DCBX GET commands. */
  1360. static int dcbnl_cee_get(struct net_device *netdev, struct nlmsghdr *nlh,
  1361. u32 seq, struct nlattr **tb, struct sk_buff *skb)
  1362. {
  1363. const struct dcbnl_rtnl_ops *ops = netdev->dcbnl_ops;
  1364. if (!ops)
  1365. return -EOPNOTSUPP;
  1366. return dcbnl_cee_fill(skb, netdev);
  1367. }
/* Dispatch entry pairing a DCB command with its reply builder. */
struct reply_func {
	/* reply netlink message type */
	int	type;

	/* function to fill message contents */
	int   (*cb)(struct net_device *, struct nlmsghdr *, u32,
		    struct nlattr **, struct sk_buff *);
};
/* Command dispatch table, indexed by DCB_CMD_* from the request.
 * Entries with a NULL cb (unlisted commands) are rejected by dcb_doit().
 */
static const struct reply_func reply_funcs[DCB_CMD_MAX+1] = {
	[DCB_CMD_GSTATE]	= { RTM_GETDCB, dcbnl_getstate },
	[DCB_CMD_SSTATE]	= { RTM_SETDCB, dcbnl_setstate },
	[DCB_CMD_PFC_GCFG]	= { RTM_GETDCB, dcbnl_getpfccfg },
	[DCB_CMD_PFC_SCFG]	= { RTM_SETDCB, dcbnl_setpfccfg },
	[DCB_CMD_GPERM_HWADDR]	= { RTM_GETDCB, dcbnl_getperm_hwaddr },
	[DCB_CMD_GCAP]		= { RTM_GETDCB, dcbnl_getcap },
	[DCB_CMD_GNUMTCS]	= { RTM_GETDCB, dcbnl_getnumtcs },
	[DCB_CMD_SNUMTCS]	= { RTM_SETDCB, dcbnl_setnumtcs },
	[DCB_CMD_PFC_GSTATE]	= { RTM_GETDCB, dcbnl_getpfcstate },
	[DCB_CMD_PFC_SSTATE]	= { RTM_SETDCB, dcbnl_setpfcstate },
	[DCB_CMD_GAPP]		= { RTM_GETDCB, dcbnl_getapp },
	[DCB_CMD_SAPP]		= { RTM_SETDCB, dcbnl_setapp },
	[DCB_CMD_PGTX_GCFG]	= { RTM_GETDCB, dcbnl_pgtx_getcfg },
	[DCB_CMD_PGTX_SCFG]	= { RTM_SETDCB, dcbnl_pgtx_setcfg },
	[DCB_CMD_PGRX_GCFG]	= { RTM_GETDCB, dcbnl_pgrx_getcfg },
	[DCB_CMD_PGRX_SCFG]	= { RTM_SETDCB, dcbnl_pgrx_setcfg },
	[DCB_CMD_SET_ALL]	= { RTM_SETDCB, dcbnl_setall },
	[DCB_CMD_BCN_GCFG]	= { RTM_GETDCB, dcbnl_bcn_getcfg },
	[DCB_CMD_BCN_SCFG]	= { RTM_SETDCB, dcbnl_bcn_setcfg },
	[DCB_CMD_IEEE_GET]	= { RTM_GETDCB, dcbnl_ieee_get },
	[DCB_CMD_IEEE_SET]	= { RTM_SETDCB, dcbnl_ieee_set },
	[DCB_CMD_IEEE_DEL]	= { RTM_SETDCB, dcbnl_ieee_del },
	[DCB_CMD_GDCBX]		= { RTM_GETDCB, dcbnl_getdcbx },
	[DCB_CMD_SDCBX]		= { RTM_SETDCB, dcbnl_setdcbx },
	[DCB_CMD_GFEATCFG]	= { RTM_GETDCB, dcbnl_getfeatcfg },
	[DCB_CMD_SFEATCFG]	= { RTM_SETDCB, dcbnl_setfeatcfg },
	[DCB_CMD_CEE_GET]	= { RTM_GETDCB, dcbnl_cee_get },
};
/* Top-level RTM_GETDCB/RTM_SETDCB handler: validate the request, resolve
 * the target device, dispatch to the per-command reply builder, and
 * unicast the reply back to the requester.
 */
static int dcb_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
		    struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct net_device *netdev;
	struct dcbmsg *dcb = nlmsg_data(nlh);
	struct nlattr *tb[DCB_ATTR_MAX + 1];
	u32 portid = skb ? NETLINK_CB(skb).portid : 0;
	int ret = -EINVAL;
	struct sk_buff *reply_skb;
	struct nlmsghdr *reply_nlh = NULL;
	const struct reply_func *fn;

	/* SET messages require CAP_NET_ADMIN */
	if ((nlh->nlmsg_type == RTM_SETDCB) && !netlink_capable(skb, CAP_NET_ADMIN))
		return -EPERM;

	ret = nlmsg_parse(nlh, sizeof(*dcb), tb, DCB_ATTR_MAX,
			  dcbnl_rtnl_policy, extack);
	if (ret < 0)
		return ret;

	if (dcb->cmd > DCB_CMD_MAX)
		return -EINVAL;

	/* check if a reply function has been defined for the command */
	fn = &reply_funcs[dcb->cmd];
	if (!fn->cb)
		return -EOPNOTSUPP;

	/* Re-check the capability against the command's own type: a
	 * GETDCB-typed message may still name a SET command.
	 */
	if (fn->type == RTM_SETDCB && !netlink_capable(skb, CAP_NET_ADMIN))
		return -EPERM;

	if (!tb[DCB_ATTR_IFNAME])
		return -EINVAL;

	netdev = __dev_get_by_name(net, nla_data(tb[DCB_ATTR_IFNAME]));
	if (!netdev)
		return -ENODEV;

	if (!netdev->dcbnl_ops)
		return -EOPNOTSUPP;

	reply_skb = dcbnl_newmsg(fn->type, dcb->cmd, portid, nlh->nlmsg_seq,
				 nlh->nlmsg_flags, &reply_nlh);
	if (!reply_skb)
		return -ENOBUFS;

	ret = fn->cb(netdev, nlh, nlh->nlmsg_seq, tb, reply_skb);
	if (ret < 0) {
		nlmsg_free(reply_skb);
		goto out;
	}

	nlmsg_end(reply_skb, reply_nlh);

	ret = rtnl_unicast(reply_skb, net, portid);
out:
	return ret;
}
  1451. static struct dcb_app_type *dcb_app_lookup(const struct dcb_app *app,
  1452. int ifindex, int prio)
  1453. {
  1454. struct dcb_app_type *itr;
  1455. list_for_each_entry(itr, &dcb_app_list, list) {
  1456. if (itr->app.selector == app->selector &&
  1457. itr->app.protocol == app->protocol &&
  1458. itr->ifindex == ifindex &&
  1459. ((prio == -1) || itr->app.priority == prio))
  1460. return itr;
  1461. }
  1462. return NULL;
  1463. }
  1464. static int dcb_app_add(const struct dcb_app *app, int ifindex)
  1465. {
  1466. struct dcb_app_type *entry;
  1467. entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
  1468. if (!entry)
  1469. return -ENOMEM;
  1470. memcpy(&entry->app, app, sizeof(*app));
  1471. entry->ifindex = ifindex;
  1472. list_add(&entry->list, &dcb_app_list);
  1473. return 0;
  1474. }
  1475. /**
  1476. * dcb_getapp - retrieve the DCBX application user priority
  1477. *
  1478. * On success returns a non-zero 802.1p user priority bitmap
  1479. * otherwise returns 0 as the invalid user priority bitmap to
  1480. * indicate an error.
  1481. */
  1482. u8 dcb_getapp(struct net_device *dev, struct dcb_app *app)
  1483. {
  1484. struct dcb_app_type *itr;
  1485. u8 prio = 0;
  1486. spin_lock_bh(&dcb_lock);
  1487. itr = dcb_app_lookup(app, dev->ifindex, -1);
  1488. if (itr)
  1489. prio = itr->app.priority;
  1490. spin_unlock_bh(&dcb_lock);
  1491. return prio;
  1492. }
  1493. EXPORT_SYMBOL(dcb_getapp);
/**
 * dcb_setapp - add CEE dcb application data to app list
 *
 * Priority 0 is an invalid priority in CEE spec. This routine
 * removes applications from the app list if the priority is
 * set to zero. Priority is expected to be 8-bit 802.1p user priority bitmap
 */
int dcb_setapp(struct net_device *dev, struct dcb_app *new)
{
	struct dcb_app_type *itr;
	struct dcb_app_type event;
	int err = 0;

	event.ifindex = dev->ifindex;
	memcpy(&event.app, new, sizeof(event.app));
	/* NOTE(review): event.dcbx stays uninitialized when the driver has
	 * no getdcbx op, yet event is still passed to the notifiers below
	 * — confirm listeners tolerate that.
	 */
	if (dev->dcbnl_ops->getdcbx)
		event.dcbx = dev->dcbnl_ops->getdcbx(dev);

	spin_lock_bh(&dcb_lock);
	/* Search for existing match and replace */
	itr = dcb_app_lookup(new, dev->ifindex, -1);
	if (itr) {
		if (new->priority)
			itr->app.priority = new->priority;
		else {
			/* Priority 0 means: remove the entry */
			list_del(&itr->list);
			kfree(itr);
		}
		goto out;
	}
	/* App type does not exist add new application type */
	if (new->priority)
		err = dcb_app_add(new, dev->ifindex);
out:
	spin_unlock_bh(&dcb_lock);
	/* Notify listeners outside the lock */
	if (!err)
		call_dcbevent_notifiers(DCB_APP_EVENT, &event);
	return err;
}
EXPORT_SYMBOL(dcb_setapp);
  1532. /**
  1533. * dcb_ieee_getapp_mask - retrieve the IEEE DCB application priority
  1534. *
  1535. * Helper routine which on success returns a non-zero 802.1Qaz user
  1536. * priority bitmap otherwise returns 0 to indicate the dcb_app was
  1537. * not found in APP list.
  1538. */
  1539. u8 dcb_ieee_getapp_mask(struct net_device *dev, struct dcb_app *app)
  1540. {
  1541. struct dcb_app_type *itr;
  1542. u8 prio = 0;
  1543. spin_lock_bh(&dcb_lock);
  1544. itr = dcb_app_lookup(app, dev->ifindex, -1);
  1545. if (itr)
  1546. prio |= 1 << itr->app.priority;
  1547. spin_unlock_bh(&dcb_lock);
  1548. return prio;
  1549. }
  1550. EXPORT_SYMBOL(dcb_ieee_getapp_mask);
  1551. /**
  1552. * dcb_ieee_setapp - add IEEE dcb application data to app list
  1553. *
  1554. * This adds Application data to the list. Multiple application
  1555. * entries may exists for the same selector and protocol as long
  1556. * as the priorities are different. Priority is expected to be a
  1557. * 3-bit unsigned integer
  1558. */
  1559. int dcb_ieee_setapp(struct net_device *dev, struct dcb_app *new)
  1560. {
  1561. struct dcb_app_type event;
  1562. int err = 0;
  1563. event.ifindex = dev->ifindex;
  1564. memcpy(&event.app, new, sizeof(event.app));
  1565. if (dev->dcbnl_ops->getdcbx)
  1566. event.dcbx = dev->dcbnl_ops->getdcbx(dev);
  1567. spin_lock_bh(&dcb_lock);
  1568. /* Search for existing match and abort if found */
  1569. if (dcb_app_lookup(new, dev->ifindex, new->priority)) {
  1570. err = -EEXIST;
  1571. goto out;
  1572. }
  1573. err = dcb_app_add(new, dev->ifindex);
  1574. out:
  1575. spin_unlock_bh(&dcb_lock);
  1576. if (!err)
  1577. call_dcbevent_notifiers(DCB_APP_EVENT, &event);
  1578. return err;
  1579. }
  1580. EXPORT_SYMBOL(dcb_ieee_setapp);
  1581. /**
  1582. * dcb_ieee_delapp - delete IEEE dcb application data from list
  1583. *
  1584. * This removes a matching APP data from the APP list
  1585. */
  1586. int dcb_ieee_delapp(struct net_device *dev, struct dcb_app *del)
  1587. {
  1588. struct dcb_app_type *itr;
  1589. struct dcb_app_type event;
  1590. int err = -ENOENT;
  1591. event.ifindex = dev->ifindex;
  1592. memcpy(&event.app, del, sizeof(event.app));
  1593. if (dev->dcbnl_ops->getdcbx)
  1594. event.dcbx = dev->dcbnl_ops->getdcbx(dev);
  1595. spin_lock_bh(&dcb_lock);
  1596. /* Search for existing match and remove it. */
  1597. if ((itr = dcb_app_lookup(del, dev->ifindex, del->priority))) {
  1598. list_del(&itr->list);
  1599. kfree(itr);
  1600. err = 0;
  1601. }
  1602. spin_unlock_bh(&dcb_lock);
  1603. if (!err)
  1604. call_dcbevent_notifiers(DCB_APP_EVENT, &event);
  1605. return err;
  1606. }
  1607. EXPORT_SYMBOL(dcb_ieee_delapp);
  1608. static int __init dcbnl_init(void)
  1609. {
  1610. INIT_LIST_HEAD(&dcb_app_list);
  1611. rtnl_register(PF_UNSPEC, RTM_GETDCB, dcb_doit, NULL, 0);
  1612. rtnl_register(PF_UNSPEC, RTM_SETDCB, dcb_doit, NULL, 0);
  1613. return 0;
  1614. }
  1615. device_initcall(dcbnl_init);