en_netdev.c
/*
 * Copyright (c) 2007 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */

#include <linux/etherdevice.h>
#include <linux/tcp.h>
#include <linux/if_vlan.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/hash.h>
#include <net/ip.h>

#include <linux/mlx4/driver.h>
#include <linux/mlx4/device.h>
#include <linux/mlx4/cmd.h>
#include <linux/mlx4/cq.h>

#include "mlx4_en.h"
#include "en_port.h"
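
/*
 * Map each user priority (UP) to its own contiguous block of Tx queues.
 * A value of 0 clears the mapping; any non-zero value must be exactly
 * MLX4_EN_NUM_UP.
 */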
int mlx4_en_setup_tc(struct net_device *dev, u8 up)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	int i;
	unsigned int offset = 0;

	if (up && up != MLX4_EN_NUM_UP)
		return -EINVAL;

	netdev_set_num_tc(dev, up);

	/* Partition Tx queues evenly amongst UP's */
	for (i = 0; i < up; i++) {
		netdev_set_tc_queue(dev, i, priv->num_tx_rings_p_up, offset);
		offset += priv->num_tx_rings_p_up;
	}

	return 0;
}
#ifdef CONFIG_RFS_ACCEL

struct mlx4_en_filter {
	struct list_head next;
	struct work_struct work;

	__be32 src_ip;
	__be32 dst_ip;
	__be16 src_port;
	__be16 dst_port;

	int rxq_index;
	struct mlx4_en_priv *priv;
	u32 flow_id;			/* RFS infrastructure id */
	int id;				/* mlx4_en driver id */
	u64 reg_id;			/* Flow steering API id */
	u8 activated;			/* Used to prevent expiry before filter
					 * is attached
					 */
	struct hlist_node filter_chain;
};

static void mlx4_en_filter_rfs_expire(struct mlx4_en_priv *priv);
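
/*
 * Deferred work that programs one accelerated-RFS flow into the HCA:
 * it builds an ETH + IPv4 + TCP spec list matching the cached 4-tuple,
 * detaches any rule previously registered for this filter, and attaches
 * a new rule steering the flow to the target Rx queue's QP.
 */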
static void mlx4_en_filter_work(struct work_struct *work)
{
	struct mlx4_en_filter *filter = container_of(work,
						     struct mlx4_en_filter,
						     work);
	struct mlx4_en_priv *priv = filter->priv;
	struct mlx4_spec_list spec_tcp = {
		.id = MLX4_NET_TRANS_RULE_ID_TCP,
		{
			.tcp_udp = {
				.dst_port = filter->dst_port,
				.dst_port_msk = (__force __be16)-1,
				.src_port = filter->src_port,
				.src_port_msk = (__force __be16)-1,
			},
		},
	};
	struct mlx4_spec_list spec_ip = {
		.id = MLX4_NET_TRANS_RULE_ID_IPV4,
		{
			.ipv4 = {
				.dst_ip = filter->dst_ip,
				.dst_ip_msk = (__force __be32)-1,
				.src_ip = filter->src_ip,
				.src_ip_msk = (__force __be32)-1,
			},
		},
	};
	struct mlx4_spec_list spec_eth = {
		.id = MLX4_NET_TRANS_RULE_ID_ETH,
	};
	struct mlx4_net_trans_rule rule = {
		.list = LIST_HEAD_INIT(rule.list),
		.queue_mode = MLX4_NET_TRANS_Q_LIFO,
		.exclusive = 1,
		.allow_loopback = 1,
		.promisc_mode = MLX4_FS_PROMISC_NONE,
		.port = priv->port,
		.priority = MLX4_DOMAIN_RFS,
	};
	int rc;
	__be64 mac_mask = cpu_to_be64(MLX4_MAC_MASK << 16);

	list_add_tail(&spec_eth.list, &rule.list);
	list_add_tail(&spec_ip.list, &rule.list);
	list_add_tail(&spec_tcp.list, &rule.list);

	rule.qpn = priv->rss_map.qps[filter->rxq_index].qpn;
	memcpy(spec_eth.eth.dst_mac, priv->dev->dev_addr, ETH_ALEN);
	memcpy(spec_eth.eth.dst_mac_msk, &mac_mask, ETH_ALEN);

	filter->activated = 0;

	if (filter->reg_id) {
		rc = mlx4_flow_detach(priv->mdev->dev, filter->reg_id);
		if (rc && rc != -ENOENT)
			en_err(priv, "Error detaching flow. rc = %d\n", rc);
	}

	rc = mlx4_flow_attach(priv->mdev->dev, &rule, &filter->reg_id);
	if (rc)
		en_err(priv, "Error attaching flow. err = %d\n", rc);

	mlx4_en_filter_rfs_expire(priv);

	filter->activated = 1;
}
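
/*
 * Fold the 4-tuple into a single word and hash it down to
 * MLX4_EN_FILTER_HASH_SHIFT bits to pick a bucket in filter_hash.
 */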
static inline struct hlist_head *
filter_hash_bucket(struct mlx4_en_priv *priv, __be32 src_ip, __be32 dst_ip,
		   __be16 src_port, __be16 dst_port)
{
	unsigned long l;
	int bucket_idx;

	l = (__force unsigned long)src_port |
	    ((__force unsigned long)dst_port << 2);
	l ^= (__force unsigned long)(src_ip ^ dst_ip);

	bucket_idx = hash_long(l, MLX4_EN_FILTER_HASH_SHIFT);

	return &priv->filter_hash[bucket_idx];
}
static struct mlx4_en_filter *
mlx4_en_filter_alloc(struct mlx4_en_priv *priv, int rxq_index, __be32 src_ip,
		     __be32 dst_ip, __be16 src_port, __be16 dst_port,
		     u32 flow_id)
{
	struct mlx4_en_filter *filter = NULL;

	filter = kzalloc(sizeof(struct mlx4_en_filter), GFP_ATOMIC);
	if (!filter)
		return NULL;

	filter->priv = priv;
	filter->rxq_index = rxq_index;
	INIT_WORK(&filter->work, mlx4_en_filter_work);

	filter->src_ip = src_ip;
	filter->dst_ip = dst_ip;
	filter->src_port = src_port;
	filter->dst_port = dst_port;

	filter->flow_id = flow_id;

	filter->id = priv->last_filter_id++ % RPS_NO_FILTER;

	list_add_tail(&filter->next, &priv->filters);
	hlist_add_head(&filter->filter_chain,
		       filter_hash_bucket(priv, src_ip, dst_ip, src_port,
					  dst_port));

	return filter;
}

static void mlx4_en_filter_free(struct mlx4_en_filter *filter)
{
	struct mlx4_en_priv *priv = filter->priv;
	int rc;

	list_del(&filter->next);

	rc = mlx4_flow_detach(priv->mdev->dev, filter->reg_id);
	if (rc && rc != -ENOENT)
		en_err(priv, "Error detaching flow. rc = %d\n", rc);

	kfree(filter);
}

static inline struct mlx4_en_filter *
mlx4_en_filter_find(struct mlx4_en_priv *priv, __be32 src_ip, __be32 dst_ip,
		    __be16 src_port, __be16 dst_port)
{
	struct mlx4_en_filter *filter;
	struct mlx4_en_filter *ret = NULL;

	hlist_for_each_entry(filter,
			     filter_hash_bucket(priv, src_ip, dst_ip,
						src_port, dst_port),
			     filter_chain) {
		if (filter->src_ip == src_ip &&
		    filter->dst_ip == dst_ip &&
		    filter->src_port == src_port &&
		    filter->dst_port == dst_port) {
			ret = filter;
			break;
		}
	}

	return ret;
}
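
/*
 * ndo_rx_flow_steer() callback: invoked by the RFS core when a TCP/IPv4
 * flow should be steered to the CPU-local queue rxq_index. Reuses an
 * existing filter for the 4-tuple when possible, otherwise allocates one,
 * then hands the HW programming off to the workqueue. Returns the driver
 * filter id that rps_may_expire_flow() is later asked about.
 */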
static int
mlx4_en_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb,
		   u16 rxq_index, u32 flow_id)
{
	struct mlx4_en_priv *priv = netdev_priv(net_dev);
	struct mlx4_en_filter *filter;
	const struct iphdr *ip;
	const __be16 *ports;
	__be32 src_ip;
	__be32 dst_ip;
	__be16 src_port;
	__be16 dst_port;
	int nhoff = skb_network_offset(skb);
	int ret = 0;

	if (skb->protocol != htons(ETH_P_IP))
		return -EPROTONOSUPPORT;

	ip = (const struct iphdr *)(skb->data + nhoff);
	if (ip_is_fragment(ip))
		return -EPROTONOSUPPORT;

	ports = (const __be16 *)(skb->data + nhoff + 4 * ip->ihl);

	src_ip = ip->saddr;
	dst_ip = ip->daddr;
	src_port = ports[0];
	dst_port = ports[1];

	if (ip->protocol != IPPROTO_TCP)
		return -EPROTONOSUPPORT;

	spin_lock_bh(&priv->filters_lock);
	filter = mlx4_en_filter_find(priv, src_ip, dst_ip, src_port, dst_port);
	if (filter) {
		if (filter->rxq_index == rxq_index)
			goto out;

		filter->rxq_index = rxq_index;
	} else {
		filter = mlx4_en_filter_alloc(priv, rxq_index,
					      src_ip, dst_ip,
					      src_port, dst_port, flow_id);
		if (!filter) {
			ret = -ENOMEM;
			goto err;
		}
	}

	queue_work(priv->mdev->workqueue, &filter->work);

out:
	ret = filter->id;
err:
	spin_unlock_bh(&priv->filters_lock);

	return ret;
}
void mlx4_en_cleanup_filters(struct mlx4_en_priv *priv,
			     struct mlx4_en_rx_ring *rx_ring)
{
	struct mlx4_en_filter *filter, *tmp;
	LIST_HEAD(del_list);

	spin_lock_bh(&priv->filters_lock);
	list_for_each_entry_safe(filter, tmp, &priv->filters, next) {
		list_move(&filter->next, &del_list);
		hlist_del(&filter->filter_chain);
	}
	spin_unlock_bh(&priv->filters_lock);

	list_for_each_entry_safe(filter, tmp, &del_list, next) {
		cancel_work_sync(&filter->work);
		mlx4_en_filter_free(filter);
	}
}
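
/*
 * Walk up to MLX4_EN_FILTER_EXPIRY_QUOTA filters and free the ones the
 * RFS core says may expire. The list head is then rotated past the last
 * surviving filter so the next invocation resumes where this one stopped.
 */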
static void mlx4_en_filter_rfs_expire(struct mlx4_en_priv *priv)
{
	struct mlx4_en_filter *filter = NULL, *tmp, *last_filter = NULL;
	LIST_HEAD(del_list);
	int i = 0;

	spin_lock_bh(&priv->filters_lock);
	list_for_each_entry_safe(filter, tmp, &priv->filters, next) {
		if (i > MLX4_EN_FILTER_EXPIRY_QUOTA)
			break;

		if (filter->activated &&
		    !work_pending(&filter->work) &&
		    rps_may_expire_flow(priv->dev,
					filter->rxq_index, filter->flow_id,
					filter->id)) {
			list_move(&filter->next, &del_list);
			hlist_del(&filter->filter_chain);
		} else
			last_filter = filter;

		i++;
	}

	if (last_filter && (&last_filter->next != priv->filters.next))
		list_move(&priv->filters, &last_filter->next);

	spin_unlock_bh(&priv->filters_lock);

	list_for_each_entry_safe(filter, tmp, &del_list, next)
		mlx4_en_filter_free(filter);
}
#endif
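
/*
 * VLAN offload hooks: track active VIDs in a bitmap, refresh the port
 * VLAN filter while the port is up, and (un)register the VID with the
 * device's VLAN table.
 */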
static int mlx4_en_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	int err;
	int idx;

	en_dbg(HW, priv, "adding VLAN:%d\n", vid);

	set_bit(vid, priv->active_vlans);

	/* Add VID to port VLAN filter */
	mutex_lock(&mdev->state_lock);
	if (mdev->device_up && priv->port_up) {
		err = mlx4_SET_VLAN_FLTR(mdev->dev, priv);
		if (err)
			en_err(priv, "Failed configuring VLAN filter\n");
	}
	if (mlx4_register_vlan(mdev->dev, priv->port, vid, &idx))
		en_err(priv, "failed adding vlan %d\n", vid);
	mutex_unlock(&mdev->state_lock);

	return 0;
}

static int mlx4_en_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	int err;
	int idx;

	en_dbg(HW, priv, "Killing VID:%d\n", vid);

	clear_bit(vid, priv->active_vlans);

	/* Remove VID from port VLAN filter */
	mutex_lock(&mdev->state_lock);
	if (!mlx4_find_cached_vlan(mdev->dev, priv->port, vid, &idx))
		mlx4_unregister_vlan(mdev->dev, priv->port, idx);
	else
		en_err(priv, "could not find vid %d in cache\n", vid);

	if (mdev->device_up && priv->port_up) {
		err = mlx4_SET_VLAN_FLTR(mdev->dev, priv);
		if (err)
			en_err(priv, "Failed configuring VLAN filter\n");
	}
	mutex_unlock(&mdev->state_lock);

	return 0;
}
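
/*
 * Expand a u64 MAC (as used by the FW interface) into a byte array in
 * network byte order; the two bytes past ETH_ALEN are zeroed for
 * alignment-sensitive callers.
 */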
static void mlx4_en_u64_to_mac(unsigned char dst_mac[ETH_ALEN + 2], u64 src_mac)
{
	int i;

	/* i must be signed and the loop inclusive of 0, otherwise
	 * dst_mac[0] would never be written.
	 */
	for (i = ETH_ALEN - 1; i >= 0; --i) {
		dst_mac[i] = src_mac & 0xff;
		src_mac >>= 8;
	}
	memset(&dst_mac[ETH_ALEN], 0, 2);
}
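
/*
 * Attach a unicast steering entry for 'mac' to the given QP. In B0
 * steering the MAC is encoded into a GID and attached directly; in
 * device-managed flow steering an ETH spec rule is built and attached
 * through the flow steering API, returning reg_id for a later detach.
 */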
static int mlx4_en_uc_steer_add(struct mlx4_en_priv *priv,
				unsigned char *mac, int *qpn, u64 *reg_id)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_dev *dev = mdev->dev;
	int err;

	switch (dev->caps.steering_mode) {
	case MLX4_STEERING_MODE_B0: {
		struct mlx4_qp qp;
		u8 gid[16] = {0};

		qp.qpn = *qpn;
		memcpy(&gid[10], mac, ETH_ALEN);
		gid[5] = priv->port;

		err = mlx4_unicast_attach(dev, &qp, gid, 0, MLX4_PROT_ETH);
		break;
	}
	case MLX4_STEERING_MODE_DEVICE_MANAGED: {
		struct mlx4_spec_list spec_eth = { {NULL} };
		__be64 mac_mask = cpu_to_be64(MLX4_MAC_MASK << 16);

		struct mlx4_net_trans_rule rule = {
			.queue_mode = MLX4_NET_TRANS_Q_FIFO,
			.exclusive = 0,
			.allow_loopback = 1,
			.promisc_mode = MLX4_FS_PROMISC_NONE,
			.priority = MLX4_DOMAIN_NIC,
		};

		rule.port = priv->port;
		rule.qpn = *qpn;
		INIT_LIST_HEAD(&rule.list);

		spec_eth.id = MLX4_NET_TRANS_RULE_ID_ETH;
		memcpy(spec_eth.eth.dst_mac, mac, ETH_ALEN);
		memcpy(spec_eth.eth.dst_mac_msk, &mac_mask, ETH_ALEN);
		list_add_tail(&spec_eth.list, &rule.list);

		err = mlx4_flow_attach(dev, &rule, reg_id);
		break;
	}
	default:
		return -EINVAL;
	}
	if (err)
		en_warn(priv, "Failed Attaching Unicast\n");

	return err;
}
static void mlx4_en_uc_steer_release(struct mlx4_en_priv *priv,
				     unsigned char *mac, int qpn, u64 reg_id)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_dev *dev = mdev->dev;

	switch (dev->caps.steering_mode) {
	case MLX4_STEERING_MODE_B0: {
		struct mlx4_qp qp;
		u8 gid[16] = {0};

		qp.qpn = qpn;
		memcpy(&gid[10], mac, ETH_ALEN);
		gid[5] = priv->port;

		mlx4_unicast_detach(dev, &qp, gid, MLX4_PROT_ETH);
		break;
	}
	case MLX4_STEERING_MODE_DEVICE_MANAGED: {
		mlx4_flow_detach(dev, reg_id);
		break;
	}
	default:
		en_err(priv, "Invalid steering mode.\n");
	}
}
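
/*
 * Register the netdev MAC with the port and obtain the base RSS QP
 * number. In A0 steering the QP is implied by the MAC table index;
 * otherwise a QP range is reserved, a unicast steering rule attached,
 * and the MAC tracked in mac_hash for later replacement/removal.
 */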
static int mlx4_en_get_qp(struct mlx4_en_priv *priv)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_dev *dev = mdev->dev;
	struct mlx4_mac_entry *entry;
	int index = 0;
	int err = 0;
	u64 reg_id;
	int *qpn = &priv->base_qpn;
	u64 mac = mlx4_en_mac_to_u64(priv->dev->dev_addr);

	en_dbg(DRV, priv, "Registering MAC: %pM for adding\n",
	       priv->dev->dev_addr);
	index = mlx4_register_mac(dev, priv->port, mac);
	if (index < 0) {
		err = index;
		en_err(priv, "Failed adding MAC: %pM\n",
		       priv->dev->dev_addr);
		return err;
	}

	if (dev->caps.steering_mode == MLX4_STEERING_MODE_A0) {
		int base_qpn = mlx4_get_base_qpn(dev, priv->port);
		*qpn = base_qpn + index;
		return 0;
	}

	err = mlx4_qp_reserve_range(dev, 1, 1, qpn);
	en_dbg(DRV, priv, "Reserved qp %d\n", *qpn);
	if (err) {
		en_err(priv, "Failed to reserve qp for mac registration\n");
		goto qp_err;
	}

	err = mlx4_en_uc_steer_add(priv, priv->dev->dev_addr, qpn, &reg_id);
	if (err)
		goto steer_err;

	entry = kmalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry) {
		err = -ENOMEM;
		goto alloc_err;
	}
	memcpy(entry->mac, priv->dev->dev_addr, sizeof(entry->mac));
	entry->reg_id = reg_id;

	hlist_add_head_rcu(&entry->hlist,
			   &priv->mac_hash[entry->mac[MLX4_EN_MAC_HASH_IDX]]);

	return 0;

alloc_err:
	mlx4_en_uc_steer_release(priv, priv->dev->dev_addr, *qpn, reg_id);

steer_err:
	mlx4_qp_release_range(dev, *qpn, 1);

qp_err:
	mlx4_unregister_mac(dev, priv->port, mac);
	return err;
}
static void mlx4_en_put_qp(struct mlx4_en_priv *priv)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_dev *dev = mdev->dev;
	int qpn = priv->base_qpn;
	u64 mac;

	if (dev->caps.steering_mode == MLX4_STEERING_MODE_A0) {
		mac = mlx4_en_mac_to_u64(priv->dev->dev_addr);
		en_dbg(DRV, priv, "Registering MAC: %pM for deleting\n",
		       priv->dev->dev_addr);
		mlx4_unregister_mac(dev, priv->port, mac);
	} else {
		struct mlx4_mac_entry *entry;
		struct hlist_node *tmp;
		struct hlist_head *bucket;
		unsigned int i;

		for (i = 0; i < MLX4_EN_MAC_HASH_SIZE; ++i) {
			bucket = &priv->mac_hash[i];
			hlist_for_each_entry_safe(entry, tmp, bucket, hlist) {
				mac = mlx4_en_mac_to_u64(entry->mac);
				en_dbg(DRV, priv, "Registering MAC: %pM for deleting\n",
				       entry->mac);
				mlx4_en_uc_steer_release(priv, entry->mac,
							 qpn, entry->reg_id);
				mlx4_unregister_mac(dev, priv->port, mac);

				hlist_del_rcu(&entry->hlist);
				kfree_rcu(entry, rcu);
			}
		}

		en_dbg(DRV, priv, "Releasing qp: port %d, qpn %d\n",
		       priv->port, qpn);
		mlx4_qp_release_range(dev, qpn, 1);
		priv->flags &= ~MLX4_EN_FLAG_FORCE_PROMISC;
	}
}
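
/*
 * Swap prev_mac for new_mac without tearing down the QP: the old
 * steering entry and MAC registration are released, the hash entry is
 * rehomed to the new MAC's bucket, and a fresh registration plus
 * steering rule are installed. A0 steering delegates to
 * __mlx4_replace_mac(), which updates the MAC table in place.
 */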
static int mlx4_en_replace_mac(struct mlx4_en_priv *priv, int qpn,
			       unsigned char *new_mac, unsigned char *prev_mac)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_dev *dev = mdev->dev;
	int err = 0;
	u64 new_mac_u64 = mlx4_en_mac_to_u64(new_mac);

	if (dev->caps.steering_mode != MLX4_STEERING_MODE_A0) {
		struct hlist_head *bucket;
		unsigned int mac_hash;
		struct mlx4_mac_entry *entry;
		struct hlist_node *tmp;
		u64 prev_mac_u64 = mlx4_en_mac_to_u64(prev_mac);

		bucket = &priv->mac_hash[prev_mac[MLX4_EN_MAC_HASH_IDX]];
		hlist_for_each_entry_safe(entry, tmp, bucket, hlist) {
			if (ether_addr_equal_64bits(entry->mac, prev_mac)) {
				mlx4_en_uc_steer_release(priv, entry->mac,
							 qpn, entry->reg_id);
				mlx4_unregister_mac(dev, priv->port,
						    prev_mac_u64);
				hlist_del_rcu(&entry->hlist);
				synchronize_rcu();
				memcpy(entry->mac, new_mac, ETH_ALEN);
				entry->reg_id = 0;
				mac_hash = new_mac[MLX4_EN_MAC_HASH_IDX];
				hlist_add_head_rcu(&entry->hlist,
						   &priv->mac_hash[mac_hash]);
				mlx4_register_mac(dev, priv->port, new_mac_u64);
				err = mlx4_en_uc_steer_add(priv, new_mac,
							   &qpn,
							   &entry->reg_id);
				return err;
			}
		}
		return -EINVAL;
	}

	return __mlx4_replace_mac(dev, priv->port, qpn, new_mac_u64);
}
u64 mlx4_en_mac_to_u64(u8 *addr)
{
	u64 mac = 0;
	int i;

	for (i = 0; i < ETH_ALEN; i++) {
		mac <<= 8;
		mac |= addr[i];
	}
	return mac;
}
static int mlx4_en_do_set_mac(struct mlx4_en_priv *priv)
{
	int err = 0;

	if (priv->port_up) {
		/* Remove old MAC and insert the new one */
		err = mlx4_en_replace_mac(priv, priv->base_qpn,
					  priv->dev->dev_addr, priv->prev_mac);
		if (err)
			en_err(priv, "Failed changing HW MAC address\n");
		memcpy(priv->prev_mac, priv->dev->dev_addr,
		       sizeof(priv->prev_mac));
	} else
		en_dbg(HW, priv, "Port is down while registering mac, exiting...\n");

	return err;
}

static int mlx4_en_set_mac(struct net_device *dev, void *addr)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	struct sockaddr *saddr = addr;
	int err;

	if (!is_valid_ether_addr(saddr->sa_data))
		return -EADDRNOTAVAIL;

	memcpy(dev->dev_addr, saddr->sa_data, ETH_ALEN);

	mutex_lock(&mdev->state_lock);
	err = mlx4_en_do_set_mac(priv);
	mutex_unlock(&mdev->state_lock);

	return err;
}
static void mlx4_en_clear_list(struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_mc_list *tmp, *mc_to_del;

	list_for_each_entry_safe(mc_to_del, tmp, &priv->mc_list, list) {
		list_del(&mc_to_del->list);
		kfree(mc_to_del);
	}
}

static void mlx4_en_cache_mclist(struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct netdev_hw_addr *ha;
	struct mlx4_en_mc_list *tmp;

	mlx4_en_clear_list(dev);
	netdev_for_each_mc_addr(ha, dev) {
		tmp = kzalloc(sizeof(struct mlx4_en_mc_list), GFP_ATOMIC);
		if (!tmp) {
			mlx4_en_clear_list(dev);
			return;
		}
		memcpy(tmp->addr, ha->addr, ETH_ALEN);
		list_add_tail(&tmp->list, &priv->mc_list);
	}
}
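
/*
 * Three-way diff of the cached multicast lists: entries present in dst
 * but missing from src are flagged MCLIST_REM, entries present in both
 * are flagged MCLIST_NONE, and entries only in src are duplicated into
 * dst with MCLIST_ADD so the caller can attach/detach accordingly.
 */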
static void update_mclist_flags(struct mlx4_en_priv *priv,
				struct list_head *dst,
				struct list_head *src)
{
	struct mlx4_en_mc_list *dst_tmp, *src_tmp, *new_mc;
	bool found;

	/* Find all the entries that should be removed from dst,
	 * i.e. the entries that are not found in src
	 */
	list_for_each_entry(dst_tmp, dst, list) {
		found = false;
		list_for_each_entry(src_tmp, src, list) {
			if (!memcmp(dst_tmp->addr, src_tmp->addr, ETH_ALEN)) {
				found = true;
				break;
			}
		}
		if (!found)
			dst_tmp->action = MCLIST_REM;
	}

	/* Add entries that exist in src but not in dst
	 * and mark them as needing to be added
	 */
	list_for_each_entry(src_tmp, src, list) {
		found = false;
		list_for_each_entry(dst_tmp, dst, list) {
			if (!memcmp(dst_tmp->addr, src_tmp->addr, ETH_ALEN)) {
				dst_tmp->action = MCLIST_NONE;
				found = true;
				break;
			}
		}
		if (!found) {
			new_mc = kmemdup(src_tmp,
					 sizeof(struct mlx4_en_mc_list),
					 GFP_KERNEL);
			if (!new_mc)
				return;

			new_mc->action = MCLIST_ADD;
			list_add_tail(&new_mc->list, dst);
		}
	}
}
static void mlx4_en_set_rx_mode(struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);

	if (!priv->port_up)
		return;

	queue_work(priv->mdev->workqueue, &priv->rx_mode_task);
}
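
/*
 * Enter promiscuous mode: install the steering-mode-specific promisc
 * rule for the base QP, then unconditionally disable the port multicast
 * and VLAN filters so all traffic reaches the host.
 */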
static void mlx4_en_set_promisc_mode(struct mlx4_en_priv *priv,
				     struct mlx4_en_dev *mdev)
{
	int err = 0;

	if (!(priv->flags & MLX4_EN_FLAG_PROMISC)) {
		if (netif_msg_rx_status(priv))
			en_warn(priv, "Entering promiscuous mode\n");
		priv->flags |= MLX4_EN_FLAG_PROMISC;

		/* Enable promiscuous mode */
		switch (mdev->dev->caps.steering_mode) {
		case MLX4_STEERING_MODE_DEVICE_MANAGED:
			err = mlx4_flow_steer_promisc_add(mdev->dev,
							  priv->port,
							  priv->base_qpn,
							  MLX4_FS_PROMISC_UPLINK);
			if (err)
				en_err(priv, "Failed enabling promiscuous mode\n");
			priv->flags |= MLX4_EN_FLAG_MC_PROMISC;
			break;

		case MLX4_STEERING_MODE_B0:
			err = mlx4_unicast_promisc_add(mdev->dev,
						       priv->base_qpn,
						       priv->port);
			if (err)
				en_err(priv, "Failed enabling unicast promiscuous mode\n");

			/* Add the default qp number as multicast
			 * promisc
			 */
			if (!(priv->flags & MLX4_EN_FLAG_MC_PROMISC)) {
				err = mlx4_multicast_promisc_add(mdev->dev,
								 priv->base_qpn,
								 priv->port);
				if (err)
					en_err(priv, "Failed enabling multicast promiscuous mode\n");
				priv->flags |= MLX4_EN_FLAG_MC_PROMISC;
			}
			break;

		case MLX4_STEERING_MODE_A0:
			err = mlx4_SET_PORT_qpn_calc(mdev->dev,
						     priv->port,
						     priv->base_qpn,
						     1);
			if (err)
				en_err(priv, "Failed enabling promiscuous mode\n");
			break;
		}

		/* Disable port multicast filter (unconditionally) */
		err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0,
					  0, MLX4_MCAST_DISABLE);
		if (err)
			en_err(priv, "Failed disabling multicast filter\n");

		/* Disable port VLAN filter */
		err = mlx4_SET_VLAN_FLTR(mdev->dev, priv);
		if (err)
			en_err(priv, "Failed disabling VLAN filter\n");
	}
}
static void mlx4_en_clear_promisc_mode(struct mlx4_en_priv *priv,
				       struct mlx4_en_dev *mdev)
{
	int err = 0;

	if (netif_msg_rx_status(priv))
		en_warn(priv, "Leaving promiscuous mode\n");
	priv->flags &= ~MLX4_EN_FLAG_PROMISC;

	/* Disable promiscuous mode */
	switch (mdev->dev->caps.steering_mode) {
	case MLX4_STEERING_MODE_DEVICE_MANAGED:
		err = mlx4_flow_steer_promisc_remove(mdev->dev,
						     priv->port,
						     MLX4_FS_PROMISC_UPLINK);
		if (err)
			en_err(priv, "Failed disabling promiscuous mode\n");
		priv->flags &= ~MLX4_EN_FLAG_MC_PROMISC;
		break;

	case MLX4_STEERING_MODE_B0:
		err = mlx4_unicast_promisc_remove(mdev->dev,
						  priv->base_qpn,
						  priv->port);
		if (err)
			en_err(priv, "Failed disabling unicast promiscuous mode\n");
		/* Disable Multicast promisc */
		if (priv->flags & MLX4_EN_FLAG_MC_PROMISC) {
			err = mlx4_multicast_promisc_remove(mdev->dev,
							    priv->base_qpn,
							    priv->port);
			if (err)
				en_err(priv, "Failed disabling multicast promiscuous mode\n");
			priv->flags &= ~MLX4_EN_FLAG_MC_PROMISC;
		}
		break;

	case MLX4_STEERING_MODE_A0:
		err = mlx4_SET_PORT_qpn_calc(mdev->dev,
					     priv->port,
					     priv->base_qpn, 0);
		if (err)
			en_err(priv, "Failed disabling promiscuous mode\n");
		break;
	}

	/* Enable port VLAN filter */
	err = mlx4_SET_VLAN_FLTR(mdev->dev, priv);
	if (err)
		en_err(priv, "Failed enabling VLAN filter\n");
}
static void mlx4_en_do_multicast(struct mlx4_en_priv *priv,
				 struct net_device *dev,
				 struct mlx4_en_dev *mdev)
{
	struct mlx4_en_mc_list *mclist, *tmp;
	u64 mcast_addr = 0;
	u8 mc_list[16] = {0};
	int err = 0;

	/* Enable/disable the multicast filter according to IFF_ALLMULTI */
	if (dev->flags & IFF_ALLMULTI) {
		err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0,
					  0, MLX4_MCAST_DISABLE);
		if (err)
			en_err(priv, "Failed disabling multicast filter\n");

		/* Add the default qp number as multicast promisc */
		if (!(priv->flags & MLX4_EN_FLAG_MC_PROMISC)) {
			switch (mdev->dev->caps.steering_mode) {
			case MLX4_STEERING_MODE_DEVICE_MANAGED:
				err = mlx4_flow_steer_promisc_add(mdev->dev,
								  priv->port,
								  priv->base_qpn,
								  MLX4_FS_PROMISC_ALL_MULTI);
				break;

			case MLX4_STEERING_MODE_B0:
				err = mlx4_multicast_promisc_add(mdev->dev,
								 priv->base_qpn,
								 priv->port);
				break;

			case MLX4_STEERING_MODE_A0:
				break;
			}
			if (err)
				en_err(priv, "Failed entering multicast promisc mode\n");
			priv->flags |= MLX4_EN_FLAG_MC_PROMISC;
		}
	} else {
		/* Disable Multicast promisc */
		if (priv->flags & MLX4_EN_FLAG_MC_PROMISC) {
			switch (mdev->dev->caps.steering_mode) {
			case MLX4_STEERING_MODE_DEVICE_MANAGED:
				err = mlx4_flow_steer_promisc_remove(mdev->dev,
								     priv->port,
								     MLX4_FS_PROMISC_ALL_MULTI);
				break;

			case MLX4_STEERING_MODE_B0:
				err = mlx4_multicast_promisc_remove(mdev->dev,
								    priv->base_qpn,
								    priv->port);
				break;

			case MLX4_STEERING_MODE_A0:
				break;
			}
			if (err)
				en_err(priv, "Failed disabling multicast promiscuous mode\n");
			priv->flags &= ~MLX4_EN_FLAG_MC_PROMISC;
		}

		err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0,
					  0, MLX4_MCAST_DISABLE);
		if (err)
			en_err(priv, "Failed disabling multicast filter\n");

		/* Flush mcast filter and init it with broadcast address */
		mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, ETH_BCAST,
				    1, MLX4_MCAST_CONFIG);

		/* Update multicast list - we cache all addresses so they won't
		 * change while HW is updated holding the command semaphore
		 */
		netif_addr_lock_bh(dev);
		mlx4_en_cache_mclist(dev);
		netif_addr_unlock_bh(dev);
		list_for_each_entry(mclist, &priv->mc_list, list) {
			mcast_addr = mlx4_en_mac_to_u64(mclist->addr);
			mlx4_SET_MCAST_FLTR(mdev->dev, priv->port,
					    mcast_addr, 0, MLX4_MCAST_CONFIG);
		}
		err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0,
					  0, MLX4_MCAST_ENABLE);
		if (err)
			en_err(priv, "Failed enabling multicast filter\n");

		update_mclist_flags(priv, &priv->curr_list, &priv->mc_list);
		list_for_each_entry_safe(mclist, tmp, &priv->curr_list, list) {
			if (mclist->action == MCLIST_REM) {
				/* detach this address and delete from list */
				memcpy(&mc_list[10], mclist->addr, ETH_ALEN);
				mc_list[5] = priv->port;
				err = mlx4_multicast_detach(mdev->dev,
							    &priv->rss_map.indir_qp,
							    mc_list,
							    MLX4_PROT_ETH,
							    mclist->reg_id);
				if (err)
					en_err(priv, "Fail to detach multicast address\n");

				/* remove from list */
				list_del(&mclist->list);
				kfree(mclist);
			} else if (mclist->action == MCLIST_ADD) {
				/* attach the address */
				memcpy(&mc_list[10], mclist->addr, ETH_ALEN);
				/* needed for B0 steering support */
				mc_list[5] = priv->port;
				err = mlx4_multicast_attach(mdev->dev,
							    &priv->rss_map.indir_qp,
							    mc_list,
							    priv->port, 0,
							    MLX4_PROT_ETH,
							    &mclist->reg_id);
				if (err)
					en_err(priv, "Fail to attach multicast address\n");
			}
		}
	}
}
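
/*
 * Synchronize the HW unicast filter with the netdev uc address list:
 * first release every mac_hash entry no longer in the list (the port's
 * own MAC is always kept), then register, steer and hash any new
 * addresses. On any failure the port falls back to forced promiscuous
 * mode via MLX4_EN_FLAG_FORCE_PROMISC.
 */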
static void mlx4_en_do_uc_filter(struct mlx4_en_priv *priv,
				 struct net_device *dev,
				 struct mlx4_en_dev *mdev)
{
	struct netdev_hw_addr *ha;
	struct mlx4_mac_entry *entry;
	struct hlist_node *tmp;
	bool found;
	u64 mac;
	int err = 0;
	struct hlist_head *bucket;
	unsigned int i;
	int removed = 0;
	u32 prev_flags;

	/* Note that we do not need to protect our mac_hash traversal with rcu,
	 * since all modification code is protected by mdev->state_lock
	 */

	/* find what to remove */
	for (i = 0; i < MLX4_EN_MAC_HASH_SIZE; ++i) {
		bucket = &priv->mac_hash[i];
		hlist_for_each_entry_safe(entry, tmp, bucket, hlist) {
			found = false;
			netdev_for_each_uc_addr(ha, dev) {
				if (ether_addr_equal_64bits(entry->mac,
							    ha->addr)) {
					found = true;
					break;
				}
			}

			/* MAC address of the port is not in uc list */
			if (ether_addr_equal_64bits(entry->mac, dev->dev_addr))
				found = true;

			if (!found) {
				mac = mlx4_en_mac_to_u64(entry->mac);
				mlx4_en_uc_steer_release(priv, entry->mac,
							 priv->base_qpn,
							 entry->reg_id);
				mlx4_unregister_mac(mdev->dev, priv->port, mac);

				hlist_del_rcu(&entry->hlist);
				kfree_rcu(entry, rcu);
				en_dbg(DRV, priv, "Removed MAC %pM on port:%d\n",
				       entry->mac, priv->port);
				++removed;
			}
		}
	}

	/* if we didn't remove anything, there is no use in trying to add
	 * again once we are in a forced promisc mode state
	 */
	if ((priv->flags & MLX4_EN_FLAG_FORCE_PROMISC) && 0 == removed)
		return;

	prev_flags = priv->flags;
	priv->flags &= ~MLX4_EN_FLAG_FORCE_PROMISC;

	/* find what to add */
	netdev_for_each_uc_addr(ha, dev) {
		found = false;
		bucket = &priv->mac_hash[ha->addr[MLX4_EN_MAC_HASH_IDX]];
		hlist_for_each_entry(entry, bucket, hlist) {
			if (ether_addr_equal_64bits(entry->mac, ha->addr)) {
				found = true;
				break;
			}
		}

		if (!found) {
			entry = kmalloc(sizeof(*entry), GFP_KERNEL);
			if (!entry) {
				en_err(priv, "Failed adding MAC %pM on port:%d (out of memory)\n",
				       ha->addr, priv->port);
				priv->flags |= MLX4_EN_FLAG_FORCE_PROMISC;
				break;
			}
			mac = mlx4_en_mac_to_u64(ha->addr);
			memcpy(entry->mac, ha->addr, ETH_ALEN);
			err = mlx4_register_mac(mdev->dev, priv->port, mac);
			if (err < 0) {
				en_err(priv, "Failed registering MAC %pM on port %d: %d\n",
				       ha->addr, priv->port, err);
				kfree(entry);
				priv->flags |= MLX4_EN_FLAG_FORCE_PROMISC;
				break;
			}
			err = mlx4_en_uc_steer_add(priv, ha->addr,
						   &priv->base_qpn,
						   &entry->reg_id);
			if (err) {
				en_err(priv, "Failed adding MAC %pM on port %d: %d\n",
				       ha->addr, priv->port, err);
				mlx4_unregister_mac(mdev->dev, priv->port, mac);
				kfree(entry);
				priv->flags |= MLX4_EN_FLAG_FORCE_PROMISC;
				break;
			} else {
				unsigned int mac_hash;
				en_dbg(DRV, priv, "Added MAC %pM on port:%d\n",
				       ha->addr, priv->port);
				mac_hash = ha->addr[MLX4_EN_MAC_HASH_IDX];
				bucket = &priv->mac_hash[mac_hash];
				hlist_add_head_rcu(&entry->hlist, bucket);
			}
		}
	}

	if (priv->flags & MLX4_EN_FLAG_FORCE_PROMISC) {
		en_warn(priv, "Forcing promiscuous mode on port:%d\n",
			priv->port);
	} else if (prev_flags & MLX4_EN_FLAG_FORCE_PROMISC) {
		en_warn(priv, "Stop forcing promiscuous mode on port:%d\n",
			priv->port);
	}
}
static void mlx4_en_do_set_rx_mode(struct work_struct *work)
{
	struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv,
						 rx_mode_task);
	struct mlx4_en_dev *mdev = priv->mdev;
	struct net_device *dev = priv->dev;

	mutex_lock(&mdev->state_lock);
	if (!mdev->device_up) {
		en_dbg(HW, priv, "Card is not up, ignoring rx mode change.\n");
		goto out;
	}
	if (!priv->port_up) {
		en_dbg(HW, priv, "Port is down, ignoring rx mode change.\n");
		goto out;
	}

	if (!netif_carrier_ok(dev)) {
		if (!mlx4_en_QUERY_PORT(mdev, priv->port)) {
			if (priv->port_state.link_state) {
				priv->last_link_state = MLX4_DEV_EVENT_PORT_UP;
				netif_carrier_on(dev);
				en_dbg(LINK, priv, "Link Up\n");
			}
		}
	}

	if (dev->priv_flags & IFF_UNICAST_FLT)
		mlx4_en_do_uc_filter(priv, dev, mdev);

	/* Promiscuous mode: disable all filters */
	if ((dev->flags & IFF_PROMISC) ||
	    (priv->flags & MLX4_EN_FLAG_FORCE_PROMISC)) {
		mlx4_en_set_promisc_mode(priv, mdev);
		goto out;
	}

	/* Not in promiscuous mode */
	if (priv->flags & MLX4_EN_FLAG_PROMISC)
		mlx4_en_clear_promisc_mode(priv, mdev);

	mlx4_en_do_multicast(priv, dev, mdev);
out:
	mutex_unlock(&mdev->state_lock);
}
#ifdef CONFIG_NET_POLL_CONTROLLER
static void mlx4_en_netpoll(struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_cq *cq;
	unsigned long flags;
	int i;

	for (i = 0; i < priv->rx_ring_num; i++) {
		cq = &priv->rx_cq[i];
		spin_lock_irqsave(&cq->lock, flags);
		napi_synchronize(&cq->napi);
		mlx4_en_process_rx_cq(dev, cq, 0);
		spin_unlock_irqrestore(&cq->lock, flags);
	}
}
#endif

static void mlx4_en_tx_timeout(struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;

	if (netif_msg_timer(priv))
		en_warn(priv, "Tx timeout called on port:%d\n", priv->port);

	priv->port_stats.tx_timeout++;
	en_dbg(DRV, priv, "Scheduling watchdog\n");
	queue_work(mdev->workqueue, &priv->watchdog_task);
}

static struct net_device_stats *mlx4_en_get_stats(struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);

	spin_lock_bh(&priv->stats_lock);
	memcpy(&priv->ret_stats, &priv->stats, sizeof(priv->stats));
	spin_unlock_bh(&priv->stats_lock);

	return &priv->ret_stats;
}
static void mlx4_en_set_default_moderation(struct mlx4_en_priv *priv)
{
	struct mlx4_en_cq *cq;
	int i;

	/* If we haven't received a specific coalescing setting
	 * (module param), we set the moderation parameters as follows:
	 * - moder_cnt is set to the number of mtu sized packets to
	 *   satisfy our coalescing target.
	 * - moder_time is set to a fixed value.
	 */
	priv->rx_frames = MLX4_EN_RX_COAL_TARGET;
	priv->rx_usecs = MLX4_EN_RX_COAL_TIME;
	priv->tx_frames = MLX4_EN_TX_COAL_PKTS;
	priv->tx_usecs = MLX4_EN_TX_COAL_TIME;
	en_dbg(INTR, priv, "Default coalescing params for mtu:%d - rx_frames:%d rx_usecs:%d\n",
	       priv->dev->mtu, priv->rx_frames, priv->rx_usecs);

	/* Setup cq moderation params */
	for (i = 0; i < priv->rx_ring_num; i++) {
		cq = &priv->rx_cq[i];
		cq->moder_cnt = priv->rx_frames;
		cq->moder_time = priv->rx_usecs;
		priv->last_moder_time[i] = MLX4_EN_AUTO_CONF;
		priv->last_moder_packets[i] = 0;
		priv->last_moder_bytes[i] = 0;
	}

	for (i = 0; i < priv->tx_ring_num; i++) {
		cq = &priv->tx_cq[i];
		cq->moder_cnt = priv->tx_frames;
		cq->moder_time = priv->tx_usecs;
	}

	/* Reset auto-moderation params */
	priv->pkt_rate_low = MLX4_EN_RX_RATE_LOW;
	priv->rx_usecs_low = MLX4_EN_RX_COAL_TIME_LOW;
	priv->pkt_rate_high = MLX4_EN_RX_RATE_HIGH;
	priv->rx_usecs_high = MLX4_EN_RX_COAL_TIME_HIGH;
	priv->sample_interval = MLX4_EN_SAMPLE_INTERVAL;
	priv->adaptive_rx_coal = 1;
	priv->last_moder_jiffies = 0;
	priv->last_moder_tx_packets = 0;
}
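
/*
 * Adaptive Rx interrupt moderation, run once per sample interval: for
 * each ring the packet rate is measured and, when it lies between
 * pkt_rate_low and pkt_rate_high, the moderation time is linearly
 * interpolated between rx_usecs_low and rx_usecs_high:
 *
 *   moder_time = (rate - rate_low) * (usecs_high - usecs_low) /
 *                (rate_high - rate_low) + usecs_low
 *
 * Low rates and small average packets keep the latency-friendly
 * rx_usecs_low setting.
 */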
static void mlx4_en_auto_moderation(struct mlx4_en_priv *priv)
{
	unsigned long period = (unsigned long) (jiffies - priv->last_moder_jiffies);
	struct mlx4_en_cq *cq;
	unsigned long packets;
	unsigned long rate;
	unsigned long avg_pkt_size;
	unsigned long rx_packets;
	unsigned long rx_bytes;
	unsigned long rx_pkt_diff;
	int moder_time;
	int ring, err;

	if (!priv->adaptive_rx_coal || period < priv->sample_interval * HZ)
		return;

	for (ring = 0; ring < priv->rx_ring_num; ring++) {
		spin_lock_bh(&priv->stats_lock);
		rx_packets = priv->rx_ring[ring].packets;
		rx_bytes = priv->rx_ring[ring].bytes;
		spin_unlock_bh(&priv->stats_lock);

		rx_pkt_diff = ((unsigned long) (rx_packets -
				priv->last_moder_packets[ring]));
		packets = rx_pkt_diff;
		rate = packets * HZ / period;
		avg_pkt_size = packets ? ((unsigned long) (rx_bytes -
				priv->last_moder_bytes[ring])) / packets : 0;

		/* Apply auto-moderation only when the packet rate
		 * exceeds a rate at which it matters */
		if (rate > (MLX4_EN_RX_RATE_THRESH / priv->rx_ring_num) &&
		    avg_pkt_size > MLX4_EN_AVG_PKT_SMALL) {
			if (rate < priv->pkt_rate_low)
				moder_time = priv->rx_usecs_low;
			else if (rate > priv->pkt_rate_high)
				moder_time = priv->rx_usecs_high;
			else
				moder_time = (rate - priv->pkt_rate_low) *
					(priv->rx_usecs_high - priv->rx_usecs_low) /
					(priv->pkt_rate_high - priv->pkt_rate_low) +
					priv->rx_usecs_low;
		} else {
			moder_time = priv->rx_usecs_low;
		}

		if (moder_time != priv->last_moder_time[ring]) {
			priv->last_moder_time[ring] = moder_time;
			cq = &priv->rx_cq[ring];
			cq->moder_time = moder_time;
			err = mlx4_en_set_cq_moder(priv, cq);
			if (err)
				en_err(priv, "Failed modifying moderation for cq:%d\n",
				       ring);
		}
		priv->last_moder_packets[ring] = rx_packets;
		priv->last_moder_bytes[ring] = rx_bytes;
	}

	priv->last_moder_jiffies = jiffies;
}
static void mlx4_en_do_get_stats(struct work_struct *work)
{
	struct delayed_work *delay = to_delayed_work(work);
	struct mlx4_en_priv *priv = container_of(delay, struct mlx4_en_priv,
						 stats_task);
	struct mlx4_en_dev *mdev = priv->mdev;
	int err;

	mutex_lock(&mdev->state_lock);
	if (mdev->device_up) {
		err = mlx4_en_DUMP_ETH_STATS(mdev, priv->port, 0);
		if (err)
			en_dbg(HW, priv, "Could not update stats\n");

		if (priv->port_up)
			mlx4_en_auto_moderation(priv);

		queue_delayed_work(mdev->workqueue, &priv->stats_task, STATS_DELAY);
	}
	if (mdev->mac_removed[MLX4_MAX_PORTS + 1 - priv->port]) {
		mlx4_en_do_set_mac(priv);
		mdev->mac_removed[MLX4_MAX_PORTS + 1 - priv->port] = 0;
	}
	mutex_unlock(&mdev->state_lock);
}

static void mlx4_en_linkstate(struct work_struct *work)
{
	struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv,
						 linkstate_task);
	struct mlx4_en_dev *mdev = priv->mdev;
	int linkstate = priv->link_state;

	mutex_lock(&mdev->state_lock);
	/* If observable port state changed set carrier state and
	 * report to system log */
	if (priv->last_link_state != linkstate) {
		if (linkstate == MLX4_DEV_EVENT_PORT_DOWN) {
			en_info(priv, "Link Down\n");
			netif_carrier_off(priv->dev);
		} else {
			en_info(priv, "Link Up\n");
			netif_carrier_on(priv->dev);
		}
	}
	priv->last_link_state = linkstate;
	mutex_unlock(&mdev->state_lock);
}
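
/*
 * Bring the port up. The ordering matters: Rx rings and CQs are
 * activated first, then the base QP and RSS steering, the drop QP,
 * the Tx CQs/rings, and finally the port-level configuration
 * (MTU/pause, default QP, INIT_PORT, broadcast attach) before the
 * Tx queues are started. Errors unwind in reverse via the labels
 * at the bottom.
 */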
int mlx4_en_start_port(struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_en_cq *cq;
	struct mlx4_en_tx_ring *tx_ring;
	int rx_index = 0;
	int tx_index = 0;
	int err = 0;
	int i;
	int j;
	u8 mc_list[16] = {0};

	if (priv->port_up) {
		en_dbg(DRV, priv, "start port called while port already up\n");
		return 0;
	}

	INIT_LIST_HEAD(&priv->mc_list);
	INIT_LIST_HEAD(&priv->curr_list);
	INIT_LIST_HEAD(&priv->ethtool_list);
	memset(&priv->ethtool_rules[0], 0,
	       sizeof(struct ethtool_flow_id) * MAX_NUM_OF_FS_RULES);

	/* Calculate Rx buf size */
	dev->mtu = min(dev->mtu, priv->max_mtu);
	mlx4_en_calc_rx_buf(dev);
	en_dbg(DRV, priv, "Rx buf size:%d\n", priv->rx_skb_size);

	/* Configure rx cq's and rings */
	err = mlx4_en_activate_rx_rings(priv);
	if (err) {
		en_err(priv, "Failed to activate RX rings\n");
		return err;
	}
	for (i = 0; i < priv->rx_ring_num; i++) {
		cq = &priv->rx_cq[i];

		err = mlx4_en_activate_cq(priv, cq, i);
		if (err) {
			en_err(priv, "Failed activating Rx CQ\n");
			goto cq_err;
		}
		for (j = 0; j < cq->size; j++)
			cq->buf[j].owner_sr_opcode = MLX4_CQE_OWNER_MASK;
		err = mlx4_en_set_cq_moder(priv, cq);
		if (err) {
			en_err(priv, "Failed setting cq moderation parameters");
			mlx4_en_deactivate_cq(priv, cq);
			goto cq_err;
		}
		mlx4_en_arm_cq(priv, cq);
		priv->rx_ring[i].cqn = cq->mcq.cqn;
		++rx_index;
	}

	/* Set qp number */
	en_dbg(DRV, priv, "Getting qp number for port %d\n", priv->port);
	err = mlx4_en_get_qp(priv);
	if (err) {
		en_err(priv, "Failed getting eth qp\n");
		goto cq_err;
	}
	mdev->mac_removed[priv->port] = 0;

	err = mlx4_en_config_rss_steer(priv);
	if (err) {
		en_err(priv, "Failed configuring rss steering\n");
		goto mac_err;
	}

	err = mlx4_en_create_drop_qp(priv);
	if (err)
		goto rss_err;

	/* Configure tx cq's and rings */
	for (i = 0; i < priv->tx_ring_num; i++) {
		/* Configure cq */
		cq = &priv->tx_cq[i];
		err = mlx4_en_activate_cq(priv, cq, i);
		if (err) {
			en_err(priv, "Failed allocating Tx CQ\n");
			goto tx_err;
		}
		err = mlx4_en_set_cq_moder(priv, cq);
		if (err) {
			en_err(priv, "Failed setting cq moderation parameters");
			mlx4_en_deactivate_cq(priv, cq);
			goto tx_err;
		}
		en_dbg(DRV, priv, "Resetting index of collapsed CQ:%d to -1\n", i);
		cq->buf->wqe_index = cpu_to_be16(0xffff);

		/* Configure ring */
		tx_ring = &priv->tx_ring[i];
		err = mlx4_en_activate_tx_ring(priv, tx_ring, cq->mcq.cqn,
					       i / priv->num_tx_rings_p_up);
		if (err) {
			en_err(priv, "Failed allocating Tx ring\n");
			mlx4_en_deactivate_cq(priv, cq);
			goto tx_err;
		}
		tx_ring->tx_queue = netdev_get_tx_queue(dev, i);

		/* Arm CQ for TX completions */
		mlx4_en_arm_cq(priv, cq);

		/* Set initial ownership of all Tx TXBBs to SW (1) */
		for (j = 0; j < tx_ring->buf_size; j += STAMP_STRIDE)
			*((u32 *) (tx_ring->buf + j)) = 0xffffffff;
		++tx_index;
	}

	/* Configure port */
	err = mlx4_SET_PORT_general(mdev->dev, priv->port,
				    priv->rx_skb_size + ETH_FCS_LEN,
				    priv->prof->tx_pause,
				    priv->prof->tx_ppp,
				    priv->prof->rx_pause,
				    priv->prof->rx_ppp);
	if (err) {
		en_err(priv, "Failed setting port general configurations for port %d, with error %d\n",
		       priv->port, err);
		goto tx_err;
	}
	/* Set default qp number */
	err = mlx4_SET_PORT_qpn_calc(mdev->dev, priv->port, priv->base_qpn, 0);
	if (err) {
		en_err(priv, "Failed setting default qp numbers\n");
		goto tx_err;
	}

	/* Init port */
	en_dbg(HW, priv, "Initializing port\n");
	err = mlx4_INIT_PORT(mdev->dev, priv->port);
	if (err) {
		en_err(priv, "Failed Initializing port\n");
		goto tx_err;
	}

	/* Attach rx QP to broadcast address */
	memset(&mc_list[10], 0xff, ETH_ALEN);
	mc_list[5] = priv->port; /* needed for B0 steering support */
	if (mlx4_multicast_attach(mdev->dev, &priv->rss_map.indir_qp, mc_list,
				  priv->port, 0, MLX4_PROT_ETH,
				  &priv->broadcast_id))
		mlx4_warn(mdev, "Failed Attaching Broadcast\n");

	/* Must redo promiscuous mode setup. */
	priv->flags &= ~(MLX4_EN_FLAG_PROMISC | MLX4_EN_FLAG_MC_PROMISC);

	/* Schedule multicast task to populate multicast list */
	queue_work(mdev->workqueue, &priv->rx_mode_task);

	mlx4_set_stats_bitmap(mdev->dev, &priv->stats_bitmap);

	priv->port_up = true;
	netif_tx_start_all_queues(dev);
	netif_device_attach(dev);

	return 0;

tx_err:
	while (tx_index--) {
		mlx4_en_deactivate_tx_ring(priv, &priv->tx_ring[tx_index]);
		mlx4_en_deactivate_cq(priv, &priv->tx_cq[tx_index]);
	}
	mlx4_en_destroy_drop_qp(priv);
rss_err:
	mlx4_en_release_rss_steer(priv);
mac_err:
	mlx4_en_put_qp(priv);
cq_err:
	while (rx_index--)
		mlx4_en_deactivate_cq(priv, &priv->rx_cq[rx_index]);
	for (i = 0; i < priv->rx_ring_num; i++)
		mlx4_en_deactivate_rx_ring(priv, &priv->rx_ring[i]);

	return err; /* need to close devices */
}
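
/*
 * Tear the port down in roughly the reverse order of
 * mlx4_en_start_port(): stop the Tx queues, drop promiscuous state,
 * detach broadcast and all cached multicast addresses, remove ethtool
 * flow rules, then free the drop QP, Tx rings, RSS QPs, the port
 * MAC/QP and the Rx rings before issuing CLOSE_PORT.
 */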
  1343. void mlx4_en_stop_port(struct net_device *dev, int detach)
  1344. {
  1345. struct mlx4_en_priv *priv = netdev_priv(dev);
  1346. struct mlx4_en_dev *mdev = priv->mdev;
  1347. struct mlx4_en_mc_list *mclist, *tmp;
  1348. struct ethtool_flow_id *flow, *tmp_flow;
  1349. int i;
  1350. u8 mc_list[16] = {0};
  1351. if (!priv->port_up) {
  1352. en_dbg(DRV, priv, "stop port called while port already down\n");
  1353. return;
  1354. }
  1355. /* Synchronize with tx routine */
  1356. netif_tx_lock_bh(dev);
  1357. if (detach)
  1358. netif_device_detach(dev);
  1359. netif_tx_stop_all_queues(dev);
  1360. netif_tx_unlock_bh(dev);
  1361. netif_tx_disable(dev);
  1362. /* Set port as not active */
  1363. priv->port_up = false;
	/* Promiscuous mode */
	if (mdev->dev->caps.steering_mode ==
	    MLX4_STEERING_MODE_DEVICE_MANAGED) {
		priv->flags &= ~(MLX4_EN_FLAG_PROMISC |
				 MLX4_EN_FLAG_MC_PROMISC);
		mlx4_flow_steer_promisc_remove(mdev->dev,
					       priv->port,
					       MLX4_FS_PROMISC_UPLINK);
		mlx4_flow_steer_promisc_remove(mdev->dev,
					       priv->port,
					       MLX4_FS_PROMISC_ALL_MULTI);
	} else if (priv->flags & MLX4_EN_FLAG_PROMISC) {
		priv->flags &= ~MLX4_EN_FLAG_PROMISC;

		/* Disable promiscuous mode */
		mlx4_unicast_promisc_remove(mdev->dev, priv->base_qpn,
					    priv->port);

		/* Disable Multicast promisc */
		if (priv->flags & MLX4_EN_FLAG_MC_PROMISC) {
			mlx4_multicast_promisc_remove(mdev->dev, priv->base_qpn,
						      priv->port);
			priv->flags &= ~MLX4_EN_FLAG_MC_PROMISC;
		}
	}
	/* Detach All multicasts */
	memset(&mc_list[10], 0xff, ETH_ALEN);
	mc_list[5] = priv->port; /* needed for B0 steering support */
	mlx4_multicast_detach(mdev->dev, &priv->rss_map.indir_qp, mc_list,
			      MLX4_PROT_ETH, priv->broadcast_id);
	list_for_each_entry(mclist, &priv->curr_list, list) {
		memcpy(&mc_list[10], mclist->addr, ETH_ALEN);
		mc_list[5] = priv->port;
		mlx4_multicast_detach(mdev->dev, &priv->rss_map.indir_qp,
				      mc_list, MLX4_PROT_ETH, mclist->reg_id);
	}
	mlx4_en_clear_list(dev);
	list_for_each_entry_safe(mclist, tmp, &priv->curr_list, list) {
		list_del(&mclist->list);
		kfree(mclist);
	}

	/* Flush multicast filter */
	mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0, 1, MLX4_MCAST_CONFIG);

	/* Remove flow steering rules for the port */
	if (mdev->dev->caps.steering_mode ==
	    MLX4_STEERING_MODE_DEVICE_MANAGED) {
		ASSERT_RTNL();
		list_for_each_entry_safe(flow, tmp_flow,
					 &priv->ethtool_list, list) {
			mlx4_flow_detach(mdev->dev, flow->id);
			list_del(&flow->list);
		}
	}

	mlx4_en_destroy_drop_qp(priv);

	/* Free TX Rings */
	for (i = 0; i < priv->tx_ring_num; i++) {
		mlx4_en_deactivate_tx_ring(priv, &priv->tx_ring[i]);
		mlx4_en_deactivate_cq(priv, &priv->tx_cq[i]);
	}
	msleep(10);

	for (i = 0; i < priv->tx_ring_num; i++)
		mlx4_en_free_tx_buf(dev, &priv->tx_ring[i]);

	/* Free RSS qps */
	mlx4_en_release_rss_steer(priv);

	/* Unregister Mac address for the port */
	mlx4_en_put_qp(priv);
	if (!(mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAGS2_REASSIGN_MAC_EN))
		mdev->mac_removed[priv->port] = 1;

	/* Free RX Rings */
	for (i = 0; i < priv->rx_ring_num; i++) {
		mlx4_en_deactivate_rx_ring(priv, &priv->rx_ring[i]);
		while (test_bit(NAPI_STATE_SCHED, &priv->rx_cq[i].napi.state))
			msleep(1);
		mlx4_en_deactivate_cq(priv, &priv->rx_cq[i]);
	}

	/* close port */
	mlx4_CLOSE_PORT(mdev->dev, priv->port);
}
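
/*
 * Watchdog worker: bounce the port (full stop + start) to recover the
 * device, e.g. after a failed restart in mlx4_en_change_mtu() queued this
 * task. Runs under mdev->state_lock so it cannot race with open/close.
 */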
static void mlx4_en_restart(struct work_struct *work)
{
	struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv,
						 watchdog_task);
	struct mlx4_en_dev *mdev = priv->mdev;
	struct net_device *dev = priv->dev;

	en_dbg(DRV, priv, "Watchdog task called for port %d\n", priv->port);

	mutex_lock(&mdev->state_lock);
	if (priv->port_up) {
		mlx4_en_stop_port(dev, 1);
		if (mlx4_en_start_port(dev))
			en_err(priv, "Failed restarting port %d\n", priv->port);
	}
	mutex_unlock(&mdev->state_lock);
}
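
/*
 * Reset the HW counters (DUMP_ETH_STATS with the reset flag set) and zero
 * every SW counter, so a freshly opened port starts counting from zero.
 */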
static void mlx4_en_clear_stats(struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	int i;

	if (mlx4_en_DUMP_ETH_STATS(mdev, priv->port, 1))
		en_dbg(HW, priv, "Failed dumping statistics\n");

	memset(&priv->stats, 0, sizeof(priv->stats));
	memset(&priv->pstats, 0, sizeof(priv->pstats));
	memset(&priv->pkstats, 0, sizeof(priv->pkstats));
	memset(&priv->port_stats, 0, sizeof(priv->port_stats));

	for (i = 0; i < priv->tx_ring_num; i++) {
		priv->tx_ring[i].bytes = 0;
		priv->tx_ring[i].packets = 0;
		priv->tx_ring[i].tx_csum = 0;
	}
	for (i = 0; i < priv->rx_ring_num; i++) {
		priv->rx_ring[i].bytes = 0;
		priv->rx_ring[i].packets = 0;
		priv->rx_ring[i].csum_ok = 0;
		priv->rx_ring[i].csum_none = 0;
	}
}
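
/* ndo_open: clear stale statistics, then bring the port up */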
static int mlx4_en_open(struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	int err = 0;

	mutex_lock(&mdev->state_lock);

	if (!mdev->device_up) {
		en_err(priv, "Cannot open - device down/disabled\n");
		err = -EBUSY;
		goto out;
	}

	/* Reset HW statistics and SW counters */
	mlx4_en_clear_stats(dev);

	err = mlx4_en_start_port(dev);
	if (err)
		en_err(priv, "Failed starting port:%d\n", priv->port);

out:
	mutex_unlock(&mdev->state_lock);
	return err;
}
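
/* ndo_stop: take the port down and drop the carrier indication */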
static int mlx4_en_close(struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;

	en_dbg(IFDOWN, priv, "Close port called\n");

	mutex_lock(&mdev->state_lock);

	mlx4_en_stop_port(dev, 0);
	netif_carrier_off(dev);

	mutex_unlock(&mdev->state_lock);
	return 0;
}
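
/*
 * Free every ring and CQ plus the reserved TX QP range. Safe on partially
 * allocated state: each object is checked before it is destroyed, which
 * is what lets mlx4_en_destroy_netdev() call this on a failed init path.
 */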
void mlx4_en_free_resources(struct mlx4_en_priv *priv)
{
	int i;

#ifdef CONFIG_RFS_ACCEL
	free_irq_cpu_rmap(priv->dev->rx_cpu_rmap);
	priv->dev->rx_cpu_rmap = NULL;
#endif

	for (i = 0; i < priv->tx_ring_num; i++) {
		if (priv->tx_ring[i].tx_info)
			mlx4_en_destroy_tx_ring(priv, &priv->tx_ring[i]);
		if (priv->tx_cq[i].buf)
			mlx4_en_destroy_cq(priv, &priv->tx_cq[i]);
	}

	for (i = 0; i < priv->rx_ring_num; i++) {
		if (priv->rx_ring[i].rx_info)
			mlx4_en_destroy_rx_ring(priv, &priv->rx_ring[i],
						priv->prof->rx_ring_size,
						priv->stride);
		if (priv->rx_cq[i].buf)
			mlx4_en_destroy_cq(priv, &priv->rx_cq[i]);
	}

	if (priv->base_tx_qpn) {
		mlx4_qp_release_range(priv->mdev->dev, priv->base_tx_qpn,
				      priv->tx_ring_num);
		priv->base_tx_qpn = 0;
	}
}
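
/*
 * Reserve a 256-aligned QP range for the TX rings, then create all TX/RX
 * rings and their completion queues. On failure, partially created
 * objects are left for mlx4_en_free_resources() to reclaim.
 */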
int mlx4_en_alloc_resources(struct mlx4_en_priv *priv)
{
	struct mlx4_en_port_profile *prof = priv->prof;
	int i;
	int err;

	err = mlx4_qp_reserve_range(priv->mdev->dev, priv->tx_ring_num, 256,
				    &priv->base_tx_qpn);
	if (err) {
		en_err(priv, "failed reserving range for TX rings\n");
		return err;
	}

	/* Create tx Rings */
	for (i = 0; i < priv->tx_ring_num; i++) {
		if (mlx4_en_create_cq(priv, &priv->tx_cq[i],
				      prof->tx_ring_size, i, TX))
			goto err;

		if (mlx4_en_create_tx_ring(priv, &priv->tx_ring[i],
					   priv->base_tx_qpn + i,
					   prof->tx_ring_size, TXBB_SIZE))
			goto err;
	}

	/* Create rx Rings */
	for (i = 0; i < priv->rx_ring_num; i++) {
		if (mlx4_en_create_cq(priv, &priv->rx_cq[i],
				      prof->rx_ring_size, i, RX))
			goto err;

		if (mlx4_en_create_rx_ring(priv, &priv->rx_ring[i],
					   prof->rx_ring_size, priv->stride))
			goto err;
	}

#ifdef CONFIG_RFS_ACCEL
	if (priv->mdev->dev->caps.comp_pool) {
		priv->dev->rx_cpu_rmap =
			alloc_irq_cpu_rmap(priv->mdev->dev->caps.comp_pool);
		if (!priv->dev->rx_cpu_rmap)
			goto err;
	}
#endif

	return 0;

err:
	en_err(priv, "Failed to allocate NIC resources\n");
	return -ENOMEM;
}
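
/*
 * Full teardown of one port's netdev: unregister it (closing the port if
 * it was still up), flush pending work, detach it from mdev->pndev so no
 * task can reach it, then free all rings and the netdev itself.
 */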
void mlx4_en_destroy_netdev(struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;

	en_dbg(DRV, priv, "Destroying netdev on port:%d\n", priv->port);

	/* Unregister device - this will close the port if it was up */
	if (priv->registered)
		unregister_netdev(dev);

	if (priv->allocated)
		mlx4_free_hwq_res(mdev->dev, &priv->res, MLX4_EN_PAGE_SIZE);

	cancel_delayed_work(&priv->stats_task);
	/* flush any pending task for this netdev */
	flush_workqueue(mdev->workqueue);

	/* Detach the netdev so tasks would not attempt to access it */
	mutex_lock(&mdev->state_lock);
	mdev->pndev[priv->port] = NULL;
	mutex_unlock(&mdev->state_lock);

	mlx4_en_free_resources(priv);

	kfree(priv->tx_ring);
	kfree(priv->tx_cq);

	free_netdev(dev);
}
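
/*
 * ndo_change_mtu: validate the new MTU against the port's capability and,
 * if the interface is running, restart the port so the RX buffers are
 * re-sized for the new frame length.
 */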
static int mlx4_en_change_mtu(struct net_device *dev, int new_mtu)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	int err = 0;

	en_dbg(DRV, priv, "Change MTU called - current:%d new:%d\n",
	       dev->mtu, new_mtu);

	if ((new_mtu < MLX4_EN_MIN_MTU) || (new_mtu > priv->max_mtu)) {
		en_err(priv, "Bad MTU size:%d.\n", new_mtu);
		return -EPERM;
	}
	dev->mtu = new_mtu;

	if (netif_running(dev)) {
		mutex_lock(&mdev->state_lock);
		if (!mdev->device_up) {
			/* NIC is probably restarting - let watchdog task reset
			 * the port */
			en_dbg(DRV, priv, "Change MTU called with card down!?\n");
		} else {
			mlx4_en_stop_port(dev, 1);
			err = mlx4_en_start_port(dev);
			if (err) {
				en_err(priv, "Failed restarting port:%d\n",
				       priv->port);
				queue_work(mdev->workqueue,
					   &priv->watchdog_task);
			}
		}
		mutex_unlock(&mdev->state_lock);
	}
	return 0;
}
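
/*
 * ndo_set_features: only NETIF_F_LOOPBACK is handled here - it toggles
 * the force-loopback bit carried in the control segment of every TX WQE.
 */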
static int mlx4_en_set_features(struct net_device *netdev,
				netdev_features_t features)
{
	struct mlx4_en_priv *priv = netdev_priv(netdev);

	if (features & NETIF_F_LOOPBACK)
		priv->ctrl_flags |= cpu_to_be32(MLX4_WQE_CTRL_FORCE_LOOPBACK);
	else
		priv->ctrl_flags &=
			cpu_to_be32(~MLX4_WQE_CTRL_FORCE_LOOPBACK);

	mlx4_en_update_loopback_state(netdev, features);

	return 0;
}
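
/*
 * FDB add for the embedded switch, available on multi-function devices
 * only. The HW cannot age addresses out, so only static (NUD_PERMANENT)
 * entries are accepted when a neighbour state is given.
 */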
static int mlx4_en_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
			   struct net_device *dev,
			   const unsigned char *addr, u16 flags)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_dev *mdev = priv->mdev->dev;
	int err;

	if (!mlx4_is_mfunc(mdev))
		return -EOPNOTSUPP;

	/* Hardware does not support aging addresses, allow only
	 * permanent addresses if ndm_state is given
	 */
	if (ndm->ndm_state && !(ndm->ndm_state & NUD_PERMANENT)) {
		en_info(priv, "Add FDB only supports static addresses\n");
		return -EINVAL;
	}

	if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr))
		err = dev_uc_add_excl(dev, addr);
	else if (is_multicast_ether_addr(addr))
		err = dev_mc_add_excl(dev, addr);
	else
		err = -EINVAL;

	/* Only return duplicate errors if NLM_F_EXCL is set */
	if (err == -EEXIST && !(flags & NLM_F_EXCL))
		err = 0;

	return err;
}
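
/* FDB del: the mirror image of mlx4_en_fdb_add() above */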
static int mlx4_en_fdb_del(struct ndmsg *ndm,
			   struct nlattr *tb[],
			   struct net_device *dev,
			   const unsigned char *addr)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_dev *mdev = priv->mdev->dev;
	int err;

	if (!mlx4_is_mfunc(mdev))
		return -EOPNOTSUPP;

	if (ndm->ndm_state && !(ndm->ndm_state & NUD_PERMANENT)) {
		en_info(priv, "Del FDB only supports static addresses\n");
		return -EINVAL;
	}

	if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr))
		err = dev_uc_del(dev, addr);
	else if (is_multicast_ether_addr(addr))
		err = dev_mc_del(dev, addr);
	else
		err = -EINVAL;

	return err;
}
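
/* FDB dump: defer to the generic netdev dump helper on mfunc devices */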
static int mlx4_en_fdb_dump(struct sk_buff *skb,
			    struct netlink_callback *cb,
			    struct net_device *dev, int idx)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_dev *mdev = priv->mdev->dev;

	if (mlx4_is_mfunc(mdev))
		idx = ndo_dflt_fdb_dump(skb, cb, dev, idx);

	return idx;
}
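
/* netdev entry points the core networking stack invokes on this device */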
static const struct net_device_ops mlx4_netdev_ops = {
	.ndo_open		= mlx4_en_open,
	.ndo_stop		= mlx4_en_close,
	.ndo_start_xmit		= mlx4_en_xmit,
	.ndo_select_queue	= mlx4_en_select_queue,
	.ndo_get_stats		= mlx4_en_get_stats,
	.ndo_set_rx_mode	= mlx4_en_set_rx_mode,
	.ndo_set_mac_address	= mlx4_en_set_mac,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_change_mtu		= mlx4_en_change_mtu,
	.ndo_tx_timeout		= mlx4_en_tx_timeout,
	.ndo_vlan_rx_add_vid	= mlx4_en_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= mlx4_en_vlan_rx_kill_vid,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= mlx4_en_netpoll,
#endif
	.ndo_set_features	= mlx4_en_set_features,
	.ndo_setup_tc		= mlx4_en_setup_tc,
#ifdef CONFIG_RFS_ACCEL
	.ndo_rx_flow_steer	= mlx4_en_filter_rfs,
#endif
	.ndo_fdb_add		= mlx4_en_fdb_add,
	.ndo_fdb_del		= mlx4_en_fdb_del,
	.ndo_fdb_dump		= mlx4_en_fdb_dump,
};
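
/*
 * mlx4_en_init_netdev() builds and registers the net_device for one
 * physical port: allocate the multiqueue etherdev, initialize the private
 * data and work items, validate the burned-in MAC, allocate rings/CQs,
 * advertise features, and push the initial port configuration to FW.
 */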
int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
			struct mlx4_en_port_profile *prof)
{
	struct net_device *dev;
	struct mlx4_en_priv *priv;
	int i;
	int err;

	dev = alloc_etherdev_mqs(sizeof(struct mlx4_en_priv),
				 MAX_TX_RINGS, MAX_RX_RINGS);
	if (dev == NULL)
		return -ENOMEM;

	netif_set_real_num_tx_queues(dev, prof->tx_ring_num);
	netif_set_real_num_rx_queues(dev, prof->rx_ring_num);

	SET_NETDEV_DEV(dev, &mdev->dev->pdev->dev);
	dev->dev_id = port - 1;

	/*
	 * Initialize driver private data
	 */
	priv = netdev_priv(dev);
	memset(priv, 0, sizeof(struct mlx4_en_priv));
	priv->dev = dev;
	priv->mdev = mdev;
	priv->ddev = &mdev->pdev->dev;
	priv->prof = prof;
	priv->port = port;
	priv->port_up = false;
	priv->flags = prof->flags;
	priv->ctrl_flags = cpu_to_be32(MLX4_WQE_CTRL_CQ_UPDATE |
				       MLX4_WQE_CTRL_SOLICITED);
	priv->num_tx_rings_p_up = mdev->profile.num_tx_rings_p_up;
	priv->tx_ring_num = prof->tx_ring_num;
	priv->tx_ring = kzalloc(sizeof(struct mlx4_en_tx_ring) * MAX_TX_RINGS,
				GFP_KERNEL);
	if (!priv->tx_ring) {
		err = -ENOMEM;
		goto out;
	}
	priv->tx_cq = kzalloc(sizeof(struct mlx4_en_cq) * MAX_TX_RINGS,
			      GFP_KERNEL);
	if (!priv->tx_cq) {
		err = -ENOMEM;
		goto out;
	}
	priv->rx_ring_num = prof->rx_ring_num;
	priv->cqe_factor = (mdev->dev->caps.cqe_size == 64) ? 1 : 0;
	priv->mac_index = -1;
	priv->msg_enable = MLX4_EN_MSG_LEVEL;
	spin_lock_init(&priv->stats_lock);
	INIT_WORK(&priv->rx_mode_task, mlx4_en_do_set_rx_mode);
	INIT_WORK(&priv->watchdog_task, mlx4_en_restart);
	INIT_WORK(&priv->linkstate_task, mlx4_en_linkstate);
	INIT_DELAYED_WORK(&priv->stats_task, mlx4_en_do_get_stats);
#ifdef CONFIG_MLX4_EN_DCB
	if (!mlx4_is_slave(priv->mdev->dev))
		dev->dcbnl_ops = &mlx4_en_dcbnl_ops;
#endif

	for (i = 0; i < MLX4_EN_MAC_HASH_SIZE; ++i)
		INIT_HLIST_HEAD(&priv->mac_hash[i]);
	/* Query for default mac and max mtu */
	priv->max_mtu = mdev->dev->caps.eth_mtu_cap[priv->port];

	/* Set default MAC */
	dev->addr_len = ETH_ALEN;
	mlx4_en_u64_to_mac(dev->dev_addr, mdev->dev->caps.def_mac[priv->port]);
	if (!is_valid_ether_addr(dev->dev_addr)) {
		en_err(priv, "Port: %d, invalid mac burned: %pM, quitting\n",
		       priv->port, dev->dev_addr);
		err = -EINVAL;
		goto out;
	}
	memcpy(priv->prev_mac, dev->dev_addr, sizeof(priv->prev_mac));
	priv->stride = roundup_pow_of_two(sizeof(struct mlx4_en_rx_desc) +
					  DS_SIZE * MLX4_EN_MAX_RX_FRAGS);

	err = mlx4_en_alloc_resources(priv);
	if (err)
		goto out;

#ifdef CONFIG_RFS_ACCEL
	INIT_LIST_HEAD(&priv->filters);
	spin_lock_init(&priv->filters_lock);
#endif

	/* Allocate page for receive rings */
	err = mlx4_alloc_hwq_res(mdev->dev, &priv->res,
				 MLX4_EN_PAGE_SIZE, MLX4_EN_PAGE_SIZE);
	if (err) {
		en_err(priv, "Failed to allocate page for rx qps\n");
		goto out;
	}
	priv->allocated = 1;

	/*
	 * Initialize netdev entry points
	 */
	dev->netdev_ops = &mlx4_netdev_ops;
	dev->watchdog_timeo = MLX4_EN_WATCHDOG_TIMEOUT;
	netif_set_real_num_tx_queues(dev, priv->tx_ring_num);
	netif_set_real_num_rx_queues(dev, priv->rx_ring_num);

	SET_ETHTOOL_OPS(dev, &mlx4_en_ethtool_ops);

	/*
	 * Set driver features
	 */
	dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
	if (mdev->LSO_support)
		dev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6;

	dev->vlan_features = dev->hw_features;

	dev->hw_features |= NETIF_F_RXCSUM | NETIF_F_RXHASH;
	dev->features = dev->hw_features | NETIF_F_HIGHDMA |
			NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX |
			NETIF_F_HW_VLAN_FILTER;
	dev->hw_features |= NETIF_F_LOOPBACK;

	if (mdev->dev->caps.steering_mode ==
	    MLX4_STEERING_MODE_DEVICE_MANAGED)
		dev->hw_features |= NETIF_F_NTUPLE;

	if (mdev->dev->caps.steering_mode != MLX4_STEERING_MODE_A0)
		dev->priv_flags |= IFF_UNICAST_FLT;

	mdev->pndev[port] = dev;

	netif_carrier_off(dev);
	err = register_netdev(dev);
	if (err) {
		en_err(priv, "Netdev registration failed for port %d\n", port);
		goto out;
	}
	priv->registered = 1;

	en_warn(priv, "Using %d TX rings\n", prof->tx_ring_num);
	en_warn(priv, "Using %d RX rings\n", prof->rx_ring_num);

	mlx4_en_update_loopback_state(priv->dev, priv->dev->features);

	/* Configure port */
	mlx4_en_calc_rx_buf(dev);
	err = mlx4_SET_PORT_general(mdev->dev, priv->port,
				    priv->rx_skb_size + ETH_FCS_LEN,
				    prof->tx_pause, prof->tx_ppp,
				    prof->rx_pause, prof->rx_ppp);
	if (err) {
		en_err(priv, "Failed setting port general configurations for port %d, with error %d\n",
		       priv->port, err);
		goto out;
	}

	/* Init port */
	en_warn(priv, "Initializing port\n");
	err = mlx4_INIT_PORT(mdev->dev, priv->port);
	if (err) {
		en_err(priv, "Failed Initializing port\n");
		goto out;
	}
	mlx4_en_set_default_moderation(priv);
	queue_delayed_work(mdev->workqueue, &priv->stats_task, STATS_DELAY);
	return 0;

out:
	mlx4_en_destroy_netdev(dev);
	return err;
}