
/*
 * Copyright (c) 2007 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */

#include <linux/etherdevice.h>
#include <linux/tcp.h>
#include <linux/if_vlan.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/hash.h>
#include <net/ip.h>

#include <linux/mlx4/driver.h>
#include <linux/mlx4/device.h>
#include <linux/mlx4/cmd.h>
#include <linux/mlx4/cq.h>

#include "mlx4_en.h"
#include "en_port.h"

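/*
 * Map the device's TX queues onto "up" user priorities (traffic
 * classes).  Each UP gets a contiguous block of num_tx_rings_p_up
 * queues; e.g. with num_tx_rings_p_up == 8, UP 1 owns queues 8-15.
 */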
int mlx4_en_setup_tc(struct net_device *dev, u8 up)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	int i;
	unsigned int offset = 0;

	if (up && up != MLX4_EN_NUM_UP)
		return -EINVAL;

	netdev_set_num_tc(dev, up);

	/* Partition Tx queues evenly among UPs */
	for (i = 0; i < up; i++) {
		netdev_set_tc_queue(dev, i, priv->num_tx_rings_p_up, offset);
		offset += priv->num_tx_rings_p_up;
	}

	return 0;
}

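/*
 * Accelerated RFS (aRFS): with CONFIG_RFS_ACCEL the stack can ask the
 * driver to steer individual TCP flows to the RX queue closest to the
 * consuming CPU.  Each steered flow is tracked by a struct
 * mlx4_en_filter, kept both on priv->filters (for expiry) and in
 * priv->filter_hash (for lookup).
 */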
#ifdef CONFIG_RFS_ACCEL
struct mlx4_en_filter {
	struct list_head next;
	struct work_struct work;

	__be32 src_ip;
	__be32 dst_ip;
	__be16 src_port;
	__be16 dst_port;

	int rxq_index;
	struct mlx4_en_priv *priv;
	u32 flow_id;			/* RFS infrastructure id */
	int id;				/* mlx4_en driver id */
	u64 reg_id;			/* Flow steering API id */
	u8 activated;			/* Used to prevent expiry before filter
					 * is attached
					 */
	struct hlist_node filter_chain;
};

static void mlx4_en_filter_rfs_expire(struct mlx4_en_priv *priv);

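/*
 * Deferred attach of an aRFS filter.  Builds a device-managed flow
 * steering rule matching the fully-masked 5-tuple (dst MAC, IPv4
 * src/dst, TCP src/dst port) and points it at the QP of the requested
 * RX ring.  If the filter was previously attached to another ring, the
 * old rule is detached first.
 */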
static void mlx4_en_filter_work(struct work_struct *work)
{
	struct mlx4_en_filter *filter = container_of(work,
						     struct mlx4_en_filter,
						     work);
	struct mlx4_en_priv *priv = filter->priv;
	struct mlx4_spec_list spec_tcp = {
		.id = MLX4_NET_TRANS_RULE_ID_TCP,
		{
			.tcp_udp = {
				.dst_port = filter->dst_port,
				.dst_port_msk = (__force __be16)-1,
				.src_port = filter->src_port,
				.src_port_msk = (__force __be16)-1,
			},
		},
	};
	struct mlx4_spec_list spec_ip = {
		.id = MLX4_NET_TRANS_RULE_ID_IPV4,
		{
			.ipv4 = {
				.dst_ip = filter->dst_ip,
				.dst_ip_msk = (__force __be32)-1,
				.src_ip = filter->src_ip,
				.src_ip_msk = (__force __be32)-1,
			},
		},
	};
	struct mlx4_spec_list spec_eth = {
		.id = MLX4_NET_TRANS_RULE_ID_ETH,
	};
	struct mlx4_net_trans_rule rule = {
		.list = LIST_HEAD_INIT(rule.list),
		.queue_mode = MLX4_NET_TRANS_Q_LIFO,
		.exclusive = 1,
		.allow_loopback = 1,
		.promisc_mode = MLX4_FS_PROMISC_NONE,
		.port = priv->port,
		.priority = MLX4_DOMAIN_RFS,
	};
	int rc;
	__be64 mac_mask = cpu_to_be64(MLX4_MAC_MASK << 16);

	list_add_tail(&spec_eth.list, &rule.list);
	list_add_tail(&spec_ip.list, &rule.list);
	list_add_tail(&spec_tcp.list, &rule.list);

	rule.qpn = priv->rss_map.qps[filter->rxq_index].qpn;
	memcpy(spec_eth.eth.dst_mac, priv->dev->dev_addr, ETH_ALEN);
	memcpy(spec_eth.eth.dst_mac_msk, &mac_mask, ETH_ALEN);

	filter->activated = 0;

	if (filter->reg_id) {
		rc = mlx4_flow_detach(priv->mdev->dev, filter->reg_id);
		if (rc && rc != -ENOENT)
			en_err(priv, "Error detaching flow. rc = %d\n", rc);
	}

	rc = mlx4_flow_attach(priv->mdev->dev, &rule, &filter->reg_id);
	if (rc)
		en_err(priv, "Error attaching flow. err = %d\n", rc);

	mlx4_en_filter_rfs_expire(priv);

	filter->activated = 1;
}

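/*
 * Select the hash bucket for a flow: fold src_port | (dst_port << 2)
 * together with (src_ip ^ dst_ip) into one word and let hash_long()
 * reduce it to MLX4_EN_FILTER_HASH_SHIFT bits.
 */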
static inline struct hlist_head *
filter_hash_bucket(struct mlx4_en_priv *priv, __be32 src_ip, __be32 dst_ip,
		   __be16 src_port, __be16 dst_port)
{
	unsigned long l;
	int bucket_idx;

	l = (__force unsigned long)src_port |
	    ((__force unsigned long)dst_port << 2);
	l ^= (__force unsigned long)(src_ip ^ dst_ip);

	bucket_idx = hash_long(l, MLX4_EN_FILTER_HASH_SHIFT);

	return &priv->filter_hash[bucket_idx];
}

static struct mlx4_en_filter *
mlx4_en_filter_alloc(struct mlx4_en_priv *priv, int rxq_index, __be32 src_ip,
		     __be32 dst_ip, __be16 src_port, __be16 dst_port,
		     u32 flow_id)
{
	struct mlx4_en_filter *filter = NULL;

	filter = kzalloc(sizeof(struct mlx4_en_filter), GFP_ATOMIC);
	if (!filter)
		return NULL;

	filter->priv = priv;
	filter->rxq_index = rxq_index;
	INIT_WORK(&filter->work, mlx4_en_filter_work);

	filter->src_ip = src_ip;
	filter->dst_ip = dst_ip;
	filter->src_port = src_port;
	filter->dst_port = dst_port;

	filter->flow_id = flow_id;

	filter->id = priv->last_filter_id++ % RPS_NO_FILTER;

	list_add_tail(&filter->next, &priv->filters);
	hlist_add_head(&filter->filter_chain,
		       filter_hash_bucket(priv, src_ip, dst_ip, src_port,
					  dst_port));

	return filter;
}

static void mlx4_en_filter_free(struct mlx4_en_filter *filter)
{
	struct mlx4_en_priv *priv = filter->priv;
	int rc;

	list_del(&filter->next);

	rc = mlx4_flow_detach(priv->mdev->dev, filter->reg_id);
	if (rc && rc != -ENOENT)
		en_err(priv, "Error detaching flow. rc = %d\n", rc);

	kfree(filter);
}

static inline struct mlx4_en_filter *
mlx4_en_filter_find(struct mlx4_en_priv *priv, __be32 src_ip, __be32 dst_ip,
		    __be16 src_port, __be16 dst_port)
{
	struct hlist_node *elem;
	struct mlx4_en_filter *filter;
	struct mlx4_en_filter *ret = NULL;

	hlist_for_each_entry(filter, elem,
			     filter_hash_bucket(priv, src_ip, dst_ip,
						src_port, dst_port),
			     filter_chain) {
		if (filter->src_ip == src_ip &&
		    filter->dst_ip == dst_ip &&
		    filter->src_port == src_port &&
		    filter->dst_port == dst_port) {
			ret = filter;
			break;
		}
	}

	return ret;
}

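/*
 * aRFS steering callback (ndo_rx_flow_steer).  Only non-fragmented
 * TCP/IPv4 flows are steered.  Under filters_lock we either retarget
 * an existing filter to the new RX queue or allocate a fresh one, then
 * hand the actual rule programming to the workqueue, since it issues
 * firmware commands and may sleep.  Returns the driver-local filter id
 * on success.
 */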
static int
mlx4_en_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb,
		   u16 rxq_index, u32 flow_id)
{
	struct mlx4_en_priv *priv = netdev_priv(net_dev);
	struct mlx4_en_filter *filter;
	const struct iphdr *ip;
	const __be16 *ports;
	__be32 src_ip;
	__be32 dst_ip;
	__be16 src_port;
	__be16 dst_port;
	int nhoff = skb_network_offset(skb);
	int ret = 0;

	if (skb->protocol != htons(ETH_P_IP))
		return -EPROTONOSUPPORT;

	ip = (const struct iphdr *)(skb->data + nhoff);
	if (ip_is_fragment(ip))
		return -EPROTONOSUPPORT;

	ports = (const __be16 *)(skb->data + nhoff + 4 * ip->ihl);

	src_ip = ip->saddr;
	dst_ip = ip->daddr;
	src_port = ports[0];
	dst_port = ports[1];

	if (ip->protocol != IPPROTO_TCP)
		return -EPROTONOSUPPORT;

	spin_lock_bh(&priv->filters_lock);
	filter = mlx4_en_filter_find(priv, src_ip, dst_ip, src_port, dst_port);
	if (filter) {
		if (filter->rxq_index == rxq_index)
			goto out;

		filter->rxq_index = rxq_index;
	} else {
		filter = mlx4_en_filter_alloc(priv, rxq_index,
					      src_ip, dst_ip,
					      src_port, dst_port, flow_id);
		if (!filter) {
			ret = -ENOMEM;
			goto err;
		}
	}

	queue_work(priv->mdev->workqueue, &filter->work);

out:
	ret = filter->id;
err:
	spin_unlock_bh(&priv->filters_lock);

	return ret;
}

void mlx4_en_cleanup_filters(struct mlx4_en_priv *priv,
			     struct mlx4_en_rx_ring *rx_ring)
{
	struct mlx4_en_filter *filter, *tmp;
	LIST_HEAD(del_list);

	spin_lock_bh(&priv->filters_lock);
	list_for_each_entry_safe(filter, tmp, &priv->filters, next) {
		list_move(&filter->next, &del_list);
		hlist_del(&filter->filter_chain);
	}
	spin_unlock_bh(&priv->filters_lock);

	list_for_each_entry_safe(filter, tmp, &del_list, next) {
		cancel_work_sync(&filter->work);
		mlx4_en_filter_free(filter);
	}
}

static void mlx4_en_filter_rfs_expire(struct mlx4_en_priv *priv)
{
	struct mlx4_en_filter *filter = NULL, *tmp, *last_filter = NULL;
	LIST_HEAD(del_list);
	int i = 0;

	spin_lock_bh(&priv->filters_lock);
	list_for_each_entry_safe(filter, tmp, &priv->filters, next) {
		if (i > MLX4_EN_FILTER_EXPIRY_QUOTA)
			break;

		if (filter->activated &&
		    !work_pending(&filter->work) &&
		    rps_may_expire_flow(priv->dev,
					filter->rxq_index, filter->flow_id,
					filter->id)) {
			list_move(&filter->next, &del_list);
			hlist_del(&filter->filter_chain);
		} else
			last_filter = filter;

		i++;
	}

	if (last_filter && (&last_filter->next != priv->filters.next))
		list_move(&priv->filters, &last_filter->next);

	spin_unlock_bh(&priv->filters_lock);

	list_for_each_entry_safe(filter, tmp, &del_list, next)
		mlx4_en_filter_free(filter);
}
#endif

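/*
 * VLAN acceleration callbacks: track active VIDs in a bitmap, refresh
 * the port VLAN filter while the port is up, and (un)register the VID
 * with the device's VLAN table.
 */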
static int mlx4_en_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	int err;
	int idx;

	en_dbg(HW, priv, "adding VLAN:%d\n", vid);

	set_bit(vid, priv->active_vlans);

	/* Add VID to port VLAN filter */
	mutex_lock(&mdev->state_lock);
	if (mdev->device_up && priv->port_up) {
		err = mlx4_SET_VLAN_FLTR(mdev->dev, priv);
		if (err)
			en_err(priv, "Failed configuring VLAN filter\n");
	}
	if (mlx4_register_vlan(mdev->dev, priv->port, vid, &idx))
		en_err(priv, "failed adding vlan %d\n", vid);
	mutex_unlock(&mdev->state_lock);

	return 0;
}

static int mlx4_en_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	int err;
	int idx;

	en_dbg(HW, priv, "Killing VID:%d\n", vid);

	clear_bit(vid, priv->active_vlans);

	/* Remove VID from port VLAN filter */
	mutex_lock(&mdev->state_lock);
	if (!mlx4_find_cached_vlan(mdev->dev, priv->port, vid, &idx))
		mlx4_unregister_vlan(mdev->dev, priv->port, idx);
	else
		en_err(priv, "could not find vid %d in cache\n", vid);

	if (mdev->device_up && priv->port_up) {
		err = mlx4_SET_VLAN_FLTR(mdev->dev, priv);
		if (err)
			en_err(priv, "Failed configuring VLAN filter\n");
	}
	mutex_unlock(&mdev->state_lock);

	return 0;
}

static void mlx4_en_u64_to_mac(unsigned char dst_mac[ETH_ALEN + 2], u64 src_mac)
{
	int i;

	/* Unpack the low 48 bits of src_mac, most significant octet
	 * first.  i must be signed and the loop must include i == 0;
	 * as written before ("unsigned int i; i;") the first MAC byte
	 * was never filled in.
	 */
	for (i = ETH_ALEN - 1; i >= 0; --i) {
		dst_mac[i] = src_mac & 0xff;
		src_mac >>= 8;
	}
	memset(&dst_mac[ETH_ALEN], 0, 2);
}

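/*
 * Attach unicast steering for a MAC.  In B0 mode the MAC travels in a
 * 16-byte GID: bytes 10..15 hold the MAC and byte 5 the port number
 * (the same layout the multicast attachments below use).  In
 * device-managed mode an ETH flow-steering rule matching the
 * destination MAC is attached instead, and *reg_id is returned so the
 * rule can be detached later.
 */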
static int mlx4_en_uc_steer_add(struct mlx4_en_priv *priv,
				unsigned char *mac, int *qpn, u64 *reg_id)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_dev *dev = mdev->dev;
	int err;

	switch (dev->caps.steering_mode) {
	case MLX4_STEERING_MODE_B0: {
		struct mlx4_qp qp;
		u8 gid[16] = {0};

		qp.qpn = *qpn;
		memcpy(&gid[10], mac, ETH_ALEN);
		gid[5] = priv->port;

		err = mlx4_unicast_attach(dev, &qp, gid, 0, MLX4_PROT_ETH);
		break;
	}
	case MLX4_STEERING_MODE_DEVICE_MANAGED: {
		struct mlx4_spec_list spec_eth = { {NULL} };
		__be64 mac_mask = cpu_to_be64(MLX4_MAC_MASK << 16);

		struct mlx4_net_trans_rule rule = {
			.queue_mode = MLX4_NET_TRANS_Q_FIFO,
			.exclusive = 0,
			.allow_loopback = 1,
			.promisc_mode = MLX4_FS_PROMISC_NONE,
			.priority = MLX4_DOMAIN_NIC,
		};

		rule.port = priv->port;
		rule.qpn = *qpn;
		INIT_LIST_HEAD(&rule.list);

		spec_eth.id = MLX4_NET_TRANS_RULE_ID_ETH;
		memcpy(spec_eth.eth.dst_mac, mac, ETH_ALEN);
		memcpy(spec_eth.eth.dst_mac_msk, &mac_mask, ETH_ALEN);
		list_add_tail(&spec_eth.list, &rule.list);

		err = mlx4_flow_attach(dev, &rule, reg_id);
		break;
	}
	default:
		return -EINVAL;
	}
	if (err)
		en_warn(priv, "Failed Attaching Unicast\n");

	return err;
}

static void mlx4_en_uc_steer_release(struct mlx4_en_priv *priv,
				     unsigned char *mac, int qpn, u64 reg_id)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_dev *dev = mdev->dev;

	switch (dev->caps.steering_mode) {
	case MLX4_STEERING_MODE_B0: {
		struct mlx4_qp qp;
		u8 gid[16] = {0};

		qp.qpn = qpn;
		memcpy(&gid[10], mac, ETH_ALEN);
		gid[5] = priv->port;

		mlx4_unicast_detach(dev, &qp, gid, MLX4_PROT_ETH);
		break;
	}
	case MLX4_STEERING_MODE_DEVICE_MANAGED: {
		mlx4_flow_detach(dev, reg_id);
		break;
	}
	default:
		en_err(priv, "Invalid steering mode.\n");
	}
}

static int mlx4_en_get_qp(struct mlx4_en_priv *priv)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_dev *dev = mdev->dev;
	struct mlx4_mac_entry *entry;
	int index = 0;
	int err = 0;
	u64 reg_id;
	int *qpn = &priv->base_qpn;
	u64 mac = mlx4_en_mac_to_u64(priv->dev->dev_addr);

	en_dbg(DRV, priv, "Registering MAC: %pM for adding\n",
	       priv->dev->dev_addr);
	index = mlx4_register_mac(dev, priv->port, mac);
	if (index < 0) {
		err = index;
		en_err(priv, "Failed adding MAC: %pM\n",
		       priv->dev->dev_addr);
		return err;
	}

	if (dev->caps.steering_mode == MLX4_STEERING_MODE_A0) {
		int base_qpn = mlx4_get_base_qpn(dev, priv->port);
		*qpn = base_qpn + index;
		return 0;
	}

	err = mlx4_qp_reserve_range(dev, 1, 1, qpn);
	en_dbg(DRV, priv, "Reserved qp %d\n", *qpn);
	if (err) {
		en_err(priv, "Failed to reserve qp for mac registration\n");
		goto qp_err;
	}

	err = mlx4_en_uc_steer_add(priv, priv->dev->dev_addr, qpn, &reg_id);
	if (err)
		goto steer_err;

	entry = kmalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry) {
		err = -ENOMEM;
		goto alloc_err;
	}
	memcpy(entry->mac, priv->dev->dev_addr, sizeof(entry->mac));
	entry->reg_id = reg_id;

	hlist_add_head_rcu(&entry->hlist,
			   &priv->mac_hash[entry->mac[MLX4_EN_MAC_HASH_IDX]]);

	return 0;

alloc_err:
	mlx4_en_uc_steer_release(priv, priv->dev->dev_addr, *qpn, reg_id);

steer_err:
	mlx4_qp_release_range(dev, *qpn, 1);

qp_err:
	mlx4_unregister_mac(dev, priv->port, mac);
	return err;
}

static void mlx4_en_put_qp(struct mlx4_en_priv *priv)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_dev *dev = mdev->dev;
	int qpn = priv->base_qpn;
	u64 mac = mlx4_en_mac_to_u64(priv->dev->dev_addr);

	en_dbg(DRV, priv, "Registering MAC: %pM for deleting\n",
	       priv->dev->dev_addr);
	mlx4_unregister_mac(dev, priv->port, mac);

	if (dev->caps.steering_mode != MLX4_STEERING_MODE_A0) {
		struct mlx4_mac_entry *entry;
		struct hlist_node *n, *tmp;
		struct hlist_head *bucket;
		unsigned int mac_hash;

		mac_hash = priv->dev->dev_addr[MLX4_EN_MAC_HASH_IDX];
		bucket = &priv->mac_hash[mac_hash];
		hlist_for_each_entry_safe(entry, n, tmp, bucket, hlist) {
			if (ether_addr_equal_64bits(entry->mac,
						    priv->dev->dev_addr)) {
				en_dbg(DRV, priv, "Releasing qp: port %d, MAC %pM, qpn %d\n",
				       priv->port, priv->dev->dev_addr, qpn);
				mlx4_en_uc_steer_release(priv, entry->mac,
							 qpn, entry->reg_id);
				mlx4_qp_release_range(dev, qpn, 1);

				hlist_del_rcu(&entry->hlist);
				kfree_rcu(entry, rcu);
				break;
			}
		}
	}
}

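/*
 * Replace prev_mac with new_mac on the given QP.  In A0 mode this is a
 * single firmware call; otherwise the mac_hash entry is rehashed under
 * RCU: release the old steering rule, unregister the old MAC, move the
 * entry to the new MAC's bucket, then register and re-attach.
 */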
static int mlx4_en_replace_mac(struct mlx4_en_priv *priv, int qpn,
			       unsigned char *new_mac, unsigned char *prev_mac)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_dev *dev = mdev->dev;
	int err = 0;
	u64 new_mac_u64 = mlx4_en_mac_to_u64(new_mac);

	if (dev->caps.steering_mode != MLX4_STEERING_MODE_A0) {
		struct hlist_head *bucket;
		unsigned int mac_hash;
		struct mlx4_mac_entry *entry;
		struct hlist_node *n, *tmp;
		u64 prev_mac_u64 = mlx4_en_mac_to_u64(prev_mac);

		bucket = &priv->mac_hash[prev_mac[MLX4_EN_MAC_HASH_IDX]];
		hlist_for_each_entry_safe(entry, n, tmp, bucket, hlist) {
			if (ether_addr_equal_64bits(entry->mac, prev_mac)) {
				mlx4_en_uc_steer_release(priv, entry->mac,
							 qpn, entry->reg_id);
				mlx4_unregister_mac(dev, priv->port,
						    prev_mac_u64);
				hlist_del_rcu(&entry->hlist);
				synchronize_rcu();
				memcpy(entry->mac, new_mac, ETH_ALEN);
				entry->reg_id = 0;
				mac_hash = new_mac[MLX4_EN_MAC_HASH_IDX];
				hlist_add_head_rcu(&entry->hlist,
						   &priv->mac_hash[mac_hash]);
				mlx4_register_mac(dev, priv->port, new_mac_u64);
				err = mlx4_en_uc_steer_add(priv, new_mac,
							   &qpn,
							   &entry->reg_id);
				return err;
			}
		}
		return -EINVAL;
	}

	return __mlx4_replace_mac(dev, priv->port, qpn, new_mac_u64);
}

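/*
 * Pack a MAC address into the low 48 bits of a u64, first octet most
 * significant: 00:11:22:33:44:55 becomes 0x001122334455.
 */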
u64 mlx4_en_mac_to_u64(u8 *addr)
{
	u64 mac = 0;
	int i;

	for (i = 0; i < ETH_ALEN; i++) {
		mac <<= 8;
		mac |= addr[i];
	}
	return mac;
}

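/*
 * ndo_set_mac_address only validates and records the new address; the
 * hardware update is deferred to mlx4_en_do_set_mac() on the driver
 * workqueue, since it takes mdev->state_lock and issues firmware
 * commands that may sleep.
 */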
static int mlx4_en_set_mac(struct net_device *dev, void *addr)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	struct sockaddr *saddr = addr;

	if (!is_valid_ether_addr(saddr->sa_data))
		return -EADDRNOTAVAIL;

	memcpy(dev->dev_addr, saddr->sa_data, ETH_ALEN);
	queue_work(mdev->workqueue, &priv->mac_task);
	return 0;
}

static void mlx4_en_do_set_mac(struct work_struct *work)
{
	struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv,
						 mac_task);
	struct mlx4_en_dev *mdev = priv->mdev;
	int err = 0;

	mutex_lock(&mdev->state_lock);
	if (priv->port_up) {
		/* Remove old MAC and insert the new one */
		err = mlx4_en_replace_mac(priv, priv->base_qpn,
					  priv->dev->dev_addr, priv->prev_mac);
		if (err)
			en_err(priv, "Failed changing HW MAC address\n");
		memcpy(priv->prev_mac, priv->dev->dev_addr,
		       sizeof(priv->prev_mac));
	} else
		en_dbg(HW, priv, "Port is down while registering mac, exiting...\n");

	mutex_unlock(&mdev->state_lock);
}

static void mlx4_en_clear_list(struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_mc_list *tmp, *mc_to_del;

	list_for_each_entry_safe(mc_to_del, tmp, &priv->mc_list, list) {
		list_del(&mc_to_del->list);
		kfree(mc_to_del);
	}
}

static void mlx4_en_cache_mclist(struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct netdev_hw_addr *ha;
	struct mlx4_en_mc_list *tmp;

	mlx4_en_clear_list(dev);
	netdev_for_each_mc_addr(ha, dev) {
		tmp = kzalloc(sizeof(struct mlx4_en_mc_list), GFP_ATOMIC);
		if (!tmp) {
			en_err(priv, "failed to allocate multicast list\n");
			mlx4_en_clear_list(dev);
			return;
		}
		memcpy(tmp->addr, ha->addr, ETH_ALEN);
		list_add_tail(&tmp->list, &priv->mc_list);
	}
}

static void update_mclist_flags(struct mlx4_en_priv *priv,
				struct list_head *dst,
				struct list_head *src)
{
	struct mlx4_en_mc_list *dst_tmp, *src_tmp, *new_mc;
	bool found;

	/* Find all the entries that should be removed from dst:
	 * these are the entries that are not found in src.
	 */
	list_for_each_entry(dst_tmp, dst, list) {
		found = false;
		list_for_each_entry(src_tmp, src, list) {
			if (!memcmp(dst_tmp->addr, src_tmp->addr, ETH_ALEN)) {
				found = true;
				break;
			}
		}
		if (!found)
			dst_tmp->action = MCLIST_REM;
	}

	/* Copy entries that exist in src but not in dst into dst and
	 * mark them as needing to be added.
	 */
	list_for_each_entry(src_tmp, src, list) {
		found = false;
		list_for_each_entry(dst_tmp, dst, list) {
			if (!memcmp(dst_tmp->addr, src_tmp->addr, ETH_ALEN)) {
				dst_tmp->action = MCLIST_NONE;
				found = true;
				break;
			}
		}
		if (!found) {
			new_mc = kmalloc(sizeof(struct mlx4_en_mc_list),
					 GFP_KERNEL);
			if (!new_mc) {
				en_err(priv, "Failed to allocate current multicast list\n");
				return;
			}
			memcpy(new_mc, src_tmp,
			       sizeof(struct mlx4_en_mc_list));
			new_mc->action = MCLIST_ADD;
			list_add_tail(&new_mc->list, dst);
		}
	}
}

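/*
 * ndo_set_rx_mode is invoked with the netdev address-list lock held,
 * so the real work (firmware commands under mdev->state_lock) is
 * punted to rx_mode_task on the driver workqueue.
 */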
static void mlx4_en_set_rx_mode(struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);

	if (!priv->port_up)
		return;

	queue_work(priv->mdev->workqueue, &priv->rx_mode_task);
}

static void mlx4_en_set_promisc_mode(struct mlx4_en_priv *priv,
				     struct mlx4_en_dev *mdev)
{
	int err = 0;

	if (!(priv->flags & MLX4_EN_FLAG_PROMISC)) {
		if (netif_msg_rx_status(priv))
			en_warn(priv, "Entering promiscuous mode\n");
		priv->flags |= MLX4_EN_FLAG_PROMISC;

		/* Enable promiscuous mode */
		switch (mdev->dev->caps.steering_mode) {
		case MLX4_STEERING_MODE_DEVICE_MANAGED:
			err = mlx4_flow_steer_promisc_add(mdev->dev,
							  priv->port,
							  priv->base_qpn,
							  MLX4_FS_PROMISC_UPLINK);
			if (err)
				en_err(priv, "Failed enabling promiscuous mode\n");
			priv->flags |= MLX4_EN_FLAG_MC_PROMISC;
			break;

		case MLX4_STEERING_MODE_B0:
			err = mlx4_unicast_promisc_add(mdev->dev,
						       priv->base_qpn,
						       priv->port);
			if (err)
				en_err(priv, "Failed enabling unicast promiscuous mode\n");

			/* Add the default qp number as multicast
			 * promisc
			 */
			if (!(priv->flags & MLX4_EN_FLAG_MC_PROMISC)) {
				err = mlx4_multicast_promisc_add(mdev->dev,
								 priv->base_qpn,
								 priv->port);
				if (err)
					en_err(priv, "Failed enabling multicast promiscuous mode\n");
				priv->flags |= MLX4_EN_FLAG_MC_PROMISC;
			}
			break;

		case MLX4_STEERING_MODE_A0:
			err = mlx4_SET_PORT_qpn_calc(mdev->dev,
						     priv->port,
						     priv->base_qpn,
						     1);
			if (err)
				en_err(priv, "Failed enabling promiscuous mode\n");
			break;
		}

		/* Disable port multicast filter (unconditionally) */
		err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0,
					  0, MLX4_MCAST_DISABLE);
		if (err)
			en_err(priv, "Failed disabling multicast filter\n");

		/* Disable port VLAN filter */
		err = mlx4_SET_VLAN_FLTR(mdev->dev, priv);
		if (err)
			en_err(priv, "Failed disabling VLAN filter\n");
	}
}

static void mlx4_en_clear_promisc_mode(struct mlx4_en_priv *priv,
				       struct mlx4_en_dev *mdev)
{
	int err = 0;

	if (netif_msg_rx_status(priv))
		en_warn(priv, "Leaving promiscuous mode\n");
	priv->flags &= ~MLX4_EN_FLAG_PROMISC;

	/* Disable promiscuous mode */
	switch (mdev->dev->caps.steering_mode) {
	case MLX4_STEERING_MODE_DEVICE_MANAGED:
		err = mlx4_flow_steer_promisc_remove(mdev->dev,
						     priv->port,
						     MLX4_FS_PROMISC_UPLINK);
		if (err)
			en_err(priv, "Failed disabling promiscuous mode\n");
		priv->flags &= ~MLX4_EN_FLAG_MC_PROMISC;
		break;

	case MLX4_STEERING_MODE_B0:
		err = mlx4_unicast_promisc_remove(mdev->dev,
						  priv->base_qpn,
						  priv->port);
		if (err)
			en_err(priv, "Failed disabling unicast promiscuous mode\n");
		/* Disable Multicast promisc */
		if (priv->flags & MLX4_EN_FLAG_MC_PROMISC) {
			err = mlx4_multicast_promisc_remove(mdev->dev,
							    priv->base_qpn,
							    priv->port);
			if (err)
				en_err(priv, "Failed disabling multicast promiscuous mode\n");
			priv->flags &= ~MLX4_EN_FLAG_MC_PROMISC;
		}
		break;

	case MLX4_STEERING_MODE_A0:
		err = mlx4_SET_PORT_qpn_calc(mdev->dev,
					     priv->port,
					     priv->base_qpn, 0);
		if (err)
			en_err(priv, "Failed disabling promiscuous mode\n");
		break;
	}

	/* Enable port VLAN filter */
	err = mlx4_SET_VLAN_FLTR(mdev->dev, priv);
	if (err)
		en_err(priv, "Failed enabling VLAN filter\n");
}

static void mlx4_en_do_multicast(struct mlx4_en_priv *priv,
				 struct net_device *dev,
				 struct mlx4_en_dev *mdev)
{
	struct mlx4_en_mc_list *mclist, *tmp;
	u64 mcast_addr = 0;
	u8 mc_list[16] = {0};
	int err = 0;

	/* Enable/disable the multicast filter according to IFF_ALLMULTI */
	if (dev->flags & IFF_ALLMULTI) {
		err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0,
					  0, MLX4_MCAST_DISABLE);
		if (err)
			en_err(priv, "Failed disabling multicast filter\n");

		/* Add the default qp number as multicast promisc */
		if (!(priv->flags & MLX4_EN_FLAG_MC_PROMISC)) {
			switch (mdev->dev->caps.steering_mode) {
			case MLX4_STEERING_MODE_DEVICE_MANAGED:
				err = mlx4_flow_steer_promisc_add(mdev->dev,
								  priv->port,
								  priv->base_qpn,
								  MLX4_FS_PROMISC_ALL_MULTI);
				break;

			case MLX4_STEERING_MODE_B0:
				err = mlx4_multicast_promisc_add(mdev->dev,
								 priv->base_qpn,
								 priv->port);
				break;

			case MLX4_STEERING_MODE_A0:
				break;
			}
			if (err)
				en_err(priv, "Failed entering multicast promisc mode\n");
			priv->flags |= MLX4_EN_FLAG_MC_PROMISC;
		}
	} else {
		/* Disable Multicast promisc */
		if (priv->flags & MLX4_EN_FLAG_MC_PROMISC) {
			switch (mdev->dev->caps.steering_mode) {
			case MLX4_STEERING_MODE_DEVICE_MANAGED:
				err = mlx4_flow_steer_promisc_remove(mdev->dev,
								     priv->port,
								     MLX4_FS_PROMISC_ALL_MULTI);
				break;

			case MLX4_STEERING_MODE_B0:
				err = mlx4_multicast_promisc_remove(mdev->dev,
								    priv->base_qpn,
								    priv->port);
				break;

			case MLX4_STEERING_MODE_A0:
				break;
			}
			if (err)
				en_err(priv, "Failed disabling multicast promiscuous mode\n");
			priv->flags &= ~MLX4_EN_FLAG_MC_PROMISC;
		}

		err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0,
					  0, MLX4_MCAST_DISABLE);
		if (err)
			en_err(priv, "Failed disabling multicast filter\n");

		/* Flush mcast filter and init it with broadcast address */
		mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, ETH_BCAST,
				    1, MLX4_MCAST_CONFIG);

		/* Update multicast list - we cache all addresses so they
		 * won't change while HW is updated holding the command
		 * semaphore
		 */
		netif_addr_lock_bh(dev);
		mlx4_en_cache_mclist(dev);
		netif_addr_unlock_bh(dev);
		list_for_each_entry(mclist, &priv->mc_list, list) {
			mcast_addr = mlx4_en_mac_to_u64(mclist->addr);
			mlx4_SET_MCAST_FLTR(mdev->dev, priv->port,
					    mcast_addr, 0, MLX4_MCAST_CONFIG);
		}
		err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0,
					  0, MLX4_MCAST_ENABLE);
		if (err)
			en_err(priv, "Failed enabling multicast filter\n");

		update_mclist_flags(priv, &priv->curr_list, &priv->mc_list);
		list_for_each_entry_safe(mclist, tmp, &priv->curr_list, list) {
			if (mclist->action == MCLIST_REM) {
				/* detach this address and delete from list */
				memcpy(&mc_list[10], mclist->addr, ETH_ALEN);
				mc_list[5] = priv->port;
				err = mlx4_multicast_detach(mdev->dev,
							    &priv->rss_map.indir_qp,
							    mc_list,
							    MLX4_PROT_ETH,
							    mclist->reg_id);
				if (err)
					en_err(priv, "Failed to detach multicast address\n");

				/* remove from list */
				list_del(&mclist->list);
				kfree(mclist);
			} else if (mclist->action == MCLIST_ADD) {
				/* attach the address */
				memcpy(&mc_list[10], mclist->addr, ETH_ALEN);
				/* needed for B0 steering support */
				mc_list[5] = priv->port;
				err = mlx4_multicast_attach(mdev->dev,
							    &priv->rss_map.indir_qp,
							    mc_list,
							    priv->port, 0,
							    MLX4_PROT_ETH,
							    &mclist->reg_id);
				if (err)
					en_err(priv, "Failed to attach multicast address\n");
			}
		}
	}
}

static void mlx4_en_do_uc_filter(struct mlx4_en_priv *priv,
				 struct net_device *dev,
				 struct mlx4_en_dev *mdev)
{
	struct netdev_hw_addr *ha;
	struct mlx4_mac_entry *entry;
	struct hlist_node *n, *tmp;
	bool found;
	u64 mac;
	int err = 0;
	struct hlist_head *bucket;
	unsigned int i;
	int removed = 0;
	u32 prev_flags;

	/* Note that we do not need to protect our mac_hash traversal with rcu,
	 * since all modification code is protected by mdev->state_lock
	 */

	/* find what to remove */
	for (i = 0; i < MLX4_EN_MAC_HASH_SIZE; ++i) {
		bucket = &priv->mac_hash[i];
		hlist_for_each_entry_safe(entry, n, tmp, bucket, hlist) {
			found = false;
			netdev_for_each_uc_addr(ha, dev) {
				if (ether_addr_equal_64bits(entry->mac,
							    ha->addr)) {
					found = true;
					break;
				}
			}

			/* MAC address of the port is not in uc list */
			if (ether_addr_equal_64bits(entry->mac, dev->dev_addr))
				found = true;

			if (!found) {
				mac = mlx4_en_mac_to_u64(entry->mac);
				mlx4_en_uc_steer_release(priv, entry->mac,
							 priv->base_qpn,
							 entry->reg_id);
				mlx4_unregister_mac(mdev->dev, priv->port, mac);

				hlist_del_rcu(&entry->hlist);
				kfree_rcu(entry, rcu);
				en_dbg(DRV, priv, "Removed MAC %pM on port:%d\n",
				       entry->mac, priv->port);
				++removed;
			}
		}
	}

	/* if we didn't remove anything, there is no use in trying to add
	 * again once we are in a forced promisc mode state
	 */
	if ((priv->flags & MLX4_EN_FLAG_FORCE_PROMISC) && 0 == removed)
		return;

	prev_flags = priv->flags;
	priv->flags &= ~MLX4_EN_FLAG_FORCE_PROMISC;

	/* find what to add */
	netdev_for_each_uc_addr(ha, dev) {
		found = false;
		bucket = &priv->mac_hash[ha->addr[MLX4_EN_MAC_HASH_IDX]];
		hlist_for_each_entry(entry, n, bucket, hlist) {
			if (ether_addr_equal_64bits(entry->mac, ha->addr)) {
				found = true;
				break;
			}
		}

		if (!found) {
			entry = kmalloc(sizeof(*entry), GFP_KERNEL);
			if (!entry) {
				en_err(priv, "Failed adding MAC %pM on port:%d (out of memory)\n",
				       ha->addr, priv->port);
				priv->flags |= MLX4_EN_FLAG_FORCE_PROMISC;
				break;
			}
			mac = mlx4_en_mac_to_u64(ha->addr);
			memcpy(entry->mac, ha->addr, ETH_ALEN);
			err = mlx4_register_mac(mdev->dev, priv->port, mac);
			if (err < 0) {
				en_err(priv, "Failed registering MAC %pM on port %d: %d\n",
				       ha->addr, priv->port, err);
				kfree(entry);
				priv->flags |= MLX4_EN_FLAG_FORCE_PROMISC;
				break;
			}
			err = mlx4_en_uc_steer_add(priv, ha->addr,
						   &priv->base_qpn,
						   &entry->reg_id);
			if (err) {
				en_err(priv, "Failed adding MAC %pM on port %d: %d\n",
				       ha->addr, priv->port, err);
				mlx4_unregister_mac(mdev->dev, priv->port, mac);
				kfree(entry);
				priv->flags |= MLX4_EN_FLAG_FORCE_PROMISC;
				break;
			} else {
				unsigned int mac_hash;
				en_dbg(DRV, priv, "Added MAC %pM on port:%d\n",
				       ha->addr, priv->port);
				mac_hash = ha->addr[MLX4_EN_MAC_HASH_IDX];
				bucket = &priv->mac_hash[mac_hash];
				hlist_add_head_rcu(&entry->hlist, bucket);
			}
		}
	}

	if (priv->flags & MLX4_EN_FLAG_FORCE_PROMISC) {
		en_warn(priv, "Forcing promiscuous mode on port:%d\n",
			priv->port);
	} else if (prev_flags & MLX4_EN_FLAG_FORCE_PROMISC) {
		en_warn(priv, "Stop forcing promiscuous mode on port:%d\n",
			priv->port);
	}
}

static void mlx4_en_do_set_rx_mode(struct work_struct *work)
{
	struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv,
						 rx_mode_task);
	struct mlx4_en_dev *mdev = priv->mdev;
	struct net_device *dev = priv->dev;

	mutex_lock(&mdev->state_lock);
	if (!mdev->device_up) {
		en_dbg(HW, priv, "Card is not up, ignoring rx mode change.\n");
		goto out;
	}
	if (!priv->port_up) {
		en_dbg(HW, priv, "Port is down, ignoring rx mode change.\n");
		goto out;
	}

	if (!netif_carrier_ok(dev)) {
		if (!mlx4_en_QUERY_PORT(mdev, priv->port)) {
			if (priv->port_state.link_state) {
				priv->last_link_state = MLX4_DEV_EVENT_PORT_UP;
				netif_carrier_on(dev);
				en_dbg(LINK, priv, "Link Up\n");
			}
		}
	}

	if (dev->priv_flags & IFF_UNICAST_FLT)
		mlx4_en_do_uc_filter(priv, dev, mdev);

	/* Promiscuous mode: disable all filters */
	if ((dev->flags & IFF_PROMISC) ||
	    (priv->flags & MLX4_EN_FLAG_FORCE_PROMISC)) {
		mlx4_en_set_promisc_mode(priv, mdev);
		goto out;
	}

	/* Not in promiscuous mode */
	if (priv->flags & MLX4_EN_FLAG_PROMISC)
		mlx4_en_clear_promisc_mode(priv, mdev);

	mlx4_en_do_multicast(priv, dev, mdev);
out:
	mutex_unlock(&mdev->state_lock);
}

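/*
 * Netpoll path (e.g. netconsole): with interrupts unavailable, poll
 * each RX CQ directly under its lock after quiescing NAPI.
 */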
#ifdef CONFIG_NET_POLL_CONTROLLER
static void mlx4_en_netpoll(struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_cq *cq;
	unsigned long flags;
	int i;

	for (i = 0; i < priv->rx_ring_num; i++) {
		cq = &priv->rx_cq[i];
		spin_lock_irqsave(&cq->lock, flags);
		napi_synchronize(&cq->napi);
		mlx4_en_process_rx_cq(dev, cq, 0);
		spin_unlock_irqrestore(&cq->lock, flags);
	}
}
#endif

static void mlx4_en_tx_timeout(struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;

	if (netif_msg_timer(priv))
		en_warn(priv, "Tx timeout called on port:%d\n", priv->port);

	priv->port_stats.tx_timeout++;
	en_dbg(DRV, priv, "Scheduling watchdog\n");
	queue_work(mdev->workqueue, &priv->watchdog_task);
}

static struct net_device_stats *mlx4_en_get_stats(struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);

	spin_lock_bh(&priv->stats_lock);
	memcpy(&priv->ret_stats, &priv->stats, sizeof(priv->stats));
	spin_unlock_bh(&priv->stats_lock);

	return &priv->ret_stats;
}

static void mlx4_en_set_default_moderation(struct mlx4_en_priv *priv)
{
	struct mlx4_en_cq *cq;
	int i;

	/* If we haven't received a specific coalescing setting
	 * (module param), we set the moderation parameters as follows:
	 * - moder_cnt is set to the number of mtu sized packets to
	 *   satisfy our coalescing target.
	 * - moder_time is set to a fixed value.
	 */
	priv->rx_frames = MLX4_EN_RX_COAL_TARGET;
	priv->rx_usecs = MLX4_EN_RX_COAL_TIME;
	priv->tx_frames = MLX4_EN_TX_COAL_PKTS;
	priv->tx_usecs = MLX4_EN_TX_COAL_TIME;
	en_dbg(INTR, priv, "Default coalescing params for mtu:%d - rx_frames:%d rx_usecs:%d\n",
	       priv->dev->mtu, priv->rx_frames, priv->rx_usecs);

	/* Setup cq moderation params */
	for (i = 0; i < priv->rx_ring_num; i++) {
		cq = &priv->rx_cq[i];
		cq->moder_cnt = priv->rx_frames;
		cq->moder_time = priv->rx_usecs;
		priv->last_moder_time[i] = MLX4_EN_AUTO_CONF;
		priv->last_moder_packets[i] = 0;
		priv->last_moder_bytes[i] = 0;
	}

	for (i = 0; i < priv->tx_ring_num; i++) {
		cq = &priv->tx_cq[i];
		cq->moder_cnt = priv->tx_frames;
		cq->moder_time = priv->tx_usecs;
	}

	/* Reset auto-moderation params */
	priv->pkt_rate_low = MLX4_EN_RX_RATE_LOW;
	priv->rx_usecs_low = MLX4_EN_RX_COAL_TIME_LOW;
	priv->pkt_rate_high = MLX4_EN_RX_RATE_HIGH;
	priv->rx_usecs_high = MLX4_EN_RX_COAL_TIME_HIGH;
	priv->sample_interval = MLX4_EN_SAMPLE_INTERVAL;
	priv->adaptive_rx_coal = 1;
	priv->last_moder_jiffies = 0;
	priv->last_moder_tx_packets = 0;
}

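/*
 * Adaptive RX coalescing: once per sample interval, compute each
 * ring's packet rate.  Rates below pkt_rate_low map to rx_usecs_low,
 * rates above pkt_rate_high map to rx_usecs_high, and anything in
 * between is interpolated linearly:
 *
 *   moder_time = rx_usecs_low +
 *                (rate - pkt_rate_low) *
 *                (rx_usecs_high - rx_usecs_low) /
 *                (pkt_rate_high - pkt_rate_low)
 *
 * The CQ is reprogrammed only when the value actually changes.
 */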
static void mlx4_en_auto_moderation(struct mlx4_en_priv *priv)
{
	unsigned long period = (unsigned long) (jiffies - priv->last_moder_jiffies);
	struct mlx4_en_cq *cq;
	unsigned long packets;
	unsigned long rate;
	unsigned long avg_pkt_size;
	unsigned long rx_packets;
	unsigned long rx_bytes;
	unsigned long rx_pkt_diff;
	int moder_time;
	int ring, err;

	if (!priv->adaptive_rx_coal || period < priv->sample_interval * HZ)
		return;

	for (ring = 0; ring < priv->rx_ring_num; ring++) {
		spin_lock_bh(&priv->stats_lock);
		rx_packets = priv->rx_ring[ring].packets;
		rx_bytes = priv->rx_ring[ring].bytes;
		spin_unlock_bh(&priv->stats_lock);

		rx_pkt_diff = ((unsigned long) (rx_packets -
				priv->last_moder_packets[ring]));
		packets = rx_pkt_diff;
		rate = packets * HZ / period;
		avg_pkt_size = packets ? ((unsigned long) (rx_bytes -
				priv->last_moder_bytes[ring])) / packets : 0;

		/* Apply auto-moderation only when the packet rate is
		 * high enough to matter */
		if (rate > (MLX4_EN_RX_RATE_THRESH / priv->rx_ring_num) &&
		    avg_pkt_size > MLX4_EN_AVG_PKT_SMALL) {
			if (rate < priv->pkt_rate_low)
				moder_time = priv->rx_usecs_low;
			else if (rate > priv->pkt_rate_high)
				moder_time = priv->rx_usecs_high;
			else
				moder_time = (rate - priv->pkt_rate_low) *
					(priv->rx_usecs_high - priv->rx_usecs_low) /
					(priv->pkt_rate_high - priv->pkt_rate_low) +
					priv->rx_usecs_low;
		} else {
			moder_time = priv->rx_usecs_low;
		}

		if (moder_time != priv->last_moder_time[ring]) {
			priv->last_moder_time[ring] = moder_time;
			cq = &priv->rx_cq[ring];
			cq->moder_time = moder_time;
			err = mlx4_en_set_cq_moder(priv, cq);
			if (err)
				en_err(priv, "Failed modifying moderation for cq:%d\n",
				       ring);
		}
		priv->last_moder_packets[ring] = rx_packets;
		priv->last_moder_bytes[ring] = rx_bytes;
	}

	priv->last_moder_jiffies = jiffies;
}

static void mlx4_en_do_get_stats(struct work_struct *work)
{
	struct delayed_work *delay = to_delayed_work(work);
	struct mlx4_en_priv *priv = container_of(delay, struct mlx4_en_priv,
						 stats_task);
	struct mlx4_en_dev *mdev = priv->mdev;
	int err;

	mutex_lock(&mdev->state_lock);
	if (mdev->device_up) {
		err = mlx4_en_DUMP_ETH_STATS(mdev, priv->port, 0);
		if (err)
			en_dbg(HW, priv, "Could not update stats\n");

		if (priv->port_up)
			mlx4_en_auto_moderation(priv);

		queue_delayed_work(mdev->workqueue, &priv->stats_task, STATS_DELAY);
	}
	if (mdev->mac_removed[MLX4_MAX_PORTS + 1 - priv->port]) {
		queue_work(mdev->workqueue, &priv->mac_task);
		mdev->mac_removed[MLX4_MAX_PORTS + 1 - priv->port] = 0;
	}
	mutex_unlock(&mdev->state_lock);
}

static void mlx4_en_linkstate(struct work_struct *work)
{
	struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv,
						 linkstate_task);
	struct mlx4_en_dev *mdev = priv->mdev;
	int linkstate = priv->link_state;

	mutex_lock(&mdev->state_lock);
	/* If observable port state changed set carrier state and
	 * report to system log */
	if (priv->last_link_state != linkstate) {
		if (linkstate == MLX4_DEV_EVENT_PORT_DOWN) {
			en_info(priv, "Link Down\n");
			netif_carrier_off(priv->dev);
		} else {
			en_info(priv, "Link Up\n");
			netif_carrier_on(priv->dev);
		}
	}
	priv->last_link_state = linkstate;
	mutex_unlock(&mdev->state_lock);
}

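/*
 * Bring the port up: activate RX rings and CQs, acquire the base QP
 * for the port MAC, configure RSS and the drop QP, activate TX rings
 * and CQs, program port/QP settings in firmware, run INIT_PORT, attach
 * the broadcast address, and finally kick the rx_mode task to rebuild
 * the filter state.  The error labels unwind in exactly the reverse
 * order.
 */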
int mlx4_en_start_port(struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_en_cq *cq;
	struct mlx4_en_tx_ring *tx_ring;
	int rx_index = 0;
	int tx_index = 0;
	int err = 0;
	int i;
	int j;
	u8 mc_list[16] = {0};

	if (priv->port_up) {
		en_dbg(DRV, priv, "start port called while port already up\n");
		return 0;
	}

	INIT_LIST_HEAD(&priv->mc_list);
	INIT_LIST_HEAD(&priv->curr_list);
	INIT_LIST_HEAD(&priv->ethtool_list);
	memset(&priv->ethtool_rules[0], 0,
	       sizeof(struct ethtool_flow_id) * MAX_NUM_OF_FS_RULES);

	/* Calculate Rx buf size */
	dev->mtu = min(dev->mtu, priv->max_mtu);
	mlx4_en_calc_rx_buf(dev);
	en_dbg(DRV, priv, "Rx buf size:%d\n", priv->rx_skb_size);

	/* Configure rx cq's and rings */
	err = mlx4_en_activate_rx_rings(priv);
	if (err) {
		en_err(priv, "Failed to activate RX rings\n");
		return err;
	}
	for (i = 0; i < priv->rx_ring_num; i++) {
		cq = &priv->rx_cq[i];

		err = mlx4_en_activate_cq(priv, cq, i);
		if (err) {
			en_err(priv, "Failed activating Rx CQ\n");
			goto cq_err;
		}
		for (j = 0; j < cq->size; j++)
			cq->buf[j].owner_sr_opcode = MLX4_CQE_OWNER_MASK;
		err = mlx4_en_set_cq_moder(priv, cq);
		if (err) {
			en_err(priv, "Failed setting cq moderation parameters");
			mlx4_en_deactivate_cq(priv, cq);
			goto cq_err;
		}
		mlx4_en_arm_cq(priv, cq);
		priv->rx_ring[i].cqn = cq->mcq.cqn;
		++rx_index;
	}

	/* Set qp number */
	en_dbg(DRV, priv, "Getting qp number for port %d\n", priv->port);
	err = mlx4_en_get_qp(priv);
	if (err) {
		en_err(priv, "Failed getting eth qp\n");
		goto cq_err;
	}
	mdev->mac_removed[priv->port] = 0;

	err = mlx4_en_config_rss_steer(priv);
	if (err) {
		en_err(priv, "Failed configuring rss steering\n");
		goto mac_err;
	}

	err = mlx4_en_create_drop_qp(priv);
	if (err)
		goto rss_err;

	/* Configure tx cq's and rings */
	for (i = 0; i < priv->tx_ring_num; i++) {
		/* Configure cq */
		cq = &priv->tx_cq[i];
		err = mlx4_en_activate_cq(priv, cq, i);
		if (err) {
			en_err(priv, "Failed allocating Tx CQ\n");
			goto tx_err;
		}
		err = mlx4_en_set_cq_moder(priv, cq);
		if (err) {
			en_err(priv, "Failed setting cq moderation parameters");
			mlx4_en_deactivate_cq(priv, cq);
			goto tx_err;
		}
		en_dbg(DRV, priv, "Resetting index of collapsed CQ:%d to -1\n", i);
		cq->buf->wqe_index = cpu_to_be16(0xffff);

		/* Configure ring */
		tx_ring = &priv->tx_ring[i];
		err = mlx4_en_activate_tx_ring(priv, tx_ring, cq->mcq.cqn,
					       i / priv->num_tx_rings_p_up);
		if (err) {
			en_err(priv, "Failed allocating Tx ring\n");
			mlx4_en_deactivate_cq(priv, cq);
			goto tx_err;
		}
		tx_ring->tx_queue = netdev_get_tx_queue(dev, i);

		/* Arm CQ for TX completions */
		mlx4_en_arm_cq(priv, cq);

		/* Set initial ownership of all Tx TXBBs to SW (1) */
		for (j = 0; j < tx_ring->buf_size; j += STAMP_STRIDE)
			*((u32 *) (tx_ring->buf + j)) = 0xffffffff;
		++tx_index;
	}

	/* Configure port */
	err = mlx4_SET_PORT_general(mdev->dev, priv->port,
				    priv->rx_skb_size + ETH_FCS_LEN,
				    priv->prof->tx_pause,
				    priv->prof->tx_ppp,
				    priv->prof->rx_pause,
				    priv->prof->rx_ppp);
	if (err) {
		en_err(priv, "Failed setting port general configurations for port %d, with error %d\n",
		       priv->port, err);
		goto tx_err;
	}
	/* Set default qp number */
	err = mlx4_SET_PORT_qpn_calc(mdev->dev, priv->port, priv->base_qpn, 0);
	if (err) {
		en_err(priv, "Failed setting default qp numbers\n");
		goto tx_err;
	}

	/* Init port */
	en_dbg(HW, priv, "Initializing port\n");
	err = mlx4_INIT_PORT(mdev->dev, priv->port);
	if (err) {
		en_err(priv, "Failed Initializing port\n");
		goto tx_err;
	}

	/* Attach rx QP to broadcast address */
	memset(&mc_list[10], 0xff, ETH_ALEN);
	mc_list[5] = priv->port; /* needed for B0 steering support */
	if (mlx4_multicast_attach(mdev->dev, &priv->rss_map.indir_qp, mc_list,
				  priv->port, 0, MLX4_PROT_ETH,
				  &priv->broadcast_id))
		mlx4_warn(mdev, "Failed Attaching Broadcast\n");

	/* Must redo promiscuous mode setup. */
	priv->flags &= ~(MLX4_EN_FLAG_PROMISC | MLX4_EN_FLAG_MC_PROMISC);

	/* Schedule multicast task to populate multicast list */
	queue_work(mdev->workqueue, &priv->rx_mode_task);

	mlx4_set_stats_bitmap(mdev->dev, &priv->stats_bitmap);

	priv->port_up = true;
	netif_tx_start_all_queues(dev);
	netif_device_attach(dev);

	return 0;

tx_err:
	while (tx_index--) {
		mlx4_en_deactivate_tx_ring(priv, &priv->tx_ring[tx_index]);
		mlx4_en_deactivate_cq(priv, &priv->tx_cq[tx_index]);
	}
	mlx4_en_destroy_drop_qp(priv);
rss_err:
	mlx4_en_release_rss_steer(priv);
mac_err:
	mlx4_en_put_qp(priv);
cq_err:
	while (rx_index--)
		mlx4_en_deactivate_cq(priv, &priv->rx_cq[rx_index]);
	for (i = 0; i < priv->rx_ring_num; i++)
		mlx4_en_deactivate_rx_ring(priv, &priv->rx_ring[i]);

	return err; /* need to close devices */
}

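/*
 * Tear the port down: quiesce TX, drop promiscuous and multicast
 * state, detach broadcast and all cached multicast addresses, free TX
 * rings and CQs, release the RSS QPs and the port MAC, remove any
 * ethtool flow-steering rules, drain the RX rings, and close the port
 * in firmware.
 */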
void mlx4_en_stop_port(struct net_device *dev, int detach)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_en_mc_list *mclist, *tmp;
	struct ethtool_flow_id *flow, *tmp_flow;
	int i;
	u8 mc_list[16] = {0};

	if (!priv->port_up) {
		en_dbg(DRV, priv, "stop port called while port already down\n");
		return;
	}

	/* Synchronize with tx routine */
	netif_tx_lock_bh(dev);
	if (detach)
		netif_device_detach(dev);
	netif_tx_stop_all_queues(dev);
	netif_tx_unlock_bh(dev);

	netif_tx_disable(dev);

	/* Set port as not active */
	priv->port_up = false;

	/* Promiscuous mode */
	if (mdev->dev->caps.steering_mode ==
	    MLX4_STEERING_MODE_DEVICE_MANAGED) {
		priv->flags &= ~(MLX4_EN_FLAG_PROMISC |
				 MLX4_EN_FLAG_MC_PROMISC);
		mlx4_flow_steer_promisc_remove(mdev->dev,
					       priv->port,
					       MLX4_FS_PROMISC_UPLINK);
		mlx4_flow_steer_promisc_remove(mdev->dev,
					       priv->port,
					       MLX4_FS_PROMISC_ALL_MULTI);
	} else if (priv->flags & MLX4_EN_FLAG_PROMISC) {
		priv->flags &= ~MLX4_EN_FLAG_PROMISC;

		/* Disable promiscuous mode */
		mlx4_unicast_promisc_remove(mdev->dev, priv->base_qpn,
					    priv->port);

		/* Disable Multicast promisc */
		if (priv->flags & MLX4_EN_FLAG_MC_PROMISC) {
			mlx4_multicast_promisc_remove(mdev->dev, priv->base_qpn,
						      priv->port);
			priv->flags &= ~MLX4_EN_FLAG_MC_PROMISC;
		}
	}

	/* Detach all multicasts */
	memset(&mc_list[10], 0xff, ETH_ALEN);
	mc_list[5] = priv->port; /* needed for B0 steering support */
	mlx4_multicast_detach(mdev->dev, &priv->rss_map.indir_qp, mc_list,
			      MLX4_PROT_ETH, priv->broadcast_id);
	list_for_each_entry(mclist, &priv->curr_list, list) {
		memcpy(&mc_list[10], mclist->addr, ETH_ALEN);
		mc_list[5] = priv->port;
		mlx4_multicast_detach(mdev->dev, &priv->rss_map.indir_qp,
				      mc_list, MLX4_PROT_ETH, mclist->reg_id);
	}
	mlx4_en_clear_list(dev);
	list_for_each_entry_safe(mclist, tmp, &priv->curr_list, list) {
		list_del(&mclist->list);
		kfree(mclist);
	}

	/* Flush multicast filter */
	mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0, 1, MLX4_MCAST_CONFIG);

	mlx4_en_destroy_drop_qp(priv);

	/* Free TX Rings */
	for (i = 0; i < priv->tx_ring_num; i++) {
		mlx4_en_deactivate_tx_ring(priv, &priv->tx_ring[i]);
		mlx4_en_deactivate_cq(priv, &priv->tx_cq[i]);
	}
	msleep(10);

	for (i = 0; i < priv->tx_ring_num; i++)
		mlx4_en_free_tx_buf(dev, &priv->tx_ring[i]);

	/* Free RSS qps */
	mlx4_en_release_rss_steer(priv);

	/* Unregister MAC address for the port */
	mlx4_en_put_qp(priv);
	if (!(mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAGS2_REASSIGN_MAC_EN))
		mdev->mac_removed[priv->port] = 1;

	/* Remove flow steering rules for the port */
	if (mdev->dev->caps.steering_mode ==
	    MLX4_STEERING_MODE_DEVICE_MANAGED) {
		ASSERT_RTNL();
		list_for_each_entry_safe(flow, tmp_flow,
					 &priv->ethtool_list, list) {
			mlx4_flow_detach(mdev->dev, flow->id);
			list_del(&flow->list);
		}
	}

	/* Free RX Rings */
	for (i = 0; i < priv->rx_ring_num; i++) {
		mlx4_en_deactivate_rx_ring(priv, &priv->rx_ring[i]);
		while (test_bit(NAPI_STATE_SCHED, &priv->rx_cq[i].napi.state))
			msleep(1);
		mlx4_en_deactivate_cq(priv, &priv->rx_cq[i]);
	}

	/* close port */
	mlx4_CLOSE_PORT(mdev->dev, priv->port);
}
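
/* Watchdog worker: under mdev->state_lock, bounce the port (stop it, reset
 * the per-queue BQL counters, start it again) to recover from a TX timeout.
 */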
static void mlx4_en_restart(struct work_struct *work)
{
	struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv,
						 watchdog_task);
	struct mlx4_en_dev *mdev = priv->mdev;
	struct net_device *dev = priv->dev;
	int i;

	en_dbg(DRV, priv, "Watchdog task called for port %d\n", priv->port);

	mutex_lock(&mdev->state_lock);
	if (priv->port_up) {
		mlx4_en_stop_port(dev, 1);
		for (i = 0; i < priv->tx_ring_num; i++)
			netdev_tx_reset_queue(priv->tx_ring[i].tx_queue);
		if (mlx4_en_start_port(dev))
			en_err(priv, "Failed restarting port %d\n", priv->port);
	}
	mutex_unlock(&mdev->state_lock);
}
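
/* Reset the HW statistics (DUMP_ETH_STATS with the reset flag set) and zero
 * the SW counters kept per device and per TX/RX ring.
 */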
static void mlx4_en_clear_stats(struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	int i;

	if (mlx4_en_DUMP_ETH_STATS(mdev, priv->port, 1))
		en_dbg(HW, priv, "Failed dumping statistics\n");

	memset(&priv->stats, 0, sizeof(priv->stats));
	memset(&priv->pstats, 0, sizeof(priv->pstats));
	memset(&priv->pkstats, 0, sizeof(priv->pkstats));
	memset(&priv->port_stats, 0, sizeof(priv->port_stats));

	for (i = 0; i < priv->tx_ring_num; i++) {
		priv->tx_ring[i].bytes = 0;
		priv->tx_ring[i].packets = 0;
		priv->tx_ring[i].tx_csum = 0;
	}
	for (i = 0; i < priv->rx_ring_num; i++) {
		priv->rx_ring[i].bytes = 0;
		priv->rx_ring[i].packets = 0;
		priv->rx_ring[i].csum_ok = 0;
		priv->rx_ring[i].csum_none = 0;
	}
}
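
/* ndo_open() handler: clear the statistics and bring the port up, unless
 * the underlying device is down.
 */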
static int mlx4_en_open(struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	int err = 0;

	mutex_lock(&mdev->state_lock);

	if (!mdev->device_up) {
		en_err(priv, "Cannot open - device down/disabled\n");
		err = -EBUSY;
		goto out;
	}

	/* Reset HW statistics and SW counters */
	mlx4_en_clear_stats(dev);

	err = mlx4_en_start_port(dev);
	if (err)
		en_err(priv, "Failed starting port:%d\n", priv->port);

out:
	mutex_unlock(&mdev->state_lock);
	return err;
}
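
/* ndo_stop() handler: bring the port down without detaching the netdev. */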
static int mlx4_en_close(struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;

	en_dbg(IFDOWN, priv, "Close port called\n");

	mutex_lock(&mdev->state_lock);

	mlx4_en_stop_port(dev, 0);
	netif_carrier_off(dev);

	mutex_unlock(&mdev->state_lock);
	return 0;
}
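
/* Release everything mlx4_en_alloc_resources() created: the RFS IRQ CPU
 * rmap, all TX/RX rings and their CQs, and the reserved TX QP range.
 */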
void mlx4_en_free_resources(struct mlx4_en_priv *priv)
{
	int i;

#ifdef CONFIG_RFS_ACCEL
	free_irq_cpu_rmap(priv->dev->rx_cpu_rmap);
	priv->dev->rx_cpu_rmap = NULL;
#endif

	for (i = 0; i < priv->tx_ring_num; i++) {
		if (priv->tx_ring[i].tx_info)
			mlx4_en_destroy_tx_ring(priv, &priv->tx_ring[i]);
		if (priv->tx_cq[i].buf)
			mlx4_en_destroy_cq(priv, &priv->tx_cq[i]);
	}

	for (i = 0; i < priv->rx_ring_num; i++) {
		if (priv->rx_ring[i].rx_info)
			mlx4_en_destroy_rx_ring(priv, &priv->rx_ring[i],
				priv->prof->rx_ring_size, priv->stride);
		if (priv->rx_cq[i].buf)
			mlx4_en_destroy_cq(priv, &priv->rx_cq[i]);
	}

	if (priv->base_tx_qpn) {
		mlx4_qp_release_range(priv->mdev->dev, priv->base_tx_qpn,
				      priv->tx_ring_num);
		priv->base_tx_qpn = 0;
	}
}
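
/* Reserve a QP range for the TX rings and create all TX/RX rings and CQs
 * according to the port profile. On failure, partially created resources
 * are left for mlx4_en_free_resources() to reclaim.
 */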
int mlx4_en_alloc_resources(struct mlx4_en_priv *priv)
{
	struct mlx4_en_port_profile *prof = priv->prof;
	int i;
	int err;

	err = mlx4_qp_reserve_range(priv->mdev->dev, priv->tx_ring_num, 256,
				    &priv->base_tx_qpn);
	if (err) {
		en_err(priv, "failed reserving range for TX rings\n");
		return err;
	}

	/* Create tx Rings */
	for (i = 0; i < priv->tx_ring_num; i++) {
		if (mlx4_en_create_cq(priv, &priv->tx_cq[i],
				      prof->tx_ring_size, i, TX))
			goto err;

		if (mlx4_en_create_tx_ring(priv, &priv->tx_ring[i],
					   priv->base_tx_qpn + i,
					   prof->tx_ring_size, TXBB_SIZE))
			goto err;
	}

	/* Create rx Rings */
	for (i = 0; i < priv->rx_ring_num; i++) {
		if (mlx4_en_create_cq(priv, &priv->rx_cq[i],
				      prof->rx_ring_size, i, RX))
			goto err;

		if (mlx4_en_create_rx_ring(priv, &priv->rx_ring[i],
					   prof->rx_ring_size, priv->stride))
			goto err;
	}

#ifdef CONFIG_RFS_ACCEL
	priv->dev->rx_cpu_rmap = alloc_irq_cpu_rmap(priv->rx_ring_num);
	if (!priv->dev->rx_cpu_rmap)
		goto err;
#endif

	return 0;

err:
	en_err(priv, "Failed to allocate NIC resources\n");
	return -ENOMEM;
}
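
/* Final teardown of a port's netdev: unregister it (which closes the port
 * if it was up), free the HW queue resources, flush pending work, clear the
 * mdev->pndev[] entry and free the rings, CQs and the netdev itself.
 */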
void mlx4_en_destroy_netdev(struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;

	en_dbg(DRV, priv, "Destroying netdev on port:%d\n", priv->port);

	/* Unregister device - this will close the port if it was up */
	if (priv->registered)
		unregister_netdev(dev);

	if (priv->allocated)
		mlx4_free_hwq_res(mdev->dev, &priv->res, MLX4_EN_PAGE_SIZE);

	cancel_delayed_work(&priv->stats_task);
	/* flush any pending task for this netdev */
	flush_workqueue(mdev->workqueue);

	/* Detach the netdev so tasks would not attempt to access it */
	mutex_lock(&mdev->state_lock);
	mdev->pndev[priv->port] = NULL;
	mutex_unlock(&mdev->state_lock);

	mlx4_en_free_resources(priv);

	kfree(priv->tx_ring);
	kfree(priv->tx_cq);

	free_netdev(dev);
}
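
/* ndo_change_mtu() handler: validate the new MTU against the device limits
 * and, if the interface is running, restart the port to apply it; a failed
 * restart is handed off to the watchdog task.
 */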
static int mlx4_en_change_mtu(struct net_device *dev, int new_mtu)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	int err = 0;

	en_dbg(DRV, priv, "Change MTU called - current:%d new:%d\n",
	       dev->mtu, new_mtu);

	if ((new_mtu < MLX4_EN_MIN_MTU) || (new_mtu > priv->max_mtu)) {
		en_err(priv, "Bad MTU size:%d.\n", new_mtu);
		return -EPERM;
	}
	dev->mtu = new_mtu;

	if (netif_running(dev)) {
		mutex_lock(&mdev->state_lock);
		if (!mdev->device_up) {
			/* NIC is probably restarting - let watchdog task reset
			 * the port */
			en_dbg(DRV, priv, "Change MTU called with card down!?\n");
		} else {
			mlx4_en_stop_port(dev, 1);
			err = mlx4_en_start_port(dev);
			if (err) {
				en_err(priv, "Failed restarting port:%d\n",
				       priv->port);
				queue_work(mdev->workqueue,
					   &priv->watchdog_task);
			}
		}
		mutex_unlock(&mdev->state_lock);
	}
	return 0;
}
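
/* ndo_set_features() handler: only NETIF_F_LOOPBACK is acted on here, by
 * toggling the FORCE_LOOPBACK bit in the WQE control flags.
 */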
static int mlx4_en_set_features(struct net_device *netdev,
				netdev_features_t features)
{
	struct mlx4_en_priv *priv = netdev_priv(netdev);

	if (features & NETIF_F_LOOPBACK)
		priv->ctrl_flags |= cpu_to_be32(MLX4_WQE_CTRL_FORCE_LOOPBACK);
	else
		priv->ctrl_flags &=
			cpu_to_be32(~MLX4_WQE_CTRL_FORCE_LOOPBACK);

	mlx4_en_update_loopback_state(netdev, features);

	return 0;
}
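
/* netdev entry points; the netpoll and RFS handlers are only wired up when
 * the corresponding kernel config options are enabled.
 */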
static const struct net_device_ops mlx4_netdev_ops = {
	.ndo_open		= mlx4_en_open,
	.ndo_stop		= mlx4_en_close,
	.ndo_start_xmit		= mlx4_en_xmit,
	.ndo_select_queue	= mlx4_en_select_queue,
	.ndo_get_stats		= mlx4_en_get_stats,
	.ndo_set_rx_mode	= mlx4_en_set_rx_mode,
	.ndo_set_mac_address	= mlx4_en_set_mac,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_change_mtu		= mlx4_en_change_mtu,
	.ndo_tx_timeout		= mlx4_en_tx_timeout,
	.ndo_vlan_rx_add_vid	= mlx4_en_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= mlx4_en_vlan_rx_kill_vid,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= mlx4_en_netpoll,
#endif
	.ndo_set_features	= mlx4_en_set_features,
	.ndo_setup_tc		= mlx4_en_setup_tc,
#ifdef CONFIG_RFS_ACCEL
	.ndo_rx_flow_steer	= mlx4_en_filter_rfs,
#endif
};
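
/* Create and register the netdev for one physical port: allocate a
 * multiqueue etherdev, initialize the private data, work tasks and rings,
 * read the burned-in MAC, set up the netdev ops and feature flags, register
 * with the stack and perform the initial firmware port configuration.
 */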
int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
			struct mlx4_en_port_profile *prof)
{
	struct net_device *dev;
	struct mlx4_en_priv *priv;
	int i;
	int err;

	dev = alloc_etherdev_mqs(sizeof(struct mlx4_en_priv),
				 MAX_TX_RINGS, MAX_RX_RINGS);
	if (dev == NULL)
		return -ENOMEM;

	netif_set_real_num_tx_queues(dev, prof->tx_ring_num);
	netif_set_real_num_rx_queues(dev, prof->rx_ring_num);

	SET_NETDEV_DEV(dev, &mdev->dev->pdev->dev);
	dev->dev_id = port - 1;

	/*
	 * Initialize driver private data
	 */
	priv = netdev_priv(dev);
	memset(priv, 0, sizeof(struct mlx4_en_priv));
	priv->dev = dev;
	priv->mdev = mdev;
	priv->ddev = &mdev->pdev->dev;
	priv->prof = prof;
	priv->port = port;
	priv->port_up = false;
	priv->flags = prof->flags;
	priv->ctrl_flags = cpu_to_be32(MLX4_WQE_CTRL_CQ_UPDATE |
				       MLX4_WQE_CTRL_SOLICITED);
	priv->num_tx_rings_p_up = mdev->profile.num_tx_rings_p_up;
	priv->tx_ring_num = prof->tx_ring_num;
	priv->tx_ring = kzalloc(sizeof(struct mlx4_en_tx_ring) * MAX_TX_RINGS,
				GFP_KERNEL);
	if (!priv->tx_ring) {
		err = -ENOMEM;
		goto out;
	}
	priv->tx_cq = kzalloc(sizeof(struct mlx4_en_cq) * MAX_TX_RINGS,
			      GFP_KERNEL);
	if (!priv->tx_cq) {
		err = -ENOMEM;
		goto out;
	}
	priv->rx_ring_num = prof->rx_ring_num;
	priv->cqe_factor = (mdev->dev->caps.cqe_size == 64) ? 1 : 0;
	priv->mac_index = -1;
	priv->msg_enable = MLX4_EN_MSG_LEVEL;
	spin_lock_init(&priv->stats_lock);
	INIT_WORK(&priv->rx_mode_task, mlx4_en_do_set_rx_mode);
	INIT_WORK(&priv->mac_task, mlx4_en_do_set_mac);
	INIT_WORK(&priv->watchdog_task, mlx4_en_restart);
	INIT_WORK(&priv->linkstate_task, mlx4_en_linkstate);
	INIT_DELAYED_WORK(&priv->stats_task, mlx4_en_do_get_stats);
#ifdef CONFIG_MLX4_EN_DCB
	if (!mlx4_is_slave(priv->mdev->dev))
		dev->dcbnl_ops = &mlx4_en_dcbnl_ops;
#endif

	for (i = 0; i < MLX4_EN_MAC_HASH_SIZE; ++i)
		INIT_HLIST_HEAD(&priv->mac_hash[i]);

	/* Query for default MAC and max MTU */
	priv->max_mtu = mdev->dev->caps.eth_mtu_cap[priv->port];

	/* Set default MAC */
	dev->addr_len = ETH_ALEN;
	mlx4_en_u64_to_mac(dev->dev_addr, mdev->dev->caps.def_mac[priv->port]);
	if (!is_valid_ether_addr(dev->dev_addr)) {
		en_err(priv, "Port: %d, invalid mac burned: %pM, quitting\n",
		       priv->port, dev->dev_addr);
		err = -EINVAL;
		goto out;
	}

	memcpy(priv->prev_mac, dev->dev_addr, sizeof(priv->prev_mac));

	priv->stride = roundup_pow_of_two(sizeof(struct mlx4_en_rx_desc) +
					  DS_SIZE * MLX4_EN_MAX_RX_FRAGS);
	err = mlx4_en_alloc_resources(priv);
	if (err)
		goto out;

#ifdef CONFIG_RFS_ACCEL
	INIT_LIST_HEAD(&priv->filters);
	spin_lock_init(&priv->filters_lock);
#endif

	/* Allocate page for receive rings */
	err = mlx4_alloc_hwq_res(mdev->dev, &priv->res,
				 MLX4_EN_PAGE_SIZE, MLX4_EN_PAGE_SIZE);
	if (err) {
		en_err(priv, "Failed to allocate page for rx qps\n");
		goto out;
	}
	priv->allocated = 1;

	/*
	 * Initialize netdev entry points
	 */
	dev->netdev_ops = &mlx4_netdev_ops;
	dev->watchdog_timeo = MLX4_EN_WATCHDOG_TIMEOUT;
	netif_set_real_num_tx_queues(dev, priv->tx_ring_num);
	netif_set_real_num_rx_queues(dev, priv->rx_ring_num);

	SET_ETHTOOL_OPS(dev, &mlx4_en_ethtool_ops);

	/*
	 * Set driver features
	 */
	dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
	if (mdev->LSO_support)
		dev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6;
	dev->vlan_features = dev->hw_features;
	dev->hw_features |= NETIF_F_RXCSUM | NETIF_F_RXHASH;
	dev->features = dev->hw_features | NETIF_F_HIGHDMA |
			NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX |
			NETIF_F_HW_VLAN_FILTER;
	dev->hw_features |= NETIF_F_LOOPBACK;

	if (mdev->dev->caps.steering_mode ==
	    MLX4_STEERING_MODE_DEVICE_MANAGED)
		dev->hw_features |= NETIF_F_NTUPLE;

	if (mdev->dev->caps.steering_mode != MLX4_STEERING_MODE_A0)
		dev->priv_flags |= IFF_UNICAST_FLT;

	mdev->pndev[port] = dev;

	netif_carrier_off(dev);
	err = register_netdev(dev);
	if (err) {
		en_err(priv, "Netdev registration failed for port %d\n", port);
		goto out;
	}
	priv->registered = 1;

	en_warn(priv, "Using %d TX rings\n", prof->tx_ring_num);
	en_warn(priv, "Using %d RX rings\n", prof->rx_ring_num);

	mlx4_en_update_loopback_state(priv->dev, priv->dev->features);

	/* Configure port */
	mlx4_en_calc_rx_buf(dev);
	err = mlx4_SET_PORT_general(mdev->dev, priv->port,
				    priv->rx_skb_size + ETH_FCS_LEN,
				    prof->tx_pause, prof->tx_ppp,
				    prof->rx_pause, prof->rx_ppp);
	if (err) {
		en_err(priv, "Failed setting port general configurations for port %d, with error %d\n",
		       priv->port, err);
		goto out;
	}

	/* Init port */
	en_warn(priv, "Initializing port\n");
	err = mlx4_INIT_PORT(mdev->dev, priv->port);
	if (err) {
		en_err(priv, "Failed Initializing port\n");
		goto out;
	}
	mlx4_en_set_default_moderation(priv);
	queue_delayed_work(mdev->workqueue, &priv->stats_task, STATS_DELAY);
	return 0;

out:
	mlx4_en_destroy_netdev(dev);
	return err;
}