mcg.c

/*
 * Copyright (c) 2006, 2007 Cisco Systems, Inc. All rights reserved.
 * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/string.h>
#include <linux/etherdevice.h>

#include <linux/mlx4/cmd.h>
#include <linux/export.h>

#include "mlx4.h"

#define MGM_QPN_MASK    0x00FFFFFF
#define MGM_BLCK_LB_BIT 30

static const u8 zero_gid[16];   /* automatically initialized to 0 */
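
/*
 * Layout conventions used for an MGM entry throughout this file:
 *  - members_count: the low 24 bits hold the number of attached QPs and
 *    bits 30-31 hold the protocol (MLX4_PROT_*).
 *  - qp[i]: the low 24 bits (MGM_QPN_MASK) hold the QPN; bit 30
 *    (MGM_BLCK_LB_BIT) requests that multicast loopback be blocked for
 *    that QP.
 *  - next_gid_index: the index of the next AMGM entry in the hash
 *    chain, stored shifted left by 6 bits (so the index itself is
 *    next_gid_index >> 6).
 */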

static int mlx4_READ_ENTRY(struct mlx4_dev *dev, int index,
        struct mlx4_cmd_mailbox *mailbox)
{
        return mlx4_cmd_box(dev, 0, mailbox->dma, index, 0, MLX4_CMD_READ_MCG,
                MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
}

static int mlx4_WRITE_ENTRY(struct mlx4_dev *dev, int index,
        struct mlx4_cmd_mailbox *mailbox)
{
        return mlx4_cmd(dev, mailbox->dma, index, 0, MLX4_CMD_WRITE_MCG,
                MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
}

static int mlx4_WRITE_PROMISC(struct mlx4_dev *dev, u8 vep_num, u8 port, u8 steer,
        struct mlx4_cmd_mailbox *mailbox)
{
        u32 in_mod;

        in_mod = (u32) vep_num << 24 | (u32) port << 16 | steer << 1;
        return mlx4_cmd(dev, mailbox->dma, in_mod, 0x1,
                MLX4_CMD_WRITE_MCG, MLX4_CMD_TIME_CLASS_A,
                MLX4_CMD_NATIVE);
}

static int mlx4_GID_HASH(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
        u16 *hash, u8 op_mod)
{
        u64 imm;
        int err;

        err = mlx4_cmd_imm(dev, mailbox->dma, &imm, 0, op_mod,
                MLX4_CMD_MGID_HASH, MLX4_CMD_TIME_CLASS_A,
                MLX4_CMD_NATIVE);

        if (!err)
                *hash = imm;

        return err;
}
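
/*
 * The wrappers above map directly onto firmware commands: READ_MCG and
 * WRITE_MCG read/write one MGM/AMGM entry by index, and MGID_HASH
 * returns the hash bucket for a GID.  mlx4_WRITE_PROMISC issues
 * WRITE_MCG with op_mod 1 and an in_mod encoding vep_num/port/steer;
 * the promisc helpers below use it to rewrite the default steering
 * entry for that port.
 */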

static struct mlx4_promisc_qp *get_promisc_qp(struct mlx4_dev *dev, u8 pf_num,
        enum mlx4_steer_type steer,
        u32 qpn)
{
        struct mlx4_steer *s_steer = &mlx4_priv(dev)->steer[pf_num];
        struct mlx4_promisc_qp *pqp;

        list_for_each_entry(pqp, &s_steer->promisc_qps[steer], list) {
                if (pqp->qpn == qpn)
                        return pqp;
        }
        /* not found */
        return NULL;
}
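
/*
 * Software steering state lives in mlx4_priv(dev)->steer[pf_num], where
 * the helpers below compute pf_num as vep_num on a single-port device
 * and as (vep_num << 1) | (port - 1) otherwise.  Each slot keeps two
 * lists per steering type (MLX4_UC_STEER / MLX4_MC_STEER): the
 * steer_entries[] that track MGM entries written to the hardware, and
 * the promisc_qps[] that must be present in every one of them.
 */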

/*
 * Add new entry to steering data structure.
 * All promisc QPs should be added as well
 */
static int new_steering_entry(struct mlx4_dev *dev, u8 vep_num, u8 port,
        enum mlx4_steer_type steer,
        unsigned int index, u32 qpn)
{
        struct mlx4_steer *s_steer;
        struct mlx4_cmd_mailbox *mailbox;
        struct mlx4_mgm *mgm;
        u32 members_count;
        struct mlx4_steer_index *new_entry;
        struct mlx4_promisc_qp *pqp;
        struct mlx4_promisc_qp *dqp = NULL;
        u32 prot;
        int err;
        u8 pf_num;

        pf_num = (dev->caps.num_ports == 1) ? vep_num : (vep_num << 1) | (port - 1);
        s_steer = &mlx4_priv(dev)->steer[pf_num];
        new_entry = kzalloc(sizeof *new_entry, GFP_KERNEL);
        if (!new_entry)
                return -ENOMEM;

        INIT_LIST_HEAD(&new_entry->duplicates);
        new_entry->index = index;
        list_add_tail(&new_entry->list, &s_steer->steer_entries[steer]);

        /* If the given qpn is also a promisc qp,
         * it should be inserted to duplicates list */
        pqp = get_promisc_qp(dev, pf_num, steer, qpn);
        if (pqp) {
                dqp = kmalloc(sizeof *dqp, GFP_KERNEL);
                if (!dqp) {
                        err = -ENOMEM;
                        goto out_alloc;
                }
                dqp->qpn = qpn;
                list_add_tail(&dqp->list, &new_entry->duplicates);
        }

        /* if no promisc qps for this vep, we are done */
        if (list_empty(&s_steer->promisc_qps[steer]))
                return 0;

        /* now need to add all the promisc qps to the new
         * steering entry, as they should also receive the packets
         * destined to this address */
        mailbox = mlx4_alloc_cmd_mailbox(dev);
        if (IS_ERR(mailbox)) {
                err = -ENOMEM;
                goto out_alloc;
        }
        mgm = mailbox->buf;

        err = mlx4_READ_ENTRY(dev, index, mailbox);
        if (err)
                goto out_mailbox;

        members_count = be32_to_cpu(mgm->members_count) & 0xffffff;
        prot = be32_to_cpu(mgm->members_count) >> 30;
        list_for_each_entry(pqp, &s_steer->promisc_qps[steer], list) {
                /* don't add already existing qpn */
                if (pqp->qpn == qpn)
                        continue;
                if (members_count == MLX4_QP_PER_MGM) {
                        /* out of space */
                        err = -ENOMEM;
                        goto out_mailbox;
                }

                /* add the qpn */
                mgm->qp[members_count++] = cpu_to_be32(pqp->qpn & MGM_QPN_MASK);
        }
        /* update the qps count and update the entry with all the promisc qps */
        mgm->members_count = cpu_to_be32(members_count | (prot << 30));
        err = mlx4_WRITE_ENTRY(dev, index, mailbox);

out_mailbox:
        mlx4_free_cmd_mailbox(dev, mailbox);
        if (!err)
                return 0;
out_alloc:
        if (dqp) {
                list_del(&dqp->list);
                kfree(dqp);
        }
        list_del(&new_entry->list);
        kfree(new_entry);
        return err;
}

/* update the data structures with existing steering entry */
static int existing_steering_entry(struct mlx4_dev *dev, u8 vep_num, u8 port,
        enum mlx4_steer_type steer,
        unsigned int index, u32 qpn)
{
        struct mlx4_steer *s_steer;
        struct mlx4_steer_index *tmp_entry, *entry = NULL;
        struct mlx4_promisc_qp *pqp;
        struct mlx4_promisc_qp *dqp;
        u8 pf_num;

        pf_num = (dev->caps.num_ports == 1) ? vep_num : (vep_num << 1) | (port - 1);
        s_steer = &mlx4_priv(dev)->steer[pf_num];

        pqp = get_promisc_qp(dev, pf_num, steer, qpn);
        if (!pqp)
                return 0; /* nothing to do */

        list_for_each_entry(tmp_entry, &s_steer->steer_entries[steer], list) {
                if (tmp_entry->index == index) {
                        entry = tmp_entry;
                        break;
                }
        }
        if (unlikely(!entry)) {
                mlx4_warn(dev, "Steering entry at index %x is not registered\n", index);
                return -EINVAL;
        }

        /* the given qpn is listed as a promisc qpn
         * we need to add it as a duplicate to this entry
         * for future references */
        list_for_each_entry(dqp, &entry->duplicates, list) {
                if (qpn == dqp->qpn)
                        return 0; /* qp is already duplicated */
        }

        /* add the qp as a duplicate on this index */
        dqp = kmalloc(sizeof *dqp, GFP_KERNEL);
        if (!dqp)
                return -ENOMEM;
        dqp->qpn = qpn;
        list_add_tail(&dqp->list, &entry->duplicates);
        return 0;
}

/* Check whether a qpn is a duplicate on steering entry
 * If so, it should not be removed from mgm */
static bool check_duplicate_entry(struct mlx4_dev *dev, u8 vep_num, u8 port,
        enum mlx4_steer_type steer,
        unsigned int index, u32 qpn)
{
        struct mlx4_steer *s_steer;
        struct mlx4_steer_index *tmp_entry, *entry = NULL;
        struct mlx4_promisc_qp *dqp, *tmp_dqp;
        u8 pf_num;

        pf_num = (dev->caps.num_ports == 1) ? vep_num : (vep_num << 1) | (port - 1);
        s_steer = &mlx4_priv(dev)->steer[pf_num];

        /* if qp is not promisc, it cannot be duplicated */
        if (!get_promisc_qp(dev, pf_num, steer, qpn))
                return false;

        /* The qp is promisc qp so it is a duplicate on this index
         * Find the index entry, and remove the duplicate */
        list_for_each_entry(tmp_entry, &s_steer->steer_entries[steer], list) {
                if (tmp_entry->index == index) {
                        entry = tmp_entry;
                        break;
                }
        }
        if (unlikely(!entry)) {
                mlx4_warn(dev, "Steering entry for index %x is not registered\n", index);
                return false;
        }
        list_for_each_entry_safe(dqp, tmp_dqp, &entry->duplicates, list) {
                if (dqp->qpn == qpn) {
                        list_del(&dqp->list);
                        kfree(dqp);
                }
        }
        return true;
}

/* If a steering entry contains only promisc QPs, it can be removed. */
static bool can_remove_steering_entry(struct mlx4_dev *dev, u8 vep_num, u8 port,
        enum mlx4_steer_type steer,
        unsigned int index, u32 tqpn)
{
        struct mlx4_steer *s_steer;
        struct mlx4_cmd_mailbox *mailbox;
        struct mlx4_mgm *mgm;
        struct mlx4_steer_index *entry = NULL, *tmp_entry;
        u32 qpn;
        u32 members_count;
        bool ret = false;
        int i;
        u8 pf_num;

        pf_num = (dev->caps.num_ports == 1) ? vep_num : (vep_num << 1) | (port - 1);
        s_steer = &mlx4_priv(dev)->steer[pf_num];

        mailbox = mlx4_alloc_cmd_mailbox(dev);
        if (IS_ERR(mailbox))
                return false;
        mgm = mailbox->buf;

        if (mlx4_READ_ENTRY(dev, index, mailbox))
                goto out;
        members_count = be32_to_cpu(mgm->members_count) & 0xffffff;
        for (i = 0; i < members_count; i++) {
                qpn = be32_to_cpu(mgm->qp[i]) & MGM_QPN_MASK;
                if (!get_promisc_qp(dev, pf_num, steer, qpn) && qpn != tqpn) {
                        /* the qp is not promisc, the entry can't be removed */
                        goto out;
                }
        }
        /* All the qps currently registered for this entry are promiscuous,
         * Checking for duplicates */
        ret = true;
        list_for_each_entry_safe(entry, tmp_entry, &s_steer->steer_entries[steer], list) {
                if (entry->index == index) {
                        if (list_empty(&entry->duplicates)) {
                                list_del(&entry->list);
                                kfree(entry);
                        } else {
                                /* This entry contains duplicates so it shouldn't be removed */
                                ret = false;
                                goto out;
                        }
                }
        }

out:
        mlx4_free_cmd_mailbox(dev, mailbox);
        return ret;
}

static int add_promisc_qp(struct mlx4_dev *dev, u8 vep_num, u8 port,
        enum mlx4_steer_type steer, u32 qpn)
{
        struct mlx4_steer *s_steer;
        struct mlx4_cmd_mailbox *mailbox;
        struct mlx4_mgm *mgm;
        struct mlx4_steer_index *entry;
        struct mlx4_promisc_qp *pqp;
        struct mlx4_promisc_qp *dqp;
        u32 members_count;
        u32 prot;
        int i;
        bool found;
        int last_index;
        int err;
        u8 pf_num;
        struct mlx4_priv *priv = mlx4_priv(dev);

        pf_num = (dev->caps.num_ports == 1) ? vep_num : (vep_num << 1) | (port - 1);
        s_steer = &mlx4_priv(dev)->steer[pf_num];

        mutex_lock(&priv->mcg_table.mutex);

        if (get_promisc_qp(dev, pf_num, steer, qpn)) {
                err = 0;  /* Nothing to do, already exists */
                goto out_mutex;
        }

        pqp = kmalloc(sizeof *pqp, GFP_KERNEL);
        if (!pqp) {
                err = -ENOMEM;
                goto out_mutex;
        }
        pqp->qpn = qpn;

        mailbox = mlx4_alloc_cmd_mailbox(dev);
        if (IS_ERR(mailbox)) {
                err = -ENOMEM;
                goto out_alloc;
        }
        mgm = mailbox->buf;

        /* the promisc qp needs to be added for each one of the steering
         * entries, if it already exists, needs to be added as a duplicate
         * for this entry */
        list_for_each_entry(entry, &s_steer->steer_entries[steer], list) {
                err = mlx4_READ_ENTRY(dev, entry->index, mailbox);
                if (err)
                        goto out_mailbox;

                members_count = be32_to_cpu(mgm->members_count) & 0xffffff;
                prot = be32_to_cpu(mgm->members_count) >> 30;
                found = false;
                for (i = 0; i < members_count; i++) {
                        if ((be32_to_cpu(mgm->qp[i]) & MGM_QPN_MASK) == qpn) {
                                /* Entry already exists, add to duplicates */
                                dqp = kmalloc(sizeof *dqp, GFP_KERNEL);
                                if (!dqp)
                                        goto out_mailbox;
                                dqp->qpn = qpn;
                                list_add_tail(&dqp->list, &entry->duplicates);
                                found = true;
                        }
                }
                if (!found) {
                        /* Need to add the qpn to mgm */
                        if (members_count == MLX4_QP_PER_MGM) {
                                /* entry is full */
                                err = -ENOMEM;
                                goto out_mailbox;
                        }
                        mgm->qp[members_count++] = cpu_to_be32(qpn & MGM_QPN_MASK);
                        mgm->members_count = cpu_to_be32(members_count | (prot << 30));
                        err = mlx4_WRITE_ENTRY(dev, entry->index, mailbox);
                        if (err)
                                goto out_mailbox;
                }
                last_index = entry->index;
        }

        /* add the new qpn to list of promisc qps */
        list_add_tail(&pqp->list, &s_steer->promisc_qps[steer]);
        /* now need to add all the promisc qps to default entry */
        memset(mgm, 0, sizeof *mgm);
        members_count = 0;
        list_for_each_entry(dqp, &s_steer->promisc_qps[steer], list)
                mgm->qp[members_count++] = cpu_to_be32(dqp->qpn & MGM_QPN_MASK);
        mgm->members_count = cpu_to_be32(members_count | MLX4_PROT_ETH << 30);

        err = mlx4_WRITE_PROMISC(dev, vep_num, port, steer, mailbox);
        if (err)
                goto out_list;

        mlx4_free_cmd_mailbox(dev, mailbox);
        mutex_unlock(&priv->mcg_table.mutex);
        return 0;

out_list:
        list_del(&pqp->list);
out_mailbox:
        mlx4_free_cmd_mailbox(dev, mailbox);
out_alloc:
        kfree(pqp);
out_mutex:
        mutex_unlock(&priv->mcg_table.mutex);
        return err;
}

static int remove_promisc_qp(struct mlx4_dev *dev, u8 vep_num, u8 port,
        enum mlx4_steer_type steer, u32 qpn)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        struct mlx4_steer *s_steer;
        struct mlx4_cmd_mailbox *mailbox;
        struct mlx4_mgm *mgm;
        struct mlx4_steer_index *entry;
        struct mlx4_promisc_qp *pqp;
        struct mlx4_promisc_qp *dqp;
        u32 members_count;
        bool found;
        bool back_to_list = false;
        int loc, i;
        int err;
        u8 pf_num;

        pf_num = (dev->caps.num_ports == 1) ? vep_num : (vep_num << 1) | (port - 1);
        s_steer = &mlx4_priv(dev)->steer[pf_num];
        mutex_lock(&priv->mcg_table.mutex);

        pqp = get_promisc_qp(dev, pf_num, steer, qpn);
        if (unlikely(!pqp)) {
                mlx4_warn(dev, "QP %x is not promiscuous QP\n", qpn);
                /* nothing to do */
                err = 0;
                goto out_mutex;
        }

        /* remove from list of promisc qps */
        list_del(&pqp->list);

        /* set the default entry not to include the removed one */
        mailbox = mlx4_alloc_cmd_mailbox(dev);
        if (IS_ERR(mailbox)) {
                err = -ENOMEM;
                back_to_list = true;
                goto out_list;
        }
        mgm = mailbox->buf;
        members_count = 0;
        list_for_each_entry(dqp, &s_steer->promisc_qps[steer], list)
                mgm->qp[members_count++] = cpu_to_be32(dqp->qpn & MGM_QPN_MASK);
        mgm->members_count = cpu_to_be32(members_count | MLX4_PROT_ETH << 30);

        err = mlx4_WRITE_PROMISC(dev, vep_num, port, steer, mailbox);
        if (err)
                goto out_mailbox;

        /* remove the qp from all the steering entries */
        list_for_each_entry(entry, &s_steer->steer_entries[steer], list) {
                found = false;
                list_for_each_entry(dqp, &entry->duplicates, list) {
                        if (dqp->qpn == qpn) {
                                found = true;
                                break;
                        }
                }
                if (found) {
                        /* a duplicate, no need to change the mgm,
                         * only update the duplicates list */
                        list_del(&dqp->list);
                        kfree(dqp);
                } else {
                        err = mlx4_READ_ENTRY(dev, entry->index, mailbox);
                        if (err)
                                goto out_mailbox;
                        members_count = be32_to_cpu(mgm->members_count) & 0xffffff;
                        for (loc = -1, i = 0; i < members_count; ++i)
                                if ((be32_to_cpu(mgm->qp[i]) & MGM_QPN_MASK) == qpn)
                                        loc = i;

                        mgm->members_count = cpu_to_be32(--members_count |
                                (MLX4_PROT_ETH << 30));
                        mgm->qp[loc] = mgm->qp[i - 1];
                        mgm->qp[i - 1] = 0;

                        err = mlx4_WRITE_ENTRY(dev, entry->index, mailbox);
                        if (err)
                                goto out_mailbox;
                }
        }

out_mailbox:
        mlx4_free_cmd_mailbox(dev, mailbox);
out_list:
        if (back_to_list)
                list_add_tail(&pqp->list, &s_steer->promisc_qps[steer]);
        else
                kfree(pqp);
out_mutex:
        mutex_unlock(&priv->mcg_table.mutex);
        return err;
}

/*
 * Caller must hold MCG table semaphore.  gid and mgm parameters must
 * be properly aligned for command interface.
 *
 * Returns 0 unless a firmware command error occurs.
 *
 * If GID is found in MGM or MGM is empty, *index = *hash, *prev = -1
 * and *mgm holds MGM entry.
 *
 * if GID is found in AMGM, *index = index in AMGM, *prev = index of
 * previous entry in hash chain and *mgm holds AMGM entry.
 *
 * If no AMGM exists for given gid, *index = -1, *prev = index of last
 * entry in hash chain and *mgm holds end of hash chain.
 */
static int find_entry(struct mlx4_dev *dev, u8 port,
        u8 *gid, enum mlx4_protocol prot,
        enum mlx4_steer_type steer,
        struct mlx4_cmd_mailbox *mgm_mailbox,
        u16 *hash, int *prev, int *index)
{
        struct mlx4_cmd_mailbox *mailbox;
        struct mlx4_mgm *mgm = mgm_mailbox->buf;
        u8 *mgid;
        int err;
        u8 op_mod = (prot == MLX4_PROT_ETH) ?
                !!(dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER) : 0;

        mailbox = mlx4_alloc_cmd_mailbox(dev);
        if (IS_ERR(mailbox))
                return -ENOMEM;
        mgid = mailbox->buf;

        memcpy(mgid, gid, 16);

        err = mlx4_GID_HASH(dev, mailbox, hash, op_mod);
        mlx4_free_cmd_mailbox(dev, mailbox);
        if (err)
                return err;

        if (0)
                mlx4_dbg(dev, "Hash for %pI6 is %04x\n", gid, *hash);

        *index = *hash;
        *prev  = -1;

        do {
                err = mlx4_READ_ENTRY(dev, *index, mgm_mailbox);
                if (err)
                        return err;

                if (!(be32_to_cpu(mgm->members_count) & 0xffffff)) {
                        if (*index != *hash) {
                                mlx4_err(dev, "Found zero MGID in AMGM.\n");
                                err = -EINVAL;
                        }
                        return err;
                }

                if (!memcmp(mgm->gid, gid, 16) &&
                    be32_to_cpu(mgm->members_count) >> 30 == prot)
                        return err;

                *prev = *index;
                *index = be32_to_cpu(mgm->next_gid_index) >> 6;
        } while (*index);

        *index = -1;
        return err;
}

int mlx4_qp_attach_common(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
        int block_mcast_loopback, enum mlx4_protocol prot,
        enum mlx4_steer_type steer)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        struct mlx4_cmd_mailbox *mailbox;
        struct mlx4_mgm *mgm;
        u32 members_count;
        u16 hash;
        int index, prev;
        int link = 0;
        int i;
        int err;
        u8 port = gid[5];
        u8 new_entry = 0;

        mailbox = mlx4_alloc_cmd_mailbox(dev);
        if (IS_ERR(mailbox))
                return PTR_ERR(mailbox);
        mgm = mailbox->buf;

        mutex_lock(&priv->mcg_table.mutex);
        err = find_entry(dev, port, gid, prot, steer,
                mailbox, &hash, &prev, &index);
        if (err)
                goto out;

        if (index != -1) {
                if (!(be32_to_cpu(mgm->members_count) & 0xffffff)) {
                        new_entry = 1;
                        memcpy(mgm->gid, gid, 16);
                }
        } else {
                link = 1;

                index = mlx4_bitmap_alloc(&priv->mcg_table.bitmap);
                if (index == -1) {
                        mlx4_err(dev, "No AMGM entries left\n");
                        err = -ENOMEM;
                        goto out;
                }
                index += dev->caps.num_mgms;

                memset(mgm, 0, sizeof *mgm);
                memcpy(mgm->gid, gid, 16);
        }

        members_count = be32_to_cpu(mgm->members_count) & 0xffffff;
        if (members_count == MLX4_QP_PER_MGM) {
                mlx4_err(dev, "MGM at index %x is full.\n", index);
                err = -ENOMEM;
                goto out;
        }

        for (i = 0; i < members_count; ++i)
                if ((be32_to_cpu(mgm->qp[i]) & MGM_QPN_MASK) == qp->qpn) {
                        mlx4_dbg(dev, "QP %06x already a member of MGM\n", qp->qpn);
                        err = 0;
                        goto out;
                }

        if (block_mcast_loopback)
                mgm->qp[members_count++] = cpu_to_be32((qp->qpn & MGM_QPN_MASK) |
                        (1U << MGM_BLCK_LB_BIT));
        else
                mgm->qp[members_count++] = cpu_to_be32(qp->qpn & MGM_QPN_MASK);

        mgm->members_count = cpu_to_be32(members_count | (u32) prot << 30);

        err = mlx4_WRITE_ENTRY(dev, index, mailbox);
        if (err)
                goto out;

        if (!link)
                goto out;

        err = mlx4_READ_ENTRY(dev, prev, mailbox);
        if (err)
                goto out;

        mgm->next_gid_index = cpu_to_be32(index << 6);

        err = mlx4_WRITE_ENTRY(dev, prev, mailbox);
        if (err)
                goto out;

out:
        if (prot == MLX4_PROT_ETH) {
                /* manage the steering entry for promisc mode */
                if (new_entry)
                        new_steering_entry(dev, 0, port, steer, index, qp->qpn);
                else
                        existing_steering_entry(dev, 0, port, steer,
                                index, qp->qpn);
        }
        if (err && link && index != -1) {
                if (index < dev->caps.num_mgms)
                        mlx4_warn(dev, "Got AMGM index %d < %d",
                                index, dev->caps.num_mgms);
                else
                        mlx4_bitmap_free(&priv->mcg_table.bitmap,
                                index - dev->caps.num_mgms);
        }
        mutex_unlock(&priv->mcg_table.mutex);

        mlx4_free_cmd_mailbox(dev, mailbox);
        return err;
}

int mlx4_qp_detach_common(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
        enum mlx4_protocol prot, enum mlx4_steer_type steer)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        struct mlx4_cmd_mailbox *mailbox;
        struct mlx4_mgm *mgm;
        u32 members_count;
        u16 hash;
        int prev, index;
        int i, loc;
        int err;
        u8 port = gid[5];
        bool removed_entry = false;

        mailbox = mlx4_alloc_cmd_mailbox(dev);
        if (IS_ERR(mailbox))
                return PTR_ERR(mailbox);
        mgm = mailbox->buf;

        mutex_lock(&priv->mcg_table.mutex);

        err = find_entry(dev, port, gid, prot, steer,
                mailbox, &hash, &prev, &index);
        if (err)
                goto out;

        if (index == -1) {
                mlx4_err(dev, "MGID %pI6 not found\n", gid);
                err = -EINVAL;
                goto out;
        }

        /* if this qp is also a promisc qp, it shouldn't be removed */
        if (prot == MLX4_PROT_ETH &&
            check_duplicate_entry(dev, 0, port, steer, index, qp->qpn))
                goto out;

        members_count = be32_to_cpu(mgm->members_count) & 0xffffff;
        for (loc = -1, i = 0; i < members_count; ++i)
                if ((be32_to_cpu(mgm->qp[i]) & MGM_QPN_MASK) == qp->qpn)
                        loc = i;

        if (loc == -1) {
                mlx4_err(dev, "QP %06x not found in MGM\n", qp->qpn);
                err = -EINVAL;
                goto out;
        }

        mgm->members_count = cpu_to_be32(--members_count | (u32) prot << 30);
        mgm->qp[loc]   = mgm->qp[i - 1];
        mgm->qp[i - 1] = 0;

        if (prot == MLX4_PROT_ETH)
                removed_entry = can_remove_steering_entry(dev, 0, port, steer, index, qp->qpn);
        if (i != 1 && (prot != MLX4_PROT_ETH || !removed_entry)) {
                err = mlx4_WRITE_ENTRY(dev, index, mailbox);
                goto out;
        }

        /* We are going to delete the entry, members count should be 0 */
        mgm->members_count = cpu_to_be32((u32) prot << 30);

        if (prev == -1) {
                /* Remove entry from MGM */
                int amgm_index = be32_to_cpu(mgm->next_gid_index) >> 6;
                if (amgm_index) {
                        err = mlx4_READ_ENTRY(dev, amgm_index, mailbox);
                        if (err)
                                goto out;
                } else
                        memset(mgm->gid, 0, 16);

                err = mlx4_WRITE_ENTRY(dev, index, mailbox);
                if (err)
                        goto out;

                if (amgm_index) {
                        if (amgm_index < dev->caps.num_mgms)
                                mlx4_warn(dev, "MGM entry %d had AMGM index %d < %d",
                                        index, amgm_index, dev->caps.num_mgms);
                        else
                                mlx4_bitmap_free(&priv->mcg_table.bitmap,
                                        amgm_index - dev->caps.num_mgms);
                }
        } else {
                /* Remove entry from AMGM */
                int cur_next_index = be32_to_cpu(mgm->next_gid_index) >> 6;
                err = mlx4_READ_ENTRY(dev, prev, mailbox);
                if (err)
                        goto out;

                mgm->next_gid_index = cpu_to_be32(cur_next_index << 6);

                err = mlx4_WRITE_ENTRY(dev, prev, mailbox);
                if (err)
                        goto out;

                if (index < dev->caps.num_mgms)
                        mlx4_warn(dev, "entry %d had next AMGM index %d < %d",
                                prev, index, dev->caps.num_mgms);
                else
                        mlx4_bitmap_free(&priv->mcg_table.bitmap,
                                index - dev->caps.num_mgms);
        }

out:
        mutex_unlock(&priv->mcg_table.mutex);

        mlx4_free_cmd_mailbox(dev, mailbox);
        return err;
}

int mlx4_multicast_attach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
        int block_mcast_loopback, enum mlx4_protocol prot)
{
        enum mlx4_steer_type steer;

        steer = (is_valid_ether_addr(&gid[10])) ? MLX4_UC_STEER : MLX4_MC_STEER;

        if (prot == MLX4_PROT_ETH &&
            !(dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER))
                return 0;

        if (prot == MLX4_PROT_ETH)
                gid[7] |= (steer << 1);

        return mlx4_qp_attach_common(dev, qp, gid,
                block_mcast_loopback, prot,
                steer);
}
EXPORT_SYMBOL_GPL(mlx4_multicast_attach);

int mlx4_multicast_detach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
        enum mlx4_protocol prot)
{
        enum mlx4_steer_type steer;

        steer = (is_valid_ether_addr(&gid[10])) ? MLX4_UC_STEER : MLX4_MC_STEER;

        if (prot == MLX4_PROT_ETH &&
            !(dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER))
                return 0;

        if (prot == MLX4_PROT_ETH)
                gid[7] |= (steer << 1);

        return mlx4_qp_detach_common(dev, qp, gid, prot, steer);
}
EXPORT_SYMBOL_GPL(mlx4_multicast_detach);

int mlx4_multicast_promisc_add(struct mlx4_dev *dev, u32 qpn, u8 port)
{
        if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER))
                return 0;

        return add_promisc_qp(dev, 0, port, MLX4_MC_STEER, qpn);
}
EXPORT_SYMBOL_GPL(mlx4_multicast_promisc_add);

int mlx4_multicast_promisc_remove(struct mlx4_dev *dev, u32 qpn, u8 port)
{
        if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER))
                return 0;

        return remove_promisc_qp(dev, 0, port, MLX4_MC_STEER, qpn);
}
EXPORT_SYMBOL_GPL(mlx4_multicast_promisc_remove);

int mlx4_unicast_promisc_add(struct mlx4_dev *dev, u32 qpn, u8 port)
{
        if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER))
                return 0;

        return add_promisc_qp(dev, 0, port, MLX4_UC_STEER, qpn);
}
EXPORT_SYMBOL_GPL(mlx4_unicast_promisc_add);

int mlx4_unicast_promisc_remove(struct mlx4_dev *dev, u32 qpn, u8 port)
{
        if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER))
                return 0;

        return remove_promisc_qp(dev, 0, port, MLX4_UC_STEER, qpn);
}
EXPORT_SYMBOL_GPL(mlx4_unicast_promisc_remove);

int mlx4_init_mcg_table(struct mlx4_dev *dev)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        int err;

        err = mlx4_bitmap_init(&priv->mcg_table.bitmap, dev->caps.num_amgms,
                dev->caps.num_amgms - 1, 0, 0);
        if (err)
                return err;

        mutex_init(&priv->mcg_table.mutex);

        return 0;
}

void mlx4_cleanup_mcg_table(struct mlx4_dev *dev)
{
        mlx4_bitmap_cleanup(&mlx4_priv(dev)->mcg_table.bitmap);
}
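
/*
 * Usage sketch (illustrative only, not part of the original file): a
 * consumer such as the mlx4 Ethernet driver would typically build a
 * steering GID with the port in gid[5] and the MAC in gid[10..15],
 * then attach a receive QP and/or register it as promiscuous.  The
 * names mc_mac and my_qp below are hypothetical.
 *
 *      u8 gid[16] = {0};
 *
 *      gid[5] = port;
 *      memcpy(&gid[10], mc_mac, ETH_ALEN);
 *      err = mlx4_multicast_attach(dev, &my_qp, gid, 0, MLX4_PROT_ETH);
 *      if (!err)
 *              err = mlx4_multicast_promisc_add(dev, my_qp.qpn, port);
 */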