mcg.c 24 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893894895896897898899900901902903904905906907908909910911912913914915916917918919920921922923924925926927928929
  1. /*
  2. * Copyright (c) 2006, 2007 Cisco Systems, Inc. All rights reserved.
  3. * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
  4. *
  5. * This software is available to you under a choice of one of two
  6. * licenses. You may choose to be licensed under the terms of the GNU
  7. * General Public License (GPL) Version 2, available from the file
  8. * COPYING in the main directory of this source tree, or the
  9. * OpenIB.org BSD license below:
  10. *
  11. * Redistribution and use in source and binary forms, with or
  12. * without modification, are permitted provided that the following
  13. * conditions are met:
  14. *
  15. * - Redistributions of source code must retain the above
  16. * copyright notice, this list of conditions and the following
  17. * disclaimer.
  18. *
  19. * - Redistributions in binary form must reproduce the above
  20. * copyright notice, this list of conditions and the following
  21. * disclaimer in the documentation and/or other materials
  22. * provided with the distribution.
  23. *
  24. * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
  25. * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
  26. * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
  27. * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
  28. * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
  29. * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
  30. * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  31. * SOFTWARE.
  32. */
  33. #include <linux/string.h>
  34. #include <linux/etherdevice.h>
  35. #include <linux/mlx4/cmd.h>
  36. #include <linux/export.h>
  37. #include "mlx4.h"
  38. #define MGM_QPN_MASK 0x00FFFFFF
  39. #define MGM_BLCK_LB_BIT 30
  40. static const u8 zero_gid[16]; /* automatically initialized to 0 */
  41. static int mlx4_READ_ENTRY(struct mlx4_dev *dev, int index,
  42. struct mlx4_cmd_mailbox *mailbox)
  43. {
  44. return mlx4_cmd_box(dev, 0, mailbox->dma, index, 0, MLX4_CMD_READ_MCG,
  45. MLX4_CMD_TIME_CLASS_A);
  46. }
  47. static int mlx4_WRITE_ENTRY(struct mlx4_dev *dev, int index,
  48. struct mlx4_cmd_mailbox *mailbox)
  49. {
  50. return mlx4_cmd(dev, mailbox->dma, index, 0, MLX4_CMD_WRITE_MCG,
  51. MLX4_CMD_TIME_CLASS_A);
  52. }
  53. static int mlx4_WRITE_PROMISC(struct mlx4_dev *dev, u8 vep_num, u8 port, u8 steer,
  54. struct mlx4_cmd_mailbox *mailbox)
  55. {
  56. u32 in_mod;
  57. in_mod = (u32) vep_num << 24 | (u32) port << 16 | steer << 1;
  58. return mlx4_cmd(dev, mailbox->dma, in_mod, 0x1,
  59. MLX4_CMD_WRITE_MCG, MLX4_CMD_TIME_CLASS_A);
  60. }
  61. static int mlx4_GID_HASH(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
  62. u16 *hash, u8 op_mod)
  63. {
  64. u64 imm;
  65. int err;
  66. err = mlx4_cmd_imm(dev, mailbox->dma, &imm, 0, op_mod,
  67. MLX4_CMD_MGID_HASH, MLX4_CMD_TIME_CLASS_A);
  68. if (!err)
  69. *hash = imm;
  70. return err;
  71. }
  72. static struct mlx4_promisc_qp *get_promisc_qp(struct mlx4_dev *dev, u8 pf_num,
  73. enum mlx4_steer_type steer,
  74. u32 qpn)
  75. {
  76. struct mlx4_steer *s_steer = &mlx4_priv(dev)->steer[pf_num];
  77. struct mlx4_promisc_qp *pqp;
  78. list_for_each_entry(pqp, &s_steer->promisc_qps[steer], list) {
  79. if (pqp->qpn == qpn)
  80. return pqp;
  81. }
  82. /* not found */
  83. return NULL;
  84. }
/*
 * Add new entry to steering data structure.
 * All promisc QPs should be added as well: they must appear in every MGM
 * entry so that they keep receiving traffic destined to this address.
 * On any failure everything added here is unwound before returning.
 */
static int new_steering_entry(struct mlx4_dev *dev, u8 vep_num, u8 port,
			      enum mlx4_steer_type steer,
			      unsigned int index, u32 qpn)
{
	struct mlx4_steer *s_steer;
	struct mlx4_cmd_mailbox *mailbox;
	struct mlx4_mgm *mgm;
	u32 members_count;
	struct mlx4_steer_index *new_entry;
	struct mlx4_promisc_qp *pqp;
	struct mlx4_promisc_qp *dqp = NULL;
	u32 prot;
	int err;
	u8 pf_num;

	/* Single-port devices index the steer array by vep alone;
	 * dual-port devices fold the (1-based) port into the low bit. */
	pf_num = (dev->caps.num_ports == 1) ? vep_num : (vep_num << 1) | (port - 1);
	s_steer = &mlx4_priv(dev)->steer[pf_num];

	/* Software tracking entry for this MGM index */
	new_entry = kzalloc(sizeof *new_entry, GFP_KERNEL);
	if (!new_entry)
		return -ENOMEM;

	INIT_LIST_HEAD(&new_entry->duplicates);
	new_entry->index = index;
	list_add_tail(&new_entry->list, &s_steer->steer_entries[steer]);

	/* If the given qpn is also a promisc qp,
	 * it should be inserted to duplicates list
	 */
	pqp = get_promisc_qp(dev, pf_num, steer, qpn);
	if (pqp) {
		dqp = kmalloc(sizeof *dqp, GFP_KERNEL);
		if (!dqp) {
			err = -ENOMEM;
			goto out_alloc;
		}
		dqp->qpn = qpn;
		list_add_tail(&dqp->list, &new_entry->duplicates);
	}

	/* if no promisc qps for this vep, we are done */
	if (list_empty(&s_steer->promisc_qps[steer]))
		return 0;

	/* now need to add all the promisc qps to the new
	 * steering entry, as they should also receive the packets
	 * destined to this address */
	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox)) {
		err = -ENOMEM;
		goto out_alloc;
	}
	mgm = mailbox->buf;

	err = mlx4_READ_ENTRY(dev, index, mailbox);
	if (err)
		goto out_mailbox;

	/* member count lives in the low 24 bits, protocol in bits 30-31 */
	members_count = be32_to_cpu(mgm->members_count) & 0xffffff;
	prot = be32_to_cpu(mgm->members_count) >> 30;
	list_for_each_entry(pqp, &s_steer->promisc_qps[steer], list) {
		/* don't add already existing qpn */
		if (pqp->qpn == qpn)
			continue;
		if (members_count == MLX4_QP_PER_MGM) {
			/* out of space */
			err = -ENOMEM;
			goto out_mailbox;
		}
		/* add the qpn */
		mgm->qp[members_count++] = cpu_to_be32(pqp->qpn & MGM_QPN_MASK);
	}
	/* update the qps count and update the entry with all the promisc qps*/
	mgm->members_count = cpu_to_be32(members_count | (prot << 30));
	err = mlx4_WRITE_ENTRY(dev, index, mailbox);

out_mailbox:
	mlx4_free_cmd_mailbox(dev, mailbox);
	if (!err)
		return 0;
out_alloc:
	/* unwind: drop the duplicate record and the tracking entry */
	if (dqp) {
		list_del(&dqp->list);
		kfree(dqp);
	}
	list_del(&new_entry->list);
	kfree(new_entry);
	return err;
}
  169. /* update the data structures with existing steering entry */
  170. static int existing_steering_entry(struct mlx4_dev *dev, u8 vep_num, u8 port,
  171. enum mlx4_steer_type steer,
  172. unsigned int index, u32 qpn)
  173. {
  174. struct mlx4_steer *s_steer;
  175. struct mlx4_steer_index *tmp_entry, *entry = NULL;
  176. struct mlx4_promisc_qp *pqp;
  177. struct mlx4_promisc_qp *dqp;
  178. u8 pf_num;
  179. pf_num = (dev->caps.num_ports == 1) ? vep_num : (vep_num << 1) | (port - 1);
  180. s_steer = &mlx4_priv(dev)->steer[pf_num];
  181. pqp = get_promisc_qp(dev, pf_num, steer, qpn);
  182. if (!pqp)
  183. return 0; /* nothing to do */
  184. list_for_each_entry(tmp_entry, &s_steer->steer_entries[steer], list) {
  185. if (tmp_entry->index == index) {
  186. entry = tmp_entry;
  187. break;
  188. }
  189. }
  190. if (unlikely(!entry)) {
  191. mlx4_warn(dev, "Steering entry at index %x is not registered\n", index);
  192. return -EINVAL;
  193. }
  194. /* the given qpn is listed as a promisc qpn
  195. * we need to add it as a duplicate to this entry
  196. * for future references */
  197. list_for_each_entry(dqp, &entry->duplicates, list) {
  198. if (qpn == dqp->qpn)
  199. return 0; /* qp is already duplicated */
  200. }
  201. /* add the qp as a duplicate on this index */
  202. dqp = kmalloc(sizeof *dqp, GFP_KERNEL);
  203. if (!dqp)
  204. return -ENOMEM;
  205. dqp->qpn = qpn;
  206. list_add_tail(&dqp->list, &entry->duplicates);
  207. return 0;
  208. }
  209. /* Check whether a qpn is a duplicate on steering entry
  210. * If so, it should not be removed from mgm */
  211. static bool check_duplicate_entry(struct mlx4_dev *dev, u8 vep_num, u8 port,
  212. enum mlx4_steer_type steer,
  213. unsigned int index, u32 qpn)
  214. {
  215. struct mlx4_steer *s_steer;
  216. struct mlx4_steer_index *tmp_entry, *entry = NULL;
  217. struct mlx4_promisc_qp *dqp, *tmp_dqp;
  218. u8 pf_num;
  219. pf_num = (dev->caps.num_ports == 1) ? vep_num : (vep_num << 1) | (port - 1);
  220. s_steer = &mlx4_priv(dev)->steer[pf_num];
  221. /* if qp is not promisc, it cannot be duplicated */
  222. if (!get_promisc_qp(dev, pf_num, steer, qpn))
  223. return false;
  224. /* The qp is promisc qp so it is a duplicate on this index
  225. * Find the index entry, and remove the duplicate */
  226. list_for_each_entry(tmp_entry, &s_steer->steer_entries[steer], list) {
  227. if (tmp_entry->index == index) {
  228. entry = tmp_entry;
  229. break;
  230. }
  231. }
  232. if (unlikely(!entry)) {
  233. mlx4_warn(dev, "Steering entry for index %x is not registered\n", index);
  234. return false;
  235. }
  236. list_for_each_entry_safe(dqp, tmp_dqp, &entry->duplicates, list) {
  237. if (dqp->qpn == qpn) {
  238. list_del(&dqp->list);
  239. kfree(dqp);
  240. }
  241. }
  242. return true;
  243. }
/* If a steering entry contains only promisc QPs, it can be removed.
 * @tqpn is the QP currently being detached, so its presence in the MGM
 * does not disqualify removal. */
static bool can_remove_steering_entry(struct mlx4_dev *dev, u8 vep_num, u8 port,
				      enum mlx4_steer_type steer,
				      unsigned int index, u32 tqpn)
{
	struct mlx4_steer *s_steer;
	struct mlx4_cmd_mailbox *mailbox;
	struct mlx4_mgm *mgm;
	struct mlx4_steer_index *entry = NULL, *tmp_entry;
	u32 qpn;
	u32 members_count;
	bool ret = false;
	int i;
	u8 pf_num;

	pf_num = (dev->caps.num_ports == 1) ? vep_num : (vep_num << 1) | (port - 1);
	s_steer = &mlx4_priv(dev)->steer[pf_num];

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return false;	/* cannot inspect the entry -> keep it */
	mgm = mailbox->buf;

	if (mlx4_READ_ENTRY(dev, index, mailbox))
		goto out;
	members_count = be32_to_cpu(mgm->members_count) & 0xffffff;
	for (i = 0; i < members_count; i++) {
		qpn = be32_to_cpu(mgm->qp[i]) & MGM_QPN_MASK;
		if (!get_promisc_qp(dev, pf_num, steer, qpn) && qpn != tqpn) {
			/* the qp is not promisc, the entry can't be removed */
			goto out;
		}
	}
	/* All the qps currently registered for this entry are promiscuous,
	 * Checking for duplicates */
	ret = true;
	list_for_each_entry_safe(entry, tmp_entry, &s_steer->steer_entries[steer], list) {
		if (entry->index == index) {
			if (list_empty(&entry->duplicates)) {
				/* no duplicates: drop the software tracking entry too */
				list_del(&entry->list);
				kfree(entry);
			} else {
				/* This entry contains duplicates so it shouldn't be removed */
				ret = false;
				goto out;
			}
		}
	}
out:
	mlx4_free_cmd_mailbox(dev, mailbox);
	return ret;
}
  293. static int add_promisc_qp(struct mlx4_dev *dev, u8 vep_num, u8 port,
  294. enum mlx4_steer_type steer, u32 qpn)
  295. {
  296. struct mlx4_steer *s_steer;
  297. struct mlx4_cmd_mailbox *mailbox;
  298. struct mlx4_mgm *mgm;
  299. struct mlx4_steer_index *entry;
  300. struct mlx4_promisc_qp *pqp;
  301. struct mlx4_promisc_qp *dqp;
  302. u32 members_count;
  303. u32 prot;
  304. int i;
  305. bool found;
  306. int last_index;
  307. int err;
  308. u8 pf_num;
  309. struct mlx4_priv *priv = mlx4_priv(dev);
  310. pf_num = (dev->caps.num_ports == 1) ? vep_num : (vep_num << 1) | (port - 1);
  311. s_steer = &mlx4_priv(dev)->steer[pf_num];
  312. mutex_lock(&priv->mcg_table.mutex);
  313. if (get_promisc_qp(dev, pf_num, steer, qpn)) {
  314. err = 0; /* Noting to do, already exists */
  315. goto out_mutex;
  316. }
  317. pqp = kmalloc(sizeof *pqp, GFP_KERNEL);
  318. if (!pqp) {
  319. err = -ENOMEM;
  320. goto out_mutex;
  321. }
  322. pqp->qpn = qpn;
  323. mailbox = mlx4_alloc_cmd_mailbox(dev);
  324. if (IS_ERR(mailbox)) {
  325. err = -ENOMEM;
  326. goto out_alloc;
  327. }
  328. mgm = mailbox->buf;
  329. /* the promisc qp needs to be added for each one of the steering
  330. * entries, if it already exists, needs to be added as a duplicate
  331. * for this entry */
  332. list_for_each_entry(entry, &s_steer->steer_entries[steer], list) {
  333. err = mlx4_READ_ENTRY(dev, entry->index, mailbox);
  334. if (err)
  335. goto out_mailbox;
  336. members_count = be32_to_cpu(mgm->members_count) & 0xffffff;
  337. prot = be32_to_cpu(mgm->members_count) >> 30;
  338. found = false;
  339. for (i = 0; i < members_count; i++) {
  340. if ((be32_to_cpu(mgm->qp[i]) & MGM_QPN_MASK) == qpn) {
  341. /* Entry already exists, add to duplicates */
  342. dqp = kmalloc(sizeof *dqp, GFP_KERNEL);
  343. if (!dqp)
  344. goto out_mailbox;
  345. dqp->qpn = qpn;
  346. list_add_tail(&dqp->list, &entry->duplicates);
  347. found = true;
  348. }
  349. }
  350. if (!found) {
  351. /* Need to add the qpn to mgm */
  352. if (members_count == MLX4_QP_PER_MGM) {
  353. /* entry is full */
  354. err = -ENOMEM;
  355. goto out_mailbox;
  356. }
  357. mgm->qp[members_count++] = cpu_to_be32(qpn & MGM_QPN_MASK);
  358. mgm->members_count = cpu_to_be32(members_count | (prot << 30));
  359. err = mlx4_WRITE_ENTRY(dev, entry->index, mailbox);
  360. if (err)
  361. goto out_mailbox;
  362. }
  363. last_index = entry->index;
  364. }
  365. /* add the new qpn to list of promisc qps */
  366. list_add_tail(&pqp->list, &s_steer->promisc_qps[steer]);
  367. /* now need to add all the promisc qps to default entry */
  368. memset(mgm, 0, sizeof *mgm);
  369. members_count = 0;
  370. list_for_each_entry(dqp, &s_steer->promisc_qps[steer], list)
  371. mgm->qp[members_count++] = cpu_to_be32(dqp->qpn & MGM_QPN_MASK);
  372. mgm->members_count = cpu_to_be32(members_count | MLX4_PROT_ETH << 30);
  373. err = mlx4_WRITE_PROMISC(dev, vep_num, port, steer, mailbox);
  374. if (err)
  375. goto out_list;
  376. mlx4_free_cmd_mailbox(dev, mailbox);
  377. mutex_unlock(&priv->mcg_table.mutex);
  378. return 0;
  379. out_list:
  380. list_del(&pqp->list);
  381. out_mailbox:
  382. mlx4_free_cmd_mailbox(dev, mailbox);
  383. out_alloc:
  384. kfree(pqp);
  385. out_mutex:
  386. mutex_unlock(&priv->mcg_table.mutex);
  387. return err;
  388. }
  389. static int remove_promisc_qp(struct mlx4_dev *dev, u8 vep_num, u8 port,
  390. enum mlx4_steer_type steer, u32 qpn)
  391. {
  392. struct mlx4_priv *priv = mlx4_priv(dev);
  393. struct mlx4_steer *s_steer;
  394. struct mlx4_cmd_mailbox *mailbox;
  395. struct mlx4_mgm *mgm;
  396. struct mlx4_steer_index *entry;
  397. struct mlx4_promisc_qp *pqp;
  398. struct mlx4_promisc_qp *dqp;
  399. u32 members_count;
  400. bool found;
  401. bool back_to_list = false;
  402. int loc, i;
  403. int err;
  404. u8 pf_num;
  405. pf_num = (dev->caps.num_ports == 1) ? vep_num : (vep_num << 1) | (port - 1);
  406. s_steer = &mlx4_priv(dev)->steer[pf_num];
  407. mutex_lock(&priv->mcg_table.mutex);
  408. pqp = get_promisc_qp(dev, pf_num, steer, qpn);
  409. if (unlikely(!pqp)) {
  410. mlx4_warn(dev, "QP %x is not promiscuous QP\n", qpn);
  411. /* nothing to do */
  412. err = 0;
  413. goto out_mutex;
  414. }
  415. /*remove from list of promisc qps */
  416. list_del(&pqp->list);
  417. /* set the default entry not to include the removed one */
  418. mailbox = mlx4_alloc_cmd_mailbox(dev);
  419. if (IS_ERR(mailbox)) {
  420. err = -ENOMEM;
  421. back_to_list = true;
  422. goto out_list;
  423. }
  424. mgm = mailbox->buf;
  425. members_count = 0;
  426. list_for_each_entry(dqp, &s_steer->promisc_qps[steer], list)
  427. mgm->qp[members_count++] = cpu_to_be32(dqp->qpn & MGM_QPN_MASK);
  428. mgm->members_count = cpu_to_be32(members_count | MLX4_PROT_ETH << 30);
  429. err = mlx4_WRITE_PROMISC(dev, vep_num, port, steer, mailbox);
  430. if (err)
  431. goto out_mailbox;
  432. /* remove the qp from all the steering entries*/
  433. list_for_each_entry(entry, &s_steer->steer_entries[steer], list) {
  434. found = false;
  435. list_for_each_entry(dqp, &entry->duplicates, list) {
  436. if (dqp->qpn == qpn) {
  437. found = true;
  438. break;
  439. }
  440. }
  441. if (found) {
  442. /* a duplicate, no need to change the mgm,
  443. * only update the duplicates list */
  444. list_del(&dqp->list);
  445. kfree(dqp);
  446. } else {
  447. err = mlx4_READ_ENTRY(dev, entry->index, mailbox);
  448. if (err)
  449. goto out_mailbox;
  450. members_count = be32_to_cpu(mgm->members_count) & 0xffffff;
  451. for (loc = -1, i = 0; i < members_count; ++i)
  452. if ((be32_to_cpu(mgm->qp[i]) & MGM_QPN_MASK) == qpn)
  453. loc = i;
  454. mgm->members_count = cpu_to_be32(--members_count |
  455. (MLX4_PROT_ETH << 30));
  456. mgm->qp[loc] = mgm->qp[i - 1];
  457. mgm->qp[i - 1] = 0;
  458. err = mlx4_WRITE_ENTRY(dev, entry->index, mailbox);
  459. if (err)
  460. goto out_mailbox;
  461. }
  462. }
  463. out_mailbox:
  464. mlx4_free_cmd_mailbox(dev, mailbox);
  465. out_list:
  466. if (back_to_list)
  467. list_add_tail(&pqp->list, &s_steer->promisc_qps[steer]);
  468. else
  469. kfree(pqp);
  470. out_mutex:
  471. mutex_unlock(&priv->mcg_table.mutex);
  472. return err;
  473. }
/*
 * Caller must hold MCG table semaphore. gid and mgm parameters must
 * be properly aligned for command interface.
 *
 * Returns 0 unless a firmware command error occurs.
 *
 * If GID is found in MGM or MGM is empty, *index = *hash, *prev = -1
 * and *mgm holds MGM entry.
 *
 * if GID is found in AMGM, *index = index in AMGM, *prev = index of
 * previous entry in hash chain and *mgm holds AMGM entry.
 *
 * If no AMGM exists for given gid, *index = -1, *prev = index of last
 * entry in hash chain and *mgm holds end of hash chain.
 */
static int find_entry(struct mlx4_dev *dev, u8 port,
		      u8 *gid, enum mlx4_protocol prot,
		      enum mlx4_steer_type steer,
		      struct mlx4_cmd_mailbox *mgm_mailbox,
		      u16 *hash, int *prev, int *index)
{
	struct mlx4_cmd_mailbox *mailbox;
	struct mlx4_mgm *mgm = mgm_mailbox->buf;
	u8 *mgid;
	int err;
	/* Ethernet steering uses a different GID-hash mode when the
	 * device supports VEP MC steering */
	u8 op_mod = (prot == MLX4_PROT_ETH) ?
		!!(dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER) : 0;

	/* Ask firmware for the hash bucket of this GID */
	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return -ENOMEM;
	mgid = mailbox->buf;

	memcpy(mgid, gid, 16);

	err = mlx4_GID_HASH(dev, mailbox, hash, op_mod);
	mlx4_free_cmd_mailbox(dev, mailbox);
	if (err)
		return err;

	if (0)	/* deliberately disabled debug trace; flip to 1 when debugging */
		mlx4_dbg(dev, "Hash for %pI6 is %04x\n", gid, *hash);

	*index = *hash;
	*prev = -1;

	/* Walk the chain: the MGM hash bucket first, then any linked
	 * AMGM entries via next_gid_index */
	do {
		err = mlx4_READ_ENTRY(dev, *index, mgm_mailbox);
		if (err)
			return err;

		/* a zero member count marks an unused (end-of-chain) slot */
		if (!(be32_to_cpu(mgm->members_count) & 0xffffff)) {
			if (*index != *hash) {
				mlx4_err(dev, "Found zero MGID in AMGM.\n");
				err = -EINVAL;
			}
			return err;
		}

		/* a match requires both the GID and the protocol (bits 30-31) */
		if (!memcmp(mgm->gid, gid, 16) &&
		    be32_to_cpu(mgm->members_count) >> 30 == prot)
			return err;

		*prev = *index;
		*index = be32_to_cpu(mgm->next_gid_index) >> 6;
	} while (*index);

	*index = -1;
	return err;
}
/*
 * Attach @qp to the group identified by @gid for the given protocol and
 * steer type.  Reuses the MGM hash bucket when possible, otherwise
 * allocates an AMGM entry and links it onto the hash chain.
 * Returns 0 on success or a negative errno.
 */
int mlx4_qp_attach_common(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
			  int block_mcast_loopback, enum mlx4_protocol prot,
			  enum mlx4_steer_type steer)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_cmd_mailbox *mailbox;
	struct mlx4_mgm *mgm;
	u32 members_count;
	u16 hash;
	int index, prev;
	int link = 0;
	int i;
	int err;
	u8 port = gid[5];	/* port number is carried in byte 5 of the GID */
	u8 new_entry = 0;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);
	mgm = mailbox->buf;

	mutex_lock(&priv->mcg_table.mutex);

	err = find_entry(dev, port, gid, prot, steer,
			 mailbox, &hash, &prev, &index);
	if (err)
		goto out;

	if (index != -1) {
		/* bucket found; a zero member count means it is unused */
		if (!(be32_to_cpu(mgm->members_count) & 0xffffff)) {
			new_entry = 1;
			memcpy(mgm->gid, gid, 16);
		}
	} else {
		/* end of chain reached: allocate an AMGM entry and link it */
		link = 1;

		index = mlx4_bitmap_alloc(&priv->mcg_table.bitmap);
		if (index == -1) {
			mlx4_err(dev, "No AMGM entries left\n");
			err = -ENOMEM;
			goto out;
		}
		/* AMGM indices start after the MGM table */
		index += dev->caps.num_mgms;

		memset(mgm, 0, sizeof *mgm);
		memcpy(mgm->gid, gid, 16);
	}

	members_count = be32_to_cpu(mgm->members_count) & 0xffffff;
	if (members_count == MLX4_QP_PER_MGM) {
		mlx4_err(dev, "MGM at index %x is full.\n", index);
		err = -ENOMEM;
		goto out;
	}

	/* attaching a QP that is already a member is a silent no-op */
	for (i = 0; i < members_count; ++i)
		if ((be32_to_cpu(mgm->qp[i]) & MGM_QPN_MASK) == qp->qpn) {
			mlx4_dbg(dev, "QP %06x already a member of MGM\n", qp->qpn);
			err = 0;
			goto out;
		}

	if (block_mcast_loopback)
		mgm->qp[members_count++] = cpu_to_be32((qp->qpn & MGM_QPN_MASK) |
						       (1U << MGM_BLCK_LB_BIT));
	else
		mgm->qp[members_count++] = cpu_to_be32(qp->qpn & MGM_QPN_MASK);

	mgm->members_count = cpu_to_be32(members_count | (u32) prot << 30);

	err = mlx4_WRITE_ENTRY(dev, index, mailbox);
	if (err)
		goto out;

	if (!link)
		goto out;

	/* hang the new AMGM entry off the end of the hash chain */
	err = mlx4_READ_ENTRY(dev, prev, mailbox);
	if (err)
		goto out;

	mgm->next_gid_index = cpu_to_be32(index << 6);

	err = mlx4_WRITE_ENTRY(dev, prev, mailbox);
	if (err)
		goto out;

out:
	if (prot == MLX4_PROT_ETH) {
		/* manage the steering entry for promisc mode */
		if (new_entry)
			new_steering_entry(dev, 0, port, steer, index, qp->qpn);
		else
			existing_steering_entry(dev, 0, port, steer,
						index, qp->qpn);
	}
	/* on failure, give back a freshly-allocated AMGM index */
	if (err && link && index != -1) {
		if (index < dev->caps.num_mgms)
			mlx4_warn(dev, "Got AMGM index %d < %d",
				  index, dev->caps.num_mgms);
		else
			mlx4_bitmap_free(&priv->mcg_table.bitmap,
					 index - dev->caps.num_mgms);
	}
	mutex_unlock(&priv->mcg_table.mutex);

	mlx4_free_cmd_mailbox(dev, mailbox);
	return err;
}
/*
 * Detach @qp from the group identified by @gid.  When the last QP
 * leaves, the MGM/AMGM entry is cleared and unlinked from the hash
 * chain.  Returns 0 on success or a negative errno.
 */
int mlx4_qp_detach_common(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
			  enum mlx4_protocol prot, enum mlx4_steer_type steer)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_cmd_mailbox *mailbox;
	struct mlx4_mgm *mgm;
	u32 members_count;
	u16 hash;
	int prev, index;
	int i, loc;
	int err;
	u8 port = gid[5];	/* port number is carried in byte 5 of the GID */
	bool removed_entry = false;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);
	mgm = mailbox->buf;

	mutex_lock(&priv->mcg_table.mutex);

	err = find_entry(dev, port, gid, prot, steer,
			 mailbox, &hash, &prev, &index);
	if (err)
		goto out;

	if (index == -1) {
		mlx4_err(dev, "MGID %pI6 not found\n", gid);
		err = -EINVAL;
		goto out;
	}

	/* if this pq is also a promisc qp, it shouldn't be removed */
	if (prot == MLX4_PROT_ETH &&
	    check_duplicate_entry(dev, 0, port, steer, index, qp->qpn))
		goto out;

	/* find the slot holding this qpn */
	members_count = be32_to_cpu(mgm->members_count) & 0xffffff;
	for (loc = -1, i = 0; i < members_count; ++i)
		if ((be32_to_cpu(mgm->qp[i]) & MGM_QPN_MASK) == qp->qpn)
			loc = i;

	if (loc == -1) {
		mlx4_err(dev, "QP %06x not found in MGM\n", qp->qpn);
		err = -EINVAL;
		goto out;
	}

	/* overwrite the removed slot with the last member;
	 * i == (original) members_count after the loop above */
	mgm->members_count = cpu_to_be32(--members_count | (u32) prot << 30);
	mgm->qp[loc] = mgm->qp[i - 1];
	mgm->qp[i - 1] = 0;

	if (prot == MLX4_PROT_ETH)
		removed_entry = can_remove_steering_entry(dev, 0, port, steer, index, qp->qpn);
	if (i != 1 && (prot != MLX4_PROT_ETH || !removed_entry)) {
		/* entry still has members (or must be kept): just write back */
		err = mlx4_WRITE_ENTRY(dev, index, mailbox);
		goto out;
	}

	/* We are going to delete the entry, members count should be 0 */
	mgm->members_count = cpu_to_be32((u32) prot << 30);

	if (prev == -1) {
		/* Removing the hash-bucket entry itself: pull the first
		 * AMGM entry (if any) into the bucket */
		int amgm_index = be32_to_cpu(mgm->next_gid_index) >> 6;
		if (amgm_index) {
			err = mlx4_READ_ENTRY(dev, amgm_index, mailbox);
			if (err)
				goto out;
		} else
			memset(mgm->gid, 0, 16);

		err = mlx4_WRITE_ENTRY(dev, index, mailbox);
		if (err)
			goto out;

		if (amgm_index) {
			if (amgm_index < dev->caps.num_mgms)
				mlx4_warn(dev, "MGM entry %d had AMGM index %d < %d",
					  index, amgm_index, dev->caps.num_mgms);
			else
				mlx4_bitmap_free(&priv->mcg_table.bitmap,
						 amgm_index - dev->caps.num_mgms);
		}
	} else {
		/* Removing an AMGM entry: splice it out of the chain */
		int cur_next_index = be32_to_cpu(mgm->next_gid_index) >> 6;
		err = mlx4_READ_ENTRY(dev, prev, mailbox);
		if (err)
			goto out;

		mgm->next_gid_index = cpu_to_be32(cur_next_index << 6);

		err = mlx4_WRITE_ENTRY(dev, prev, mailbox);
		if (err)
			goto out;

		if (index < dev->caps.num_mgms)
			mlx4_warn(dev, "entry %d had next AMGM index %d < %d",
				  prev, index, dev->caps.num_mgms);
		else
			mlx4_bitmap_free(&priv->mcg_table.bitmap,
					 index - dev->caps.num_mgms);
	}

out:
	mutex_unlock(&priv->mcg_table.mutex);

	mlx4_free_cmd_mailbox(dev, mailbox);
	return err;
}
  719. int mlx4_multicast_attach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
  720. int block_mcast_loopback, enum mlx4_protocol prot)
  721. {
  722. enum mlx4_steer_type steer;
  723. steer = (is_valid_ether_addr(&gid[10])) ? MLX4_UC_STEER : MLX4_MC_STEER;
  724. if (prot == MLX4_PROT_ETH &&
  725. !(dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER))
  726. return 0;
  727. if (prot == MLX4_PROT_ETH)
  728. gid[7] |= (steer << 1);
  729. return mlx4_qp_attach_common(dev, qp, gid,
  730. block_mcast_loopback, prot,
  731. steer);
  732. }
  733. EXPORT_SYMBOL_GPL(mlx4_multicast_attach);
  734. int mlx4_multicast_detach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
  735. enum mlx4_protocol prot)
  736. {
  737. enum mlx4_steer_type steer;
  738. steer = (is_valid_ether_addr(&gid[10])) ? MLX4_UC_STEER : MLX4_MC_STEER;
  739. if (prot == MLX4_PROT_ETH &&
  740. !(dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER))
  741. return 0;
  742. if (prot == MLX4_PROT_ETH) {
  743. gid[7] |= (steer << 1);
  744. }
  745. return mlx4_qp_detach_common(dev, qp, gid, prot, steer);
  746. }
  747. EXPORT_SYMBOL_GPL(mlx4_multicast_detach);
  748. int mlx4_multicast_promisc_add(struct mlx4_dev *dev, u32 qpn, u8 port)
  749. {
  750. if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER))
  751. return 0;
  752. return add_promisc_qp(dev, 0, port, MLX4_MC_STEER, qpn);
  753. }
  754. EXPORT_SYMBOL_GPL(mlx4_multicast_promisc_add);
  755. int mlx4_multicast_promisc_remove(struct mlx4_dev *dev, u32 qpn, u8 port)
  756. {
  757. if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER))
  758. return 0;
  759. return remove_promisc_qp(dev, 0, port, MLX4_MC_STEER, qpn);
  760. }
  761. EXPORT_SYMBOL_GPL(mlx4_multicast_promisc_remove);
  762. int mlx4_unicast_promisc_add(struct mlx4_dev *dev, u32 qpn, u8 port)
  763. {
  764. if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER))
  765. return 0;
  766. return add_promisc_qp(dev, 0, port, MLX4_UC_STEER, qpn);
  767. }
  768. EXPORT_SYMBOL_GPL(mlx4_unicast_promisc_add);
  769. int mlx4_unicast_promisc_remove(struct mlx4_dev *dev, u32 qpn, u8 port)
  770. {
  771. if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER))
  772. return 0;
  773. return remove_promisc_qp(dev, 0, port, MLX4_UC_STEER, qpn);
  774. }
  775. EXPORT_SYMBOL_GPL(mlx4_unicast_promisc_remove);
  776. int mlx4_init_mcg_table(struct mlx4_dev *dev)
  777. {
  778. struct mlx4_priv *priv = mlx4_priv(dev);
  779. int err;
  780. err = mlx4_bitmap_init(&priv->mcg_table.bitmap, dev->caps.num_amgms,
  781. dev->caps.num_amgms - 1, 0, 0);
  782. if (err)
  783. return err;
  784. mutex_init(&priv->mcg_table.mutex);
  785. return 0;
  786. }
  787. void mlx4_cleanup_mcg_table(struct mlx4_dev *dev)
  788. {
  789. mlx4_bitmap_cleanup(&mlx4_priv(dev)->mcg_table.bitmap);
  790. }