mcg.c 27 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893894895896897898899900901902903904905906907908909910911912913914915916917918919920921922923924925926927928929930931932933934935936937938939940941942943944945946947948949950951952953954955956957958959960961962963964965966967968969970971972973974975976977978979980981982983984985986987988989990991992993994995996997998999100010011002100310041005100610071008100910101011101210131014101510161017101810191020102110221023102410251026102710281029103010311032103310341035103610371038103910401041104210431044104510461047
  1. /*
  2. * Copyright (c) 2006, 2007 Cisco Systems, Inc. All rights reserved.
  3. * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
  4. *
  5. * This software is available to you under a choice of one of two
  6. * licenses. You may choose to be licensed under the terms of the GNU
  7. * General Public License (GPL) Version 2, available from the file
  8. * COPYING in the main directory of this source tree, or the
  9. * OpenIB.org BSD license below:
  10. *
  11. * Redistribution and use in source and binary forms, with or
  12. * without modification, are permitted provided that the following
  13. * conditions are met:
  14. *
  15. * - Redistributions of source code must retain the above
  16. * copyright notice, this list of conditions and the following
  17. * disclaimer.
  18. *
  19. * - Redistributions in binary form must reproduce the above
  20. * copyright notice, this list of conditions and the following
  21. * disclaimer in the documentation and/or other materials
  22. * provided with the distribution.
  23. *
  24. * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
  25. * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
  26. * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
  27. * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
  28. * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
  29. * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
  30. * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  31. * SOFTWARE.
  32. */
  33. #include <linux/string.h>
  34. #include <linux/etherdevice.h>
  35. #include <linux/mlx4/cmd.h>
  36. #include <linux/export.h>
  37. #include "mlx4.h"
/* Low 24 bits of an MGM member word hold the QP number. */
#define MGM_QPN_MASK 0x00FFFFFF
/* Bit set in a member word to block loopback delivery to the sender. */
#define MGM_BLCK_LB_BIT 30

static const u8 zero_gid[16]; /* automatically initialized to 0 */

/*
 * Hardware layout of one multicast group (MGM/AMGM) entry.
 * members_count packs the protocol in bits 30-31 and the member count
 * in bits 0-23; next_gid_index holds the next hash-chain index << 6.
 */
struct mlx4_mgm {
	__be32 next_gid_index;
	__be32 members_count;
	u32 reserved[2];
	u8 gid[16];
	__be32 qp[MLX4_MAX_QP_PER_MGM];
};
  48. int mlx4_get_mgm_entry_size(struct mlx4_dev *dev)
  49. {
  50. return min((1 << mlx4_log_num_mgm_entry_size), MLX4_MAX_MGM_ENTRY_SIZE);
  51. }
  52. int mlx4_get_qp_per_mgm(struct mlx4_dev *dev)
  53. {
  54. return 4 * (mlx4_get_mgm_entry_size(dev) / 16 - 2);
  55. }
  56. static int mlx4_READ_ENTRY(struct mlx4_dev *dev, int index,
  57. struct mlx4_cmd_mailbox *mailbox)
  58. {
  59. return mlx4_cmd_box(dev, 0, mailbox->dma, index, 0, MLX4_CMD_READ_MCG,
  60. MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
  61. }
  62. static int mlx4_WRITE_ENTRY(struct mlx4_dev *dev, int index,
  63. struct mlx4_cmd_mailbox *mailbox)
  64. {
  65. return mlx4_cmd(dev, mailbox->dma, index, 0, MLX4_CMD_WRITE_MCG,
  66. MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
  67. }
  68. static int mlx4_WRITE_PROMISC(struct mlx4_dev *dev, u8 port, u8 steer,
  69. struct mlx4_cmd_mailbox *mailbox)
  70. {
  71. u32 in_mod;
  72. in_mod = (u32) port << 16 | steer << 1;
  73. return mlx4_cmd(dev, mailbox->dma, in_mod, 0x1,
  74. MLX4_CMD_WRITE_MCG, MLX4_CMD_TIME_CLASS_A,
  75. MLX4_CMD_NATIVE);
  76. }
  77. static int mlx4_GID_HASH(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
  78. u16 *hash, u8 op_mod)
  79. {
  80. u64 imm;
  81. int err;
  82. err = mlx4_cmd_imm(dev, mailbox->dma, &imm, 0, op_mod,
  83. MLX4_CMD_MGID_HASH, MLX4_CMD_TIME_CLASS_A,
  84. MLX4_CMD_NATIVE);
  85. if (!err)
  86. *hash = imm;
  87. return err;
  88. }
  89. static struct mlx4_promisc_qp *get_promisc_qp(struct mlx4_dev *dev, u8 pf_num,
  90. enum mlx4_steer_type steer,
  91. u32 qpn)
  92. {
  93. struct mlx4_steer *s_steer = &mlx4_priv(dev)->steer[pf_num];
  94. struct mlx4_promisc_qp *pqp;
  95. list_for_each_entry(pqp, &s_steer->promisc_qps[steer], list) {
  96. if (pqp->qpn == qpn)
  97. return pqp;
  98. }
  99. /* not found */
  100. return NULL;
  101. }
  102. /*
  103. * Add new entry to steering data structure.
  104. * All promisc QPs should be added as well
  105. */
/*
 * Create the software bookkeeping for a freshly created MGM entry at
 * @index, then append every known promiscuous QP for @steer to the
 * hardware entry so promisc QPs keep receiving traffic for the new
 * address.  If @qpn itself is promiscuous it is recorded on the new
 * entry's duplicates list so a later promisc-remove won't drop it from
 * the MGM.
 *
 * Returns 0 on success or a negative errno; on failure the new entry
 * and any duplicate record are unwound.
 *
 * NOTE(review): @port is unused here — all state lives in steer[0];
 * confirm whether per-port steering state was intended.
 */
static int new_steering_entry(struct mlx4_dev *dev, u8 port,
			      enum mlx4_steer_type steer,
			      unsigned int index, u32 qpn)
{
	struct mlx4_steer *s_steer;
	struct mlx4_cmd_mailbox *mailbox;
	struct mlx4_mgm *mgm;
	u32 members_count;
	struct mlx4_steer_index *new_entry;
	struct mlx4_promisc_qp *pqp;
	struct mlx4_promisc_qp *dqp = NULL;
	u32 prot;
	int err;

	s_steer = &mlx4_priv(dev)->steer[0];
	new_entry = kzalloc(sizeof *new_entry, GFP_KERNEL);
	if (!new_entry)
		return -ENOMEM;
	INIT_LIST_HEAD(&new_entry->duplicates);
	new_entry->index = index;
	list_add_tail(&new_entry->list, &s_steer->steer_entries[steer]);

	/* If the given qpn is also a promisc qp,
	 * it should be inserted to duplicates list
	 */
	pqp = get_promisc_qp(dev, 0, steer, qpn);
	if (pqp) {
		dqp = kmalloc(sizeof *dqp, GFP_KERNEL);
		if (!dqp) {
			err = -ENOMEM;
			goto out_alloc;
		}
		dqp->qpn = qpn;
		list_add_tail(&dqp->list, &new_entry->duplicates);
	}

	/* if no promisc qps for this vep, we are done */
	if (list_empty(&s_steer->promisc_qps[steer]))
		return 0;

	/* now need to add all the promisc qps to the new
	 * steering entry, as they should also receive the packets
	 * destined to this address */
	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox)) {
		err = -ENOMEM;
		goto out_alloc;
	}
	mgm = mailbox->buf;

	err = mlx4_READ_ENTRY(dev, index, mailbox);
	if (err)
		goto out_mailbox;

	/* member count in low 24 bits, protocol in top two bits */
	members_count = be32_to_cpu(mgm->members_count) & 0xffffff;
	prot = be32_to_cpu(mgm->members_count) >> 30;
	list_for_each_entry(pqp, &s_steer->promisc_qps[steer], list) {
		/* don't add already existing qpn */
		if (pqp->qpn == qpn)
			continue;
		if (members_count == dev->caps.num_qp_per_mgm) {
			/* out of space */
			err = -ENOMEM;
			goto out_mailbox;
		}

		/* add the qpn */
		mgm->qp[members_count++] = cpu_to_be32(pqp->qpn & MGM_QPN_MASK);
	}
	/* update the qps count and update the entry with all the promisc qps*/
	mgm->members_count = cpu_to_be32(members_count | (prot << 30));
	err = mlx4_WRITE_ENTRY(dev, index, mailbox);

out_mailbox:
	mlx4_free_cmd_mailbox(dev, mailbox);
	if (!err)
		return 0;
out_alloc:
	if (dqp) {
		list_del(&dqp->list);
		kfree(dqp);
	}
	list_del(&new_entry->list);
	kfree(new_entry);
	return err;
}
  184. /* update the data structures with existing steering entry */
  185. static int existing_steering_entry(struct mlx4_dev *dev, u8 port,
  186. enum mlx4_steer_type steer,
  187. unsigned int index, u32 qpn)
  188. {
  189. struct mlx4_steer *s_steer;
  190. struct mlx4_steer_index *tmp_entry, *entry = NULL;
  191. struct mlx4_promisc_qp *pqp;
  192. struct mlx4_promisc_qp *dqp;
  193. s_steer = &mlx4_priv(dev)->steer[0];
  194. pqp = get_promisc_qp(dev, 0, steer, qpn);
  195. if (!pqp)
  196. return 0; /* nothing to do */
  197. list_for_each_entry(tmp_entry, &s_steer->steer_entries[steer], list) {
  198. if (tmp_entry->index == index) {
  199. entry = tmp_entry;
  200. break;
  201. }
  202. }
  203. if (unlikely(!entry)) {
  204. mlx4_warn(dev, "Steering entry at index %x is not registered\n", index);
  205. return -EINVAL;
  206. }
  207. /* the given qpn is listed as a promisc qpn
  208. * we need to add it as a duplicate to this entry
  209. * for future references */
  210. list_for_each_entry(dqp, &entry->duplicates, list) {
  211. if (qpn == pqp->qpn)
  212. return 0; /* qp is already duplicated */
  213. }
  214. /* add the qp as a duplicate on this index */
  215. dqp = kmalloc(sizeof *dqp, GFP_KERNEL);
  216. if (!dqp)
  217. return -ENOMEM;
  218. dqp->qpn = qpn;
  219. list_add_tail(&dqp->list, &entry->duplicates);
  220. return 0;
  221. }
  222. /* Check whether a qpn is a duplicate on steering entry
  223. * If so, it should not be removed from mgm */
  224. static bool check_duplicate_entry(struct mlx4_dev *dev, u8 port,
  225. enum mlx4_steer_type steer,
  226. unsigned int index, u32 qpn)
  227. {
  228. struct mlx4_steer *s_steer;
  229. struct mlx4_steer_index *tmp_entry, *entry = NULL;
  230. struct mlx4_promisc_qp *dqp, *tmp_dqp;
  231. s_steer = &mlx4_priv(dev)->steer[0];
  232. /* if qp is not promisc, it cannot be duplicated */
  233. if (!get_promisc_qp(dev, 0, steer, qpn))
  234. return false;
  235. /* The qp is promisc qp so it is a duplicate on this index
  236. * Find the index entry, and remove the duplicate */
  237. list_for_each_entry(tmp_entry, &s_steer->steer_entries[steer], list) {
  238. if (tmp_entry->index == index) {
  239. entry = tmp_entry;
  240. break;
  241. }
  242. }
  243. if (unlikely(!entry)) {
  244. mlx4_warn(dev, "Steering entry for index %x is not registered\n", index);
  245. return false;
  246. }
  247. list_for_each_entry_safe(dqp, tmp_dqp, &entry->duplicates, list) {
  248. if (dqp->qpn == qpn) {
  249. list_del(&dqp->list);
  250. kfree(dqp);
  251. }
  252. }
  253. return true;
  254. }
/* If a steering entry contains only promisc QPs, it can be removed. */
/*
 * Decide whether the steering entry at @index may be deleted: it can go
 * only if every member QP is either promiscuous or the QP being
 * detached (@tqpn).  When removable and free of duplicates, the
 * software entry is deleted here; true is returned so the caller can
 * delete the hardware entry.  Returns false on any read failure.
 */
static bool can_remove_steering_entry(struct mlx4_dev *dev, u8 port,
				      enum mlx4_steer_type steer,
				      unsigned int index, u32 tqpn)
{
	struct mlx4_steer *s_steer;
	struct mlx4_cmd_mailbox *mailbox;
	struct mlx4_mgm *mgm;
	struct mlx4_steer_index *entry = NULL, *tmp_entry;
	u32 qpn;
	u32 members_count;
	bool ret = false;
	int i;

	s_steer = &mlx4_priv(dev)->steer[0];

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return false;
	mgm = mailbox->buf;

	if (mlx4_READ_ENTRY(dev, index, mailbox))
		goto out;
	members_count = be32_to_cpu(mgm->members_count) & 0xffffff;
	for (i = 0; i < members_count; i++) {
		qpn = be32_to_cpu(mgm->qp[i]) & MGM_QPN_MASK;
		if (!get_promisc_qp(dev, 0, steer, qpn) && qpn != tqpn) {
			/* the qp is not promisc, the entry can't be removed */
			goto out;
		}
	}
	/* All the qps currently registered for this entry are promiscuous,
	 * Checking for duplicates */
	ret = true;
	list_for_each_entry_safe(entry, tmp_entry, &s_steer->steer_entries[steer], list) {
		if (entry->index == index) {
			if (list_empty(&entry->duplicates)) {
				/* no duplicates: delete the software entry */
				list_del(&entry->list);
				kfree(entry);
			} else {
				/* This entry contains duplicates so it shouldn't be removed */
				ret = false;
				goto out;
			}
		}
	}

out:
	mlx4_free_cmd_mailbox(dev, mailbox);
	return ret;
}
  302. static int add_promisc_qp(struct mlx4_dev *dev, u8 port,
  303. enum mlx4_steer_type steer, u32 qpn)
  304. {
  305. struct mlx4_steer *s_steer;
  306. struct mlx4_cmd_mailbox *mailbox;
  307. struct mlx4_mgm *mgm;
  308. struct mlx4_steer_index *entry;
  309. struct mlx4_promisc_qp *pqp;
  310. struct mlx4_promisc_qp *dqp;
  311. u32 members_count;
  312. u32 prot;
  313. int i;
  314. bool found;
  315. int last_index;
  316. int err;
  317. struct mlx4_priv *priv = mlx4_priv(dev);
  318. s_steer = &mlx4_priv(dev)->steer[0];
  319. mutex_lock(&priv->mcg_table.mutex);
  320. if (get_promisc_qp(dev, 0, steer, qpn)) {
  321. err = 0; /* Noting to do, already exists */
  322. goto out_mutex;
  323. }
  324. pqp = kmalloc(sizeof *pqp, GFP_KERNEL);
  325. if (!pqp) {
  326. err = -ENOMEM;
  327. goto out_mutex;
  328. }
  329. pqp->qpn = qpn;
  330. mailbox = mlx4_alloc_cmd_mailbox(dev);
  331. if (IS_ERR(mailbox)) {
  332. err = -ENOMEM;
  333. goto out_alloc;
  334. }
  335. mgm = mailbox->buf;
  336. /* the promisc qp needs to be added for each one of the steering
  337. * entries, if it already exists, needs to be added as a duplicate
  338. * for this entry */
  339. list_for_each_entry(entry, &s_steer->steer_entries[steer], list) {
  340. err = mlx4_READ_ENTRY(dev, entry->index, mailbox);
  341. if (err)
  342. goto out_mailbox;
  343. members_count = be32_to_cpu(mgm->members_count) & 0xffffff;
  344. prot = be32_to_cpu(mgm->members_count) >> 30;
  345. found = false;
  346. for (i = 0; i < members_count; i++) {
  347. if ((be32_to_cpu(mgm->qp[i]) & MGM_QPN_MASK) == qpn) {
  348. /* Entry already exists, add to duplicates */
  349. dqp = kmalloc(sizeof *dqp, GFP_KERNEL);
  350. if (!dqp)
  351. goto out_mailbox;
  352. dqp->qpn = qpn;
  353. list_add_tail(&dqp->list, &entry->duplicates);
  354. found = true;
  355. }
  356. }
  357. if (!found) {
  358. /* Need to add the qpn to mgm */
  359. if (members_count == dev->caps.num_qp_per_mgm) {
  360. /* entry is full */
  361. err = -ENOMEM;
  362. goto out_mailbox;
  363. }
  364. mgm->qp[members_count++] = cpu_to_be32(qpn & MGM_QPN_MASK);
  365. mgm->members_count = cpu_to_be32(members_count | (prot << 30));
  366. err = mlx4_WRITE_ENTRY(dev, entry->index, mailbox);
  367. if (err)
  368. goto out_mailbox;
  369. }
  370. last_index = entry->index;
  371. }
  372. /* add the new qpn to list of promisc qps */
  373. list_add_tail(&pqp->list, &s_steer->promisc_qps[steer]);
  374. /* now need to add all the promisc qps to default entry */
  375. memset(mgm, 0, sizeof *mgm);
  376. members_count = 0;
  377. list_for_each_entry(dqp, &s_steer->promisc_qps[steer], list)
  378. mgm->qp[members_count++] = cpu_to_be32(dqp->qpn & MGM_QPN_MASK);
  379. mgm->members_count = cpu_to_be32(members_count | MLX4_PROT_ETH << 30);
  380. err = mlx4_WRITE_PROMISC(dev, port, steer, mailbox);
  381. if (err)
  382. goto out_list;
  383. mlx4_free_cmd_mailbox(dev, mailbox);
  384. mutex_unlock(&priv->mcg_table.mutex);
  385. return 0;
  386. out_list:
  387. list_del(&pqp->list);
  388. out_mailbox:
  389. mlx4_free_cmd_mailbox(dev, mailbox);
  390. out_alloc:
  391. kfree(pqp);
  392. out_mutex:
  393. mutex_unlock(&priv->mcg_table.mutex);
  394. return err;
  395. }
  396. static int remove_promisc_qp(struct mlx4_dev *dev, u8 port,
  397. enum mlx4_steer_type steer, u32 qpn)
  398. {
  399. struct mlx4_priv *priv = mlx4_priv(dev);
  400. struct mlx4_steer *s_steer;
  401. struct mlx4_cmd_mailbox *mailbox;
  402. struct mlx4_mgm *mgm;
  403. struct mlx4_steer_index *entry;
  404. struct mlx4_promisc_qp *pqp;
  405. struct mlx4_promisc_qp *dqp;
  406. u32 members_count;
  407. bool found;
  408. bool back_to_list = false;
  409. int loc, i;
  410. int err;
  411. s_steer = &mlx4_priv(dev)->steer[0];
  412. mutex_lock(&priv->mcg_table.mutex);
  413. pqp = get_promisc_qp(dev, 0, steer, qpn);
  414. if (unlikely(!pqp)) {
  415. mlx4_warn(dev, "QP %x is not promiscuous QP\n", qpn);
  416. /* nothing to do */
  417. err = 0;
  418. goto out_mutex;
  419. }
  420. /*remove from list of promisc qps */
  421. list_del(&pqp->list);
  422. /* set the default entry not to include the removed one */
  423. mailbox = mlx4_alloc_cmd_mailbox(dev);
  424. if (IS_ERR(mailbox)) {
  425. err = -ENOMEM;
  426. back_to_list = true;
  427. goto out_list;
  428. }
  429. mgm = mailbox->buf;
  430. memset(mgm, 0, sizeof *mgm);
  431. members_count = 0;
  432. list_for_each_entry(dqp, &s_steer->promisc_qps[steer], list)
  433. mgm->qp[members_count++] = cpu_to_be32(dqp->qpn & MGM_QPN_MASK);
  434. mgm->members_count = cpu_to_be32(members_count | MLX4_PROT_ETH << 30);
  435. err = mlx4_WRITE_PROMISC(dev, port, steer, mailbox);
  436. if (err)
  437. goto out_mailbox;
  438. /* remove the qp from all the steering entries*/
  439. list_for_each_entry(entry, &s_steer->steer_entries[steer], list) {
  440. found = false;
  441. list_for_each_entry(dqp, &entry->duplicates, list) {
  442. if (dqp->qpn == qpn) {
  443. found = true;
  444. break;
  445. }
  446. }
  447. if (found) {
  448. /* a duplicate, no need to change the mgm,
  449. * only update the duplicates list */
  450. list_del(&dqp->list);
  451. kfree(dqp);
  452. } else {
  453. err = mlx4_READ_ENTRY(dev, entry->index, mailbox);
  454. if (err)
  455. goto out_mailbox;
  456. members_count = be32_to_cpu(mgm->members_count) & 0xffffff;
  457. for (loc = -1, i = 0; i < members_count; ++i)
  458. if ((be32_to_cpu(mgm->qp[i]) & MGM_QPN_MASK) == qpn)
  459. loc = i;
  460. mgm->members_count = cpu_to_be32(--members_count |
  461. (MLX4_PROT_ETH << 30));
  462. mgm->qp[loc] = mgm->qp[i - 1];
  463. mgm->qp[i - 1] = 0;
  464. err = mlx4_WRITE_ENTRY(dev, entry->index, mailbox);
  465. if (err)
  466. goto out_mailbox;
  467. }
  468. }
  469. out_mailbox:
  470. mlx4_free_cmd_mailbox(dev, mailbox);
  471. out_list:
  472. if (back_to_list)
  473. list_add_tail(&pqp->list, &s_steer->promisc_qps[steer]);
  474. else
  475. kfree(pqp);
  476. out_mutex:
  477. mutex_unlock(&priv->mcg_table.mutex);
  478. return err;
  479. }
  480. /*
  481. * Caller must hold MCG table semaphore. gid and mgm parameters must
  482. * be properly aligned for command interface.
  483. *
  484. * Returns 0 unless a firmware command error occurs.
  485. *
  486. * If GID is found in MGM or MGM is empty, *index = *hash, *prev = -1
  487. * and *mgm holds MGM entry.
  488. *
  489. * if GID is found in AMGM, *index = index in AMGM, *prev = index of
  490. * previous entry in hash chain and *mgm holds AMGM entry.
  491. *
  492. * If no AMGM exists for given gid, *index = -1, *prev = index of last
  493. * entry in hash chain and *mgm holds end of hash chain.
  494. */
static int find_entry(struct mlx4_dev *dev, u8 port,
		      u8 *gid, enum mlx4_protocol prot,
		      enum mlx4_steer_type steer,
		      struct mlx4_cmd_mailbox *mgm_mailbox,
		      u16 *hash, int *prev, int *index)
{
	struct mlx4_cmd_mailbox *mailbox;
	struct mlx4_mgm *mgm = mgm_mailbox->buf;
	u8 *mgid;
	int err;
	/* Ethernet hashing uses op_mod 1 when VEP MC steering is supported */
	u8 op_mod = (prot == MLX4_PROT_ETH) ?
		!!(dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER) : 0;

	/* a scratch mailbox carries the GID to the MGID_HASH command */
	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return -ENOMEM;
	mgid = mailbox->buf;

	memcpy(mgid, gid, 16);

	err = mlx4_GID_HASH(dev, mailbox, hash, op_mod);
	mlx4_free_cmd_mailbox(dev, mailbox);
	if (err)
		return err;

	/* disabled debug print, kept for easy re-enabling */
	if (0)
		mlx4_dbg(dev, "Hash for %pI6 is %04x\n", gid, *hash);

	/* walk the hash chain starting at the MGM hash slot */
	*index = *hash;
	*prev = -1;

	do {
		err = mlx4_READ_ENTRY(dev, *index, mgm_mailbox);
		if (err)
			return err;

		if (!(be32_to_cpu(mgm->members_count) & 0xffffff)) {
			/* an empty entry is legal only at the hash slot
			 * itself; an empty AMGM entry means corruption */
			if (*index != *hash) {
				mlx4_err(dev, "Found zero MGID in AMGM.\n");
				err = -EINVAL;
			}
			return err;
		}

		/* match on both GID and protocol (top two bits) */
		if (!memcmp(mgm->gid, gid, 16) &&
		    be32_to_cpu(mgm->members_count) >> 30 == prot)
			return err;

		*prev = *index;
		*index = be32_to_cpu(mgm->next_gid_index) >> 6;
	} while (*index);

	/* end of chain, GID not present */
	*index = -1;
	return err;
}
/*
 * Attach @qp to the group identified by @gid, creating the MGM (or a
 * chained AMGM) entry if the group does not exist yet.  For Ethernet,
 * the software steering state is updated afterwards so promiscuous QPs
 * stay consistent.  Returns 0 or a negative errno.
 */
int mlx4_qp_attach_common(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
			  int block_mcast_loopback, enum mlx4_protocol prot,
			  enum mlx4_steer_type steer)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_cmd_mailbox *mailbox;
	struct mlx4_mgm *mgm;
	u32 members_count;
	u16 hash;
	int index, prev;
	int link = 0;
	int i;
	int err;
	u8 port = gid[5];	/* port number is carried in byte 5 of the GID */
	u8 new_entry = 0;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);
	mgm = mailbox->buf;

	mutex_lock(&priv->mcg_table.mutex);
	err = find_entry(dev, port, gid, prot, steer,
			 mailbox, &hash, &prev, &index);
	if (err)
		goto out;

	if (index != -1) {
		/* hash slot exists; if empty, claim it for this GID */
		if (!(be32_to_cpu(mgm->members_count) & 0xffffff)) {
			new_entry = 1;
			memcpy(mgm->gid, gid, 16);
		}
	} else {
		/* GID not found: allocate an AMGM entry and link it in */
		link = 1;
		index = mlx4_bitmap_alloc(&priv->mcg_table.bitmap);
		if (index == -1) {
			mlx4_err(dev, "No AMGM entries left\n");
			err = -ENOMEM;
			goto out;
		}
		index += dev->caps.num_mgms;
		new_entry = 1;
		memset(mgm, 0, sizeof *mgm);
		memcpy(mgm->gid, gid, 16);
	}

	members_count = be32_to_cpu(mgm->members_count) & 0xffffff;
	if (members_count == dev->caps.num_qp_per_mgm) {
		mlx4_err(dev, "MGM at index %x is full.\n", index);
		err = -ENOMEM;
		goto out;
	}

	/* attaching an already-member QP is a successful no-op */
	for (i = 0; i < members_count; ++i)
		if ((be32_to_cpu(mgm->qp[i]) & MGM_QPN_MASK) == qp->qpn) {
			mlx4_dbg(dev, "QP %06x already a member of MGM\n", qp->qpn);
			err = 0;
			goto out;
		}

	if (block_mcast_loopback)
		mgm->qp[members_count++] = cpu_to_be32((qp->qpn & MGM_QPN_MASK) |
						       (1U << MGM_BLCK_LB_BIT));
	else
		mgm->qp[members_count++] = cpu_to_be32(qp->qpn & MGM_QPN_MASK);
	mgm->members_count = cpu_to_be32(members_count | (u32) prot << 30);
	err = mlx4_WRITE_ENTRY(dev, index, mailbox);
	if (err)
		goto out;

	if (!link)
		goto out;

	/* link the new AMGM entry onto the end of the hash chain */
	err = mlx4_READ_ENTRY(dev, prev, mailbox);
	if (err)
		goto out;
	mgm->next_gid_index = cpu_to_be32(index << 6);
	err = mlx4_WRITE_ENTRY(dev, prev, mailbox);
	if (err)
		goto out;

out:
	if (prot == MLX4_PROT_ETH) {
		/* manage the steering entry for promisc mode
		 * NOTE(review): this runs even when err != 0 — confirm that
		 * updating software steering state after a failed attach is
		 * intentional */
		if (new_entry)
			new_steering_entry(dev, port, steer, index, qp->qpn);
		else
			existing_steering_entry(dev, port, steer,
						index, qp->qpn);
	}
	/* on failure, release a freshly allocated AMGM index */
	if (err && link && index != -1) {
		if (index < dev->caps.num_mgms)
			mlx4_warn(dev, "Got AMGM index %d < %d",
				  index, dev->caps.num_mgms);
		else
			mlx4_bitmap_free(&priv->mcg_table.bitmap,
					 index - dev->caps.num_mgms);
	}
	mutex_unlock(&priv->mcg_table.mutex);
	mlx4_free_cmd_mailbox(dev, mailbox);
	return err;
}
/*
 * Detach @qp from the group identified by @gid.  When the last real
 * member leaves, the MGM (or AMGM) entry is deleted and the hash chain
 * relinked; duplicates of promiscuous QPs are never removed from
 * hardware.  Returns 0 or a negative errno.
 */
int mlx4_qp_detach_common(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
			  enum mlx4_protocol prot, enum mlx4_steer_type steer)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_cmd_mailbox *mailbox;
	struct mlx4_mgm *mgm;
	u32 members_count;
	u16 hash;
	int prev, index;
	int i, loc;
	int err;
	u8 port = gid[5];	/* port number is carried in byte 5 of the GID */
	bool removed_entry = false;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);
	mgm = mailbox->buf;

	mutex_lock(&priv->mcg_table.mutex);

	err = find_entry(dev, port, gid, prot, steer,
			 mailbox, &hash, &prev, &index);
	if (err)
		goto out;

	if (index == -1) {
		mlx4_err(dev, "MGID %pI6 not found\n", gid);
		err = -EINVAL;
		goto out;
	}

	/* if this pq is also a promisc qp, it shouldn't be removed */
	if (prot == MLX4_PROT_ETH &&
	    check_duplicate_entry(dev, port, steer, index, qp->qpn))
		goto out;

	/* locate the member slot holding our QP */
	members_count = be32_to_cpu(mgm->members_count) & 0xffffff;
	for (loc = -1, i = 0; i < members_count; ++i)
		if ((be32_to_cpu(mgm->qp[i]) & MGM_QPN_MASK) == qp->qpn)
			loc = i;

	if (loc == -1) {
		mlx4_err(dev, "QP %06x not found in MGM\n", qp->qpn);
		err = -EINVAL;
		goto out;
	}

	/* overwrite the removed slot with the last member
	 * (i == old members_count after the loop above) */
	mgm->members_count = cpu_to_be32(--members_count | (u32) prot << 30);
	mgm->qp[loc] = mgm->qp[i - 1];
	mgm->qp[i - 1] = 0;

	if (prot == MLX4_PROT_ETH)
		removed_entry = can_remove_steering_entry(dev, port, steer,
							  index, qp->qpn);
	/* members remain (or entry must stay for steering): just rewrite */
	if (i != 1 && (prot != MLX4_PROT_ETH || !removed_entry)) {
		err = mlx4_WRITE_ENTRY(dev, index, mailbox);
		goto out;
	}

	/* We are going to delete the entry, members count should be 0 */
	mgm->members_count = cpu_to_be32((u32) prot << 30);

	if (prev == -1) {
		/* Remove entry from MGM */
		int amgm_index = be32_to_cpu(mgm->next_gid_index) >> 6;
		if (amgm_index) {
			/* pull the first chained AMGM entry into the hash
			 * slot */
			err = mlx4_READ_ENTRY(dev, amgm_index, mailbox);
			if (err)
				goto out;
		} else
			memset(mgm->gid, 0, 16);
		err = mlx4_WRITE_ENTRY(dev, index, mailbox);
		if (err)
			goto out;

		if (amgm_index) {
			if (amgm_index < dev->caps.num_mgms)
				mlx4_warn(dev, "MGM entry %d had AMGM index %d < %d",
					  index, amgm_index, dev->caps.num_mgms);
			else
				mlx4_bitmap_free(&priv->mcg_table.bitmap,
						 amgm_index - dev->caps.num_mgms);
		}
	} else {
		/* Remove entry from AMGM */
		int cur_next_index = be32_to_cpu(mgm->next_gid_index) >> 6;
		err = mlx4_READ_ENTRY(dev, prev, mailbox);
		if (err)
			goto out;

		/* unlink: previous entry now points past the removed one */
		mgm->next_gid_index = cpu_to_be32(cur_next_index << 6);

		err = mlx4_WRITE_ENTRY(dev, prev, mailbox);
		if (err)
			goto out;

		if (index < dev->caps.num_mgms)
			mlx4_warn(dev, "entry %d had next AMGM index %d < %d",
				  prev, index, dev->caps.num_mgms);
		else
			mlx4_bitmap_free(&priv->mcg_table.bitmap,
					 index - dev->caps.num_mgms);
	}

out:
	mutex_unlock(&priv->mcg_table.mutex);
	mlx4_free_cmd_mailbox(dev, mailbox);
	return err;
}
  727. static int mlx4_QP_ATTACH(struct mlx4_dev *dev, struct mlx4_qp *qp,
  728. u8 gid[16], u8 attach, u8 block_loopback,
  729. enum mlx4_protocol prot)
  730. {
  731. struct mlx4_cmd_mailbox *mailbox;
  732. int err = 0;
  733. int qpn;
  734. if (!mlx4_is_mfunc(dev))
  735. return -EBADF;
  736. mailbox = mlx4_alloc_cmd_mailbox(dev);
  737. if (IS_ERR(mailbox))
  738. return PTR_ERR(mailbox);
  739. memcpy(mailbox->buf, gid, 16);
  740. qpn = qp->qpn;
  741. qpn |= (prot << 28);
  742. if (attach && block_loopback)
  743. qpn |= (1 << 31);
  744. err = mlx4_cmd(dev, mailbox->dma, qpn, attach,
  745. MLX4_CMD_QP_ATTACH, MLX4_CMD_TIME_CLASS_A,
  746. MLX4_CMD_WRAPPED);
  747. mlx4_free_cmd_mailbox(dev, mailbox);
  748. return err;
  749. }
  750. int mlx4_multicast_attach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
  751. int block_mcast_loopback, enum mlx4_protocol prot)
  752. {
  753. enum mlx4_steer_type steer;
  754. steer = (is_valid_ether_addr(&gid[10])) ? MLX4_UC_STEER : MLX4_MC_STEER;
  755. if (prot == MLX4_PROT_ETH &&
  756. !(dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER))
  757. return 0;
  758. if (prot == MLX4_PROT_ETH)
  759. gid[7] |= (steer << 1);
  760. if (mlx4_is_mfunc(dev))
  761. return mlx4_QP_ATTACH(dev, qp, gid, 1,
  762. block_mcast_loopback, prot);
  763. return mlx4_qp_attach_common(dev, qp, gid, block_mcast_loopback,
  764. prot, steer);
  765. }
  766. EXPORT_SYMBOL_GPL(mlx4_multicast_attach);
  767. int mlx4_multicast_detach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
  768. enum mlx4_protocol prot)
  769. {
  770. enum mlx4_steer_type steer;
  771. steer = (is_valid_ether_addr(&gid[10])) ? MLX4_UC_STEER : MLX4_MC_STEER;
  772. if (prot == MLX4_PROT_ETH &&
  773. !(dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER))
  774. return 0;
  775. if (prot == MLX4_PROT_ETH)
  776. gid[7] |= (steer << 1);
  777. if (mlx4_is_mfunc(dev))
  778. return mlx4_QP_ATTACH(dev, qp, gid, 0, 0, prot);
  779. return mlx4_qp_detach_common(dev, qp, gid, prot, steer);
  780. }
  781. EXPORT_SYMBOL_GPL(mlx4_multicast_detach);
  782. int mlx4_unicast_attach(struct mlx4_dev *dev,
  783. struct mlx4_qp *qp, u8 gid[16],
  784. int block_mcast_loopback, enum mlx4_protocol prot)
  785. {
  786. if (prot == MLX4_PROT_ETH &&
  787. !(dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER))
  788. return 0;
  789. if (prot == MLX4_PROT_ETH)
  790. gid[7] |= (MLX4_UC_STEER << 1);
  791. if (mlx4_is_mfunc(dev))
  792. return mlx4_QP_ATTACH(dev, qp, gid, 1,
  793. block_mcast_loopback, prot);
  794. return mlx4_qp_attach_common(dev, qp, gid, block_mcast_loopback,
  795. prot, MLX4_UC_STEER);
  796. }
  797. EXPORT_SYMBOL_GPL(mlx4_unicast_attach);
  798. int mlx4_unicast_detach(struct mlx4_dev *dev, struct mlx4_qp *qp,
  799. u8 gid[16], enum mlx4_protocol prot)
  800. {
  801. if (prot == MLX4_PROT_ETH &&
  802. !(dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER))
  803. return 0;
  804. if (prot == MLX4_PROT_ETH)
  805. gid[7] |= (MLX4_UC_STEER << 1);
  806. if (mlx4_is_mfunc(dev))
  807. return mlx4_QP_ATTACH(dev, qp, gid, 0, 0, prot);
  808. return mlx4_qp_detach_common(dev, qp, gid, prot, MLX4_UC_STEER);
  809. }
  810. EXPORT_SYMBOL_GPL(mlx4_unicast_detach);
  811. int mlx4_PROMISC_wrapper(struct mlx4_dev *dev, int slave,
  812. struct mlx4_vhcr *vhcr,
  813. struct mlx4_cmd_mailbox *inbox,
  814. struct mlx4_cmd_mailbox *outbox,
  815. struct mlx4_cmd_info *cmd)
  816. {
  817. u32 qpn = (u32) vhcr->in_param & 0xffffffff;
  818. u8 port = vhcr->in_param >> 62;
  819. enum mlx4_steer_type steer = vhcr->in_modifier;
  820. /* Promiscuous unicast is not allowed in mfunc */
  821. if (mlx4_is_mfunc(dev) && steer == MLX4_UC_STEER)
  822. return 0;
  823. if (vhcr->op_modifier)
  824. return add_promisc_qp(dev, port, steer, qpn);
  825. else
  826. return remove_promisc_qp(dev, port, steer, qpn);
  827. }
  828. static int mlx4_PROMISC(struct mlx4_dev *dev, u32 qpn,
  829. enum mlx4_steer_type steer, u8 add, u8 port)
  830. {
  831. return mlx4_cmd(dev, (u64) qpn | (u64) port << 62, (u32) steer, add,
  832. MLX4_CMD_PROMISC, MLX4_CMD_TIME_CLASS_A,
  833. MLX4_CMD_WRAPPED);
  834. }
  835. int mlx4_multicast_promisc_add(struct mlx4_dev *dev, u32 qpn, u8 port)
  836. {
  837. if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER))
  838. return 0;
  839. if (mlx4_is_mfunc(dev))
  840. return mlx4_PROMISC(dev, qpn, MLX4_MC_STEER, 1, port);
  841. return add_promisc_qp(dev, port, MLX4_MC_STEER, qpn);
  842. }
  843. EXPORT_SYMBOL_GPL(mlx4_multicast_promisc_add);
  844. int mlx4_multicast_promisc_remove(struct mlx4_dev *dev, u32 qpn, u8 port)
  845. {
  846. if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER))
  847. return 0;
  848. if (mlx4_is_mfunc(dev))
  849. return mlx4_PROMISC(dev, qpn, MLX4_MC_STEER, 0, port);
  850. return remove_promisc_qp(dev, port, MLX4_MC_STEER, qpn);
  851. }
  852. EXPORT_SYMBOL_GPL(mlx4_multicast_promisc_remove);
int mlx4_unicast_promisc_add(struct mlx4_dev *dev, u32 qpn, u8 port)
{
	/* NOTE(review): this gates on the *multicast* steering capability
	 * (VEP_MC_STEER) even though it manages unicast promiscuity —
	 * verify whether MLX4_DEV_CAP_FLAG_VEP_UC_STEER was intended. */
	if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER))
		return 0;

	if (mlx4_is_mfunc(dev))
		return mlx4_PROMISC(dev, qpn, MLX4_UC_STEER, 1, port);

	return add_promisc_qp(dev, port, MLX4_UC_STEER, qpn);
}
EXPORT_SYMBOL_GPL(mlx4_unicast_promisc_add);
int mlx4_unicast_promisc_remove(struct mlx4_dev *dev, u32 qpn, u8 port)
{
	/* NOTE(review): gates on VEP_MC_STEER though it manages unicast
	 * promiscuity — verify whether VEP_UC_STEER was intended. */
	if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER))
		return 0;

	if (mlx4_is_mfunc(dev))
		return mlx4_PROMISC(dev, qpn, MLX4_UC_STEER, 0, port);

	return remove_promisc_qp(dev, port, MLX4_UC_STEER, qpn);
}
EXPORT_SYMBOL_GPL(mlx4_unicast_promisc_remove);
  871. int mlx4_init_mcg_table(struct mlx4_dev *dev)
  872. {
  873. struct mlx4_priv *priv = mlx4_priv(dev);
  874. int err;
  875. err = mlx4_bitmap_init(&priv->mcg_table.bitmap, dev->caps.num_amgms,
  876. dev->caps.num_amgms - 1, 0, 0);
  877. if (err)
  878. return err;
  879. mutex_init(&priv->mcg_table.mutex);
  880. return 0;
  881. }
  882. void mlx4_cleanup_mcg_table(struct mlx4_dev *dev)
  883. {
  884. mlx4_bitmap_cleanup(&mlx4_priv(dev)->mcg_table.bitmap);
  885. }