mcg.c 26 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893894895896897898899900901902903904905906907908909910911912913914915916917918919920921922923924925926927928929930931932933934935936937938939940941942943944945946947948949950951952953954955956957958959960961962963964965966967968969970971972973974975976977978979980981982983984985986987988989990991992993994995996997998999100010011002100310041005100610071008100910101011101210131014101510161017101810191020102110221023102410251026102710281029103010311032103310341035
  1. /*
  2. * Copyright (c) 2006, 2007 Cisco Systems, Inc. All rights reserved.
  3. * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
  4. *
  5. * This software is available to you under a choice of one of two
  6. * licenses. You may choose to be licensed under the terms of the GNU
  7. * General Public License (GPL) Version 2, available from the file
  8. * COPYING in the main directory of this source tree, or the
  9. * OpenIB.org BSD license below:
  10. *
  11. * Redistribution and use in source and binary forms, with or
  12. * without modification, are permitted provided that the following
  13. * conditions are met:
  14. *
  15. * - Redistributions of source code must retain the above
  16. * copyright notice, this list of conditions and the following
  17. * disclaimer.
  18. *
  19. * - Redistributions in binary form must reproduce the above
  20. * copyright notice, this list of conditions and the following
  21. * disclaimer in the documentation and/or other materials
  22. * provided with the distribution.
  23. *
  24. * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
  25. * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
  26. * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
  27. * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
  28. * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
  29. * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
  30. * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  31. * SOFTWARE.
  32. */
  33. #include <linux/string.h>
  34. #include <linux/etherdevice.h>
  35. #include <linux/mlx4/cmd.h>
  36. #include <linux/export.h>
  37. #include "mlx4.h"
  38. #define MGM_QPN_MASK 0x00FFFFFF
  39. #define MGM_BLCK_LB_BIT 30
/* All-zero GID; an MGM entry holding this GID is considered unused */
static const u8 zero_gid[16]; /* automatically initialized to 0 */

/* In-memory layout of one Multicast Group Member (MGM) entry, as moved
 * through the READ_MCG / WRITE_MCG command mailbox.
 *
 * members_count:  low 24 bits = number of valid entries in qp[],
 *                 bits 30-31  = protocol (see the >> 30 users below).
 * next_gid_index: next AMGM index in the hash chain, stored shifted
 *                 left by 6 (consumers do ">> 6" to recover the index).
 * qp[]:           QP numbers, each masked with MGM_QPN_MASK; bit 30
 *                 (MGM_BLCK_LB_BIT) blocks multicast loopback.
 */
struct mlx4_mgm {
	__be32 next_gid_index;
	__be32 members_count;
	u32 reserved[2];
	u8 gid[16];
	__be32 qp[MLX4_MAX_QP_PER_MGM];
};
  48. int mlx4_get_mgm_entry_size(struct mlx4_dev *dev)
  49. {
  50. return min((1 << mlx4_log_num_mgm_entry_size), MLX4_MAX_MGM_ENTRY_SIZE);
  51. }
  52. int mlx4_get_qp_per_mgm(struct mlx4_dev *dev)
  53. {
  54. return 4 * (mlx4_get_mgm_entry_size(dev) / 16 - 2);
  55. }
/* Read MGM/AMGM entry at @index from firmware into @mailbox->buf. */
static int mlx4_READ_ENTRY(struct mlx4_dev *dev, int index,
			   struct mlx4_cmd_mailbox *mailbox)
{
	return mlx4_cmd_box(dev, 0, mailbox->dma, index, 0, MLX4_CMD_READ_MCG,
			    MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
}
/* Write the MGM/AMGM entry in @mailbox->buf to firmware at @index. */
static int mlx4_WRITE_ENTRY(struct mlx4_dev *dev, int index,
			    struct mlx4_cmd_mailbox *mailbox)
{
	return mlx4_cmd(dev, mailbox->dma, index, 0, MLX4_CMD_WRITE_MCG,
			MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
}
  68. static int mlx4_WRITE_PROMISC(struct mlx4_dev *dev, u8 port, u8 steer,
  69. struct mlx4_cmd_mailbox *mailbox)
  70. {
  71. u32 in_mod;
  72. in_mod = (u32) port << 16 | steer << 1;
  73. return mlx4_cmd(dev, mailbox->dma, in_mod, 0x1,
  74. MLX4_CMD_WRITE_MCG, MLX4_CMD_TIME_CLASS_A,
  75. MLX4_CMD_NATIVE);
  76. }
/* Ask firmware for the MGM hash-table index of the GID held in
 * @mailbox->buf; the 16-bit hash is returned through *@hash.
 * Returns 0 on success or a firmware command error.
 */
static int mlx4_GID_HASH(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
			 u16 *hash, u8 op_mod)
{
	u64 imm;
	int err;

	err = mlx4_cmd_imm(dev, mailbox->dma, &imm, 0, op_mod,
			   MLX4_CMD_MGID_HASH, MLX4_CMD_TIME_CLASS_A,
			   MLX4_CMD_NATIVE);
	if (!err)
		*hash = imm; /* truncates the 64-bit immediate to u16 */
	return err;
}
  89. static struct mlx4_promisc_qp *get_promisc_qp(struct mlx4_dev *dev, u8 pf_num,
  90. enum mlx4_steer_type steer,
  91. u32 qpn)
  92. {
  93. struct mlx4_steer *s_steer = &mlx4_priv(dev)->steer[pf_num];
  94. struct mlx4_promisc_qp *pqp;
  95. list_for_each_entry(pqp, &s_steer->promisc_qps[steer], list) {
  96. if (pqp->qpn == qpn)
  97. return pqp;
  98. }
  99. /* not found */
  100. return NULL;
  101. }
/*
 * Add new entry to steering data structure.
 * All promisc QPs should be added as well
 *
 * Registers @index as a software steering entry for @steer on @port and
 * appends every currently-promiscuous QP to the hardware MGM entry at
 * @index, so promisc QPs keep receiving traffic for this address.
 * Returns 0 on success or a negative errno; on failure the partially
 * built software state (new_entry, dqp) is unwound.
 */
static int new_steering_entry(struct mlx4_dev *dev, u8 port,
			      enum mlx4_steer_type steer,
			      unsigned int index, u32 qpn)
{
	struct mlx4_steer *s_steer;
	struct mlx4_cmd_mailbox *mailbox;
	struct mlx4_mgm *mgm;
	u32 members_count;
	struct mlx4_steer_index *new_entry;
	struct mlx4_promisc_qp *pqp;
	struct mlx4_promisc_qp *dqp = NULL;
	u32 prot;
	int err;

	s_steer = &mlx4_priv(dev)->steer[port - 1];
	new_entry = kzalloc(sizeof *new_entry, GFP_KERNEL);
	if (!new_entry)
		return -ENOMEM;
	INIT_LIST_HEAD(&new_entry->duplicates);
	new_entry->index = index;
	list_add_tail(&new_entry->list, &s_steer->steer_entries[steer]);
	/* If the given qpn is also a promisc qp,
	 * it should be inserted to duplicates list
	 */
	pqp = get_promisc_qp(dev, 0, steer, qpn);
	if (pqp) {
		dqp = kmalloc(sizeof *dqp, GFP_KERNEL);
		if (!dqp) {
			err = -ENOMEM;
			goto out_alloc;
		}
		dqp->qpn = qpn;
		list_add_tail(&dqp->list, &new_entry->duplicates);
	}
	/* if no promisc qps for this vep, we are done */
	if (list_empty(&s_steer->promisc_qps[steer]))
		return 0;
	/* now need to add all the promisc qps to the new
	 * steering entry, as they should also receive the packets
	 * destined to this address */
	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox)) {
		err = -ENOMEM;
		goto out_alloc;
	}
	mgm = mailbox->buf;
	err = mlx4_READ_ENTRY(dev, index, mailbox);
	if (err)
		goto out_mailbox;
	/* low 24 bits are the member count, top 2 bits the protocol */
	members_count = be32_to_cpu(mgm->members_count) & 0xffffff;
	prot = be32_to_cpu(mgm->members_count) >> 30;
	list_for_each_entry(pqp, &s_steer->promisc_qps[steer], list) {
		/* don't add already existing qpn */
		if (pqp->qpn == qpn)
			continue;
		if (members_count == dev->caps.num_qp_per_mgm) {
			/* out of space */
			err = -ENOMEM;
			goto out_mailbox;
		}
		/* add the qpn */
		mgm->qp[members_count++] = cpu_to_be32(pqp->qpn & MGM_QPN_MASK);
	}
	/* update the qps count and update the entry with all the promisc qps*/
	mgm->members_count = cpu_to_be32(members_count | (prot << 30));
	err = mlx4_WRITE_ENTRY(dev, index, mailbox);

out_mailbox:
	mlx4_free_cmd_mailbox(dev, mailbox);
	if (!err)
		return 0;
out_alloc:
	/* unwind: drop the duplicate (if any) and the new entry itself */
	if (dqp) {
		list_del(&dqp->list);
		kfree(dqp);
	}
	list_del(&new_entry->list);
	kfree(new_entry);
	return err;
}
  184. /* update the data structures with existing steering entry */
  185. static int existing_steering_entry(struct mlx4_dev *dev, u8 port,
  186. enum mlx4_steer_type steer,
  187. unsigned int index, u32 qpn)
  188. {
  189. struct mlx4_steer *s_steer;
  190. struct mlx4_steer_index *tmp_entry, *entry = NULL;
  191. struct mlx4_promisc_qp *pqp;
  192. struct mlx4_promisc_qp *dqp;
  193. s_steer = &mlx4_priv(dev)->steer[port - 1];
  194. pqp = get_promisc_qp(dev, 0, steer, qpn);
  195. if (!pqp)
  196. return 0; /* nothing to do */
  197. list_for_each_entry(tmp_entry, &s_steer->steer_entries[steer], list) {
  198. if (tmp_entry->index == index) {
  199. entry = tmp_entry;
  200. break;
  201. }
  202. }
  203. if (unlikely(!entry)) {
  204. mlx4_warn(dev, "Steering entry at index %x is not registered\n", index);
  205. return -EINVAL;
  206. }
  207. /* the given qpn is listed as a promisc qpn
  208. * we need to add it as a duplicate to this entry
  209. * for future references */
  210. list_for_each_entry(dqp, &entry->duplicates, list) {
  211. if (qpn == pqp->qpn)
  212. return 0; /* qp is already duplicated */
  213. }
  214. /* add the qp as a duplicate on this index */
  215. dqp = kmalloc(sizeof *dqp, GFP_KERNEL);
  216. if (!dqp)
  217. return -ENOMEM;
  218. dqp->qpn = qpn;
  219. list_add_tail(&dqp->list, &entry->duplicates);
  220. return 0;
  221. }
/* Check whether a qpn is a duplicate on steering entry
 * If so, it should not be removed from mgm
 *
 * Returns true when @qpn is a promiscuous QP tracked as a duplicate on
 * the entry at @index (the duplicate record is dropped as a side
 * effect); false when the QP is not promisc or the entry is unknown.
 */
static bool check_duplicate_entry(struct mlx4_dev *dev, u8 port,
				  enum mlx4_steer_type steer,
				  unsigned int index, u32 qpn)
{
	struct mlx4_steer *s_steer;
	struct mlx4_steer_index *tmp_entry, *entry = NULL;
	struct mlx4_promisc_qp *dqp, *tmp_dqp;

	s_steer = &mlx4_priv(dev)->steer[port - 1];
	/* if qp is not promisc, it cannot be duplicated */
	if (!get_promisc_qp(dev, 0, steer, qpn))
		return false;

	/* The qp is promisc qp so it is a duplicate on this index
	 * Find the index entry, and remove the duplicate */
	list_for_each_entry(tmp_entry, &s_steer->steer_entries[steer], list) {
		if (tmp_entry->index == index) {
			entry = tmp_entry;
			break;
		}
	}
	if (unlikely(!entry)) {
		mlx4_warn(dev, "Steering entry for index %x is not registered\n", index);
		return false;
	}
	/* _safe variant: we delete list elements while iterating */
	list_for_each_entry_safe(dqp, tmp_dqp, &entry->duplicates, list) {
		if (dqp->qpn == qpn) {
			list_del(&dqp->list);
			kfree(dqp);
		}
	}
	return true;
}
/* If a steering entry contains only promisc QPs, it can be removed.
 *
 * Reads the MGM entry at @index and returns true when every member is
 * either @tqpn (the QP being detached) or a promiscuous QP, and the
 * software entry has no duplicates; the software entry is freed as a
 * side effect in that case. Returns false otherwise or on any
 * command/mailbox failure.
 */
static bool can_remove_steering_entry(struct mlx4_dev *dev, u8 port,
				      enum mlx4_steer_type steer,
				      unsigned int index, u32 tqpn)
{
	struct mlx4_steer *s_steer;
	struct mlx4_cmd_mailbox *mailbox;
	struct mlx4_mgm *mgm;
	struct mlx4_steer_index *entry = NULL, *tmp_entry;
	u32 qpn;
	u32 members_count;
	bool ret = false;
	int i;

	s_steer = &mlx4_priv(dev)->steer[port - 1];

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return false;
	mgm = mailbox->buf;

	if (mlx4_READ_ENTRY(dev, index, mailbox))
		goto out;
	members_count = be32_to_cpu(mgm->members_count) & 0xffffff;
	for (i = 0; i < members_count; i++) {
		qpn = be32_to_cpu(mgm->qp[i]) & MGM_QPN_MASK;
		if (!get_promisc_qp(dev, 0, steer, qpn) && qpn != tqpn) {
			/* the qp is not promisc, the entry can't be removed */
			goto out;
		}
	}
	/* All the qps currently registered for this entry are promiscuous,
	 * Checking for duplicates */
	ret = true;
	list_for_each_entry_safe(entry, tmp_entry, &s_steer->steer_entries[steer], list) {
		if (entry->index == index) {
			if (list_empty(&entry->duplicates)) {
				list_del(&entry->list);
				kfree(entry);
			} else {
				/* This entry contains duplicates so it shouldn't be removed */
				ret = false;
				goto out;
			}
		}
	}

out:
	mlx4_free_cmd_mailbox(dev, mailbox);
	return ret;
}
  302. static int add_promisc_qp(struct mlx4_dev *dev, u8 port,
  303. enum mlx4_steer_type steer, u32 qpn)
  304. {
  305. struct mlx4_steer *s_steer;
  306. struct mlx4_cmd_mailbox *mailbox;
  307. struct mlx4_mgm *mgm;
  308. struct mlx4_steer_index *entry;
  309. struct mlx4_promisc_qp *pqp;
  310. struct mlx4_promisc_qp *dqp;
  311. u32 members_count;
  312. u32 prot;
  313. int i;
  314. bool found;
  315. int err;
  316. struct mlx4_priv *priv = mlx4_priv(dev);
  317. s_steer = &mlx4_priv(dev)->steer[port - 1];
  318. mutex_lock(&priv->mcg_table.mutex);
  319. if (get_promisc_qp(dev, 0, steer, qpn)) {
  320. err = 0; /* Noting to do, already exists */
  321. goto out_mutex;
  322. }
  323. pqp = kmalloc(sizeof *pqp, GFP_KERNEL);
  324. if (!pqp) {
  325. err = -ENOMEM;
  326. goto out_mutex;
  327. }
  328. pqp->qpn = qpn;
  329. mailbox = mlx4_alloc_cmd_mailbox(dev);
  330. if (IS_ERR(mailbox)) {
  331. err = -ENOMEM;
  332. goto out_alloc;
  333. }
  334. mgm = mailbox->buf;
  335. /* the promisc qp needs to be added for each one of the steering
  336. * entries, if it already exists, needs to be added as a duplicate
  337. * for this entry */
  338. list_for_each_entry(entry, &s_steer->steer_entries[steer], list) {
  339. err = mlx4_READ_ENTRY(dev, entry->index, mailbox);
  340. if (err)
  341. goto out_mailbox;
  342. members_count = be32_to_cpu(mgm->members_count) & 0xffffff;
  343. prot = be32_to_cpu(mgm->members_count) >> 30;
  344. found = false;
  345. for (i = 0; i < members_count; i++) {
  346. if ((be32_to_cpu(mgm->qp[i]) & MGM_QPN_MASK) == qpn) {
  347. /* Entry already exists, add to duplicates */
  348. dqp = kmalloc(sizeof *dqp, GFP_KERNEL);
  349. if (!dqp)
  350. goto out_mailbox;
  351. dqp->qpn = qpn;
  352. list_add_tail(&dqp->list, &entry->duplicates);
  353. found = true;
  354. }
  355. }
  356. if (!found) {
  357. /* Need to add the qpn to mgm */
  358. if (members_count == dev->caps.num_qp_per_mgm) {
  359. /* entry is full */
  360. err = -ENOMEM;
  361. goto out_mailbox;
  362. }
  363. mgm->qp[members_count++] = cpu_to_be32(qpn & MGM_QPN_MASK);
  364. mgm->members_count = cpu_to_be32(members_count | (prot << 30));
  365. err = mlx4_WRITE_ENTRY(dev, entry->index, mailbox);
  366. if (err)
  367. goto out_mailbox;
  368. }
  369. }
  370. /* add the new qpn to list of promisc qps */
  371. list_add_tail(&pqp->list, &s_steer->promisc_qps[steer]);
  372. /* now need to add all the promisc qps to default entry */
  373. memset(mgm, 0, sizeof *mgm);
  374. members_count = 0;
  375. list_for_each_entry(dqp, &s_steer->promisc_qps[steer], list)
  376. mgm->qp[members_count++] = cpu_to_be32(dqp->qpn & MGM_QPN_MASK);
  377. mgm->members_count = cpu_to_be32(members_count | MLX4_PROT_ETH << 30);
  378. err = mlx4_WRITE_PROMISC(dev, port, steer, mailbox);
  379. if (err)
  380. goto out_list;
  381. mlx4_free_cmd_mailbox(dev, mailbox);
  382. mutex_unlock(&priv->mcg_table.mutex);
  383. return 0;
  384. out_list:
  385. list_del(&pqp->list);
  386. out_mailbox:
  387. mlx4_free_cmd_mailbox(dev, mailbox);
  388. out_alloc:
  389. kfree(pqp);
  390. out_mutex:
  391. mutex_unlock(&priv->mcg_table.mutex);
  392. return err;
  393. }
  394. static int remove_promisc_qp(struct mlx4_dev *dev, u8 port,
  395. enum mlx4_steer_type steer, u32 qpn)
  396. {
  397. struct mlx4_priv *priv = mlx4_priv(dev);
  398. struct mlx4_steer *s_steer;
  399. struct mlx4_cmd_mailbox *mailbox;
  400. struct mlx4_mgm *mgm;
  401. struct mlx4_steer_index *entry;
  402. struct mlx4_promisc_qp *pqp;
  403. struct mlx4_promisc_qp *dqp;
  404. u32 members_count;
  405. bool found;
  406. bool back_to_list = false;
  407. int loc, i;
  408. int err;
  409. s_steer = &mlx4_priv(dev)->steer[port - 1];
  410. mutex_lock(&priv->mcg_table.mutex);
  411. pqp = get_promisc_qp(dev, 0, steer, qpn);
  412. if (unlikely(!pqp)) {
  413. mlx4_warn(dev, "QP %x is not promiscuous QP\n", qpn);
  414. /* nothing to do */
  415. err = 0;
  416. goto out_mutex;
  417. }
  418. /*remove from list of promisc qps */
  419. list_del(&pqp->list);
  420. /* set the default entry not to include the removed one */
  421. mailbox = mlx4_alloc_cmd_mailbox(dev);
  422. if (IS_ERR(mailbox)) {
  423. err = -ENOMEM;
  424. back_to_list = true;
  425. goto out_list;
  426. }
  427. mgm = mailbox->buf;
  428. memset(mgm, 0, sizeof *mgm);
  429. members_count = 0;
  430. list_for_each_entry(dqp, &s_steer->promisc_qps[steer], list)
  431. mgm->qp[members_count++] = cpu_to_be32(dqp->qpn & MGM_QPN_MASK);
  432. mgm->members_count = cpu_to_be32(members_count | MLX4_PROT_ETH << 30);
  433. err = mlx4_WRITE_PROMISC(dev, port, steer, mailbox);
  434. if (err)
  435. goto out_mailbox;
  436. /* remove the qp from all the steering entries*/
  437. list_for_each_entry(entry, &s_steer->steer_entries[steer], list) {
  438. found = false;
  439. list_for_each_entry(dqp, &entry->duplicates, list) {
  440. if (dqp->qpn == qpn) {
  441. found = true;
  442. break;
  443. }
  444. }
  445. if (found) {
  446. /* a duplicate, no need to change the mgm,
  447. * only update the duplicates list */
  448. list_del(&dqp->list);
  449. kfree(dqp);
  450. } else {
  451. err = mlx4_READ_ENTRY(dev, entry->index, mailbox);
  452. if (err)
  453. goto out_mailbox;
  454. members_count = be32_to_cpu(mgm->members_count) & 0xffffff;
  455. for (loc = -1, i = 0; i < members_count; ++i)
  456. if ((be32_to_cpu(mgm->qp[i]) & MGM_QPN_MASK) == qpn)
  457. loc = i;
  458. mgm->members_count = cpu_to_be32(--members_count |
  459. (MLX4_PROT_ETH << 30));
  460. mgm->qp[loc] = mgm->qp[i - 1];
  461. mgm->qp[i - 1] = 0;
  462. err = mlx4_WRITE_ENTRY(dev, entry->index, mailbox);
  463. if (err)
  464. goto out_mailbox;
  465. }
  466. }
  467. out_mailbox:
  468. mlx4_free_cmd_mailbox(dev, mailbox);
  469. out_list:
  470. if (back_to_list)
  471. list_add_tail(&pqp->list, &s_steer->promisc_qps[steer]);
  472. else
  473. kfree(pqp);
  474. out_mutex:
  475. mutex_unlock(&priv->mcg_table.mutex);
  476. return err;
  477. }
/*
 * Caller must hold MCG table semaphore. gid and mgm parameters must
 * be properly aligned for command interface.
 *
 * Returns 0 unless a firmware command error occurs.
 *
 * If GID is found in MGM or MGM is empty, *index = *hash, *prev = -1
 * and *mgm holds MGM entry.
 *
 * if GID is found in AMGM, *index = index in AMGM, *prev = index of
 * previous entry in hash chain and *mgm holds AMGM entry.
 *
 * If no AMGM exists for given gid, *index = -1, *prev = index of last
 * entry in hash chain and *mgm holds end of hash chain.
 */
static int find_entry(struct mlx4_dev *dev, u8 port,
		      u8 *gid, enum mlx4_protocol prot,
		      struct mlx4_cmd_mailbox *mgm_mailbox,
		      int *prev, int *index)
{
	struct mlx4_cmd_mailbox *mailbox;
	struct mlx4_mgm *mgm = mgm_mailbox->buf;
	u8 *mgid;
	int err;
	u16 hash;
	/* for Ethernet, use op_mod 1 when VEP multicast steering is on */
	u8 op_mod = (prot == MLX4_PROT_ETH) ?
		!!(dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER) : 0;

	/* temporary mailbox just to pass the GID to the hash command */
	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return -ENOMEM;
	mgid = mailbox->buf;

	memcpy(mgid, gid, 16);

	err = mlx4_GID_HASH(dev, mailbox, &hash, op_mod);
	mlx4_free_cmd_mailbox(dev, mailbox);
	if (err)
		return err;

	if (0) /* debug aid, intentionally compiled out */
		mlx4_dbg(dev, "Hash for %pI6 is %04x\n", gid, hash);

	*index = hash;
	*prev = -1;

	/* walk the hash chain: MGM bucket first, then AMGM links */
	do {
		err = mlx4_READ_ENTRY(dev, *index, mgm_mailbox);
		if (err)
			return err;

		if (!(be32_to_cpu(mgm->members_count) & 0xffffff)) {
			if (*index != hash) {
				mlx4_err(dev, "Found zero MGID in AMGM.\n");
				err = -EINVAL;
			}
			return err;
		}

		/* match on both GID and protocol (bits 30-31) */
		if (!memcmp(mgm->gid, gid, 16) &&
		    be32_to_cpu(mgm->members_count) >> 30 == prot)
			return err;

		*prev = *index;
		*index = be32_to_cpu(mgm->next_gid_index) >> 6;
	} while (*index);

	*index = -1;
	return err;
}
/* Attach @qp to the multicast/unicast group @gid for @prot/@steer.
 *
 * Finds (or allocates, linking into the AMGM hash chain) the MGM entry
 * for @gid, appends the QP — optionally with the block-loopback bit —
 * and for Ethernet updates the software steering bookkeeping.
 * Returns 0 on success or a negative errno.
 */
int mlx4_qp_attach_common(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
			  int block_mcast_loopback, enum mlx4_protocol prot,
			  enum mlx4_steer_type steer)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_cmd_mailbox *mailbox;
	struct mlx4_mgm *mgm;
	u32 members_count;
	int index, prev;
	int link = 0;
	int i;
	int err;
	u8 port = gid[5]; /* port number is encoded in byte 5 of the GID */
	u8 new_entry = 0;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);
	mgm = mailbox->buf;

	mutex_lock(&priv->mcg_table.mutex);
	err = find_entry(dev, port, gid, prot,
			 mailbox, &prev, &index);
	if (err)
		goto out;

	if (index != -1) {
		/* entry found; a zero member count means it is an empty slot */
		if (!(be32_to_cpu(mgm->members_count) & 0xffffff)) {
			new_entry = 1;
			memcpy(mgm->gid, gid, 16);
		}
	} else {
		/* no entry: allocate one from the AMGM area and link it in */
		link = 1;

		index = mlx4_bitmap_alloc(&priv->mcg_table.bitmap);
		if (index == -1) {
			mlx4_err(dev, "No AMGM entries left\n");
			err = -ENOMEM;
			goto out;
		}
		index += dev->caps.num_mgms;

		new_entry = 1;
		memset(mgm, 0, sizeof *mgm);
		memcpy(mgm->gid, gid, 16);
	}

	members_count = be32_to_cpu(mgm->members_count) & 0xffffff;
	if (members_count == dev->caps.num_qp_per_mgm) {
		mlx4_err(dev, "MGM at index %x is full.\n", index);
		err = -ENOMEM;
		goto out;
	}

	/* attaching the same QP twice is a silent no-op */
	for (i = 0; i < members_count; ++i)
		if ((be32_to_cpu(mgm->qp[i]) & MGM_QPN_MASK) == qp->qpn) {
			mlx4_dbg(dev, "QP %06x already a member of MGM\n", qp->qpn);
			err = 0;
			goto out;
		}

	if (block_mcast_loopback)
		mgm->qp[members_count++] = cpu_to_be32((qp->qpn & MGM_QPN_MASK) |
						       (1U << MGM_BLCK_LB_BIT));
	else
		mgm->qp[members_count++] = cpu_to_be32(qp->qpn & MGM_QPN_MASK);

	mgm->members_count = cpu_to_be32(members_count | (u32) prot << 30);

	err = mlx4_WRITE_ENTRY(dev, index, mailbox);
	if (err)
		goto out;

	if (!link)
		goto out;

	/* link the new AMGM entry behind the previous chain element */
	err = mlx4_READ_ENTRY(dev, prev, mailbox);
	if (err)
		goto out;

	mgm->next_gid_index = cpu_to_be32(index << 6);

	err = mlx4_WRITE_ENTRY(dev, prev, mailbox);
	if (err)
		goto out;

out:
	if (prot == MLX4_PROT_ETH) {
		/* manage the steering entry for promisc mode */
		/* NOTE(review): return values of these helpers are ignored
		 * here — a bookkeeping failure is not propagated; confirm
		 * this is intentional. */
		if (new_entry)
			new_steering_entry(dev, port, steer, index, qp->qpn);
		else
			existing_steering_entry(dev, port, steer,
						index, qp->qpn);
	}
	if (err && link && index != -1) {
		/* undo the AMGM allocation on failure */
		if (index < dev->caps.num_mgms)
			mlx4_warn(dev, "Got AMGM index %d < %d",
				  index, dev->caps.num_mgms);
		else
			mlx4_bitmap_free(&priv->mcg_table.bitmap,
					 index - dev->caps.num_mgms);
	}
	mutex_unlock(&priv->mcg_table.mutex);

	mlx4_free_cmd_mailbox(dev, mailbox);
	return err;
}
/* Detach @qp from the group @gid for @prot/@steer.
 *
 * Removes the QP from the MGM entry; if the entry becomes removable it
 * is unlinked from the MGM/AMGM hash chain and its AMGM slot freed.
 * Returns 0 on success or a negative errno.
 */
int mlx4_qp_detach_common(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
			  enum mlx4_protocol prot, enum mlx4_steer_type steer)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_cmd_mailbox *mailbox;
	struct mlx4_mgm *mgm;
	u32 members_count;
	int prev, index;
	int i, loc;
	int err;
	u8 port = gid[5]; /* port number is encoded in byte 5 of the GID */
	bool removed_entry = false;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);
	mgm = mailbox->buf;

	mutex_lock(&priv->mcg_table.mutex);

	err = find_entry(dev, port, gid, prot,
			 mailbox, &prev, &index);
	if (err)
		goto out;

	if (index == -1) {
		mlx4_err(dev, "MGID %pI6 not found\n", gid);
		err = -EINVAL;
		goto out;
	}

	/* if this pq is also a promisc qp, it shouldn't be removed */
	if (prot == MLX4_PROT_ETH &&
	    check_duplicate_entry(dev, port, steer, index, qp->qpn))
		goto out;

	/* locate the QP inside the member array */
	members_count = be32_to_cpu(mgm->members_count) & 0xffffff;
	for (loc = -1, i = 0; i < members_count; ++i)
		if ((be32_to_cpu(mgm->qp[i]) & MGM_QPN_MASK) == qp->qpn)
			loc = i;

	if (loc == -1) {
		mlx4_err(dev, "QP %06x not found in MGM\n", qp->qpn);
		err = -EINVAL;
		goto out;
	}

	/* overwrite the removed slot with the last member, shrink the count */
	mgm->members_count = cpu_to_be32(--members_count | (u32) prot << 30);
	mgm->qp[loc] = mgm->qp[i - 1];
	mgm->qp[i - 1] = 0;

	if (prot == MLX4_PROT_ETH)
		removed_entry = can_remove_steering_entry(dev, port, steer,
							  index, qp->qpn);
	if (i != 1 && (prot != MLX4_PROT_ETH || !removed_entry)) {
		/* members remain (or entry must stay): just write it back */
		err = mlx4_WRITE_ENTRY(dev, index, mailbox);
		goto out;
	}

	/* We are going to delete the entry, members count should be 0 */
	mgm->members_count = cpu_to_be32((u32) prot << 30);

	if (prev == -1) {
		/* Remove entry from MGM */
		int amgm_index = be32_to_cpu(mgm->next_gid_index) >> 6;
		if (amgm_index) {
			/* pull the first AMGM chain entry up into the bucket */
			err = mlx4_READ_ENTRY(dev, amgm_index, mailbox);
			if (err)
				goto out;
		} else
			memset(mgm->gid, 0, 16);

		err = mlx4_WRITE_ENTRY(dev, index, mailbox);
		if (err)
			goto out;

		if (amgm_index) {
			if (amgm_index < dev->caps.num_mgms)
				mlx4_warn(dev, "MGM entry %d had AMGM index %d < %d",
					  index, amgm_index, dev->caps.num_mgms);
			else
				mlx4_bitmap_free(&priv->mcg_table.bitmap,
						 amgm_index - dev->caps.num_mgms);
		}
	} else {
		/* Remove entry from AMGM */
		int cur_next_index = be32_to_cpu(mgm->next_gid_index) >> 6;
		err = mlx4_READ_ENTRY(dev, prev, mailbox);
		if (err)
			goto out;

		/* splice this entry out of the chain */
		mgm->next_gid_index = cpu_to_be32(cur_next_index << 6);

		err = mlx4_WRITE_ENTRY(dev, prev, mailbox);
		if (err)
			goto out;

		if (index < dev->caps.num_mgms)
			mlx4_warn(dev, "entry %d had next AMGM index %d < %d",
				  prev, index, dev->caps.num_mgms);
		else
			mlx4_bitmap_free(&priv->mcg_table.bitmap,
					 index - dev->caps.num_mgms);
	}

out:
	mutex_unlock(&priv->mcg_table.mutex);

	mlx4_free_cmd_mailbox(dev, mailbox);
	return err;
}
  723. static int mlx4_QP_ATTACH(struct mlx4_dev *dev, struct mlx4_qp *qp,
  724. u8 gid[16], u8 attach, u8 block_loopback,
  725. enum mlx4_protocol prot)
  726. {
  727. struct mlx4_cmd_mailbox *mailbox;
  728. int err = 0;
  729. int qpn;
  730. if (!mlx4_is_mfunc(dev))
  731. return -EBADF;
  732. mailbox = mlx4_alloc_cmd_mailbox(dev);
  733. if (IS_ERR(mailbox))
  734. return PTR_ERR(mailbox);
  735. memcpy(mailbox->buf, gid, 16);
  736. qpn = qp->qpn;
  737. qpn |= (prot << 28);
  738. if (attach && block_loopback)
  739. qpn |= (1 << 31);
  740. err = mlx4_cmd(dev, mailbox->dma, qpn, attach,
  741. MLX4_CMD_QP_ATTACH, MLX4_CMD_TIME_CLASS_A,
  742. MLX4_CMD_WRAPPED);
  743. mlx4_free_cmd_mailbox(dev, mailbox);
  744. return err;
  745. }
  746. int mlx4_multicast_attach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
  747. int block_mcast_loopback, enum mlx4_protocol prot)
  748. {
  749. if (prot == MLX4_PROT_ETH &&
  750. !(dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER))
  751. return 0;
  752. if (prot == MLX4_PROT_ETH)
  753. gid[7] |= (MLX4_MC_STEER << 1);
  754. if (mlx4_is_mfunc(dev))
  755. return mlx4_QP_ATTACH(dev, qp, gid, 1,
  756. block_mcast_loopback, prot);
  757. return mlx4_qp_attach_common(dev, qp, gid, block_mcast_loopback,
  758. prot, MLX4_MC_STEER);
  759. }
  760. EXPORT_SYMBOL_GPL(mlx4_multicast_attach);
  761. int mlx4_multicast_detach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
  762. enum mlx4_protocol prot)
  763. {
  764. if (prot == MLX4_PROT_ETH &&
  765. !(dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER))
  766. return 0;
  767. if (prot == MLX4_PROT_ETH)
  768. gid[7] |= (MLX4_MC_STEER << 1);
  769. if (mlx4_is_mfunc(dev))
  770. return mlx4_QP_ATTACH(dev, qp, gid, 0, 0, prot);
  771. return mlx4_qp_detach_common(dev, qp, gid, prot, MLX4_MC_STEER);
  772. }
  773. EXPORT_SYMBOL_GPL(mlx4_multicast_detach);
  774. int mlx4_unicast_attach(struct mlx4_dev *dev,
  775. struct mlx4_qp *qp, u8 gid[16],
  776. int block_mcast_loopback, enum mlx4_protocol prot)
  777. {
  778. if (prot == MLX4_PROT_ETH &&
  779. !(dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER))
  780. return 0;
  781. if (prot == MLX4_PROT_ETH)
  782. gid[7] |= (MLX4_UC_STEER << 1);
  783. if (mlx4_is_mfunc(dev))
  784. return mlx4_QP_ATTACH(dev, qp, gid, 1,
  785. block_mcast_loopback, prot);
  786. return mlx4_qp_attach_common(dev, qp, gid, block_mcast_loopback,
  787. prot, MLX4_UC_STEER);
  788. }
  789. EXPORT_SYMBOL_GPL(mlx4_unicast_attach);
  790. int mlx4_unicast_detach(struct mlx4_dev *dev, struct mlx4_qp *qp,
  791. u8 gid[16], enum mlx4_protocol prot)
  792. {
  793. if (prot == MLX4_PROT_ETH &&
  794. !(dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER))
  795. return 0;
  796. if (prot == MLX4_PROT_ETH)
  797. gid[7] |= (MLX4_UC_STEER << 1);
  798. if (mlx4_is_mfunc(dev))
  799. return mlx4_QP_ATTACH(dev, qp, gid, 0, 0, prot);
  800. return mlx4_qp_detach_common(dev, qp, gid, prot, MLX4_UC_STEER);
  801. }
  802. EXPORT_SYMBOL_GPL(mlx4_unicast_detach);
/* PF-side handler for the wrapped PROMISC command from a slave.
 * in_param packs the QPN in the low 32 bits and the port in the top 2
 * bits (bits 62-63); in_modifier carries the steer type and op_modifier
 * selects add (non-zero) vs remove.
 */
int mlx4_PROMISC_wrapper(struct mlx4_dev *dev, int slave,
			 struct mlx4_vhcr *vhcr,
			 struct mlx4_cmd_mailbox *inbox,
			 struct mlx4_cmd_mailbox *outbox,
			 struct mlx4_cmd_info *cmd)
{
	u32 qpn = (u32) vhcr->in_param & 0xffffffff;
	u8 port = vhcr->in_param >> 62;
	enum mlx4_steer_type steer = vhcr->in_modifier;

	/* Promiscuous unicast is not allowed in mfunc */
	if (mlx4_is_mfunc(dev) && steer == MLX4_UC_STEER)
		return 0;

	if (vhcr->op_modifier)
		return add_promisc_qp(dev, port, steer, qpn);
	else
		return remove_promisc_qp(dev, port, steer, qpn);
}
/* Slave-side: issue the wrapped PROMISC command. The encoding mirrors
 * mlx4_PROMISC_wrapper: QPN in the low bits, port in bits 62-63 of
 * in_param; steer type as in_modifier; @add as op_modifier.
 */
static int mlx4_PROMISC(struct mlx4_dev *dev, u32 qpn,
			enum mlx4_steer_type steer, u8 add, u8 port)
{
	return mlx4_cmd(dev, (u64) qpn | (u64) port << 62, (u32) steer, add,
			MLX4_CMD_PROMISC, MLX4_CMD_TIME_CLASS_A,
			MLX4_CMD_WRAPPED);
}
  827. int mlx4_multicast_promisc_add(struct mlx4_dev *dev, u32 qpn, u8 port)
  828. {
  829. if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER))
  830. return 0;
  831. if (mlx4_is_mfunc(dev))
  832. return mlx4_PROMISC(dev, qpn, MLX4_MC_STEER, 1, port);
  833. return add_promisc_qp(dev, port, MLX4_MC_STEER, qpn);
  834. }
  835. EXPORT_SYMBOL_GPL(mlx4_multicast_promisc_add);
  836. int mlx4_multicast_promisc_remove(struct mlx4_dev *dev, u32 qpn, u8 port)
  837. {
  838. if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER))
  839. return 0;
  840. if (mlx4_is_mfunc(dev))
  841. return mlx4_PROMISC(dev, qpn, MLX4_MC_STEER, 0, port);
  842. return remove_promisc_qp(dev, port, MLX4_MC_STEER, qpn);
  843. }
  844. EXPORT_SYMBOL_GPL(mlx4_multicast_promisc_remove);
  845. int mlx4_unicast_promisc_add(struct mlx4_dev *dev, u32 qpn, u8 port)
  846. {
  847. if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER))
  848. return 0;
  849. if (mlx4_is_mfunc(dev))
  850. return mlx4_PROMISC(dev, qpn, MLX4_UC_STEER, 1, port);
  851. return add_promisc_qp(dev, port, MLX4_UC_STEER, qpn);
  852. }
  853. EXPORT_SYMBOL_GPL(mlx4_unicast_promisc_add);
  854. int mlx4_unicast_promisc_remove(struct mlx4_dev *dev, u32 qpn, u8 port)
  855. {
  856. if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER))
  857. return 0;
  858. if (mlx4_is_mfunc(dev))
  859. return mlx4_PROMISC(dev, qpn, MLX4_UC_STEER, 0, port);
  860. return remove_promisc_qp(dev, port, MLX4_UC_STEER, qpn);
  861. }
  862. EXPORT_SYMBOL_GPL(mlx4_unicast_promisc_remove);
  863. int mlx4_init_mcg_table(struct mlx4_dev *dev)
  864. {
  865. struct mlx4_priv *priv = mlx4_priv(dev);
  866. int err;
  867. err = mlx4_bitmap_init(&priv->mcg_table.bitmap, dev->caps.num_amgms,
  868. dev->caps.num_amgms - 1, 0, 0);
  869. if (err)
  870. return err;
  871. mutex_init(&priv->mcg_table.mutex);
  872. return 0;
  873. }
  874. void mlx4_cleanup_mcg_table(struct mlx4_dev *dev)
  875. {
  876. mlx4_bitmap_cleanup(&mlx4_priv(dev)->mcg_table.bitmap);
  877. }