mcg.c
/*
 * Copyright (c) 2006, 2007 Cisco Systems, Inc. All rights reserved.
 * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/string.h>
#include <linux/etherdevice.h>

#include <linux/mlx4/cmd.h>
#include <linux/export.h>

#include "mlx4.h"

#define MGM_QPN_MASK       0x00FFFFFF
#define MGM_BLCK_LB_BIT    30

static const u8 zero_gid[16];	/* automatically initialized to 0 */

struct mlx4_mgm {
	__be32			next_gid_index;
	__be32			members_count;
	u32			reserved[2];
	u8			gid[16];
	__be32			qp[MLX4_MAX_QP_PER_MGM];
};
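
/*
 * Field encodings used throughout this file (descriptive note, not part
 * of the original source): the low 24 bits of members_count hold the
 * number of QPs in the entry and the top two bits hold the mlx4_protocol
 * value; next_gid_index stores the index of the next entry in the hash
 * chain, shifted left by 6; bit MGM_BLCK_LB_BIT (30) of a qp[] word
 * marks a QP whose multicast loopback is blocked.
 */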
int mlx4_get_mgm_entry_size(struct mlx4_dev *dev)
{
	if (dev->caps.steering_mode ==
	    MLX4_STEERING_MODE_DEVICE_MANAGED)
		return 1 << MLX4_FS_MGM_LOG_ENTRY_SIZE;
	else
		return min((1 << mlx4_log_num_mgm_entry_size),
			   MLX4_MAX_MGM_ENTRY_SIZE);
}

int mlx4_get_qp_per_mgm(struct mlx4_dev *dev)
{
	return 4 * (mlx4_get_mgm_entry_size(dev) / 16 - 2);
}
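
/*
 * Worked example (illustrative): an MGM entry is laid out in 16-byte
 * lines.  The header above (next_gid_index, members_count, reserved[2],
 * gid[16]) occupies 32 bytes, i.e. two lines, and each remaining line
 * holds four 32-bit QPNs.  So a 64-byte entry yields 4 * (64/16 - 2) = 8
 * QPs per MGM, and a 128-byte entry yields 4 * (128/16 - 2) = 24.
 */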
static int mlx4_QP_FLOW_STEERING_ATTACH(struct mlx4_dev *dev,
					struct mlx4_cmd_mailbox *mailbox,
					u32 size,
					u64 *reg_id)
{
	u64 imm;
	int err = 0;

	err = mlx4_cmd_imm(dev, mailbox->dma, &imm, size, 0,
			   MLX4_QP_FLOW_STEERING_ATTACH, MLX4_CMD_TIME_CLASS_A,
			   MLX4_CMD_NATIVE);
	if (err)
		return err;
	*reg_id = imm;
	return err;
}

static int mlx4_QP_FLOW_STEERING_DETACH(struct mlx4_dev *dev, u64 regid)
{
	int err = 0;

	err = mlx4_cmd(dev, regid, 0, 0,
		       MLX4_QP_FLOW_STEERING_DETACH, MLX4_CMD_TIME_CLASS_A,
		       MLX4_CMD_NATIVE);

	return err;
}

static int mlx4_READ_ENTRY(struct mlx4_dev *dev, int index,
			   struct mlx4_cmd_mailbox *mailbox)
{
	return mlx4_cmd_box(dev, 0, mailbox->dma, index, 0, MLX4_CMD_READ_MCG,
			    MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
}

static int mlx4_WRITE_ENTRY(struct mlx4_dev *dev, int index,
			    struct mlx4_cmd_mailbox *mailbox)
{
	return mlx4_cmd(dev, mailbox->dma, index, 0, MLX4_CMD_WRITE_MCG,
			MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
}

static int mlx4_WRITE_PROMISC(struct mlx4_dev *dev, u8 port, u8 steer,
			      struct mlx4_cmd_mailbox *mailbox)
{
	u32 in_mod;

	in_mod = (u32) port << 16 | steer << 1;
	return mlx4_cmd(dev, mailbox->dma, in_mod, 0x1,
			MLX4_CMD_WRITE_MCG, MLX4_CMD_TIME_CLASS_A,
			MLX4_CMD_NATIVE);
}
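
/*
 * Note (descriptive, inferred from the call above and its callers):
 * WRITE_MCG with op_modifier 0x1 writes the per-port default ("promisc")
 * steering entry rather than an indexed MGM entry; the input modifier
 * packs the port number in bits 16 and up and the steer type in bit 1.
 */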
static int mlx4_GID_HASH(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
			 u16 *hash, u8 op_mod)
{
	u64 imm;
	int err;

	err = mlx4_cmd_imm(dev, mailbox->dma, &imm, 0, op_mod,
			   MLX4_CMD_MGID_HASH, MLX4_CMD_TIME_CLASS_A,
			   MLX4_CMD_NATIVE);

	if (!err)
		*hash = imm;

	return err;
}

static struct mlx4_promisc_qp *get_promisc_qp(struct mlx4_dev *dev, u8 pf_num,
					      enum mlx4_steer_type steer,
					      u32 qpn)
{
	struct mlx4_steer *s_steer = &mlx4_priv(dev)->steer[pf_num];
	struct mlx4_promisc_qp *pqp;

	list_for_each_entry(pqp, &s_steer->promisc_qps[steer], list) {
		if (pqp->qpn == qpn)
			return pqp;
	}
	/* not found */
	return NULL;
}
/*
 * Add new entry to steering data structure.
 * All promisc QPs should be added as well
 */
static int new_steering_entry(struct mlx4_dev *dev, u8 port,
			      enum mlx4_steer_type steer,
			      unsigned int index, u32 qpn)
{
	struct mlx4_steer *s_steer;
	struct mlx4_cmd_mailbox *mailbox;
	struct mlx4_mgm *mgm;
	u32 members_count;
	struct mlx4_steer_index *new_entry;
	struct mlx4_promisc_qp *pqp;
	struct mlx4_promisc_qp *dqp = NULL;
	u32 prot;
	int err;

	s_steer = &mlx4_priv(dev)->steer[port - 1];
	new_entry = kzalloc(sizeof *new_entry, GFP_KERNEL);
	if (!new_entry)
		return -ENOMEM;

	INIT_LIST_HEAD(&new_entry->duplicates);
	new_entry->index = index;
	list_add_tail(&new_entry->list, &s_steer->steer_entries[steer]);

	/* If the given qpn is also a promisc qp,
	 * it should be inserted to duplicates list
	 */
	pqp = get_promisc_qp(dev, 0, steer, qpn);
	if (pqp) {
		dqp = kmalloc(sizeof *dqp, GFP_KERNEL);
		if (!dqp) {
			err = -ENOMEM;
			goto out_alloc;
		}
		dqp->qpn = qpn;
		list_add_tail(&dqp->list, &new_entry->duplicates);
	}

	/* if no promisc qps for this vep, we are done */
	if (list_empty(&s_steer->promisc_qps[steer]))
		return 0;

	/* now need to add all the promisc qps to the new
	 * steering entry, as they should also receive the packets
	 * destined to this address */
	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox)) {
		err = -ENOMEM;
		goto out_alloc;
	}
	mgm = mailbox->buf;

	err = mlx4_READ_ENTRY(dev, index, mailbox);
	if (err)
		goto out_mailbox;

	members_count = be32_to_cpu(mgm->members_count) & 0xffffff;
	prot = be32_to_cpu(mgm->members_count) >> 30;
	list_for_each_entry(pqp, &s_steer->promisc_qps[steer], list) {
		/* don't add already existing qpn */
		if (pqp->qpn == qpn)
			continue;
		if (members_count == dev->caps.num_qp_per_mgm) {
			/* out of space */
			err = -ENOMEM;
			goto out_mailbox;
		}

		/* add the qpn */
		mgm->qp[members_count++] = cpu_to_be32(pqp->qpn & MGM_QPN_MASK);
	}
	/* update the qps count and update the entry with all the promisc qps */
	mgm->members_count = cpu_to_be32(members_count | (prot << 30));
	err = mlx4_WRITE_ENTRY(dev, index, mailbox);

out_mailbox:
	mlx4_free_cmd_mailbox(dev, mailbox);
	if (!err)
		return 0;
out_alloc:
	if (dqp) {
		list_del(&dqp->list);
		kfree(dqp);
	}
	list_del(&new_entry->list);
	kfree(new_entry);
	return err;
}
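
/*
 * Invariant maintained by the helpers above and below (summary added for
 * clarity): in B0 steering, every MGM entry must also list every
 * promiscuous QP of its steer type, and a QP that is attached explicitly
 * while also being promiscuous is remembered on the entry's duplicates
 * list so that a later detach does not drop it from the MGM.
 */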
/* update the data structures with existing steering entry */
static int existing_steering_entry(struct mlx4_dev *dev, u8 port,
				   enum mlx4_steer_type steer,
				   unsigned int index, u32 qpn)
{
	struct mlx4_steer *s_steer;
	struct mlx4_steer_index *tmp_entry, *entry = NULL;
	struct mlx4_promisc_qp *pqp;
	struct mlx4_promisc_qp *dqp;

	s_steer = &mlx4_priv(dev)->steer[port - 1];

	pqp = get_promisc_qp(dev, 0, steer, qpn);
	if (!pqp)
		return 0; /* nothing to do */

	list_for_each_entry(tmp_entry, &s_steer->steer_entries[steer], list) {
		if (tmp_entry->index == index) {
			entry = tmp_entry;
			break;
		}
	}
	if (unlikely(!entry)) {
		mlx4_warn(dev, "Steering entry at index %x is not registered\n", index);
		return -EINVAL;
	}

	/* the given qpn is listed as a promisc qpn
	 * we need to add it as a duplicate to this entry
	 * for future references */
	list_for_each_entry(dqp, &entry->duplicates, list) {
		if (qpn == dqp->qpn)
			return 0; /* qp is already duplicated */
	}

	/* add the qp as a duplicate on this index */
	dqp = kmalloc(sizeof *dqp, GFP_KERNEL);
	if (!dqp)
		return -ENOMEM;
	dqp->qpn = qpn;
	list_add_tail(&dqp->list, &entry->duplicates);

	return 0;
}
/* Check whether a qpn is a duplicate on steering entry
 * If so, it should not be removed from mgm */
static bool check_duplicate_entry(struct mlx4_dev *dev, u8 port,
				  enum mlx4_steer_type steer,
				  unsigned int index, u32 qpn)
{
	struct mlx4_steer *s_steer;
	struct mlx4_steer_index *tmp_entry, *entry = NULL;
	struct mlx4_promisc_qp *dqp, *tmp_dqp;

	s_steer = &mlx4_priv(dev)->steer[port - 1];

	/* if qp is not promisc, it cannot be duplicated */
	if (!get_promisc_qp(dev, 0, steer, qpn))
		return false;

	/* The qp is promisc qp so it is a duplicate on this index
	 * Find the index entry, and remove the duplicate */
	list_for_each_entry(tmp_entry, &s_steer->steer_entries[steer], list) {
		if (tmp_entry->index == index) {
			entry = tmp_entry;
			break;
		}
	}
	if (unlikely(!entry)) {
		mlx4_warn(dev, "Steering entry for index %x is not registered\n", index);
		return false;
	}
	list_for_each_entry_safe(dqp, tmp_dqp, &entry->duplicates, list) {
		if (dqp->qpn == qpn) {
			list_del(&dqp->list);
			kfree(dqp);
		}
	}
	return true;
}
/* If a steering entry contains only promisc QPs, it can be removed. */
static bool can_remove_steering_entry(struct mlx4_dev *dev, u8 port,
				      enum mlx4_steer_type steer,
				      unsigned int index, u32 tqpn)
{
	struct mlx4_steer *s_steer;
	struct mlx4_cmd_mailbox *mailbox;
	struct mlx4_mgm *mgm;
	struct mlx4_steer_index *entry = NULL, *tmp_entry;
	u32 qpn;
	u32 members_count;
	bool ret = false;
	int i;

	s_steer = &mlx4_priv(dev)->steer[port - 1];

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return false;
	mgm = mailbox->buf;

	if (mlx4_READ_ENTRY(dev, index, mailbox))
		goto out;
	members_count = be32_to_cpu(mgm->members_count) & 0xffffff;
	for (i = 0; i < members_count; i++) {
		qpn = be32_to_cpu(mgm->qp[i]) & MGM_QPN_MASK;
		if (!get_promisc_qp(dev, 0, steer, qpn) && qpn != tqpn) {
			/* the qp is not promisc, the entry can't be removed */
			goto out;
		}
	}
	/* All the qps currently registered for this entry are promiscuous;
	 * check for duplicates */
	ret = true;
	list_for_each_entry_safe(entry, tmp_entry, &s_steer->steer_entries[steer], list) {
		if (entry->index == index) {
			if (list_empty(&entry->duplicates)) {
				list_del(&entry->list);
				kfree(entry);
			} else {
				/* This entry contains duplicates so it shouldn't be removed */
				ret = false;
				goto out;
			}
		}
	}

out:
	mlx4_free_cmd_mailbox(dev, mailbox);
	return ret;
}
static int add_promisc_qp(struct mlx4_dev *dev, u8 port,
			  enum mlx4_steer_type steer, u32 qpn)
{
	struct mlx4_steer *s_steer;
	struct mlx4_cmd_mailbox *mailbox;
	struct mlx4_mgm *mgm;
	struct mlx4_steer_index *entry;
	struct mlx4_promisc_qp *pqp;
	struct mlx4_promisc_qp *dqp;
	u32 members_count;
	u32 prot;
	int i;
	bool found;
	int err;
	struct mlx4_priv *priv = mlx4_priv(dev);

	s_steer = &mlx4_priv(dev)->steer[port - 1];

	mutex_lock(&priv->mcg_table.mutex);

	if (get_promisc_qp(dev, 0, steer, qpn)) {
		err = 0;  /* Nothing to do, already exists */
		goto out_mutex;
	}

	pqp = kmalloc(sizeof *pqp, GFP_KERNEL);
	if (!pqp) {
		err = -ENOMEM;
		goto out_mutex;
	}
	pqp->qpn = qpn;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox)) {
		err = -ENOMEM;
		goto out_alloc;
	}
	mgm = mailbox->buf;

	/* the promisc qp needs to be added for each one of the steering
	 * entries, if it already exists, needs to be added as a duplicate
	 * for this entry */
	list_for_each_entry(entry, &s_steer->steer_entries[steer], list) {
		err = mlx4_READ_ENTRY(dev, entry->index, mailbox);
		if (err)
			goto out_mailbox;

		members_count = be32_to_cpu(mgm->members_count) & 0xffffff;
		prot = be32_to_cpu(mgm->members_count) >> 30;
		found = false;
		for (i = 0; i < members_count; i++) {
			if ((be32_to_cpu(mgm->qp[i]) & MGM_QPN_MASK) == qpn) {
				/* Entry already exists, add to duplicates */
				dqp = kmalloc(sizeof *dqp, GFP_KERNEL);
				if (!dqp) {
					err = -ENOMEM;
					goto out_mailbox;
				}
				dqp->qpn = qpn;
				list_add_tail(&dqp->list, &entry->duplicates);
				found = true;
			}
		}
		if (!found) {
			/* Need to add the qpn to mgm */
			if (members_count == dev->caps.num_qp_per_mgm) {
				/* entry is full */
				err = -ENOMEM;
				goto out_mailbox;
			}
			mgm->qp[members_count++] = cpu_to_be32(qpn & MGM_QPN_MASK);
			mgm->members_count = cpu_to_be32(members_count | (prot << 30));
			err = mlx4_WRITE_ENTRY(dev, entry->index, mailbox);
			if (err)
				goto out_mailbox;
		}
	}

	/* add the new qpn to list of promisc qps */
	list_add_tail(&pqp->list, &s_steer->promisc_qps[steer]);
	/* now need to add all the promisc qps to default entry */
	memset(mgm, 0, sizeof *mgm);
	members_count = 0;
	list_for_each_entry(dqp, &s_steer->promisc_qps[steer], list)
		mgm->qp[members_count++] = cpu_to_be32(dqp->qpn & MGM_QPN_MASK);
	mgm->members_count = cpu_to_be32(members_count | MLX4_PROT_ETH << 30);

	err = mlx4_WRITE_PROMISC(dev, port, steer, mailbox);
	if (err)
		goto out_list;

	mlx4_free_cmd_mailbox(dev, mailbox);
	mutex_unlock(&priv->mcg_table.mutex);
	return 0;

out_list:
	list_del(&pqp->list);
out_mailbox:
	mlx4_free_cmd_mailbox(dev, mailbox);
out_alloc:
	kfree(pqp);
out_mutex:
	mutex_unlock(&priv->mcg_table.mutex);
	return err;
}
static int remove_promisc_qp(struct mlx4_dev *dev, u8 port,
			     enum mlx4_steer_type steer, u32 qpn)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_steer *s_steer;
	struct mlx4_cmd_mailbox *mailbox;
	struct mlx4_mgm *mgm;
	struct mlx4_steer_index *entry;
	struct mlx4_promisc_qp *pqp;
	struct mlx4_promisc_qp *dqp;
	u32 members_count;
	bool found;
	bool back_to_list = false;
	int loc, i;
	int err;

	s_steer = &mlx4_priv(dev)->steer[port - 1];
	mutex_lock(&priv->mcg_table.mutex);

	pqp = get_promisc_qp(dev, 0, steer, qpn);
	if (unlikely(!pqp)) {
		mlx4_warn(dev, "QP %x is not a promiscuous QP\n", qpn);
		/* nothing to do */
		err = 0;
		goto out_mutex;
	}

	/* remove from list of promisc qps */
	list_del(&pqp->list);

	/* set the default entry not to include the removed one */
	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox)) {
		err = -ENOMEM;
		back_to_list = true;
		goto out_list;
	}
	mgm = mailbox->buf;
	memset(mgm, 0, sizeof *mgm);
	members_count = 0;
	list_for_each_entry(dqp, &s_steer->promisc_qps[steer], list)
		mgm->qp[members_count++] = cpu_to_be32(dqp->qpn & MGM_QPN_MASK);
	mgm->members_count = cpu_to_be32(members_count | MLX4_PROT_ETH << 30);

	err = mlx4_WRITE_PROMISC(dev, port, steer, mailbox);
	if (err)
		goto out_mailbox;

	/* remove the qp from all the steering entries */
	list_for_each_entry(entry, &s_steer->steer_entries[steer], list) {
		found = false;
		list_for_each_entry(dqp, &entry->duplicates, list) {
			if (dqp->qpn == qpn) {
				found = true;
				break;
			}
		}
		if (found) {
			/* a duplicate, no need to change the mgm,
			 * only update the duplicates list */
			list_del(&dqp->list);
			kfree(dqp);
		} else {
			err = mlx4_READ_ENTRY(dev, entry->index, mailbox);
			if (err)
				goto out_mailbox;
			members_count = be32_to_cpu(mgm->members_count) & 0xffffff;
			for (loc = -1, i = 0; i < members_count; ++i)
				if ((be32_to_cpu(mgm->qp[i]) & MGM_QPN_MASK) == qpn)
					loc = i;

			mgm->members_count = cpu_to_be32(--members_count |
							 (MLX4_PROT_ETH << 30));
			mgm->qp[loc] = mgm->qp[i - 1];
			mgm->qp[i - 1] = 0;

			err = mlx4_WRITE_ENTRY(dev, entry->index, mailbox);
			if (err)
				goto out_mailbox;
		}
	}

out_mailbox:
	mlx4_free_cmd_mailbox(dev, mailbox);
out_list:
	if (back_to_list)
		list_add_tail(&pqp->list, &s_steer->promisc_qps[steer]);
	else
		kfree(pqp);
out_mutex:
	mutex_unlock(&priv->mcg_table.mutex);
	return err;
}
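
/*
 * Summary (added note): add_promisc_qp and remove_promisc_qp keep the
 * per-port default steering entry equal to the current set of promisc
 * QPs: both rebuild the entry from the promisc_qps list and push it with
 * mlx4_WRITE_PROMISC, then walk every registered steering entry to add
 * or remove the QP (or just update its duplicates list).
 */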
/*
 * Caller must hold the MCG table mutex.  gid and mgm parameters must
 * be properly aligned for command interface.
 *
 * Returns 0 unless a firmware command error occurs.
 *
 * If GID is found in MGM or MGM is empty, *index = hash, *prev = -1
 * and *mgm holds MGM entry.
 *
 * If GID is found in AMGM, *index = index in AMGM, *prev = index of
 * previous entry in hash chain and *mgm holds AMGM entry.
 *
 * If no AMGM exists for given gid, *index = -1, *prev = index of last
 * entry in hash chain and *mgm holds end of hash chain.
 */
static int find_entry(struct mlx4_dev *dev, u8 port,
		      u8 *gid, enum mlx4_protocol prot,
		      struct mlx4_cmd_mailbox *mgm_mailbox,
		      int *prev, int *index)
{
	struct mlx4_cmd_mailbox *mailbox;
	struct mlx4_mgm *mgm = mgm_mailbox->buf;
	u8 *mgid;
	int err;
	u16 hash;
	u8 op_mod = (prot == MLX4_PROT_ETH) ?
		!!(dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER) : 0;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return -ENOMEM;
	mgid = mailbox->buf;

	memcpy(mgid, gid, 16);

	err = mlx4_GID_HASH(dev, mailbox, &hash, op_mod);
	mlx4_free_cmd_mailbox(dev, mailbox);
	if (err)
		return err;

	if (0)
		mlx4_dbg(dev, "Hash for %pI6 is %04x\n", gid, hash);

	*index = hash;
	*prev = -1;

	do {
		err = mlx4_READ_ENTRY(dev, *index, mgm_mailbox);
		if (err)
			return err;

		if (!(be32_to_cpu(mgm->members_count) & 0xffffff)) {
			if (*index != hash) {
				mlx4_err(dev, "Found zero MGID in AMGM.\n");
				err = -EINVAL;
			}
			return err;
		}

		if (!memcmp(mgm->gid, gid, 16) &&
		    be32_to_cpu(mgm->members_count) >> 30 == prot)
			return err;

		*prev = *index;
		*index = be32_to_cpu(mgm->next_gid_index) >> 6;
	} while (*index);

	*index = -1;
	return err;
}
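
/*
 * Layout sketch (added for clarity, matching the arithmetic in this
 * file): entries [0, num_mgms) form the hash-addressed MGM table and
 * entries [num_mgms, num_mgms + num_amgms) form the AMGM overflow area,
 * whose slots are handed out by priv->mcg_table.bitmap.  Chains start at
 * the MGID hash and follow next_gid_index >> 6 until it reads zero.
 */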
struct mlx4_net_trans_rule_hw_ctrl {
	__be32 ctrl;
	__be32 vf_vep_port;
	__be32 qpn;
	__be32 reserved;
};

static void trans_rule_ctrl_to_hw(struct mlx4_net_trans_rule *ctrl,
				  struct mlx4_net_trans_rule_hw_ctrl *hw)
{
	static const u8 __promisc_mode[] = {
		[MLX4_FS_PROMISC_NONE]   = 0x0,
		[MLX4_FS_PROMISC_UPLINK] = 0x1,
		[MLX4_FS_PROMISC_FUNCTION_PORT] = 0x2,
		[MLX4_FS_PROMISC_ALL_MULTI] = 0x3,
	};

	u32 dw = 0;

	dw = ctrl->queue_mode == MLX4_NET_TRANS_Q_LIFO ? 1 : 0;
	dw |= ctrl->exclusive ? (1 << 2) : 0;
	dw |= ctrl->allow_loopback ? (1 << 3) : 0;
	dw |= __promisc_mode[ctrl->promisc_mode] << 8;
	dw |= ctrl->priority << 16;

	hw->ctrl = cpu_to_be32(dw);
	hw->vf_vep_port = cpu_to_be32(ctrl->port);
	hw->qpn = cpu_to_be32(ctrl->qpn);
}
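
/*
 * Bit layout of the ctrl dword, as built above (descriptive note):
 * bit 0 = LIFO queue mode, bit 2 = exclusive, bit 3 = allow loopback,
 * bits 8-9 = promiscuous mode, bits 16 and up = rule priority.
 */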
struct mlx4_net_trans_rule_hw_ib {
	u8	size;
	u8	rsvd1;
	__be16	id;
	u32	rsvd2;
	__be32	qpn;
	__be32	qpn_mask;
	u8	dst_gid[16];
	u8	dst_gid_msk[16];
} __packed;

struct mlx4_net_trans_rule_hw_eth {
	u8	size;
	u8	rsvd;
	__be16	id;
	u8	rsvd1[6];
	u8	dst_mac[6];
	u16	rsvd2;
	u8	dst_mac_msk[6];
	u16	rsvd3;
	u8	src_mac[6];
	u16	rsvd4;
	u8	src_mac_msk[6];
	u8	rsvd5;
	u8	ether_type_enable;
	__be16	ether_type;
	__be16	vlan_id_msk;
	__be16	vlan_id;
} __packed;

struct mlx4_net_trans_rule_hw_tcp_udp {
	u8	size;
	u8	rsvd;
	__be16	id;
	__be16	rsvd1[3];
	__be16	dst_port;
	__be16	rsvd2;
	__be16	dst_port_msk;
	__be16	rsvd3;
	__be16	src_port;
	__be16	rsvd4;
	__be16	src_port_msk;
} __packed;

struct mlx4_net_trans_rule_hw_ipv4 {
	u8	size;
	u8	rsvd;
	__be16	id;
	__be32	rsvd1;
	__be32	dst_ip;
	__be32	dst_ip_msk;
	__be32	src_ip;
	__be32	src_ip_msk;
} __packed;

struct _rule_hw {
	union {
		struct {
			u8 size;
			u8 rsvd;
			__be16 id;
		};
		struct mlx4_net_trans_rule_hw_eth eth;
		struct mlx4_net_trans_rule_hw_ib ib;
		struct mlx4_net_trans_rule_hw_ipv4 ipv4;
		struct mlx4_net_trans_rule_hw_tcp_udp tcp_udp;
	};
};
static int parse_trans_rule(struct mlx4_dev *dev, struct mlx4_spec_list *spec,
			    struct _rule_hw *rule_hw)
{
	static const u16 __sw_id_hw[] = {
		[MLX4_NET_TRANS_RULE_ID_ETH]	= 0xE001,
		[MLX4_NET_TRANS_RULE_ID_IB]	= 0xE005,
		[MLX4_NET_TRANS_RULE_ID_IPV6]	= 0xE003,
		[MLX4_NET_TRANS_RULE_ID_IPV4]	= 0xE002,
		[MLX4_NET_TRANS_RULE_ID_TCP]	= 0xE004,
		[MLX4_NET_TRANS_RULE_ID_UDP]	= 0xE006
	};

	static const size_t __rule_hw_sz[] = {
		[MLX4_NET_TRANS_RULE_ID_ETH] =
			sizeof(struct mlx4_net_trans_rule_hw_eth),
		[MLX4_NET_TRANS_RULE_ID_IB] =
			sizeof(struct mlx4_net_trans_rule_hw_ib),
		[MLX4_NET_TRANS_RULE_ID_IPV6] = 0,
		[MLX4_NET_TRANS_RULE_ID_IPV4] =
			sizeof(struct mlx4_net_trans_rule_hw_ipv4),
		[MLX4_NET_TRANS_RULE_ID_TCP] =
			sizeof(struct mlx4_net_trans_rule_hw_tcp_udp),
		[MLX4_NET_TRANS_RULE_ID_UDP] =
			sizeof(struct mlx4_net_trans_rule_hw_tcp_udp)
	};

	if (spec->id >= MLX4_NET_TRANS_RULE_NUM) {
		mlx4_err(dev, "Invalid network rule id. id = %d\n", spec->id);
		return -EINVAL;
	}
	memset(rule_hw, 0, __rule_hw_sz[spec->id]);
	rule_hw->id = cpu_to_be16(__sw_id_hw[spec->id]);
	rule_hw->size = __rule_hw_sz[spec->id] >> 2;

	switch (spec->id) {
	case MLX4_NET_TRANS_RULE_ID_ETH:
		memcpy(rule_hw->eth.dst_mac, spec->eth.dst_mac, ETH_ALEN);
		memcpy(rule_hw->eth.dst_mac_msk, spec->eth.dst_mac_msk,
		       ETH_ALEN);
		memcpy(rule_hw->eth.src_mac, spec->eth.src_mac, ETH_ALEN);
		memcpy(rule_hw->eth.src_mac_msk, spec->eth.src_mac_msk,
		       ETH_ALEN);
		if (spec->eth.ether_type_enable) {
			rule_hw->eth.ether_type_enable = 1;
			rule_hw->eth.ether_type = spec->eth.ether_type;
		}
		rule_hw->eth.vlan_id = spec->eth.vlan_id;
		rule_hw->eth.vlan_id_msk = spec->eth.vlan_id_msk;
		break;

	case MLX4_NET_TRANS_RULE_ID_IB:
		rule_hw->ib.qpn = spec->ib.r_qpn;
		rule_hw->ib.qpn_mask = spec->ib.qpn_msk;
		memcpy(&rule_hw->ib.dst_gid, &spec->ib.dst_gid, 16);
		memcpy(&rule_hw->ib.dst_gid_msk, &spec->ib.dst_gid_msk, 16);
		break;

	case MLX4_NET_TRANS_RULE_ID_IPV6:
		return -EOPNOTSUPP;

	case MLX4_NET_TRANS_RULE_ID_IPV4:
		rule_hw->ipv4.src_ip = spec->ipv4.src_ip;
		rule_hw->ipv4.src_ip_msk = spec->ipv4.src_ip_msk;
		rule_hw->ipv4.dst_ip = spec->ipv4.dst_ip;
		rule_hw->ipv4.dst_ip_msk = spec->ipv4.dst_ip_msk;
		break;

	case MLX4_NET_TRANS_RULE_ID_TCP:
	case MLX4_NET_TRANS_RULE_ID_UDP:
		rule_hw->tcp_udp.dst_port = spec->tcp_udp.dst_port;
		rule_hw->tcp_udp.dst_port_msk = spec->tcp_udp.dst_port_msk;
		rule_hw->tcp_udp.src_port = spec->tcp_udp.src_port;
		rule_hw->tcp_udp.src_port_msk = spec->tcp_udp.src_port_msk;
		break;

	default:
		return -EINVAL;
	}

	return __rule_hw_sz[spec->id];
}
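
/*
 * Note on sizes (added): parse_trans_rule returns the number of bytes it
 * wrote so mlx4_flow_attach can append specs back to back in the
 * mailbox; rule_hw->size holds the same length in 4-byte units, and the
 * total passed to the ATTACH command below is likewise size >> 2.
 */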
static void mlx4_err_rule(struct mlx4_dev *dev, char *str,
			  struct mlx4_net_trans_rule *rule)
{
#define BUF_SIZE 256
	struct mlx4_spec_list *cur;
	char buf[BUF_SIZE];
	int len = 0;

	mlx4_err(dev, "%s", str);
	len += snprintf(buf + len, BUF_SIZE - len,
			"port = %d prio = 0x%x qp = 0x%x ",
			rule->port, rule->priority, rule->qpn);

	list_for_each_entry(cur, &rule->list, list) {
		switch (cur->id) {
		case MLX4_NET_TRANS_RULE_ID_ETH:
			len += snprintf(buf + len, BUF_SIZE - len,
					"dmac = %pM ", &cur->eth.dst_mac);
			if (cur->eth.ether_type)
				len += snprintf(buf + len, BUF_SIZE - len,
						"ethertype = 0x%x ",
						be16_to_cpu(cur->eth.ether_type));
			if (cur->eth.vlan_id)
				len += snprintf(buf + len, BUF_SIZE - len,
						"vlan-id = %d ",
						be16_to_cpu(cur->eth.vlan_id));
			break;

		case MLX4_NET_TRANS_RULE_ID_IPV4:
			if (cur->ipv4.src_ip)
				len += snprintf(buf + len, BUF_SIZE - len,
						"src-ip = %pI4 ",
						&cur->ipv4.src_ip);
			if (cur->ipv4.dst_ip)
				len += snprintf(buf + len, BUF_SIZE - len,
						"dst-ip = %pI4 ",
						&cur->ipv4.dst_ip);
			break;

		case MLX4_NET_TRANS_RULE_ID_TCP:
		case MLX4_NET_TRANS_RULE_ID_UDP:
			if (cur->tcp_udp.src_port)
				len += snprintf(buf + len, BUF_SIZE - len,
						"src-port = %d ",
						be16_to_cpu(cur->tcp_udp.src_port));
			if (cur->tcp_udp.dst_port)
				len += snprintf(buf + len, BUF_SIZE - len,
						"dst-port = %d ",
						be16_to_cpu(cur->tcp_udp.dst_port));
			break;

		case MLX4_NET_TRANS_RULE_ID_IB:
			len += snprintf(buf + len, BUF_SIZE - len,
					"dst-gid = %pI6\n", cur->ib.dst_gid);
			len += snprintf(buf + len, BUF_SIZE - len,
					"dst-gid-mask = %pI6\n",
					cur->ib.dst_gid_msk);
			break;

		case MLX4_NET_TRANS_RULE_ID_IPV6:
			break;

		default:
			break;
		}
	}
	len += snprintf(buf + len, BUF_SIZE - len, "\n");
	mlx4_err(dev, "%s", buf);

	if (len >= BUF_SIZE)
		mlx4_err(dev, "Network rule error message was truncated, print buffer is too small.\n");
}
int mlx4_flow_attach(struct mlx4_dev *dev,
		     struct mlx4_net_trans_rule *rule, u64 *reg_id)
{
	struct mlx4_cmd_mailbox *mailbox;
	struct mlx4_spec_list *cur;
	u32 size = 0;
	int ret;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);

	memset(mailbox->buf, 0, sizeof(struct mlx4_net_trans_rule_hw_ctrl));
	trans_rule_ctrl_to_hw(rule, mailbox->buf);

	size += sizeof(struct mlx4_net_trans_rule_hw_ctrl);

	list_for_each_entry(cur, &rule->list, list) {
		ret = parse_trans_rule(dev, cur, mailbox->buf + size);
		if (ret < 0) {
			mlx4_free_cmd_mailbox(dev, mailbox);
			return -EINVAL;
		}
		size += ret;
	}

	ret = mlx4_QP_FLOW_STEERING_ATTACH(dev, mailbox, size >> 2, reg_id);
	if (ret == -ENOMEM)
		mlx4_err_rule(dev,
			      "mcg table is full. Failed to register network rule.\n",
			      rule);
	else if (ret)
		mlx4_err_rule(dev, "Failed to register network rule.\n", rule);

	mlx4_free_cmd_mailbox(dev, mailbox);

	return ret;
}
EXPORT_SYMBOL_GPL(mlx4_flow_attach);
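
/*
 * Usage sketch (illustrative only; the values are hypothetical): to
 * steer TCP traffic with destination port 80 on port 1 to a QP, a caller
 * would build a rule much like mlx4_multicast_attach does below:
 *
 *	struct mlx4_spec_list spec = {
 *		.id = MLX4_NET_TRANS_RULE_ID_TCP,
 *		.tcp_udp = {
 *			.dst_port     = cpu_to_be16(80),
 *			.dst_port_msk = cpu_to_be16(0xffff),
 *		},
 *	};
 *	struct mlx4_net_trans_rule rule = {
 *		.queue_mode   = MLX4_NET_TRANS_Q_FIFO,
 *		.promisc_mode = MLX4_FS_PROMISC_NONE,
 *		.priority     = MLX4_DOMAIN_NIC,
 *		.port         = 1,
 *		.qpn          = qpn,
 *	};
 *	u64 reg_id;
 *
 *	INIT_LIST_HEAD(&rule.list);
 *	list_add_tail(&spec.list, &rule.list);
 *	err = mlx4_flow_attach(dev, &rule, &reg_id);
 *	...
 *	mlx4_flow_detach(dev, reg_id);
 */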
int mlx4_flow_detach(struct mlx4_dev *dev, u64 reg_id)
{
	int err;

	err = mlx4_QP_FLOW_STEERING_DETACH(dev, reg_id);
	if (err)
		mlx4_err(dev, "Failed to detach network rule. registration id = 0x%llx\n",
			 reg_id);
	return err;
}
EXPORT_SYMBOL_GPL(mlx4_flow_detach);
int mlx4_qp_attach_common(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
			  int block_mcast_loopback, enum mlx4_protocol prot,
			  enum mlx4_steer_type steer)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_cmd_mailbox *mailbox;
	struct mlx4_mgm *mgm;
	u32 members_count;
	int index, prev;
	int link = 0;
	int i;
	int err;
	u8 port = gid[5];
	u8 new_entry = 0;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);
	mgm = mailbox->buf;

	mutex_lock(&priv->mcg_table.mutex);
	err = find_entry(dev, port, gid, prot,
			 mailbox, &prev, &index);
	if (err)
		goto out;

	if (index != -1) {
		if (!(be32_to_cpu(mgm->members_count) & 0xffffff)) {
			new_entry = 1;
			memcpy(mgm->gid, gid, 16);
		}
	} else {
		link = 1;

		index = mlx4_bitmap_alloc(&priv->mcg_table.bitmap);
		if (index == -1) {
			mlx4_err(dev, "No AMGM entries left\n");
			err = -ENOMEM;
			goto out;
		}
		index += dev->caps.num_mgms;

		new_entry = 1;
		memset(mgm, 0, sizeof *mgm);
		memcpy(mgm->gid, gid, 16);
	}

	members_count = be32_to_cpu(mgm->members_count) & 0xffffff;
	if (members_count == dev->caps.num_qp_per_mgm) {
		mlx4_err(dev, "MGM at index %x is full.\n", index);
		err = -ENOMEM;
		goto out;
	}

	for (i = 0; i < members_count; ++i)
		if ((be32_to_cpu(mgm->qp[i]) & MGM_QPN_MASK) == qp->qpn) {
			mlx4_dbg(dev, "QP %06x already a member of MGM\n", qp->qpn);
			err = 0;
			goto out;
		}

	if (block_mcast_loopback)
		mgm->qp[members_count++] = cpu_to_be32((qp->qpn & MGM_QPN_MASK) |
						       (1U << MGM_BLCK_LB_BIT));
	else
		mgm->qp[members_count++] = cpu_to_be32(qp->qpn & MGM_QPN_MASK);

	mgm->members_count = cpu_to_be32(members_count | (u32) prot << 30);

	err = mlx4_WRITE_ENTRY(dev, index, mailbox);
	if (err)
		goto out;

	if (!link)
		goto out;

	err = mlx4_READ_ENTRY(dev, prev, mailbox);
	if (err)
		goto out;

	mgm->next_gid_index = cpu_to_be32(index << 6);

	err = mlx4_WRITE_ENTRY(dev, prev, mailbox);
	if (err)
		goto out;

out:
	if (prot == MLX4_PROT_ETH) {
		/* manage the steering entry for promisc mode */
		if (new_entry)
			new_steering_entry(dev, port, steer, index, qp->qpn);
		else
			existing_steering_entry(dev, port, steer,
						index, qp->qpn);
	}
	if (err && link && index != -1) {
		if (index < dev->caps.num_mgms)
			mlx4_warn(dev, "Got AMGM index %d < %d",
				  index, dev->caps.num_mgms);
		else
			mlx4_bitmap_free(&priv->mcg_table.bitmap,
					 index - dev->caps.num_mgms);
	}
	mutex_unlock(&priv->mcg_table.mutex);

	mlx4_free_cmd_mailbox(dev, mailbox);
	return err;
}
int mlx4_qp_detach_common(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
			  enum mlx4_protocol prot, enum mlx4_steer_type steer)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_cmd_mailbox *mailbox;
	struct mlx4_mgm *mgm;
	u32 members_count;
	int prev, index;
	int i, loc;
	int err;
	u8 port = gid[5];
	bool removed_entry = false;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);
	mgm = mailbox->buf;

	mutex_lock(&priv->mcg_table.mutex);

	err = find_entry(dev, port, gid, prot,
			 mailbox, &prev, &index);
	if (err)
		goto out;

	if (index == -1) {
		mlx4_err(dev, "MGID %pI6 not found\n", gid);
		err = -EINVAL;
		goto out;
	}

	/* if this qp is also a promisc qp, it shouldn't be removed */
	if (prot == MLX4_PROT_ETH &&
	    check_duplicate_entry(dev, port, steer, index, qp->qpn))
		goto out;

	members_count = be32_to_cpu(mgm->members_count) & 0xffffff;
	for (loc = -1, i = 0; i < members_count; ++i)
		if ((be32_to_cpu(mgm->qp[i]) & MGM_QPN_MASK) == qp->qpn)
			loc = i;

	if (loc == -1) {
		mlx4_err(dev, "QP %06x not found in MGM\n", qp->qpn);
		err = -EINVAL;
		goto out;
	}

	mgm->members_count = cpu_to_be32(--members_count | (u32) prot << 30);
	mgm->qp[loc] = mgm->qp[i - 1];
	mgm->qp[i - 1] = 0;

	if (prot == MLX4_PROT_ETH)
		removed_entry = can_remove_steering_entry(dev, port, steer,
							  index, qp->qpn);
	if (i != 1 && (prot != MLX4_PROT_ETH || !removed_entry)) {
		err = mlx4_WRITE_ENTRY(dev, index, mailbox);
		goto out;
	}

	/* We are going to delete the entry, members count should be 0 */
	mgm->members_count = cpu_to_be32((u32) prot << 30);

	if (prev == -1) {
		/* Remove entry from MGM */
		int amgm_index = be32_to_cpu(mgm->next_gid_index) >> 6;
		if (amgm_index) {
			err = mlx4_READ_ENTRY(dev, amgm_index, mailbox);
			if (err)
				goto out;
		} else
			memset(mgm->gid, 0, 16);

		err = mlx4_WRITE_ENTRY(dev, index, mailbox);
		if (err)
			goto out;

		if (amgm_index) {
			if (amgm_index < dev->caps.num_mgms)
				mlx4_warn(dev, "MGM entry %d had AMGM index %d < %d",
					  index, amgm_index, dev->caps.num_mgms);
			else
				mlx4_bitmap_free(&priv->mcg_table.bitmap,
						 amgm_index - dev->caps.num_mgms);
		}
	} else {
		/* Remove entry from AMGM */
		int cur_next_index = be32_to_cpu(mgm->next_gid_index) >> 6;
		err = mlx4_READ_ENTRY(dev, prev, mailbox);
		if (err)
			goto out;

		mgm->next_gid_index = cpu_to_be32(cur_next_index << 6);

		err = mlx4_WRITE_ENTRY(dev, prev, mailbox);
		if (err)
			goto out;

		if (index < dev->caps.num_mgms)
			mlx4_warn(dev, "entry %d had next AMGM index %d < %d",
				  prev, index, dev->caps.num_mgms);
		else
			mlx4_bitmap_free(&priv->mcg_table.bitmap,
					 index - dev->caps.num_mgms);
	}

out:
	mutex_unlock(&priv->mcg_table.mutex);
	mlx4_free_cmd_mailbox(dev, mailbox);
	return err;
}
static int mlx4_QP_ATTACH(struct mlx4_dev *dev, struct mlx4_qp *qp,
			  u8 gid[16], u8 attach, u8 block_loopback,
			  enum mlx4_protocol prot)
{
	struct mlx4_cmd_mailbox *mailbox;
	int err = 0;
	int qpn;

	if (!mlx4_is_mfunc(dev))
		return -EBADF;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);

	memcpy(mailbox->buf, gid, 16);
	qpn = qp->qpn;
	qpn |= (prot << 28);
	if (attach && block_loopback)
		qpn |= (1 << 31);

	err = mlx4_cmd(dev, mailbox->dma, qpn, attach,
		       MLX4_CMD_QP_ATTACH, MLX4_CMD_TIME_CLASS_A,
		       MLX4_CMD_WRAPPED);

	mlx4_free_cmd_mailbox(dev, mailbox);
	return err;
}
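
/*
 * Wire format note (descriptive): in the wrapped QP_ATTACH command the
 * GID travels in the mailbox, the input modifier carries the QPN with
 * the protocol in bits 28-30 and the block-loopback flag in bit 31, and
 * the op modifier selects attach (1) or detach (0).
 */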
int mlx4_multicast_attach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
			  u8 port, int block_mcast_loopback,
			  enum mlx4_protocol prot, u64 *reg_id)
{
	switch (dev->caps.steering_mode) {
	case MLX4_STEERING_MODE_A0:
		if (prot == MLX4_PROT_ETH)
			return 0;
		/* fall through */

	case MLX4_STEERING_MODE_B0:
		if (prot == MLX4_PROT_ETH)
			gid[7] |= (MLX4_MC_STEER << 1);

		if (mlx4_is_mfunc(dev))
			return mlx4_QP_ATTACH(dev, qp, gid, 1,
					      block_mcast_loopback, prot);
		return mlx4_qp_attach_common(dev, qp, gid,
					     block_mcast_loopback, prot,
					     MLX4_MC_STEER);

	case MLX4_STEERING_MODE_DEVICE_MANAGED: {
		struct mlx4_spec_list spec = { {NULL} };
		__be64 mac_mask = cpu_to_be64(MLX4_MAC_MASK << 16);

		struct mlx4_net_trans_rule rule = {
			.queue_mode = MLX4_NET_TRANS_Q_FIFO,
			.exclusive = 0,
			.promisc_mode = MLX4_FS_PROMISC_NONE,
			.priority = MLX4_DOMAIN_NIC,
		};

		rule.allow_loopback = !block_mcast_loopback;
		rule.port = port;
		rule.qpn = qp->qpn;
		INIT_LIST_HEAD(&rule.list);

		switch (prot) {
		case MLX4_PROT_ETH:
			spec.id = MLX4_NET_TRANS_RULE_ID_ETH;
			memcpy(spec.eth.dst_mac, &gid[10], ETH_ALEN);
			memcpy(spec.eth.dst_mac_msk, &mac_mask, ETH_ALEN);
			break;

		case MLX4_PROT_IB_IPV6:
			spec.id = MLX4_NET_TRANS_RULE_ID_IB;
			memcpy(spec.ib.dst_gid, gid, 16);
			memset(&spec.ib.dst_gid_msk, 0xff, 16);
			break;

		default:
			return -EINVAL;
		}
		list_add_tail(&spec.list, &rule.list);

		return mlx4_flow_attach(dev, &rule, reg_id);
	}

	default:
		return -EINVAL;
	}
}
EXPORT_SYMBOL_GPL(mlx4_multicast_attach);
int mlx4_multicast_detach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
			  enum mlx4_protocol prot, u64 reg_id)
{
	switch (dev->caps.steering_mode) {
	case MLX4_STEERING_MODE_A0:
		if (prot == MLX4_PROT_ETH)
			return 0;
		/* fall through */

	case MLX4_STEERING_MODE_B0:
		if (prot == MLX4_PROT_ETH)
			gid[7] |= (MLX4_MC_STEER << 1);

		if (mlx4_is_mfunc(dev))
			return mlx4_QP_ATTACH(dev, qp, gid, 0, 0, prot);

		return mlx4_qp_detach_common(dev, qp, gid, prot,
					     MLX4_MC_STEER);

	case MLX4_STEERING_MODE_DEVICE_MANAGED:
		return mlx4_flow_detach(dev, reg_id);

	default:
		return -EINVAL;
	}
}
EXPORT_SYMBOL_GPL(mlx4_multicast_detach);
int mlx4_flow_steer_promisc_add(struct mlx4_dev *dev, u8 port,
				u32 qpn, enum mlx4_net_trans_promisc_mode mode)
{
	struct mlx4_net_trans_rule rule;
	u64 *regid_p;

	switch (mode) {
	case MLX4_FS_PROMISC_UPLINK:
	case MLX4_FS_PROMISC_FUNCTION_PORT:
		regid_p = &dev->regid_promisc_array[port];
		break;
	case MLX4_FS_PROMISC_ALL_MULTI:
		regid_p = &dev->regid_allmulti_array[port];
		break;
	default:
		return -1;
	}

	if (*regid_p != 0)
		return -1;

	rule.promisc_mode = mode;
	rule.port = port;
	rule.qpn = qpn;
	INIT_LIST_HEAD(&rule.list);
	mlx4_err(dev, "going promisc on %x\n", port);

	return mlx4_flow_attach(dev, &rule, regid_p);
}
EXPORT_SYMBOL_GPL(mlx4_flow_steer_promisc_add);
int mlx4_flow_steer_promisc_remove(struct mlx4_dev *dev, u8 port,
				   enum mlx4_net_trans_promisc_mode mode)
{
	int ret;
	u64 *regid_p;

	switch (mode) {
	case MLX4_FS_PROMISC_UPLINK:
	case MLX4_FS_PROMISC_FUNCTION_PORT:
		regid_p = &dev->regid_promisc_array[port];
		break;
	case MLX4_FS_PROMISC_ALL_MULTI:
		regid_p = &dev->regid_allmulti_array[port];
		break;
	default:
		return -1;
	}

	if (*regid_p == 0)
		return -1;

	ret = mlx4_flow_detach(dev, *regid_p);
	if (ret == 0)
		*regid_p = 0;

	return ret;
}
EXPORT_SYMBOL_GPL(mlx4_flow_steer_promisc_remove);
int mlx4_unicast_attach(struct mlx4_dev *dev,
			struct mlx4_qp *qp, u8 gid[16],
			int block_mcast_loopback, enum mlx4_protocol prot)
{
	if (prot == MLX4_PROT_ETH)
		gid[7] |= (MLX4_UC_STEER << 1);

	if (mlx4_is_mfunc(dev))
		return mlx4_QP_ATTACH(dev, qp, gid, 1,
				      block_mcast_loopback, prot);

	return mlx4_qp_attach_common(dev, qp, gid, block_mcast_loopback,
				     prot, MLX4_UC_STEER);
}
EXPORT_SYMBOL_GPL(mlx4_unicast_attach);

int mlx4_unicast_detach(struct mlx4_dev *dev, struct mlx4_qp *qp,
			u8 gid[16], enum mlx4_protocol prot)
{
	if (prot == MLX4_PROT_ETH)
		gid[7] |= (MLX4_UC_STEER << 1);

	if (mlx4_is_mfunc(dev))
		return mlx4_QP_ATTACH(dev, qp, gid, 0, 0, prot);

	return mlx4_qp_detach_common(dev, qp, gid, prot, MLX4_UC_STEER);
}
EXPORT_SYMBOL_GPL(mlx4_unicast_detach);
int mlx4_PROMISC_wrapper(struct mlx4_dev *dev, int slave,
			 struct mlx4_vhcr *vhcr,
			 struct mlx4_cmd_mailbox *inbox,
			 struct mlx4_cmd_mailbox *outbox,
			 struct mlx4_cmd_info *cmd)
{
	u32 qpn = (u32) vhcr->in_param & 0xffffffff;
	u8 port = vhcr->in_param >> 62;
	enum mlx4_steer_type steer = vhcr->in_modifier;

	/* Promiscuous unicast is not allowed in mfunc */
	if (mlx4_is_mfunc(dev) && steer == MLX4_UC_STEER)
		return 0;

	if (vhcr->op_modifier)
		return add_promisc_qp(dev, port, steer, qpn);
	else
		return remove_promisc_qp(dev, port, steer, qpn);
}

static int mlx4_PROMISC(struct mlx4_dev *dev, u32 qpn,
			enum mlx4_steer_type steer, u8 add, u8 port)
{
	return mlx4_cmd(dev, (u64) qpn | (u64) port << 62, (u32) steer, add,
			MLX4_CMD_PROMISC, MLX4_CMD_TIME_CLASS_A,
			MLX4_CMD_WRAPPED);
}
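
/*
 * Encoding note (descriptive): mlx4_PROMISC and mlx4_PROMISC_wrapper
 * agree on the wire format of the wrapped command: the QPN sits in the
 * low 32 bits of the input parameter, the port in bits 62-63, the steer
 * type in the input modifier, and add (1) vs. remove (0) in the op
 * modifier.
 */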
int mlx4_multicast_promisc_add(struct mlx4_dev *dev, u32 qpn, u8 port)
{
	if (mlx4_is_mfunc(dev))
		return mlx4_PROMISC(dev, qpn, MLX4_MC_STEER, 1, port);

	return add_promisc_qp(dev, port, MLX4_MC_STEER, qpn);
}
EXPORT_SYMBOL_GPL(mlx4_multicast_promisc_add);

int mlx4_multicast_promisc_remove(struct mlx4_dev *dev, u32 qpn, u8 port)
{
	if (mlx4_is_mfunc(dev))
		return mlx4_PROMISC(dev, qpn, MLX4_MC_STEER, 0, port);

	return remove_promisc_qp(dev, port, MLX4_MC_STEER, qpn);
}
EXPORT_SYMBOL_GPL(mlx4_multicast_promisc_remove);

int mlx4_unicast_promisc_add(struct mlx4_dev *dev, u32 qpn, u8 port)
{
	if (mlx4_is_mfunc(dev))
		return mlx4_PROMISC(dev, qpn, MLX4_UC_STEER, 1, port);

	return add_promisc_qp(dev, port, MLX4_UC_STEER, qpn);
}
EXPORT_SYMBOL_GPL(mlx4_unicast_promisc_add);

int mlx4_unicast_promisc_remove(struct mlx4_dev *dev, u32 qpn, u8 port)
{
	if (mlx4_is_mfunc(dev))
		return mlx4_PROMISC(dev, qpn, MLX4_UC_STEER, 0, port);

	return remove_promisc_qp(dev, port, MLX4_UC_STEER, qpn);
}
EXPORT_SYMBOL_GPL(mlx4_unicast_promisc_remove);

int mlx4_init_mcg_table(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int err;

	/* No need for mcg_table when FW manages the MCG table */
	if (dev->caps.steering_mode ==
	    MLX4_STEERING_MODE_DEVICE_MANAGED)
		return 0;
	err = mlx4_bitmap_init(&priv->mcg_table.bitmap, dev->caps.num_amgms,
			       dev->caps.num_amgms - 1, 0, 0);
	if (err)
		return err;

	mutex_init(&priv->mcg_table.mutex);

	return 0;
}

void mlx4_cleanup_mcg_table(struct mlx4_dev *dev)
{
	if (dev->caps.steering_mode !=
	    MLX4_STEERING_MODE_DEVICE_MANAGED)
		mlx4_bitmap_cleanup(&mlx4_priv(dev)->mcg_table.bitmap);
}