mcg.c 27 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
370470570670770870971071171271371471571671771871972072172272372472572672772872973073173273373473573673773873974074174274374474574674774874975075175275375475575675775875976076176276376476576676776876977077177277377477577677777877978078178278378478578678778878979079179279379479579679779879980080180280380480580680780880981081181281381481581681781881982082182282382482582682782882983083183283383483583683783883984084184284384484584684784884985085185285385485585685785885986086186286386486586686786886987087187287387487587687787887988088188288388488588688788888989089189289389489589689789889990090190290390490590690790890991091191291391491591691791891992092192292392492592692792892993093193293393493593693793893994094194294394494594694794894995095195295395495595695795895996096196296396496596696796896997097197297397497597697797897998098198298398498598698798898999099199299399499599699799899910001001100210031004100510061007100810091010101110121013101410151016101710181019102010211022102310241025102610271028102910301031103210331034103510361037103810391040104110421043104410451046104710481049105010511052105310541055105610571058
  1. /*
  2. * Copyright (c) 2006, 2007 Cisco Systems, Inc. All rights reserved.
  3. * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
  4. *
  5. * This software is available to you under a choice of one of two
  6. * licenses. You may choose to be licensed under the terms of the GNU
  7. * General Public License (GPL) Version 2, available from the file
  8. * COPYING in the main directory of this source tree, or the
  9. * OpenIB.org BSD license below:
  10. *
  11. * Redistribution and use in source and binary forms, with or
  12. * without modification, are permitted provided that the following
  13. * conditions are met:
  14. *
  15. * - Redistributions of source code must retain the above
  16. * copyright notice, this list of conditions and the following
  17. * disclaimer.
  18. *
  19. * - Redistributions in binary form must reproduce the above
  20. * copyright notice, this list of conditions and the following
  21. * disclaimer in the documentation and/or other materials
  22. * provided with the distribution.
  23. *
  24. * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
  25. * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
  26. * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
  27. * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
  28. * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
  29. * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
  30. * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  31. * SOFTWARE.
  32. */
  33. #include <linux/string.h>
  34. #include <linux/etherdevice.h>
  35. #include <linux/mlx4/cmd.h>
  36. #include <linux/export.h>
  37. #include "mlx4.h"
  38. #define MGM_QPN_MASK 0x00FFFFFF
  39. #define MGM_BLCK_LB_BIT 30
  40. static const u8 zero_gid[16]; /* automatically initialized to 0 */
/* In-memory image of a hardware MGM (Multicast Group Member) entry.
 *
 * next_gid_index: bits 31:6 hold the index of the next entry in the AMGM
 *	hash chain (find_entry() shifts by 6 when following the chain).
 * members_count: bits 23:0 = number of valid slots in qp[];
 *	bits 31:30 = protocol (enum mlx4_protocol), as masked/shifted by
 *	the callers in this file.
 * qp[]: member QP numbers (low 24 bits, MGM_QPN_MASK); bit 30
 *	(MGM_BLCK_LB_BIT) blocks multicast loopback for that QP.
 */
struct mlx4_mgm {
	__be32 next_gid_index;
	__be32 members_count;
	u32 reserved[2];
	u8 gid[16];
	__be32 qp[MLX4_MAX_QP_PER_MGM];
};
/* Return the MGM entry size in bytes, capped at MLX4_MAX_MGM_ENTRY_SIZE.
 * mlx4_log_num_mgm_entry_size is defined elsewhere (presumably a module
 * parameter — confirm against mlx4.h); @dev is unused but kept for a
 * uniform API with mlx4_get_qp_per_mgm(). */
int mlx4_get_mgm_entry_size(struct mlx4_dev *dev)
{
	return min((1 << mlx4_log_num_mgm_entry_size), MLX4_MAX_MGM_ENTRY_SIZE);
}
  52. int mlx4_get_qp_per_mgm(struct mlx4_dev *dev)
  53. {
  54. return 4 * (mlx4_get_mgm_entry_size(dev) / 16 - 2);
  55. }
  56. static int mlx4_QP_FLOW_STEERING_ATTACH(struct mlx4_dev *dev,
  57. struct mlx4_cmd_mailbox *mailbox,
  58. u32 size,
  59. u64 *reg_id)
  60. {
  61. u64 imm;
  62. int err = 0;
  63. err = mlx4_cmd_imm(dev, mailbox->dma, &imm, size, 0,
  64. MLX4_QP_FLOW_STEERING_ATTACH, MLX4_CMD_TIME_CLASS_A,
  65. MLX4_CMD_NATIVE);
  66. if (err)
  67. return err;
  68. *reg_id = imm;
  69. return err;
  70. }
  71. static int mlx4_QP_FLOW_STEERING_DETACH(struct mlx4_dev *dev, u64 regid)
  72. {
  73. int err = 0;
  74. err = mlx4_cmd(dev, regid, 0, 0,
  75. MLX4_QP_FLOW_STEERING_DETACH, MLX4_CMD_TIME_CLASS_A,
  76. MLX4_CMD_NATIVE);
  77. return err;
  78. }
/* Read the MGM/AMGM entry at @index from the device into @mailbox->buf. */
static int mlx4_READ_ENTRY(struct mlx4_dev *dev, int index,
			   struct mlx4_cmd_mailbox *mailbox)
{
	return mlx4_cmd_box(dev, 0, mailbox->dma, index, 0, MLX4_CMD_READ_MCG,
			    MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
}
/* Write the MGM/AMGM entry in @mailbox->buf back to the device at @index. */
static int mlx4_WRITE_ENTRY(struct mlx4_dev *dev, int index,
			    struct mlx4_cmd_mailbox *mailbox)
{
	return mlx4_cmd(dev, mailbox->dma, index, 0, MLX4_CMD_WRITE_MCG,
			MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
}
  91. static int mlx4_WRITE_PROMISC(struct mlx4_dev *dev, u8 port, u8 steer,
  92. struct mlx4_cmd_mailbox *mailbox)
  93. {
  94. u32 in_mod;
  95. in_mod = (u32) port << 16 | steer << 1;
  96. return mlx4_cmd(dev, mailbox->dma, in_mod, 0x1,
  97. MLX4_CMD_WRITE_MCG, MLX4_CMD_TIME_CLASS_A,
  98. MLX4_CMD_NATIVE);
  99. }
  100. static int mlx4_GID_HASH(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
  101. u16 *hash, u8 op_mod)
  102. {
  103. u64 imm;
  104. int err;
  105. err = mlx4_cmd_imm(dev, mailbox->dma, &imm, 0, op_mod,
  106. MLX4_CMD_MGID_HASH, MLX4_CMD_TIME_CLASS_A,
  107. MLX4_CMD_NATIVE);
  108. if (!err)
  109. *hash = imm;
  110. return err;
  111. }
/* Look up @qpn in the promiscuous-QP list for steering type @steer.
 * @pf_num indexes the per-function steer[] array (callers in this file
 * always pass 0). Returns the list node, or NULL if @qpn is not a
 * promiscuous QP. Caller is expected to hold mcg_table.mutex —
 * NOTE(review): not enforced here; confirm against callers. */
static struct mlx4_promisc_qp *get_promisc_qp(struct mlx4_dev *dev, u8 pf_num,
					      enum mlx4_steer_type steer,
					      u32 qpn)
{
	struct mlx4_steer *s_steer = &mlx4_priv(dev)->steer[pf_num];
	struct mlx4_promisc_qp *pqp;

	list_for_each_entry(pqp, &s_steer->promisc_qps[steer], list) {
		if (pqp->qpn == qpn)
			return pqp;
	}
	/* not found */
	return NULL;
}
/*
 * Add new entry to steering data structure.
 * All promisc QPs should be added as well
 */
static int new_steering_entry(struct mlx4_dev *dev, u8 port,
			      enum mlx4_steer_type steer,
			      unsigned int index, u32 qpn)
{
	struct mlx4_steer *s_steer;
	struct mlx4_cmd_mailbox *mailbox;
	struct mlx4_mgm *mgm;
	u32 members_count;
	struct mlx4_steer_index *new_entry;
	struct mlx4_promisc_qp *pqp;
	struct mlx4_promisc_qp *dqp = NULL;
	u32 prot;
	int err;

	s_steer = &mlx4_priv(dev)->steer[port - 1];
	new_entry = kzalloc(sizeof *new_entry, GFP_KERNEL);
	if (!new_entry)
		return -ENOMEM;

	/* Register the new MGM index in the software tracking list first;
	 * the error path below unlinks it again. */
	INIT_LIST_HEAD(&new_entry->duplicates);
	new_entry->index = index;
	list_add_tail(&new_entry->list, &s_steer->steer_entries[steer]);

	/* If the given qpn is also a promisc qp,
	 * it should be inserted to duplicates list
	 */
	pqp = get_promisc_qp(dev, 0, steer, qpn);
	if (pqp) {
		dqp = kmalloc(sizeof *dqp, GFP_KERNEL);
		if (!dqp) {
			err = -ENOMEM;
			goto out_alloc;
		}
		dqp->qpn = qpn;
		list_add_tail(&dqp->list, &new_entry->duplicates);
	}

	/* if no promisc qps for this vep, we are done */
	if (list_empty(&s_steer->promisc_qps[steer]))
		return 0;

	/* now need to add all the promisc qps to the new
	 * steering entry, as they should also receive the packets
	 * destined to this address */
	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox)) {
		err = -ENOMEM;
		goto out_alloc;
	}
	mgm = mailbox->buf;

	err = mlx4_READ_ENTRY(dev, index, mailbox);
	if (err)
		goto out_mailbox;

	/* members_count: low 24 bits = count, top 2 bits = protocol */
	members_count = be32_to_cpu(mgm->members_count) & 0xffffff;
	prot = be32_to_cpu(mgm->members_count) >> 30;
	list_for_each_entry(pqp, &s_steer->promisc_qps[steer], list) {
		/* don't add already existing qpn */
		if (pqp->qpn == qpn)
			continue;
		if (members_count == dev->caps.num_qp_per_mgm) {
			/* out of space */
			err = -ENOMEM;
			goto out_mailbox;
		}

		/* add the qpn */
		mgm->qp[members_count++] = cpu_to_be32(pqp->qpn & MGM_QPN_MASK);
	}
	/* update the qps count and update the entry with all the promisc qps*/
	mgm->members_count = cpu_to_be32(members_count | (prot << 30));
	err = mlx4_WRITE_ENTRY(dev, index, mailbox);

out_mailbox:
	mlx4_free_cmd_mailbox(dev, mailbox);
	if (!err)
		return 0;
out_alloc:
	/* roll back the software tracking state added above */
	if (dqp) {
		list_del(&dqp->list);
		kfree(dqp);
	}
	list_del(&new_entry->list);
	kfree(new_entry);
	return err;
}
  207. /* update the data structures with existing steering entry */
  208. static int existing_steering_entry(struct mlx4_dev *dev, u8 port,
  209. enum mlx4_steer_type steer,
  210. unsigned int index, u32 qpn)
  211. {
  212. struct mlx4_steer *s_steer;
  213. struct mlx4_steer_index *tmp_entry, *entry = NULL;
  214. struct mlx4_promisc_qp *pqp;
  215. struct mlx4_promisc_qp *dqp;
  216. s_steer = &mlx4_priv(dev)->steer[port - 1];
  217. pqp = get_promisc_qp(dev, 0, steer, qpn);
  218. if (!pqp)
  219. return 0; /* nothing to do */
  220. list_for_each_entry(tmp_entry, &s_steer->steer_entries[steer], list) {
  221. if (tmp_entry->index == index) {
  222. entry = tmp_entry;
  223. break;
  224. }
  225. }
  226. if (unlikely(!entry)) {
  227. mlx4_warn(dev, "Steering entry at index %x is not registered\n", index);
  228. return -EINVAL;
  229. }
  230. /* the given qpn is listed as a promisc qpn
  231. * we need to add it as a duplicate to this entry
  232. * for future references */
  233. list_for_each_entry(dqp, &entry->duplicates, list) {
  234. if (qpn == pqp->qpn)
  235. return 0; /* qp is already duplicated */
  236. }
  237. /* add the qp as a duplicate on this index */
  238. dqp = kmalloc(sizeof *dqp, GFP_KERNEL);
  239. if (!dqp)
  240. return -ENOMEM;
  241. dqp->qpn = qpn;
  242. list_add_tail(&dqp->list, &entry->duplicates);
  243. return 0;
  244. }
/* Check whether a qpn is a duplicate on steering entry
 * If so, it should not be removed from mgm */
static bool check_duplicate_entry(struct mlx4_dev *dev, u8 port,
				  enum mlx4_steer_type steer,
				  unsigned int index, u32 qpn)
{
	struct mlx4_steer *s_steer;
	struct mlx4_steer_index *tmp_entry, *entry = NULL;
	struct mlx4_promisc_qp *dqp, *tmp_dqp;

	s_steer = &mlx4_priv(dev)->steer[port - 1];

	/* if qp is not promisc, it cannot be duplicated */
	if (!get_promisc_qp(dev, 0, steer, qpn))
		return false;

	/* The qp is promisc qp so it is a duplicate on this index
	 * Find the index entry, and remove the duplicate */
	list_for_each_entry(tmp_entry, &s_steer->steer_entries[steer], list) {
		if (tmp_entry->index == index) {
			entry = tmp_entry;
			break;
		}
	}
	if (unlikely(!entry)) {
		mlx4_warn(dev, "Steering entry for index %x is not registered\n", index);
		return false;
	}
	/* safe iteration: the matching node is deleted while walking */
	list_for_each_entry_safe(dqp, tmp_dqp, &entry->duplicates, list) {
		if (dqp->qpn == qpn) {
			list_del(&dqp->list);
			kfree(dqp);
		}
	}
	/* qpn is promisc: report "duplicate" even if it was not found in
	 * the duplicates list, so the caller keeps it in the MGM entry */
	return true;
}
/* If a steering entry contains only promisc QPs, it can be removed. */
static bool can_remove_steering_entry(struct mlx4_dev *dev, u8 port,
				      enum mlx4_steer_type steer,
				      unsigned int index, u32 tqpn)
{
	struct mlx4_steer *s_steer;
	struct mlx4_cmd_mailbox *mailbox;
	struct mlx4_mgm *mgm;
	struct mlx4_steer_index *entry = NULL, *tmp_entry;
	u32 qpn;
	u32 members_count;
	bool ret = false;
	int i;

	s_steer = &mlx4_priv(dev)->steer[port - 1];

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return false;
	mgm = mailbox->buf;

	if (mlx4_READ_ENTRY(dev, index, mailbox))
		goto out;
	members_count = be32_to_cpu(mgm->members_count) & 0xffffff;
	/* @tqpn (the QP being detached) is exempt from the promisc test */
	for (i = 0; i < members_count; i++) {
		qpn = be32_to_cpu(mgm->qp[i]) & MGM_QPN_MASK;
		if (!get_promisc_qp(dev, 0, steer, qpn) && qpn != tqpn) {
			/* the qp is not promisc, the entry can't be removed */
			goto out;
		}
	}
	/* All the qps currently registered for this entry are promiscuous,
	 * Checking for duplicates */
	ret = true;
	list_for_each_entry_safe(entry, tmp_entry, &s_steer->steer_entries[steer], list) {
		if (entry->index == index) {
			if (list_empty(&entry->duplicates)) {
				/* drop the software tracking node too */
				list_del(&entry->list);
				kfree(entry);
			} else {
				/* This entry contains duplicates so it shouldn't be removed */
				ret = false;
				goto out;
			}
		}
	}

out:
	mlx4_free_cmd_mailbox(dev, mailbox);
	return ret;
}
  325. static int add_promisc_qp(struct mlx4_dev *dev, u8 port,
  326. enum mlx4_steer_type steer, u32 qpn)
  327. {
  328. struct mlx4_steer *s_steer;
  329. struct mlx4_cmd_mailbox *mailbox;
  330. struct mlx4_mgm *mgm;
  331. struct mlx4_steer_index *entry;
  332. struct mlx4_promisc_qp *pqp;
  333. struct mlx4_promisc_qp *dqp;
  334. u32 members_count;
  335. u32 prot;
  336. int i;
  337. bool found;
  338. int err;
  339. struct mlx4_priv *priv = mlx4_priv(dev);
  340. s_steer = &mlx4_priv(dev)->steer[port - 1];
  341. mutex_lock(&priv->mcg_table.mutex);
  342. if (get_promisc_qp(dev, 0, steer, qpn)) {
  343. err = 0; /* Noting to do, already exists */
  344. goto out_mutex;
  345. }
  346. pqp = kmalloc(sizeof *pqp, GFP_KERNEL);
  347. if (!pqp) {
  348. err = -ENOMEM;
  349. goto out_mutex;
  350. }
  351. pqp->qpn = qpn;
  352. mailbox = mlx4_alloc_cmd_mailbox(dev);
  353. if (IS_ERR(mailbox)) {
  354. err = -ENOMEM;
  355. goto out_alloc;
  356. }
  357. mgm = mailbox->buf;
  358. /* the promisc qp needs to be added for each one of the steering
  359. * entries, if it already exists, needs to be added as a duplicate
  360. * for this entry */
  361. list_for_each_entry(entry, &s_steer->steer_entries[steer], list) {
  362. err = mlx4_READ_ENTRY(dev, entry->index, mailbox);
  363. if (err)
  364. goto out_mailbox;
  365. members_count = be32_to_cpu(mgm->members_count) & 0xffffff;
  366. prot = be32_to_cpu(mgm->members_count) >> 30;
  367. found = false;
  368. for (i = 0; i < members_count; i++) {
  369. if ((be32_to_cpu(mgm->qp[i]) & MGM_QPN_MASK) == qpn) {
  370. /* Entry already exists, add to duplicates */
  371. dqp = kmalloc(sizeof *dqp, GFP_KERNEL);
  372. if (!dqp)
  373. goto out_mailbox;
  374. dqp->qpn = qpn;
  375. list_add_tail(&dqp->list, &entry->duplicates);
  376. found = true;
  377. }
  378. }
  379. if (!found) {
  380. /* Need to add the qpn to mgm */
  381. if (members_count == dev->caps.num_qp_per_mgm) {
  382. /* entry is full */
  383. err = -ENOMEM;
  384. goto out_mailbox;
  385. }
  386. mgm->qp[members_count++] = cpu_to_be32(qpn & MGM_QPN_MASK);
  387. mgm->members_count = cpu_to_be32(members_count | (prot << 30));
  388. err = mlx4_WRITE_ENTRY(dev, entry->index, mailbox);
  389. if (err)
  390. goto out_mailbox;
  391. }
  392. }
  393. /* add the new qpn to list of promisc qps */
  394. list_add_tail(&pqp->list, &s_steer->promisc_qps[steer]);
  395. /* now need to add all the promisc qps to default entry */
  396. memset(mgm, 0, sizeof *mgm);
  397. members_count = 0;
  398. list_for_each_entry(dqp, &s_steer->promisc_qps[steer], list)
  399. mgm->qp[members_count++] = cpu_to_be32(dqp->qpn & MGM_QPN_MASK);
  400. mgm->members_count = cpu_to_be32(members_count | MLX4_PROT_ETH << 30);
  401. err = mlx4_WRITE_PROMISC(dev, port, steer, mailbox);
  402. if (err)
  403. goto out_list;
  404. mlx4_free_cmd_mailbox(dev, mailbox);
  405. mutex_unlock(&priv->mcg_table.mutex);
  406. return 0;
  407. out_list:
  408. list_del(&pqp->list);
  409. out_mailbox:
  410. mlx4_free_cmd_mailbox(dev, mailbox);
  411. out_alloc:
  412. kfree(pqp);
  413. out_mutex:
  414. mutex_unlock(&priv->mcg_table.mutex);
  415. return err;
  416. }
  417. static int remove_promisc_qp(struct mlx4_dev *dev, u8 port,
  418. enum mlx4_steer_type steer, u32 qpn)
  419. {
  420. struct mlx4_priv *priv = mlx4_priv(dev);
  421. struct mlx4_steer *s_steer;
  422. struct mlx4_cmd_mailbox *mailbox;
  423. struct mlx4_mgm *mgm;
  424. struct mlx4_steer_index *entry;
  425. struct mlx4_promisc_qp *pqp;
  426. struct mlx4_promisc_qp *dqp;
  427. u32 members_count;
  428. bool found;
  429. bool back_to_list = false;
  430. int loc, i;
  431. int err;
  432. s_steer = &mlx4_priv(dev)->steer[port - 1];
  433. mutex_lock(&priv->mcg_table.mutex);
  434. pqp = get_promisc_qp(dev, 0, steer, qpn);
  435. if (unlikely(!pqp)) {
  436. mlx4_warn(dev, "QP %x is not promiscuous QP\n", qpn);
  437. /* nothing to do */
  438. err = 0;
  439. goto out_mutex;
  440. }
  441. /*remove from list of promisc qps */
  442. list_del(&pqp->list);
  443. /* set the default entry not to include the removed one */
  444. mailbox = mlx4_alloc_cmd_mailbox(dev);
  445. if (IS_ERR(mailbox)) {
  446. err = -ENOMEM;
  447. back_to_list = true;
  448. goto out_list;
  449. }
  450. mgm = mailbox->buf;
  451. memset(mgm, 0, sizeof *mgm);
  452. members_count = 0;
  453. list_for_each_entry(dqp, &s_steer->promisc_qps[steer], list)
  454. mgm->qp[members_count++] = cpu_to_be32(dqp->qpn & MGM_QPN_MASK);
  455. mgm->members_count = cpu_to_be32(members_count | MLX4_PROT_ETH << 30);
  456. err = mlx4_WRITE_PROMISC(dev, port, steer, mailbox);
  457. if (err)
  458. goto out_mailbox;
  459. /* remove the qp from all the steering entries*/
  460. list_for_each_entry(entry, &s_steer->steer_entries[steer], list) {
  461. found = false;
  462. list_for_each_entry(dqp, &entry->duplicates, list) {
  463. if (dqp->qpn == qpn) {
  464. found = true;
  465. break;
  466. }
  467. }
  468. if (found) {
  469. /* a duplicate, no need to change the mgm,
  470. * only update the duplicates list */
  471. list_del(&dqp->list);
  472. kfree(dqp);
  473. } else {
  474. err = mlx4_READ_ENTRY(dev, entry->index, mailbox);
  475. if (err)
  476. goto out_mailbox;
  477. members_count = be32_to_cpu(mgm->members_count) & 0xffffff;
  478. for (loc = -1, i = 0; i < members_count; ++i)
  479. if ((be32_to_cpu(mgm->qp[i]) & MGM_QPN_MASK) == qpn)
  480. loc = i;
  481. mgm->members_count = cpu_to_be32(--members_count |
  482. (MLX4_PROT_ETH << 30));
  483. mgm->qp[loc] = mgm->qp[i - 1];
  484. mgm->qp[i - 1] = 0;
  485. err = mlx4_WRITE_ENTRY(dev, entry->index, mailbox);
  486. if (err)
  487. goto out_mailbox;
  488. }
  489. }
  490. out_mailbox:
  491. mlx4_free_cmd_mailbox(dev, mailbox);
  492. out_list:
  493. if (back_to_list)
  494. list_add_tail(&pqp->list, &s_steer->promisc_qps[steer]);
  495. else
  496. kfree(pqp);
  497. out_mutex:
  498. mutex_unlock(&priv->mcg_table.mutex);
  499. return err;
  500. }
/*
 * Caller must hold MCG table semaphore. gid and mgm parameters must
 * be properly aligned for command interface.
 *
 * Returns 0 unless a firmware command error occurs.
 *
 * If GID is found in MGM or MGM is empty, *index = *hash, *prev = -1
 * and *mgm holds MGM entry.
 *
 * if GID is found in AMGM, *index = index in AMGM, *prev = index of
 * previous entry in hash chain and *mgm holds AMGM entry.
 *
 * If no AMGM exists for given gid, *index = -1, *prev = index of last
 * entry in hash chain and *mgm holds end of hash chain.
 */
static int find_entry(struct mlx4_dev *dev, u8 port,
		      u8 *gid, enum mlx4_protocol prot,
		      struct mlx4_cmd_mailbox *mgm_mailbox,
		      int *prev, int *index)
{
	struct mlx4_cmd_mailbox *mailbox;
	struct mlx4_mgm *mgm = mgm_mailbox->buf;
	u8 *mgid;
	int err;
	u16 hash;
	/* For Ethernet with VEP multicast steering, ask firmware for the
	 * steered hash variant (op_mod 1); otherwise plain hashing. */
	u8 op_mod = (prot == MLX4_PROT_ETH) ?
		!!(dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER) : 0;

	/* temporary mailbox just for the GID_HASH command */
	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return -ENOMEM;
	mgid = mailbox->buf;

	memcpy(mgid, gid, 16);

	err = mlx4_GID_HASH(dev, mailbox, &hash, op_mod);
	mlx4_free_cmd_mailbox(dev, mailbox);
	if (err)
		return err;

	if (0)		/* disabled debug trace */
		mlx4_dbg(dev, "Hash for %pI6 is %04x\n", gid, hash);

	*index = hash;
	*prev = -1;

	/* walk the hash chain: MGM bucket first, then AMGM links
	 * (next_gid_index stores the next index in bits 31:6) */
	do {
		err = mlx4_READ_ENTRY(dev, *index, mgm_mailbox);
		if (err)
			return err;

		if (!(be32_to_cpu(mgm->members_count) & 0xffffff)) {
			/* empty entry: valid only at the chain head */
			if (*index != hash) {
				mlx4_err(dev, "Found zero MGID in AMGM.\n");
				err = -EINVAL;
			}
			return err;
		}

		/* match requires both the GID and the protocol bits */
		if (!memcmp(mgm->gid, gid, 16) &&
		    be32_to_cpu(mgm->members_count) >> 30 == prot)
			return err;

		*prev = *index;
		*index = be32_to_cpu(mgm->next_gid_index) >> 6;
	} while (*index);

	*index = -1;
	return err;
}
/* Attach @qp to the group identified by @gid: find (or create) the
 * MGM/AMGM entry, append the QPN, and link a newly allocated AMGM entry
 * into its hash chain. For Ethernet, also update the software steering
 * tracking (promisc handling). gid[5] carries the port number.
 * Returns 0 or a negative errno. */
int mlx4_qp_attach_common(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
			  int block_mcast_loopback, enum mlx4_protocol prot,
			  enum mlx4_steer_type steer)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_cmd_mailbox *mailbox;
	struct mlx4_mgm *mgm;
	u32 members_count;
	int index, prev;
	int link = 0;	/* nonzero: new AMGM entry must be chained in */
	int i;
	int err;
	u8 port = gid[5];
	u8 new_entry = 0;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);
	mgm = mailbox->buf;

	mutex_lock(&priv->mcg_table.mutex);
	err = find_entry(dev, port, gid, prot,
			 mailbox, &prev, &index);
	if (err)
		goto out;

	if (index != -1) {
		/* existing chain slot; empty means fresh MGM bucket */
		if (!(be32_to_cpu(mgm->members_count) & 0xffffff)) {
			new_entry = 1;
			memcpy(mgm->gid, gid, 16);
		}
	} else {
		/* GID not present: allocate an AMGM entry and link it */
		link = 1;

		index = mlx4_bitmap_alloc(&priv->mcg_table.bitmap);
		if (index == -1) {
			mlx4_err(dev, "No AMGM entries left\n");
			err = -ENOMEM;
			goto out;
		}
		/* AMGM entries live after the num_mgms MGM buckets */
		index += dev->caps.num_mgms;

		new_entry = 1;
		memset(mgm, 0, sizeof *mgm);
		memcpy(mgm->gid, gid, 16);
	}

	members_count = be32_to_cpu(mgm->members_count) & 0xffffff;
	if (members_count == dev->caps.num_qp_per_mgm) {
		mlx4_err(dev, "MGM at index %x is full.\n", index);
		err = -ENOMEM;
		goto out;
	}

	for (i = 0; i < members_count; ++i)
		if ((be32_to_cpu(mgm->qp[i]) & MGM_QPN_MASK) == qp->qpn) {
			mlx4_dbg(dev, "QP %06x already a member of MGM\n", qp->qpn);
			err = 0;
			goto out;
		}

	if (block_mcast_loopback)
		mgm->qp[members_count++] = cpu_to_be32((qp->qpn & MGM_QPN_MASK) |
						       (1U << MGM_BLCK_LB_BIT));
	else
		mgm->qp[members_count++] = cpu_to_be32(qp->qpn & MGM_QPN_MASK);

	mgm->members_count = cpu_to_be32(members_count | (u32) prot << 30);

	err = mlx4_WRITE_ENTRY(dev, index, mailbox);
	if (err)
		goto out;

	if (!link)
		goto out;

	/* chain the new AMGM entry after the previous chain tail */
	err = mlx4_READ_ENTRY(dev, prev, mailbox);
	if (err)
		goto out;

	mgm->next_gid_index = cpu_to_be32(index << 6);

	err = mlx4_WRITE_ENTRY(dev, prev, mailbox);
	if (err)
		goto out;

out:
	if (prot == MLX4_PROT_ETH) {
		/* manage the steering entry for promisc mode */
		/* NOTE(review): runs even when err != 0 — confirm this is
		 * intended for the partial-failure cases above */
		if (new_entry)
			new_steering_entry(dev, port, steer, index, qp->qpn);
		else
			existing_steering_entry(dev, port, steer,
						index, qp->qpn);
	}
	if (err && link && index != -1) {
		/* failed after allocating an AMGM slot: give it back */
		if (index < dev->caps.num_mgms)
			mlx4_warn(dev, "Got AMGM index %d < %d",
				  index, dev->caps.num_mgms);
		else
			mlx4_bitmap_free(&priv->mcg_table.bitmap,
					 index - dev->caps.num_mgms);
	}
	mutex_unlock(&priv->mcg_table.mutex);

	mlx4_free_cmd_mailbox(dev, mailbox);
	return err;
}
/* Detach @qp from the group identified by @gid: remove the QPN from the
 * MGM/AMGM entry, and if the entry becomes empty, unlink it from its
 * hash chain and free the AMGM slot. For Ethernet, the software steering
 * tracking decides whether the entry may be removed at all.
 * Returns 0 or a negative errno. */
int mlx4_qp_detach_common(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
			  enum mlx4_protocol prot, enum mlx4_steer_type steer)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_cmd_mailbox *mailbox;
	struct mlx4_mgm *mgm;
	u32 members_count;
	int prev, index;
	int i, loc;
	int err;
	u8 port = gid[5];
	bool removed_entry = false;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);
	mgm = mailbox->buf;

	mutex_lock(&priv->mcg_table.mutex);

	err = find_entry(dev, port, gid, prot,
			 mailbox, &prev, &index);
	if (err)
		goto out;

	if (index == -1) {
		mlx4_err(dev, "MGID %pI6 not found\n", gid);
		err = -EINVAL;
		goto out;
	}

	/* if this pq is also a promisc qp, it shouldn't be removed */
	if (prot == MLX4_PROT_ETH &&
	    check_duplicate_entry(dev, port, steer, index, qp->qpn))
		goto out;

	members_count = be32_to_cpu(mgm->members_count) & 0xffffff;
	for (loc = -1, i = 0; i < members_count; ++i)
		if ((be32_to_cpu(mgm->qp[i]) & MGM_QPN_MASK) == qp->qpn)
			loc = i;

	if (loc == -1) {
		mlx4_err(dev, "QP %06x not found in MGM\n", qp->qpn);
		err = -EINVAL;
		goto out;
	}

	/* copy the last QP over the removed slot; after the loop i equals
	 * the old members_count, so qp[i - 1] is the old last member */
	mgm->members_count = cpu_to_be32(--members_count | (u32) prot << 30);
	mgm->qp[loc] = mgm->qp[i - 1];
	mgm->qp[i - 1] = 0;

	if (prot == MLX4_PROT_ETH)
		removed_entry = can_remove_steering_entry(dev, port, steer,
							  index, qp->qpn);
	/* keep the entry if members remain, or steering forbids removal */
	if (i != 1 && (prot != MLX4_PROT_ETH || !removed_entry)) {
		err = mlx4_WRITE_ENTRY(dev, index, mailbox);
		goto out;
	}

	/* We are going to delete the entry, members count should be 0 */
	mgm->members_count = cpu_to_be32((u32) prot << 30);

	if (prev == -1) {
		/* Remove entry from MGM */
		int amgm_index = be32_to_cpu(mgm->next_gid_index) >> 6;
		if (amgm_index) {
			/* pull the first AMGM entry up into the MGM bucket */
			err = mlx4_READ_ENTRY(dev, amgm_index, mailbox);
			if (err)
				goto out;
		} else
			memset(mgm->gid, 0, 16);

		err = mlx4_WRITE_ENTRY(dev, index, mailbox);
		if (err)
			goto out;

		if (amgm_index) {
			if (amgm_index < dev->caps.num_mgms)
				mlx4_warn(dev, "MGM entry %d had AMGM index %d < %d",
					  index, amgm_index, dev->caps.num_mgms);
			else
				mlx4_bitmap_free(&priv->mcg_table.bitmap,
						 amgm_index - dev->caps.num_mgms);
		}
	} else {
		/* Remove entry from AMGM */
		int cur_next_index = be32_to_cpu(mgm->next_gid_index) >> 6;
		err = mlx4_READ_ENTRY(dev, prev, mailbox);
		if (err)
			goto out;

		/* bypass the removed entry in the chain */
		mgm->next_gid_index = cpu_to_be32(cur_next_index << 6);

		err = mlx4_WRITE_ENTRY(dev, prev, mailbox);
		if (err)
			goto out;

		if (index < dev->caps.num_mgms)
			mlx4_warn(dev, "entry %d had next AMGM index %d < %d",
				  prev, index, dev->caps.num_mgms);
		else
			mlx4_bitmap_free(&priv->mcg_table.bitmap,
					 index - dev->caps.num_mgms);
	}

out:
	mutex_unlock(&priv->mcg_table.mutex);

	mlx4_free_cmd_mailbox(dev, mailbox);
	return err;
}
  746. static int mlx4_QP_ATTACH(struct mlx4_dev *dev, struct mlx4_qp *qp,
  747. u8 gid[16], u8 attach, u8 block_loopback,
  748. enum mlx4_protocol prot)
  749. {
  750. struct mlx4_cmd_mailbox *mailbox;
  751. int err = 0;
  752. int qpn;
  753. if (!mlx4_is_mfunc(dev))
  754. return -EBADF;
  755. mailbox = mlx4_alloc_cmd_mailbox(dev);
  756. if (IS_ERR(mailbox))
  757. return PTR_ERR(mailbox);
  758. memcpy(mailbox->buf, gid, 16);
  759. qpn = qp->qpn;
  760. qpn |= (prot << 28);
  761. if (attach && block_loopback)
  762. qpn |= (1 << 31);
  763. err = mlx4_cmd(dev, mailbox->dma, qpn, attach,
  764. MLX4_CMD_QP_ATTACH, MLX4_CMD_TIME_CLASS_A,
  765. MLX4_CMD_WRAPPED);
  766. mlx4_free_cmd_mailbox(dev, mailbox);
  767. return err;
  768. }
/* Public entry point: attach @qp to a multicast group.
 * A0 steering has no per-group Ethernet steering, so ETH is a no-op
 * there; otherwise dispatch to the wrapped command (mfunc) or the
 * common native path. */
int mlx4_multicast_attach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
			  int block_mcast_loopback, enum mlx4_protocol prot)
{
	switch (dev->caps.steering_mode) {
	case MLX4_STEERING_MODE_A0:
		if (prot == MLX4_PROT_ETH)
			return 0;
		/* fall through: non-ETH protocols use the B0 path */

	case MLX4_STEERING_MODE_B0:
		if (prot == MLX4_PROT_ETH)
			gid[7] |= (MLX4_MC_STEER << 1);	/* tag steering type */

		if (mlx4_is_mfunc(dev))
			return mlx4_QP_ATTACH(dev, qp, gid, 1,
					      block_mcast_loopback, prot);
		return mlx4_qp_attach_common(dev, qp, gid,
					     block_mcast_loopback, prot,
					     MLX4_MC_STEER);
	default:
		return -EINVAL;
	}
}
EXPORT_SYMBOL_GPL(mlx4_multicast_attach);
/* Public entry point: detach @qp from a multicast group.
 * Mirrors mlx4_multicast_attach(): ETH is a no-op under A0 steering. */
int mlx4_multicast_detach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
			  enum mlx4_protocol prot)
{
	switch (dev->caps.steering_mode) {
	case MLX4_STEERING_MODE_A0:
		if (prot == MLX4_PROT_ETH)
			return 0;
		/* fall through: non-ETH protocols use the B0 path */

	case MLX4_STEERING_MODE_B0:
		if (prot == MLX4_PROT_ETH)
			gid[7] |= (MLX4_MC_STEER << 1);	/* tag steering type */

		if (mlx4_is_mfunc(dev))
			return mlx4_QP_ATTACH(dev, qp, gid, 0, 0, prot);

		return mlx4_qp_detach_common(dev, qp, gid, prot,
					     MLX4_MC_STEER);
	default:
		return -EINVAL;
	}
}
EXPORT_SYMBOL_GPL(mlx4_multicast_detach);
/* Public entry point: attach @qp to a unicast steering entry.
 * Tags the GID with the unicast steering type for Ethernet, then
 * dispatches to the wrapped command (mfunc) or the common native path. */
int mlx4_unicast_attach(struct mlx4_dev *dev,
			struct mlx4_qp *qp, u8 gid[16],
			int block_mcast_loopback, enum mlx4_protocol prot)
{
	if (prot == MLX4_PROT_ETH)
		gid[7] |= (MLX4_UC_STEER << 1);

	if (mlx4_is_mfunc(dev))
		return mlx4_QP_ATTACH(dev, qp, gid, 1,
					block_mcast_loopback, prot);

	return mlx4_qp_attach_common(dev, qp, gid, block_mcast_loopback,
					prot, MLX4_UC_STEER);
}
EXPORT_SYMBOL_GPL(mlx4_unicast_attach);
/* Public entry point: detach @qp from a unicast steering entry.
 * Mirrors mlx4_unicast_attach(). */
int mlx4_unicast_detach(struct mlx4_dev *dev, struct mlx4_qp *qp,
			u8 gid[16], enum mlx4_protocol prot)
{
	if (prot == MLX4_PROT_ETH)
		gid[7] |= (MLX4_UC_STEER << 1);

	if (mlx4_is_mfunc(dev))
		return mlx4_QP_ATTACH(dev, qp, gid, 0, 0, prot);

	return mlx4_qp_detach_common(dev, qp, gid, prot, MLX4_UC_STEER);
}
EXPORT_SYMBOL_GPL(mlx4_unicast_detach);
/* Command wrapper executed on the master for a slave's PROMISC request.
 * in_param packs the QPN (low 32 bits) and port (bits 63:62) — the
 * encoding produced by mlx4_PROMISC(); in_modifier carries the steering
 * type and op_modifier selects add (nonzero) vs remove. */
int mlx4_PROMISC_wrapper(struct mlx4_dev *dev, int slave,
			 struct mlx4_vhcr *vhcr,
			 struct mlx4_cmd_mailbox *inbox,
			 struct mlx4_cmd_mailbox *outbox,
			 struct mlx4_cmd_info *cmd)
{
	u32 qpn = (u32) vhcr->in_param & 0xffffffff;
	u8 port = vhcr->in_param >> 62;
	enum mlx4_steer_type steer = vhcr->in_modifier;

	/* Promiscuous unicast is not allowed in mfunc */
	if (mlx4_is_mfunc(dev) && steer == MLX4_UC_STEER)
		return 0;

	if (vhcr->op_modifier)
		return add_promisc_qp(dev, port, steer, qpn);
	else
		return remove_promisc_qp(dev, port, steer, qpn);
}
/* Send a PROMISC command to the master (multi-function mode).
 * Packs qpn into the low bits and port into bits 63:62 of in_param,
 * matching the decode in mlx4_PROMISC_wrapper(). @add selects
 * add (1) vs remove (0) via op_modifier. */
static int mlx4_PROMISC(struct mlx4_dev *dev, u32 qpn,
			enum mlx4_steer_type steer, u8 add, u8 port)
{
	return mlx4_cmd(dev, (u64) qpn | (u64) port << 62, (u32) steer, add,
			MLX4_CMD_PROMISC, MLX4_CMD_TIME_CLASS_A,
			MLX4_CMD_WRAPPED);
}
/* Enable multicast promiscuous mode for @qpn on @port.
 * In multi-function mode the request is proxied via the PROMISC command;
 * otherwise the steering tables are updated directly. */
int mlx4_multicast_promisc_add(struct mlx4_dev *dev, u32 qpn, u8 port)
{
	if (mlx4_is_mfunc(dev))
		return mlx4_PROMISC(dev, qpn, MLX4_MC_STEER, 1, port);

	return add_promisc_qp(dev, port, MLX4_MC_STEER, qpn);
}
EXPORT_SYMBOL_GPL(mlx4_multicast_promisc_add);
/* Disable multicast promiscuous mode for @qpn on @port.
 * Counterpart of mlx4_multicast_promisc_add(). */
int mlx4_multicast_promisc_remove(struct mlx4_dev *dev, u32 qpn, u8 port)
{
	if (mlx4_is_mfunc(dev))
		return mlx4_PROMISC(dev, qpn, MLX4_MC_STEER, 0, port);

	return remove_promisc_qp(dev, port, MLX4_MC_STEER, qpn);
}
EXPORT_SYMBOL_GPL(mlx4_multicast_promisc_remove);
/* Enable unicast promiscuous mode for @qpn on @port.
 * Note: in mfunc mode mlx4_PROMISC_wrapper() rejects MLX4_UC_STEER,
 * so the wrapped call is effectively a no-op there. */
int mlx4_unicast_promisc_add(struct mlx4_dev *dev, u32 qpn, u8 port)
{
	if (mlx4_is_mfunc(dev))
		return mlx4_PROMISC(dev, qpn, MLX4_UC_STEER, 1, port);

	return add_promisc_qp(dev, port, MLX4_UC_STEER, qpn);
}
EXPORT_SYMBOL_GPL(mlx4_unicast_promisc_add);
/* Disable unicast promiscuous mode for @qpn on @port.
 * Counterpart of mlx4_unicast_promisc_add(). */
int mlx4_unicast_promisc_remove(struct mlx4_dev *dev, u32 qpn, u8 port)
{
	if (mlx4_is_mfunc(dev))
		return mlx4_PROMISC(dev, qpn, MLX4_UC_STEER, 0, port);

	return remove_promisc_qp(dev, port, MLX4_UC_STEER, qpn);
}
EXPORT_SYMBOL_GPL(mlx4_unicast_promisc_remove);
  884. int mlx4_init_mcg_table(struct mlx4_dev *dev)
  885. {
  886. struct mlx4_priv *priv = mlx4_priv(dev);
  887. int err;
  888. err = mlx4_bitmap_init(&priv->mcg_table.bitmap, dev->caps.num_amgms,
  889. dev->caps.num_amgms - 1, 0, 0);
  890. if (err)
  891. return err;
  892. mutex_init(&priv->mcg_table.mutex);
  893. return 0;
  894. }
  895. void mlx4_cleanup_mcg_table(struct mlx4_dev *dev)
  896. {
  897. mlx4_bitmap_cleanup(&mlx4_priv(dev)->mcg_table.bitmap);
  898. }