mcg.c

/*
 * Copyright (c) 2006, 2007 Cisco Systems, Inc. All rights reserved.
 * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 * - Redistributions of source code must retain the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer.
 *
 * - Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer in the documentation and/or other materials
 *   provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/string.h>
#include <linux/etherdevice.h>

#include <linux/mlx4/cmd.h>
#include <linux/export.h>

#include "mlx4.h"

static const u8 zero_gid[16];	/* automatically initialized to 0 */

int mlx4_get_mgm_entry_size(struct mlx4_dev *dev)
{
        return 1 << dev->oper_log_mgm_entry_size;
}

int mlx4_get_qp_per_mgm(struct mlx4_dev *dev)
{
        return 4 * (mlx4_get_mgm_entry_size(dev) / 16 - 2);
}
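
/*
 * Worked example of the arithmetic above (editorial note): an MGM
 * entry is split into 16-byte rows, and the first two rows hold the
 * entry header (next_gid_index, members_count) and the GID (see
 * struct mlx4_mgm in mlx4.h); every remaining row holds four 4-byte
 * QPN slots.  So with a 128-byte entry: 4 * (128 / 16 - 2) = 24 QPs
 * per MGM.
 */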
static int mlx4_QP_FLOW_STEERING_ATTACH(struct mlx4_dev *dev,
                                        struct mlx4_cmd_mailbox *mailbox,
                                        u32 size,
                                        u64 *reg_id)
{
        u64 imm;
        int err = 0;

        err = mlx4_cmd_imm(dev, mailbox->dma, &imm, size, 0,
                           MLX4_QP_FLOW_STEERING_ATTACH, MLX4_CMD_TIME_CLASS_A,
                           MLX4_CMD_NATIVE);
        if (err)
                return err;
        *reg_id = imm;
        return err;
}

static int mlx4_QP_FLOW_STEERING_DETACH(struct mlx4_dev *dev, u64 regid)
{
        int err = 0;

        err = mlx4_cmd(dev, regid, 0, 0,
                       MLX4_QP_FLOW_STEERING_DETACH, MLX4_CMD_TIME_CLASS_A,
                       MLX4_CMD_NATIVE);

        return err;
}

static int mlx4_READ_ENTRY(struct mlx4_dev *dev, int index,
                           struct mlx4_cmd_mailbox *mailbox)
{
        return mlx4_cmd_box(dev, 0, mailbox->dma, index, 0, MLX4_CMD_READ_MCG,
                            MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
}

static int mlx4_WRITE_ENTRY(struct mlx4_dev *dev, int index,
                            struct mlx4_cmd_mailbox *mailbox)
{
        return mlx4_cmd(dev, mailbox->dma, index, 0, MLX4_CMD_WRITE_MCG,
                        MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
}

static int mlx4_WRITE_PROMISC(struct mlx4_dev *dev, u8 port, u8 steer,
                              struct mlx4_cmd_mailbox *mailbox)
{
        u32 in_mod;

        in_mod = (u32) port << 16 | steer << 1;
        return mlx4_cmd(dev, mailbox->dma, in_mod, 0x1,
                        MLX4_CMD_WRITE_MCG, MLX4_CMD_TIME_CLASS_A,
                        MLX4_CMD_NATIVE);
}

static int mlx4_GID_HASH(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
                         u16 *hash, u8 op_mod)
{
        u64 imm;
        int err;

        err = mlx4_cmd_imm(dev, mailbox->dma, &imm, 0, op_mod,
                           MLX4_CMD_MGID_HASH, MLX4_CMD_TIME_CLASS_A,
                           MLX4_CMD_NATIVE);

        if (!err)
                *hash = imm;

        return err;
}

static struct mlx4_promisc_qp *get_promisc_qp(struct mlx4_dev *dev, u8 port,
                                              enum mlx4_steer_type steer,
                                              u32 qpn)
{
        struct mlx4_steer *s_steer = &mlx4_priv(dev)->steer[port - 1];
        struct mlx4_promisc_qp *pqp;

        list_for_each_entry(pqp, &s_steer->promisc_qps[steer], list) {
                if (pqp->qpn == qpn)
                        return pqp;
        }
        /* not found */
        return NULL;
}
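
/*
 * Note: throughout this file the MGM "members_count" word packs two
 * fields: bits 0-23 hold the number of QPs attached to the entry and
 * bits 30-31 hold the protocol (enum mlx4_protocol), which is why the
 * code below repeatedly masks with 0xffffff and shifts by 30.
 */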
/*
 * Add new entry to steering data structure.
 * All promisc QPs should be added as well
 */
static int new_steering_entry(struct mlx4_dev *dev, u8 port,
                              enum mlx4_steer_type steer,
                              unsigned int index, u32 qpn)
{
        struct mlx4_steer *s_steer;
        struct mlx4_cmd_mailbox *mailbox;
        struct mlx4_mgm *mgm;
        u32 members_count;
        struct mlx4_steer_index *new_entry;
        struct mlx4_promisc_qp *pqp;
        struct mlx4_promisc_qp *dqp = NULL;
        u32 prot;
        int err;

        s_steer = &mlx4_priv(dev)->steer[port - 1];
        new_entry = kzalloc(sizeof *new_entry, GFP_KERNEL);
        if (!new_entry)
                return -ENOMEM;

        INIT_LIST_HEAD(&new_entry->duplicates);
        new_entry->index = index;
        list_add_tail(&new_entry->list, &s_steer->steer_entries[steer]);

        /* If the given qpn is also a promisc qp,
         * it should be inserted to duplicates list
         */
        pqp = get_promisc_qp(dev, port, steer, qpn);
        if (pqp) {
                dqp = kmalloc(sizeof *dqp, GFP_KERNEL);
                if (!dqp) {
                        err = -ENOMEM;
                        goto out_alloc;
                }
                dqp->qpn = qpn;
                list_add_tail(&dqp->list, &new_entry->duplicates);
        }

        /* if no promisc qps for this vep, we are done */
        if (list_empty(&s_steer->promisc_qps[steer]))
                return 0;

        /* now need to add all the promisc qps to the new
         * steering entry, as they should also receive the packets
         * destined to this address */
        mailbox = mlx4_alloc_cmd_mailbox(dev);
        if (IS_ERR(mailbox)) {
                err = -ENOMEM;
                goto out_alloc;
        }
        mgm = mailbox->buf;

        err = mlx4_READ_ENTRY(dev, index, mailbox);
        if (err)
                goto out_mailbox;

        members_count = be32_to_cpu(mgm->members_count) & 0xffffff;
        prot = be32_to_cpu(mgm->members_count) >> 30;
        list_for_each_entry(pqp, &s_steer->promisc_qps[steer], list) {
                /* don't add already existing qpn */
                if (pqp->qpn == qpn)
                        continue;
                if (members_count == dev->caps.num_qp_per_mgm) {
                        /* out of space */
                        err = -ENOMEM;
                        goto out_mailbox;
                }

                /* add the qpn */
                mgm->qp[members_count++] = cpu_to_be32(pqp->qpn & MGM_QPN_MASK);
        }
        /* update the qps count and update the entry with all the promisc qps */
        mgm->members_count = cpu_to_be32(members_count | (prot << 30));
        err = mlx4_WRITE_ENTRY(dev, index, mailbox);

out_mailbox:
        mlx4_free_cmd_mailbox(dev, mailbox);
        if (!err)
                return 0;
out_alloc:
        if (dqp) {
                list_del(&dqp->list);
                kfree(dqp);
        }
        list_del(&new_entry->list);
        kfree(new_entry);
        return err;
}
/* update the data structures with existing steering entry */
static int existing_steering_entry(struct mlx4_dev *dev, u8 port,
                                   enum mlx4_steer_type steer,
                                   unsigned int index, u32 qpn)
{
        struct mlx4_steer *s_steer;
        struct mlx4_steer_index *tmp_entry, *entry = NULL;
        struct mlx4_promisc_qp *pqp;
        struct mlx4_promisc_qp *dqp;

        s_steer = &mlx4_priv(dev)->steer[port - 1];

        pqp = get_promisc_qp(dev, port, steer, qpn);
        if (!pqp)
                return 0; /* nothing to do */

        list_for_each_entry(tmp_entry, &s_steer->steer_entries[steer], list) {
                if (tmp_entry->index == index) {
                        entry = tmp_entry;
                        break;
                }
        }
        if (unlikely(!entry)) {
                mlx4_warn(dev, "Steering entry at index %x is not registered\n", index);
                return -EINVAL;
        }

        /* the given qpn is listed as a promisc qpn
         * we need to add it as a duplicate to this entry
         * for future references */
        list_for_each_entry(dqp, &entry->duplicates, list) {
                if (qpn == dqp->qpn)
                        return 0; /* qp is already duplicated */
        }

        /* add the qp as a duplicate on this index */
        dqp = kmalloc(sizeof *dqp, GFP_KERNEL);
        if (!dqp)
                return -ENOMEM;
        dqp->qpn = qpn;
        list_add_tail(&dqp->list, &entry->duplicates);

        return 0;
}
/* Check whether a qpn is a duplicate on steering entry
 * If so, it should not be removed from mgm */
static bool check_duplicate_entry(struct mlx4_dev *dev, u8 port,
                                  enum mlx4_steer_type steer,
                                  unsigned int index, u32 qpn)
{
        struct mlx4_steer *s_steer;
        struct mlx4_steer_index *tmp_entry, *entry = NULL;
        struct mlx4_promisc_qp *dqp, *tmp_dqp;

        s_steer = &mlx4_priv(dev)->steer[port - 1];

        /* if qp is not promisc, it cannot be duplicated */
        if (!get_promisc_qp(dev, port, steer, qpn))
                return false;

        /* The qp is promisc qp so it is a duplicate on this index
         * Find the index entry, and remove the duplicate */
        list_for_each_entry(tmp_entry, &s_steer->steer_entries[steer], list) {
                if (tmp_entry->index == index) {
                        entry = tmp_entry;
                        break;
                }
        }
        if (unlikely(!entry)) {
                mlx4_warn(dev, "Steering entry for index %x is not registered\n", index);
                return false;
        }
        list_for_each_entry_safe(dqp, tmp_dqp, &entry->duplicates, list) {
                if (dqp->qpn == qpn) {
                        list_del(&dqp->list);
                        kfree(dqp);
                }
        }

        return true;
}
/* If a steering entry contains only promisc QPs, it can be removed. */
static bool can_remove_steering_entry(struct mlx4_dev *dev, u8 port,
                                      enum mlx4_steer_type steer,
                                      unsigned int index, u32 tqpn)
{
        struct mlx4_steer *s_steer;
        struct mlx4_cmd_mailbox *mailbox;
        struct mlx4_mgm *mgm;
        struct mlx4_steer_index *entry = NULL, *tmp_entry;
        u32 qpn;
        u32 members_count;
        bool ret = false;
        int i;

        s_steer = &mlx4_priv(dev)->steer[port - 1];

        mailbox = mlx4_alloc_cmd_mailbox(dev);
        if (IS_ERR(mailbox))
                return false;
        mgm = mailbox->buf;

        if (mlx4_READ_ENTRY(dev, index, mailbox))
                goto out;
        members_count = be32_to_cpu(mgm->members_count) & 0xffffff;
        for (i = 0; i < members_count; i++) {
                qpn = be32_to_cpu(mgm->qp[i]) & MGM_QPN_MASK;
                if (!get_promisc_qp(dev, port, steer, qpn) && qpn != tqpn) {
                        /* the qp is not promisc, the entry can't be removed */
                        goto out;
                }
        }
        /* All the qps currently registered for this entry are promiscuous,
         * Checking for duplicates */
        ret = true;
        list_for_each_entry_safe(entry, tmp_entry, &s_steer->steer_entries[steer], list) {
                if (entry->index == index) {
                        if (list_empty(&entry->duplicates)) {
                                list_del(&entry->list);
                                kfree(entry);
                        } else {
                                /* This entry contains duplicates so it shouldn't be removed */
                                ret = false;
                                goto out;
                        }
                }
        }

out:
        mlx4_free_cmd_mailbox(dev, mailbox);
        return ret;
}
static int add_promisc_qp(struct mlx4_dev *dev, u8 port,
                          enum mlx4_steer_type steer, u32 qpn)
{
        struct mlx4_steer *s_steer;
        struct mlx4_cmd_mailbox *mailbox;
        struct mlx4_mgm *mgm;
        struct mlx4_steer_index *entry;
        struct mlx4_promisc_qp *pqp;
        struct mlx4_promisc_qp *dqp;
        u32 members_count;
        u32 prot;
        int i;
        bool found;
        int err;
        struct mlx4_priv *priv = mlx4_priv(dev);

        s_steer = &mlx4_priv(dev)->steer[port - 1];

        mutex_lock(&priv->mcg_table.mutex);

        if (get_promisc_qp(dev, port, steer, qpn)) {
                err = 0;  /* Nothing to do, already exists */
                goto out_mutex;
        }

        pqp = kmalloc(sizeof *pqp, GFP_KERNEL);
        if (!pqp) {
                err = -ENOMEM;
                goto out_mutex;
        }
        pqp->qpn = qpn;

        mailbox = mlx4_alloc_cmd_mailbox(dev);
        if (IS_ERR(mailbox)) {
                err = -ENOMEM;
                goto out_alloc;
        }
        mgm = mailbox->buf;

        /* the promisc qp needs to be added for each one of the steering
         * entries, if it already exists, needs to be added as a duplicate
         * for this entry */
        list_for_each_entry(entry, &s_steer->steer_entries[steer], list) {
                err = mlx4_READ_ENTRY(dev, entry->index, mailbox);
                if (err)
                        goto out_mailbox;

                members_count = be32_to_cpu(mgm->members_count) & 0xffffff;
                prot = be32_to_cpu(mgm->members_count) >> 30;
                found = false;
                for (i = 0; i < members_count; i++) {
                        if ((be32_to_cpu(mgm->qp[i]) & MGM_QPN_MASK) == qpn) {
                                /* Entry already exists, add to duplicates */
                                dqp = kmalloc(sizeof *dqp, GFP_KERNEL);
                                if (!dqp) {
                                        err = -ENOMEM;
                                        goto out_mailbox;
                                }
                                dqp->qpn = qpn;
                                list_add_tail(&dqp->list, &entry->duplicates);
                                found = true;
                        }
                }
                if (!found) {
                        /* Need to add the qpn to mgm */
                        if (members_count == dev->caps.num_qp_per_mgm) {
                                /* entry is full */
                                err = -ENOMEM;
                                goto out_mailbox;
                        }
                        mgm->qp[members_count++] = cpu_to_be32(qpn & MGM_QPN_MASK);
                        mgm->members_count = cpu_to_be32(members_count | (prot << 30));
                        err = mlx4_WRITE_ENTRY(dev, entry->index, mailbox);
                        if (err)
                                goto out_mailbox;
                }
        }

        /* add the new qpn to list of promisc qps */
        list_add_tail(&pqp->list, &s_steer->promisc_qps[steer]);
        /* now need to add all the promisc qps to default entry */
        memset(mgm, 0, sizeof *mgm);
        members_count = 0;
        list_for_each_entry(dqp, &s_steer->promisc_qps[steer], list)
                mgm->qp[members_count++] = cpu_to_be32(dqp->qpn & MGM_QPN_MASK);
        mgm->members_count = cpu_to_be32(members_count | MLX4_PROT_ETH << 30);

        err = mlx4_WRITE_PROMISC(dev, port, steer, mailbox);
        if (err)
                goto out_list;

        mlx4_free_cmd_mailbox(dev, mailbox);
        mutex_unlock(&priv->mcg_table.mutex);
        return 0;

out_list:
        list_del(&pqp->list);
out_mailbox:
        mlx4_free_cmd_mailbox(dev, mailbox);
out_alloc:
        kfree(pqp);
out_mutex:
        mutex_unlock(&priv->mcg_table.mutex);
        return err;
}
static int remove_promisc_qp(struct mlx4_dev *dev, u8 port,
                             enum mlx4_steer_type steer, u32 qpn)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        struct mlx4_steer *s_steer;
        struct mlx4_cmd_mailbox *mailbox;
        struct mlx4_mgm *mgm;
        struct mlx4_steer_index *entry;
        struct mlx4_promisc_qp *pqp;
        struct mlx4_promisc_qp *dqp;
        u32 members_count;
        bool found;
        bool back_to_list = false;
        int loc, i;
        int err;

        s_steer = &mlx4_priv(dev)->steer[port - 1];
        mutex_lock(&priv->mcg_table.mutex);

        pqp = get_promisc_qp(dev, port, steer, qpn);
        if (unlikely(!pqp)) {
                mlx4_warn(dev, "QP %x is not promiscuous QP\n", qpn);
                /* nothing to do */
                err = 0;
                goto out_mutex;
        }

        /* remove from list of promisc qps */
        list_del(&pqp->list);

        /* set the default entry not to include the removed one */
        mailbox = mlx4_alloc_cmd_mailbox(dev);
        if (IS_ERR(mailbox)) {
                err = -ENOMEM;
                back_to_list = true;
                goto out_list;
        }
        mgm = mailbox->buf;
        members_count = 0;
        list_for_each_entry(dqp, &s_steer->promisc_qps[steer], list)
                mgm->qp[members_count++] = cpu_to_be32(dqp->qpn & MGM_QPN_MASK);
        mgm->members_count = cpu_to_be32(members_count | MLX4_PROT_ETH << 30);

        err = mlx4_WRITE_PROMISC(dev, port, steer, mailbox);
        if (err)
                goto out_mailbox;

        /* remove the qp from all the steering entries */
        list_for_each_entry(entry, &s_steer->steer_entries[steer], list) {
                found = false;
                list_for_each_entry(dqp, &entry->duplicates, list) {
                        if (dqp->qpn == qpn) {
                                found = true;
                                break;
                        }
                }
                if (found) {
                        /* a duplicate, no need to change the mgm,
                         * only update the duplicates list */
                        list_del(&dqp->list);
                        kfree(dqp);
                } else {
                        err = mlx4_READ_ENTRY(dev, entry->index, mailbox);
                        if (err)
                                goto out_mailbox;
                        members_count = be32_to_cpu(mgm->members_count) & 0xffffff;
                        for (loc = -1, i = 0; i < members_count; ++i)
                                if ((be32_to_cpu(mgm->qp[i]) & MGM_QPN_MASK) == qpn)
                                        loc = i;

                        if (loc < 0) {
                                /* guard (editorial fix): if qpn is missing from
                                 * this entry, bail out instead of corrupting
                                 * mgm->qp[-1] below */
                                mlx4_err(dev, "QP %06x wasn't found in entry %x\n",
                                         qpn, entry->index);
                                err = -EINVAL;
                                goto out_mailbox;
                        }

                        mgm->members_count = cpu_to_be32(--members_count |
                                                         (MLX4_PROT_ETH << 30));
                        mgm->qp[loc] = mgm->qp[i - 1];
                        mgm->qp[i - 1] = 0;

                        err = mlx4_WRITE_ENTRY(dev, entry->index, mailbox);
                        if (err)
                                goto out_mailbox;
                }
        }

out_mailbox:
        mlx4_free_cmd_mailbox(dev, mailbox);
out_list:
        if (back_to_list)
                list_add_tail(&pqp->list, &s_steer->promisc_qps[steer]);
        else
                kfree(pqp);
out_mutex:
        mutex_unlock(&priv->mcg_table.mutex);
        return err;
}
/*
 * Caller must hold MCG table semaphore.  gid and mgm parameters must
 * be properly aligned for command interface.
 *
 * Returns 0 unless a firmware command error occurs.
 *
 * If GID is found in MGM or MGM is empty, *index = *hash, *prev = -1
 * and *mgm holds MGM entry.
 *
 * if GID is found in AMGM, *index = index in AMGM, *prev = index of
 * previous entry in hash chain and *mgm holds AMGM entry.
 *
 * If no AMGM exists for given gid, *index = -1, *prev = index of last
 * entry in hash chain and *mgm holds end of hash chain.
 */
static int find_entry(struct mlx4_dev *dev, u8 port,
                      u8 *gid, enum mlx4_protocol prot,
                      struct mlx4_cmd_mailbox *mgm_mailbox,
                      int *prev, int *index)
{
        struct mlx4_cmd_mailbox *mailbox;
        struct mlx4_mgm *mgm = mgm_mailbox->buf;
        u8 *mgid;
        int err;
        u16 hash;
        u8 op_mod = (prot == MLX4_PROT_ETH) ?
                !!(dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER) : 0;

        mailbox = mlx4_alloc_cmd_mailbox(dev);
        if (IS_ERR(mailbox))
                return -ENOMEM;
        mgid = mailbox->buf;

        memcpy(mgid, gid, 16);

        err = mlx4_GID_HASH(dev, mailbox, &hash, op_mod);
        mlx4_free_cmd_mailbox(dev, mailbox);
        if (err)
                return err;

        if (0)
                mlx4_dbg(dev, "Hash for %pI6 is %04x\n", gid, hash);

        *index = hash;
        *prev = -1;

        do {
                err = mlx4_READ_ENTRY(dev, *index, mgm_mailbox);
                if (err)
                        return err;

                if (!(be32_to_cpu(mgm->members_count) & 0xffffff)) {
                        if (*index != hash) {
                                mlx4_err(dev, "Found zero MGID in AMGM.\n");
                                err = -EINVAL;
                        }
                        return err;
                }

                if (!memcmp(mgm->gid, gid, 16) &&
                    be32_to_cpu(mgm->members_count) >> 30 == prot)
                        return err;

                *prev = *index;
                *index = be32_to_cpu(mgm->next_gid_index) >> 6;
        } while (*index);

        *index = -1;
        return err;
}
static const u8 __promisc_mode[] = {
        [MLX4_FS_REGULAR]     = 0x0,
        [MLX4_FS_ALL_DEFAULT] = 0x1,
        [MLX4_FS_MC_DEFAULT]  = 0x3,
        [MLX4_FS_UC_SNIFFER]  = 0x4,
        [MLX4_FS_MC_SNIFFER]  = 0x5,
};

int mlx4_map_sw_to_hw_steering_mode(struct mlx4_dev *dev,
                                    enum mlx4_net_trans_promisc_mode flow_type)
{
        if (flow_type >= MLX4_FS_MODE_NUM) {
                mlx4_err(dev, "Invalid flow type. type = %d\n", flow_type);
                return -EINVAL;
        }
        return __promisc_mode[flow_type];
}
EXPORT_SYMBOL_GPL(mlx4_map_sw_to_hw_steering_mode);

static void trans_rule_ctrl_to_hw(struct mlx4_net_trans_rule *ctrl,
                                  struct mlx4_net_trans_rule_hw_ctrl *hw)
{
        u8 flags = 0;

        flags = ctrl->queue_mode == MLX4_NET_TRANS_Q_LIFO ? 1 : 0;
        flags |= ctrl->exclusive ? (1 << 2) : 0;
        flags |= ctrl->allow_loopback ? (1 << 3) : 0;

        hw->flags = flags;
        hw->type = __promisc_mode[ctrl->promisc_mode];
        hw->prio = cpu_to_be16(ctrl->priority);
        hw->port = ctrl->port;
        hw->qpn = cpu_to_be32(ctrl->qpn);
}

const u16 __sw_id_hw[] = {
        [MLX4_NET_TRANS_RULE_ID_ETH]  = 0xE001,
        [MLX4_NET_TRANS_RULE_ID_IB]   = 0xE005,
        [MLX4_NET_TRANS_RULE_ID_IPV6] = 0xE003,
        [MLX4_NET_TRANS_RULE_ID_IPV4] = 0xE002,
        [MLX4_NET_TRANS_RULE_ID_TCP]  = 0xE004,
        [MLX4_NET_TRANS_RULE_ID_UDP]  = 0xE006
};

int mlx4_map_sw_to_hw_steering_id(struct mlx4_dev *dev,
                                  enum mlx4_net_trans_rule_id id)
{
        if (id >= MLX4_NET_TRANS_RULE_NUM) {
                mlx4_err(dev, "Invalid network rule id. id = %d\n", id);
                return -EINVAL;
        }
        return __sw_id_hw[id];
}
EXPORT_SYMBOL_GPL(mlx4_map_sw_to_hw_steering_id);

static const int __rule_hw_sz[] = {
        [MLX4_NET_TRANS_RULE_ID_ETH] =
                sizeof(struct mlx4_net_trans_rule_hw_eth),
        [MLX4_NET_TRANS_RULE_ID_IB] =
                sizeof(struct mlx4_net_trans_rule_hw_ib),
        [MLX4_NET_TRANS_RULE_ID_IPV6] = 0,
        [MLX4_NET_TRANS_RULE_ID_IPV4] =
                sizeof(struct mlx4_net_trans_rule_hw_ipv4),
        [MLX4_NET_TRANS_RULE_ID_TCP] =
                sizeof(struct mlx4_net_trans_rule_hw_tcp_udp),
        [MLX4_NET_TRANS_RULE_ID_UDP] =
                sizeof(struct mlx4_net_trans_rule_hw_tcp_udp)
};

int mlx4_hw_rule_sz(struct mlx4_dev *dev,
                    enum mlx4_net_trans_rule_id id)
{
        if (id >= MLX4_NET_TRANS_RULE_NUM) {
                mlx4_err(dev, "Invalid network rule id. id = %d\n", id);
                return -EINVAL;
        }

        return __rule_hw_sz[id];
}
EXPORT_SYMBOL_GPL(mlx4_hw_rule_sz);

static int parse_trans_rule(struct mlx4_dev *dev, struct mlx4_spec_list *spec,
                            struct _rule_hw *rule_hw)
{
        if (mlx4_hw_rule_sz(dev, spec->id) < 0)
                return -EINVAL;
        memset(rule_hw, 0, mlx4_hw_rule_sz(dev, spec->id));
        rule_hw->id = cpu_to_be16(__sw_id_hw[spec->id]);
        rule_hw->size = mlx4_hw_rule_sz(dev, spec->id) >> 2;

        switch (spec->id) {
        case MLX4_NET_TRANS_RULE_ID_ETH:
                memcpy(rule_hw->eth.dst_mac, spec->eth.dst_mac, ETH_ALEN);
                memcpy(rule_hw->eth.dst_mac_msk, spec->eth.dst_mac_msk,
                       ETH_ALEN);
                memcpy(rule_hw->eth.src_mac, spec->eth.src_mac, ETH_ALEN);
                memcpy(rule_hw->eth.src_mac_msk, spec->eth.src_mac_msk,
                       ETH_ALEN);
                if (spec->eth.ether_type_enable) {
                        rule_hw->eth.ether_type_enable = 1;
                        rule_hw->eth.ether_type = spec->eth.ether_type;
                }
                rule_hw->eth.vlan_tag = spec->eth.vlan_id;
                rule_hw->eth.vlan_tag_msk = spec->eth.vlan_id_msk;
                break;

        case MLX4_NET_TRANS_RULE_ID_IB:
                rule_hw->ib.l3_qpn = spec->ib.l3_qpn;
                rule_hw->ib.qpn_mask = spec->ib.qpn_msk;
                memcpy(&rule_hw->ib.dst_gid, &spec->ib.dst_gid, 16);
                memcpy(&rule_hw->ib.dst_gid_msk, &spec->ib.dst_gid_msk, 16);
                break;

        case MLX4_NET_TRANS_RULE_ID_IPV6:
                return -EOPNOTSUPP;

        case MLX4_NET_TRANS_RULE_ID_IPV4:
                rule_hw->ipv4.src_ip = spec->ipv4.src_ip;
                rule_hw->ipv4.src_ip_msk = spec->ipv4.src_ip_msk;
                rule_hw->ipv4.dst_ip = spec->ipv4.dst_ip;
                rule_hw->ipv4.dst_ip_msk = spec->ipv4.dst_ip_msk;
                break;

        case MLX4_NET_TRANS_RULE_ID_TCP:
        case MLX4_NET_TRANS_RULE_ID_UDP:
                rule_hw->tcp_udp.dst_port = spec->tcp_udp.dst_port;
                rule_hw->tcp_udp.dst_port_msk = spec->tcp_udp.dst_port_msk;
                rule_hw->tcp_udp.src_port = spec->tcp_udp.src_port;
                rule_hw->tcp_udp.src_port_msk = spec->tcp_udp.src_port_msk;
                break;

        default:
                return -EINVAL;
        }

        return __rule_hw_sz[spec->id];
}

static void mlx4_err_rule(struct mlx4_dev *dev, char *str,
                          struct mlx4_net_trans_rule *rule)
{
#define BUF_SIZE 256
        struct mlx4_spec_list *cur;
        char buf[BUF_SIZE];
        int len = 0;

        mlx4_err(dev, "%s", str);
        len += snprintf(buf + len, BUF_SIZE - len,
                        "port = %d prio = 0x%x qp = 0x%x ",
                        rule->port, rule->priority, rule->qpn);

        list_for_each_entry(cur, &rule->list, list) {
                switch (cur->id) {
                case MLX4_NET_TRANS_RULE_ID_ETH:
                        len += snprintf(buf + len, BUF_SIZE - len,
                                        "dmac = %pM ", &cur->eth.dst_mac);
                        if (cur->eth.ether_type)
                                len += snprintf(buf + len, BUF_SIZE - len,
                                                "ethertype = 0x%x ",
                                                be16_to_cpu(cur->eth.ether_type));
                        if (cur->eth.vlan_id)
                                len += snprintf(buf + len, BUF_SIZE - len,
                                                "vlan-id = %d ",
                                                be16_to_cpu(cur->eth.vlan_id));
                        break;

                case MLX4_NET_TRANS_RULE_ID_IPV4:
                        if (cur->ipv4.src_ip)
                                len += snprintf(buf + len, BUF_SIZE - len,
                                                "src-ip = %pI4 ",
                                                &cur->ipv4.src_ip);
                        if (cur->ipv4.dst_ip)
                                len += snprintf(buf + len, BUF_SIZE - len,
                                                "dst-ip = %pI4 ",
                                                &cur->ipv4.dst_ip);
                        break;

                case MLX4_NET_TRANS_RULE_ID_TCP:
                case MLX4_NET_TRANS_RULE_ID_UDP:
                        if (cur->tcp_udp.src_port)
                                len += snprintf(buf + len, BUF_SIZE - len,
                                                "src-port = %d ",
                                                be16_to_cpu(cur->tcp_udp.src_port));
                        if (cur->tcp_udp.dst_port)
                                len += snprintf(buf + len, BUF_SIZE - len,
                                                "dst-port = %d ",
                                                be16_to_cpu(cur->tcp_udp.dst_port));
                        break;

                case MLX4_NET_TRANS_RULE_ID_IB:
                        len += snprintf(buf + len, BUF_SIZE - len,
                                        "dst-gid = %pI6\n", cur->ib.dst_gid);
                        len += snprintf(buf + len, BUF_SIZE - len,
                                        "dst-gid-mask = %pI6\n",
                                        cur->ib.dst_gid_msk);
                        break;

                case MLX4_NET_TRANS_RULE_ID_IPV6:
                        break;

                default:
                        break;
                }
        }
        len += snprintf(buf + len, BUF_SIZE - len, "\n");
        mlx4_err(dev, "%s", buf);

        if (len >= BUF_SIZE)
                mlx4_err(dev, "Network rule error message was truncated, print buffer is too small.\n");
}
int mlx4_flow_attach(struct mlx4_dev *dev,
                     struct mlx4_net_trans_rule *rule, u64 *reg_id)
{
        struct mlx4_cmd_mailbox *mailbox;
        struct mlx4_spec_list *cur;
        u32 size = 0;
        int ret;

        mailbox = mlx4_alloc_cmd_mailbox(dev);
        if (IS_ERR(mailbox))
                return PTR_ERR(mailbox);

        trans_rule_ctrl_to_hw(rule, mailbox->buf);

        size += sizeof(struct mlx4_net_trans_rule_hw_ctrl);

        list_for_each_entry(cur, &rule->list, list) {
                ret = parse_trans_rule(dev, cur, mailbox->buf + size);
                if (ret < 0) {
                        mlx4_free_cmd_mailbox(dev, mailbox);
                        return -EINVAL;
                }
                size += ret;
        }

        ret = mlx4_QP_FLOW_STEERING_ATTACH(dev, mailbox, size >> 2, reg_id);
        if (ret == -ENOMEM)
                mlx4_err_rule(dev,
                              "mcg table is full. Failed to register network rule.\n",
                              rule);
        else if (ret)
                mlx4_err_rule(dev, "Failed to register network rule.\n", rule);

        mlx4_free_cmd_mailbox(dev, mailbox);

        return ret;
}
EXPORT_SYMBOL_GPL(mlx4_flow_attach);
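
/*
 * Usage sketch (illustrative only, not part of the driver): a caller
 * builds an mlx4_net_trans_rule, chains one or more mlx4_spec_list
 * entries on rule.list, and registers the rule; the returned reg_id
 * is later handed to mlx4_flow_detach().  mlx4_trans_to_dmfs_attach()
 * below is the in-tree example of this pattern.  Here dev, port, qpn
 * and mac are assumed to come from the caller's context.
 *
 *	struct mlx4_spec_list spec = { .id = MLX4_NET_TRANS_RULE_ID_ETH };
 *	struct mlx4_net_trans_rule rule = {
 *		.queue_mode   = MLX4_NET_TRANS_Q_FIFO,
 *		.promisc_mode = MLX4_FS_REGULAR,
 *		.port         = port,
 *		.qpn          = qpn,
 *	};
 *	u64 reg_id;
 *	int err;
 *
 *	INIT_LIST_HEAD(&rule.list);
 *	memcpy(spec.eth.dst_mac, mac, ETH_ALEN);
 *	memset(spec.eth.dst_mac_msk, 0xff, ETH_ALEN);
 *	list_add_tail(&spec.list, &rule.list);
 *
 *	err = mlx4_flow_attach(dev, &rule, &reg_id);
 *	...
 *	err = mlx4_flow_detach(dev, reg_id);
 */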
int mlx4_flow_detach(struct mlx4_dev *dev, u64 reg_id)
{
        int err;

        err = mlx4_QP_FLOW_STEERING_DETACH(dev, reg_id);
        if (err)
                mlx4_err(dev, "Failed to detach network rule. registration id = 0x%llx\n",
                         reg_id);
        return err;
}
EXPORT_SYMBOL_GPL(mlx4_flow_detach);
int mlx4_qp_attach_common(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
                          int block_mcast_loopback, enum mlx4_protocol prot,
                          enum mlx4_steer_type steer)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        struct mlx4_cmd_mailbox *mailbox;
        struct mlx4_mgm *mgm;
        u32 members_count;
        int index, prev;
        int link = 0;
        int i;
        int err;
        u8 port = gid[5];
        u8 new_entry = 0;

        mailbox = mlx4_alloc_cmd_mailbox(dev);
        if (IS_ERR(mailbox))
                return PTR_ERR(mailbox);
        mgm = mailbox->buf;

        mutex_lock(&priv->mcg_table.mutex);
        err = find_entry(dev, port, gid, prot,
                         mailbox, &prev, &index);
        if (err)
                goto out;

        if (index != -1) {
                if (!(be32_to_cpu(mgm->members_count) & 0xffffff)) {
                        new_entry = 1;
                        memcpy(mgm->gid, gid, 16);
                }
        } else {
                link = 1;

                index = mlx4_bitmap_alloc(&priv->mcg_table.bitmap);
                if (index == -1) {
                        mlx4_err(dev, "No AMGM entries left\n");
                        err = -ENOMEM;
                        goto out;
                }
                index += dev->caps.num_mgms;

                new_entry = 1;
                memset(mgm, 0, sizeof *mgm);
                memcpy(mgm->gid, gid, 16);
        }

        members_count = be32_to_cpu(mgm->members_count) & 0xffffff;
        if (members_count == dev->caps.num_qp_per_mgm) {
                mlx4_err(dev, "MGM at index %x is full.\n", index);
                err = -ENOMEM;
                goto out;
        }

        for (i = 0; i < members_count; ++i)
                if ((be32_to_cpu(mgm->qp[i]) & MGM_QPN_MASK) == qp->qpn) {
                        mlx4_dbg(dev, "QP %06x already a member of MGM\n", qp->qpn);
                        err = 0;
                        goto out;
                }

        if (block_mcast_loopback)
                mgm->qp[members_count++] = cpu_to_be32((qp->qpn & MGM_QPN_MASK) |
                                                       (1U << MGM_BLCK_LB_BIT));
        else
                mgm->qp[members_count++] = cpu_to_be32(qp->qpn & MGM_QPN_MASK);

        mgm->members_count = cpu_to_be32(members_count | (u32) prot << 30);

        err = mlx4_WRITE_ENTRY(dev, index, mailbox);
        if (err)
                goto out;

        if (!link)
                goto out;

        err = mlx4_READ_ENTRY(dev, prev, mailbox);
        if (err)
                goto out;

        mgm->next_gid_index = cpu_to_be32(index << 6);

        err = mlx4_WRITE_ENTRY(dev, prev, mailbox);
        if (err)
                goto out;

out:
        if (prot == MLX4_PROT_ETH) {
                /* manage the steering entry for promisc mode */
                if (new_entry)
                        new_steering_entry(dev, port, steer, index, qp->qpn);
                else
                        existing_steering_entry(dev, port, steer,
                                                index, qp->qpn);
        }
        if (err && link && index != -1) {
                if (index < dev->caps.num_mgms)
                        mlx4_warn(dev, "Got AMGM index %d < %d",
                                  index, dev->caps.num_mgms);
                else
                        mlx4_bitmap_free(&priv->mcg_table.bitmap,
                                         index - dev->caps.num_mgms);
        }
        mutex_unlock(&priv->mcg_table.mutex);

        mlx4_free_cmd_mailbox(dev, mailbox);
        return err;
}
int mlx4_qp_detach_common(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
                          enum mlx4_protocol prot, enum mlx4_steer_type steer)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        struct mlx4_cmd_mailbox *mailbox;
        struct mlx4_mgm *mgm;
        u32 members_count;
        int prev, index;
        int i, loc;
        int err;
        u8 port = gid[5];
        bool removed_entry = false;

        mailbox = mlx4_alloc_cmd_mailbox(dev);
        if (IS_ERR(mailbox))
                return PTR_ERR(mailbox);
        mgm = mailbox->buf;

        mutex_lock(&priv->mcg_table.mutex);

        err = find_entry(dev, port, gid, prot,
                         mailbox, &prev, &index);
        if (err)
                goto out;

        if (index == -1) {
                mlx4_err(dev, "MGID %pI6 not found\n", gid);
                err = -EINVAL;
                goto out;
        }

        /* if this QP is also a promisc QP, it shouldn't be removed */
        if (prot == MLX4_PROT_ETH &&
            check_duplicate_entry(dev, port, steer, index, qp->qpn))
                goto out;

        members_count = be32_to_cpu(mgm->members_count) & 0xffffff;
        for (loc = -1, i = 0; i < members_count; ++i)
                if ((be32_to_cpu(mgm->qp[i]) & MGM_QPN_MASK) == qp->qpn)
                        loc = i;

        if (loc == -1) {
                mlx4_err(dev, "QP %06x not found in MGM\n", qp->qpn);
                err = -EINVAL;
                goto out;
        }

        mgm->members_count = cpu_to_be32(--members_count | (u32) prot << 30);
        mgm->qp[loc] = mgm->qp[i - 1];
        mgm->qp[i - 1] = 0;

        if (prot == MLX4_PROT_ETH)
                removed_entry = can_remove_steering_entry(dev, port, steer,
                                                          index, qp->qpn);
        if (i != 1 && (prot != MLX4_PROT_ETH || !removed_entry)) {
                err = mlx4_WRITE_ENTRY(dev, index, mailbox);
                goto out;
        }

        /* We are going to delete the entry, members count should be 0 */
        mgm->members_count = cpu_to_be32((u32) prot << 30);

        if (prev == -1) {
                /* Remove entry from MGM */
                int amgm_index = be32_to_cpu(mgm->next_gid_index) >> 6;
                if (amgm_index) {
                        err = mlx4_READ_ENTRY(dev, amgm_index, mailbox);
                        if (err)
                                goto out;
                } else
                        memset(mgm->gid, 0, 16);

                err = mlx4_WRITE_ENTRY(dev, index, mailbox);
                if (err)
                        goto out;

                if (amgm_index) {
                        if (amgm_index < dev->caps.num_mgms)
                                mlx4_warn(dev, "MGM entry %d had AMGM index %d < %d",
                                          index, amgm_index, dev->caps.num_mgms);
                        else
                                mlx4_bitmap_free(&priv->mcg_table.bitmap,
                                                 amgm_index - dev->caps.num_mgms);
                }
        } else {
                /* Remove entry from AMGM */
                int cur_next_index = be32_to_cpu(mgm->next_gid_index) >> 6;
                err = mlx4_READ_ENTRY(dev, prev, mailbox);
                if (err)
                        goto out;

                mgm->next_gid_index = cpu_to_be32(cur_next_index << 6);

                err = mlx4_WRITE_ENTRY(dev, prev, mailbox);
                if (err)
                        goto out;

                if (index < dev->caps.num_mgms)
                        mlx4_warn(dev, "entry %d had next AMGM index %d < %d",
                                  prev, index, dev->caps.num_mgms);
                else
                        mlx4_bitmap_free(&priv->mcg_table.bitmap,
                                         index - dev->caps.num_mgms);
        }

out:
        mutex_unlock(&priv->mcg_table.mutex);

        mlx4_free_cmd_mailbox(dev, mailbox);
        return err;
}
static int mlx4_QP_ATTACH(struct mlx4_dev *dev, struct mlx4_qp *qp,
                          u8 gid[16], u8 attach, u8 block_loopback,
                          enum mlx4_protocol prot)
{
        struct mlx4_cmd_mailbox *mailbox;
        int err = 0;
        int qpn;

        if (!mlx4_is_mfunc(dev))
                return -EBADF;

        mailbox = mlx4_alloc_cmd_mailbox(dev);
        if (IS_ERR(mailbox))
                return PTR_ERR(mailbox);

        memcpy(mailbox->buf, gid, 16);
        qpn = qp->qpn;
        qpn |= (prot << 28);
        if (attach && block_loopback)
                qpn |= (1 << 31);

        err = mlx4_cmd(dev, mailbox->dma, qpn, attach,
                       MLX4_CMD_QP_ATTACH, MLX4_CMD_TIME_CLASS_A,
                       MLX4_CMD_WRAPPED);

        mlx4_free_cmd_mailbox(dev, mailbox);
        return err;
}
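
/*
 * Note: in the wrapped MLX4_CMD_QP_ATTACH call above, the QPN word
 * doubles as a carrier for extra flags: the protocol is packed in at
 * bit 28 and bit 31 requests loopback blocking on attach.  The
 * mailbox carries the 16-byte GID, and the op modifier distinguishes
 * attach (1) from detach (0).
 */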
int mlx4_trans_to_dmfs_attach(struct mlx4_dev *dev, struct mlx4_qp *qp,
                              u8 gid[16], u8 port,
                              int block_mcast_loopback,
                              enum mlx4_protocol prot, u64 *reg_id)
{
        struct mlx4_spec_list spec = { {NULL} };
        __be64 mac_mask = cpu_to_be64(MLX4_MAC_MASK << 16);

        struct mlx4_net_trans_rule rule = {
                .queue_mode = MLX4_NET_TRANS_Q_FIFO,
                .exclusive = 0,
                .promisc_mode = MLX4_FS_REGULAR,
                .priority = MLX4_DOMAIN_NIC,
        };

        rule.allow_loopback = !block_mcast_loopback;
        rule.port = port;
        rule.qpn = qp->qpn;
        INIT_LIST_HEAD(&rule.list);

        switch (prot) {
        case MLX4_PROT_ETH:
                spec.id = MLX4_NET_TRANS_RULE_ID_ETH;
                memcpy(spec.eth.dst_mac, &gid[10], ETH_ALEN);
                memcpy(spec.eth.dst_mac_msk, &mac_mask, ETH_ALEN);
                break;

        case MLX4_PROT_IB_IPV6:
                spec.id = MLX4_NET_TRANS_RULE_ID_IB;
                memcpy(spec.ib.dst_gid, gid, 16);
                memset(&spec.ib.dst_gid_msk, 0xff, 16);
                break;

        default:
                return -EINVAL;
        }
        list_add_tail(&spec.list, &rule.list);

        return mlx4_flow_attach(dev, &rule, reg_id);
}
int mlx4_multicast_attach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
                          u8 port, int block_mcast_loopback,
                          enum mlx4_protocol prot, u64 *reg_id)
{
        switch (dev->caps.steering_mode) {
        case MLX4_STEERING_MODE_A0:
                if (prot == MLX4_PROT_ETH)
                        return 0;
                /* fall through */

        case MLX4_STEERING_MODE_B0:
                if (prot == MLX4_PROT_ETH)
                        gid[7] |= (MLX4_MC_STEER << 1);

                if (mlx4_is_mfunc(dev))
                        return mlx4_QP_ATTACH(dev, qp, gid, 1,
                                              block_mcast_loopback, prot);
                return mlx4_qp_attach_common(dev, qp, gid,
                                             block_mcast_loopback, prot,
                                             MLX4_MC_STEER);

        case MLX4_STEERING_MODE_DEVICE_MANAGED:
                return mlx4_trans_to_dmfs_attach(dev, qp, gid, port,
                                                 block_mcast_loopback,
                                                 prot, reg_id);
        default:
                return -EINVAL;
        }
}
EXPORT_SYMBOL_GPL(mlx4_multicast_attach);

int mlx4_multicast_detach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
                          enum mlx4_protocol prot, u64 reg_id)
{
        switch (dev->caps.steering_mode) {
        case MLX4_STEERING_MODE_A0:
                if (prot == MLX4_PROT_ETH)
                        return 0;
                /* fall through */

        case MLX4_STEERING_MODE_B0:
                if (prot == MLX4_PROT_ETH)
                        gid[7] |= (MLX4_MC_STEER << 1);

                if (mlx4_is_mfunc(dev))
                        return mlx4_QP_ATTACH(dev, qp, gid, 0, 0, prot);

                return mlx4_qp_detach_common(dev, qp, gid, prot,
                                             MLX4_MC_STEER);

        case MLX4_STEERING_MODE_DEVICE_MANAGED:
                return mlx4_flow_detach(dev, reg_id);

        default:
                return -EINVAL;
        }
}
EXPORT_SYMBOL_GPL(mlx4_multicast_detach);
int mlx4_flow_steer_promisc_add(struct mlx4_dev *dev, u8 port,
                                u32 qpn, enum mlx4_net_trans_promisc_mode mode)
{
        struct mlx4_net_trans_rule rule;
        u64 *regid_p;

        switch (mode) {
        case MLX4_FS_ALL_DEFAULT:
                regid_p = &dev->regid_promisc_array[port];
                break;
        case MLX4_FS_MC_DEFAULT:
                regid_p = &dev->regid_allmulti_array[port];
                break;
        default:
                return -1;
        }

        if (*regid_p != 0)
                return -1;

        rule.promisc_mode = mode;
        rule.port = port;
        rule.qpn = qpn;
        INIT_LIST_HEAD(&rule.list);
        mlx4_err(dev, "going promisc on %x\n", port);

        return mlx4_flow_attach(dev, &rule, regid_p);
}
EXPORT_SYMBOL_GPL(mlx4_flow_steer_promisc_add);

int mlx4_flow_steer_promisc_remove(struct mlx4_dev *dev, u8 port,
                                   enum mlx4_net_trans_promisc_mode mode)
{
        int ret;
        u64 *regid_p;

        switch (mode) {
        case MLX4_FS_ALL_DEFAULT:
                regid_p = &dev->regid_promisc_array[port];
                break;
        case MLX4_FS_MC_DEFAULT:
                regid_p = &dev->regid_allmulti_array[port];
                break;
        default:
                return -1;
        }

        if (*regid_p == 0)
                return -1;

        ret = mlx4_flow_detach(dev, *regid_p);
        if (ret == 0)
                *regid_p = 0;

        return ret;
}
EXPORT_SYMBOL_GPL(mlx4_flow_steer_promisc_remove);
int mlx4_unicast_attach(struct mlx4_dev *dev,
                        struct mlx4_qp *qp, u8 gid[16],
                        int block_mcast_loopback, enum mlx4_protocol prot)
{
        if (prot == MLX4_PROT_ETH)
                gid[7] |= (MLX4_UC_STEER << 1);

        if (mlx4_is_mfunc(dev))
                return mlx4_QP_ATTACH(dev, qp, gid, 1,
                                      block_mcast_loopback, prot);

        return mlx4_qp_attach_common(dev, qp, gid, block_mcast_loopback,
                                     prot, MLX4_UC_STEER);
}
EXPORT_SYMBOL_GPL(mlx4_unicast_attach);

int mlx4_unicast_detach(struct mlx4_dev *dev, struct mlx4_qp *qp,
                        u8 gid[16], enum mlx4_protocol prot)
{
        if (prot == MLX4_PROT_ETH)
                gid[7] |= (MLX4_UC_STEER << 1);

        if (mlx4_is_mfunc(dev))
                return mlx4_QP_ATTACH(dev, qp, gid, 0, 0, prot);

        return mlx4_qp_detach_common(dev, qp, gid, prot, MLX4_UC_STEER);
}
EXPORT_SYMBOL_GPL(mlx4_unicast_detach);

int mlx4_PROMISC_wrapper(struct mlx4_dev *dev, int slave,
                         struct mlx4_vhcr *vhcr,
                         struct mlx4_cmd_mailbox *inbox,
                         struct mlx4_cmd_mailbox *outbox,
                         struct mlx4_cmd_info *cmd)
{
        u32 qpn = (u32) vhcr->in_param & 0xffffffff;
        u8 port = vhcr->in_param >> 62;
        enum mlx4_steer_type steer = vhcr->in_modifier;

        /* Promiscuous unicast is not allowed in mfunc */
        if (mlx4_is_mfunc(dev) && steer == MLX4_UC_STEER)
                return 0;

        if (vhcr->op_modifier)
                return add_promisc_qp(dev, port, steer, qpn);
        else
                return remove_promisc_qp(dev, port, steer, qpn);
}

static int mlx4_PROMISC(struct mlx4_dev *dev, u32 qpn,
                        enum mlx4_steer_type steer, u8 add, u8 port)
{
        return mlx4_cmd(dev, (u64) qpn | (u64) port << 62, (u32) steer, add,
                        MLX4_CMD_PROMISC, MLX4_CMD_TIME_CLASS_A,
                        MLX4_CMD_WRAPPED);
}

int mlx4_multicast_promisc_add(struct mlx4_dev *dev, u32 qpn, u8 port)
{
        if (mlx4_is_mfunc(dev))
                return mlx4_PROMISC(dev, qpn, MLX4_MC_STEER, 1, port);

        return add_promisc_qp(dev, port, MLX4_MC_STEER, qpn);
}
EXPORT_SYMBOL_GPL(mlx4_multicast_promisc_add);

int mlx4_multicast_promisc_remove(struct mlx4_dev *dev, u32 qpn, u8 port)
{
        if (mlx4_is_mfunc(dev))
                return mlx4_PROMISC(dev, qpn, MLX4_MC_STEER, 0, port);

        return remove_promisc_qp(dev, port, MLX4_MC_STEER, qpn);
}
EXPORT_SYMBOL_GPL(mlx4_multicast_promisc_remove);

int mlx4_unicast_promisc_add(struct mlx4_dev *dev, u32 qpn, u8 port)
{
        if (mlx4_is_mfunc(dev))
                return mlx4_PROMISC(dev, qpn, MLX4_UC_STEER, 1, port);

        return add_promisc_qp(dev, port, MLX4_UC_STEER, qpn);
}
EXPORT_SYMBOL_GPL(mlx4_unicast_promisc_add);

int mlx4_unicast_promisc_remove(struct mlx4_dev *dev, u32 qpn, u8 port)
{
        if (mlx4_is_mfunc(dev))
                return mlx4_PROMISC(dev, qpn, MLX4_UC_STEER, 0, port);

        return remove_promisc_qp(dev, port, MLX4_UC_STEER, qpn);
}
EXPORT_SYMBOL_GPL(mlx4_unicast_promisc_remove);
int mlx4_init_mcg_table(struct mlx4_dev *dev)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        int err;

        /* No need for mcg_table when fw manages the mcg table */
        if (dev->caps.steering_mode ==
            MLX4_STEERING_MODE_DEVICE_MANAGED)
                return 0;
        err = mlx4_bitmap_init(&priv->mcg_table.bitmap, dev->caps.num_amgms,
                               dev->caps.num_amgms - 1, 0, 0);
        if (err)
                return err;

        mutex_init(&priv->mcg_table.mutex);

        return 0;
}

void mlx4_cleanup_mcg_table(struct mlx4_dev *dev)
{
        if (dev->caps.steering_mode !=
            MLX4_STEERING_MODE_DEVICE_MANAGED)
                mlx4_bitmap_cleanup(&mlx4_priv(dev)->mcg_table.bitmap);
}