cmd.c

/*
 * Copyright (c) 2004, 2005 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2005, 2006, 2007 Cisco Systems, Inc.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/sched.h>
#include <linux/pci.h>
#include <linux/errno.h>

#include <linux/mlx4/cmd.h>

#include <asm/io.h>

#include "mlx4.h"

#define CMD_POLL_TOKEN 0xffff
enum {
	/* command completed successfully: */
	CMD_STAT_OK = 0x00,
	/* Internal error (such as a bus error) occurred while processing command: */
	CMD_STAT_INTERNAL_ERR = 0x01,
	/* Operation/command not supported or opcode modifier not supported: */
	CMD_STAT_BAD_OP = 0x02,
	/* Parameter not supported or parameter out of range: */
	CMD_STAT_BAD_PARAM = 0x03,
	/* System not enabled or bad system state: */
	CMD_STAT_BAD_SYS_STATE = 0x04,
	/* Attempt to access reserved or unallocated resource: */
	CMD_STAT_BAD_RESOURCE = 0x05,
	/* Requested resource is currently executing a command, or is otherwise busy: */
	CMD_STAT_RESOURCE_BUSY = 0x06,
	/* Required capability exceeds device limits: */
	CMD_STAT_EXCEED_LIM = 0x08,
	/* Resource is not in the appropriate state or ownership: */
	CMD_STAT_BAD_RES_STATE = 0x09,
	/* Index out of range: */
	CMD_STAT_BAD_INDEX = 0x0a,
	/* FW image corrupted: */
	CMD_STAT_BAD_NVMEM = 0x0b,
	/* Error in ICM mapping (e.g. not enough auxiliary ICM pages to execute command): */
	CMD_STAT_ICM_ERROR = 0x0c,
	/* Attempt to modify a QP/EE which is not in the presumed state: */
	CMD_STAT_BAD_QP_STATE = 0x10,
	/* Bad segment parameters (Address/Size): */
	CMD_STAT_BAD_SEG_PARAM = 0x20,
	/* Memory Region has Memory Windows bound to it: */
	CMD_STAT_REG_BOUND = 0x21,
	/* HCA local attached memory not present: */
	CMD_STAT_LAM_NOT_PRE = 0x22,
	/* Bad management packet (silently discarded): */
	CMD_STAT_BAD_PKT = 0x30,
	/* More outstanding CQEs in CQ than new CQ size: */
	CMD_STAT_BAD_SIZE = 0x40,
	/* Multi Function device support required: */
	CMD_STAT_MULTI_FUNC_REQ = 0x50,
};
enum {
	HCR_IN_PARAM_OFFSET = 0x00,
	HCR_IN_MODIFIER_OFFSET = 0x08,
	HCR_OUT_PARAM_OFFSET = 0x0c,
	HCR_TOKEN_OFFSET = 0x14,
	HCR_STATUS_OFFSET = 0x18,

	HCR_OPMOD_SHIFT = 12,
	HCR_T_BIT = 21,
	HCR_E_BIT = 22,
	HCR_GO_BIT = 23
};

enum {
	GO_BIT_TIMEOUT_MSECS = 10000
};
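
/*
 * Per-command bookkeeping used when commands complete via events: the
 * completion the caller sleeps on, the result and immediate output,
 * and the token that matches a completion event to its command.
 */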
struct mlx4_cmd_context {
	struct completion done;
	int result;
	int next;
	u64 out_param;
	u16 token;
};
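
/*
 * Translate a firmware command status code from the table above into
 * a Linux errno; unknown or unmapped non-zero codes fall back to -EIO.
 */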
static int mlx4_status_to_errno(u8 status)
{
	static const int trans_table[] = {
		[CMD_STAT_INTERNAL_ERR]	  = -EIO,
		[CMD_STAT_BAD_OP]	  = -EPERM,
		[CMD_STAT_BAD_PARAM]	  = -EINVAL,
		[CMD_STAT_BAD_SYS_STATE]  = -ENXIO,
		[CMD_STAT_BAD_RESOURCE]	  = -EBADF,
		[CMD_STAT_RESOURCE_BUSY]  = -EBUSY,
		[CMD_STAT_EXCEED_LIM]	  = -ENOMEM,
		[CMD_STAT_BAD_RES_STATE]  = -EBADF,
		[CMD_STAT_BAD_INDEX]	  = -EBADF,
		[CMD_STAT_BAD_NVMEM]	  = -EFAULT,
		[CMD_STAT_ICM_ERROR]	  = -ENFILE,
		[CMD_STAT_BAD_QP_STATE]   = -EINVAL,
		[CMD_STAT_BAD_SEG_PARAM]  = -EFAULT,
		[CMD_STAT_REG_BOUND]	  = -EBUSY,
		[CMD_STAT_LAM_NOT_PRE]	  = -EAGAIN,
		[CMD_STAT_BAD_PKT]	  = -EINVAL,
		[CMD_STAT_BAD_SIZE]	  = -ENOMEM,
		[CMD_STAT_MULTI_FUNC_REQ] = -EACCES,
	};

	if (status >= ARRAY_SIZE(trans_table) ||
	    (status != CMD_STAT_OK && trans_table[status] == 0))
		return -EIO;

	return trans_table[status];
}
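
/*
 * The HCR is still owned by the previous command if its GO bit is set,
 * or if the toggle bit does not yet reflect the value the driver wrote
 * when posting that command.
 */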
static int cmd_pending(struct mlx4_dev *dev)
{
	u32 status = readl(mlx4_priv(dev)->cmd.hcr + HCR_STATUS_OFFSET);

	return (status & swab32(1 << HCR_GO_BIT)) ||
		(mlx4_priv(dev)->cmd.toggle ==
		 !!(status & swab32(1 << HCR_T_BIT)));
}
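
/*
 * Write a command to the HCR: input/output parameters, input modifier
 * and token first, then a final doorbell word carrying the GO bit,
 * toggle bit, event bit, opcode modifier and opcode.
 */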
static int mlx4_cmd_post(struct mlx4_dev *dev, u64 in_param, u64 out_param,
			 u32 in_modifier, u8 op_modifier, u16 op, u16 token,
			 int event)
{
	struct mlx4_cmd *cmd = &mlx4_priv(dev)->cmd;
	u32 __iomem *hcr = cmd->hcr;
	int ret = -EAGAIN;
	unsigned long end;

	mutex_lock(&cmd->hcr_mutex);

	end = jiffies;
	if (event)
		end += msecs_to_jiffies(GO_BIT_TIMEOUT_MSECS);

	while (cmd_pending(dev)) {
		if (time_after_eq(jiffies, end))
			goto out;
		cond_resched();
	}

	/*
	 * We use writel (instead of something like memcpy_toio)
	 * because writes of less than 32 bits to the HCR don't work
	 * (and some architectures such as ia64 implement memcpy_toio
	 * in terms of writeb).
	 */
	__raw_writel((__force u32) cpu_to_be32(in_param >> 32), hcr + 0);
	__raw_writel((__force u32) cpu_to_be32(in_param & 0xfffffffful), hcr + 1);
	__raw_writel((__force u32) cpu_to_be32(in_modifier), hcr + 2);
	__raw_writel((__force u32) cpu_to_be32(out_param >> 32), hcr + 3);
	__raw_writel((__force u32) cpu_to_be32(out_param & 0xfffffffful), hcr + 4);
	__raw_writel((__force u32) cpu_to_be32(token << 16), hcr + 5);

	/* __raw_writel may not order writes. */
	wmb();

	__raw_writel((__force u32) cpu_to_be32((1 << HCR_GO_BIT) |
					       (cmd->toggle << HCR_T_BIT) |
					       (event ? (1 << HCR_E_BIT) : 0) |
					       (op_modifier << HCR_OPMOD_SHIFT) |
					       op), hcr + 6);

	/*
	 * Make sure that our HCR writes don't get mixed in with
	 * writes from another CPU starting a FW command.
	 */
	mmiowb();

	cmd->toggle = cmd->toggle ^ 1;

	ret = 0;

out:
	mutex_unlock(&cmd->hcr_mutex);
	return ret;
}
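
/*
 * Polling path: post the command, spin (yielding the CPU) until the
 * GO bit clears or the timeout expires, then read the immediate
 * output and status back from the HCR.
 */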
static int mlx4_cmd_poll(struct mlx4_dev *dev, u64 in_param, u64 *out_param,
			 int out_is_imm, u32 in_modifier, u8 op_modifier,
			 u16 op, unsigned long timeout)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	void __iomem *hcr = priv->cmd.hcr;
	int err = 0;
	unsigned long end;

	down(&priv->cmd.poll_sem);

	err = mlx4_cmd_post(dev, in_param, out_param ? *out_param : 0,
			    in_modifier, op_modifier, op, CMD_POLL_TOKEN, 0);
	if (err)
		goto out;

	end = msecs_to_jiffies(timeout) + jiffies;
	while (cmd_pending(dev) && time_before(jiffies, end))
		cond_resched();

	if (cmd_pending(dev)) {
		err = -ETIMEDOUT;
		goto out;
	}

	if (out_is_imm)
		*out_param =
			(u64) be32_to_cpu((__force __be32)
					  __raw_readl(hcr + HCR_OUT_PARAM_OFFSET)) << 32 |
			(u64) be32_to_cpu((__force __be32)
					  __raw_readl(hcr + HCR_OUT_PARAM_OFFSET + 4));

	err = mlx4_status_to_errno(be32_to_cpu((__force __be32)
					       __raw_readl(hcr + HCR_STATUS_OFFSET)) >> 24);

out:
	up(&priv->cmd.poll_sem);
	return err;
}
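
/*
 * Completion handler invoked when a command-completion event arrives:
 * look up the waiting context by token, store the status and immediate
 * output, and wake the sleeping caller.
 */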
void mlx4_cmd_event(struct mlx4_dev *dev, u16 token, u8 status, u64 out_param)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_cmd_context *context =
		&priv->cmd.context[token & priv->cmd.token_mask];

	/* previously timed out command completing at long last */
	if (token != context->token)
		return;

	context->result = mlx4_status_to_errno(status);
	context->out_param = out_param;

	complete(&context->done);
}
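
/*
 * Event path: take a free command context, post the command with the
 * event bit set, and sleep until mlx4_cmd_event() signals completion
 * or the timeout expires.
 */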
static int mlx4_cmd_wait(struct mlx4_dev *dev, u64 in_param, u64 *out_param,
			 int out_is_imm, u32 in_modifier, u8 op_modifier,
			 u16 op, unsigned long timeout)
{
	struct mlx4_cmd *cmd = &mlx4_priv(dev)->cmd;
	struct mlx4_cmd_context *context;
	int err = 0;

	down(&cmd->event_sem);

	spin_lock(&cmd->context_lock);
	BUG_ON(cmd->free_head < 0);
	context = &cmd->context[cmd->free_head];
	context->token += cmd->token_mask + 1;
	cmd->free_head = context->next;
	spin_unlock(&cmd->context_lock);

	init_completion(&context->done);

	mlx4_cmd_post(dev, in_param, out_param ? *out_param : 0,
		      in_modifier, op_modifier, op, context->token, 1);

	if (!wait_for_completion_timeout(&context->done, msecs_to_jiffies(timeout))) {
		err = -EBUSY;
		goto out;
	}

	err = context->result;
	if (err)
		goto out;

	if (out_is_imm)
		*out_param = context->out_param;

out:
	spin_lock(&cmd->context_lock);
	context->next = cmd->free_head;
	cmd->free_head = context - cmd->context;
	spin_unlock(&cmd->context_lock);

	up(&cmd->event_sem);
	return err;
}
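
/*
 * Common entry point for firmware commands: dispatch to the event path
 * when command-completion events are enabled, otherwise poll.
 */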
int __mlx4_cmd(struct mlx4_dev *dev, u64 in_param, u64 *out_param,
	       int out_is_imm, u32 in_modifier, u8 op_modifier,
	       u16 op, unsigned long timeout)
{
	if (mlx4_priv(dev)->cmd.use_events)
		return mlx4_cmd_wait(dev, in_param, out_param, out_is_imm,
				     in_modifier, op_modifier, op, timeout);
	else
		return mlx4_cmd_poll(dev, in_param, out_param, out_is_imm,
				     in_modifier, op_modifier, op, timeout);
}
EXPORT_SYMBOL_GPL(__mlx4_cmd);
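
/*
 * Set up the command interface in polling mode: map the HCR from BAR 0
 * and create the DMA pool used for command mailboxes.
 */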
int mlx4_cmd_init(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);

	mutex_init(&priv->cmd.hcr_mutex);
	sema_init(&priv->cmd.poll_sem, 1);
	priv->cmd.use_events = 0;
	priv->cmd.toggle = 1;

	priv->cmd.hcr = ioremap(pci_resource_start(dev->pdev, 0) + MLX4_HCR_BASE,
				MLX4_HCR_SIZE);
	if (!priv->cmd.hcr) {
		mlx4_err(dev, "Couldn't map command register.");
		return -ENOMEM;
	}

	priv->cmd.pool = pci_pool_create("mlx4_cmd", dev->pdev,
					 MLX4_MAILBOX_SIZE,
					 MLX4_MAILBOX_SIZE, 0);
	if (!priv->cmd.pool) {
		iounmap(priv->cmd.hcr);
		return -ENOMEM;
	}

	return 0;
}
void mlx4_cmd_cleanup(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);

	pci_pool_destroy(priv->cmd.pool);
	iounmap(priv->cmd.hcr);
}
/*
 * Switch to using events to issue FW commands (can only be called
 * after event queue for command events has been initialized).
 */
int mlx4_cmd_use_events(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int i;

	priv->cmd.context = kmalloc(priv->cmd.max_cmds *
				    sizeof (struct mlx4_cmd_context),
				    GFP_KERNEL);
	if (!priv->cmd.context)
		return -ENOMEM;

	for (i = 0; i < priv->cmd.max_cmds; ++i) {
		priv->cmd.context[i].token = i;
		priv->cmd.context[i].next = i + 1;
	}

	priv->cmd.context[priv->cmd.max_cmds - 1].next = -1;
	priv->cmd.free_head = 0;

	sema_init(&priv->cmd.event_sem, priv->cmd.max_cmds);
	spin_lock_init(&priv->cmd.context_lock);

	/*
	 * token_mask becomes (smallest power of two >= max_cmds) - 1,
	 * so a completion token can be masked down to a context index.
	 */
	for (priv->cmd.token_mask = 1;
	     priv->cmd.token_mask < priv->cmd.max_cmds;
	     priv->cmd.token_mask <<= 1)
		; /* nothing */
	--priv->cmd.token_mask;

	priv->cmd.use_events = 1;

	down(&priv->cmd.poll_sem);

	return 0;
}
/*
 * Switch back to polling (used when shutting down the device)
 */
void mlx4_cmd_use_polling(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int i;

	priv->cmd.use_events = 0;

	for (i = 0; i < priv->cmd.max_cmds; ++i)
		down(&priv->cmd.event_sem);

	kfree(priv->cmd.context);

	up(&priv->cmd.poll_sem);
}
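
/*
 * Allocate a command mailbox: the descriptor comes from kmalloc and
 * the MLX4_MAILBOX_SIZE data buffer from the driver's DMA pool.
 */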
struct mlx4_cmd_mailbox *mlx4_alloc_cmd_mailbox(struct mlx4_dev *dev)
{
	struct mlx4_cmd_mailbox *mailbox;

	mailbox = kmalloc(sizeof *mailbox, GFP_KERNEL);
	if (!mailbox)
		return ERR_PTR(-ENOMEM);

	mailbox->buf = pci_pool_alloc(mlx4_priv(dev)->cmd.pool, GFP_KERNEL,
				      &mailbox->dma);
	if (!mailbox->buf) {
		kfree(mailbox);
		return ERR_PTR(-ENOMEM);
	}

	return mailbox;
}
EXPORT_SYMBOL_GPL(mlx4_alloc_cmd_mailbox);
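
/* Release a mailbox allocated with mlx4_alloc_cmd_mailbox(); NULL is a no-op. */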
void mlx4_free_cmd_mailbox(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox)
{
	if (!mailbox)
		return;

	pci_pool_free(mlx4_priv(dev)->cmd.pool, mailbox->buf, mailbox->dma);
	kfree(mailbox);
}
EXPORT_SYMBOL_GPL(mlx4_free_cmd_mailbox);