/*
 * Copyright (c) 2004, 2005 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies.  All rights reserved.
 * Copyright (c) 2005, 2006, 2007 Cisco Systems, Inc.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/sched.h>
#include <linux/pci.h>
#include <linux/errno.h>

#include <linux/mlx4/cmd.h>

#include <asm/io.h>

#include "mlx4.h"

#define CMD_POLL_TOKEN 0xffff

enum {
        /* command completed successfully: */
        CMD_STAT_OK             = 0x00,
        /* Internal error (such as a bus error) occurred while processing command: */
        CMD_STAT_INTERNAL_ERR   = 0x01,
        /* Operation/command not supported or opcode modifier not supported: */
        CMD_STAT_BAD_OP         = 0x02,
        /* Parameter not supported or parameter out of range: */
        CMD_STAT_BAD_PARAM      = 0x03,
        /* System not enabled or bad system state: */
        CMD_STAT_BAD_SYS_STATE  = 0x04,
        /* Attempt to access reserved or unallocated resource: */
        CMD_STAT_BAD_RESOURCE   = 0x05,
        /* Requested resource is currently executing a command, or is otherwise busy: */
        CMD_STAT_RESOURCE_BUSY  = 0x06,
        /* Required capability exceeds device limits: */
        CMD_STAT_EXCEED_LIM     = 0x08,
        /* Resource is not in the appropriate state or ownership: */
        CMD_STAT_BAD_RES_STATE  = 0x09,
        /* Index out of range: */
        CMD_STAT_BAD_INDEX      = 0x0a,
        /* FW image corrupted: */
        CMD_STAT_BAD_NVMEM      = 0x0b,
        /* Error in ICM mapping (e.g. not enough auxiliary ICM pages to execute command): */
        CMD_STAT_ICM_ERROR      = 0x0c,
        /* Attempt to modify a QP/EE which is not in the presumed state: */
        CMD_STAT_BAD_QP_STATE   = 0x10,
        /* Bad segment parameters (Address/Size): */
        CMD_STAT_BAD_SEG_PARAM  = 0x20,
        /* Memory Region has Memory Windows bound to it: */
        CMD_STAT_REG_BOUND      = 0x21,
        /* HCA local attached memory not present: */
        CMD_STAT_LAM_NOT_PRE    = 0x22,
        /* Bad management packet (silently discarded): */
        CMD_STAT_BAD_PKT        = 0x30,
        /* More outstanding CQEs in CQ than new CQ size: */
        CMD_STAT_BAD_SIZE       = 0x40
};

enum {
        HCR_IN_PARAM_OFFSET     = 0x00,
        HCR_IN_MODIFIER_OFFSET  = 0x08,
        HCR_OUT_PARAM_OFFSET    = 0x0c,
        HCR_TOKEN_OFFSET        = 0x14,
        HCR_STATUS_OFFSET       = 0x18,

        HCR_OPMOD_SHIFT         = 12,
        HCR_T_BIT               = 21,
        HCR_E_BIT               = 22,
        HCR_GO_BIT              = 23
};

enum {
        GO_BIT_TIMEOUT_MSECS    = 10000
};

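/*
 * Per-command context used in event (interrupt-driven) mode: the EQ
 * handler matches a completion to its waiter via the token and fills in
 * the result and immediate output parameter before signalling done.
 */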
struct mlx4_cmd_context {
        struct completion       done;
        int                     result;
        int                     next;
        u64                     out_param;
        u16                     token;
};

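/* Translate a firmware command status code into a Linux errno value. */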
static int mlx4_status_to_errno(u8 status)
{
        static const int trans_table[] = {
                [CMD_STAT_INTERNAL_ERR]   = -EIO,
                [CMD_STAT_BAD_OP]         = -EPERM,
                [CMD_STAT_BAD_PARAM]      = -EINVAL,
                [CMD_STAT_BAD_SYS_STATE]  = -ENXIO,
                [CMD_STAT_BAD_RESOURCE]   = -EBADF,
                [CMD_STAT_RESOURCE_BUSY]  = -EBUSY,
                [CMD_STAT_EXCEED_LIM]     = -ENOMEM,
                [CMD_STAT_BAD_RES_STATE]  = -EBADF,
                [CMD_STAT_BAD_INDEX]      = -EBADF,
                [CMD_STAT_BAD_NVMEM]      = -EFAULT,
                [CMD_STAT_ICM_ERROR]      = -ENFILE,
                [CMD_STAT_BAD_QP_STATE]   = -EINVAL,
                [CMD_STAT_BAD_SEG_PARAM]  = -EFAULT,
                [CMD_STAT_REG_BOUND]      = -EBUSY,
                [CMD_STAT_LAM_NOT_PRE]    = -EAGAIN,
                [CMD_STAT_BAD_PKT]        = -EINVAL,
                [CMD_STAT_BAD_SIZE]       = -ENOMEM,
        };

        if (status >= ARRAY_SIZE(trans_table) ||
            (status != CMD_STAT_OK && trans_table[status] == 0))
                return -EIO;

        return trans_table[status];
}

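/*
 * Returns nonzero while the HCR is still owned by firmware: either the
 * GO bit is set, or the toggle bit read back from the HCR is not
 * consistent with the driver's current toggle value.
 */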
static int cmd_pending(struct mlx4_dev *dev)
{
        u32 status = readl(mlx4_priv(dev)->cmd.hcr + HCR_STATUS_OFFSET);

        return (status & swab32(1 << HCR_GO_BIT)) ||
                (mlx4_priv(dev)->cmd.toggle ==
                 !!(status & swab32(1 << HCR_T_BIT)));
}

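/*
 * Write one command into the HCR: wait (bounded) for the GO bit to
 * clear, fill in the input/output parameters, modifier, token and
 * opcode, then set the GO bit to hand the command to firmware.
 */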
static int mlx4_cmd_post(struct mlx4_dev *dev, u64 in_param, u64 out_param,
                         u32 in_modifier, u8 op_modifier, u16 op, u16 token,
                         int event)
{
        struct mlx4_cmd *cmd = &mlx4_priv(dev)->cmd;
        u32 __iomem *hcr = cmd->hcr;
        int ret = -EAGAIN;
        unsigned long end;

        mutex_lock(&cmd->hcr_mutex);

        end = jiffies;
        if (event)
                end += msecs_to_jiffies(GO_BIT_TIMEOUT_MSECS);

        while (cmd_pending(dev)) {
                if (time_after_eq(jiffies, end))
                        goto out;
                cond_resched();
        }

        /*
         * We use writel (instead of something like memcpy_toio)
         * because writes of less than 32 bits to the HCR don't work
         * (and some architectures such as ia64 implement memcpy_toio
         * in terms of writeb).
         */
        __raw_writel((__force u32) cpu_to_be32(in_param >> 32),           hcr + 0);
        __raw_writel((__force u32) cpu_to_be32(in_param & 0xfffffffful),  hcr + 1);
        __raw_writel((__force u32) cpu_to_be32(in_modifier),              hcr + 2);
        __raw_writel((__force u32) cpu_to_be32(out_param >> 32),          hcr + 3);
        __raw_writel((__force u32) cpu_to_be32(out_param & 0xfffffffful), hcr + 4);
        __raw_writel((__force u32) cpu_to_be32(token << 16),              hcr + 5);

        /* __raw_writel may not order writes. */
        wmb();

        __raw_writel((__force u32) cpu_to_be32((1 << HCR_GO_BIT)                |
                                               (cmd->toggle << HCR_T_BIT)       |
                                               (event ? (1 << HCR_E_BIT) : 0)   |
                                               (op_modifier << HCR_OPMOD_SHIFT) |
                                               op), hcr + 6);

        /*
         * Make sure that our HCR writes don't get mixed in with
         * writes from another CPU starting a FW command.
         */
        mmiowb();

        cmd->toggle = cmd->toggle ^ 1;

        ret = 0;

out:
        mutex_unlock(&cmd->hcr_mutex);
        return ret;
}

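/*
 * Execute a command in polling mode: post it, spin (with cond_resched)
 * until the GO bit clears or the timeout expires, then read the status
 * and, if requested, the immediate output parameter from the HCR.
 */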
static int mlx4_cmd_poll(struct mlx4_dev *dev, u64 in_param, u64 *out_param,
                         int out_is_imm, u32 in_modifier, u8 op_modifier,
                         u16 op, unsigned long timeout)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        void __iomem *hcr = priv->cmd.hcr;
        int err = 0;
        unsigned long end;

        down(&priv->cmd.poll_sem);

        err = mlx4_cmd_post(dev, in_param, out_param ? *out_param : 0,
                            in_modifier, op_modifier, op, CMD_POLL_TOKEN, 0);
        if (err)
                goto out;

        end = msecs_to_jiffies(timeout) + jiffies;
        while (cmd_pending(dev) && time_before(jiffies, end))
                cond_resched();

        if (cmd_pending(dev)) {
                err = -ETIMEDOUT;
                goto out;
        }

        if (out_is_imm)
                *out_param =
                        (u64) be32_to_cpu((__force __be32)
                                          __raw_readl(hcr + HCR_OUT_PARAM_OFFSET)) << 32 |
                        (u64) be32_to_cpu((__force __be32)
                                          __raw_readl(hcr + HCR_OUT_PARAM_OFFSET + 4));

        err = mlx4_status_to_errno(be32_to_cpu((__force __be32)
                                               __raw_readl(hcr + HCR_STATUS_OFFSET)) >> 24);

out:
        up(&priv->cmd.poll_sem);
        return err;
}

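/*
 * Completion handler, called from the command EQ path when a command
 * posted in event mode finishes.  The token identifies the outstanding
 * context the completion belongs to; stale completions for commands
 * that already timed out are dropped.
 */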
void mlx4_cmd_event(struct mlx4_dev *dev, u16 token, u8 status, u64 out_param)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        struct mlx4_cmd_context *context =
                &priv->cmd.context[token & priv->cmd.token_mask];

        /* previously timed out command completing at long last */
        if (token != context->token)
                return;

        context->result    = mlx4_status_to_errno(status);
        context->out_param = out_param;

        complete(&context->done);
}

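/*
 * Execute a command in event mode: grab a free context, post the
 * command with that context's token, and sleep until mlx4_cmd_event()
 * signals completion or the timeout expires.
 */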
static int mlx4_cmd_wait(struct mlx4_dev *dev, u64 in_param, u64 *out_param,
                         int out_is_imm, u32 in_modifier, u8 op_modifier,
                         u16 op, unsigned long timeout)
{
        struct mlx4_cmd *cmd = &mlx4_priv(dev)->cmd;
        struct mlx4_cmd_context *context;
        int err = 0;

        down(&cmd->event_sem);

        spin_lock(&cmd->context_lock);
        BUG_ON(cmd->free_head < 0);
        context = &cmd->context[cmd->free_head];
        context->token += cmd->token_mask + 1;
        cmd->free_head = context->next;
        spin_unlock(&cmd->context_lock);

        init_completion(&context->done);

        mlx4_cmd_post(dev, in_param, out_param ? *out_param : 0,
                      in_modifier, op_modifier, op, context->token, 1);

        if (!wait_for_completion_timeout(&context->done, msecs_to_jiffies(timeout))) {
                err = -EBUSY;
                goto out;
        }

        err = context->result;
        if (err)
                goto out;

        if (out_is_imm)
                *out_param = context->out_param;

out:
        spin_lock(&cmd->context_lock);
        context->next = cmd->free_head;
        cmd->free_head = context - cmd->context;
        spin_unlock(&cmd->context_lock);

        up(&cmd->event_sem);
        return err;
}

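/*
 * Common entry point for issuing a firmware command (normally reached
 * via the wrappers in <linux/mlx4/cmd.h>): dispatches to the event-driven
 * or polling implementation depending on whether command completion
 * events are enabled.
 */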
int __mlx4_cmd(struct mlx4_dev *dev, u64 in_param, u64 *out_param,
               int out_is_imm, u32 in_modifier, u8 op_modifier,
               u16 op, unsigned long timeout)
{
        if (mlx4_priv(dev)->cmd.use_events)
                return mlx4_cmd_wait(dev, in_param, out_param, out_is_imm,
                                     in_modifier, op_modifier, op, timeout);
        else
                return mlx4_cmd_poll(dev, in_param, out_param, out_is_imm,
                                     in_modifier, op_modifier, op, timeout);
}
EXPORT_SYMBOL_GPL(__mlx4_cmd);

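/*
 * One-time setup of the command interface: map the HCR registers from
 * BAR 0 and create the DMA pool used for command mailboxes.  The driver
 * starts out in polling mode.
 */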
int mlx4_cmd_init(struct mlx4_dev *dev)
{
        struct mlx4_priv *priv = mlx4_priv(dev);

        mutex_init(&priv->cmd.hcr_mutex);
        sema_init(&priv->cmd.poll_sem, 1);
        priv->cmd.use_events = 0;
        priv->cmd.toggle     = 1;

        priv->cmd.hcr = ioremap(pci_resource_start(dev->pdev, 0) + MLX4_HCR_BASE,
                                MLX4_HCR_SIZE);
        if (!priv->cmd.hcr) {
                mlx4_err(dev, "Couldn't map command register.");
                return -ENOMEM;
        }

        priv->cmd.pool = pci_pool_create("mlx4_cmd", dev->pdev,
                                         MLX4_MAILBOX_SIZE,
                                         MLX4_MAILBOX_SIZE, 0);
        if (!priv->cmd.pool) {
                iounmap(priv->cmd.hcr);
                return -ENOMEM;
        }

        return 0;
}

void mlx4_cmd_cleanup(struct mlx4_dev *dev)
{
        struct mlx4_priv *priv = mlx4_priv(dev);

        pci_pool_destroy(priv->cmd.pool);
        iounmap(priv->cmd.hcr);
}

/*
 * Switch to using events to issue FW commands (can only be called
 * after event queue for command events has been initialized).
 */
int mlx4_cmd_use_events(struct mlx4_dev *dev)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        int i;

        priv->cmd.context = kmalloc(priv->cmd.max_cmds *
                                    sizeof (struct mlx4_cmd_context),
                                    GFP_KERNEL);
        if (!priv->cmd.context)
                return -ENOMEM;

        for (i = 0; i < priv->cmd.max_cmds; ++i) {
                priv->cmd.context[i].token = i;
                priv->cmd.context[i].next  = i + 1;
        }

        priv->cmd.context[priv->cmd.max_cmds - 1].next = -1;
        priv->cmd.free_head = 0;

        sema_init(&priv->cmd.event_sem, priv->cmd.max_cmds);
        spin_lock_init(&priv->cmd.context_lock);

        for (priv->cmd.token_mask = 1;
             priv->cmd.token_mask < priv->cmd.max_cmds;
             priv->cmd.token_mask <<= 1)
                ; /* nothing */
        --priv->cmd.token_mask;

        priv->cmd.use_events = 1;

        down(&priv->cmd.poll_sem);

        return 0;
}

/*
 * Switch back to polling (used when shutting down the device)
 */
void mlx4_cmd_use_polling(struct mlx4_dev *dev)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        int i;

        priv->cmd.use_events = 0;

        for (i = 0; i < priv->cmd.max_cmds; ++i)
                down(&priv->cmd.event_sem);

        kfree(priv->cmd.context);

        up(&priv->cmd.poll_sem);
}

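/*
 * Allocate a DMA-able mailbox buffer from the command pool.  Returns an
 * ERR_PTR on failure, so callers should check the result with IS_ERR().
 */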
struct mlx4_cmd_mailbox *mlx4_alloc_cmd_mailbox(struct mlx4_dev *dev)
{
        struct mlx4_cmd_mailbox *mailbox;

        mailbox = kmalloc(sizeof *mailbox, GFP_KERNEL);
        if (!mailbox)
                return ERR_PTR(-ENOMEM);

        mailbox->buf = pci_pool_alloc(mlx4_priv(dev)->cmd.pool, GFP_KERNEL,
                                      &mailbox->dma);
        if (!mailbox->buf) {
                kfree(mailbox);
                return ERR_PTR(-ENOMEM);
        }

        return mailbox;
}
EXPORT_SYMBOL_GPL(mlx4_alloc_cmd_mailbox);

void mlx4_free_cmd_mailbox(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox)
{
        if (!mailbox)
                return;

        pci_pool_free(mlx4_priv(dev)->cmd.pool, mailbox->buf, mailbox->dma);
        kfree(mailbox);
}
EXPORT_SYMBOL_GPL(mlx4_free_cmd_mailbox);