cmd.c

/*
 * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved.
 * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2005, 2006, 2007 Cisco Systems, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/sched.h>
#include <linux/pci.h>
#include <linux/errno.h>

#include <linux/mlx4/cmd.h>

#include <asm/io.h>

#include "mlx4.h"

#define CMD_POLL_TOKEN 0xffff

enum {
        /* command completed successfully: */
        CMD_STAT_OK             = 0x00,
        /* Internal error (such as a bus error) occurred while processing command: */
        CMD_STAT_INTERNAL_ERR   = 0x01,
        /* Operation/command not supported or opcode modifier not supported: */
        CMD_STAT_BAD_OP         = 0x02,
        /* Parameter not supported or parameter out of range: */
        CMD_STAT_BAD_PARAM      = 0x03,
        /* System not enabled or bad system state: */
        CMD_STAT_BAD_SYS_STATE  = 0x04,
        /* Attempt to access reserved or unallocated resource: */
        CMD_STAT_BAD_RESOURCE   = 0x05,
        /* Requested resource is currently executing a command, or is otherwise busy: */
        CMD_STAT_RESOURCE_BUSY  = 0x06,
        /* Required capability exceeds device limits: */
        CMD_STAT_EXCEED_LIM     = 0x08,
        /* Resource is not in the appropriate state or ownership: */
        CMD_STAT_BAD_RES_STATE  = 0x09,
        /* Index out of range: */
        CMD_STAT_BAD_INDEX      = 0x0a,
        /* FW image corrupted: */
        CMD_STAT_BAD_NVMEM      = 0x0b,
        /* Attempt to modify a QP/EE which is not in the presumed state: */
        CMD_STAT_BAD_QP_STATE   = 0x10,
        /* Bad segment parameters (Address/Size): */
        CMD_STAT_BAD_SEG_PARAM  = 0x20,
        /* Memory Region has Memory Windows bound to: */
        CMD_STAT_REG_BOUND      = 0x21,
        /* HCA local attached memory not present: */
        CMD_STAT_LAM_NOT_PRE    = 0x22,
        /* Bad management packet (silently discarded): */
        CMD_STAT_BAD_PKT        = 0x30,
        /* More outstanding CQEs in CQ than new CQ size: */
        CMD_STAT_BAD_SIZE       = 0x40
};

enum {
        HCR_IN_PARAM_OFFSET     = 0x00,
        HCR_IN_MODIFIER_OFFSET  = 0x08,
        HCR_OUT_PARAM_OFFSET    = 0x0c,
        HCR_TOKEN_OFFSET        = 0x14,
        HCR_STATUS_OFFSET       = 0x18,

        HCR_OPMOD_SHIFT         = 12,
        HCR_T_BIT               = 21,
        HCR_E_BIT               = 22,
        HCR_GO_BIT              = 23
};

enum {
        GO_BIT_TIMEOUT_MSECS    = 10000
};

struct mlx4_cmd_context {
        struct completion       done;
        int                     result;
        int                     next;
        u64                     out_param;
        u16                     token;
};

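/*
 * Translate a firmware command status code into a negative errno;
 * unknown or unmapped codes fall back to -EIO.
 */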
static int mlx4_status_to_errno(u8 status)
{
        static const int trans_table[] = {
                [CMD_STAT_INTERNAL_ERR]   = -EIO,
                [CMD_STAT_BAD_OP]         = -EPERM,
                [CMD_STAT_BAD_PARAM]      = -EINVAL,
                [CMD_STAT_BAD_SYS_STATE]  = -ENXIO,
                [CMD_STAT_BAD_RESOURCE]   = -EBADF,
                [CMD_STAT_RESOURCE_BUSY]  = -EBUSY,
                [CMD_STAT_EXCEED_LIM]     = -ENOMEM,
                [CMD_STAT_BAD_RES_STATE]  = -EBADF,
                [CMD_STAT_BAD_INDEX]      = -EBADF,
                [CMD_STAT_BAD_NVMEM]      = -EFAULT,
                [CMD_STAT_BAD_QP_STATE]   = -EINVAL,
                [CMD_STAT_BAD_SEG_PARAM]  = -EFAULT,
                [CMD_STAT_REG_BOUND]      = -EBUSY,
                [CMD_STAT_LAM_NOT_PRE]    = -EAGAIN,
                [CMD_STAT_BAD_PKT]        = -EINVAL,
                [CMD_STAT_BAD_SIZE]       = -ENOMEM,
        };

        if (status >= ARRAY_SIZE(trans_table) ||
            (status != CMD_STAT_OK && trans_table[status] == 0))
                return -EIO;

        return trans_table[status];
}

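/*
 * The HCR is still busy if the 'go' bit is set in the status word, or
 * if the toggle bit there matches the toggle value the driver holds
 * for its next command post.
 */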
static int cmd_pending(struct mlx4_dev *dev)
{
        u32 status = readl(mlx4_priv(dev)->cmd.hcr + HCR_STATUS_OFFSET);

        return (status & swab32(1 << HCR_GO_BIT)) ||
                (mlx4_priv(dev)->cmd.toggle ==
                 !!(status & swab32(1 << HCR_T_BIT)));
}

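/*
 * Write one command into the HCR: six parameter/token words, then
 * (after a write barrier) the word carrying the opcode, toggle and
 * 'go' bit.  Serialized by hcr_mutex; returns -EAGAIN if the HCR
 * does not become free in time.
 */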
static int mlx4_cmd_post(struct mlx4_dev *dev, u64 in_param, u64 out_param,
                         u32 in_modifier, u8 op_modifier, u16 op, u16 token,
                         int event)
{
        struct mlx4_cmd *cmd = &mlx4_priv(dev)->cmd;
        u32 __iomem *hcr = cmd->hcr;
        int ret = -EAGAIN;
        unsigned long end;

        mutex_lock(&cmd->hcr_mutex);

        end = jiffies;
        if (event)
                end += msecs_to_jiffies(GO_BIT_TIMEOUT_MSECS);

        while (cmd_pending(dev)) {
                if (time_after_eq(jiffies, end))
                        goto out;
                cond_resched();
        }

        /*
         * We use writel (instead of something like memcpy_toio)
         * because writes of less than 32 bits to the HCR don't work
         * (and some architectures such as ia64 implement memcpy_toio
         * in terms of writeb).
         */
        __raw_writel((__force u32) cpu_to_be32(in_param >> 32),           hcr + 0);
        __raw_writel((__force u32) cpu_to_be32(in_param & 0xfffffffful),  hcr + 1);
        __raw_writel((__force u32) cpu_to_be32(in_modifier),              hcr + 2);
        __raw_writel((__force u32) cpu_to_be32(out_param >> 32),          hcr + 3);
        __raw_writel((__force u32) cpu_to_be32(out_param & 0xfffffffful), hcr + 4);
        __raw_writel((__force u32) cpu_to_be32(token << 16),              hcr + 5);

        /* __raw_writel may not order writes. */
        wmb();

        __raw_writel((__force u32) cpu_to_be32((1 << HCR_GO_BIT)                |
                                               (cmd->toggle << HCR_T_BIT)       |
                                               (event ? (1 << HCR_E_BIT) : 0)   |
                                               (op_modifier << HCR_OPMOD_SHIFT) |
                                               op),                       hcr + 6);

        /*
         * Make sure that our HCR writes don't get mixed in with
         * writes from another CPU starting a FW command.
         */
        mmiowb();

        cmd->toggle = cmd->toggle ^ 1;

        ret = 0;

out:
        mutex_unlock(&cmd->hcr_mutex);
        return ret;
}

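/*
 * Issue a command in polling mode: post it and busy-wait (with
 * cond_resched()) for the 'go' bit to clear, then read the status and
 * any immediate output back from the HCR.  poll_sem allows only one
 * polled command at a time.
 */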
static int mlx4_cmd_poll(struct mlx4_dev *dev, u64 in_param, u64 *out_param,
                         int out_is_imm, u32 in_modifier, u8 op_modifier,
                         u16 op, unsigned long timeout)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        void __iomem *hcr = priv->cmd.hcr;
        int err = 0;
        unsigned long end;

        down(&priv->cmd.poll_sem);

        err = mlx4_cmd_post(dev, in_param, out_param ? *out_param : 0,
                            in_modifier, op_modifier, op, CMD_POLL_TOKEN, 0);
        if (err)
                goto out;

        end = msecs_to_jiffies(timeout) + jiffies;
        while (cmd_pending(dev) && time_before(jiffies, end))
                cond_resched();

        if (cmd_pending(dev)) {
                err = -ETIMEDOUT;
                goto out;
        }

        if (out_is_imm)
                *out_param =
                        (u64) be32_to_cpu((__force __be32)
                                          __raw_readl(hcr + HCR_OUT_PARAM_OFFSET)) << 32 |
                        (u64) be32_to_cpu((__force __be32)
                                          __raw_readl(hcr + HCR_OUT_PARAM_OFFSET + 4));

        err = mlx4_status_to_errno(be32_to_cpu((__force __be32)
                                               __raw_readl(hcr + HCR_STATUS_OFFSET)) >> 24);

out:
        up(&priv->cmd.poll_sem);
        return err;
}

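/*
 * Called when a command-completion event arrives: look up the waiting
 * context by token, record the status and immediate output, and wake
 * the sleeper in mlx4_cmd_wait().
 */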
void mlx4_cmd_event(struct mlx4_dev *dev, u16 token, u8 status, u64 out_param)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        struct mlx4_cmd_context *context =
                &priv->cmd.context[token & priv->cmd.token_mask];

        /* previously timed out command completing at long last */
        if (token != context->token)
                return;

        context->result    = mlx4_status_to_errno(status);
        context->out_param = out_param;

        complete(&context->done);
}

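/*
 * Issue a command in event mode: grab a free context (event_sem bounds
 * the number of outstanding commands), advance its token so stale
 * completions can be detected, post the command and sleep until
 * mlx4_cmd_event() completes it or the timeout expires.
 */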
static int mlx4_cmd_wait(struct mlx4_dev *dev, u64 in_param, u64 *out_param,
                         int out_is_imm, u32 in_modifier, u8 op_modifier,
                         u16 op, unsigned long timeout)
{
        struct mlx4_cmd *cmd = &mlx4_priv(dev)->cmd;
        struct mlx4_cmd_context *context;
        int err = 0;

        down(&cmd->event_sem);

        spin_lock(&cmd->context_lock);
        BUG_ON(cmd->free_head < 0);
        context = &cmd->context[cmd->free_head];
        context->token += cmd->token_mask + 1;
        cmd->free_head = context->next;
        spin_unlock(&cmd->context_lock);

        init_completion(&context->done);

        /*
         * If posting the command failed, don't wait for a completion
         * that will never arrive.
         */
        err = mlx4_cmd_post(dev, in_param, out_param ? *out_param : 0,
                            in_modifier, op_modifier, op, context->token, 1);
        if (err)
                goto out;

        if (!wait_for_completion_timeout(&context->done, msecs_to_jiffies(timeout))) {
                err = -EBUSY;
                goto out;
        }

        err = context->result;
        if (err)
                goto out;

        if (out_is_imm)
                *out_param = context->out_param;

out:
        spin_lock(&cmd->context_lock);
        context->next = cmd->free_head;
        cmd->free_head = context - cmd->context;
        spin_unlock(&cmd->context_lock);

        up(&cmd->event_sem);
        return err;
}

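/*
 * Common entry point for issuing a FW command: dispatches to the
 * event-driven path once mlx4_cmd_use_events() has been called, and to
 * the polling path otherwise.  Callers normally go through the inline
 * wrappers declared in <linux/mlx4/cmd.h> (mlx4_cmd() and friends)
 * rather than using this directly.
 */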
int __mlx4_cmd(struct mlx4_dev *dev, u64 in_param, u64 *out_param,
               int out_is_imm, u32 in_modifier, u8 op_modifier,
               u16 op, unsigned long timeout)
{
        if (mlx4_priv(dev)->cmd.use_events)
                return mlx4_cmd_wait(dev, in_param, out_param, out_is_imm,
                                     in_modifier, op_modifier, op, timeout);
        else
                return mlx4_cmd_poll(dev, in_param, out_param, out_is_imm,
                                     in_modifier, op_modifier, op, timeout);
}
EXPORT_SYMBOL_GPL(__mlx4_cmd);

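/*
 * Map the HCR from BAR 0 and create the DMA pool used for command
 * mailboxes.  The command interface starts out in polling mode.
 */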
int mlx4_cmd_init(struct mlx4_dev *dev)
{
        struct mlx4_priv *priv = mlx4_priv(dev);

        mutex_init(&priv->cmd.hcr_mutex);
        sema_init(&priv->cmd.poll_sem, 1);
        priv->cmd.use_events = 0;
        priv->cmd.toggle     = 1;

        priv->cmd.hcr = ioremap(pci_resource_start(dev->pdev, 0) + MLX4_HCR_BASE,
                                MLX4_HCR_SIZE);
        if (!priv->cmd.hcr) {
                mlx4_err(dev, "Couldn't map command register.");
                return -ENOMEM;
        }

        priv->cmd.pool = pci_pool_create("mlx4_cmd", dev->pdev,
                                         MLX4_MAILBOX_SIZE,
                                         MLX4_MAILBOX_SIZE, 0);
        if (!priv->cmd.pool) {
                iounmap(priv->cmd.hcr);
                return -ENOMEM;
        }

        return 0;
}

void mlx4_cmd_cleanup(struct mlx4_dev *dev)
{
        struct mlx4_priv *priv = mlx4_priv(dev);

        pci_pool_destroy(priv->cmd.pool);
        iounmap(priv->cmd.hcr);
}

/*
 * Switch to using events to issue FW commands (can only be called
 * after event queue for command events has been initialized).
 */
int mlx4_cmd_use_events(struct mlx4_dev *dev)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        int i;

        priv->cmd.context = kmalloc(priv->cmd.max_cmds *
                                    sizeof (struct mlx4_cmd_context),
                                    GFP_KERNEL);
        if (!priv->cmd.context)
                return -ENOMEM;

        for (i = 0; i < priv->cmd.max_cmds; ++i) {
                priv->cmd.context[i].token = i;
                priv->cmd.context[i].next  = i + 1;
        }

        priv->cmd.context[priv->cmd.max_cmds - 1].next = -1;
        priv->cmd.free_head = 0;

        sema_init(&priv->cmd.event_sem, priv->cmd.max_cmds);
        spin_lock_init(&priv->cmd.context_lock);

        for (priv->cmd.token_mask = 1;
             priv->cmd.token_mask < priv->cmd.max_cmds;
             priv->cmd.token_mask <<= 1)
                ; /* nothing */
        --priv->cmd.token_mask;

        priv->cmd.use_events = 1;

        down(&priv->cmd.poll_sem);

        return 0;
}

/*
 * Switch back to polling (used when shutting down the device)
 */
void mlx4_cmd_use_polling(struct mlx4_dev *dev)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        int i;

        priv->cmd.use_events = 0;

        for (i = 0; i < priv->cmd.max_cmds; ++i)
                down(&priv->cmd.event_sem);

        kfree(priv->cmd.context);

        up(&priv->cmd.poll_sem);
}

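/*
 * Command mailboxes are MLX4_MAILBOX_SIZE DMA buffers allocated from
 * the command pool; mailbox->buf is the CPU address and mailbox->dma
 * is what gets passed to FW as the in/out parameter.  Typical usage
 * (a sketch, error handling elided):
 *
 *      mailbox = mlx4_alloc_cmd_mailbox(dev);
 *      if (IS_ERR(mailbox))
 *              return PTR_ERR(mailbox);
 *      ... fill mailbox->buf, issue the command with mailbox->dma ...
 *      mlx4_free_cmd_mailbox(dev, mailbox);
 */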
struct mlx4_cmd_mailbox *mlx4_alloc_cmd_mailbox(struct mlx4_dev *dev)
{
        struct mlx4_cmd_mailbox *mailbox;

        mailbox = kmalloc(sizeof *mailbox, GFP_KERNEL);
        if (!mailbox)
                return ERR_PTR(-ENOMEM);

        mailbox->buf = pci_pool_alloc(mlx4_priv(dev)->cmd.pool, GFP_KERNEL,
                                      &mailbox->dma);
        if (!mailbox->buf) {
                kfree(mailbox);
                return ERR_PTR(-ENOMEM);
        }

        return mailbox;
}
EXPORT_SYMBOL_GPL(mlx4_alloc_cmd_mailbox);

void mlx4_free_cmd_mailbox(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox)
{
        if (!mailbox)
                return;

        pci_pool_free(mlx4_priv(dev)->cmd.pool, mailbox->buf, mailbox->dma);
        kfree(mailbox);
}
EXPORT_SYMBOL_GPL(mlx4_free_cmd_mailbox);