cmd.c

/*
 * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved.
 * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2005, 2006, 2007 Cisco Systems, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/sched.h>
#include <linux/pci.h>
#include <linux/errno.h>

#include <linux/mlx4/cmd.h>

#include <asm/io.h>

#include "mlx4.h"

#define CMD_POLL_TOKEN 0xffff

enum {
	/* command completed successfully: */
	CMD_STAT_OK		= 0x00,
	/* Internal error (such as a bus error) occurred while processing command: */
	CMD_STAT_INTERNAL_ERR	= 0x01,
	/* Operation/command not supported or opcode modifier not supported: */
	CMD_STAT_BAD_OP		= 0x02,
	/* Parameter not supported or parameter out of range: */
	CMD_STAT_BAD_PARAM	= 0x03,
	/* System not enabled or bad system state: */
	CMD_STAT_BAD_SYS_STATE	= 0x04,
	/* Attempt to access reserved or unallocated resource: */
	CMD_STAT_BAD_RESOURCE	= 0x05,
	/* Requested resource is currently executing a command, or is otherwise busy: */
	CMD_STAT_RESOURCE_BUSY	= 0x06,
	/* Required capability exceeds device limits: */
	CMD_STAT_EXCEED_LIM	= 0x08,
	/* Resource is not in the appropriate state or ownership: */
	CMD_STAT_BAD_RES_STATE	= 0x09,
	/* Index out of range: */
	CMD_STAT_BAD_INDEX	= 0x0a,
	/* FW image corrupted: */
	CMD_STAT_BAD_NVMEM	= 0x0b,
	/* Attempt to modify a QP/EE which is not in the presumed state: */
	CMD_STAT_BAD_QP_STATE	= 0x10,
	/* Bad segment parameters (Address/Size): */
	CMD_STAT_BAD_SEG_PARAM	= 0x20,
	/* Memory Region has Memory Windows bound to it: */
	CMD_STAT_REG_BOUND	= 0x21,
	/* HCA local attached memory not present: */
	CMD_STAT_LAM_NOT_PRE	= 0x22,
	/* Bad management packet (silently discarded): */
	CMD_STAT_BAD_PKT	= 0x30,
	/* More outstanding CQEs in CQ than new CQ size: */
	CMD_STAT_BAD_SIZE	= 0x40
};

enum {
	HCR_IN_PARAM_OFFSET	= 0x00,
	HCR_IN_MODIFIER_OFFSET	= 0x08,
	HCR_OUT_PARAM_OFFSET	= 0x0c,
	HCR_TOKEN_OFFSET	= 0x14,
	HCR_STATUS_OFFSET	= 0x18,

	HCR_OPMOD_SHIFT		= 12,
	HCR_T_BIT		= 21,
	HCR_E_BIT		= 22,
	HCR_GO_BIT		= 23
};

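/*
 * Descriptive note (added, derived from mlx4_cmd_post() below): the
 * offsets above map the seven 32-bit words of the HCR as this file
 * writes them: in_param[63:32] at 0x00, in_param[31:0] at 0x04,
 * in_modifier at 0x08, out_param[63:32] at 0x0c, out_param[31:0] at
 * 0x10, the command token in bits 31:16 of the word at 0x14, and the
 * word at 0x18 carrying the go (23), e (22) and t (21) bits, the opcode
 * modifier (15:12) and the opcode (11:0).  On completion the status is
 * read back from bits 31:24 of that last word.
 */
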
enum {
	GO_BIT_TIMEOUT_MSECS	= 10000
};

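/*
 * Descriptive note (added): command contexts are used only in event
 * mode.  They sit in an array-backed free list linked through ->next
 * and headed by cmd->free_head.  The low bits of ->token select the
 * slot again in mlx4_cmd_event() (token & token_mask); the high bits
 * are bumped each time a slot is reused, so a completion that arrives
 * after its command already timed out is recognized and dropped.
 */
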
struct mlx4_cmd_context {
	struct completion	done;
	int			result;
	int			next;
	u64			out_param;
	u16			token;
};

static int mlx4_status_to_errno(u8 status)
{
	static const int trans_table[] = {
		[CMD_STAT_INTERNAL_ERR]	 = -EIO,
		[CMD_STAT_BAD_OP]	 = -EPERM,
		[CMD_STAT_BAD_PARAM]	 = -EINVAL,
		[CMD_STAT_BAD_SYS_STATE] = -ENXIO,
		[CMD_STAT_BAD_RESOURCE]	 = -EBADF,
		[CMD_STAT_RESOURCE_BUSY] = -EBUSY,
		[CMD_STAT_EXCEED_LIM]	 = -ENOMEM,
		[CMD_STAT_BAD_RES_STATE] = -EBADF,
		[CMD_STAT_BAD_INDEX]	 = -EBADF,
		[CMD_STAT_BAD_NVMEM]	 = -EFAULT,
		[CMD_STAT_BAD_QP_STATE]	 = -EINVAL,
		[CMD_STAT_BAD_SEG_PARAM] = -EFAULT,
		[CMD_STAT_REG_BOUND]	 = -EBUSY,
		[CMD_STAT_LAM_NOT_PRE]	 = -EAGAIN,
		[CMD_STAT_BAD_PKT]	 = -EINVAL,
		[CMD_STAT_BAD_SIZE]	 = -ENOMEM,
	};

	if (status >= ARRAY_SIZE(trans_table) ||
	    (status != CMD_STAT_OK && trans_table[status] == 0))
		return -EIO;

	return trans_table[status];
}

static int cmd_pending(struct mlx4_dev *dev)
{
	u32 status = readl(mlx4_priv(dev)->cmd.hcr + HCR_STATUS_OFFSET);

	return (status & swab32(1 << HCR_GO_BIT)) ||
		(mlx4_priv(dev)->cmd.toggle ==
		 !!(status & swab32(1 << HCR_T_BIT)));
}

static int mlx4_cmd_post(struct mlx4_dev *dev, u64 in_param, u64 out_param,
			 u32 in_modifier, u8 op_modifier, u16 op, u16 token,
			 int event)
{
	struct mlx4_cmd *cmd = &mlx4_priv(dev)->cmd;
	u32 __iomem *hcr = cmd->hcr;
	int ret = -EAGAIN;
	unsigned long end;

	mutex_lock(&cmd->hcr_mutex);

	end = jiffies;
	if (event)
		end += msecs_to_jiffies(GO_BIT_TIMEOUT_MSECS);

	while (cmd_pending(dev)) {
		if (time_after_eq(jiffies, end))
			goto out;
		cond_resched();
	}

	/*
	 * We use writel (instead of something like memcpy_toio)
	 * because writes of less than 32 bits to the HCR don't work
	 * (and some architectures such as ia64 implement memcpy_toio
	 * in terms of writeb).
	 */
	__raw_writel((__force u32) cpu_to_be32(in_param >> 32),		  hcr + 0);
	__raw_writel((__force u32) cpu_to_be32(in_param & 0xfffffffful),  hcr + 1);
	__raw_writel((__force u32) cpu_to_be32(in_modifier),		  hcr + 2);
	__raw_writel((__force u32) cpu_to_be32(out_param >> 32),	  hcr + 3);
	__raw_writel((__force u32) cpu_to_be32(out_param & 0xfffffffful), hcr + 4);
	__raw_writel((__force u32) cpu_to_be32(token << 16),		  hcr + 5);

	/* __raw_writel may not order writes. */
	wmb();

	__raw_writel((__force u32) cpu_to_be32((1 << HCR_GO_BIT)		|
					       (cmd->toggle << HCR_T_BIT)	|
					       (event ? (1 << HCR_E_BIT) : 0)	|
					       (op_modifier << HCR_OPMOD_SHIFT) |
					       op),			  hcr + 6);

	/*
	 * Make sure that our HCR writes don't get mixed in with
	 * writes from another CPU starting a FW command.
	 */
	mmiowb();

	cmd->toggle = cmd->toggle ^ 1;

	ret = 0;

out:
	mutex_unlock(&cmd->hcr_mutex);
	return ret;
}

static int mlx4_cmd_poll(struct mlx4_dev *dev, u64 in_param, u64 *out_param,
			 int out_is_imm, u32 in_modifier, u8 op_modifier,
			 u16 op, unsigned long timeout)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	void __iomem *hcr = priv->cmd.hcr;
	int err = 0;
	unsigned long end;

	down(&priv->cmd.poll_sem);

	err = mlx4_cmd_post(dev, in_param, out_param ? *out_param : 0,
			    in_modifier, op_modifier, op, CMD_POLL_TOKEN, 0);
	if (err)
		goto out;

	end = msecs_to_jiffies(timeout) + jiffies;
	while (cmd_pending(dev) && time_before(jiffies, end))
		cond_resched();

	if (cmd_pending(dev)) {
		err = -ETIMEDOUT;
		goto out;
	}

	if (out_is_imm)
		*out_param =
			(u64) be32_to_cpu((__force __be32)
					  __raw_readl(hcr + HCR_OUT_PARAM_OFFSET)) << 32 |
			(u64) be32_to_cpu((__force __be32)
					  __raw_readl(hcr + HCR_OUT_PARAM_OFFSET + 4));

	err = mlx4_status_to_errno(be32_to_cpu((__force __be32)
					       __raw_readl(hcr + HCR_STATUS_OFFSET)) >> 24);

out:
	up(&priv->cmd.poll_sem);
	return err;
}

void mlx4_cmd_event(struct mlx4_dev *dev, u16 token, u8 status, u64 out_param)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_cmd_context *context =
		&priv->cmd.context[token & priv->cmd.token_mask];

	/* previously timed out command completing at long last */
	if (token != context->token)
		return;

	context->result	   = mlx4_status_to_errno(status);
	context->out_param = out_param;

	complete(&context->done);
}

static int mlx4_cmd_wait(struct mlx4_dev *dev, u64 in_param, u64 *out_param,
			 int out_is_imm, u32 in_modifier, u8 op_modifier,
			 u16 op, unsigned long timeout)
{
	struct mlx4_cmd *cmd = &mlx4_priv(dev)->cmd;
	struct mlx4_cmd_context *context;
	int err = 0;

	down(&cmd->event_sem);

	spin_lock(&cmd->context_lock);
	BUG_ON(cmd->free_head < 0);
	context = &cmd->context[cmd->free_head];
	context->token += cmd->token_mask + 1;
	cmd->free_head = context->next;
	spin_unlock(&cmd->context_lock);

	init_completion(&context->done);

	/* If the post fails, no completion event will ever arrive,
	 * so bail out instead of waiting for the full timeout. */
	err = mlx4_cmd_post(dev, in_param, out_param ? *out_param : 0,
			    in_modifier, op_modifier, op, context->token, 1);
	if (err)
		goto out;

	if (!wait_for_completion_timeout(&context->done,
					 msecs_to_jiffies(timeout))) {
		err = -EBUSY;
		goto out;
	}

	err = context->result;
	if (err)
		goto out;

	if (out_is_imm)
		*out_param = context->out_param;

out:
	spin_lock(&cmd->context_lock);
	context->next = cmd->free_head;
	cmd->free_head = context - cmd->context;
	spin_unlock(&cmd->context_lock);

	up(&cmd->event_sem);
	return err;
}

int __mlx4_cmd(struct mlx4_dev *dev, u64 in_param, u64 *out_param,
	       int out_is_imm, u32 in_modifier, u8 op_modifier,
	       u16 op, unsigned long timeout)
{
	if (mlx4_priv(dev)->cmd.use_events)
		return mlx4_cmd_wait(dev, in_param, out_param, out_is_imm,
				     in_modifier, op_modifier, op, timeout);
	else
		return mlx4_cmd_poll(dev, in_param, out_param, out_is_imm,
				     in_modifier, op_modifier, op, timeout);
}
EXPORT_SYMBOL_GPL(__mlx4_cmd);

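/*
 * Illustrative sketch only, not called by the driver: how a caller is
 * expected to reach __mlx4_cmd() through the mlx4_cmd() wrapper declared
 * in <linux/mlx4/cmd.h>.  MLX4_CMD_NOP and MLX4_CMD_TIME_CLASS_A come
 * from that header; the function name and the modifier values here are
 * purely illustrative (real callers live in fw.c and friends).
 */
static int __maybe_unused example_issue_nop(struct mlx4_dev *dev)
{
	/* No input or output data: immediate parameters only. */
	return mlx4_cmd(dev, 0, 0, 0, MLX4_CMD_NOP, MLX4_CMD_TIME_CLASS_A);
}
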
int mlx4_cmd_init(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);

	mutex_init(&priv->cmd.hcr_mutex);
	sema_init(&priv->cmd.poll_sem, 1);
	priv->cmd.use_events = 0;
	priv->cmd.toggle     = 1;

	priv->cmd.hcr = ioremap(pci_resource_start(dev->pdev, 0) + MLX4_HCR_BASE,
				MLX4_HCR_SIZE);
	if (!priv->cmd.hcr) {
		mlx4_err(dev, "Couldn't map command register.\n");
		return -ENOMEM;
	}

	priv->cmd.pool = pci_pool_create("mlx4_cmd", dev->pdev,
					 MLX4_MAILBOX_SIZE,
					 MLX4_MAILBOX_SIZE, 0);
	if (!priv->cmd.pool) {
		iounmap(priv->cmd.hcr);
		return -ENOMEM;
	}

	return 0;
}

void mlx4_cmd_cleanup(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);

	pci_pool_destroy(priv->cmd.pool);
	iounmap(priv->cmd.hcr);
}

/*
 * Switch to using events to issue FW commands (can only be called
 * after event queue for command events has been initialized).
 */
int mlx4_cmd_use_events(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int i;

	priv->cmd.context = kmalloc(priv->cmd.max_cmds *
				    sizeof (struct mlx4_cmd_context),
				    GFP_KERNEL);
	if (!priv->cmd.context)
		return -ENOMEM;

	for (i = 0; i < priv->cmd.max_cmds; ++i) {
		priv->cmd.context[i].token = i;
		priv->cmd.context[i].next  = i + 1;
	}

	priv->cmd.context[priv->cmd.max_cmds - 1].next = -1;
	priv->cmd.free_head = 0;

	sema_init(&priv->cmd.event_sem, priv->cmd.max_cmds);
	spin_lock_init(&priv->cmd.context_lock);

	for (priv->cmd.token_mask = 1;
	     priv->cmd.token_mask < priv->cmd.max_cmds;
	     priv->cmd.token_mask <<= 1)
		; /* nothing */
	--priv->cmd.token_mask;

	priv->cmd.use_events = 1;

	down(&priv->cmd.poll_sem);

	return 0;
}

/*
 * Switch back to polling (used when shutting down the device)
 */
void mlx4_cmd_use_polling(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int i;

	priv->cmd.use_events = 0;

	for (i = 0; i < priv->cmd.max_cmds; ++i)
		down(&priv->cmd.event_sem);

	kfree(priv->cmd.context);

	up(&priv->cmd.poll_sem);
}

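/*
 * Illustrative sketch only, not part of the driver: the ordering the
 * rest of mlx4_core is expected to follow around these entry points
 * (compare mlx4_init_one() in main.c).  Error handling is omitted and
 * the function name is hypothetical.
 */
static void __maybe_unused example_cmd_lifecycle(struct mlx4_dev *dev)
{
	mlx4_cmd_init(dev);		/* map the HCR; start in polling mode */
	/* ... bring up EQs so command completion events can be delivered ... */
	mlx4_cmd_use_events(dev);	/* issue FW commands via events */
	/* ... normal operation ... */
	mlx4_cmd_use_polling(dev);	/* drain and fall back to polling */
	mlx4_cmd_cleanup(dev);		/* destroy the mailbox pool, unmap the HCR */
}
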
struct mlx4_cmd_mailbox *mlx4_alloc_cmd_mailbox(struct mlx4_dev *dev)
{
	struct mlx4_cmd_mailbox *mailbox;

	mailbox = kmalloc(sizeof *mailbox, GFP_KERNEL);
	if (!mailbox)
		return ERR_PTR(-ENOMEM);

	mailbox->buf = pci_pool_alloc(mlx4_priv(dev)->cmd.pool, GFP_KERNEL,
				      &mailbox->dma);
	if (!mailbox->buf) {
		kfree(mailbox);
		return ERR_PTR(-ENOMEM);
	}

	return mailbox;
}
EXPORT_SYMBOL_GPL(mlx4_alloc_cmd_mailbox);

void mlx4_free_cmd_mailbox(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox)
{
	if (!mailbox)
		return;

	pci_pool_free(mlx4_priv(dev)->cmd.pool, mailbox->buf, mailbox->dma);
	kfree(mailbox);
}
EXPORT_SYMBOL_GPL(mlx4_free_cmd_mailbox);

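/*
 * Illustrative sketch only, not part of the driver: the usual pattern
 * for a command that returns data through a mailbox, mirroring callers
 * such as fw.c.  The function name is hypothetical; mlx4_cmd_box(),
 * MLX4_CMD_QUERY_ADAPTER and MLX4_CMD_TIME_CLASS_A come from
 * <linux/mlx4/cmd.h>.
 */
static int __maybe_unused example_query_adapter(struct mlx4_dev *dev)
{
	struct mlx4_cmd_mailbox *mailbox;
	int err;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);

	/* mailbox->dma is handed to firmware as the command's out_param. */
	err = mlx4_cmd_box(dev, 0, mailbox->dma, 0, 0,
			   MLX4_CMD_QUERY_ADAPTER, MLX4_CMD_TIME_CLASS_A);

	/* ... on success the reply is in mailbox->buf ... */

	mlx4_free_cmd_mailbox(dev, mailbox);
	return err;
}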