icm.c

/*
 * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2006, 2007 Cisco Systems, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/init.h>
#include <linux/errno.h>

#include <linux/mlx4/cmd.h>

#include "mlx4.h"
#include "icm.h"
#include "fw.h"

/*
 * We allocate in as big chunks as we can, up to a maximum of 256 KB
 * per chunk.
 */
enum {
        MLX4_ICM_ALLOC_SIZE   = 1 << 18,
        MLX4_TABLE_CHUNK_SIZE = 1 << 18
};
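
/*
 * Free all the memory backing an ICM region: undo the DMA mapping of
 * each chunk that was mapped, release every page in the chunk, then
 * free the chunk and region structures themselves.
 */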
void mlx4_free_icm(struct mlx4_dev *dev, struct mlx4_icm *icm)
{
        struct mlx4_icm_chunk *chunk, *tmp;
        int i;

        list_for_each_entry_safe(chunk, tmp, &icm->chunk_list, list) {
                if (chunk->nsg > 0)
                        pci_unmap_sg(dev->pdev, chunk->mem, chunk->npages,
                                     PCI_DMA_BIDIRECTIONAL);

                for (i = 0; i < chunk->npages; ++i)
                        __free_pages(chunk->mem[i].page,
                                     get_order(chunk->mem[i].length));

                kfree(chunk);
        }

        kfree(icm);
}
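
/*
 * Allocate npages of memory for ICM, packing pages into chunks of up
 * to MLX4_ICM_CHUNK_LEN scatterlist entries.  Each entry is a
 * high-order allocation; the order is lowered whenever it would
 * overshoot the remaining page count or the allocation fails.  A
 * chunk is DMA-mapped as soon as it fills up, and once more at the
 * end for the final, partially filled chunk.
 */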
struct mlx4_icm *mlx4_alloc_icm(struct mlx4_dev *dev, int npages,
                                gfp_t gfp_mask)
{
        struct mlx4_icm *icm;
        struct mlx4_icm_chunk *chunk = NULL;
        int cur_order;

        icm = kmalloc(sizeof *icm, gfp_mask & ~(__GFP_HIGHMEM | __GFP_NOWARN));
        if (!icm)
                return icm;

        icm->refcount = 0;
        INIT_LIST_HEAD(&icm->chunk_list);

        cur_order = get_order(MLX4_ICM_ALLOC_SIZE);

        while (npages > 0) {
                if (!chunk) {
                        chunk = kmalloc(sizeof *chunk,
                                        gfp_mask & ~(__GFP_HIGHMEM | __GFP_NOWARN));
                        if (!chunk)
                                goto fail;

                        chunk->npages = 0;
                        chunk->nsg    = 0;
                        list_add_tail(&chunk->list, &icm->chunk_list);
                }

                while (1 << cur_order > npages)
                        --cur_order;

                chunk->mem[chunk->npages].page = alloc_pages(gfp_mask, cur_order);
                if (chunk->mem[chunk->npages].page) {
                        chunk->mem[chunk->npages].length = PAGE_SIZE << cur_order;
                        chunk->mem[chunk->npages].offset = 0;

                        if (++chunk->npages == MLX4_ICM_CHUNK_LEN) {
                                chunk->nsg = pci_map_sg(dev->pdev, chunk->mem,
                                                        chunk->npages,
                                                        PCI_DMA_BIDIRECTIONAL);

                                if (chunk->nsg <= 0)
                                        goto fail;

                                chunk = NULL;
                        }

                        npages -= 1 << cur_order;
                } else {
                        --cur_order;
                        if (cur_order < 0)
                                goto fail;
                }
        }

        if (chunk) {
                chunk->nsg = pci_map_sg(dev->pdev, chunk->mem,
                                        chunk->npages,
                                        PCI_DMA_BIDIRECTIONAL);

                if (chunk->nsg <= 0)
                        goto fail;
        }

        return icm;

fail:
        mlx4_free_icm(dev, icm);
        return NULL;
}
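
/*
 * Thin wrappers around the MAP_ICM and UNMAP_ICM firmware commands,
 * which attach and detach ICM pages at a given virtual address within
 * the device's ICM address space.
 */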
static int mlx4_MAP_ICM(struct mlx4_dev *dev, struct mlx4_icm *icm, u64 virt)
{
        return mlx4_map_cmd(dev, MLX4_CMD_MAP_ICM, icm, virt);
}

int mlx4_UNMAP_ICM(struct mlx4_dev *dev, u64 virt, u32 page_count)
{
        return mlx4_cmd(dev, virt, page_count, 0, MLX4_CMD_UNMAP_ICM,
                        MLX4_CMD_TIME_CLASS_B);
}
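
/*
 * Map a single page into ICM by building a one-entry mailbox of
 * (virtual address, DMA address) pairs and issuing MAP_ICM directly.
 */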
int mlx4_MAP_ICM_page(struct mlx4_dev *dev, u64 dma_addr, u64 virt)
{
        struct mlx4_cmd_mailbox *mailbox;
        __be64 *inbox;
        int err;

        mailbox = mlx4_alloc_cmd_mailbox(dev);
        if (IS_ERR(mailbox))
                return PTR_ERR(mailbox);
        inbox = mailbox->buf;

        inbox[0] = cpu_to_be64(virt);
        inbox[1] = cpu_to_be64(dma_addr);

        err = mlx4_cmd(dev, mailbox->dma, 1, 0, MLX4_CMD_MAP_ICM,
                       MLX4_CMD_TIME_CLASS_B);

        mlx4_free_cmd_mailbox(dev, mailbox);

        if (!err)
                mlx4_dbg(dev, "Mapped page at %llx to %llx for ICM.\n",
                         (unsigned long long) dma_addr, (unsigned long long) virt);

        return err;
}
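
/*
 * The auxiliary ICM area is not addressed by the driver, so
 * MAP_ICM_AUX is issued with a virtual address of -1 and
 * UNMAP_ICM_AUX carries no parameters at all.
 */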
int mlx4_MAP_ICM_AUX(struct mlx4_dev *dev, struct mlx4_icm *icm)
{
        return mlx4_map_cmd(dev, MLX4_CMD_MAP_ICM_AUX, icm, -1);
}

int mlx4_UNMAP_ICM_AUX(struct mlx4_dev *dev)
{
        return mlx4_cmd(dev, 0, 0, 0, MLX4_CMD_UNMAP_ICM_AUX,
                        MLX4_CMD_TIME_CLASS_B);
}
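
/*
 * Take a reference on the ICM chunk backing table entry @obj,
 * allocating and mapping the chunk on first use.  The table mutex
 * serializes refcount updates against concurrent gets and puts.
 */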
int mlx4_table_get(struct mlx4_dev *dev, struct mlx4_icm_table *table, int obj)
{
        int i = (obj & (table->num_obj - 1)) / (MLX4_TABLE_CHUNK_SIZE / table->obj_size);
        int ret = 0;

        mutex_lock(&table->mutex);

        if (table->icm[i]) {
                ++table->icm[i]->refcount;
                goto out;
        }

        table->icm[i] = mlx4_alloc_icm(dev, MLX4_TABLE_CHUNK_SIZE >> PAGE_SHIFT,
                                       (table->lowmem ? GFP_KERNEL : GFP_HIGHUSER) |
                                       __GFP_NOWARN);
        if (!table->icm[i]) {
                ret = -ENOMEM;
                goto out;
        }

        if (mlx4_MAP_ICM(dev, table->icm[i], table->virt +
                         (u64) i * MLX4_TABLE_CHUNK_SIZE)) {
                mlx4_free_icm(dev, table->icm[i]);
                table->icm[i] = NULL;
                ret = -ENOMEM;
                goto out;
        }

        ++table->icm[i]->refcount;

out:
        mutex_unlock(&table->mutex);
        return ret;
}
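
/*
 * Drop a reference on the chunk backing @obj; when the last reference
 * goes away, unmap the chunk from the device and free its memory.
 */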
void mlx4_table_put(struct mlx4_dev *dev, struct mlx4_icm_table *table, int obj)
{
        int i = (obj & (table->num_obj - 1)) / (MLX4_TABLE_CHUNK_SIZE / table->obj_size);

        mutex_lock(&table->mutex);

        if (--table->icm[i]->refcount == 0) {
                mlx4_UNMAP_ICM(dev, table->virt + i * MLX4_TABLE_CHUNK_SIZE,
                               MLX4_TABLE_CHUNK_SIZE / MLX4_ICM_PAGE_SIZE);
                mlx4_free_icm(dev, table->icm[i]);
                table->icm[i] = NULL;
        }

        mutex_unlock(&table->mutex);
}
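
/*
 * Return a kernel virtual address for table entry @obj by walking the
 * chunk's scatterlist until the page containing the object's byte
 * offset is found.  Only lowmem tables can be used here, since
 * highmem pages have no permanent kernel mapping.
 */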
void *mlx4_table_find(struct mlx4_icm_table *table, int obj)
{
        int idx, offset, i;
        struct mlx4_icm_chunk *chunk;
        struct mlx4_icm *icm;
        struct page *page = NULL;

        if (!table->lowmem)
                return NULL;

        mutex_lock(&table->mutex);

        /* Work in byte offsets so we can compare against sg entry lengths. */
        idx = (obj & (table->num_obj - 1)) * table->obj_size;
        icm = table->icm[idx / MLX4_TABLE_CHUNK_SIZE];
        offset = idx % MLX4_TABLE_CHUNK_SIZE;

        if (!icm)
                goto out;

        list_for_each_entry(chunk, &icm->chunk_list, list) {
                for (i = 0; i < chunk->npages; ++i) {
                        if (chunk->mem[i].length > offset) {
                                page = chunk->mem[i].page;
                                goto out;
                        }
                        offset -= chunk->mem[i].length;
                }
        }

out:
        mutex_unlock(&table->mutex);
        return page ? lowmem_page_address(page) + offset : NULL;
}
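
/*
 * Get references on all the chunks covering objects start..end,
 * stepping one chunk's worth of objects at a time.  On failure, put
 * back the references already taken so the table is left unchanged.
 */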
int mlx4_table_get_range(struct mlx4_dev *dev, struct mlx4_icm_table *table,
                         int start, int end)
{
        int inc = MLX4_TABLE_CHUNK_SIZE / table->obj_size;
        int i, err;

        for (i = start; i <= end; i += inc) {
                err = mlx4_table_get(dev, table, i);
                if (err)
                        goto fail;
        }

        return 0;

fail:
        while (i > start) {
                i -= inc;
                mlx4_table_put(dev, table, i);
        }

        return err;
}
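
/* Release the per-chunk references taken by mlx4_table_get_range(). */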
void mlx4_table_put_range(struct mlx4_dev *dev, struct mlx4_icm_table *table,
                          int start, int end)
{
        int i;

        for (i = start; i <= end; i += MLX4_TABLE_CHUNK_SIZE / table->obj_size)
                mlx4_table_put(dev, table, i);
}
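
/*
 * Set up an ICM table: size the chunk array, record the table
 * geometry, and pre-allocate, map and pin the chunks covering the
 * first 'reserved' objects so that firmware-owned entries are always
 * backed by memory.
 */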
int mlx4_init_icm_table(struct mlx4_dev *dev, struct mlx4_icm_table *table,
                        u64 virt, int obj_size, int nobj, int reserved,
                        int use_lowmem)
{
        int obj_per_chunk;
        int num_icm;
        unsigned chunk_size;
        int i;

        obj_per_chunk = MLX4_TABLE_CHUNK_SIZE / obj_size;
        num_icm = (nobj + obj_per_chunk - 1) / obj_per_chunk;

        table->icm      = kcalloc(num_icm, sizeof *table->icm, GFP_KERNEL);
        if (!table->icm)
                return -ENOMEM;
        table->virt     = virt;
        table->num_icm  = num_icm;
        table->num_obj  = nobj;
        table->obj_size = obj_size;
        table->lowmem   = use_lowmem;
        mutex_init(&table->mutex);

        for (i = 0; i * MLX4_TABLE_CHUNK_SIZE < reserved * obj_size; ++i) {
                chunk_size = MLX4_TABLE_CHUNK_SIZE;
                if ((i + 1) * MLX4_TABLE_CHUNK_SIZE > nobj * obj_size)
                        chunk_size = PAGE_ALIGN(nobj * obj_size -
                                                i * MLX4_TABLE_CHUNK_SIZE);

                table->icm[i] = mlx4_alloc_icm(dev, chunk_size >> PAGE_SHIFT,
                                               (use_lowmem ? GFP_KERNEL : GFP_HIGHUSER) |
                                               __GFP_NOWARN);
                if (!table->icm[i])
                        goto err;
                if (mlx4_MAP_ICM(dev, table->icm[i], virt + i * MLX4_TABLE_CHUNK_SIZE)) {
                        mlx4_free_icm(dev, table->icm[i]);
                        table->icm[i] = NULL;
                        goto err;
                }

                /*
                 * Add a reference to this ICM chunk so that it never
                 * gets freed (since it contains reserved firmware
                 * objects).
                 */
                ++table->icm[i]->refcount;
        }

        return 0;

err:
        for (i = 0; i < num_icm; ++i)
                if (table->icm[i]) {
                        mlx4_UNMAP_ICM(dev, virt + i * MLX4_TABLE_CHUNK_SIZE,
                                       MLX4_TABLE_CHUNK_SIZE / MLX4_ICM_PAGE_SIZE);
                        mlx4_free_icm(dev, table->icm[i]);
                }

        return -ENOMEM;
}
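
/*
 * Tear down an ICM table: unmap and free every remaining chunk,
 * including the pinned reserved ones, and release the chunk array.
 */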
void mlx4_cleanup_icm_table(struct mlx4_dev *dev, struct mlx4_icm_table *table)
{
        int i;

        for (i = 0; i < table->num_icm; ++i)
                if (table->icm[i]) {
                        mlx4_UNMAP_ICM(dev, table->virt + i * MLX4_TABLE_CHUNK_SIZE,
                                       MLX4_TABLE_CHUNK_SIZE / MLX4_ICM_PAGE_SIZE);
                        mlx4_free_icm(dev, table->icm[i]);
                }

        kfree(table->icm);
}