/* resource.c */
  1. /*
  2. * Copyright (c) 2009-2010 Chelsio, Inc. All rights reserved.
  3. *
  4. * This software is available to you under a choice of one of two
  5. * licenses. You may choose to be licensed under the terms of the GNU
  6. * General Public License (GPL) Version 2, available from the file
  7. * COPYING in the main directory of this source tree, or the
  8. * OpenIB.org BSD license below:
  9. *
  10. * Redistribution and use in source and binary forms, with or
  11. * without modification, are permitted provided that the following
  12. * conditions are met:
  13. *
  14. * - Redistributions of source code must retain the above
  15. * copyright notice, this list of conditions and the following
  16. * disclaimer.
  17. *
  18. * - Redistributions in binary form must reproduce the above
  19. * copyright notice, this list of conditions and the following
  20. * disclaimer in the documentation and/or other materials
  21. * provided with the distribution.
  22. *
  23. * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
  24. * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
  25. * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
  26. * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
  27. * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
  28. * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
  29. * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  30. * SOFTWARE.
  31. */
  32. /* Crude resource management */
  33. #include <linux/kernel.h>
  34. #include <linux/random.h>
  35. #include <linux/slab.h>
  36. #include <linux/kfifo.h>
  37. #include <linux/spinlock.h>
  38. #include <linux/errno.h>
  39. #include <linux/genalloc.h>
  40. #include "iw_cxgb4.h"
  41. #define RANDOM_SIZE 16
  42. static int __c4iw_init_resource_fifo(struct kfifo *fifo,
  43. spinlock_t *fifo_lock,
  44. u32 nr, u32 skip_low,
  45. u32 skip_high,
  46. int random)
  47. {
  48. u32 i, j, entry = 0, idx;
  49. u32 random_bytes;
  50. u32 rarray[16];
  51. spin_lock_init(fifo_lock);
  52. if (kfifo_alloc(fifo, nr * sizeof(u32), GFP_KERNEL))
  53. return -ENOMEM;
  54. for (i = 0; i < skip_low + skip_high; i++)
  55. kfifo_in(fifo, (unsigned char *) &entry, sizeof(u32));
  56. if (random) {
  57. j = 0;
  58. random_bytes = random32();
  59. for (i = 0; i < RANDOM_SIZE; i++)
  60. rarray[i] = i + skip_low;
  61. for (i = skip_low + RANDOM_SIZE; i < nr - skip_high; i++) {
  62. if (j >= RANDOM_SIZE) {
  63. j = 0;
  64. random_bytes = random32();
  65. }
  66. idx = (random_bytes >> (j * 2)) & 0xF;
  67. kfifo_in(fifo,
  68. (unsigned char *) &rarray[idx],
  69. sizeof(u32));
  70. rarray[idx] = i;
  71. j++;
  72. }
  73. for (i = 0; i < RANDOM_SIZE; i++)
  74. kfifo_in(fifo,
  75. (unsigned char *) &rarray[i],
  76. sizeof(u32));
  77. } else
  78. for (i = skip_low; i < nr - skip_high; i++)
  79. kfifo_in(fifo, (unsigned char *) &i, sizeof(u32));
  80. for (i = 0; i < skip_low + skip_high; i++)
  81. if (kfifo_out_locked(fifo, (unsigned char *) &entry,
  82. sizeof(u32), fifo_lock))
  83. break;
  84. return 0;
  85. }
  86. static int c4iw_init_resource_fifo(struct kfifo *fifo, spinlock_t * fifo_lock,
  87. u32 nr, u32 skip_low, u32 skip_high)
  88. {
  89. return __c4iw_init_resource_fifo(fifo, fifo_lock, nr, skip_low,
  90. skip_high, 0);
  91. }
  92. static int c4iw_init_resource_fifo_random(struct kfifo *fifo,
  93. spinlock_t *fifo_lock,
  94. u32 nr, u32 skip_low, u32 skip_high)
  95. {
  96. return __c4iw_init_resource_fifo(fifo, fifo_lock, nr, skip_low,
  97. skip_high, 1);
  98. }
  99. static int c4iw_init_qid_fifo(struct c4iw_rdev *rdev)
  100. {
  101. u32 i;
  102. spin_lock_init(&rdev->resource.qid_fifo_lock);
  103. if (kfifo_alloc(&rdev->resource.qid_fifo, T4_MAX_QIDS * sizeof(u32),
  104. GFP_KERNEL))
  105. return -ENOMEM;
  106. for (i = T4_QID_BASE; i < T4_QID_BASE + T4_MAX_QIDS; i++)
  107. if (!(i & rdev->qpmask))
  108. kfifo_in(&rdev->resource.qid_fifo,
  109. (unsigned char *) &i, sizeof(u32));
  110. return 0;
  111. }
  112. /* nr_* must be power of 2 */
  113. int c4iw_init_resource(struct c4iw_rdev *rdev, u32 nr_tpt, u32 nr_pdid)
  114. {
  115. int err = 0;
  116. err = c4iw_init_resource_fifo_random(&rdev->resource.tpt_fifo,
  117. &rdev->resource.tpt_fifo_lock,
  118. nr_tpt, 1, 0);
  119. if (err)
  120. goto tpt_err;
  121. err = c4iw_init_qid_fifo(rdev);
  122. if (err)
  123. goto qid_err;
  124. err = c4iw_init_resource_fifo(&rdev->resource.pdid_fifo,
  125. &rdev->resource.pdid_fifo_lock,
  126. nr_pdid, 1, 0);
  127. if (err)
  128. goto pdid_err;
  129. return 0;
  130. pdid_err:
  131. kfifo_free(&rdev->resource.qid_fifo);
  132. qid_err:
  133. kfifo_free(&rdev->resource.tpt_fifo);
  134. tpt_err:
  135. return -ENOMEM;
  136. }
  137. /*
  138. * returns 0 if no resource available
  139. */
  140. u32 c4iw_get_resource(struct kfifo *fifo, spinlock_t *lock)
  141. {
  142. u32 entry;
  143. if (kfifo_out_locked(fifo, (unsigned char *) &entry, sizeof(u32), lock))
  144. return entry;
  145. else
  146. return 0;
  147. }
  148. void c4iw_put_resource(struct kfifo *fifo, u32 entry, spinlock_t *lock)
  149. {
  150. PDBG("%s entry 0x%x\n", __func__, entry);
  151. kfifo_in_locked(fifo, (unsigned char *) &entry, sizeof(u32), lock);
  152. }
/*
 * Allocate a cq qid.  Prefer a qid cached on the per-context cqids
 * list; otherwise pull a new aligned qid from the qid fifo and cache
 * the sibling qids covered by qpmask (they share the same db/gts page)
 * on both the cqids and qpids lists.
 *
 * Returns the qid, or 0 if none is available.  NOTE(review): a kmalloc
 * failure mid-loop returns the qid with only some siblings cached —
 * best effort, matching the existing design.
 */
u32 c4iw_get_cqid(struct c4iw_rdev *rdev, struct c4iw_dev_ucontext *uctx)
{
	struct c4iw_qid_list *entry;
	u32 qid;
	int i;

	mutex_lock(&uctx->lock);
	if (!list_empty(&uctx->cqids)) {
		/* Reuse a previously cached cq qid. */
		entry = list_entry(uctx->cqids.next, struct c4iw_qid_list,
				   entry);
		list_del(&entry->entry);
		qid = entry->qid;
		kfree(entry);
	} else {
		qid = c4iw_get_resource(&rdev->resource.qid_fifo,
					&rdev->resource.qid_fifo_lock);
		if (!qid)
			goto out;
		/* Cache the sibling cq qids in the same qpmask block. */
		for (i = qid+1; i & rdev->qpmask; i++) {
			entry = kmalloc(sizeof *entry, GFP_KERNEL);
			if (!entry)
				goto out;
			entry->qid = i;
			list_add_tail(&entry->entry, &uctx->cqids);
		}
		/*
		 * now put the same ids on the qp list since they all
		 * map to the same db/gts page.
		 */
		entry = kmalloc(sizeof *entry, GFP_KERNEL);
		if (!entry)
			goto out;
		entry->qid = qid;
		list_add_tail(&entry->entry, &uctx->qpids);
		for (i = qid+1; i & rdev->qpmask; i++) {
			entry = kmalloc(sizeof *entry, GFP_KERNEL);
			if (!entry)
				goto out;
			entry->qid = i;
			list_add_tail(&entry->entry, &uctx->qpids);
		}
	}
out:
	mutex_unlock(&uctx->lock);
	PDBG("%s qid 0x%x\n", __func__, qid);
	return qid;
}
  199. void c4iw_put_cqid(struct c4iw_rdev *rdev, u32 qid,
  200. struct c4iw_dev_ucontext *uctx)
  201. {
  202. struct c4iw_qid_list *entry;
  203. entry = kmalloc(sizeof *entry, GFP_KERNEL);
  204. if (!entry)
  205. return;
  206. PDBG("%s qid 0x%x\n", __func__, qid);
  207. entry->qid = qid;
  208. mutex_lock(&uctx->lock);
  209. list_add_tail(&entry->entry, &uctx->cqids);
  210. mutex_unlock(&uctx->lock);
  211. }
  212. u32 c4iw_get_qpid(struct c4iw_rdev *rdev, struct c4iw_dev_ucontext *uctx)
  213. {
  214. struct c4iw_qid_list *entry;
  215. u32 qid;
  216. int i;
  217. mutex_lock(&uctx->lock);
  218. if (!list_empty(&uctx->qpids)) {
  219. entry = list_entry(uctx->qpids.next, struct c4iw_qid_list,
  220. entry);
  221. list_del(&entry->entry);
  222. qid = entry->qid;
  223. kfree(entry);
  224. } else {
  225. qid = c4iw_get_resource(&rdev->resource.qid_fifo,
  226. &rdev->resource.qid_fifo_lock);
  227. if (!qid)
  228. goto out;
  229. for (i = qid+1; i & rdev->qpmask; i++) {
  230. entry = kmalloc(sizeof *entry, GFP_KERNEL);
  231. if (!entry)
  232. goto out;
  233. entry->qid = i;
  234. list_add_tail(&entry->entry, &uctx->qpids);
  235. }
  236. /*
  237. * now put the same ids on the cq list since they all
  238. * map to the same db/gts page.
  239. */
  240. entry = kmalloc(sizeof *entry, GFP_KERNEL);
  241. if (!entry)
  242. goto out;
  243. entry->qid = qid;
  244. list_add_tail(&entry->entry, &uctx->cqids);
  245. for (i = qid; i & rdev->qpmask; i++) {
  246. entry = kmalloc(sizeof *entry, GFP_KERNEL);
  247. if (!entry)
  248. goto out;
  249. entry->qid = i;
  250. list_add_tail(&entry->entry, &uctx->cqids);
  251. }
  252. }
  253. out:
  254. mutex_unlock(&uctx->lock);
  255. PDBG("%s qid 0x%x\n", __func__, qid);
  256. return qid;
  257. }
  258. void c4iw_put_qpid(struct c4iw_rdev *rdev, u32 qid,
  259. struct c4iw_dev_ucontext *uctx)
  260. {
  261. struct c4iw_qid_list *entry;
  262. entry = kmalloc(sizeof *entry, GFP_KERNEL);
  263. if (!entry)
  264. return;
  265. PDBG("%s qid 0x%x\n", __func__, qid);
  266. entry->qid = qid;
  267. mutex_lock(&uctx->lock);
  268. list_add_tail(&entry->entry, &uctx->qpids);
  269. mutex_unlock(&uctx->lock);
  270. }
  271. void c4iw_destroy_resource(struct c4iw_resource *rscp)
  272. {
  273. kfifo_free(&rscp->tpt_fifo);
  274. kfifo_free(&rscp->qid_fifo);
  275. kfifo_free(&rscp->pdid_fifo);
  276. }
  277. /*
  278. * PBL Memory Manager. Uses Linux generic allocator.
  279. */
  280. #define MIN_PBL_SHIFT 8 /* 256B == min PBL size (32 entries) */
  281. u32 c4iw_pblpool_alloc(struct c4iw_rdev *rdev, int size)
  282. {
  283. unsigned long addr = gen_pool_alloc(rdev->pbl_pool, size);
  284. PDBG("%s addr 0x%x size %d\n", __func__, (u32)addr, size);
  285. return (u32)addr;
  286. }
  287. void c4iw_pblpool_free(struct c4iw_rdev *rdev, u32 addr, int size)
  288. {
  289. PDBG("%s addr 0x%x size %d\n", __func__, addr, size);
  290. gen_pool_free(rdev->pbl_pool, (unsigned long)addr, size);
  291. }
/*
 * Create the PBL gen_pool and seed it with the adapter's PBL memory
 * range (rdev->lldi.vr->pbl).  If gen_pool_add fails, the chunk size
 * is halved and retried; once the chunk falls to 1024 << MIN_PBL_SHIFT
 * we warn and report partial success (return 0).
 *
 * Returns 0 on (possibly partial) success, -ENOMEM if the pool itself
 * cannot be created.
 */
int c4iw_pblpool_create(struct c4iw_rdev *rdev)
{
	unsigned pbl_start, pbl_chunk, pbl_top;

	rdev->pbl_pool = gen_pool_create(MIN_PBL_SHIFT, -1);
	if (!rdev->pbl_pool)
		return -ENOMEM;

	pbl_start = rdev->lldi.vr->pbl.start;
	pbl_chunk = rdev->lldi.vr->pbl.size;
	pbl_top = pbl_start + pbl_chunk;

	while (pbl_start < pbl_top) {
		/*
		 * NOTE(review): the +1 can make pbl_chunk one byte larger
		 * than the space remaining — looks like an off-by-one, but
		 * it is the historical behavior; confirm before changing.
		 */
		pbl_chunk = min(pbl_top - pbl_start + 1, pbl_chunk);
		if (gen_pool_add(rdev->pbl_pool, pbl_start, pbl_chunk, -1)) {
			PDBG("%s failed to add PBL chunk (%x/%x)\n",
			     __func__, pbl_start, pbl_chunk);
			if (pbl_chunk <= 1024 << MIN_PBL_SHIFT) {
				printk(KERN_WARNING MOD
				       "Failed to add all PBL chunks (%x/%x)\n",
				       pbl_start,
				       pbl_top - pbl_start);
				return 0;
			}
			pbl_chunk >>= 1;
		} else {
			PDBG("%s added PBL chunk (%x/%x)\n",
			     __func__, pbl_start, pbl_chunk);
			pbl_start += pbl_chunk;
		}
	}
	return 0;
}
  322. void c4iw_pblpool_destroy(struct c4iw_rdev *rdev)
  323. {
  324. gen_pool_destroy(rdev->pbl_pool);
  325. }
  326. /*
  327. * RQT Memory Manager. Uses Linux generic allocator.
  328. */
  329. #define MIN_RQT_SHIFT 10 /* 1KB == min RQT size (16 entries) */
  330. u32 c4iw_rqtpool_alloc(struct c4iw_rdev *rdev, int size)
  331. {
  332. unsigned long addr = gen_pool_alloc(rdev->rqt_pool, size << 6);
  333. PDBG("%s addr 0x%x size %d\n", __func__, (u32)addr, size << 6);
  334. return (u32)addr;
  335. }
  336. void c4iw_rqtpool_free(struct c4iw_rdev *rdev, u32 addr, int size)
  337. {
  338. PDBG("%s addr 0x%x size %d\n", __func__, addr, size << 6);
  339. gen_pool_free(rdev->rqt_pool, (unsigned long)addr, size << 6);
  340. }
/*
 * Create the RQT gen_pool and seed it with the adapter's RQ memory
 * range (rdev->lldi.vr->rq).  If gen_pool_add fails, the chunk size is
 * halved and retried; once the chunk falls to 1024 << MIN_RQT_SHIFT we
 * warn and report partial success (return 0).
 *
 * Returns 0 on (possibly partial) success, -ENOMEM if the pool itself
 * cannot be created.
 */
int c4iw_rqtpool_create(struct c4iw_rdev *rdev)
{
	unsigned rqt_start, rqt_chunk, rqt_top;

	rdev->rqt_pool = gen_pool_create(MIN_RQT_SHIFT, -1);
	if (!rdev->rqt_pool)
		return -ENOMEM;

	rqt_start = rdev->lldi.vr->rq.start;
	rqt_chunk = rdev->lldi.vr->rq.size;
	rqt_top = rqt_start + rqt_chunk;

	while (rqt_start < rqt_top) {
		/*
		 * NOTE(review): same suspicious +1 as the PBL pool; it
		 * matches the historical behavior — confirm before changing.
		 */
		rqt_chunk = min(rqt_top - rqt_start + 1, rqt_chunk);
		if (gen_pool_add(rdev->rqt_pool, rqt_start, rqt_chunk, -1)) {
			PDBG("%s failed to add RQT chunk (%x/%x)\n",
			     __func__, rqt_start, rqt_chunk);
			if (rqt_chunk <= 1024 << MIN_RQT_SHIFT) {
				printk(KERN_WARNING MOD
				       "Failed to add all RQT chunks (%x/%x)\n",
				       rqt_start, rqt_top - rqt_start);
				return 0;
			}
			rqt_chunk >>= 1;
		} else {
			PDBG("%s added RQT chunk (%x/%x)\n",
			     __func__, rqt_start, rqt_chunk);
			rqt_start += rqt_chunk;
		}
	}
	return 0;
}
  370. void c4iw_rqtpool_destroy(struct c4iw_rdev *rdev)
  371. {
  372. gen_pool_destroy(rdev->rqt_pool);
  373. }