resource.c
/*
 * Copyright (c) 2009-2010 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
/* Crude resource management */

#include <linux/kernel.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/kfifo.h>
#include <linux/spinlock.h>
#include <linux/errno.h>
#include <linux/genalloc.h>
#include "iw_cxgb4.h"

#define RANDOM_SIZE 16
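/*
 * Build a free-ID fifo covering [skip_low, nr - skip_high).  The skipped
 * low/high IDs are first pushed as zero-valued placeholders and then
 * drained back out, so they are never handed to callers.  When @random
 * is set, IDs are pushed through a 16-entry staging array and picked
 * with 4-bit indexes taken from overlapping 2-bit-stepped windows of a
 * random32() word (a fresh word every RANDOM_SIZE picks), which
 * shuffles the allocation order without changing the set of IDs.
 */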
static int __c4iw_init_resource_fifo(struct kfifo *fifo,
                                     spinlock_t *fifo_lock,
                                     u32 nr, u32 skip_low,
                                     u32 skip_high,
                                     int random)
{
        u32 i, j, entry = 0, idx;
        u32 random_bytes;
        u32 rarray[16];

        spin_lock_init(fifo_lock);
        if (kfifo_alloc(fifo, nr * sizeof(u32), GFP_KERNEL))
                return -ENOMEM;

        for (i = 0; i < skip_low + skip_high; i++)
                kfifo_in(fifo, (unsigned char *) &entry, sizeof(u32));
        if (random) {
                j = 0;
                random_bytes = random32();
                for (i = 0; i < RANDOM_SIZE; i++)
                        rarray[i] = i + skip_low;
                for (i = skip_low + RANDOM_SIZE; i < nr - skip_high; i++) {
                        if (j >= RANDOM_SIZE) {
                                j = 0;
                                random_bytes = random32();
                        }
                        idx = (random_bytes >> (j * 2)) & 0xF;
                        kfifo_in(fifo,
                                 (unsigned char *) &rarray[idx],
                                 sizeof(u32));
                        rarray[idx] = i;
                        j++;
                }
                for (i = 0; i < RANDOM_SIZE; i++)
                        kfifo_in(fifo,
                                 (unsigned char *) &rarray[i],
                                 sizeof(u32));
        } else
                for (i = skip_low; i < nr - skip_high; i++)
                        kfifo_in(fifo, (unsigned char *) &i, sizeof(u32));

        for (i = 0; i < skip_low + skip_high; i++)
                if (kfifo_out_locked(fifo, (unsigned char *) &entry,
                                     sizeof(u32), fifo_lock))
                        break;
        return 0;
}
static int c4iw_init_resource_fifo(struct kfifo *fifo, spinlock_t *fifo_lock,
                                   u32 nr, u32 skip_low, u32 skip_high)
{
        return __c4iw_init_resource_fifo(fifo, fifo_lock, nr, skip_low,
                                         skip_high, 0);
}

static int c4iw_init_resource_fifo_random(struct kfifo *fifo,
                                          spinlock_t *fifo_lock,
                                          u32 nr, u32 skip_low, u32 skip_high)
{
        return __c4iw_init_resource_fifo(fifo, fifo_lock, nr, skip_low,
                                         skip_high, 1);
}
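/*
 * Seed the qid fifo with only the queue IDs that are aligned to
 * qpmask + 1.  Each aligned ID stands for a whole block of qpmask + 1
 * consecutive qids sharing one doorbell/GTS page; the unaligned IDs in
 * a block are parcelled out later via the per-ucontext lists in
 * c4iw_get_cqid()/c4iw_get_qpid().
 */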
static int c4iw_init_qid_fifo(struct c4iw_rdev *rdev)
{
        u32 i;

        spin_lock_init(&rdev->resource.qid_fifo_lock);

        if (kfifo_alloc(&rdev->resource.qid_fifo, rdev->lldi.vr->qp.size *
                        sizeof(u32), GFP_KERNEL))
                return -ENOMEM;

        for (i = rdev->lldi.vr->qp.start;
             i < rdev->lldi.vr->qp.start + rdev->lldi.vr->qp.size; i++)
                if (!(i & rdev->qpmask))
                        kfifo_in(&rdev->resource.qid_fifo,
                                 (unsigned char *) &i, sizeof(u32));
        return 0;
}
/* nr_* must be power of 2 */
int c4iw_init_resource(struct c4iw_rdev *rdev, u32 nr_tpt, u32 nr_pdid)
{
        int err = 0;

        err = c4iw_init_resource_fifo_random(&rdev->resource.tpt_fifo,
                                             &rdev->resource.tpt_fifo_lock,
                                             nr_tpt, 1, 0);
        if (err)
                goto tpt_err;
        err = c4iw_init_qid_fifo(rdev);
        if (err)
                goto qid_err;
        err = c4iw_init_resource_fifo(&rdev->resource.pdid_fifo,
                                      &rdev->resource.pdid_fifo_lock,
                                      nr_pdid, 1, 0);
        if (err)
                goto pdid_err;
        return 0;
pdid_err:
        kfifo_free(&rdev->resource.qid_fifo);
qid_err:
        kfifo_free(&rdev->resource.tpt_fifo);
tpt_err:
        return -ENOMEM;
}
/*
 * returns 0 if no resource available
 */
u32 c4iw_get_resource(struct kfifo *fifo, spinlock_t *lock)
{
        u32 entry;

        if (kfifo_out_locked(fifo, (unsigned char *) &entry, sizeof(u32),
                             lock))
                return entry;
        else
                return 0;
}
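/*
 * Typical caller pattern (a sketch only; the real PD allocation path
 * lives elsewhere in the driver):
 *
 *      pdid = c4iw_get_resource(&rdev->resource.pdid_fifo,
 *                               &rdev->resource.pdid_fifo_lock);
 *      if (!pdid)
 *              return -ENOMEM;
 *      ...
 *      c4iw_put_resource(&rdev->resource.pdid_fifo, pdid,
 *                        &rdev->resource.pdid_fifo_lock);
 *
 * Note the put side takes (fifo, entry, lock): entry before lock.
 */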
void c4iw_put_resource(struct kfifo *fifo, u32 entry, spinlock_t *lock)
{
        PDBG("%s entry 0x%x\n", __func__, entry);
        kfifo_in_locked(fifo, (unsigned char *) &entry, sizeof(u32), lock);
}
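/*
 * Queue IDs come out of the fifo only in aligned blocks of qpmask + 1.
 * The allocator below returns the first ID of a fresh block, stashes
 * the block's remaining IDs on the ucontext's cqid list, and mirrors
 * the whole block onto the qpid list, since every qid in the block maps
 * to the same doorbell/GTS page.  Later requests are satisfied from the
 * per-context lists without touching the fifo.
 */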
u32 c4iw_get_cqid(struct c4iw_rdev *rdev, struct c4iw_dev_ucontext *uctx)
{
        struct c4iw_qid_list *entry;
        u32 qid;
        int i;

        mutex_lock(&uctx->lock);
        if (!list_empty(&uctx->cqids)) {
                entry = list_entry(uctx->cqids.next, struct c4iw_qid_list,
                                   entry);
                list_del(&entry->entry);
                qid = entry->qid;
                kfree(entry);
        } else {
                qid = c4iw_get_resource(&rdev->resource.qid_fifo,
                                        &rdev->resource.qid_fifo_lock);
                if (!qid)
                        goto out;
                for (i = qid + 1; i & rdev->qpmask; i++) {
                        entry = kmalloc(sizeof *entry, GFP_KERNEL);
                        if (!entry)
                                goto out;
                        entry->qid = i;
                        list_add_tail(&entry->entry, &uctx->cqids);
                }

                /*
                 * now put the same ids on the qp list since they all
                 * map to the same db/gts page.
                 */
                entry = kmalloc(sizeof *entry, GFP_KERNEL);
                if (!entry)
                        goto out;
                entry->qid = qid;
                list_add_tail(&entry->entry, &uctx->qpids);
                for (i = qid + 1; i & rdev->qpmask; i++) {
                        entry = kmalloc(sizeof *entry, GFP_KERNEL);
                        if (!entry)
                                goto out;
                        entry->qid = i;
                        list_add_tail(&entry->entry, &uctx->qpids);
                }
        }
out:
        mutex_unlock(&uctx->lock);
        PDBG("%s qid 0x%x\n", __func__, qid);
        return qid;
}
void c4iw_put_cqid(struct c4iw_rdev *rdev, u32 qid,
                   struct c4iw_dev_ucontext *uctx)
{
        struct c4iw_qid_list *entry;

        entry = kmalloc(sizeof *entry, GFP_KERNEL);
        if (!entry)
                return;
        PDBG("%s qid 0x%x\n", __func__, qid);
        entry->qid = qid;
        mutex_lock(&uctx->lock);
        list_add_tail(&entry->entry, &uctx->cqids);
        mutex_unlock(&uctx->lock);
}
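/*
 * Mirror image of c4iw_get_cqid(): satisfy from the qpid list first,
 * and on a fifo refill stash the block's extra IDs on the qpid list
 * while copying the whole block onto the cqid list.
 */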
u32 c4iw_get_qpid(struct c4iw_rdev *rdev, struct c4iw_dev_ucontext *uctx)
{
        struct c4iw_qid_list *entry;
        u32 qid;
        int i;

        mutex_lock(&uctx->lock);
        if (!list_empty(&uctx->qpids)) {
                entry = list_entry(uctx->qpids.next, struct c4iw_qid_list,
                                   entry);
                list_del(&entry->entry);
                qid = entry->qid;
                kfree(entry);
        } else {
                qid = c4iw_get_resource(&rdev->resource.qid_fifo,
                                        &rdev->resource.qid_fifo_lock);
                if (!qid)
                        goto out;
                for (i = qid + 1; i & rdev->qpmask; i++) {
                        entry = kmalloc(sizeof *entry, GFP_KERNEL);
                        if (!entry)
                                goto out;
                        entry->qid = i;
                        list_add_tail(&entry->entry, &uctx->qpids);
                }

                /*
                 * now put the same ids on the cq list since they all
                 * map to the same db/gts page.
                 */
                entry = kmalloc(sizeof *entry, GFP_KERNEL);
                if (!entry)
                        goto out;
                entry->qid = qid;
                list_add_tail(&entry->entry, &uctx->cqids);
                /* qid is block-aligned, so the extras start at qid + 1 */
                for (i = qid + 1; i & rdev->qpmask; i++) {
                        entry = kmalloc(sizeof *entry, GFP_KERNEL);
                        if (!entry)
                                goto out;
                        entry->qid = i;
                        list_add_tail(&entry->entry, &uctx->cqids);
                }
        }
out:
        mutex_unlock(&uctx->lock);
        PDBG("%s qid 0x%x\n", __func__, qid);
        return qid;
}
void c4iw_put_qpid(struct c4iw_rdev *rdev, u32 qid,
                   struct c4iw_dev_ucontext *uctx)
{
        struct c4iw_qid_list *entry;

        entry = kmalloc(sizeof *entry, GFP_KERNEL);
        if (!entry)
                return;
        PDBG("%s qid 0x%x\n", __func__, qid);
        entry->qid = qid;
        mutex_lock(&uctx->lock);
        list_add_tail(&entry->entry, &uctx->qpids);
        mutex_unlock(&uctx->lock);
}
void c4iw_destroy_resource(struct c4iw_resource *rscp)
{
        kfifo_free(&rscp->tpt_fifo);
        kfifo_free(&rscp->qid_fifo);
        kfifo_free(&rscp->pdid_fifo);
}
/*
 * PBL Memory Manager.  Uses Linux generic allocator.
 */

#define MIN_PBL_SHIFT 8                 /* 256B == min PBL size (32 entries) */
u32 c4iw_pblpool_alloc(struct c4iw_rdev *rdev, int size)
{
        unsigned long addr = gen_pool_alloc(rdev->pbl_pool, size);

        PDBG("%s addr 0x%x size %d\n", __func__, (u32)addr, size);
        if (!addr && printk_ratelimit())
                printk(KERN_WARNING MOD "%s: Out of PBL memory\n",
                       pci_name(rdev->lldi.pdev));
        return (u32)addr;
}

void c4iw_pblpool_free(struct c4iw_rdev *rdev, u32 addr, int size)
{
        PDBG("%s addr 0x%x size %d\n", __func__, addr, size);
        gen_pool_free(rdev->pbl_pool, (unsigned long)addr, size);
}
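/*
 * Seed the gen_pool with the adapter's PBL region.  If gen_pool_add()
 * cannot take the current chunk, halve the chunk size and retry; once
 * chunks are no larger than 1024 << MIN_PBL_SHIFT, warn and run with
 * whatever was successfully added.
 */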
int c4iw_pblpool_create(struct c4iw_rdev *rdev)
{
        unsigned pbl_start, pbl_chunk, pbl_top;

        rdev->pbl_pool = gen_pool_create(MIN_PBL_SHIFT, -1);
        if (!rdev->pbl_pool)
                return -ENOMEM;

        pbl_start = rdev->lldi.vr->pbl.start;
        pbl_chunk = rdev->lldi.vr->pbl.size;
        pbl_top = pbl_start + pbl_chunk;

        while (pbl_start < pbl_top) {
                pbl_chunk = min(pbl_top - pbl_start + 1, pbl_chunk);
                if (gen_pool_add(rdev->pbl_pool, pbl_start, pbl_chunk, -1)) {
                        PDBG("%s failed to add PBL chunk (%x/%x)\n",
                             __func__, pbl_start, pbl_chunk);
                        if (pbl_chunk <= 1024 << MIN_PBL_SHIFT) {
                                printk(KERN_WARNING MOD
                                       "Failed to add all PBL chunks (%x/%x)\n",
                                       pbl_start, pbl_top - pbl_start);
                                return 0;
                        }
                        pbl_chunk >>= 1;
                } else {
                        PDBG("%s added PBL chunk (%x/%x)\n",
                             __func__, pbl_start, pbl_chunk);
                        pbl_start += pbl_chunk;
                }
        }

        return 0;
}
void c4iw_pblpool_destroy(struct c4iw_rdev *rdev)
{
        gen_pool_destroy(rdev->pbl_pool);
}
/*
 * RQT Memory Manager.  Uses Linux generic allocator.
 */

#define MIN_RQT_SHIFT 10                /* 1KB == min RQT size (16 entries) */
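/*
 * RQT sizes are passed in units of 64-byte RQT entries (1KB / 16
 * entries per the define above), hence the "size << 6" when talking to
 * the byte-addressed gen_pool.
 */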
u32 c4iw_rqtpool_alloc(struct c4iw_rdev *rdev, int size)
{
        unsigned long addr = gen_pool_alloc(rdev->rqt_pool, size << 6);

        PDBG("%s addr 0x%x size %d\n", __func__, (u32)addr, size << 6);
        if (!addr && printk_ratelimit())
                printk(KERN_WARNING MOD "%s: Out of RQT memory\n",
                       pci_name(rdev->lldi.pdev));
        return (u32)addr;
}

void c4iw_rqtpool_free(struct c4iw_rdev *rdev, u32 addr, int size)
{
        PDBG("%s addr 0x%x size %d\n", __func__, addr, size << 6);
        gen_pool_free(rdev->rqt_pool, (unsigned long)addr, size << 6);
}
int c4iw_rqtpool_create(struct c4iw_rdev *rdev)
{
        unsigned rqt_start, rqt_chunk, rqt_top;

        rdev->rqt_pool = gen_pool_create(MIN_RQT_SHIFT, -1);
        if (!rdev->rqt_pool)
                return -ENOMEM;

        rqt_start = rdev->lldi.vr->rq.start;
        rqt_chunk = rdev->lldi.vr->rq.size;
        rqt_top = rqt_start + rqt_chunk;

        while (rqt_start < rqt_top) {
                rqt_chunk = min(rqt_top - rqt_start + 1, rqt_chunk);
                if (gen_pool_add(rdev->rqt_pool, rqt_start, rqt_chunk, -1)) {
                        PDBG("%s failed to add RQT chunk (%x/%x)\n",
                             __func__, rqt_start, rqt_chunk);
                        if (rqt_chunk <= 1024 << MIN_RQT_SHIFT) {
                                printk(KERN_WARNING MOD
                                       "Failed to add all RQT chunks (%x/%x)\n",
                                       rqt_start, rqt_top - rqt_start);
                                return 0;
                        }
                        rqt_chunk >>= 1;
                } else {
                        PDBG("%s added RQT chunk (%x/%x)\n",
                             __func__, rqt_start, rqt_chunk);
                        rqt_start += rqt_chunk;
                }
        }

        return 0;
}
void c4iw_rqtpool_destroy(struct c4iw_rdev *rdev)
{
        gen_pool_destroy(rdev->rqt_pool);
}
/*
 * On-Chip QP Memory.
 */

#define MIN_OCQP_SHIFT 12               /* 4KB == min ocqp size */
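/*
 * Same pattern as the PBL/RQT pools, but sizes are plain bytes and,
 * unlike those pools, an allocation failure here is silent (no
 * ratelimited warning); callers are expected to handle a zero return.
 */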
u32 c4iw_ocqp_pool_alloc(struct c4iw_rdev *rdev, int size)
{
        unsigned long addr = gen_pool_alloc(rdev->ocqp_pool, size);

        PDBG("%s addr 0x%x size %d\n", __func__, (u32)addr, size);
        return (u32)addr;
}

void c4iw_ocqp_pool_free(struct c4iw_rdev *rdev, u32 addr, int size)
{
        PDBG("%s addr 0x%x size %d\n", __func__, addr, size);
        gen_pool_free(rdev->ocqp_pool, (unsigned long)addr, size);
}
int c4iw_ocqp_pool_create(struct c4iw_rdev *rdev)
{
        unsigned start, chunk, top;

        rdev->ocqp_pool = gen_pool_create(MIN_OCQP_SHIFT, -1);
        if (!rdev->ocqp_pool)
                return -ENOMEM;

        start = rdev->lldi.vr->ocq.start;
        chunk = rdev->lldi.vr->ocq.size;
        top = start + chunk;

        while (start < top) {
                chunk = min(top - start + 1, chunk);
                if (gen_pool_add(rdev->ocqp_pool, start, chunk, -1)) {
                        PDBG("%s failed to add OCQP chunk (%x/%x)\n",
                             __func__, start, chunk);
                        if (chunk <= 1024 << MIN_OCQP_SHIFT) {
                                printk(KERN_WARNING MOD
                                       "Failed to add all OCQP chunks (%x/%x)\n",
                                       start, top - start);
                                return 0;
                        }
                        chunk >>= 1;
                } else {
                        PDBG("%s added OCQP chunk (%x/%x)\n",
                             __func__, start, chunk);
                        start += chunk;
                }
        }

        return 0;
}
void c4iw_ocqp_pool_destroy(struct c4iw_rdev *rdev)
{
        gen_pool_destroy(rdev->ocqp_pool);
}