/*
 *  linux/drivers/net/ehea/ehea_qmr.c
 *
 *  eHEA ethernet device driver for IBM eServer System p
 *
 *  (C) Copyright IBM Corp. 2006
 *
 *  Authors:
 *       Christoph Raisch <raisch@de.ibm.com>
 *       Jan-Bernd Themann <themann@de.ibm.com>
 *       Thomas Klein <tklein@de.ibm.com>
 *
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include "ehea.h"
#include "ehea_phyp.h"
#include "ehea_qmr.h"
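
/*
 * Return the current queue page and advance the queue by one page.
 * Returns NULL when the end of the queue has been reached or when the
 * page is not aligned to EHEA_PAGESIZE.
 */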
static void *hw_qpageit_get_inc(struct hw_queue *queue)
{
	void *retvalue = hw_qeit_get(queue);

	queue->current_q_offset += queue->pagesize;
	if (queue->current_q_offset > queue->queue_length) {
		queue->current_q_offset -= queue->pagesize;
		retvalue = NULL;
	} else if (((u64) retvalue) & (EHEA_PAGESIZE-1)) {
		ehea_error("not on pageboundary");
		retvalue = NULL;
	}
	return retvalue;
}
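
/*
 * Build the software side of a queue: an array of pointers to eHEA
 * queue pages, carved out of zeroed kernel pages. Several eHEA pages
 * may share one kernel page when pagesize < PAGE_SIZE.
 */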
static int hw_queue_ctor(struct hw_queue *queue, const u32 nr_of_pages,
			 const u32 pagesize, const u32 qe_size)
{
	int pages_per_kpage = PAGE_SIZE / pagesize;
	int i, k;

	if ((pagesize > PAGE_SIZE) || (!pages_per_kpage)) {
		ehea_error("pagesize conflict! kernel pagesize=%d, "
			   "ehea pagesize=%d", (int)PAGE_SIZE, (int)pagesize);
		return -EINVAL;
	}

	queue->queue_length = nr_of_pages * pagesize;
	/* zeroed so the cleanup path below can spot unused entries */
	queue->queue_pages = kzalloc(nr_of_pages * sizeof(void*), GFP_KERNEL);
	if (!queue->queue_pages) {
		ehea_error("no mem for queue_pages");
		return -ENOMEM;
	}

	/*
	 * allocate pages for queue:
	 * outer loop allocates whole kernel pages (page aligned) and
	 * inner loop divides a kernel page into smaller hea queue pages
	 */
	i = 0;
	while (i < nr_of_pages) {
		u8 *kpage = (u8*)get_zeroed_page(GFP_KERNEL);
		if (!kpage)
			goto out_nomem;
		for (k = 0; k < pages_per_kpage && i < nr_of_pages; k++) {
			(queue->queue_pages)[i] = (struct ehea_page*)kpage;
			kpage += pagesize;
			i++;
		}
	}

	queue->current_q_offset = 0;
	queue->qe_size = qe_size;
	queue->pagesize = pagesize;
	queue->toggle_state = 1;

	return 0;

out_nomem:
	for (i = 0; i < nr_of_pages; i += pages_per_kpage) {
		if (!(queue->queue_pages)[i])
			break;
		free_page((unsigned long)(queue->queue_pages)[i]);
	}
	kfree(queue->queue_pages);
	return -ENOMEM;
}
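
/*
 * Free the kernel pages backing a queue and the page-pointer array.
 * Only every pages_per_kpage-th entry points to the start of a kernel
 * page, so only those entries are passed to free_page().
 */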
static void hw_queue_dtor(struct hw_queue *queue)
{
	int pages_per_kpage;
	int i, nr_pages;

	if (!queue || !queue->queue_pages)
		return;

	pages_per_kpage = PAGE_SIZE / queue->pagesize;
	nr_pages = queue->queue_length / queue->pagesize;

	for (i = 0; i < nr_pages; i += pages_per_kpage)
		free_page((unsigned long)(queue->queue_pages)[i]);

	kfree(queue->queue_pages);
}
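
/*
 * Allocate a completion queue: obtain the firmware resource via hcall,
 * build the software queue and register each queue page with the
 * hypervisor. The last register call must return H_SUCCESS, all
 * earlier ones H_PAGE_REGISTERED.
 */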
struct ehea_cq *ehea_create_cq(struct ehea_adapter *adapter,
			       int nr_of_cqe, u64 eq_handle, u32 cq_token)
{
	struct ehea_cq *cq;
	struct h_epa epa;
	u64 *cq_handle_ref, hret, rpage;
	u32 act_nr_of_entries, act_pages, counter;
	int ret;
	void *vpage;

	cq = kzalloc(sizeof(*cq), GFP_KERNEL);
	if (!cq) {
		ehea_error("no mem for cq");
		goto out_nomem;
	}

	cq->attr.max_nr_of_cqes = nr_of_cqe;
	cq->attr.cq_token = cq_token;
	cq->attr.eq_handle = eq_handle;

	cq->adapter = adapter;

	cq_handle_ref = &cq->fw_handle;
	act_nr_of_entries = 0;
	act_pages = 0;

	hret = ehea_h_alloc_resource_cq(adapter->handle, &cq->attr,
					&cq->fw_handle, &cq->epas);
	if (hret != H_SUCCESS) {
		ehea_error("alloc_resource_cq failed");
		goto out_freemem;
	}

	ret = hw_queue_ctor(&cq->hw_queue, cq->attr.nr_pages,
			    EHEA_PAGESIZE, sizeof(struct ehea_cqe));
	if (ret)
		goto out_freeres;

	for (counter = 0; counter < cq->attr.nr_pages; counter++) {
		vpage = hw_qpageit_get_inc(&cq->hw_queue);
		if (!vpage) {
			ehea_error("hw_qpageit_get_inc failed");
			goto out_kill_hwq;
		}

		rpage = virt_to_abs(vpage);
		hret = ehea_h_register_rpage(adapter->handle,
					     0, EHEA_CQ_REGISTER_ORIG,
					     cq->fw_handle, rpage, 1);
		if (hret < H_SUCCESS) {
			ehea_error("register_rpage_cq failed ehea_cq=%p "
				   "hret=%lx counter=%i act_pages=%i",
				   cq, hret, counter, cq->attr.nr_pages);
			goto out_kill_hwq;
		}

		if (counter == (cq->attr.nr_pages - 1)) {
			vpage = hw_qpageit_get_inc(&cq->hw_queue);

			if ((hret != H_SUCCESS) || (vpage)) {
				ehea_error("registration of pages not "
					   "complete hret=%lx\n", hret);
				goto out_kill_hwq;
			}
		} else {
			if ((hret != H_PAGE_REGISTERED) || (!vpage)) {
				ehea_error("CQ: registration of page failed "
					   "hret=%lx\n", hret);
				goto out_kill_hwq;
			}
		}
	}

	hw_qeit_reset(&cq->hw_queue);
	epa = cq->epas.kernel;
	ehea_reset_cq_ep(cq);
	ehea_reset_cq_n1(cq);

	return cq;

out_kill_hwq:
	hw_queue_dtor(&cq->hw_queue);

out_freeres:
	ehea_h_free_resource(adapter->handle, cq->fw_handle);

out_freemem:
	kfree(cq);

out_nomem:
	return NULL;
}
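
/*
 * Release a completion queue: free the firmware resource first (this
 * also deregisters the pages), then tear down the software queue.
 */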
int ehea_destroy_cq(struct ehea_cq *cq)
{
	u64 adapter_handle, hret;

	if (!cq)
		return 0;

	adapter_handle = cq->adapter->handle;

	/* deregister all previous registered pages */
	hret = ehea_h_free_resource(adapter_handle, cq->fw_handle);
	if (hret != H_SUCCESS) {
		ehea_error("destroy CQ failed");
		return -EIO;
	}

	hw_queue_dtor(&cq->hw_queue);
	kfree(cq);

	return 0;
}
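
/*
 * Allocate an event queue, analogous to ehea_create_cq(): allocate the
 * firmware resource, build the software queue and register its pages.
 */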
struct ehea_eq *ehea_create_eq(struct ehea_adapter *adapter,
			       const enum ehea_eq_type type,
			       const u32 max_nr_of_eqes, const u8 eqe_gen)
{
	int ret, i;
	u64 hret, rpage;
	void *vpage;
	struct ehea_eq *eq;

	eq = kzalloc(sizeof(*eq), GFP_KERNEL);
	if (!eq) {
		ehea_error("no mem for eq");
		return NULL;
	}

	eq->adapter = adapter;
	eq->attr.type = type;
	eq->attr.max_nr_of_eqes = max_nr_of_eqes;
	eq->attr.eqe_gen = eqe_gen;
	spin_lock_init(&eq->spinlock);

	hret = ehea_h_alloc_resource_eq(adapter->handle,
					&eq->attr, &eq->fw_handle);
	if (hret != H_SUCCESS) {
		ehea_error("alloc_resource_eq failed");
		goto out_freemem;
	}

	ret = hw_queue_ctor(&eq->hw_queue, eq->attr.nr_pages,
			    EHEA_PAGESIZE, sizeof(struct ehea_eqe));
	if (ret) {
		ehea_error("can't allocate eq pages");
		goto out_freeres;
	}

	for (i = 0; i < eq->attr.nr_pages; i++) {
		vpage = hw_qpageit_get_inc(&eq->hw_queue);
		if (!vpage) {
			ehea_error("hw_qpageit_get_inc failed");
			hret = H_RESOURCE;
			goto out_kill_hwq;
		}

		rpage = virt_to_abs(vpage);

		hret = ehea_h_register_rpage(adapter->handle, 0,
					     EHEA_EQ_REGISTER_ORIG,
					     eq->fw_handle, rpage, 1);

		if (i == (eq->attr.nr_pages - 1)) {
			/* last page */
			vpage = hw_qpageit_get_inc(&eq->hw_queue);
			if ((hret != H_SUCCESS) || (vpage))
				goto out_kill_hwq;
		} else {
			if ((hret != H_PAGE_REGISTERED) || (!vpage))
				goto out_kill_hwq;
		}
	}

	hw_qeit_reset(&eq->hw_queue);
	return eq;

out_kill_hwq:
	hw_queue_dtor(&eq->hw_queue);

out_freeres:
	ehea_h_free_resource(adapter->handle, eq->fw_handle);

out_freemem:
	kfree(eq);
	return NULL;
}
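
/*
 * Fetch the next valid EQ entry, or NULL if none is pending. The
 * spinlock serializes concurrent pollers and ehea_destroy_eq().
 */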
struct ehea_eqe *ehea_poll_eq(struct ehea_eq *eq)
{
	struct ehea_eqe *eqe;
	unsigned long flags;

	spin_lock_irqsave(&eq->spinlock, flags);
	eqe = (struct ehea_eqe*)hw_eqit_eq_get_inc_valid(&eq->hw_queue);
	spin_unlock_irqrestore(&eq->spinlock, flags);

	return eqe;
}
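
/*
 * Release an event queue; the spinlock is held across the free hcall
 * to serialize against ehea_poll_eq().
 */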
int ehea_destroy_eq(struct ehea_eq *eq)
{
	u64 hret;
	unsigned long flags;

	if (!eq)
		return 0;

	spin_lock_irqsave(&eq->spinlock, flags);
	hret = ehea_h_free_resource(eq->adapter->handle, eq->fw_handle);
	spin_unlock_irqrestore(&eq->spinlock, flags);

	if (hret != H_SUCCESS) {
		ehea_error("destroy_eq failed");
		return -EIO;
	}

	hw_queue_dtor(&eq->hw_queue);
	kfree(eq);

	return 0;
}
/**
 * allocates memory for a queue and registers pages in phyp
 */
int ehea_qp_alloc_register(struct ehea_qp *qp, struct hw_queue *hw_queue,
			   int nr_pages, int wqe_size, int act_nr_sges,
			   struct ehea_adapter *adapter, int h_call_q_selector)
{
	u64 hret, rpage;
	int ret, cnt;
	void *vpage;

	ret = hw_queue_ctor(hw_queue, nr_pages, EHEA_PAGESIZE, wqe_size);
	if (ret)
		return ret;

	for (cnt = 0; cnt < nr_pages; cnt++) {
		vpage = hw_qpageit_get_inc(hw_queue);
		if (!vpage) {
			ehea_error("hw_qpageit_get_inc failed");
			goto out_kill_hwq;
		}
		rpage = virt_to_abs(vpage);
		hret = ehea_h_register_rpage(adapter->handle,
					     0, h_call_q_selector,
					     qp->fw_handle, rpage, 1);
		if (hret < H_SUCCESS) {
			ehea_error("register_rpage_qp failed");
			goto out_kill_hwq;
		}
	}

	hw_qeit_reset(hw_queue);
	return 0;

out_kill_hwq:
	hw_queue_dtor(hw_queue);
	return -EIO;
}
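
/* WQE sizes are encoded as a power-of-two multiple of 128 bytes. */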
static inline u32 map_wqe_size(u8 wqe_enc_size)
{
	return 128 << wqe_enc_size;
}
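
/*
 * Allocate a queue pair: one firmware resource backing a send queue
 * and up to three receive queues, each registered via
 * ehea_qp_alloc_register() with its queue selector.
 */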
struct ehea_qp *ehea_create_qp(struct ehea_adapter *adapter,
			       u32 pd, struct ehea_qp_init_attr *init_attr)
{
	int ret;
	u64 hret;
	struct ehea_qp *qp;
	u32 wqe_size_in_bytes_sq, wqe_size_in_bytes_rq1;
	u32 wqe_size_in_bytes_rq2, wqe_size_in_bytes_rq3;

	qp = kzalloc(sizeof(*qp), GFP_KERNEL);
	if (!qp) {
		ehea_error("no mem for qp");
		return NULL;
	}

	qp->adapter = adapter;

	hret = ehea_h_alloc_resource_qp(adapter->handle, init_attr, pd,
					&qp->fw_handle, &qp->epas);
	if (hret != H_SUCCESS) {
		ehea_error("ehea_h_alloc_resource_qp failed");
		goto out_freemem;
	}

	wqe_size_in_bytes_sq = map_wqe_size(init_attr->act_wqe_size_enc_sq);
	wqe_size_in_bytes_rq1 = map_wqe_size(init_attr->act_wqe_size_enc_rq1);
	wqe_size_in_bytes_rq2 = map_wqe_size(init_attr->act_wqe_size_enc_rq2);
	wqe_size_in_bytes_rq3 = map_wqe_size(init_attr->act_wqe_size_enc_rq3);

	ret = ehea_qp_alloc_register(qp, &qp->hw_squeue, init_attr->nr_sq_pages,
				     wqe_size_in_bytes_sq,
				     init_attr->act_wqe_size_enc_sq, adapter,
				     0);
	if (ret) {
		ehea_error("can't register for sq ret=%x", ret);
		goto out_freeres;
	}

	ret = ehea_qp_alloc_register(qp, &qp->hw_rqueue1,
				     init_attr->nr_rq1_pages,
				     wqe_size_in_bytes_rq1,
				     init_attr->act_wqe_size_enc_rq1,
				     adapter, 1);
	if (ret) {
		ehea_error("can't register for rq1 ret=%x", ret);
		goto out_kill_hwsq;
	}

	if (init_attr->rq_count > 1) {
		ret = ehea_qp_alloc_register(qp, &qp->hw_rqueue2,
					     init_attr->nr_rq2_pages,
					     wqe_size_in_bytes_rq2,
					     init_attr->act_wqe_size_enc_rq2,
					     adapter, 2);
		if (ret) {
			ehea_error("can't register for rq2 ret=%x", ret);
			goto out_kill_hwr1q;
		}
	}

	if (init_attr->rq_count > 2) {
		ret = ehea_qp_alloc_register(qp, &qp->hw_rqueue3,
					     init_attr->nr_rq3_pages,
					     wqe_size_in_bytes_rq3,
					     init_attr->act_wqe_size_enc_rq3,
					     adapter, 3);
		if (ret) {
			ehea_error("can't register for rq3 ret=%x", ret);
			goto out_kill_hwr2q;
		}
	}

	qp->init_attr = *init_attr;

	return qp;

out_kill_hwr2q:
	hw_queue_dtor(&qp->hw_rqueue2);

out_kill_hwr1q:
	hw_queue_dtor(&qp->hw_rqueue1);

out_kill_hwsq:
	hw_queue_dtor(&qp->hw_squeue);

out_freeres:
	ehea_h_disable_and_get_hea(adapter->handle, qp->fw_handle);
	ehea_h_free_resource(adapter->handle, qp->fw_handle);

out_freemem:
	kfree(qp);
	return NULL;
}
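
/*
 * Release a queue pair: free the firmware resource, then tear down the
 * send queue and all receive queues that were created.
 */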
int ehea_destroy_qp(struct ehea_qp *qp)
{
	u64 hret;
	struct ehea_qp_init_attr *qp_attr;

	if (!qp)
		return 0;

	qp_attr = &qp->init_attr;

	hret = ehea_h_free_resource(qp->adapter->handle, qp->fw_handle);
	if (hret != H_SUCCESS) {
		ehea_error("destroy_qp failed");
		return -EIO;
	}

	hw_queue_dtor(&qp->hw_squeue);
	hw_queue_dtor(&qp->hw_rqueue1);

	if (qp_attr->rq_count > 1)
		hw_queue_dtor(&qp->hw_rqueue2);
	if (qp_attr->rq_count > 2)
		hw_queue_dtor(&qp->hw_rqueue3);

	kfree(qp);

	return 0;
}
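
/*
 * Register all kernel memory from KERNELBASE to high_memory as one
 * memory region with the firmware. Page addresses are registered in
 * batches of up to 512 per hcall, collected in a scratch page (pt);
 * a final single-page call must complete with H_SUCCESS.
 */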
int ehea_reg_mr_adapter(struct ehea_adapter *adapter)
{
	int i, k, ret;
	u64 hret, pt_abs, start, end, nr_pages;
	u32 acc_ctrl = EHEA_MR_ACC_CTRL;
	u64 *pt;

	start = KERNELBASE;
	end = (u64)high_memory;
	nr_pages = (end - start) / PAGE_SIZE;

	pt = kzalloc(PAGE_SIZE, GFP_KERNEL);
	if (!pt) {
		ehea_error("no mem");
		ret = -ENOMEM;
		goto out;
	}
	pt_abs = virt_to_abs(pt);

	hret = ehea_h_alloc_resource_mr(adapter->handle, start, end - start,
					acc_ctrl, adapter->pd,
					&adapter->mr.handle, &adapter->mr.lkey);
	if (hret != H_SUCCESS) {
		ehea_error("alloc_resource_mr failed");
		ret = -EIO;
		goto out;
	}

	adapter->mr.vaddr = KERNELBASE;
	k = 0;

	while (nr_pages > 0) {
		if (nr_pages > 1) {
			u64 num_pages = min(nr_pages, (u64)512);
			for (i = 0; i < num_pages; i++)
				pt[i] = virt_to_abs((void*)(((u64)start)
							    + ((k++) *
							       PAGE_SIZE)));

			hret = ehea_h_register_rpage_mr(adapter->handle,
							adapter->mr.handle, 0,
							0, (u64)pt_abs,
							num_pages);
			nr_pages -= num_pages;
		} else {
			u64 abs_adr = virt_to_abs((void*)(((u64)start)
							  + (k * PAGE_SIZE)));
			hret = ehea_h_register_rpage_mr(adapter->handle,
							adapter->mr.handle, 0,
							0, abs_adr, 1);
			nr_pages--;
		}

		if ((hret != H_SUCCESS) && (hret != H_PAGE_REGISTERED)) {
			ehea_h_free_resource(adapter->handle,
					     adapter->mr.handle);
			ehea_error("register_rpage_mr failed: hret = %lX",
				   hret);
			ret = -EIO;
			goto out;
		}
	}

	if (hret != H_SUCCESS) {
		ehea_h_free_resource(adapter->handle, adapter->mr.handle);
		ehea_error("register_rpage failed for last page: hret = %lX",
			   hret);
		ret = -EIO;
		goto out;
	}

	ret = 0;
out:
	kfree(pt);
	return ret;
}