/*
 * linux/drivers/net/ethernet/ibm/ehea/ehea_qmr.c
 *
 * eHEA ethernet device driver for IBM eServer System p
 *
 * (C) Copyright IBM Corp. 2006
 *
 * Authors:
 *	Christoph Raisch <raisch@de.ibm.com>
 *	Jan-Bernd Themann <themann@de.ibm.com>
 *	Thomas Klein <tklein@de.ibm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/mm.h>
#include <linux/slab.h>

#include "ehea.h"
#include "ehea_phyp.h"
#include "ehea_qmr.h"

static struct ehea_bmap *ehea_bmap;
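
/*
 * Return the queue entry at the current offset and advance the queue by
 * one hardware page.  Returns NULL when the end of the queue is reached
 * or when the entry is not aligned to EHEA_PAGESIZE.
 */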
static void *hw_qpageit_get_inc(struct hw_queue *queue)
{
	void *retvalue = hw_qeit_get(queue);

	queue->current_q_offset += queue->pagesize;
	if (queue->current_q_offset > queue->queue_length) {
		queue->current_q_offset -= queue->pagesize;
		retvalue = NULL;
	} else if (((u64)retvalue) & (EHEA_PAGESIZE - 1)) {
		pr_err("not on page boundary\n");
		retvalue = NULL;
	}
	return retvalue;
}
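
/*
 * Build the backing store for a hardware queue: nr_of_pages queue pages
 * of pagesize bytes each, carved out of zeroed, page-aligned kernel
 * pages.  queue->queue_pages keeps a pointer to each queue page.
 */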
static int hw_queue_ctor(struct hw_queue *queue, const u32 nr_of_pages,
			 const u32 pagesize, const u32 qe_size)
{
	int pages_per_kpage = PAGE_SIZE / pagesize;
	int i, k;

	if ((pagesize > PAGE_SIZE) || (!pages_per_kpage)) {
		pr_err("pagesize conflict! kernel pagesize=%d, ehea pagesize=%d\n",
		       (int)PAGE_SIZE, (int)pagesize);
		return -EINVAL;
	}

	queue->queue_length = nr_of_pages * pagesize;
	queue->queue_pages = kmalloc_array(nr_of_pages, sizeof(void *),
					   GFP_KERNEL);
	if (!queue->queue_pages) {
		pr_err("no mem for queue_pages\n");
		return -ENOMEM;
	}

	/*
	 * allocate pages for queue:
	 * outer loop allocates whole kernel pages (page aligned) and
	 * inner loop divides a kernel page into smaller hea queue pages
	 */
	i = 0;
	while (i < nr_of_pages) {
		u8 *kpage = (u8 *)get_zeroed_page(GFP_KERNEL);
		if (!kpage)
			goto out_nomem;

		for (k = 0; k < pages_per_kpage && i < nr_of_pages; k++) {
			(queue->queue_pages)[i] = (struct ehea_page *)kpage;
			kpage += pagesize;
			i++;
		}
	}

	queue->current_q_offset = 0;
	queue->qe_size = qe_size;
	queue->pagesize = pagesize;
	queue->toggle_state = 1;

	return 0;

out_nomem:
	for (i = 0; i < nr_of_pages; i += pages_per_kpage) {
		if (!(queue->queue_pages)[i])
			break;
		free_page((unsigned long)(queue->queue_pages)[i]);
	}
	return -ENOMEM;
}
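
/* Release the kernel pages backing a hardware queue and its page list. */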
static void hw_queue_dtor(struct hw_queue *queue)
{
	int pages_per_kpage;
	int i, nr_pages;

	if (!queue || !queue->queue_pages)
		return;

	pages_per_kpage = PAGE_SIZE / queue->pagesize;
	nr_pages = queue->queue_length / queue->pagesize;

	for (i = 0; i < nr_pages; i += pages_per_kpage)
		free_page((unsigned long)(queue->queue_pages)[i]);

	kfree(queue->queue_pages);
}
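
/*
 * Create a completion queue: allocate the firmware CQ resource, build
 * the backing hw_queue and register every queue page with the
 * hypervisor.  Returns NULL on failure.
 */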
struct ehea_cq *ehea_create_cq(struct ehea_adapter *adapter,
			       int nr_of_cqe, u64 eq_handle, u32 cq_token)
{
	struct ehea_cq *cq;
	u64 hret, rpage;
	u32 counter;
	int ret;
	void *vpage;

	cq = kzalloc(sizeof(*cq), GFP_KERNEL);
	if (!cq) {
		pr_err("no mem for cq\n");
		goto out_nomem;
	}

	cq->attr.max_nr_of_cqes = nr_of_cqe;
	cq->attr.cq_token = cq_token;
	cq->attr.eq_handle = eq_handle;

	cq->adapter = adapter;

	hret = ehea_h_alloc_resource_cq(adapter->handle, &cq->attr,
					&cq->fw_handle, &cq->epas);
	if (hret != H_SUCCESS) {
		pr_err("alloc_resource_cq failed\n");
		goto out_freemem;
	}

	ret = hw_queue_ctor(&cq->hw_queue, cq->attr.nr_pages,
			    EHEA_PAGESIZE, sizeof(struct ehea_cqe));
	if (ret)
		goto out_freeres;

	for (counter = 0; counter < cq->attr.nr_pages; counter++) {
		vpage = hw_qpageit_get_inc(&cq->hw_queue);
		if (!vpage) {
			pr_err("hw_qpageit_get_inc failed\n");
			goto out_kill_hwq;
		}

		rpage = __pa(vpage);
		hret = ehea_h_register_rpage(adapter->handle,
					     0, EHEA_CQ_REGISTER_ORIG,
					     cq->fw_handle, rpage, 1);
		if (hret < H_SUCCESS) {
			pr_err("register_rpage_cq failed ehea_cq=%p hret=%llx counter=%i act_pages=%i\n",
			       cq, hret, counter, cq->attr.nr_pages);
			goto out_kill_hwq;
		}

		if (counter == (cq->attr.nr_pages - 1)) {
			vpage = hw_qpageit_get_inc(&cq->hw_queue);

			if ((hret != H_SUCCESS) || (vpage)) {
				pr_err("registration of pages not complete hret=%llx\n",
				       hret);
				goto out_kill_hwq;
			}
		} else {
			if (hret != H_PAGE_REGISTERED) {
				pr_err("CQ: registration of page failed hret=%llx\n",
				       hret);
				goto out_kill_hwq;
			}
		}
	}

	hw_qeit_reset(&cq->hw_queue);
	ehea_reset_cq_ep(cq);
	ehea_reset_cq_n1(cq);

	return cq;

out_kill_hwq:
	hw_queue_dtor(&cq->hw_queue);

out_freeres:
	ehea_h_free_resource(adapter->handle, cq->fw_handle, FORCE_FREE);

out_freemem:
	kfree(cq);

out_nomem:
	return NULL;
}
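
/*
 * Free the firmware CQ resource; only if the hypervisor accepts the
 * free are the backing queue pages and the cq structure released.
 */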
static u64 ehea_destroy_cq_res(struct ehea_cq *cq, u64 force)
{
	u64 hret;
	u64 adapter_handle = cq->adapter->handle;

	/* deregister all previously registered pages */
	hret = ehea_h_free_resource(adapter_handle, cq->fw_handle, force);
	if (hret != H_SUCCESS)
		return hret;

	hw_queue_dtor(&cq->hw_queue);
	kfree(cq);

	return hret;
}

int ehea_destroy_cq(struct ehea_cq *cq)
{
	u64 hret, aer, aerr;

	if (!cq)
		return 0;

	hcp_epas_dtor(&cq->epas);
	hret = ehea_destroy_cq_res(cq, NORMAL_FREE);
	if (hret == H_R_STATE) {
		ehea_error_data(cq->adapter, cq->fw_handle, &aer, &aerr);
		hret = ehea_destroy_cq_res(cq, FORCE_FREE);
	}

	if (hret != H_SUCCESS) {
		pr_err("destroy CQ failed\n");
		return -EIO;
	}

	return 0;
}
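
/*
 * Create an event queue: allocate the firmware EQ resource, build the
 * backing hw_queue and register its pages, mirroring ehea_create_cq.
 */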
struct ehea_eq *ehea_create_eq(struct ehea_adapter *adapter,
			       const enum ehea_eq_type type,
			       const u32 max_nr_of_eqes, const u8 eqe_gen)
{
	int ret, i;
	u64 hret, rpage;
	void *vpage;
	struct ehea_eq *eq;

	eq = kzalloc(sizeof(*eq), GFP_KERNEL);
	if (!eq) {
		pr_err("no mem for eq\n");
		return NULL;
	}

	eq->adapter = adapter;
	eq->attr.type = type;
	eq->attr.max_nr_of_eqes = max_nr_of_eqes;
	eq->attr.eqe_gen = eqe_gen;
	spin_lock_init(&eq->spinlock);

	hret = ehea_h_alloc_resource_eq(adapter->handle,
					&eq->attr, &eq->fw_handle);
	if (hret != H_SUCCESS) {
		pr_err("alloc_resource_eq failed\n");
		goto out_freemem;
	}

	ret = hw_queue_ctor(&eq->hw_queue, eq->attr.nr_pages,
			    EHEA_PAGESIZE, sizeof(struct ehea_eqe));
	if (ret) {
		pr_err("can't allocate eq pages\n");
		goto out_freeres;
	}

	for (i = 0; i < eq->attr.nr_pages; i++) {
		vpage = hw_qpageit_get_inc(&eq->hw_queue);
		if (!vpage) {
			pr_err("hw_qpageit_get_inc failed\n");
			hret = H_RESOURCE;
			goto out_kill_hwq;
		}

		rpage = __pa(vpage);
		hret = ehea_h_register_rpage(adapter->handle, 0,
					     EHEA_EQ_REGISTER_ORIG,
					     eq->fw_handle, rpage, 1);

		if (i == (eq->attr.nr_pages - 1)) {
			/* last page */
			vpage = hw_qpageit_get_inc(&eq->hw_queue);
			if ((hret != H_SUCCESS) || (vpage))
				goto out_kill_hwq;
		} else {
			if (hret != H_PAGE_REGISTERED)
				goto out_kill_hwq;
		}
	}

	hw_qeit_reset(&eq->hw_queue);
	return eq;

out_kill_hwq:
	hw_queue_dtor(&eq->hw_queue);

out_freeres:
	ehea_h_free_resource(adapter->handle, eq->fw_handle, FORCE_FREE);

out_freemem:
	kfree(eq);
	return NULL;
}
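
/* Fetch the next valid EQ entry, if any, under the EQ spinlock. */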
struct ehea_eqe *ehea_poll_eq(struct ehea_eq *eq)
{
	struct ehea_eqe *eqe;
	unsigned long flags;

	spin_lock_irqsave(&eq->spinlock, flags);
	eqe = hw_eqit_eq_get_inc_valid(&eq->hw_queue);
	spin_unlock_irqrestore(&eq->spinlock, flags);

	return eqe;
}

static u64 ehea_destroy_eq_res(struct ehea_eq *eq, u64 force)
{
	u64 hret;
	unsigned long flags;

	spin_lock_irqsave(&eq->spinlock, flags);
	hret = ehea_h_free_resource(eq->adapter->handle, eq->fw_handle, force);
	spin_unlock_irqrestore(&eq->spinlock, flags);

	if (hret != H_SUCCESS)
		return hret;

	hw_queue_dtor(&eq->hw_queue);
	kfree(eq);

	return hret;
}

int ehea_destroy_eq(struct ehea_eq *eq)
{
	u64 hret, aer, aerr;

	if (!eq)
		return 0;

	hcp_epas_dtor(&eq->epas);
	hret = ehea_destroy_eq_res(eq, NORMAL_FREE);
	if (hret == H_R_STATE) {
		ehea_error_data(eq->adapter, eq->fw_handle, &aer, &aerr);
		hret = ehea_destroy_eq_res(eq, FORCE_FREE);
	}

	if (hret != H_SUCCESS) {
		pr_err("destroy EQ failed\n");
		return -EIO;
	}

	return 0;
}

/* Allocates memory for a queue and registers pages in phyp */
static int ehea_qp_alloc_register(struct ehea_qp *qp, struct hw_queue *hw_queue,
				  int nr_pages, int wqe_size, int act_nr_sges,
				  struct ehea_adapter *adapter, int h_call_q_selector)
{
	u64 hret, rpage;
	int ret, cnt;
	void *vpage;

	ret = hw_queue_ctor(hw_queue, nr_pages, EHEA_PAGESIZE, wqe_size);
	if (ret)
		return ret;

	for (cnt = 0; cnt < nr_pages; cnt++) {
		vpage = hw_qpageit_get_inc(hw_queue);
		if (!vpage) {
			pr_err("hw_qpageit_get_inc failed\n");
			goto out_kill_hwq;
		}
		rpage = __pa(vpage);
		hret = ehea_h_register_rpage(adapter->handle,
					     0, h_call_q_selector,
					     qp->fw_handle, rpage, 1);
		if (hret < H_SUCCESS) {
			pr_err("register_rpage_qp failed\n");
			goto out_kill_hwq;
		}
	}
	hw_qeit_reset(hw_queue);
	return 0;

out_kill_hwq:
	hw_queue_dtor(hw_queue);
	return -EIO;
}
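
/*
 * WQE sizes are firmware-encoded as a shift relative to 128 bytes,
 * e.g. an encoding of 0 means 128-byte WQEs and 2 means 512-byte WQEs.
 */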
static inline u32 map_wqe_size(u8 wqe_enc_size)
{
	return 128 << wqe_enc_size;
}
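
/*
 * Create a queue pair: allocate the firmware QP resource, then build
 * and register the send queue and up to three receive queues as
 * requested by init_attr->rq_count.
 */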
struct ehea_qp *ehea_create_qp(struct ehea_adapter *adapter,
			       u32 pd, struct ehea_qp_init_attr *init_attr)
{
	int ret;
	u64 hret;
	struct ehea_qp *qp;
	u32 wqe_size_in_bytes_sq, wqe_size_in_bytes_rq1;
	u32 wqe_size_in_bytes_rq2, wqe_size_in_bytes_rq3;

	qp = kzalloc(sizeof(*qp), GFP_KERNEL);
	if (!qp) {
		pr_err("no mem for qp\n");
		return NULL;
	}

	qp->adapter = adapter;

	hret = ehea_h_alloc_resource_qp(adapter->handle, init_attr, pd,
					&qp->fw_handle, &qp->epas);
	if (hret != H_SUCCESS) {
		pr_err("ehea_h_alloc_resource_qp failed\n");
		goto out_freemem;
	}

	wqe_size_in_bytes_sq = map_wqe_size(init_attr->act_wqe_size_enc_sq);
	wqe_size_in_bytes_rq1 = map_wqe_size(init_attr->act_wqe_size_enc_rq1);
	wqe_size_in_bytes_rq2 = map_wqe_size(init_attr->act_wqe_size_enc_rq2);
	wqe_size_in_bytes_rq3 = map_wqe_size(init_attr->act_wqe_size_enc_rq3);

	ret = ehea_qp_alloc_register(qp, &qp->hw_squeue, init_attr->nr_sq_pages,
				     wqe_size_in_bytes_sq,
				     init_attr->act_wqe_size_enc_sq, adapter,
				     0);
	if (ret) {
		pr_err("can't register for sq ret=%x\n", ret);
		goto out_freeres;
	}

	ret = ehea_qp_alloc_register(qp, &qp->hw_rqueue1,
				     init_attr->nr_rq1_pages,
				     wqe_size_in_bytes_rq1,
				     init_attr->act_wqe_size_enc_rq1,
				     adapter, 1);
	if (ret) {
		pr_err("can't register for rq1 ret=%x\n", ret);
		goto out_kill_hwsq;
	}

	if (init_attr->rq_count > 1) {
		ret = ehea_qp_alloc_register(qp, &qp->hw_rqueue2,
					     init_attr->nr_rq2_pages,
					     wqe_size_in_bytes_rq2,
					     init_attr->act_wqe_size_enc_rq2,
					     adapter, 2);
		if (ret) {
			pr_err("can't register for rq2 ret=%x\n", ret);
			goto out_kill_hwr1q;
		}
	}

	if (init_attr->rq_count > 2) {
		ret = ehea_qp_alloc_register(qp, &qp->hw_rqueue3,
					     init_attr->nr_rq3_pages,
					     wqe_size_in_bytes_rq3,
					     init_attr->act_wqe_size_enc_rq3,
					     adapter, 3);
		if (ret) {
			pr_err("can't register for rq3 ret=%x\n", ret);
			goto out_kill_hwr2q;
		}
	}

	qp->init_attr = *init_attr;

	return qp;

out_kill_hwr2q:
	hw_queue_dtor(&qp->hw_rqueue2);

out_kill_hwr1q:
	hw_queue_dtor(&qp->hw_rqueue1);

out_kill_hwsq:
	hw_queue_dtor(&qp->hw_squeue);

out_freeres:
	ehea_h_disable_and_get_hea(adapter->handle, qp->fw_handle);
	ehea_h_free_resource(adapter->handle, qp->fw_handle, FORCE_FREE);

out_freemem:
	kfree(qp);
	return NULL;
}
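
/*
 * Disable the QP and free its firmware resource; only on success are
 * the send and receive queue pages and the qp structure released.
 */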
static u64 ehea_destroy_qp_res(struct ehea_qp *qp, u64 force)
{
	u64 hret;
	struct ehea_qp_init_attr *qp_attr = &qp->init_attr;

	ehea_h_disable_and_get_hea(qp->adapter->handle, qp->fw_handle);
	hret = ehea_h_free_resource(qp->adapter->handle, qp->fw_handle, force);
	if (hret != H_SUCCESS)
		return hret;

	hw_queue_dtor(&qp->hw_squeue);
	hw_queue_dtor(&qp->hw_rqueue1);

	if (qp_attr->rq_count > 1)
		hw_queue_dtor(&qp->hw_rqueue2);
	if (qp_attr->rq_count > 2)
		hw_queue_dtor(&qp->hw_rqueue3);
	kfree(qp);

	return hret;
}

int ehea_destroy_qp(struct ehea_qp *qp)
{
	u64 hret, aer, aerr;

	if (!qp)
		return 0;

	hcp_epas_dtor(&qp->epas);
	hret = ehea_destroy_qp_res(qp, NORMAL_FREE);
	if (hret == H_R_STATE) {
		ehea_error_data(qp->adapter, qp->fw_handle, &aer, &aerr);
		hret = ehea_destroy_qp_res(qp, FORCE_FREE);
	}

	if (hret != H_SUCCESS) {
		pr_err("destroy QP failed\n");
		return -EIO;
	}

	return 0;
}

static inline int ehea_calc_index(unsigned long i, unsigned long s)
{
	return (i >> s) & EHEA_INDEX_MASK;
}

static inline int ehea_init_top_bmap(struct ehea_top_bmap *ehea_top_bmap,
				     int dir)
{
	if (!ehea_top_bmap->dir[dir]) {
		ehea_top_bmap->dir[dir] =
			kzalloc(sizeof(struct ehea_dir_bmap), GFP_KERNEL);
		if (!ehea_top_bmap->dir[dir])
			return -ENOMEM;
	}
	return 0;
}

static inline int ehea_init_bmap(struct ehea_bmap *ehea_bmap, int top, int dir)
{
	if (!ehea_bmap->top[top]) {
		ehea_bmap->top[top] =
			kzalloc(sizeof(struct ehea_top_bmap), GFP_KERNEL);
		if (!ehea_bmap->top[top])
			return -ENOMEM;
	}
	return ehea_init_top_bmap(ehea_bmap->top[top], dir);
}
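
/*
 * The busmap is a three-level table (top/dir/idx, each level indexed
 * with an EHEA_INDEX_MASK-wide chunk of the section number) that maps
 * memory sections to addresses in a flat bus address space starting at
 * EHEA_BUSMAP_START.  It backs the single kernel memory region (MR)
 * registered with the hypervisor.
 */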
static DEFINE_MUTEX(ehea_busmap_mutex);
static unsigned long ehea_mr_len;

#define EHEA_BUSMAP_ADD_SECT 1
#define EHEA_BUSMAP_REM_SECT 0
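
/*
 * Walk the whole busmap and assign ascending bus addresses to all valid
 * sections; directories and top entries left without any valid entry
 * are freed along the way.
 */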
static void ehea_rebuild_busmap(void)
{
	u64 vaddr = EHEA_BUSMAP_START;
	int top, dir, idx;

	for (top = 0; top < EHEA_MAP_ENTRIES; top++) {
		struct ehea_top_bmap *ehea_top;
		int valid_dir_entries = 0;

		if (!ehea_bmap->top[top])
			continue;
		ehea_top = ehea_bmap->top[top];
		for (dir = 0; dir < EHEA_MAP_ENTRIES; dir++) {
			struct ehea_dir_bmap *ehea_dir;
			int valid_entries = 0;

			if (!ehea_top->dir[dir])
				continue;
			valid_dir_entries++;
			ehea_dir = ehea_top->dir[dir];
			for (idx = 0; idx < EHEA_MAP_ENTRIES; idx++) {
				if (!ehea_dir->ent[idx])
					continue;
				valid_entries++;
				ehea_dir->ent[idx] = vaddr;
				vaddr += EHEA_SECTSIZE;
			}
			if (!valid_entries) {
				ehea_top->dir[dir] = NULL;
				kfree(ehea_dir);
			}
		}
		if (!valid_dir_entries) {
			ehea_bmap->top[top] = NULL;
			kfree(ehea_top);
		}
	}
}
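
/*
 * Mark the sections covering pfn..pfn+nr_pages as valid (add) or
 * invalid (remove) and rebuild the bus address layout afterwards.
 * Callers must hold ehea_busmap_mutex.
 */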
static int ehea_update_busmap(unsigned long pfn, unsigned long nr_pages, int add)
{
	unsigned long i, start_section, end_section;

	if (!nr_pages)
		return 0;

	if (!ehea_bmap) {
		ehea_bmap = kzalloc(sizeof(struct ehea_bmap), GFP_KERNEL);
		if (!ehea_bmap)
			return -ENOMEM;
	}

	start_section = (pfn * PAGE_SIZE) / EHEA_SECTSIZE;
	end_section = start_section + ((nr_pages * PAGE_SIZE) / EHEA_SECTSIZE);
	/* Mark entries as valid or invalid only; address is assigned later */
	for (i = start_section; i < end_section; i++) {
		u64 flag;
		int top = ehea_calc_index(i, EHEA_TOP_INDEX_SHIFT);
		int dir = ehea_calc_index(i, EHEA_DIR_INDEX_SHIFT);
		int idx = i & EHEA_INDEX_MASK;

		if (add) {
			int ret = ehea_init_bmap(ehea_bmap, top, dir);
			if (ret)
				return ret;
			flag = 1; /* valid */
			ehea_mr_len += EHEA_SECTSIZE;
		} else {
			if (!ehea_bmap->top[top])
				continue;
			if (!ehea_bmap->top[top]->dir[dir])
				continue;
			flag = 0; /* invalid */
			ehea_mr_len -= EHEA_SECTSIZE;
		}

		ehea_bmap->top[top]->dir[dir]->ent[idx] = flag;
	}
	ehea_rebuild_busmap(); /* Assign contiguous addresses for mr */
	return 0;
}

int ehea_add_sect_bmap(unsigned long pfn, unsigned long nr_pages)
{
	int ret;

	mutex_lock(&ehea_busmap_mutex);
	ret = ehea_update_busmap(pfn, nr_pages, EHEA_BUSMAP_ADD_SECT);
	mutex_unlock(&ehea_busmap_mutex);
	return ret;
}

int ehea_rem_sect_bmap(unsigned long pfn, unsigned long nr_pages)
{
	int ret;

	mutex_lock(&ehea_busmap_mutex);
	ret = ehea_update_busmap(pfn, nr_pages, EHEA_BUSMAP_REM_SECT);
	mutex_unlock(&ehea_busmap_mutex);
	return ret;
}
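
/*
 * A pfn counts as a hugepage start if it is aligned to the 16GB
 * hugepage size and belongs to a compound page of that order.
 */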
static int ehea_is_hugepage(unsigned long pfn)
{
	int page_order;

	if (pfn & EHEA_HUGEPAGE_PFN_MASK)
		return 0;

	page_order = compound_order(pfn_to_page(pfn));
	if (page_order + PAGE_SHIFT != EHEA_HUGEPAGESHIFT)
		return 0;

	return 1;
}

static int ehea_create_busmap_callback(unsigned long initial_pfn,
				       unsigned long total_nr_pages, void *arg)
{
	int ret;
	unsigned long pfn, start_pfn, end_pfn, nr_pages;

	if ((total_nr_pages * PAGE_SIZE) < EHEA_HUGEPAGE_SIZE)
		return ehea_update_busmap(initial_pfn, total_nr_pages,
					  EHEA_BUSMAP_ADD_SECT);

	/* Given chunk is >= 16GB -> check for hugepages */
	start_pfn = initial_pfn;
	end_pfn = initial_pfn + total_nr_pages;
	pfn = start_pfn;

	while (pfn < end_pfn) {
		if (ehea_is_hugepage(pfn)) {
			/* Add mem found in front of the hugepage */
			nr_pages = pfn - start_pfn;
			ret = ehea_update_busmap(start_pfn, nr_pages,
						 EHEA_BUSMAP_ADD_SECT);
			if (ret)
				return ret;

			/* Skip the hugepage */
			pfn += (EHEA_HUGEPAGE_SIZE / PAGE_SIZE);
			start_pfn = pfn;
		} else {
			pfn += (EHEA_SECTSIZE / PAGE_SIZE);
		}
	}

	/* Add mem found behind the hugepage(s) */
	nr_pages = pfn - start_pfn;
	return ehea_update_busmap(start_pfn, nr_pages, EHEA_BUSMAP_ADD_SECT);
}

int ehea_create_busmap(void)
{
	int ret;

	mutex_lock(&ehea_busmap_mutex);
	ehea_mr_len = 0;
	ret = walk_system_ram_range(0, 1ULL << MAX_PHYSMEM_BITS, NULL,
				    ehea_create_busmap_callback);
	mutex_unlock(&ehea_busmap_mutex);
	return ret;
}

void ehea_destroy_busmap(void)
{
	int top, dir;

	mutex_lock(&ehea_busmap_mutex);
	if (!ehea_bmap)
		goto out_destroy;

	for (top = 0; top < EHEA_MAP_ENTRIES; top++) {
		if (!ehea_bmap->top[top])
			continue;

		for (dir = 0; dir < EHEA_MAP_ENTRIES; dir++) {
			if (!ehea_bmap->top[top]->dir[dir])
				continue;

			kfree(ehea_bmap->top[top]->dir[dir]);
		}

		kfree(ehea_bmap->top[top]);
	}

	kfree(ehea_bmap);
	ehea_bmap = NULL;
out_destroy:
	mutex_unlock(&ehea_busmap_mutex);
}
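
/*
 * Translate a kernel virtual address into the MR bus address space:
 * look up the section in the busmap and add the offset within the
 * section.  Returns EHEA_INVAL_ADDR if the section is not mapped.
 */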
u64 ehea_map_vaddr(void *caddr)
{
	int top, dir, idx;
	unsigned long index, offset;

	if (!ehea_bmap)
		return EHEA_INVAL_ADDR;

	index = __pa(caddr) >> SECTION_SIZE_BITS;
	top = (index >> EHEA_TOP_INDEX_SHIFT) & EHEA_INDEX_MASK;
	if (!ehea_bmap->top[top])
		return EHEA_INVAL_ADDR;

	dir = (index >> EHEA_DIR_INDEX_SHIFT) & EHEA_INDEX_MASK;
	if (!ehea_bmap->top[top]->dir[dir])
		return EHEA_INVAL_ADDR;

	idx = index & EHEA_INDEX_MASK;
	if (!ehea_bmap->top[top]->dir[dir]->ent[idx])
		return EHEA_INVAL_ADDR;

	offset = (unsigned long)caddr & (EHEA_SECTSIZE - 1);
	return ehea_bmap->top[top]->dir[dir]->ent[idx] | offset;
}

static inline void *ehea_calc_sectbase(int top, int dir, int idx)
{
	unsigned long ret = idx;

	ret |= dir << EHEA_DIR_INDEX_SHIFT;
	ret |= top << EHEA_TOP_INDEX_SHIFT;
	return __va(ret << SECTION_SIZE_BITS);
}
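
/*
 * Register one memory section with the hypervisor: fill the page table
 * page at pt with physical page addresses, EHEA_MAX_RPAGE at a time,
 * and hand each batch to ehea_h_register_rpage_mr.
 */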
static u64 ehea_reg_mr_section(int top, int dir, int idx, u64 *pt,
			       struct ehea_adapter *adapter,
			       struct ehea_mr *mr)
{
	void *pg;
	u64 j, m, hret = H_SUCCESS;
	unsigned long k = 0;
	u64 pt_abs = __pa(pt);
	void *sectbase = ehea_calc_sectbase(top, dir, idx);

	for (j = 0; j < (EHEA_PAGES_PER_SECTION / EHEA_MAX_RPAGE); j++) {
		for (m = 0; m < EHEA_MAX_RPAGE; m++) {
			pg = sectbase + ((k++) * EHEA_PAGESIZE);
			pt[m] = __pa(pg);
		}
		hret = ehea_h_register_rpage_mr(adapter->handle, mr->handle, 0,
						0, pt_abs, EHEA_MAX_RPAGE);

		if ((hret != H_SUCCESS) &&
		    (hret != H_PAGE_REGISTERED)) {
			ehea_h_free_resource(adapter->handle, mr->handle,
					     FORCE_FREE);
			pr_err("register_rpage_mr failed\n");
			return hret;
		}
	}
	return hret;
}

static u64 ehea_reg_mr_sections(int top, int dir, u64 *pt,
				struct ehea_adapter *adapter,
				struct ehea_mr *mr)
{
	u64 hret = H_SUCCESS;
	int idx;

	for (idx = 0; idx < EHEA_MAP_ENTRIES; idx++) {
		if (!ehea_bmap->top[top]->dir[dir]->ent[idx])
			continue;

		hret = ehea_reg_mr_section(top, dir, idx, pt, adapter, mr);
		if ((hret != H_SUCCESS) && (hret != H_PAGE_REGISTERED))
			return hret;
	}
	return hret;
}

static u64 ehea_reg_mr_dir_sections(int top, u64 *pt,
				    struct ehea_adapter *adapter,
				    struct ehea_mr *mr)
{
	u64 hret = H_SUCCESS;
	int dir;

	for (dir = 0; dir < EHEA_MAP_ENTRIES; dir++) {
		if (!ehea_bmap->top[top]->dir[dir])
			continue;

		hret = ehea_reg_mr_sections(top, dir, pt, adapter, mr);
		if ((hret != H_SUCCESS) && (hret != H_PAGE_REGISTERED))
			return hret;
	}
	return hret;
}
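
/*
 * Register all valid busmap sections as one kernel memory region
 * covering the bus address range [EHEA_BUSMAP_START,
 * EHEA_BUSMAP_START + ehea_mr_len).
 */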
int ehea_reg_kernel_mr(struct ehea_adapter *adapter, struct ehea_mr *mr)
{
	int ret;
	u64 *pt;
	u64 hret;
	u32 acc_ctrl = EHEA_MR_ACC_CTRL;
	unsigned long top;

	pt = (void *)get_zeroed_page(GFP_KERNEL);
	if (!pt) {
		pr_err("no mem\n");
		ret = -ENOMEM;
		goto out;
	}

	hret = ehea_h_alloc_resource_mr(adapter->handle, EHEA_BUSMAP_START,
					ehea_mr_len, acc_ctrl, adapter->pd,
					&mr->handle, &mr->lkey);

	if (hret != H_SUCCESS) {
		pr_err("alloc_resource_mr failed\n");
		ret = -EIO;
		goto out;
	}

	if (!ehea_bmap) {
		ehea_h_free_resource(adapter->handle, mr->handle, FORCE_FREE);
		pr_err("no busmap available\n");
		ret = -EIO;
		goto out;
	}

	for (top = 0; top < EHEA_MAP_ENTRIES; top++) {
		if (!ehea_bmap->top[top])
			continue;

		hret = ehea_reg_mr_dir_sections(top, pt, adapter, mr);
		if ((hret != H_PAGE_REGISTERED) && (hret != H_SUCCESS))
			break;
	}

	if (hret != H_SUCCESS) {
		ehea_h_free_resource(adapter->handle, mr->handle, FORCE_FREE);
		pr_err("registering mr failed\n");
		ret = -EIO;
		goto out;
	}

	mr->vaddr = EHEA_BUSMAP_START;
	mr->adapter = adapter;
	ret = 0;
out:
	free_page((unsigned long)pt);
	return ret;
}

int ehea_rem_mr(struct ehea_mr *mr)
{
	u64 hret;

	if (!mr || !mr->adapter)
		return -EINVAL;

	hret = ehea_h_free_resource(mr->adapter->handle, mr->handle,
				    FORCE_FREE);
	if (hret != H_SUCCESS) {
		pr_err("destroy MR failed\n");
		return -EIO;
	}

	return 0;
}

int ehea_gen_smr(struct ehea_adapter *adapter, struct ehea_mr *old_mr,
		 struct ehea_mr *shared_mr)
{
	u64 hret;

	hret = ehea_h_register_smr(adapter->handle, old_mr->handle,
				   old_mr->vaddr, EHEA_MR_ACC_CTRL,
				   adapter->pd, shared_mr);
	if (hret != H_SUCCESS)
		return -EIO;

	shared_mr->adapter = adapter;

	return 0;
}
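
/*
 * The error data block returned by the hypervisor is interpreted below
 * by fixed offsets: data[0] carries the length, data[1] the resource
 * handle, data[2] the resource type, data[6] the AER, and, for QPs,
 * data[12] the AERR and data[22] the port number.
 */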
static void print_error_data(u64 *data)
{
	int length;
	u64 type = EHEA_BMASK_GET(ERROR_DATA_TYPE, data[2]);
	u64 resource = data[1];

	length = EHEA_BMASK_GET(ERROR_DATA_LENGTH, data[0]);

	if (length > EHEA_PAGESIZE)
		length = EHEA_PAGESIZE;

	if (type == EHEA_AER_RESTYPE_QP)
		pr_err("QP (resource=%llX) state: AER=0x%llX, AERR=0x%llX, port=%llX\n",
		       resource, data[6], data[12], data[22]);
	else if (type == EHEA_AER_RESTYPE_CQ)
		pr_err("CQ (resource=%llX) state: AER=0x%llX\n",
		       resource, data[6]);
	else if (type == EHEA_AER_RESTYPE_EQ)
		pr_err("EQ (resource=%llX) state: AER=0x%llX\n",
		       resource, data[6]);

	ehea_dump(data, length, "error data");
}

u64 ehea_error_data(struct ehea_adapter *adapter, u64 res_handle,
		    u64 *aer, u64 *aerr)
{
	unsigned long ret;
	u64 *rblock;
	u64 type = 0;

	rblock = (void *)get_zeroed_page(GFP_KERNEL);
	if (!rblock) {
		pr_err("Cannot allocate rblock memory\n");
		goto out;
	}

	ret = ehea_h_error_data(adapter->handle, res_handle, rblock);

	if (ret == H_SUCCESS) {
		type = EHEA_BMASK_GET(ERROR_DATA_TYPE, rblock[2]);
		*aer = rblock[6];
		*aerr = rblock[12];
		print_error_data(rblock);
	} else if (ret == H_R_STATE) {
		pr_err("No error data available: %llX\n", res_handle);
	} else {
		pr_err("Error data could not be fetched: %llX\n", res_handle);
	}

	free_page((unsigned long)rblock);
out:
	return type;
}