/*
 * SN Platform GRU Driver
 *
 *              KERNEL SERVICES THAT USE THE GRU
 *
 *  Copyright (c) 2008 Silicon Graphics, Inc.  All Rights Reserved.
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, write to the Free Software
 *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 */

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/smp_lock.h>
#include <linux/spinlock.h>
#include <linux/device.h>
#include <linux/miscdevice.h>
#include <linux/proc_fs.h>
#include <linux/interrupt.h>
#include <linux/uaccess.h>
#include "gru.h"
#include "grulib.h"
#include "grutables.h"
#include "grukservices.h"
#include "gru_instructions.h"
#include <asm/uv/uv_hub.h>

/*
 * Kernel GRU Usage
 *
 * The following is an interim algorithm for management of kernel GRU
 * resources. This will likely be replaced when we better understand the
 * kernel/user requirements.
 *
 * At boot time, the kernel permanently reserves a fixed number of
 * CBRs/DSRs for each cpu to use. The resources are all taken from
 * the GRU chiplet 1 on the blade. This leaves the full set of resources
 * of chiplet 0 available to be allocated to a single user.
 */

/* Blade percpu resources PERMANENTLY reserved for kernel use */
#define GRU_NUM_KERNEL_CBR      1
#define GRU_NUM_KERNEL_DSR_BYTES 256
#define GRU_NUM_KERNEL_DSR_CL   (GRU_NUM_KERNEL_DSR_BYTES /             \
                                        GRU_CACHE_LINE_BYTES)
#define KERNEL_CTXNUM           15

/* GRU instruction attributes for all instructions */
#define IMA                     IMA_CB_DELAY

/* GRU cacheline size is always 64 bytes - even on arches with 128 byte lines */
#define __gru_cacheline_aligned__                               \
        __attribute__((__aligned__(GRU_CACHE_LINE_BYTES)))

#define MAGIC   0x1234567887654321UL

/* Default retry count for GRU errors on kernel instructions */
#define EXCEPTION_RETRY_LIMIT   3

/* Status of message queue sections */
#define MQS_EMPTY               0
#define MQS_FULL                1
#define MQS_NOOP                2

/*----------------- RESOURCE MANAGEMENT -------------------------------------*/

/* optimized for x86_64 */
struct message_queue {
        union gru_mesqhead      head __gru_cacheline_aligned__; /* CL 0 */
        int                     qlines;                         /* DW 1 */
        long                    hstatus[2];
        void                    *next __gru_cacheline_aligned__;/* CL 1 */
        void                    *limit;
        void                    *start;
        void                    *start2;
        char                    data ____cacheline_aligned;     /* CL 2 */
};

/* First word in every message - used by mesq interface */
struct message_header {
        char    present;
        char    present2;
        char    lines;
        char    fill;
};

#define HSTATUS(mq, h)  ((mq) + offsetof(struct message_queue, hstatus[h]))

static int gru_get_cpu_resources(int dsr_bytes, void **cb, void **dsr)
{
        struct gru_blade_state *bs;
        int lcpu;

        BUG_ON(dsr_bytes > GRU_NUM_KERNEL_DSR_BYTES);
        preempt_disable();
        bs = gru_base[uv_numa_blade_id()];
        lcpu = uv_blade_processor_id();
        *cb = bs->kernel_cb + lcpu * GRU_HANDLE_STRIDE;
        *dsr = bs->kernel_dsr + lcpu * GRU_NUM_KERNEL_DSR_BYTES;
        return 0;
}

static void gru_free_cpu_resources(void *cb, void *dsr)
{
        preempt_enable();
}
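
/*
 * Usage sketch (illustrative only, not part of the driver): the kernel
 * services below all follow the same pattern - grab the calling cpu's
 * reserved CB/DSR pair, issue GRU instructions, wait for completion,
 * then release the resources (which re-enables preemption).  The helper
 * name and the gpa argument here are hypothetical.
 *
 *      static int example_store_word(unsigned long gpa, unsigned long val)
 *      {
 *              void *cb, *dsr;
 *              int ret;
 *
 *              if (gru_get_cpu_resources(sizeof(val), &cb, &dsr))
 *                      return MQE_BUG_NO_RESOURCES;
 *              *(unsigned long *)dsr = val;
 *              gru_vstore(cb, gpa, gru_get_tri(dsr), XTYPE_DW, 1, 1, IMA);
 *              ret = gru_wait(cb);
 *              gru_free_cpu_resources(cb, dsr);
 *              return ret;
 *      }
 */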
int gru_get_cb_exception_detail(void *cb,
                struct control_block_extended_exc_detail *excdet)
{
        struct gru_control_block_extended *cbe;

        cbe = get_cbe(GRUBASE(cb), get_cb_number(cb));
        prefetchw(cbe);         /* Harmless on hardware, required for emulator */
        excdet->opc = cbe->opccpy;
        excdet->exopc = cbe->exopccpy;
        excdet->ecause = cbe->ecause;
        excdet->exceptdet0 = cbe->idef1upd;
        excdet->exceptdet1 = cbe->idef3upd;
        return 0;
}

char *gru_get_cb_exception_detail_str(int ret, void *cb,
                                      char *buf, int size)
{
        struct gru_control_block_status *gen = (void *)cb;
        struct control_block_extended_exc_detail excdet;

        if (ret > 0 && gen->istatus == CBS_EXCEPTION) {
                gru_get_cb_exception_detail(cb, &excdet);
                snprintf(buf, size,
                        "GRU exception: cb %p, opc %d, exopc %d, ecause 0x%x,"
                        "excdet0 0x%lx, excdet1 0x%x",
                        gen, excdet.opc, excdet.exopc, excdet.ecause,
                        excdet.exceptdet0, excdet.exceptdet1);
        } else {
                snprintf(buf, size, "No exception");
        }
        return buf;
}

static int gru_wait_idle_or_exception(struct gru_control_block_status *gen)
{
        while (gen->istatus >= CBS_ACTIVE) {
                cpu_relax();
                barrier();
        }
        return gen->istatus;
}

static int gru_retry_exception(void *cb)
{
        struct gru_control_block_status *gen = (void *)cb;
        struct control_block_extended_exc_detail excdet;
        int retry = EXCEPTION_RETRY_LIMIT;

        while (1) {
                if (gru_get_cb_message_queue_substatus(cb))
                        break;
                if (gru_wait_idle_or_exception(gen) == CBS_IDLE)
                        return CBS_IDLE;

                gru_get_cb_exception_detail(cb, &excdet);
                if (excdet.ecause & ~EXCEPTION_RETRY_BITS)
                        break;
                if (retry-- == 0)
                        break;
                gen->icmd = 1;
                gru_flush_cache(gen);
        }
        return CBS_EXCEPTION;
}

int gru_check_status_proc(void *cb)
{
        struct gru_control_block_status *gen = (void *)cb;
        int ret;

        ret = gen->istatus;
        if (ret != CBS_EXCEPTION)
                return ret;
        return gru_retry_exception(cb);
}

int gru_wait_proc(void *cb)
{
        struct gru_control_block_status *gen = (void *)cb;
        int ret;

        ret = gru_wait_idle_or_exception(gen);
        if (ret == CBS_EXCEPTION)
                ret = gru_retry_exception(cb);

        return ret;
}

void gru_abort(int ret, void *cb, char *str)
{
        char buf[GRU_EXC_STR_SIZE];

        panic("GRU FATAL ERROR: %s - %s\n", str,
              gru_get_cb_exception_detail_str(ret, cb, buf, sizeof(buf)));
}

void gru_wait_abort_proc(void *cb)
{
        int ret;

        ret = gru_wait_proc(cb);
        if (ret)
                gru_abort(ret, cb, "gru_wait_abort");
}

/*------------------------------ MESSAGE QUEUES -----------------------------*/

/* Internal status. These are NOT returned to the user. */
#define MQIE_AGAIN              -1      /* try again */

/*
 * Save/restore the "present" flag that is in the second line of 2-line
 * messages
 */
static inline int get_present2(void *p)
{
        struct message_header *mhdr = p + GRU_CACHE_LINE_BYTES;
        return mhdr->present;
}

static inline void restore_present2(void *p, int val)
{
        struct message_header *mhdr = p + GRU_CACHE_LINE_BYTES;
        mhdr->present = val;
}

/*
 * Create a message queue.
 *      qlines - message queue size in cache lines. Includes 2-line header.
 */
int gru_create_message_queue(struct gru_message_queue_desc *mqd,
                void *p, unsigned int bytes, int nasid, int vector, int apicid)
{
        struct message_queue *mq = p;
        unsigned int qlines;

        qlines = bytes / GRU_CACHE_LINE_BYTES - 2;
        memset(mq, 0, bytes);
        mq->start = &mq->data;
        mq->start2 = &mq->data + (qlines / 2 - 1) * GRU_CACHE_LINE_BYTES;
        mq->next = &mq->data;
        mq->limit = &mq->data + (qlines - 2) * GRU_CACHE_LINE_BYTES;
        mq->qlines = qlines;
        mq->hstatus[0] = 0;
        mq->hstatus[1] = 1;
        mq->head = gru_mesq_head(2, qlines / 2 + 1);
        mqd->mq = mq;
        mqd->mq_gpa = uv_gpa(mq);
        mqd->qlines = qlines;
        mqd->interrupt_pnode = UV_NASID_TO_PNODE(nasid);
        mqd->interrupt_vector = vector;
        mqd->interrupt_apicid = apicid;
        return 0;
}
EXPORT_SYMBOL_GPL(gru_create_message_queue);
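
/*
 * Example (illustrative sketch only): a receiver might set up a message
 * queue like this.  The buffer size and the nasid/vector/apicid values
 * are hypothetical and come from the caller; passing vector == 0 simply
 * disables the explicit cross-partition interrupt.
 *
 *      struct gru_message_queue_desc mqd;
 *      unsigned int qsize = 64 * GRU_CACHE_LINE_BYTES;
 *      void *qbuf = kzalloc(qsize, GFP_KERNEL);
 *
 *      if (!qbuf)
 *              return -ENOMEM;
 *      gru_create_message_queue(&mqd, qbuf, qsize, nasid, vector, apicid);
 */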
/*
 * Send a NOOP message to a message queue
 * Returns:
 *       0 - if queue is full after the send. This is the normal case
 *           but various races can change this.
 *      -1 - if mesq sent successfully but queue not full
 *      >0 - unexpected error. MQE_xxx returned
 */
static int send_noop_message(void *cb, struct gru_message_queue_desc *mqd,
                                void *mesg)
{
        const struct message_header noop_header = {
                                        .present = MQS_NOOP, .lines = 1};
        unsigned long m;
        int substatus, ret;
        struct message_header save_mhdr, *mhdr = mesg;

        STAT(mesq_noop);
        save_mhdr = *mhdr;
        *mhdr = noop_header;
        gru_mesq(cb, mqd->mq_gpa, gru_get_tri(mhdr), 1, IMA);
        ret = gru_wait(cb);

        if (ret) {
                substatus = gru_get_cb_message_queue_substatus(cb);
                switch (substatus) {
                case CBSS_NO_ERROR:
                        STAT(mesq_noop_unexpected_error);
                        ret = MQE_UNEXPECTED_CB_ERR;
                        break;
                case CBSS_LB_OVERFLOWED:
                        STAT(mesq_noop_lb_overflow);
                        ret = MQE_CONGESTION;
                        break;
                case CBSS_QLIMIT_REACHED:
                        STAT(mesq_noop_qlimit_reached);
                        ret = 0;
                        break;
                case CBSS_AMO_NACKED:
                        STAT(mesq_noop_amo_nacked);
                        ret = MQE_CONGESTION;
                        break;
                case CBSS_PUT_NACKED:
                        STAT(mesq_noop_put_nacked);
                        m = mqd->mq_gpa + (gru_get_amo_value_head(cb) << 6);
                        gru_vstore(cb, m, gru_get_tri(mesg), XTYPE_CL, 1, 1,
                                                IMA);
                        if (gru_wait(cb) == CBS_IDLE)
                                ret = MQIE_AGAIN;
                        else
                                ret = MQE_UNEXPECTED_CB_ERR;
                        break;
                case CBSS_PAGE_OVERFLOW:
                default:
                        BUG();
                }
        }
        *mhdr = save_mhdr;
        return ret;
}
/*
 * Handle a gru_mesq full.
 */
static int send_message_queue_full(void *cb, struct gru_message_queue_desc *mqd,
                                void *mesg, int lines)
{
        union gru_mesqhead mqh;
        unsigned int limit, head;
        unsigned long avalue;
        int half, qlines;

        /* Determine if switching to first/second half of q */
        avalue = gru_get_amo_value(cb);
        head = gru_get_amo_value_head(cb);
        limit = gru_get_amo_value_limit(cb);
        qlines = mqd->qlines;
        half = (limit != qlines);

        if (half)
                mqh = gru_mesq_head(qlines / 2 + 1, qlines);
        else
                mqh = gru_mesq_head(2, qlines / 2 + 1);

        /* Try to get lock for switching head pointer */
        gru_gamir(cb, EOP_IR_CLR, HSTATUS(mqd->mq_gpa, half), XTYPE_DW, IMA);
        if (gru_wait(cb) != CBS_IDLE)
                goto cberr;
        if (!gru_get_amo_value(cb)) {
                STAT(mesq_qf_locked);
                return MQE_QUEUE_FULL;
        }

        /* Got the lock. Send an optional NOP if the queue is not full. */
        if (head != limit) {
                if (send_noop_message(cb, mqd, mesg)) {
                        gru_gamir(cb, EOP_IR_INC, HSTATUS(mqd->mq_gpa, half),
                                        XTYPE_DW, IMA);
                        if (gru_wait(cb) != CBS_IDLE)
                                goto cberr;
                        STAT(mesq_qf_noop_not_full);
                        return MQIE_AGAIN;
                }
                avalue++;
        }

        /* Then flip queuehead to other half of queue. */
        gru_gamer(cb, EOP_ERR_CSWAP, mqd->mq_gpa, XTYPE_DW, mqh.val, avalue,
                                                        IMA);
        if (gru_wait(cb) != CBS_IDLE)
                goto cberr;

        /* If the queue head was not swapped successfully, clear the hstatus lock */
        if (gru_get_amo_value(cb) != avalue) {
                STAT(mesq_qf_switch_head_failed);
                gru_gamir(cb, EOP_IR_INC, HSTATUS(mqd->mq_gpa, half), XTYPE_DW,
                                IMA);
                if (gru_wait(cb) != CBS_IDLE)
                        goto cberr;
        }
        return MQIE_AGAIN;
cberr:
        STAT(mesq_qf_unexpected_error);
        return MQE_UNEXPECTED_CB_ERR;
}
/*
 * Send a cross-partition interrupt to the SSI that contains the target
 * message queue. Normally, the interrupt is automatically delivered by hardware
 * but some error conditions require explicit delivery.
 */
static void send_message_queue_interrupt(struct gru_message_queue_desc *mqd)
{
        if (mqd->interrupt_vector)
                uv_hub_send_ipi(mqd->interrupt_pnode, mqd->interrupt_apicid,
                                mqd->interrupt_vector);
}

/*
 * Handle a gru_mesq failure. Some of these failures are software recoverable
 * or retryable.
 */
static int send_message_failure(void *cb, struct gru_message_queue_desc *mqd,
                                void *mesg, int lines)
{
        int substatus, ret = 0;
        unsigned long m;

        substatus = gru_get_cb_message_queue_substatus(cb);
        switch (substatus) {
        case CBSS_NO_ERROR:
                STAT(mesq_send_unexpected_error);
                ret = MQE_UNEXPECTED_CB_ERR;
                break;
        case CBSS_LB_OVERFLOWED:
                STAT(mesq_send_lb_overflow);
                ret = MQE_CONGESTION;
                break;
        case CBSS_QLIMIT_REACHED:
                STAT(mesq_send_qlimit_reached);
                ret = send_message_queue_full(cb, mqd, mesg, lines);
                break;
        case CBSS_AMO_NACKED:
                STAT(mesq_send_amo_nacked);
                ret = MQE_CONGESTION;
                break;
        case CBSS_PUT_NACKED:
                STAT(mesq_send_put_nacked);
                m = mqd->mq_gpa + (gru_get_amo_value_head(cb) << 6);
                gru_vstore(cb, m, gru_get_tri(mesg), XTYPE_CL, lines, 1, IMA);
                if (gru_wait(cb) == CBS_IDLE) {
                        ret = MQE_OK;
                        send_message_queue_interrupt(mqd);
                } else {
                        ret = MQE_UNEXPECTED_CB_ERR;
                }
                break;
        default:
                BUG();
        }
        return ret;
}
/*
 * Send a message to a message queue
 *      mqd     message queue descriptor
 *      mesg    message. must be vaddr within a GSEG
 *      bytes   message size (<= 2 CL)
 */
int gru_send_message_gpa(struct gru_message_queue_desc *mqd, void *mesg,
                          unsigned int bytes)
{
        struct message_header *mhdr;
        void *cb;
        void *dsr;
        int istatus, clines, ret;

        STAT(mesq_send);
        BUG_ON(bytes < sizeof(int) || bytes > 2 * GRU_CACHE_LINE_BYTES);

        clines = DIV_ROUND_UP(bytes, GRU_CACHE_LINE_BYTES);
        if (gru_get_cpu_resources(bytes, &cb, &dsr))
                return MQE_BUG_NO_RESOURCES;
        memcpy(dsr, mesg, bytes);
        mhdr = dsr;
        mhdr->present = MQS_FULL;
        mhdr->lines = clines;
        if (clines == 2) {
                mhdr->present2 = get_present2(mhdr);
                restore_present2(mhdr, MQS_FULL);
        }

        do {
                ret = MQE_OK;
                gru_mesq(cb, mqd->mq_gpa, gru_get_tri(mhdr), clines, IMA);
                istatus = gru_wait(cb);
                if (istatus != CBS_IDLE)
                        ret = send_message_failure(cb, mqd, dsr, clines);
        } while (ret == MQIE_AGAIN);
        gru_free_cpu_resources(cb, dsr);

        if (ret)
                STAT(mesq_send_failed);
        return ret;
}
EXPORT_SYMBOL_GPL(gru_send_message_gpa);
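
/*
 * Example (illustrative sketch only): sending a one-cacheline message to
 * the queue described by mqd above.  msg is a hypothetical message buffer;
 * its first word is used for the struct message_header on the queue, so
 * callers place their payload after that word.
 *
 *      int ret;
 *
 *      ret = gru_send_message_gpa(&mqd, msg, GRU_CACHE_LINE_BYTES);
 *      if (ret != MQE_OK)
 *              printk(KERN_ERR "mesq send failed: %d\n", ret);
 */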
/*
 * Advance the receive pointer for the queue to the next message.
 */
void gru_free_message(struct gru_message_queue_desc *mqd, void *mesg)
{
        struct message_queue *mq = mqd->mq;
        struct message_header *mhdr = mq->next;
        void *next, *pnext;
        int half = -1;
        int lines = mhdr->lines;

        if (lines == 2)
                restore_present2(mhdr, MQS_EMPTY);
        mhdr->present = MQS_EMPTY;

        pnext = mq->next;
        next = pnext + GRU_CACHE_LINE_BYTES * lines;
        if (next == mq->limit) {
                next = mq->start;
                half = 1;
        } else if (pnext < mq->start2 && next >= mq->start2) {
                half = 0;
        }

        if (half >= 0)
                mq->hstatus[half] = 1;
        mq->next = next;
}
EXPORT_SYMBOL_GPL(gru_free_message);
/*
 * Get next message from message queue. Return NULL if no message
 * present. User must call gru_free_message() to move to the next message.
 *      mqd     message queue descriptor
 */
void *gru_get_next_message(struct gru_message_queue_desc *mqd)
{
        struct message_queue *mq = mqd->mq;
        struct message_header *mhdr = mq->next;
        int present = mhdr->present;

        /* skip NOOP messages */
        STAT(mesq_receive);
        while (present == MQS_NOOP) {
                gru_free_message(mqd, mhdr);
                mhdr = mq->next;
                present = mhdr->present;
        }

        /* Wait for both halves of 2 line messages */
        if (present == MQS_FULL && mhdr->lines == 2 &&
                                get_present2(mhdr) == MQS_EMPTY)
                present = MQS_EMPTY;

        if (!present) {
                STAT(mesq_receive_none);
                return NULL;
        }

        if (mhdr->lines == 2)
                restore_present2(mhdr, mhdr->present2);

        return mhdr;
}
EXPORT_SYMBOL_GPL(gru_get_next_message);
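
/*
 * Example (illustrative sketch only): draining the queue on the receive
 * side, e.g. from the handler for mqd->interrupt_vector.  The
 * process_message() helper is hypothetical.
 *
 *      void *mesg;
 *
 *      while ((mesg = gru_get_next_message(&mqd)) != NULL) {
 *              process_message(mesg);
 *              gru_free_message(&mqd, mesg);
 *      }
 */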
/* ---------------------- GRU DATA COPY FUNCTIONS ---------------------------*/

/*
 * Copy a block of data using the GRU resources
 */
int gru_copy_gpa(unsigned long dest_gpa, unsigned long src_gpa,
                                unsigned int bytes)
{
        void *cb;
        void *dsr;
        int ret;

        STAT(copy_gpa);
        if (gru_get_cpu_resources(GRU_NUM_KERNEL_DSR_BYTES, &cb, &dsr))
                return MQE_BUG_NO_RESOURCES;
        gru_bcopy(cb, src_gpa, dest_gpa, gru_get_tri(dsr),
                  XTYPE_B, bytes, GRU_NUM_KERNEL_DSR_CL, IMA);
        ret = gru_wait(cb);
        gru_free_cpu_resources(cb, dsr);
        return ret;
}
EXPORT_SYMBOL_GPL(gru_copy_gpa);
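
/*
 * Example (illustrative sketch only): copying a page between two kernel
 * buffers via their UV global physical addresses.  src and dst are
 * hypothetical page-sized kernel buffers.
 *
 *      int ret;
 *
 *      ret = gru_copy_gpa(uv_gpa(dst), uv_gpa(src), PAGE_SIZE);
 *      if (ret)
 *              printk(KERN_ERR "GRU bcopy failed: %d\n", ret);
 */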
/* ------------------- KERNEL QUICKTESTS RUN AT STARTUP ----------------*/
/* Temp - will delete after we gain confidence in the GRU */
static __cacheline_aligned unsigned long word0;
static __cacheline_aligned unsigned long word1;

static int quicktest(struct gru_state *gru)
{
        void *cb;
        void *ds;
        unsigned long *p;

        cb = get_gseg_base_address_cb(gru->gs_gru_base_vaddr, KERNEL_CTXNUM, 0);
        ds = get_gseg_base_address_ds(gru->gs_gru_base_vaddr, KERNEL_CTXNUM, 0);
        p = ds;
        word0 = MAGIC;

        gru_vload(cb, uv_gpa(&word0), 0, XTYPE_DW, 1, 1, IMA);
        if (gru_wait(cb) != CBS_IDLE)
                BUG();

        if (*(unsigned long *)ds != MAGIC)
                BUG();
        gru_vstore(cb, uv_gpa(&word1), 0, XTYPE_DW, 1, 1, IMA);
        if (gru_wait(cb) != CBS_IDLE)
                BUG();

        if (word0 != word1 || word0 != MAGIC) {
                printk("GRU quicktest err: gid %d, found 0x%lx, expected 0x%lx\n",
                       gru->gs_gid, word1, MAGIC);
                BUG();          /* ZZZ should not be fatal */
        }

        return 0;
}
int gru_kservices_init(struct gru_state *gru)
{
        struct gru_blade_state *bs;
        struct gru_context_configuration_handle *cch;
        unsigned long cbr_map, dsr_map;
        int err, num, cpus_possible;

        /*
         * Currently, resources are reserved ONLY on the second chiplet
         * on each blade. This leaves ALL resources on chiplet 0 available
         * for user code.
         */
        bs = gru->gs_blade;
        if (gru != &bs->bs_grus[1])
                return 0;

        cpus_possible = uv_blade_nr_possible_cpus(gru->gs_blade_id);

        num = GRU_NUM_KERNEL_CBR * cpus_possible;
        cbr_map = gru_reserve_cb_resources(gru, GRU_CB_COUNT_TO_AU(num), NULL);
        gru->gs_reserved_cbrs += num;

        num = GRU_NUM_KERNEL_DSR_BYTES * cpus_possible;
        dsr_map = gru_reserve_ds_resources(gru, GRU_DS_BYTES_TO_AU(num), NULL);
        gru->gs_reserved_dsr_bytes += num;

        gru->gs_active_contexts++;
        __set_bit(KERNEL_CTXNUM, &gru->gs_context_map);
        cch = get_cch(gru->gs_gru_base_vaddr, KERNEL_CTXNUM);

        bs->kernel_cb = get_gseg_base_address_cb(gru->gs_gru_base_vaddr,
                                        KERNEL_CTXNUM, 0);
        bs->kernel_dsr = get_gseg_base_address_ds(gru->gs_gru_base_vaddr,
                                        KERNEL_CTXNUM, 0);

        lock_cch_handle(cch);
        cch->tfm_fault_bit_enable = 0;
        cch->tlb_int_enable = 0;
        cch->tfm_done_bit_enable = 0;
        cch->unmap_enable = 1;
        err = cch_allocate(cch, 0, 0, cbr_map, dsr_map);
        if (err) {
                gru_dbg(grudev,
                        "Unable to allocate kernel CCH: gid %d, err %d\n",
                        gru->gs_gid, err);
                BUG();
        }
        err = cch_start(cch);
        if (err) {
                gru_dbg(grudev, "Unable to start kernel CCH: gid %d, err %d\n",
                        gru->gs_gid, err);
                BUG();
        }
        unlock_cch_handle(cch);

        if (gru_options & GRU_QUICKLOOK)
                quicktest(gru);
        return 0;
}
void gru_kservices_exit(struct gru_state *gru)
{
        struct gru_context_configuration_handle *cch;
        struct gru_blade_state *bs;

        bs = gru->gs_blade;
        if (gru != &bs->bs_grus[1])
                return;

        cch = get_cch(gru->gs_gru_base_vaddr, KERNEL_CTXNUM);
        lock_cch_handle(cch);
        if (cch_interrupt_sync(cch))
                BUG();
        if (cch_deallocate(cch))
                BUG();
        unlock_cch_handle(cch);
}