grukservices.c

/*
 * SN Platform GRU Driver
 *
 *		KERNEL SERVICES THAT USE THE GRU
 *
 * Copyright (c) 2008 Silicon Graphics, Inc. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/smp_lock.h>
#include <linux/spinlock.h>
#include <linux/device.h>
#include <linux/miscdevice.h>
#include <linux/proc_fs.h>
#include <linux/interrupt.h>
#include <linux/uaccess.h>
#include "gru.h"
#include "grulib.h"
#include "grutables.h"
#include "grukservices.h"
#include "gru_instructions.h"
#include <asm/uv/uv_hub.h>

/*
 * Kernel GRU Usage
 *
 * The following is an interim algorithm for management of kernel GRU
 * resources. This will likely be replaced when we better understand the
 * kernel/user requirements.
 *
 * At boot time, the kernel permanently reserves a fixed number of
 * CBRs/DSRs for each cpu to use. The resources are all taken from
 * the GRU chiplet 1 on the blade. This leaves the full set of resources
 * of chiplet 0 available to be allocated to a single user.
 */

/* Blade percpu resources PERMANENTLY reserved for kernel use */
#define GRU_NUM_KERNEL_CBR	1
#define GRU_NUM_KERNEL_DSR_BYTES 256
#define KERNEL_CTXNUM		15

/* GRU instruction attributes for all instructions */
#define IMA			IMA_CB_DELAY

/* GRU cacheline size is always 64 bytes - even on arches with 128 byte lines */
#define __gru_cacheline_aligned__			\
	__attribute__((__aligned__(GRU_CACHE_LINE_BYTES)))

#define MAGIC	0x1234567887654321UL

/* Default retry count for GRU errors on kernel instructions */
#define EXCEPTION_RETRY_LIMIT	3

/* Status of message queue sections */
#define MQS_EMPTY		0
#define MQS_FULL		1
#define MQS_NOOP		2

/*----------------- RESOURCE MANAGEMENT -------------------------------------*/

/* optimized for x86_64 */
struct message_queue {
	union gru_mesqhead	head __gru_cacheline_aligned__;	/* CL 0 */
	int			qlines;				/* DW 1 */
	long			hstatus[2];
	void			*next __gru_cacheline_aligned__;/* CL 1 */
	void			*limit;
	void			*start;
	void			*start2;
	char			data ____cacheline_aligned;	/* CL 2 */
};

/* First word in every message - used by mesq interface */
struct message_header {
	char	present;
	char	present2;
	char	lines;
	char	fill;
};

#define QLINES(mq)	((mq) + offsetof(struct message_queue, qlines))
#define HSTATUS(mq, h)	((mq) + offsetof(struct message_queue, hstatus[h]))

static int gru_get_cpu_resources(int dsr_bytes, void **cb, void **dsr)
{
	struct gru_blade_state *bs;
	int lcpu;

	BUG_ON(dsr_bytes > GRU_NUM_KERNEL_DSR_BYTES);
	preempt_disable();
	bs = gru_base[uv_numa_blade_id()];
	lcpu = uv_blade_processor_id();
	*cb = bs->kernel_cb + lcpu * GRU_HANDLE_STRIDE;
	*dsr = bs->kernel_dsr + lcpu * GRU_NUM_KERNEL_DSR_BYTES;
	return 0;
}

static void gru_free_cpu_resources(void *cb, void *dsr)
{
	preempt_enable();
}

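/*
 * gru_get_cpu_resources() leaves preemption disabled so that the calling
 * cpu stays bound to its reserved kernel CBR/DSR; gru_free_cpu_resources()
 * re-enables it. Callers must treat the two as a bracketing pair around
 * their GRU work, as every user of the kernel resources in this file does.
 */
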
int gru_get_cb_exception_detail(void *cb,
		struct control_block_extended_exc_detail *excdet)
{
	struct gru_control_block_extended *cbe;

	cbe = get_cbe(GRUBASE(cb), get_cb_number(cb));
	prefetchw(cbe);		/* Harmless on hardware, required for emulator */
	excdet->opc = cbe->opccpy;
	excdet->exopc = cbe->exopccpy;
	excdet->ecause = cbe->ecause;
	excdet->exceptdet0 = cbe->idef1upd;
	excdet->exceptdet1 = cbe->idef3upd;
	return 0;
}

char *gru_get_cb_exception_detail_str(int ret, void *cb,
				      char *buf, int size)
{
	struct gru_control_block_status *gen = (void *)cb;
	struct control_block_extended_exc_detail excdet;

	if (ret > 0 && gen->istatus == CBS_EXCEPTION) {
		gru_get_cb_exception_detail(cb, &excdet);
		snprintf(buf, size,
			 "GRU exception: cb %p, opc %d, exopc %d, ecause 0x%x, "
			 "excdet0 0x%lx, excdet1 0x%x",
			 gen, excdet.opc, excdet.exopc, excdet.ecause,
			 excdet.exceptdet0, excdet.exceptdet1);
	} else {
		snprintf(buf, size, "No exception");
	}
	return buf;
}

static int gru_wait_idle_or_exception(struct gru_control_block_status *gen)
{
	while (gen->istatus >= CBS_ACTIVE) {
		cpu_relax();
		barrier();
	}
	return gen->istatus;
}

static int gru_retry_exception(void *cb)
{
	struct gru_control_block_status *gen = (void *)cb;
	struct control_block_extended_exc_detail excdet;
	int retry = EXCEPTION_RETRY_LIMIT;

	while (1) {
		if (gru_get_cb_message_queue_substatus(cb))
			break;
		if (gru_wait_idle_or_exception(gen) == CBS_IDLE)
			return CBS_IDLE;
		gru_get_cb_exception_detail(cb, &excdet);
		if (excdet.ecause & ~EXCEPTION_RETRY_BITS)
			break;
		if (retry-- == 0)
			break;
		gen->icmd = 1;
		gru_flush_cache(gen);
	}
	return CBS_EXCEPTION;
}

int gru_check_status_proc(void *cb)
{
	struct gru_control_block_status *gen = (void *)cb;
	int ret;

	ret = gen->istatus;
	if (ret != CBS_EXCEPTION)
		return ret;
	return gru_retry_exception(cb);
}

int gru_wait_proc(void *cb)
{
	struct gru_control_block_status *gen = (void *)cb;
	int ret;

	ret = gru_wait_idle_or_exception(gen);
	if (ret == CBS_EXCEPTION)
		ret = gru_retry_exception(cb);
	return ret;
}

void gru_abort(int ret, void *cb, char *str)
{
	char buf[GRU_EXC_STR_SIZE];

	panic("GRU FATAL ERROR: %s - %s\n", str,
	      gru_get_cb_exception_detail_str(ret, cb, buf, sizeof(buf)));
}

void gru_wait_abort_proc(void *cb)
{
	int ret;

	ret = gru_wait_proc(cb);
	if (ret)
		gru_abort(ret, cb, "gru_wait_abort");
}

/*------------------------------ MESSAGE QUEUES -----------------------------*/

/* Internal status. These are NOT returned to the user. */
#define MQIE_AGAIN		-1	/* try again */

/*
 * Save/restore the "present" flag that is in the second line of 2-line
 * messages
 */
static inline int get_present2(void *p)
{
	struct message_header *mhdr = p + GRU_CACHE_LINE_BYTES;
	return mhdr->present;
}

static inline void restore_present2(void *p, int val)
{
	struct message_header *mhdr = p + GRU_CACHE_LINE_BYTES;
	mhdr->present = val;
}

/*
 * Create a message queue.
 *	qlines - message queue size in cache lines. Includes 2-line header.
 */
int gru_create_message_queue(void *p, unsigned int bytes)
{
	struct message_queue *mq = p;
	unsigned int qlines;

	qlines = bytes / GRU_CACHE_LINE_BYTES - 2;
	memset(mq, 0, bytes);
	mq->start = &mq->data;
	mq->start2 = &mq->data + (qlines / 2 - 1) * GRU_CACHE_LINE_BYTES;
	mq->next = &mq->data;
	mq->limit = &mq->data + (qlines - 2) * GRU_CACHE_LINE_BYTES;
	mq->qlines = qlines;
	mq->hstatus[0] = 0;
	mq->hstatus[1] = 1;
	mq->head = gru_mesq_head(2, qlines / 2 + 1);
	return 0;
}
EXPORT_SYMBOL_GPL(gru_create_message_queue);

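/*
 * Example (illustrative sketch, not used by this driver): a caller could
 * carve a message queue out of a single page of kernel memory. The buffer
 * must be cacheline aligned and large enough for the 2-line header plus
 * the message area; a page allocation satisfies both.
 *
 *	void *qmem = (void *)__get_free_page(GFP_KERNEL);
 *
 *	if (qmem)
 *		gru_create_message_queue(qmem, PAGE_SIZE);
 */
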
/*
 * Send a NOOP message to a message queue
 * Returns:
 *	 0 - if queue is full after the send. This is the normal case
 *	     but various races can change this.
 *	-1 - if mesq sent successfully but queue not full
 *	>0 - unexpected error. MQE_xxx returned
 */
static int send_noop_message(void *cb,
				unsigned long mq, void *mesg)
{
	const struct message_header noop_header = {
					.present = MQS_NOOP, .lines = 1};
	unsigned long m;
	int substatus, ret;
	struct message_header save_mhdr, *mhdr = mesg;

	STAT(mesq_noop);
	save_mhdr = *mhdr;
	*mhdr = noop_header;
	gru_mesq(cb, mq, gru_get_tri(mhdr), 1, IMA);
	ret = gru_wait(cb);

	if (ret) {
		substatus = gru_get_cb_message_queue_substatus(cb);
		switch (substatus) {
		case CBSS_NO_ERROR:
			STAT(mesq_noop_unexpected_error);
			ret = MQE_UNEXPECTED_CB_ERR;
			break;
		case CBSS_LB_OVERFLOWED:
			STAT(mesq_noop_lb_overflow);
			ret = MQE_CONGESTION;
			break;
		case CBSS_QLIMIT_REACHED:
			STAT(mesq_noop_qlimit_reached);
			ret = 0;
			break;
		case CBSS_AMO_NACKED:
			STAT(mesq_noop_amo_nacked);
			ret = MQE_CONGESTION;
			break;
		case CBSS_PUT_NACKED:
			STAT(mesq_noop_put_nacked);
			m = mq + (gru_get_amo_value_head(cb) << 6);
			gru_vstore(cb, m, gru_get_tri(mesg), XTYPE_CL, 1, 1,
						IMA);
			if (gru_wait(cb) == CBS_IDLE)
				ret = MQIE_AGAIN;
			else
				ret = MQE_UNEXPECTED_CB_ERR;
			break;
		case CBSS_PAGE_OVERFLOW:
		default:
			BUG();
		}
	}
	*mhdr = save_mhdr;
	return ret;
}

/*
 * Handle a gru_mesq full.
 */
static int send_message_queue_full(void *cb,
			   unsigned long mq, void *mesg, int lines)
{
	union gru_mesqhead mqh;
	unsigned int limit, head;
	unsigned long avalue;
	int half, qlines, save;

	/* Determine if switching to first/second half of q */
	avalue = gru_get_amo_value(cb);
	head = gru_get_amo_value_head(cb);
	limit = gru_get_amo_value_limit(cb);

	/*
	 * Fetch "qlines" from the queue header. Since the queue may be
	 * in memory that can't be accessed using socket addresses, use
	 * the GRU to access the data. Use DSR space from the message.
	 */
	save = *(int *)mesg;
	gru_vload(cb, QLINES(mq), gru_get_tri(mesg), XTYPE_W, 1, 1, IMA);
	if (gru_wait(cb) != CBS_IDLE)
		goto cberr;
	qlines = *(int *)mesg;
	*(int *)mesg = save;
	half = (limit != qlines);

	if (half)
		mqh = gru_mesq_head(qlines / 2 + 1, qlines);
	else
		mqh = gru_mesq_head(2, qlines / 2 + 1);

	/* Try to get lock for switching head pointer */
	gru_gamir(cb, EOP_IR_CLR, HSTATUS(mq, half), XTYPE_DW, IMA);
	if (gru_wait(cb) != CBS_IDLE)
		goto cberr;
	if (!gru_get_amo_value(cb)) {
		STAT(mesq_qf_locked);
		return MQE_QUEUE_FULL;
	}

	/* Got the lock. Send an optional NOOP if the queue is not full. */
	if (head != limit) {
		if (send_noop_message(cb, mq, mesg)) {
			gru_gamir(cb, EOP_IR_INC, HSTATUS(mq, half),
					XTYPE_DW, IMA);
			if (gru_wait(cb) != CBS_IDLE)
				goto cberr;
			STAT(mesq_qf_noop_not_full);
			return MQIE_AGAIN;
		}
		avalue++;
	}

	/* Then flip queuehead to other half of queue. */
	gru_gamer(cb, EOP_ERR_CSWAP, mq, XTYPE_DW, mqh.val, avalue, IMA);
	if (gru_wait(cb) != CBS_IDLE)
		goto cberr;

	/* If the queue head was not successfully swapped, clear the hstatus lock */
	if (gru_get_amo_value(cb) != avalue) {
		STAT(mesq_qf_switch_head_failed);
		gru_gamir(cb, EOP_IR_INC, HSTATUS(mq, half), XTYPE_DW, IMA);
		if (gru_wait(cb) != CBS_IDLE)
			goto cberr;
	}
	return MQIE_AGAIN;
cberr:
	STAT(mesq_qf_unexpected_error);
	return MQE_UNEXPECTED_CB_ERR;
}

/*
 * Handle a gru_mesq failure. Some of these failures are software recoverable
 * or retryable.
 */
static int send_message_failure(void *cb,
			unsigned long mq,
			void *mesg,
			int lines)
{
	int substatus, ret = 0;
	unsigned long m;

	substatus = gru_get_cb_message_queue_substatus(cb);
	switch (substatus) {
	case CBSS_NO_ERROR:
		STAT(mesq_send_unexpected_error);
		ret = MQE_UNEXPECTED_CB_ERR;
		break;
	case CBSS_LB_OVERFLOWED:
		STAT(mesq_send_lb_overflow);
		ret = MQE_CONGESTION;
		break;
	case CBSS_QLIMIT_REACHED:
		STAT(mesq_send_qlimit_reached);
		ret = send_message_queue_full(cb, mq, mesg, lines);
		break;
	case CBSS_AMO_NACKED:
		STAT(mesq_send_amo_nacked);
		ret = MQE_CONGESTION;
		break;
	case CBSS_PUT_NACKED:
		STAT(mesq_send_put_nacked);
		m = mq + (gru_get_amo_value_head(cb) << 6);
		gru_vstore(cb, m, gru_get_tri(mesg), XTYPE_CL, lines, 1, IMA);
		if (gru_wait(cb) == CBS_IDLE)
			ret = MQE_OK;
		else
			ret = MQE_UNEXPECTED_CB_ERR;
		break;
	default:
		BUG();
	}
	return ret;
}

/*
 * Send a message to a message queue
 *	mq	message queue
 *	mesg	message. Must be vaddr within a GSEG
 *	bytes	message size (<= 2 CL)
 */
int gru_send_message_gpa(unsigned long mq, void *mesg, unsigned int bytes)
{
	struct message_header *mhdr;
	void *cb;
	void *dsr;
	int istatus, clines, ret;

	STAT(mesq_send);
	BUG_ON(bytes < sizeof(int) || bytes > 2 * GRU_CACHE_LINE_BYTES);

	clines = DIV_ROUND_UP(bytes, GRU_CACHE_LINE_BYTES);
	if (gru_get_cpu_resources(bytes, &cb, &dsr))
		return MQE_BUG_NO_RESOURCES;
	memcpy(dsr, mesg, bytes);
	mhdr = dsr;
	mhdr->present = MQS_FULL;
	mhdr->lines = clines;
	if (clines == 2) {
		mhdr->present2 = get_present2(mhdr);
		restore_present2(mhdr, MQS_FULL);
	}

	do {
		ret = MQE_OK;
		gru_mesq(cb, mq, gru_get_tri(mhdr), clines, IMA);
		istatus = gru_wait(cb);
		if (istatus != CBS_IDLE)
			ret = send_message_failure(cb, mq, dsr, clines);
	} while (ret == MQIE_AGAIN);
	gru_free_cpu_resources(cb, dsr);

	if (ret)
		STAT(mesq_send_failed);
	return ret;
}
EXPORT_SYMBOL_GPL(gru_send_message_gpa);

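/*
 * Example (illustrative sketch, not used by this driver): post a short
 * message to a queue created with gru_create_message_queue(). The queue is
 * identified by a global physical address; uv_gpa() of the queue memory is
 * assumed to be the right handle, as in quicktest() below. The first word
 * of the message is reserved for the mesq header (see struct message_header),
 * the whole message must fit in two cache lines, and since the message is
 * memcpy'd into DSR space an ordinary kernel buffer is assumed to suffice.
 *
 *	struct {
 *		int hdr;	(reserved for the mesq interface)
 *		int payload;
 *	} msg = { .payload = 42 };
 *
 *	int ret = gru_send_message_gpa(uv_gpa(qmem), &msg, sizeof(msg));
 *	if (ret)
 *		... MQE_QUEUE_FULL, MQE_CONGESTION, etc.; caller picks a policy ...
 */
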
/*
 * Advance the receive pointer for the queue to the next message.
 */
void gru_free_message(void *rmq, void *mesg)
{
	struct message_queue *mq = rmq;
	struct message_header *mhdr = mq->next;
	void *next, *pnext;
	int half = -1;
	int lines = mhdr->lines;

	if (lines == 2)
		restore_present2(mhdr, MQS_EMPTY);
	mhdr->present = MQS_EMPTY;

	pnext = mq->next;
	next = pnext + GRU_CACHE_LINE_BYTES * lines;
	if (next == mq->limit) {
		next = mq->start;
		half = 1;
	} else if (pnext < mq->start2 && next >= mq->start2) {
		half = 0;
	}

	if (half >= 0)
		mq->hstatus[half] = 1;
	mq->next = next;
}
EXPORT_SYMBOL_GPL(gru_free_message);

/*
 * Get next message from message queue. Return NULL if no message
 * present. User must call gru_free_message() to move to the next message.
 *	rmq	message queue
 */
void *gru_get_next_message(void *rmq)
{
	struct message_queue *mq = rmq;
	struct message_header *mhdr = mq->next;
	int present = mhdr->present;

	/* skip NOOP messages */
	STAT(mesq_receive);
	while (present == MQS_NOOP) {
		gru_free_message(rmq, mhdr);
		mhdr = mq->next;
		present = mhdr->present;
	}

	/* Wait for both halves of 2 line messages */
	if (present == MQS_FULL && mhdr->lines == 2 &&
				get_present2(mhdr) == MQS_EMPTY)
		present = MQS_EMPTY;

	if (!present) {
		STAT(mesq_receive_none);
		return NULL;
	}

	if (mhdr->lines == 2)
		restore_present2(mhdr, mhdr->present2);

	return mhdr;
}
EXPORT_SYMBOL_GPL(gru_get_next_message);

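/*
 * Example (illustrative sketch, not used by this driver): drain a queue
 * using the cpu-addressable pointer originally passed to
 * gru_create_message_queue(). NOOP messages are skipped internally; each
 * returned message must be released with gru_free_message() once it has
 * been consumed (handle_msg() is a hypothetical helper).
 *
 *	void *msg;
 *
 *	while ((msg = gru_get_next_message(qmem)) != NULL) {
 *		handle_msg(msg);
 *		gru_free_message(qmem, msg);
 *	}
 */
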
/* ---------------------- GRU DATA COPY FUNCTIONS ---------------------------*/

/*
 * Copy a block of data using the GRU resources
 */
int gru_copy_gpa(unsigned long dest_gpa, unsigned long src_gpa,
				unsigned int bytes)
{
	void *cb;
	void *dsr;
	int ret;

	STAT(copy_gpa);
	if (gru_get_cpu_resources(GRU_NUM_KERNEL_DSR_BYTES, &cb, &dsr))
		return MQE_BUG_NO_RESOURCES;
	gru_bcopy(cb, src_gpa, dest_gpa, gru_get_tri(dsr),
		  XTYPE_B, bytes, GRU_NUM_KERNEL_DSR_BYTES, IMA);
	ret = gru_wait(cb);
	gru_free_cpu_resources(cb, dsr);
	return ret;
}
EXPORT_SYMBOL_GPL(gru_copy_gpa);

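/*
 * Example (illustrative sketch, not used by this driver): copy a buffer
 * between two kernel virtual addresses by handing their global physical
 * addresses to the GRU. A nonzero return indicates a CB error, or
 * MQE_BUG_NO_RESOURCES if the per-cpu kernel resources are unavailable.
 *
 *	int ret = gru_copy_gpa(uv_gpa(dst_buf), uv_gpa(src_buf), nbytes);
 *	if (ret)
 *		... handle GRU failure ...
 */
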
/* ------------------- KERNEL QUICKTESTS RUN AT STARTUP ----------------*/
/* Temp - will delete after we gain confidence in the GRU */
static __cacheline_aligned unsigned long word0;
static __cacheline_aligned unsigned long word1;

static int quicktest(struct gru_state *gru)
{
	void *cb;
	void *ds;
	unsigned long *p;

	cb = get_gseg_base_address_cb(gru->gs_gru_base_vaddr, KERNEL_CTXNUM, 0);
	ds = get_gseg_base_address_ds(gru->gs_gru_base_vaddr, KERNEL_CTXNUM, 0);
	p = ds;
	word0 = MAGIC;

	gru_vload(cb, uv_gpa(&word0), 0, XTYPE_DW, 1, 1, IMA);
	if (gru_wait(cb) != CBS_IDLE)
		BUG();

	if (*(unsigned long *)ds != MAGIC)
		BUG();
	gru_vstore(cb, uv_gpa(&word1), 0, XTYPE_DW, 1, 1, IMA);
	if (gru_wait(cb) != CBS_IDLE)
		BUG();

	if (word0 != word1 || word0 != MAGIC) {
		printk("GRU quicktest err: gru %d, found 0x%lx, expected 0x%lx\n",
		       gru->gs_gid, word1, MAGIC);
		BUG();		/* ZZZ should not be fatal */
	}

	return 0;
}

int gru_kservices_init(struct gru_state *gru)
{
	struct gru_blade_state *bs;
	struct gru_context_configuration_handle *cch;
	unsigned long cbr_map, dsr_map;
	int err, num, cpus_possible;

	/*
	 * Currently, resources are reserved ONLY on the second chiplet
	 * on each blade. This leaves ALL resources on chiplet 0 available
	 * for user code.
	 */
	bs = gru->gs_blade;
	if (gru != &bs->bs_grus[1])
		return 0;

	cpus_possible = uv_blade_nr_possible_cpus(gru->gs_blade_id);

	num = GRU_NUM_KERNEL_CBR * cpus_possible;
	cbr_map = gru_reserve_cb_resources(gru, GRU_CB_COUNT_TO_AU(num), NULL);
	gru->gs_reserved_cbrs += num;

	num = GRU_NUM_KERNEL_DSR_BYTES * cpus_possible;
	dsr_map = gru_reserve_ds_resources(gru, GRU_DS_BYTES_TO_AU(num), NULL);
	gru->gs_reserved_dsr_bytes += num;

	gru->gs_active_contexts++;
	__set_bit(KERNEL_CTXNUM, &gru->gs_context_map);
	cch = get_cch(gru->gs_gru_base_vaddr, KERNEL_CTXNUM);

	bs->kernel_cb = get_gseg_base_address_cb(gru->gs_gru_base_vaddr,
					KERNEL_CTXNUM, 0);
	bs->kernel_dsr = get_gseg_base_address_ds(gru->gs_gru_base_vaddr,
					KERNEL_CTXNUM, 0);

	lock_cch_handle(cch);
	cch->tfm_fault_bit_enable = 0;
	cch->tlb_int_enable = 0;
	cch->tfm_done_bit_enable = 0;
	cch->unmap_enable = 1;
	err = cch_allocate(cch, 0, cbr_map, dsr_map);
	if (err) {
		gru_dbg(grudev,
			"Unable to allocate kernel CCH: gru %d, err %d\n",
			gru->gs_gid, err);
		BUG();
	}
	err = cch_start(cch);
	if (err) {
		gru_dbg(grudev, "Unable to start kernel CCH: gru %d, err %d\n",
			gru->gs_gid, err);
		BUG();
	}
	unlock_cch_handle(cch);

	if (gru_options & GRU_QUICKLOOK)
		quicktest(gru);
	return 0;
}