/*
 * SN Platform GRU Driver
 *
 *              KERNEL SERVICES THAT USE THE GRU
 *
 * Copyright (c) 2008 Silicon Graphics, Inc.  All Rights Reserved.
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, write to the Free Software
 *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 */
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/smp_lock.h>
#include <linux/spinlock.h>
#include <linux/device.h>
#include <linux/miscdevice.h>
#include <linux/proc_fs.h>
#include <linux/interrupt.h>
#include <linux/uaccess.h>
#include "gru.h"
#include "grulib.h"
#include "grutables.h"
#include "grukservices.h"
#include "gru_instructions.h"
#include <asm/uv/uv_hub.h>

/*
 * Kernel GRU Usage
 *
 * The following is an interim algorithm for management of kernel GRU
 * resources. This will likely be replaced when we better understand the
 * kernel/user requirements.
 *
 * At boot time, the kernel permanently reserves a fixed number of
 * CBRs/DSRs for each cpu to use. The resources are all taken from
 * the GRU chiplet 1 on the blade. This leaves the full set of resources
 * of chiplet 0 available to be allocated to a single user.
 */

/* Blade percpu resources PERMANENTLY reserved for kernel use */
#define GRU_NUM_KERNEL_CBR      1
#define GRU_NUM_KERNEL_DSR_BYTES 256
#define KERNEL_CTXNUM           15

/* GRU instruction attributes for all instructions */
#define IMA                     IMA_CB_DELAY

/* GRU cacheline size is always 64 bytes - even on arches with 128 byte lines */
#define __gru_cacheline_aligned__                               \
        __attribute__((__aligned__(GRU_CACHE_LINE_BYTES)))

#define MAGIC   0x1234567887654321UL

/* Default retry count for GRU errors on kernel instructions */
#define EXCEPTION_RETRY_LIMIT   3

/* Status of message queue sections */
#define MQS_EMPTY               0
#define MQS_FULL                1
#define MQS_NOOP                2

/*----------------- RESOURCE MANAGEMENT -------------------------------------*/

/* optimized for x86_64 */
struct message_queue {
        union gru_mesqhead      head __gru_cacheline_aligned__; /* CL 0 */
        int                     qlines;                         /* DW 1 */
        long                    hstatus[2];
        void                    *next __gru_cacheline_aligned__;/* CL 1 */
        void                    *limit;
        void                    *start;
        void                    *start2;
        char                    data ____cacheline_aligned;     /* CL 2 */
};

/* First word in every message - used by mesq interface */
struct message_header {
        char    present;
        char    present2;
        char    lines;
        char    fill;
};

#define QLINES(mq)      ((mq) + offsetof(struct message_queue, qlines))
#define HSTATUS(mq, h)  ((mq) + offsetof(struct message_queue, hstatus[h]))
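
/*
 * Reserve the kernel CBR and DSR space permanently assigned to the
 * current cpu. Preemption stays disabled until the resources are
 * released by gru_free_cpu_resources().
 */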
static int gru_get_cpu_resources(int dsr_bytes, void **cb, void **dsr)
{
        struct gru_blade_state *bs;
        int lcpu;

        BUG_ON(dsr_bytes > GRU_NUM_KERNEL_DSR_BYTES);
        preempt_disable();
        bs = gru_base[uv_numa_blade_id()];
        lcpu = uv_blade_processor_id();
        *cb = bs->kernel_cb + lcpu * GRU_HANDLE_STRIDE;
        *dsr = bs->kernel_dsr + lcpu * GRU_NUM_KERNEL_DSR_BYTES;
        return 0;
}
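
/* Release the per-cpu resources reserved by gru_get_cpu_resources() */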
static void gru_free_cpu_resources(void *cb, void *dsr)
{
        preempt_enable();
}
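
/* Read the extended exception details for a CB and copy them into *excdet */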
int gru_get_cb_exception_detail(void *cb,
                struct control_block_extended_exc_detail *excdet)
{
        struct gru_control_block_extended *cbe;

        cbe = get_cbe(GRUBASE(cb), get_cb_number(cb));
        excdet->opc = cbe->opccpy;
        excdet->exopc = cbe->exopccpy;
        excdet->ecause = cbe->ecause;
        excdet->exceptdet0 = cbe->idef1upd;
        excdet->exceptdet1 = cbe->idef3upd;
        return 0;
}
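
/* Format the exception state of a CB into a human-readable string in buf */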
char *gru_get_cb_exception_detail_str(int ret, void *cb,
                                      char *buf, int size)
{
        struct gru_control_block_status *gen = (void *)cb;
        struct control_block_extended_exc_detail excdet;

        if (ret > 0 && gen->istatus == CBS_EXCEPTION) {
                gru_get_cb_exception_detail(cb, &excdet);
                snprintf(buf, size,
                        "GRU exception: cb %p, opc %d, exopc %d, ecause 0x%x,"
                        "excdet0 0x%lx, excdet1 0x%x",
                        gen, excdet.opc, excdet.exopc, excdet.ecause,
                        excdet.exceptdet0, excdet.exceptdet1);
        } else {
                snprintf(buf, size, "No exception");
        }
        return buf;
}
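
/* Spin until the CB is no longer active, i.e. it goes idle or takes an exception */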
static int gru_wait_idle_or_exception(struct gru_control_block_status *gen)
{
        while (gen->istatus >= CBS_ACTIVE) {
                cpu_relax();
                barrier();
        }
        return gen->istatus;
}
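
/*
 * Retry a CB that has taken an exception. Only exceptions whose ecause
 * bits are all retryable are re-issued, up to EXCEPTION_RETRY_LIMIT times;
 * message queue failures and all other exceptions return CBS_EXCEPTION.
 */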
static int gru_retry_exception(void *cb)
{
        struct gru_control_block_status *gen = (void *)cb;
        struct control_block_extended_exc_detail excdet;
        int retry = EXCEPTION_RETRY_LIMIT;

        while (1) {
                if (gru_get_cb_message_queue_substatus(cb))
                        break;
                if (gru_wait_idle_or_exception(gen) == CBS_IDLE)
                        return CBS_IDLE;
                gru_get_cb_exception_detail(cb, &excdet);
                if (excdet.ecause & ~EXCEPTION_RETRY_BITS)
                        break;
                if (retry-- == 0)
                        break;
                gen->icmd = 1;
                gru_flush_cache(gen);
        }
        return CBS_EXCEPTION;
}
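
/* Return the current CB status; an exception triggers the retry path */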
int gru_check_status_proc(void *cb)
{
        struct gru_control_block_status *gen = (void *)cb;
        int ret;

        ret = gen->istatus;
        if (ret != CBS_EXCEPTION)
                return ret;
        return gru_retry_exception(cb);
}
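
/* Wait for a CB to complete, retrying any transient exceptions */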
int gru_wait_proc(void *cb)
{
        struct gru_control_block_status *gen = (void *)cb;
        int ret;

        ret = gru_wait_idle_or_exception(gen);
        if (ret == CBS_EXCEPTION)
                ret = gru_retry_exception(cb);

        return ret;
}
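
/* Unrecoverable CB error on a kernel instruction: log the exception details and panic */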
void gru_abort(int ret, void *cb, char *str)
{
        char buf[GRU_EXC_STR_SIZE];

        panic("GRU FATAL ERROR: %s - %s\n", str,
              gru_get_cb_exception_detail_str(ret, cb, buf, sizeof(buf)));
}

void gru_wait_abort_proc(void *cb)
{
        int ret;

        ret = gru_wait_proc(cb);
        if (ret)
                gru_abort(ret, cb, "gru_wait_abort");
}

/*------------------------------ MESSAGE QUEUES -----------------------------*/

/* Internal status. These are NOT returned to the user. */
#define MQIE_AGAIN              -1      /* try again */

/*
 * Save/restore the "present" flag that is in the second line of 2-line
 * messages
 */
static inline int get_present2(void *p)
{
        struct message_header *mhdr = p + GRU_CACHE_LINE_BYTES;
        return mhdr->present;
}

static inline void restore_present2(void *p, int val)
{
        struct message_header *mhdr = p + GRU_CACHE_LINE_BYTES;
        mhdr->present = val;
}

/*
 * Create a message queue.
 *      qlines - message queue size in cache lines. Includes 2-line header.
 */
int gru_create_message_queue(void *p, unsigned int bytes)
{
        struct message_queue *mq = p;
        unsigned int qlines;

        qlines = bytes / GRU_CACHE_LINE_BYTES - 2;
        memset(mq, 0, bytes);
        mq->start = &mq->data;
        mq->start2 = &mq->data + (qlines / 2 - 1) * GRU_CACHE_LINE_BYTES;
        mq->next = &mq->data;
        mq->limit = &mq->data + (qlines - 2) * GRU_CACHE_LINE_BYTES;
        mq->qlines = qlines;
        mq->hstatus[0] = 0;
        mq->hstatus[1] = 1;
        mq->head = gru_mesq_head(2, qlines / 2 + 1);
        return 0;
}
EXPORT_SYMBOL_GPL(gru_create_message_queue);
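
/*
 * Illustrative use of the exported message queue API (not part of the
 * driver, and the buffer size shown is only an assumption of the sketch):
 * the caller supplies contiguous, cacheline-aligned memory, creates the
 * queue, sends to it by GPA, and drains it on the receive side.
 *
 *      gru_create_message_queue(p, bytes);
 *      gru_send_message_gpa(uv_gpa(p), msg, msg_bytes);
 *      m = gru_get_next_message(p);
 *      if (m)
 *              gru_free_message(p, m);
 */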

/*
 * Send a NOOP message to a message queue
 * Returns:
 *       0 - if queue is full after the send. This is the normal case
 *           but various races can change this.
 *      -1 - if mesq sent successfully but queue not full
 *      >0 - unexpected error. MQE_xxx returned
 */
static int send_noop_message(void *cb,
                                unsigned long mq, void *mesg)
{
        const struct message_header noop_header = {
                                        .present = MQS_NOOP, .lines = 1};
        unsigned long m;
        int substatus, ret;
        struct message_header save_mhdr, *mhdr = mesg;

        STAT(mesq_noop);
        save_mhdr = *mhdr;
        *mhdr = noop_header;
        gru_mesq(cb, mq, gru_get_tri(mhdr), 1, IMA);
        ret = gru_wait(cb);
        if (ret) {
                substatus = gru_get_cb_message_queue_substatus(cb);
                switch (substatus) {
                case CBSS_NO_ERROR:
                        STAT(mesq_noop_unexpected_error);
                        ret = MQE_UNEXPECTED_CB_ERR;
                        break;
                case CBSS_LB_OVERFLOWED:
                        STAT(mesq_noop_lb_overflow);
                        ret = MQE_CONGESTION;
                        break;
                case CBSS_QLIMIT_REACHED:
                        STAT(mesq_noop_qlimit_reached);
                        ret = 0;
                        break;
                case CBSS_AMO_NACKED:
                        STAT(mesq_noop_amo_nacked);
                        ret = MQE_CONGESTION;
                        break;
                case CBSS_PUT_NACKED:
                        STAT(mesq_noop_put_nacked);
                        m = mq + (gru_get_amo_value_head(cb) << 6);
                        gru_vstore(cb, m, gru_get_tri(mesg), XTYPE_CL, 1, 1,
                                                IMA);
                        if (gru_wait(cb) == CBS_IDLE)
                                ret = MQIE_AGAIN;
                        else
                                ret = MQE_UNEXPECTED_CB_ERR;
                        break;
                case CBSS_PAGE_OVERFLOW:
                default:
                        BUG();
                }
        }
        *mhdr = save_mhdr;
        return ret;
}

/*
 * Handle a gru_mesq full.
 */
static int send_message_queue_full(void *cb,
                           unsigned long mq, void *mesg, int lines)
{
        union gru_mesqhead mqh;
        unsigned int limit, head;
        unsigned long avalue;
        int half, qlines, save;

        /* Determine if switching to first/second half of q */
        avalue = gru_get_amo_value(cb);
        head = gru_get_amo_value_head(cb);
        limit = gru_get_amo_value_limit(cb);

        /*
         * Fetch "qlines" from the queue header. Since the queue may be
         * in memory that can't be accessed using socket addresses, use
         * the GRU to access the data. Use DSR space from the message.
         */
        save = *(int *)mesg;
        gru_vload(cb, QLINES(mq), gru_get_tri(mesg), XTYPE_W, 1, 1, IMA);
        if (gru_wait(cb) != CBS_IDLE)
                goto cberr;
        qlines = *(int *)mesg;
        *(int *)mesg = save;
        half = (limit != qlines);

        if (half)
                mqh = gru_mesq_head(qlines / 2 + 1, qlines);
        else
                mqh = gru_mesq_head(2, qlines / 2 + 1);

        /* Try to get lock for switching head pointer */
        gru_gamir(cb, EOP_IR_CLR, HSTATUS(mq, half), XTYPE_DW, IMA);
        if (gru_wait(cb) != CBS_IDLE)
                goto cberr;
        if (!gru_get_amo_value(cb)) {
                STAT(mesq_qf_locked);
                return MQE_QUEUE_FULL;
        }

        /* Got the lock. Send optional NOOP if queue not full. */
        if (head != limit) {
                if (send_noop_message(cb, mq, mesg)) {
                        gru_gamir(cb, EOP_IR_INC, HSTATUS(mq, half),
                                        XTYPE_DW, IMA);
                        if (gru_wait(cb) != CBS_IDLE)
                                goto cberr;
                        STAT(mesq_qf_noop_not_full);
                        return MQIE_AGAIN;
                }
                avalue++;
        }

        /* Then flip queuehead to other half of queue. */
        gru_gamer(cb, EOP_ERR_CSWAP, mq, XTYPE_DW, mqh.val, avalue, IMA);
        if (gru_wait(cb) != CBS_IDLE)
                goto cberr;

        /* If the queue head swap was not successful, clear the hstatus lock */
        if (gru_get_amo_value(cb) != avalue) {
                STAT(mesq_qf_switch_head_failed);
                gru_gamir(cb, EOP_IR_INC, HSTATUS(mq, half), XTYPE_DW, IMA);
                if (gru_wait(cb) != CBS_IDLE)
                        goto cberr;
        }
        return MQIE_AGAIN;
cberr:
        STAT(mesq_qf_unexpected_error);
        return MQE_UNEXPECTED_CB_ERR;
}

/*
 * Handle a gru_mesq failure. Some of these failures are software recoverable
 * or retryable.
 */
static int send_message_failure(void *cb,
                                unsigned long mq,
                                void *mesg,
                                int lines)
{
        int substatus, ret = 0;
        unsigned long m;

        substatus = gru_get_cb_message_queue_substatus(cb);
        switch (substatus) {
        case CBSS_NO_ERROR:
                STAT(mesq_send_unexpected_error);
                ret = MQE_UNEXPECTED_CB_ERR;
                break;
        case CBSS_LB_OVERFLOWED:
                STAT(mesq_send_lb_overflow);
                ret = MQE_CONGESTION;
                break;
        case CBSS_QLIMIT_REACHED:
                STAT(mesq_send_qlimit_reached);
                ret = send_message_queue_full(cb, mq, mesg, lines);
                break;
        case CBSS_AMO_NACKED:
                STAT(mesq_send_amo_nacked);
                ret = MQE_CONGESTION;
                break;
        case CBSS_PUT_NACKED:
                STAT(mesq_send_put_nacked);
                m = mq + (gru_get_amo_value_head(cb) << 6);
                gru_vstore(cb, m, gru_get_tri(mesg), XTYPE_CL, lines, 1, IMA);
                if (gru_wait(cb) == CBS_IDLE)
                        ret = MQE_OK;
                else
                        ret = MQE_UNEXPECTED_CB_ERR;
                break;
        default:
                BUG();
        }
        return ret;
}

/*
 * Send a message to a message queue
 *      cb      GRU control block to use to send message
 *      mq      message queue
 *      mesg    message. Must be vaddr within a GSEG
 *      bytes   message size (<= 2 CL)
 */
int gru_send_message_gpa(unsigned long mq, void *mesg, unsigned int bytes)
{
        struct message_header *mhdr;
        void *cb;
        void *dsr;
        int istatus, clines, ret;

        STAT(mesq_send);
        BUG_ON(bytes < sizeof(int) || bytes > 2 * GRU_CACHE_LINE_BYTES);

        clines = (bytes + GRU_CACHE_LINE_BYTES - 1) / GRU_CACHE_LINE_BYTES;
        if (gru_get_cpu_resources(bytes, &cb, &dsr))
                return MQE_BUG_NO_RESOURCES;
        memcpy(dsr, mesg, bytes);
        mhdr = dsr;
        mhdr->present = MQS_FULL;
        mhdr->lines = clines;
        if (clines == 2) {
                mhdr->present2 = get_present2(mhdr);
                restore_present2(mhdr, MQS_FULL);
        }

        do {
                ret = MQE_OK;
                gru_mesq(cb, mq, gru_get_tri(mhdr), clines, IMA);
                istatus = gru_wait(cb);
                if (istatus != CBS_IDLE)
                        ret = send_message_failure(cb, mq, dsr, clines);
        } while (ret == MQIE_AGAIN);
        gru_free_cpu_resources(cb, dsr);

        if (ret)
                STAT(mesq_send_failed);
        return ret;
}
EXPORT_SYMBOL_GPL(gru_send_message_gpa);

/*
 * Advance the receive pointer for the queue to the next message.
 */
void gru_free_message(void *rmq, void *mesg)
{
        struct message_queue *mq = rmq;
        struct message_header *mhdr = mq->next;
        void *next, *pnext;
        int half = -1;
        int lines = mhdr->lines;

        if (lines == 2)
                restore_present2(mhdr, MQS_EMPTY);
        mhdr->present = MQS_EMPTY;

        pnext = mq->next;
        next = pnext + GRU_CACHE_LINE_BYTES * lines;
        if (next == mq->limit) {
                next = mq->start;
                half = 1;
        } else if (pnext < mq->start2 && next >= mq->start2) {
                half = 0;
        }

        if (half >= 0)
                mq->hstatus[half] = 1;
        mq->next = next;
}
EXPORT_SYMBOL_GPL(gru_free_message);

/*
 * Get next message from message queue. Return NULL if no message
 * present. User must call gru_free_message() to move to next message.
 *      rmq     message queue
 */
void *gru_get_next_message(void *rmq)
{
        struct message_queue *mq = rmq;
        struct message_header *mhdr = mq->next;
        int present = mhdr->present;

        /* skip NOOP messages */
        STAT(mesq_receive);
        while (present == MQS_NOOP) {
                gru_free_message(rmq, mhdr);
                mhdr = mq->next;
                present = mhdr->present;
        }

        /* Wait for both halves of 2 line messages */
        if (present == MQS_FULL && mhdr->lines == 2 &&
                                get_present2(mhdr) == MQS_EMPTY)
                present = MQS_EMPTY;

        if (!present) {
                STAT(mesq_receive_none);
                return NULL;
        }

        if (mhdr->lines == 2)
                restore_present2(mhdr, mhdr->present2);

        return mhdr;
}
EXPORT_SYMBOL_GPL(gru_get_next_message);

/* ---------------------- GRU DATA COPY FUNCTIONS ---------------------------*/

/*
 * Copy a block of data using the GRU resources
 */
int gru_copy_gpa(unsigned long dest_gpa, unsigned long src_gpa,
                                unsigned int bytes)
{
        void *cb;
        void *dsr;
        int ret;

        STAT(copy_gpa);
        if (gru_get_cpu_resources(GRU_NUM_KERNEL_DSR_BYTES, &cb, &dsr))
                return MQE_BUG_NO_RESOURCES;
        gru_bcopy(cb, src_gpa, dest_gpa, gru_get_tri(dsr),
                  XTYPE_B, bytes, GRU_NUM_KERNEL_DSR_BYTES, IMA);
        ret = gru_wait(cb);
        gru_free_cpu_resources(cb, dsr);
        return ret;
}
EXPORT_SYMBOL_GPL(gru_copy_gpa);

/* ------------------- KERNEL QUICKTESTS RUN AT STARTUP ----------------*/
/* Temp - will delete after we gain confidence in the GRU */
static __cacheline_aligned unsigned long word0;
static __cacheline_aligned unsigned long word1;
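
/*
 * Simple self-test of the kernel context: gru_vload() word0 (MAGIC) into
 * the DSR, verify it arrived, then gru_vstore() it back out to word1 and
 * compare.
 */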
static int quicktest(struct gru_state *gru)
{
        void *cb;
        void *ds;
        unsigned long *p;

        cb = get_gseg_base_address_cb(gru->gs_gru_base_vaddr, KERNEL_CTXNUM, 0);
        ds = get_gseg_base_address_ds(gru->gs_gru_base_vaddr, KERNEL_CTXNUM, 0);
        p = ds;
        word0 = MAGIC;

        gru_vload(cb, uv_gpa(&word0), 0, XTYPE_DW, 1, 1, IMA);
        if (gru_wait(cb) != CBS_IDLE)
                BUG();

        if (*(unsigned long *)ds != MAGIC)
                BUG();
        gru_vstore(cb, uv_gpa(&word1), 0, XTYPE_DW, 1, 1, IMA);
        if (gru_wait(cb) != CBS_IDLE)
                BUG();

        if (word0 != word1 || word0 != MAGIC) {
                printk("GRU quicktest err: gru %d, found 0x%lx, expected 0x%lx\n",
                       gru->gs_gid, word1, MAGIC);
                BUG();          /* ZZZ should not be fatal */
        }

        return 0;
}
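
/*
 * Set up kernel GRU services for one GRU chiplet: reserve the per-cpu
 * CBRs/DSRs and allocate/start the permanent kernel context used by the
 * routines above.
 */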
int gru_kservices_init(struct gru_state *gru)
{
        struct gru_blade_state *bs;
        struct gru_context_configuration_handle *cch;
        unsigned long cbr_map, dsr_map;
        int err, num, cpus_possible;

        /*
         * Currently, resources are reserved ONLY on the second chiplet
         * on each blade. This leaves ALL resources on chiplet 0 available
         * for user code.
         */
        bs = gru->gs_blade;
        if (gru != &bs->bs_grus[1])
                return 0;

        cpus_possible = uv_blade_nr_possible_cpus(gru->gs_blade_id);

        num = GRU_NUM_KERNEL_CBR * cpus_possible;
        cbr_map = gru_reserve_cb_resources(gru, GRU_CB_COUNT_TO_AU(num), NULL);
        gru->gs_reserved_cbrs += num;

        num = GRU_NUM_KERNEL_DSR_BYTES * cpus_possible;
        dsr_map = gru_reserve_ds_resources(gru, GRU_DS_BYTES_TO_AU(num), NULL);
        gru->gs_reserved_dsr_bytes += num;

        gru->gs_active_contexts++;
        __set_bit(KERNEL_CTXNUM, &gru->gs_context_map);
        cch = get_cch(gru->gs_gru_base_vaddr, KERNEL_CTXNUM);

        bs->kernel_cb = get_gseg_base_address_cb(gru->gs_gru_base_vaddr,
                                        KERNEL_CTXNUM, 0);
        bs->kernel_dsr = get_gseg_base_address_ds(gru->gs_gru_base_vaddr,
                                        KERNEL_CTXNUM, 0);

        lock_cch_handle(cch);
        cch->tfm_fault_bit_enable = 0;
        cch->tlb_int_enable = 0;
        cch->tfm_done_bit_enable = 0;
        cch->unmap_enable = 1;
        err = cch_allocate(cch, 0, cbr_map, dsr_map);
        if (err) {
                gru_dbg(grudev,
                        "Unable to allocate kernel CCH: gru %d, err %d\n",
                        gru->gs_gid, err);
                BUG();
        }
        err = cch_start(cch);
        if (err) {
                gru_dbg(grudev, "Unable to start kernel CCH: gru %d, err %d\n",
                        gru->gs_gid, err);
                BUG();
        }
        unlock_cch_handle(cch);

        if (gru_options & GRU_QUICKLOOK)
                quicktest(gru);
        return 0;
}