/*
 * SGI UltraViolet TLB flush routines.
 *
 * (c) 2008 Cliff Wickman <cpw@sgi.com>, SGI.
 *
 * This code is released under the GNU General Public License version 2 or
 * later.
 */
#include <linux/seq_file.h>
#include <linux/proc_fs.h>
#include <linux/kernel.h>

#include <asm/mmu_context.h>
#include <asm/uv/uv.h>
#include <asm/uv/uv_mmrs.h>
#include <asm/uv/uv_hub.h>
#include <asm/uv/uv_bau.h>
#include <asm/genapic.h>
#include <asm/idle.h>
#include <asm/tsc.h>
#include <asm/irq_vectors.h>

#include <mach_apic.h>

static struct bau_control **uv_bau_table_bases __read_mostly;
static int uv_bau_retry_limit __read_mostly;

/* position of pnode (which is nasid>>1): */
static int uv_nshift __read_mostly;
static unsigned long uv_mmask __read_mostly;

static DEFINE_PER_CPU(struct ptc_stats, ptcstats);
static DEFINE_PER_CPU(struct bau_control, bau_control);

/*
 * Free a software acknowledge hardware resource by clearing its Pending
 * bit. This will return a reply to the sender.
 * If the message has timed out, a reply has already been sent by the
 * hardware but the resource has not been released. In that case our
 * clear of the Timeout bit (as well) will free the resource. No reply will
 * be sent (the hardware will only do one reply per message).
 */
static void uv_reply_to_message(int resource,
                                struct bau_payload_queue_entry *msg,
                                struct bau_msg_status *msp)
{
        unsigned long dw;

        dw = (1 << (resource + UV_SW_ACK_NPENDING)) | (1 << resource);
        msg->replied_to = 1;
        msg->sw_ack_vector = 0;
        if (msp)
                msp->seen_by.bits = 0;
        uv_write_local_mmr(UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_ALIAS, dw);
}
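
/*
 * Illustration of the mask written above (a sketch only; it assumes
 * UV_SW_ACK_NPENDING is the number of software-ack resources per hub):
 * for resource 2, dw sets bit 2 (the Pending bit) and bit
 * (2 + UV_SW_ACK_NPENDING) (the matching Timeout bit), so a single MMR
 * write frees the resource whether or not the message had timed out.
 */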

/*
 * Do all the things a cpu should do for a TLB shootdown message.
 * Other cpu's may come here at the same time for this message.
 */
static void uv_bau_process_message(struct bau_payload_queue_entry *msg,
                                   int msg_slot, int sw_ack_slot)
{
        unsigned long this_cpu_mask;
        struct bau_msg_status *msp;
        int cpu;

        msp = __get_cpu_var(bau_control).msg_statuses + msg_slot;
        cpu = uv_blade_processor_id();
        msg->number_of_cpus =
            uv_blade_nr_online_cpus(uv_node_to_blade_id(numa_node_id()));
        this_cpu_mask = 1UL << cpu;
        if (msp->seen_by.bits & this_cpu_mask)
                return;
        atomic_or_long(&msp->seen_by.bits, this_cpu_mask);
        if (msg->replied_to == 1)
                return;
        if (msg->address == TLB_FLUSH_ALL) {
                local_flush_tlb();
                __get_cpu_var(ptcstats).alltlb++;
        } else {
                __flush_tlb_one(msg->address);
                __get_cpu_var(ptcstats).onetlb++;
        }
        __get_cpu_var(ptcstats).requestee++;
        atomic_inc_short(&msg->acknowledge_count);
        if (msg->number_of_cpus == msg->acknowledge_count)
                uv_reply_to_message(sw_ack_slot, msg, msp);
}
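
/*
 * Note on the counting above (illustrative): with, say, 8 online cpus on
 * the blade, each cpu that handles the message bumps acknowledge_count;
 * only the cpu whose increment makes it equal to number_of_cpus replies
 * and frees the software-ack resource via uv_reply_to_message().
 */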

/*
 * Examine the payload queue on one distribution node to see
 * which messages have not been seen, and which cpu(s) have not seen them.
 *
 * Returns the number of cpu's that have not responded.
 */
static int uv_examine_destination(struct bau_control *bau_tablesp, int sender)
{
        struct bau_payload_queue_entry *msg;
        struct bau_msg_status *msp;
        int count = 0;
        int i;
        int j;

        for (msg = bau_tablesp->va_queue_first, i = 0; i < DEST_Q_SIZE;
             msg++, i++) {
                if ((msg->sending_cpu == sender) && (!msg->replied_to)) {
                        msp = bau_tablesp->msg_statuses + i;
                        printk(KERN_DEBUG
                               "blade %d: address:%#lx %d of %d, not cpu(s): ",
                               i, msg->address, msg->acknowledge_count,
                               msg->number_of_cpus);
                        for (j = 0; j < msg->number_of_cpus; j++) {
                                if (!((1L << j) & msp->seen_by.bits)) {
                                        count++;
                                        printk("%d ", j);
                                }
                        }
                        printk("\n");
                }
        }
        return count;
}

/*
 * Examine the payload queue on all the distribution nodes to see
 * which messages have not been seen, and which cpu(s) have not seen them.
 *
 * Returns the number of cpu's that have not responded.
 */
static int uv_examine_destinations(struct bau_target_nodemask *distribution)
{
        int sender;
        int i;
        int count = 0;

        sender = smp_processor_id();
        for (i = 0; i < sizeof(struct bau_target_nodemask) * BITSPERBYTE; i++) {
                if (!bau_node_isset(i, distribution))
                        continue;
                count += uv_examine_destination(uv_bau_table_bases[i], sender);
        }
        return count;
}

/*
 * wait for completion of a broadcast message
 *
 * return COMPLETE, RETRY or GIVEUP
 */
static int uv_wait_completion(struct bau_desc *bau_desc,
                              unsigned long mmr_offset, int right_shift)
{
        int exams = 0;
        long destination_timeouts = 0;
        long source_timeouts = 0;
        unsigned long descriptor_status;

        while ((descriptor_status = (((unsigned long)
                        uv_read_local_mmr(mmr_offset) >>
                        right_shift) & UV_ACT_STATUS_MASK)) !=
                        DESC_STATUS_IDLE) {
                if (descriptor_status == DESC_STATUS_SOURCE_TIMEOUT) {
                        source_timeouts++;
                        if (source_timeouts > SOURCE_TIMEOUT_LIMIT)
                                source_timeouts = 0;
                        __get_cpu_var(ptcstats).s_retry++;
                        return FLUSH_RETRY;
                }
                /*
                 * spin here looking for progress at the destinations
                 */
                if (descriptor_status == DESC_STATUS_DESTINATION_TIMEOUT) {
                        destination_timeouts++;
                        if (destination_timeouts > DESTINATION_TIMEOUT_LIMIT) {
                                /*
                                 * returns number of cpus not responding
                                 */
                                if (uv_examine_destinations
                                    (&bau_desc->distribution) == 0) {
                                        __get_cpu_var(ptcstats).d_retry++;
                                        return FLUSH_RETRY;
                                }
                                exams++;
                                if (exams >= uv_bau_retry_limit) {
                                        printk(KERN_DEBUG
                                               "uv_flush_tlb_others");
                                        printk("giving up on cpu %d\n",
                                               smp_processor_id());
                                        return FLUSH_GIVEUP;
                                }
                                /*
                                 * delays can hang the simulator
                                udelay(1000);
                                 */
                                destination_timeouts = 0;
                        }
                }
                cpu_relax();
        }
        return FLUSH_COMPLETE;
}
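
/*
 * How the status polled above is located (a sketch of the layout implied by
 * the code below, not a hardware specification): each sending cpu owns a
 * UV_ACT_STATUS_SIZE-bit field packed into one of the two
 * UVH_LB_BAU_SB_ACTIVATION_STATUS MMRs; the first UV_CPUS_PER_ACT_STATUS
 * cpus on the hub use STATUS_0, the rest use STATUS_1, and right_shift
 * selects the cpu's field within that register.
 */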

/**
 * uv_flush_send_and_wait
 *
 * Send a broadcast and wait for a broadcast message to complete.
 *
 * The flush_mask contains the cpus the broadcast was sent to.
 *
 * Returns NULL if all remote flushing was done. The mask is zeroed.
 * Returns @flush_mask if some remote flushing remains to be done. The
 * mask will have some bits still set.
 */
const struct cpumask *uv_flush_send_and_wait(int cpu, int this_blade,
                                             struct bau_desc *bau_desc,
                                             struct cpumask *flush_mask)
{
        int completion_status = 0;
        int right_shift;
        int tries = 0;
        int blade;
        int bit;
        unsigned long mmr_offset;
        unsigned long index;
        cycles_t time1;
        cycles_t time2;

        if (cpu < UV_CPUS_PER_ACT_STATUS) {
                mmr_offset = UVH_LB_BAU_SB_ACTIVATION_STATUS_0;
                right_shift = cpu * UV_ACT_STATUS_SIZE;
        } else {
                mmr_offset = UVH_LB_BAU_SB_ACTIVATION_STATUS_1;
                right_shift =
                    ((cpu - UV_CPUS_PER_ACT_STATUS) * UV_ACT_STATUS_SIZE);
        }
        time1 = get_cycles();
        do {
                tries++;
                index = (1UL << UVH_LB_BAU_SB_ACTIVATION_CONTROL_PUSH_SHFT) |
                        cpu;
                uv_write_local_mmr(UVH_LB_BAU_SB_ACTIVATION_CONTROL, index);
                completion_status = uv_wait_completion(bau_desc, mmr_offset,
                                                       right_shift);
        } while (completion_status == FLUSH_RETRY);
        time2 = get_cycles();
        __get_cpu_var(ptcstats).sflush += (time2 - time1);
        if (tries > 1)
                __get_cpu_var(ptcstats).retriesok++;

        if (completion_status == FLUSH_GIVEUP) {
                /*
                 * Cause the caller to do an IPI-style TLB shootdown on
                 * the cpu's, all of which are still in the mask.
                 */
                __get_cpu_var(ptcstats).ptc_i++;
                return flush_mask;
        }

        /*
         * Success, so clear the remote cpu's from the mask so we don't
         * use the IPI method of shootdown on them.
         */
        for_each_cpu(bit, flush_mask) {
                blade = uv_cpu_to_blade_id(bit);
                if (blade == this_blade)
                        continue;
                cpumask_clear_cpu(bit, flush_mask);
        }
        if (!cpumask_empty(flush_mask))
                return flush_mask;
        return NULL;
}

/**
 * uv_flush_tlb_others - globally purge translation cache of a virtual
 * address or all TLB's
 * @cpumask: mask of all cpu's in which the address is to be removed
 * @mm: mm_struct containing virtual address range
 * @va: virtual address to be removed (or TLB_FLUSH_ALL for all TLB's on cpu)
 * @cpu: the current cpu
 *
 * This is the entry point for initiating any UV global TLB shootdown.
 *
 * Purges the translation caches of all specified processors of the given
 * virtual address, or purges all TLB's on specified processors.
 *
 * The caller has derived the cpumask from the mm_struct. This function
 * is called only if there are bits set in the mask. (e.g. flush_tlb_page())
 *
 * The cpumask is converted into a nodemask of the nodes containing
 * the cpus.
 *
 * Note that this function should be called with preemption disabled.
 *
 * Returns NULL if all remote flushing was done.
 * Returns pointer to cpumask if some remote flushing remains to be
 * done.  The returned pointer is valid till preemption is re-enabled.
 */
const struct cpumask *uv_flush_tlb_others(const struct cpumask *cpumask,
                                          struct mm_struct *mm,
                                          unsigned long va, unsigned int cpu)
{
        static DEFINE_PER_CPU(cpumask_t, flush_tlb_mask);
        struct cpumask *flush_mask = &__get_cpu_var(flush_tlb_mask);
        int i;
        int bit;
        int blade;
        int uv_cpu;
        int this_blade;
        int locals = 0;
        struct bau_desc *bau_desc;

        WARN_ON(!in_atomic());

        cpumask_andnot(flush_mask, cpumask, cpumask_of(cpu));

        uv_cpu = uv_blade_processor_id();
        this_blade = uv_numa_blade_id();
        bau_desc = __get_cpu_var(bau_control).descriptor_base;
        bau_desc += UV_ITEMS_PER_DESCRIPTOR * uv_cpu;

        bau_nodes_clear(&bau_desc->distribution, UV_DISTRIBUTION_SIZE);

        i = 0;
        for_each_cpu(bit, flush_mask) {
                blade = uv_cpu_to_blade_id(bit);
                BUG_ON(blade > (UV_DISTRIBUTION_SIZE - 1));
                if (blade == this_blade) {
                        locals++;
                        continue;
                }
                bau_node_set(blade, &bau_desc->distribution);
                i++;
        }
        if (i == 0) {
                /*
                 * no off_node flushing; return status for local node
                 */
                if (locals)
                        return flush_mask;
                else
                        return NULL;
        }
        __get_cpu_var(ptcstats).requestor++;
        __get_cpu_var(ptcstats).ntargeted += i;

        bau_desc->payload.address = va;
        bau_desc->payload.sending_cpu = cpu;

        return uv_flush_send_and_wait(uv_cpu, this_blade, bau_desc, flush_mask);
}
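
/*
 * Typical caller pattern (a sketch only; the fallback name below is
 * illustrative and not defined in this file): the generic shootdown path
 * tries the BAU first and falls back to IPIs for whatever remains.
 *
 *      const struct cpumask *remaining;
 *
 *      remaining = uv_flush_tlb_others(cpumask, mm, va, smp_processor_id());
 *      if (remaining)
 *              flush_tlb_others_by_ipi(remaining, mm, va);
 */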

/*
 * The BAU message interrupt comes here. (registered by set_intr_gate)
 * See entry_64.S
 *
 * We received a broadcast assist message.
 *
 * Interrupts may have been disabled; this interrupt could represent
 * the receipt of several messages.
 *
 * All cores/threads on this node get this interrupt.
 * The last one to see it does the s/w ack.
 * (the resource will not be freed until non-interruptible cpus see this
 *  interrupt; hardware will time out the s/w ack and reply ERROR)
 */
void uv_bau_message_interrupt(struct pt_regs *regs)
{
        struct bau_payload_queue_entry *va_queue_first;
        struct bau_payload_queue_entry *va_queue_last;
        struct bau_payload_queue_entry *msg;
        struct pt_regs *old_regs = set_irq_regs(regs);
        cycles_t time1;
        cycles_t time2;
        int msg_slot;
        int sw_ack_slot;
        int fw;
        int count = 0;
        unsigned long local_pnode;

        ack_APIC_irq();
        exit_idle();
        irq_enter();

        time1 = get_cycles();

        local_pnode = uv_blade_to_pnode(uv_numa_blade_id());

        va_queue_first = __get_cpu_var(bau_control).va_queue_first;
        va_queue_last = __get_cpu_var(bau_control).va_queue_last;

        msg = __get_cpu_var(bau_control).bau_msg_head;
        while (msg->sw_ack_vector) {
                count++;
                fw = msg->sw_ack_vector;
                msg_slot = msg - va_queue_first;
                sw_ack_slot = ffs(fw) - 1;

                uv_bau_process_message(msg, msg_slot, sw_ack_slot);

                msg++;
                if (msg > va_queue_last)
                        msg = va_queue_first;
                __get_cpu_var(bau_control).bau_msg_head = msg;
        }
        if (!count)
                __get_cpu_var(ptcstats).nomsg++;
        else if (count > 1)
                __get_cpu_var(ptcstats).multmsg++;

        time2 = get_cycles();
        __get_cpu_var(ptcstats).dflush += (time2 - time1);

        irq_exit();
        set_irq_regs(old_regs);
}

/*
 * Walk the online nodes, visiting each blade once (several nodes can share
 * a blade/hub). The apicid and pnode identify the hub whose BAU timeouts
 * would be enabled; as written, the values are computed but not yet used.
 */
static void uv_enable_timeouts(void)
{
        int i;
        int blade;
        int last_blade;
        int pnode;
        int cur_cpu = 0;
        unsigned long apicid;

        last_blade = -1;
        for_each_online_node(i) {
                blade = uv_node_to_blade_id(i);
                if (blade == last_blade)
                        continue;
                last_blade = blade;
                apicid = per_cpu(x86_cpu_to_apicid, cur_cpu);
                pnode = uv_blade_to_pnode(blade);
                cur_cpu += uv_blade_nr_possible_cpus(i);
        }
}
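
/*
 * The remainder of this file is the /proc interface for the BAU statistics
 * (a brief orientation, inferred from the code below): the seq_file
 * iterator walks cpu numbers 0 .. num_possible_cpus()-1, uv_ptc_seq_show()
 * prints one line per online cpu from its per-cpu ptc_stats, and a write
 * to the file either dumps the column legend (0) or sets the retry limit.
 */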

static void *uv_ptc_seq_start(struct seq_file *file, loff_t *offset)
{
        if (*offset < num_possible_cpus())
                return offset;
        return NULL;
}

static void *uv_ptc_seq_next(struct seq_file *file, void *data, loff_t *offset)
{
        (*offset)++;
        if (*offset < num_possible_cpus())
                return offset;
        return NULL;
}

static void uv_ptc_seq_stop(struct seq_file *file, void *data)
{
}

/*
 * Display the statistics through /proc
 * 'data' points to the cpu number
 */
static int uv_ptc_seq_show(struct seq_file *file, void *data)
{
        struct ptc_stats *stat;
        int cpu;

        cpu = *(loff_t *)data;

        if (!cpu) {
                seq_printf(file,
                "# cpu requestor requestee one all sretry dretry ptc_i ");
                seq_printf(file,
                "sw_ack sflush dflush sok dnomsg dmult starget\n");
        }
        if (cpu < num_possible_cpus() && cpu_online(cpu)) {
                stat = &per_cpu(ptcstats, cpu);
                seq_printf(file, "cpu %d %ld %ld %ld %ld %ld %ld %ld ",
                           cpu, stat->requestor,
                           stat->requestee, stat->onetlb, stat->alltlb,
                           stat->s_retry, stat->d_retry, stat->ptc_i);
                seq_printf(file, "%lx %ld %ld %ld %ld %ld %ld\n",
                           uv_read_global_mmr64(uv_blade_to_pnode
                                        (uv_cpu_to_blade_id(cpu)),
                                        UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE),
                           stat->sflush, stat->dflush,
                           stat->retriesok, stat->nomsg,
                           stat->multmsg, stat->ntargeted);
        }
        return 0;
}

/*
 * 0: display meaning of the statistics
 * >0: retry limit
 */
static ssize_t uv_ptc_proc_write(struct file *file, const char __user *user,
                                 size_t count, loff_t *data)
{
        unsigned long newmode;
        char optstr[64];

        if (count == 0 || count > sizeof(optstr))
                return -EINVAL;
        if (copy_from_user(optstr, user, count))
                return -EFAULT;
        optstr[count - 1] = '\0';
        if (strict_strtoul(optstr, 10, &newmode) < 0) {
                printk(KERN_DEBUG "%s is invalid\n", optstr);
                return -EINVAL;
        }

        if (newmode == 0) {
                printk(KERN_DEBUG "# cpu: cpu number\n");
                printk(KERN_DEBUG
                "requestor: times this cpu was the flush requestor\n");
                printk(KERN_DEBUG
                "requestee: times this cpu was requested to flush its TLBs\n");
                printk(KERN_DEBUG
                "one: times requested to flush a single address\n");
                printk(KERN_DEBUG
                "all: times requested to flush all TLB's\n");
                printk(KERN_DEBUG
                "sretry: number of retries of source-side timeouts\n");
                printk(KERN_DEBUG
                "dretry: number of retries of destination-side timeouts\n");
                printk(KERN_DEBUG
                "ptc_i: times UV fell through to IPI-style flushes\n");
                printk(KERN_DEBUG
                "sw_ack: image of UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE\n");
                printk(KERN_DEBUG
                "sflush_us: cycles spent in uv_flush_tlb_others()\n");
                printk(KERN_DEBUG
                "dflush_us: cycles spent in handling flush requests\n");
                printk(KERN_DEBUG "sok: successes on retry\n");
                printk(KERN_DEBUG "dnomsg: interrupts with no message\n");
                printk(KERN_DEBUG
                "dmult: interrupts with multiple messages\n");
                printk(KERN_DEBUG "starget: nodes targeted\n");
        } else {
                uv_bau_retry_limit = newmode;
                printk(KERN_DEBUG "timeout retry limit:%d\n",
                       uv_bau_retry_limit);
        }

        return count;
}

static const struct seq_operations uv_ptc_seq_ops = {
        .start  = uv_ptc_seq_start,
        .next   = uv_ptc_seq_next,
        .stop   = uv_ptc_seq_stop,
        .show   = uv_ptc_seq_show
};

static int uv_ptc_proc_open(struct inode *inode, struct file *file)
{
        return seq_open(file, &uv_ptc_seq_ops);
}

static const struct file_operations proc_uv_ptc_operations = {
        .open    = uv_ptc_proc_open,
        .read    = seq_read,
        .write   = uv_ptc_proc_write,
        .llseek  = seq_lseek,
        .release = seq_release,
};
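
/*
 * Usage sketch for the entry registered below (assumes UV_PTC_BASENAME,
 * defined in uv_bau.h rather than here, names a path under /proc):
 *
 *      cat /proc/<UV_PTC_BASENAME>        # one statistics line per online cpu
 *      echo 0  > /proc/<UV_PTC_BASENAME>  # print the column legend to the log
 *      echo 10 > /proc/<UV_PTC_BASENAME>  # set uv_bau_retry_limit to 10
 */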

static int __init uv_ptc_init(void)
{
        struct proc_dir_entry *proc_uv_ptc;

        if (!is_uv_system())
                return 0;

        proc_uv_ptc = create_proc_entry(UV_PTC_BASENAME, 0444, NULL);
        if (!proc_uv_ptc) {
                printk(KERN_ERR "unable to create %s proc entry\n",
                       UV_PTC_BASENAME);
                return -EINVAL;
        }
        proc_uv_ptc->proc_fops = &proc_uv_ptc_operations;
        return 0;
}

/*
 * begin the initialization of the per-blade control structures
 */
static struct bau_control * __init uv_table_bases_init(int blade, int node)
{
        int i;
        struct bau_msg_status *msp;
        struct bau_control *bau_tabp;

        bau_tabp =
            kmalloc_node(sizeof(struct bau_control), GFP_KERNEL, node);
        BUG_ON(!bau_tabp);

        bau_tabp->msg_statuses =
            kmalloc_node(sizeof(struct bau_msg_status) *
                         DEST_Q_SIZE, GFP_KERNEL, node);
        BUG_ON(!bau_tabp->msg_statuses);

        for (i = 0, msp = bau_tabp->msg_statuses; i < DEST_Q_SIZE; i++, msp++)
                bau_cpubits_clear(&msp->seen_by, (int)
                                  uv_blade_nr_possible_cpus(blade));

        uv_bau_table_bases[blade] = bau_tabp;

        return bau_tabp;
}

/*
 * finish the initialization of the per-blade control structures
 */
static void __init
uv_table_bases_finish(int blade, int node, int cur_cpu,
                      struct bau_control *bau_tablesp,
                      struct bau_desc *adp)
{
        struct bau_control *bcp;
        int i;

        for (i = cur_cpu; i < cur_cpu + uv_blade_nr_possible_cpus(blade); i++) {
                bcp = (struct bau_control *)&per_cpu(bau_control, i);

                bcp->bau_msg_head     = bau_tablesp->va_queue_first;
                bcp->va_queue_first   = bau_tablesp->va_queue_first;
                bcp->va_queue_last    = bau_tablesp->va_queue_last;
                bcp->msg_statuses     = bau_tablesp->msg_statuses;
                bcp->descriptor_base  = adp;
        }
}

/*
 * initialize the sending side's sending buffers
 */
static struct bau_desc * __init
uv_activation_descriptor_init(int node, int pnode)
{
        int i;
        unsigned long pa;
        unsigned long m;
        unsigned long n;
        unsigned long mmr_image;
        struct bau_desc *adp;
        struct bau_desc *ad2;

        adp = (struct bau_desc *)
            kmalloc_node(16384, GFP_KERNEL, node);
        BUG_ON(!adp);

        pa = __pa((unsigned long)adp);
        n = pa >> uv_nshift;
        m = pa & uv_mmask;

        mmr_image = uv_read_global_mmr64(pnode, UVH_LB_BAU_SB_DESCRIPTOR_BASE);
        if (mmr_image) {
                uv_write_global_mmr64(pnode, (unsigned long)
                                      UVH_LB_BAU_SB_DESCRIPTOR_BASE,
                                      (n << UV_DESC_BASE_PNODE_SHIFT | m));
        }

        for (i = 0, ad2 = adp; i < UV_ACTIVATION_DESCRIPTOR_SIZE; i++, ad2++) {
                memset(ad2, 0, sizeof(struct bau_desc));
                ad2->header.sw_ack_flag = 1;
                ad2->header.base_dest_nodeid =
                    uv_blade_to_pnode(uv_cpu_to_blade_id(0));
                ad2->header.command = UV_NET_ENDPOINT_INTD;
                ad2->header.int_both = 1;
                /*
                 * all others need to be set to zero:
                 *   fairness chaining multilevel count replied_to
                 */
        }
        return adp;
}

/*
 * initialize the destination side's receiving buffers
 */
static struct bau_payload_queue_entry * __init
uv_payload_queue_init(int node, int pnode, struct bau_control *bau_tablesp)
{
        struct bau_payload_queue_entry *pqp;
        char *cp;

        pqp = (struct bau_payload_queue_entry *) kmalloc_node(
                (DEST_Q_SIZE + 1) * sizeof(struct bau_payload_queue_entry),
                GFP_KERNEL, node);
        BUG_ON(!pqp);

        /* round the queue start up to a 32-byte boundary */
        cp = (char *)pqp + 31;
        pqp = (struct bau_payload_queue_entry *)(((unsigned long)cp >> 5) << 5);
        bau_tablesp->va_queue_first = pqp;
        uv_write_global_mmr64(pnode,
                              UVH_LB_BAU_INTD_PAYLOAD_QUEUE_FIRST,
                              ((unsigned long)pnode <<
                               UV_PAYLOADQ_PNODE_SHIFT) |
                              uv_physnodeaddr(pqp));
        uv_write_global_mmr64(pnode, UVH_LB_BAU_INTD_PAYLOAD_QUEUE_TAIL,
                              uv_physnodeaddr(pqp));
        bau_tablesp->va_queue_last = pqp + (DEST_Q_SIZE - 1);
        uv_write_global_mmr64(pnode, UVH_LB_BAU_INTD_PAYLOAD_QUEUE_LAST,
                              (unsigned long)
                              uv_physnodeaddr(bau_tablesp->va_queue_last));
        memset(pqp, 0, sizeof(struct bau_payload_queue_entry) * DEST_Q_SIZE);

        return pqp;
}

/*
 * Initialization of each UV blade's structures
 */
static int __init uv_init_blade(int blade, int node, int cur_cpu)
{
        int pnode;
        unsigned long pa;
        unsigned long apicid;
        struct bau_desc *adp;
        struct bau_payload_queue_entry *pqp;
        struct bau_control *bau_tablesp;

        bau_tablesp = uv_table_bases_init(blade, node);
        pnode = uv_blade_to_pnode(blade);
        adp = uv_activation_descriptor_init(node, pnode);
        pqp = uv_payload_queue_init(node, pnode, bau_tablesp);
        uv_table_bases_finish(blade, node, cur_cpu, bau_tablesp, adp);
        /*
         * the below initialization can't be in firmware because the
         * messaging IRQ will be determined by the OS
         */
        apicid = per_cpu(x86_cpu_to_apicid, cur_cpu);
        pa = uv_read_global_mmr64(pnode, UVH_BAU_DATA_CONFIG);
        if ((pa & 0xff) != UV_BAU_MESSAGE) {
                uv_write_global_mmr64(pnode, UVH_BAU_DATA_CONFIG,
                                      ((apicid << 32) | UV_BAU_MESSAGE));
        }
        return 0;
}

/*
 * Initialization of BAU-related structures
 */
static int __init uv_bau_init(void)
{
        int blade;
        int node;
        int nblades;
        int last_blade;
        int cur_cpu = 0;

        if (!is_uv_system())
                return 0;

        uv_bau_retry_limit = 1;
        uv_nshift = uv_hub_info->n_val;
        uv_mmask = (1UL << uv_hub_info->n_val) - 1;
        nblades = 0;
        last_blade = -1;
        for_each_online_node(node) {
                blade = uv_node_to_blade_id(node);
                if (blade == last_blade)
                        continue;
                last_blade = blade;
                nblades++;
        }
        uv_bau_table_bases = (struct bau_control **)
            kmalloc(nblades * sizeof(struct bau_control *), GFP_KERNEL);
        BUG_ON(!uv_bau_table_bases);

        last_blade = -1;
        for_each_online_node(node) {
                blade = uv_node_to_blade_id(node);
                if (blade == last_blade)
                        continue;
                last_blade = blade;
                uv_init_blade(blade, node, cur_cpu);
                cur_cpu += uv_blade_nr_possible_cpus(blade);
        }
        alloc_intr_gate(UV_BAU_MESSAGE, uv_bau_message_intr1);
        uv_enable_timeouts();

        return 0;
}
__initcall(uv_bau_init);
__initcall(uv_ptc_init);