/*
 * SGI UltraViolet TLB flush routines.
 *
 * (c) 2008-2010 Cliff Wickman <cpw@sgi.com>, SGI.
 *
 * This code is released under the GNU General Public License version 2 or
 * later.
 */
#include <linux/seq_file.h>
#include <linux/proc_fs.h>
#include <linux/debugfs.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/delay.h>

#include <asm/mmu_context.h>
#include <asm/uv/uv.h>
#include <asm/uv/uv_mmrs.h>
#include <asm/uv/uv_hub.h>
#include <asm/uv/uv_bau.h>
#include <asm/apic.h>
#include <asm/idle.h>
#include <asm/tsc.h>
#include <asm/irq_vectors.h>
#include <asm/timer.h>

/* timeouts in nanoseconds (indexed by UVH_AGING_PRESCALE_SEL urgency7 30:28) */
static int timeout_base_ns[] = {
		20,
		160,
		1280,
		10240,
		81920,
		655360,
		5242880,
		167772160
};
static int timeout_us;
static int nobau;
static int baudisabled;
static spinlock_t disable_lock;
static cycles_t congested_cycles;

/* tunables: */
static int max_bau_concurrent = MAX_BAU_CONCURRENT;
static int max_bau_concurrent_constant = MAX_BAU_CONCURRENT;
static int plugged_delay = PLUGGED_DELAY;
static int plugsb4reset = PLUGSB4RESET;
static int timeoutsb4reset = TIMEOUTSB4RESET;
static int ipi_reset_limit = IPI_RESET_LIMIT;
static int complete_threshold = COMPLETE_THRESHOLD;
static int congested_response_us = CONGESTED_RESPONSE_US;
static int congested_reps = CONGESTED_REPS;
static int congested_period = CONGESTED_PERIOD;

static struct dentry *tunables_dir;
static struct dentry *tunables_file;

static int __init setup_nobau(char *arg)
{
	nobau = 1;
	return 0;
}
early_param("nobau", setup_nobau);

/* base pnode in this partition */
static int uv_partition_base_pnode __read_mostly;
/* position of pnode (which is nasid>>1): */
static int uv_nshift __read_mostly;
static unsigned long uv_mmask __read_mostly;

static DEFINE_PER_CPU(struct ptc_stats, ptcstats);
static DEFINE_PER_CPU(struct bau_control, bau_control);
static DEFINE_PER_CPU(cpumask_var_t, uv_flush_tlb_mask);

/*
 * Determine the first node on a uvhub. 'Nodes' are used for kernel
 * memory allocation.
 */
static int __init uvhub_to_first_node(int uvhub)
{
	int node, b;

	for_each_online_node(node) {
		b = uv_node_to_blade_id(node);
		if (uvhub == b)
			return node;
	}
	return -1;
}

/*
 * Determine the apicid of the first cpu on a uvhub.
 */
static int __init uvhub_to_first_apicid(int uvhub)
{
	int cpu;

	for_each_present_cpu(cpu)
		if (uvhub == uv_cpu_to_blade_id(cpu))
			return per_cpu(x86_cpu_to_apicid, cpu);
	return -1;
}

/*
 * Free a software acknowledge hardware resource by clearing its Pending
 * bit. This will return a reply to the sender.
 * If the message has timed out, a reply has already been sent by the
 * hardware but the resource has not been released. In that case our
 * clear of the Timeout bit (as well) will free the resource. No reply will
 * be sent (the hardware will only do one reply per message).
 */
static inline void uv_reply_to_message(struct msg_desc *mdp,
				       struct bau_control *bcp)
{
	unsigned long dw;
	struct bau_payload_queue_entry *msg;

	msg = mdp->msg;
	if (!msg->canceled) {
		dw = (msg->sw_ack_vector << UV_SW_ACK_NPENDING) |
				msg->sw_ack_vector;
		uv_write_local_mmr(
				UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_ALIAS, dw);
	}
	msg->replied_to = 1;
	msg->sw_ack_vector = 0;
}
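
/*
 * Example (illustrative, not part of the original file; assumes
 * UV_SW_ACK_NPENDING is 8, its value in uv_bau.h at this time):
 * a message holding sw_ack resource bit 0x04 is released by writing
 *	(0x04 << 8) | 0x04 == 0x0404
 * to the ALIAS register, which clears both the Timeout copy and the
 * Pending copy of that resource bit in a single store.
 */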
/*
 * Process the receipt of a RETRY message
 */
static inline void uv_bau_process_retry_msg(struct msg_desc *mdp,
					    struct bau_control *bcp)
{
	int i;
	int cancel_count = 0;
	int slot2;
	unsigned long msg_res;
	unsigned long mmr = 0;
	struct bau_payload_queue_entry *msg;
	struct bau_payload_queue_entry *msg2;
	struct ptc_stats *stat;

	msg = mdp->msg;
	stat = bcp->statp;
	stat->d_retries++;
	/*
	 * cancel any message from msg+1 to the retry itself
	 */
	for (msg2 = msg+1, i = 0; i < DEST_Q_SIZE; msg2++, i++) {
		if (msg2 > mdp->va_queue_last)
			msg2 = mdp->va_queue_first;
		if (msg2 == msg)
			break;

		/* same conditions for cancellation as uv_do_reset */
		if ((msg2->replied_to == 0) && (msg2->canceled == 0) &&
		    (msg2->sw_ack_vector) && ((msg2->sw_ack_vector &
			msg->sw_ack_vector) == 0) &&
		    (msg2->sending_cpu == msg->sending_cpu) &&
		    (msg2->msg_type != MSG_NOOP)) {
			slot2 = msg2 - mdp->va_queue_first;
			mmr = uv_read_local_mmr
				(UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE);
			msg_res = msg2->sw_ack_vector;
			/*
			 * This is a message retry; clear the resources held
			 * by the previous message only if they timed out.
			 * If it has not timed out we have an unexpected
			 * situation to report.
			 */
			if (mmr & (msg_res << UV_SW_ACK_NPENDING)) {
				/*
				 * is the resource timed out?
				 * make everyone ignore the cancelled message.
				 */
				msg2->canceled = 1;
				stat->d_canceled++;
				cancel_count++;
				uv_write_local_mmr(
				    UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_ALIAS,
					(msg_res << UV_SW_ACK_NPENDING) |
								msg_res);
			}
		}
	}
	if (!cancel_count)
		stat->d_nocanceled++;
}

/*
 * Do all the things a cpu should do for a TLB shootdown message.
 * Other cpus may come here at the same time for this message.
 */
static void uv_bau_process_message(struct msg_desc *mdp,
				   struct bau_control *bcp)
{
	int msg_ack_count;
	short socket_ack_count = 0;
	struct ptc_stats *stat;
	struct bau_payload_queue_entry *msg;
	struct bau_control *smaster = bcp->socket_master;

	/*
	 * This must be a normal message, or retry of a normal message
	 */
	msg = mdp->msg;
	stat = bcp->statp;
	if (msg->address == TLB_FLUSH_ALL) {
		local_flush_tlb();
		stat->d_alltlb++;
	} else {
		__flush_tlb_one(msg->address);
		stat->d_onetlb++;
	}
	stat->d_requestee++;

	/*
	 * One cpu on each uvhub has the additional job on a RETRY
	 * of releasing the resource held by the message that is
	 * being retried.  That message is identified by sending
	 * cpu number.
	 */
	if (msg->msg_type == MSG_RETRY && bcp == bcp->uvhub_master)
		uv_bau_process_retry_msg(mdp, bcp);

	/*
	 * This is a sw_ack message, so we have to reply to it.
	 * Count each responding cpu on the socket. This avoids
	 * pinging the count's cache line back and forth between
	 * the sockets.
	 */
	socket_ack_count = atomic_add_short_return(1, (struct atomic_short *)
			&smaster->socket_acknowledge_count[mdp->msg_slot]);
	if (socket_ack_count == bcp->cpus_in_socket) {
		/*
		 * Both sockets dump their completed count total into
		 * the message's count.
		 */
		smaster->socket_acknowledge_count[mdp->msg_slot] = 0;
		msg_ack_count = atomic_add_short_return(socket_ack_count,
				(struct atomic_short *)&msg->acknowledge_count);

		if (msg_ack_count == bcp->cpus_in_uvhub) {
			/*
			 * All cpus in uvhub saw it; reply
			 */
			uv_reply_to_message(mdp, bcp);
		}
	}

	return;
}
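
/*
 * Worked example of the two-level ack counting above (illustrative
 * only): on a hub with two 8-cpu sockets, the first seven cpus of a
 * socket only bump their socket master's per-slot count. The eighth
 * sees socket_ack_count == cpus_in_socket, zeroes the per-socket count
 * and folds 8 into the message's acknowledge_count; whichever socket
 * folds in second sees msg_ack_count == cpus_in_uvhub (16) and sends
 * the single hardware reply via uv_reply_to_message().
 */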
/*
 * Determine the first cpu on a uvhub.
 */
static int uvhub_to_first_cpu(int uvhub)
{
	int cpu;

	for_each_present_cpu(cpu)
		if (uvhub == uv_cpu_to_blade_id(cpu))
			return cpu;
	return -1;
}

/*
 * Last resort when we get a large number of destination timeouts is
 * to clear resources held by a given cpu.
 * Do this with IPI so that all messages in the BAU message queue
 * can be identified by their nonzero sw_ack_vector field.
 *
 * This is entered for a single cpu on the uvhub.
 * The sender wants this uvhub to free a specific message's
 * sw_ack resources.
 */
static void
uv_do_reset(void *ptr)
{
	int i;
	int slot;
	int count = 0;
	unsigned long mmr;
	unsigned long msg_res;
	struct bau_control *bcp;
	struct reset_args *rap;
	struct bau_payload_queue_entry *msg;
	struct ptc_stats *stat;

	bcp = &per_cpu(bau_control, smp_processor_id());
	rap = (struct reset_args *)ptr;
	stat = bcp->statp;
	stat->d_resets++;

	/*
	 * We're looking for the given sender, and
	 * will free its sw_ack resource.
	 * If all cpus finally responded after the timeout, its
	 * message 'replied_to' was set.
	 */
	for (msg = bcp->va_queue_first, i = 0; i < DEST_Q_SIZE; msg++, i++) {
		/* uv_do_reset: same conditions for cancellation as
		   uv_bau_process_retry_msg() */
		if ((msg->replied_to == 0) &&
		    (msg->canceled == 0) &&
		    (msg->sending_cpu == rap->sender) &&
		    (msg->sw_ack_vector) &&
		    (msg->msg_type != MSG_NOOP)) {
			/*
			 * make everyone else ignore this message
			 */
			msg->canceled = 1;
			slot = msg - bcp->va_queue_first;
			count++;
			/*
			 * only reset the resource if it is still pending
			 */
			mmr = uv_read_local_mmr
					(UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE);
			msg_res = msg->sw_ack_vector;
			if (mmr & msg_res) {
				stat->d_rcanceled++;
				uv_write_local_mmr(
				    UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_ALIAS,
					(msg_res << UV_SW_ACK_NPENDING) |
								msg_res);
			}
		}
	}
	return;
}

/*
 * Use IPI to get all target uvhubs to release resources held by
 * a given sending cpu number.
 */
static void uv_reset_with_ipi(struct bau_target_uvhubmask *distribution,
			      int sender)
{
	int uvhub;
	int cpu;
	cpumask_t mask;
	struct reset_args reset_args;

	reset_args.sender = sender;

	cpus_clear(mask);
	/* find a single cpu for each uvhub in this distribution mask */
	for (uvhub = 0;
	     uvhub < sizeof(struct bau_target_uvhubmask) * BITSPERBYTE;
	     uvhub++) {
		if (!bau_uvhub_isset(uvhub, distribution))
			continue;
		/* find a cpu for this uvhub */
		cpu = uvhub_to_first_cpu(uvhub);
		cpu_set(cpu, mask);
	}
	/* IPI all cpus; Preemption is already disabled */
	smp_call_function_many(&mask, uv_do_reset, (void *)&reset_args, 1);
	return;
}

static inline unsigned long
cycles_2_us(unsigned long long cyc)
{
	unsigned long long ns;
	unsigned long us;

	ns = (cyc * per_cpu(cyc2ns, smp_processor_id()))
						>> CYC2NS_SCALE_FACTOR;
	us = ns / 1000;
	return us;
}
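
/*
 * Worked example for cycles_2_us() (illustrative; assumes
 * CYC2NS_SCALE_FACTOR == 10, its x86 value at this time): on a 2 GHz
 * cpu, cyc2ns holds roughly 0.5 ns/cycle as 512 in fixed point, so for
 * cyc = 4000:
 *	ns = (4000 * 512) >> 10 = 2000, and the function returns 2 us.
 */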
/*
 * wait for all cpus on this hub to finish their sends and go quiet
 * leaves uvhub_quiesce set so that no new broadcasts are started by
 * bau_flush_send_and_wait()
 */
static inline void
quiesce_local_uvhub(struct bau_control *hmaster)
{
	atomic_add_short_return(1, (struct atomic_short *)
		 &hmaster->uvhub_quiesce);
}

/*
 * mark this quiet-requestor as done
 */
static inline void
end_uvhub_quiesce(struct bau_control *hmaster)
{
	atomic_add_short_return(-1, (struct atomic_short *)
		&hmaster->uvhub_quiesce);
}

/*
 * Wait for completion of a broadcast software ack message
 * return COMPLETE, RETRY(PLUGGED or TIMEOUT) or GIVEUP
 */
static int uv1_wait_completion(struct bau_desc *bau_desc,
	unsigned long mmr_offset, int right_shift, int this_cpu,
	struct bau_control *bcp, struct bau_control *smaster, long try)
{
	unsigned long descriptor_status;
	cycles_t ttime;
	struct ptc_stats *stat = bcp->statp;

	/* spin on the status MMR, waiting for it to go idle */
	while ((descriptor_status = (((unsigned long)
		uv_read_local_mmr(mmr_offset) >>
			right_shift) & UV_ACT_STATUS_MASK)) !=
			DESC_STATUS_IDLE) {
		/*
		 * Our software ack messages may be blocked because
		 * there are no swack resources available.  As long
		 * as none of them has timed out hardware will NACK
		 * our message and its state will stay IDLE.
		 */
		if (descriptor_status == DESC_STATUS_SOURCE_TIMEOUT) {
			stat->s_stimeout++;
			return FLUSH_GIVEUP;
		} else if (descriptor_status ==
					DESC_STATUS_DESTINATION_TIMEOUT) {
			stat->s_dtimeout++;
			ttime = get_cycles();

			/*
			 * Our retries may be blocked by all destination
			 * swack resources being consumed, and a timeout
			 * pending.  In that case hardware returns the
			 * ERROR that looks like a destination timeout.
			 */
			if (cycles_2_us(ttime - bcp->send_message) <
							timeout_us) {
				bcp->conseccompletes = 0;
				return FLUSH_RETRY_PLUGGED;
			}

			bcp->conseccompletes = 0;
			return FLUSH_RETRY_TIMEOUT;
		} else {
			/*
			 * descriptor_status is still BUSY
			 */
			cpu_relax();
		}
	}
	bcp->conseccompletes++;
	return FLUSH_COMPLETE;
}

static int uv2_wait_completion(struct bau_desc *bau_desc,
	unsigned long mmr_offset, int right_shift, int this_cpu,
	struct bau_control *bcp, struct bau_control *smaster, long try)
{
	unsigned long descriptor_status;
	unsigned long descriptor_status2;
	int cpu;
	cycles_t ttime;
	struct ptc_stats *stat = bcp->statp;

	/* UV2 has an extra bit of status */
	cpu = bcp->uvhub_cpu;
	/* spin on the status MMR, waiting for it to go idle */
	descriptor_status = (((unsigned long)(uv_read_local_mmr
		(mmr_offset)) >> right_shift) & UV_ACT_STATUS_MASK);
	descriptor_status2 = (((unsigned long)uv_read_local_mmr
		(UV2H_LB_BAU_SB_ACTIVATION_STATUS_2) >> cpu) & 0x1UL);
	descriptor_status = (descriptor_status << 1) |
		descriptor_status2;
	while (descriptor_status != UV2H_DESC_IDLE) {
		/*
		 * Our software ack messages may be blocked because
		 * there are no swack resources available.  As long
		 * as none of them has timed out hardware will NACK
		 * our message and its state will stay IDLE.
		 */
		if ((descriptor_status == UV2H_DESC_SOURCE_TIMEOUT) ||
		    (descriptor_status == UV2H_DESC_DEST_STRONG_NACK) ||
		    (descriptor_status == UV2H_DESC_DEST_PUT_ERR)) {
			stat->s_stimeout++;
			return FLUSH_GIVEUP;
		} else if (descriptor_status == UV2H_DESC_DEST_TIMEOUT) {
			stat->s_dtimeout++;
			ttime = get_cycles();
			/*
			 * Our retries may be blocked by all destination
			 * swack resources being consumed, and a timeout
			 * pending.  In that case hardware returns the
			 * ERROR that looks like a destination timeout.
			 */
			if (cycles_2_us(ttime - bcp->send_message) <
							timeout_us) {
				bcp->conseccompletes = 0;
				return FLUSH_RETRY_PLUGGED;
			}
			bcp->conseccompletes = 0;
			return FLUSH_RETRY_TIMEOUT;
		} else {
			/*
			 * descriptor_status is still BUSY
			 */
			cpu_relax();
		}
		descriptor_status = (((unsigned long)(uv_read_local_mmr
			(mmr_offset)) >> right_shift) &
			UV_ACT_STATUS_MASK);
		descriptor_status2 = (((unsigned long)uv_read_local_mmr
			(UV2H_LB_BAU_SB_ACTIVATION_STATUS_2) >> cpu) &
			0x1UL);
		descriptor_status = (descriptor_status << 1) |
			descriptor_status2;
	}
	bcp->conseccompletes++;
	return FLUSH_COMPLETE;
}

static int uv_wait_completion(struct bau_desc *bau_desc,
	unsigned long mmr_offset, int right_shift, int this_cpu,
	struct bau_control *bcp, struct bau_control *smaster, long try)
{
	if (is_uv1_hub())
		return uv1_wait_completion(bau_desc, mmr_offset, right_shift,
					   this_cpu, bcp, smaster, try);
	else
		return uv2_wait_completion(bau_desc, mmr_offset, right_shift,
					   this_cpu, bcp, smaster, try);
}

static inline cycles_t
sec_2_cycles(unsigned long sec)
{
	unsigned long ns;
	cycles_t cyc;

	ns = sec * 1000000000;
	cyc = (ns << CYC2NS_SCALE_FACTOR)/(per_cpu(cyc2ns, smp_processor_id()));
	return cyc;
}

/*
 * conditionally add 1 to *v, unless *v is >= u
 * return 0 if we cannot add 1 to *v because it is >= u
 * return 1 if we can add 1 to *v because it is < u
 * the add is atomic
 *
 * This is close to atomic_add_unless(), but this allows the 'u' value
 * to be lowered below the current 'v'.  atomic_add_unless can only stop
 * on equal.
 */
static inline int atomic_inc_unless_ge(spinlock_t *lock, atomic_t *v, int u)
{
	spin_lock(lock);
	if (atomic_read(v) >= u) {
		spin_unlock(lock);
		return 0;
	}
	atomic_inc(v);
	spin_unlock(lock);
	return 1;
}
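
/*
 * Illustrative use (see uv_flush_send_and_wait() below): a sender spins
 * with cpu_relax() until atomic_inc_unless_ge() succeeds, i.e. until
 * active_descriptor_count can be raised without reaching
 * max_bau_concurrent. Because max_bau_concurrent may be lowered at any
 * time (destination_timeout() drops it to 1), the "unless >=" test is
 * required; atomic_add_unless()'s "unless ==" could race past a limit
 * that was reduced below the current count.
 */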
/*
 * Our retries are blocked by all destination swack resources being
 * in use, and a timeout is pending. In that case hardware immediately
 * returns the ERROR that looks like a destination timeout.
 */
static void
destination_plugged(struct bau_desc *bau_desc, struct bau_control *bcp,
			struct bau_control *hmaster, struct ptc_stats *stat)
{
	udelay(bcp->plugged_delay);
	bcp->plugged_tries++;
	if (bcp->plugged_tries >= bcp->plugsb4reset) {
		bcp->plugged_tries = 0;
		quiesce_local_uvhub(hmaster);
		spin_lock(&hmaster->queue_lock);
		uv_reset_with_ipi(&bau_desc->distribution, bcp->cpu);
		spin_unlock(&hmaster->queue_lock);
		end_uvhub_quiesce(hmaster);
		bcp->ipi_attempts++;
		stat->s_resets_plug++;
	}
}

static void
destination_timeout(struct bau_desc *bau_desc, struct bau_control *bcp,
			struct bau_control *hmaster, struct ptc_stats *stat)
{
	hmaster->max_bau_concurrent = 1;
	bcp->timeout_tries++;
	if (bcp->timeout_tries >= bcp->timeoutsb4reset) {
		bcp->timeout_tries = 0;
		quiesce_local_uvhub(hmaster);
		spin_lock(&hmaster->queue_lock);
		uv_reset_with_ipi(&bau_desc->distribution, bcp->cpu);
		spin_unlock(&hmaster->queue_lock);
		end_uvhub_quiesce(hmaster);
		bcp->ipi_attempts++;
		stat->s_resets_timeout++;
	}
}

/*
 * Completions are taking a very long time due to a congested numalink
 * network.
 */
static void
disable_for_congestion(struct bau_control *bcp, struct ptc_stats *stat)
{
	int tcpu;
	struct bau_control *tbcp;

	/* let only one cpu do this disabling */
	spin_lock(&disable_lock);
	if (!baudisabled && bcp->period_requests &&
	    ((bcp->period_time / bcp->period_requests) > congested_cycles)) {
		/* it becomes this cpu's job to turn on the use of the
		   BAU again */
		baudisabled = 1;
		bcp->set_bau_off = 1;
		bcp->set_bau_on_time = get_cycles() +
			sec_2_cycles(bcp->congested_period);
		stat->s_bau_disabled++;
		for_each_present_cpu(tcpu) {
			tbcp = &per_cpu(bau_control, tcpu);
			tbcp->baudisabled = 1;
		}
	}
	spin_unlock(&disable_lock);
}

/**
 * uv_flush_send_and_wait
 *
 * Send a broadcast and wait for it to complete.
 *
 * The flush_mask contains the cpus the broadcast is to be sent to including
 * cpus that are on the local uvhub.
 *
 * Returns 0 if all flushing represented in the mask was done.
 * Returns 1 if it gives up entirely and the original cpu mask is to be
 * returned to the kernel.
 */
int uv_flush_send_and_wait(struct bau_desc *bau_desc,
			   struct cpumask *flush_mask, struct bau_control *bcp)
{
	int right_shift;
	int completion_status = 0;
	int seq_number = 0;
	long try = 0;
	int cpu = bcp->uvhub_cpu;
	int this_cpu = bcp->cpu;
	unsigned long mmr_offset;
	unsigned long index;
	cycles_t time1;
	cycles_t time2;
	cycles_t elapsed;
	struct ptc_stats *stat = bcp->statp;
	struct bau_control *smaster = bcp->socket_master;
	struct bau_control *hmaster = bcp->uvhub_master;

	if (is_uv1_hub() &&
	    !atomic_inc_unless_ge(&hmaster->uvhub_lock,
			&hmaster->active_descriptor_count,
			hmaster->max_bau_concurrent)) {
		stat->s_throttles++;
		do {
			cpu_relax();
		} while (!atomic_inc_unless_ge(&hmaster->uvhub_lock,
			&hmaster->active_descriptor_count,
			hmaster->max_bau_concurrent));
	}
	while (hmaster->uvhub_quiesce)
		cpu_relax();

	if (cpu < UV_CPUS_PER_ACT_STATUS) {
		mmr_offset = UVH_LB_BAU_SB_ACTIVATION_STATUS_0;
		right_shift = cpu * UV_ACT_STATUS_SIZE;
	} else {
		mmr_offset = UVH_LB_BAU_SB_ACTIVATION_STATUS_1;
		right_shift =
		    ((cpu - UV_CPUS_PER_ACT_STATUS) * UV_ACT_STATUS_SIZE);
	}
	time1 = get_cycles();
	do {
		if (try == 0) {
			bau_desc->header.msg_type = MSG_REGULAR;
			seq_number = bcp->message_number++;
		} else {
			bau_desc->header.msg_type = MSG_RETRY;
			stat->s_retry_messages++;
		}
		bau_desc->header.sequence = seq_number;
		index = (1UL << UVH_LB_BAU_SB_ACTIVATION_CONTROL_PUSH_SHFT) |
			bcp->uvhub_cpu;
		bcp->send_message = get_cycles();
		uv_write_local_mmr(UVH_LB_BAU_SB_ACTIVATION_CONTROL, index);
		try++;
		completion_status = uv_wait_completion(bau_desc, mmr_offset,
			right_shift, this_cpu, bcp, smaster, try);

		if (completion_status == FLUSH_RETRY_PLUGGED) {
			destination_plugged(bau_desc, bcp, hmaster, stat);
		} else if (completion_status == FLUSH_RETRY_TIMEOUT) {
			destination_timeout(bau_desc, bcp, hmaster, stat);
		}
		if (bcp->ipi_attempts >= bcp->ipi_reset_limit) {
			bcp->ipi_attempts = 0;
			completion_status = FLUSH_GIVEUP;
			break;
		}
		cpu_relax();
	} while ((completion_status == FLUSH_RETRY_PLUGGED) ||
		 (completion_status == FLUSH_RETRY_TIMEOUT));
	time2 = get_cycles();
	bcp->plugged_tries = 0;
	bcp->timeout_tries = 0;
	if ((completion_status == FLUSH_COMPLETE) &&
	    (bcp->conseccompletes > bcp->complete_threshold) &&
	    (hmaster->max_bau_concurrent <
					hmaster->max_bau_concurrent_constant))
		hmaster->max_bau_concurrent++;
	while (hmaster->uvhub_quiesce)
		cpu_relax();
	atomic_dec(&hmaster->active_descriptor_count);
	if (time2 > time1) {
		elapsed = time2 - time1;
		stat->s_time += elapsed;
		if ((completion_status == FLUSH_COMPLETE) && (try == 1)) {
			bcp->period_requests++;
			bcp->period_time += elapsed;
			if ((elapsed > congested_cycles) &&
			    (bcp->period_requests > bcp->congested_reps)) {
				disable_for_congestion(bcp, stat);
			}
		}
	} else
		stat->s_requestor--;
	if (completion_status == FLUSH_COMPLETE && try > 1)
		stat->s_retriesok++;
	else if (completion_status == FLUSH_GIVEUP) {
		stat->s_giveup++;
		return 1;
	}
	return 0;
}
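
/*
 * Send-path summary (descriptive note, not original to this file): the
 * first activation of a descriptor goes out as MSG_REGULAR with a fresh
 * sequence number; every later attempt of the same flush is sent as
 * MSG_RETRY with that same sequence number, which is what lets
 * uv_bau_process_retry_msg() on the destination cancel earlier copies
 * still holding sw_ack resources.
 */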
/**
 * uv_flush_tlb_others - globally purge translation cache of a virtual
 * address or all TLBs
 * @cpumask: mask of all cpus in which the address is to be removed
 * @mm: mm_struct containing virtual address range
 * @va: virtual address to be removed (or TLB_FLUSH_ALL for all TLBs on cpu)
 * @cpu: the current cpu
 *
 * This is the entry point for initiating any UV global TLB shootdown.
 *
 * Purges the translation caches of all specified processors of the given
 * virtual address, or purges all TLBs on specified processors.
 *
 * The caller has derived the cpumask from the mm_struct.  This function
 * is called only if there are bits set in the mask. (e.g. flush_tlb_page())
 *
 * The cpumask is converted into a uvhubmask of the uvhubs containing
 * those cpus.
 *
 * Note that this function should be called with preemption disabled.
 *
 * Returns NULL if all remote flushing was done.
 * Returns pointer to cpumask if some remote flushing remains to be
 * done.  The returned pointer is valid till preemption is re-enabled.
 */
const struct cpumask *uv_flush_tlb_others(const struct cpumask *cpumask,
					  struct mm_struct *mm,
					  unsigned long va, unsigned int cpu)
{
	int locals = 0;
	int remotes = 0;
	int hubs = 0;
	int tcpu;
	int tpnode;
	struct bau_desc *bau_desc;
	struct cpumask *flush_mask;
	struct ptc_stats *stat;
	struct bau_control *bcp;
	struct bau_control *tbcp;
	struct hub_and_pnode *hpp;

	/* kernel was booted 'nobau' */
	if (nobau)
		return cpumask;

	bcp = &per_cpu(bau_control, cpu);
	stat = bcp->statp;

	/* bau was disabled due to slow response */
	if (bcp->baudisabled) {
		/* the cpu that disabled it must re-enable it */
		if (bcp->set_bau_off) {
			if (get_cycles() >= bcp->set_bau_on_time) {
				stat->s_bau_reenabled++;
				baudisabled = 0;
				for_each_present_cpu(tcpu) {
					tbcp = &per_cpu(bau_control, tcpu);
					tbcp->baudisabled = 0;
					tbcp->period_requests = 0;
					tbcp->period_time = 0;
				}
			}
		}
		return cpumask;
	}

	/*
	 * Each sending cpu has a per-cpu mask which it fills from the caller's
	 * cpu mask.  All cpus are converted to uvhubs and copied to the
	 * activation descriptor.
	 */
	flush_mask = (struct cpumask *)per_cpu(uv_flush_tlb_mask, cpu);
	/* don't actually do a shootdown of the local cpu */
	cpumask_andnot(flush_mask, cpumask, cpumask_of(cpu));
	if (cpu_isset(cpu, *cpumask))
		stat->s_ntargself++;

	bau_desc = bcp->descriptor_base;
	bau_desc += UV_ITEMS_PER_DESCRIPTOR * bcp->uvhub_cpu;
	bau_uvhubs_clear(&bau_desc->distribution, UV_DISTRIBUTION_SIZE);
	for_each_cpu(tcpu, flush_mask) {
		/*
		 * The distribution vector is a bit map of pnodes, relative
		 * to the partition base pnode (and the partition base nasid
		 * in the header).
		 * Translate cpu to pnode and hub using an array stored
		 * in local memory.
		 */
		hpp = &bcp->socket_master->target_hub_and_pnode[tcpu];
		tpnode = hpp->pnode - bcp->partition_base_pnode;
		bau_uvhub_set(tpnode, &bau_desc->distribution);
		if (hpp->uvhub == bcp->uvhub)
			locals++;
		else
			remotes++;
	}
	if ((locals + remotes) == 0)
		return NULL;
	stat->s_requestor++;
	stat->s_ntargcpu += remotes + locals;
	stat->s_ntargremotes += remotes;
	stat->s_ntarglocals += locals;

	/* uvhub statistics */
	hubs = bau_uvhub_weight(&bau_desc->distribution);
	if (locals) {
		stat->s_ntarglocaluvhub++;
		stat->s_ntargremoteuvhub += (hubs - 1);
	} else
		stat->s_ntargremoteuvhub += hubs;
	stat->s_ntarguvhub += hubs;
	if (hubs >= 16)
		stat->s_ntarguvhub16++;
	else if (hubs >= 8)
		stat->s_ntarguvhub8++;
	else if (hubs >= 4)
		stat->s_ntarguvhub4++;
	else if (hubs >= 2)
		stat->s_ntarguvhub2++;
	else
		stat->s_ntarguvhub1++;

	bau_desc->payload.address = va;
	bau_desc->payload.sending_cpu = cpu;

	/*
	 * uv_flush_send_and_wait returns 0 if all cpus were messaged,
	 * or 1 if it gave up and the original cpumask should be returned.
	 */
	if (!uv_flush_send_and_wait(bau_desc, flush_mask, bcp))
		return NULL;
	else
		return cpumask;
}
/*
 * The BAU message interrupt comes here. (registered by set_intr_gate)
 * See entry_64.S
 *
 * We received a broadcast assist message.
 *
 * Interrupts are disabled; this interrupt could represent
 * the receipt of several messages.
 *
 * All cores/threads on this hub get this interrupt.
 * The last one to see it does the software ack.
 * (the resource will not be freed until noninterruptible cpus see this
 *  interrupt; hardware may timeout the s/w ack and reply ERROR)
 */
void uv_bau_message_interrupt(struct pt_regs *regs)
{
	int count = 0;
	cycles_t time_start;
	struct bau_payload_queue_entry *msg;
	struct bau_control *bcp;
	struct ptc_stats *stat;
	struct msg_desc msgdesc;

	time_start = get_cycles();
	bcp = &per_cpu(bau_control, smp_processor_id());
	stat = bcp->statp;
	msgdesc.va_queue_first = bcp->va_queue_first;
	msgdesc.va_queue_last = bcp->va_queue_last;
	msg = bcp->bau_msg_head;
	while (msg->sw_ack_vector) {
		count++;
		msgdesc.msg_slot = msg - msgdesc.va_queue_first;
		msgdesc.sw_ack_slot = ffs(msg->sw_ack_vector) - 1;
		msgdesc.msg = msg;
		uv_bau_process_message(&msgdesc, bcp);
		msg++;
		if (msg > msgdesc.va_queue_last)
			msg = msgdesc.va_queue_first;
		bcp->bau_msg_head = msg;
	}
	stat->d_time += (get_cycles() - time_start);
	if (!count)
		stat->d_nomsg++;
	else if (count > 1)
		stat->d_multmsg++;
	ack_APIC_irq();
}

/*
 * uv_enable_timeouts
 *
 * Each target uvhub (i.e. a uvhub that has cpus) needs to have
 * shootdown message timeouts enabled.  The timeout does not cause
 * an interrupt, but causes an error message to be returned to
 * the sender.
 */
static void __init uv_enable_timeouts(void)
{
	int uvhub;
	int nuvhubs;
	int pnode;
	unsigned long mmr_image;

	nuvhubs = uv_num_possible_blades();

	for (uvhub = 0; uvhub < nuvhubs; uvhub++) {
		if (!uv_blade_nr_possible_cpus(uvhub))
			continue;

		pnode = uv_blade_to_pnode(uvhub);
		mmr_image =
		    uv_read_global_mmr64(pnode, UVH_LB_BAU_MISC_CONTROL);
		/*
		 * Set the timeout period and then lock it in, in three
		 * steps; captures and locks in the period.
		 *
		 * To program the period, the SOFT_ACK_MODE must be off.
		 */
		mmr_image &= ~((unsigned long)1 <<
		    UVH_LB_BAU_MISC_CONTROL_ENABLE_INTD_SOFT_ACK_MODE_SHFT);
		uv_write_global_mmr64
		    (pnode, UVH_LB_BAU_MISC_CONTROL, mmr_image);
		/*
		 * Set the 4-bit period.
		 */
		mmr_image &= ~((unsigned long)0xf <<
		    UVH_LB_BAU_MISC_CONTROL_INTD_SOFT_ACK_TIMEOUT_PERIOD_SHFT);
		mmr_image |= (UV_INTD_SOFT_ACK_TIMEOUT_PERIOD <<
		    UVH_LB_BAU_MISC_CONTROL_INTD_SOFT_ACK_TIMEOUT_PERIOD_SHFT);
		uv_write_global_mmr64
		    (pnode, UVH_LB_BAU_MISC_CONTROL, mmr_image);
		/*
		 * UV1:
		 * Subsequent reversals of the timebase bit (3) cause an
		 * immediate timeout of one or all INTD resources as
		 * indicated in bits 2:0 (7 causes all of them to timeout).
		 */
		mmr_image |= ((unsigned long)1 <<
		    UVH_LB_BAU_MISC_CONTROL_ENABLE_INTD_SOFT_ACK_MODE_SHFT);
		if (is_uv2_hub()) {
			mmr_image |= ((unsigned long)1 << UV2_LEG_SHFT);
			mmr_image |= ((unsigned long)1 << UV2_EXT_SHFT);
		}
		uv_write_global_mmr64
		    (pnode, UVH_LB_BAU_MISC_CONTROL, mmr_image);
	}
}
static void *uv_ptc_seq_start(struct seq_file *file, loff_t *offset)
{
	if (*offset < num_possible_cpus())
		return offset;
	return NULL;
}

static void *uv_ptc_seq_next(struct seq_file *file, void *data, loff_t *offset)
{
	(*offset)++;
	if (*offset < num_possible_cpus())
		return offset;
	return NULL;
}

static void uv_ptc_seq_stop(struct seq_file *file, void *data)
{
}

static inline unsigned long long
microsec_2_cycles(unsigned long microsec)
{
	unsigned long ns;
	unsigned long long cyc;

	ns = microsec * 1000;
	cyc = (ns << CYC2NS_SCALE_FACTOR)/(per_cpu(cyc2ns, smp_processor_id()));
	return cyc;
}

/*
 * Display the statistics thru /proc.
 * 'data' points to the cpu number
 */
static int uv_ptc_seq_show(struct seq_file *file, void *data)
{
	struct ptc_stats *stat;
	int cpu;

	cpu = *(loff_t *)data;

	if (!cpu) {
		seq_printf(file,
		"# cpu sent stime self locals remotes ncpus localhub ");
		seq_printf(file,
			"remotehub numuvhubs numuvhubs16 numuvhubs8 ");
		seq_printf(file,
			"numuvhubs4 numuvhubs2 numuvhubs1 dto ");
		seq_printf(file,
			"retries rok resetp resett giveup sto bz throt ");
		seq_printf(file,
			"sw_ack recv rtime all ");
		seq_printf(file,
			"one mult none retry canc nocan reset rcan ");
		seq_printf(file,
			"disable enable\n");
	}
	if (cpu < num_possible_cpus() && cpu_online(cpu)) {
		stat = &per_cpu(ptcstats, cpu);
		/* source side statistics */
		seq_printf(file,
			"cpu %d %ld %ld %ld %ld %ld %ld %ld %ld %ld %ld ",
			   cpu, stat->s_requestor, cycles_2_us(stat->s_time),
			   stat->s_ntargself, stat->s_ntarglocals,
			   stat->s_ntargremotes, stat->s_ntargcpu,
			   stat->s_ntarglocaluvhub, stat->s_ntargremoteuvhub,
			   stat->s_ntarguvhub, stat->s_ntarguvhub16);
		seq_printf(file, "%ld %ld %ld %ld %ld ",
			   stat->s_ntarguvhub8, stat->s_ntarguvhub4,
			   stat->s_ntarguvhub2, stat->s_ntarguvhub1,
			   stat->s_dtimeout);
		seq_printf(file, "%ld %ld %ld %ld %ld %ld %ld %ld ",
			   stat->s_retry_messages, stat->s_retriesok,
			   stat->s_resets_plug, stat->s_resets_timeout,
			   stat->s_giveup, stat->s_stimeout,
			   stat->s_busy, stat->s_throttles);

		/* destination side statistics */
		seq_printf(file,
			   "%lx %ld %ld %ld %ld %ld %ld %ld %ld %ld %ld %ld ",
			   uv_read_global_mmr64(uv_cpu_to_pnode(cpu),
					UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE),
			   stat->d_requestee, cycles_2_us(stat->d_time),
			   stat->d_alltlb, stat->d_onetlb, stat->d_multmsg,
			   stat->d_nomsg, stat->d_retries, stat->d_canceled,
			   stat->d_nocanceled, stat->d_resets,
			   stat->d_rcanceled);
		seq_printf(file, "%ld %ld\n",
			stat->s_bau_disabled, stat->s_bau_reenabled);
	}
	return 0;
}
/*
 * Display the tunables thru debugfs
 */
static ssize_t tunables_read(struct file *file, char __user *userbuf,
						size_t count, loff_t *ppos)
{
	char *buf;
	int ret;

	buf = kasprintf(GFP_KERNEL, "%s %s %s\n%d %d %d %d %d %d %d %d %d\n",
		"max_bau_concurrent plugged_delay plugsb4reset",
		"timeoutsb4reset ipi_reset_limit complete_threshold",
		"congested_response_us congested_reps congested_period",
		max_bau_concurrent, plugged_delay, plugsb4reset,
		timeoutsb4reset, ipi_reset_limit, complete_threshold,
		congested_response_us, congested_reps, congested_period);

	if (!buf)
		return -ENOMEM;

	ret = simple_read_from_buffer(userbuf, count, ppos, buf, strlen(buf));
	kfree(buf);
	return ret;
}

/*
 * -1: reset the statistics
 *  0: display meaning of the statistics
 */
static ssize_t uv_ptc_proc_write(struct file *file, const char __user *user,
				 size_t count, loff_t *data)
{
	int cpu;
	long input_arg;
	char optstr[64];
	struct ptc_stats *stat;

	if (count == 0 || count > sizeof(optstr))
		return -EINVAL;
	if (copy_from_user(optstr, user, count))
		return -EFAULT;
	optstr[count - 1] = '\0';
	if (strict_strtol(optstr, 10, &input_arg) < 0) {
		printk(KERN_DEBUG "%s is invalid\n", optstr);
		return -EINVAL;
	}

	if (input_arg == 0) {
		printk(KERN_DEBUG "# cpu: cpu number\n");
		printk(KERN_DEBUG "Sender statistics:\n");
		printk(KERN_DEBUG
		"sent: number of shootdown messages sent\n");
		printk(KERN_DEBUG
		"stime: time spent sending messages\n");
		printk(KERN_DEBUG
		"numuvhubs: number of hubs targeted with shootdown\n");
		printk(KERN_DEBUG
		"numuvhubs16: number times 16 or more hubs targeted\n");
		printk(KERN_DEBUG
		"numuvhubs8: number times 8 or more hubs targeted\n");
		printk(KERN_DEBUG
		"numuvhubs4: number times 4 or more hubs targeted\n");
		printk(KERN_DEBUG
		"numuvhubs2: number times 2 or more hubs targeted\n");
		printk(KERN_DEBUG
		"numuvhubs1: number times 1 hub targeted\n");
		printk(KERN_DEBUG
		"numcpus: number of cpus targeted with shootdown\n");
		printk(KERN_DEBUG
		"dto: number of destination timeouts\n");
		printk(KERN_DEBUG
		"retries: destination timeout retries sent\n");
		printk(KERN_DEBUG
		"rok: destination timeouts successfully retried\n");
		printk(KERN_DEBUG
		"resetp: ipi-style resource resets for plugs\n");
		printk(KERN_DEBUG
		"resett: ipi-style resource resets for timeouts\n");
		printk(KERN_DEBUG
		"giveup: fall-backs to ipi-style shootdowns\n");
		printk(KERN_DEBUG
		"sto: number of source timeouts\n");
		printk(KERN_DEBUG
		"bz: number of stay-busy's\n");
		printk(KERN_DEBUG
		"throt: number times spun in throttle\n");
		printk(KERN_DEBUG "Destination side statistics:\n");
		printk(KERN_DEBUG
		"sw_ack: image of UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE\n");
		printk(KERN_DEBUG
		"recv: shootdown messages received\n");
		printk(KERN_DEBUG
		"rtime: time spent processing messages\n");
		printk(KERN_DEBUG
		"all: shootdown all-tlb messages\n");
		printk(KERN_DEBUG
		"one: shootdown one-tlb messages\n");
		printk(KERN_DEBUG
		"mult: interrupts that found multiple messages\n");
		printk(KERN_DEBUG
		"none: interrupts that found no messages\n");
		printk(KERN_DEBUG
		"retry: number of retry messages processed\n");
		printk(KERN_DEBUG
		"canc: number messages canceled by retries\n");
		printk(KERN_DEBUG
		"nocan: number retries that found nothing to cancel\n");
		printk(KERN_DEBUG
		"reset: number of ipi-style reset requests processed\n");
		printk(KERN_DEBUG
		"rcan: number messages canceled by reset requests\n");
		printk(KERN_DEBUG
		"disable: number times use of the BAU was disabled\n");
		printk(KERN_DEBUG
		"enable: number times use of the BAU was re-enabled\n");
	} else if (input_arg == -1) {
		for_each_present_cpu(cpu) {
			stat = &per_cpu(ptcstats, cpu);
			memset(stat, 0, sizeof(struct ptc_stats));
		}
	}

	return count;
}

static int local_atoi(const char *name)
{
	int val = 0;

	for (;; name++) {
		switch (*name) {
		case '0' ... '9':
			val = 10*val+(*name-'0');
			break;
		default:
			return val;
		}
	}
}
/*
 * set the tunables
 * 0 values reset them to defaults
 */
static ssize_t tunables_write(struct file *file, const char __user *user,
				 size_t count, loff_t *data)
{
	int cpu;
	int cnt = 0;
	int val;
	char *p;
	char *q;
	char instr[64];
	struct bau_control *bcp;

	if (count == 0 || count > sizeof(instr)-1)
		return -EINVAL;
	if (copy_from_user(instr, user, count))
		return -EFAULT;

	instr[count] = '\0';
	/* count the fields */
	p = instr + strspn(instr, WHITESPACE);
	q = p;
	for (; *p; p = q + strspn(q, WHITESPACE)) {
		q = p + strcspn(p, WHITESPACE);
		cnt++;
		if (q == p)
			break;
	}
	if (cnt != 9) {
		printk(KERN_INFO "bau tunable error: should be 9 numbers\n");
		return -EINVAL;
	}

	p = instr + strspn(instr, WHITESPACE);
	q = p;
	for (cnt = 0; *p; p = q + strspn(q, WHITESPACE), cnt++) {
		q = p + strcspn(p, WHITESPACE);
		val = local_atoi(p);
		switch (cnt) {
		case 0:
			if (val == 0) {
				max_bau_concurrent = MAX_BAU_CONCURRENT;
				max_bau_concurrent_constant =
							MAX_BAU_CONCURRENT;
				continue;
			}
			bcp = &per_cpu(bau_control, smp_processor_id());
			if (val < 1 || val > bcp->cpus_in_uvhub) {
				printk(KERN_DEBUG
				"Error: BAU max concurrent %d is invalid\n",
				val);
				return -EINVAL;
			}
			max_bau_concurrent = val;
			max_bau_concurrent_constant = val;
			continue;
		case 1:
			if (val == 0)
				plugged_delay = PLUGGED_DELAY;
			else
				plugged_delay = val;
			continue;
		case 2:
			if (val == 0)
				plugsb4reset = PLUGSB4RESET;
			else
				plugsb4reset = val;
			continue;
		case 3:
			if (val == 0)
				timeoutsb4reset = TIMEOUTSB4RESET;
			else
				timeoutsb4reset = val;
			continue;
		case 4:
			if (val == 0)
				ipi_reset_limit = IPI_RESET_LIMIT;
			else
				ipi_reset_limit = val;
			continue;
		case 5:
			if (val == 0)
				complete_threshold = COMPLETE_THRESHOLD;
			else
				complete_threshold = val;
			continue;
		case 6:
			if (val == 0)
				congested_response_us = CONGESTED_RESPONSE_US;
			else
				congested_response_us = val;
			continue;
		case 7:
			if (val == 0)
				congested_reps = CONGESTED_REPS;
			else
				congested_reps = val;
			continue;
		case 8:
			if (val == 0)
				congested_period = CONGESTED_PERIOD;
			else
				congested_period = val;
			continue;
		}
		if (q == p)
			break;
	}
	for_each_present_cpu(cpu) {
		bcp = &per_cpu(bau_control, cpu);
		bcp->max_bau_concurrent = max_bau_concurrent;
		bcp->max_bau_concurrent_constant = max_bau_concurrent;
		bcp->plugged_delay = plugged_delay;
		bcp->plugsb4reset = plugsb4reset;
		bcp->timeoutsb4reset = timeoutsb4reset;
		bcp->ipi_reset_limit = ipi_reset_limit;
		bcp->complete_threshold = complete_threshold;
		bcp->congested_response_us = congested_response_us;
		bcp->congested_reps = congested_reps;
		bcp->congested_period = congested_period;
	}
	return count;
}

static const struct seq_operations uv_ptc_seq_ops = {
	.start		= uv_ptc_seq_start,
	.next		= uv_ptc_seq_next,
	.stop		= uv_ptc_seq_stop,
	.show		= uv_ptc_seq_show
};

static int uv_ptc_proc_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &uv_ptc_seq_ops);
}

static int tunables_open(struct inode *inode, struct file *file)
{
	return 0;
}

static const struct file_operations proc_uv_ptc_operations = {
	.open		= uv_ptc_proc_open,
	.read		= seq_read,
	.write		= uv_ptc_proc_write,
	.llseek		= seq_lseek,
	.release	= seq_release,
};

static const struct file_operations tunables_fops = {
	.open		= tunables_open,
	.read		= tunables_read,
	.write		= tunables_write,
	.llseek		= default_llseek,
};

static int __init uv_ptc_init(void)
{
	struct proc_dir_entry *proc_uv_ptc;

	if (!is_uv_system())
		return 0;

	proc_uv_ptc = proc_create(UV_PTC_BASENAME, 0444, NULL,
				  &proc_uv_ptc_operations);
	if (!proc_uv_ptc) {
		printk(KERN_ERR "unable to create %s proc entry\n",
		       UV_PTC_BASENAME);
		return -EINVAL;
	}

	tunables_dir = debugfs_create_dir(UV_BAU_TUNABLES_DIR, NULL);
	if (!tunables_dir) {
		printk(KERN_ERR "unable to create debugfs directory %s\n",
		       UV_BAU_TUNABLES_DIR);
		return -EINVAL;
	}
	tunables_file = debugfs_create_file(UV_BAU_TUNABLES_FILE, 0600,
			tunables_dir, NULL, &tunables_fops);
	if (!tunables_file) {
		printk(KERN_ERR "unable to create debugfs file %s\n",
		       UV_BAU_TUNABLES_FILE);
		return -EINVAL;
	}
	return 0;
}
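
/*
 * Usage sketch (illustrative; assumes the conventional names for these
 * control files, /proc/sgi_uv/ptc_statistics and
 * /sys/kernel/debug/sgi_uv/bau_tunables):
 *
 *	cat /proc/sgi_uv/ptc_statistics		# dump per-cpu counters
 *	echo -1 > /proc/sgi_uv/ptc_statistics	# reset the counters
 *	echo 0 > /proc/sgi_uv/ptc_statistics	# log field meanings
 *
 *	# write all nine tunables in the order tunables_read() lists
 *	# them; a 0 resets that field to its default:
 *	echo "0 0 0 0 0 0 0 0 0" > /sys/kernel/debug/sgi_uv/bau_tunables
 */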
/*
 * Initialize the sending side's sending buffers.
 */
static void
uv_activation_descriptor_init(int node, int pnode, int base_pnode)
{
	int i;
	int cpu;
	unsigned long pa;
	unsigned long m;
	unsigned long n;
	struct bau_desc *bau_desc;
	struct bau_desc *bd2;
	struct bau_control *bcp;

	/*
	 * each bau_desc is 64 bytes; there are 8 (UV_ITEMS_PER_DESCRIPTOR)
	 * per cpu; and one per cpu on the uvhub (UV_ADP_SIZE)
	 */
	bau_desc = kmalloc_node(sizeof(struct bau_desc) * UV_ADP_SIZE
				* UV_ITEMS_PER_DESCRIPTOR, GFP_KERNEL, node);
	BUG_ON(!bau_desc);

	pa = uv_gpa(bau_desc); /* need the real nasid */
	n = pa >> uv_nshift;
	m = pa & uv_mmask;

	/* the 14-bit pnode */
	uv_write_global_mmr64(pnode, UVH_LB_BAU_SB_DESCRIPTOR_BASE,
			      (n << UV_DESC_BASE_PNODE_SHIFT | m));
	/*
	 * Initializing all 8 (UV_ITEMS_PER_DESCRIPTOR) descriptors for each
	 * cpu even though we only use the first one; one descriptor can
	 * describe a broadcast to 256 uv hubs.
	 */
	for (i = 0, bd2 = bau_desc; i < (UV_ADP_SIZE*UV_ITEMS_PER_DESCRIPTOR);
		i++, bd2++) {
		memset(bd2, 0, sizeof(struct bau_desc));
		bd2->header.sw_ack_flag = 1;
		/*
		 * The base_dest_nasid set in the message header is the nasid
		 * of the first uvhub in the partition. The bit map will
		 * indicate destination pnode numbers relative to that base.
		 * They may not be consecutive if nasid striding is being used.
		 */
		bd2->header.base_dest_nasid = UV_PNODE_TO_NASID(base_pnode);
		bd2->header.dest_subnodeid = UV_LB_SUBNODEID;
		bd2->header.command = UV_NET_ENDPOINT_INTD;
		bd2->header.int_both = 1;
		/*
		 * all others need to be set to zero:
		 *   fairness chaining multilevel count replied_to
		 */
	}
	for_each_present_cpu(cpu) {
		if (pnode != uv_blade_to_pnode(uv_cpu_to_blade_id(cpu)))
			continue;
		bcp = &per_cpu(bau_control, cpu);
		bcp->descriptor_base = bau_desc;
	}
}

/*
 * initialize the destination side's receiving buffers
 * entered for each uvhub in the partition
 * - node is first node (kernel memory notion) on the uvhub
 * - pnode is the uvhub's physical identifier
 */
static void
uv_payload_queue_init(int node, int pnode)
{
	int pn;
	int cpu;
	char *cp;
	unsigned long pa;
	struct bau_payload_queue_entry *pqp;
	struct bau_payload_queue_entry *pqp_malloc;
	struct bau_control *bcp;

	pqp = kmalloc_node((DEST_Q_SIZE + 1)
			   * sizeof(struct bau_payload_queue_entry),
			   GFP_KERNEL, node);
	BUG_ON(!pqp);
	pqp_malloc = pqp;

	cp = (char *)pqp + 31;
	pqp = (struct bau_payload_queue_entry *)(((unsigned long)cp >> 5) << 5);

	for_each_present_cpu(cpu) {
		if (pnode != uv_cpu_to_pnode(cpu))
			continue;
		/* for every cpu on this pnode: */
		bcp = &per_cpu(bau_control, cpu);
		bcp->va_queue_first = pqp;
		bcp->bau_msg_head = pqp;
		bcp->va_queue_last = pqp + (DEST_Q_SIZE - 1);
	}
	/*
	 * need the pnode of where the memory was really allocated
	 */
	pa = uv_gpa(pqp);
	pn = pa >> uv_nshift;
	uv_write_global_mmr64(pnode,
			      UVH_LB_BAU_INTD_PAYLOAD_QUEUE_FIRST,
			      ((unsigned long)pn << UV_PAYLOADQ_PNODE_SHIFT) |
			      uv_physnodeaddr(pqp));
	uv_write_global_mmr64(pnode, UVH_LB_BAU_INTD_PAYLOAD_QUEUE_TAIL,
			      uv_physnodeaddr(pqp));
	uv_write_global_mmr64(pnode, UVH_LB_BAU_INTD_PAYLOAD_QUEUE_LAST,
			      (unsigned long)
			      uv_physnodeaddr(pqp + (DEST_Q_SIZE - 1)));
	/* in effect, all msg_type's are set to MSG_NOOP */
	memset(pqp, 0, sizeof(struct bau_payload_queue_entry) * DEST_Q_SIZE);
}
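
/*
 * Note on the payload queue alignment above (added comment): since
 * kmalloc_node() makes no 32-byte guarantee, uv_payload_queue_init()
 * over-allocates by one entry and rounds the pointer up. The
 * "cp = (char *)pqp + 31" plus ">> 5 << 5" pair is the usual align-up
 * idiom, equivalent to:
 *	pqp = (void *)(((unsigned long)pqp + 31) & ~31UL);
 * presumably because the queue base programmed into the MMRs must be
 * 32-byte (entry-size) aligned.
 */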
  1425. /*
  1426. * Initialization of each UV hub's structures
  1427. */
  1428. static void __init uv_init_uvhub(int uvhub, int vector, int base_pnode)
  1429. {
  1430. int node;
  1431. int pnode;
  1432. unsigned long apicid;
  1433. node = uvhub_to_first_node(uvhub);
  1434. pnode = uv_blade_to_pnode(uvhub);
  1435. uv_activation_descriptor_init(node, pnode, base_pnode);
  1436. uv_payload_queue_init(node, pnode);
  1437. /*
  1438. * The below initialization can't be in firmware because the
  1439. * messaging IRQ will be determined by the OS.
  1440. */
  1441. apicid = uvhub_to_first_apicid(uvhub) | uv_apicid_hibits;
  1442. uv_write_global_mmr64(pnode, UVH_BAU_DATA_CONFIG,
  1443. ((apicid << 32) | vector));
  1444. }
  1445. /*
  1446. * We will set BAU_MISC_CONTROL with a timeout period.
  1447. * But the BIOS has set UVH_AGING_PRESCALE_SEL and UVH_TRANSACTION_TIMEOUT.
  1448. * So the destination timeout period has be be calculated from them.
  1449. */
static int
calculate_destination_timeout(void)
{
	unsigned long mmr_image;
	int mult1;
	int mult2;
	int index;
	int base;
	int ret;
	unsigned long ts_ns;

	if (is_uv1_hub()) {
		mult1 = UV1_INTD_SOFT_ACK_TIMEOUT_PERIOD &
			BAU_MISC_CONTROL_MULT_MASK;
		mmr_image = uv_read_local_mmr(UVH_AGING_PRESCALE_SEL);
		index = (mmr_image >> BAU_URGENCY_7_SHIFT) & BAU_URGENCY_7_MASK;
		mmr_image = uv_read_local_mmr(UVH_TRANSACTION_TIMEOUT);
		mult2 = (mmr_image >> BAU_TRANS_SHIFT) & BAU_TRANS_MASK;
		base = timeout_base_ns[index];
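		/*
		 * the timeout in nanoseconds is the base period scaled by
		 * both BIOS multipliers; the caller (timeout_us) expects
		 * microseconds
		 */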
		ts_ns = base * mult1 * mult2;
		ret = ts_ns / 1000;
	} else {
		/* a 4-bit field: the top bit selects 10us vs 80us units,
		   the low 3 bits are a multiplier */
		mmr_image = uv_read_local_mmr(UVH_AGING_PRESCALE_SEL);
		mmr_image = (mmr_image & UV_SA_MASK) >> UV_SA_SHFT;
		if (mmr_image & ((unsigned long)1 << UV2_ACK_UNITS_SHFT))
			mult1 = 80;
		else
			mult1 = 10;
		base = mmr_image & UV2_ACK_MASK;
		ret = mult1 * base;
	}
	return ret;
}

/*
 * initialize the bau_control structure for each cpu
 */
static int __init uv_init_per_cpu(int nuvhubs, int base_part_pnode)
{
	int i;
	int cpu;
	int tcpu;
	int pnode;
	int uvhub;
	int have_hmaster;
	short socket = 0;
	unsigned short socket_mask;
	unsigned char *uvhub_mask;
	struct bau_control *bcp;
	struct uvhub_desc *bdp;
	struct socket_desc *sdp;
	struct bau_control *hmaster = NULL;
	struct bau_control *smaster = NULL;
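	/*
	 * scratch descriptors, used only during initialization to group
	 * the present cpus by socket and by uvhub
	 */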
	struct socket_desc {
		short num_cpus;
		short cpu_number[MAX_CPUS_PER_SOCKET];
	};
	struct uvhub_desc {
		unsigned short socket_mask;
		short num_cpus;
		short uvhub;
		short pnode;
		struct socket_desc socket[2];
	};
	struct uvhub_desc *uvhub_descs;

	timeout_us = calculate_destination_timeout();

	uvhub_descs = kmalloc(nuvhubs * sizeof(struct uvhub_desc), GFP_KERNEL);
	memset(uvhub_descs, 0, nuvhubs * sizeof(struct uvhub_desc));
	uvhub_mask = kzalloc((nuvhubs+7)/8, GFP_KERNEL);
	for_each_present_cpu(cpu) {
		bcp = &per_cpu(bau_control, cpu);
		memset(bcp, 0, sizeof(struct bau_control));
		pnode = uv_cpu_hub_info(cpu)->pnode;
		if ((pnode - base_part_pnode) >= UV_DISTRIBUTION_SIZE) {
			printk(KERN_EMERG
				"cpu %d pnode %d-%d beyond %d; BAU disabled\n",
				cpu, pnode, base_part_pnode,
				UV_DISTRIBUTION_SIZE);
			return 1;
		}
		bcp->osnode = cpu_to_node(cpu);
		bcp->partition_base_pnode = uv_partition_base_pnode;
		uvhub = uv_cpu_hub_info(cpu)->numa_blade_id;
		*(uvhub_mask + (uvhub/8)) |= (1 << (uvhub%8));
		bdp = &uvhub_descs[uvhub];
		bdp->num_cpus++;
		bdp->uvhub = uvhub;
		bdp->pnode = pnode;
		/*
		 * kludge: 'assuming' one node per socket, and assuming that
		 * disabling a socket just leaves a gap in node numbers
		 */
		socket = bcp->osnode & 1;
		bdp->socket_mask |= (1 << socket);
		sdp = &bdp->socket[socket];
		/* check the bound before writing, so cpu_number[] can't overflow */
		if (sdp->num_cpus >= MAX_CPUS_PER_SOCKET) {
			printk(KERN_EMERG "%d cpus per socket invalid\n",
				sdp->num_cpus + 1);
			return 1;
		}
		sdp->cpu_number[sdp->num_cpus] = cpu;
		sdp->num_cpus++;
	}
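
	/*
	 * second pass: for each populated uvhub, elect a socket master
	 * (first cpu of the socket) and a hub master (first cpu of the
	 * hub), and propagate the counts to every cpu
	 */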
	for (uvhub = 0; uvhub < nuvhubs; uvhub++) {
		if (!(*(uvhub_mask + (uvhub/8)) & (1 << (uvhub%8))))
			continue;
		have_hmaster = 0;
		bdp = &uvhub_descs[uvhub];
		socket_mask = bdp->socket_mask;
		socket = 0;
		while (socket_mask) {
			if (!(socket_mask & 1))
				goto nextsocket;
			sdp = &bdp->socket[socket];
			for (i = 0; i < sdp->num_cpus; i++) {
				cpu = sdp->cpu_number[i];
				bcp = &per_cpu(bau_control, cpu);
				bcp->cpu = cpu;
				if (i == 0) {
					smaster = bcp;
					if (!have_hmaster) {
						have_hmaster++;
						hmaster = bcp;
					}
				}
				bcp->cpus_in_uvhub = bdp->num_cpus;
				bcp->cpus_in_socket = sdp->num_cpus;
				bcp->socket_master = smaster;
				bcp->uvhub = bdp->uvhub;
				bcp->uvhub_master = hmaster;
				bcp->uvhub_cpu =
					uv_cpu_hub_info(cpu)->blade_processor_id;
				if (bcp->uvhub_cpu >= MAX_CPUS_PER_UVHUB) {
					printk(KERN_EMERG
						"%d cpus per uvhub invalid\n",
						bcp->uvhub_cpu);
					return 1;
				}
			}
			/*
			 * each socket gets a local array of pnodes/hubs;
			 * do this before the nextsocket label, so that an
			 * empty socket, which never set smaster, skips it
			 */
			bcp = smaster;
			bcp->target_hub_and_pnode = kmalloc_node(
				sizeof(struct hub_and_pnode) *
				num_possible_cpus(), GFP_KERNEL, bcp->osnode);
			memset(bcp->target_hub_and_pnode, 0,
				sizeof(struct hub_and_pnode) *
				num_possible_cpus());
			for_each_present_cpu(tcpu) {
				bcp->target_hub_and_pnode[tcpu].pnode =
					uv_cpu_hub_info(tcpu)->pnode;
				bcp->target_hub_and_pnode[tcpu].uvhub =
					uv_cpu_hub_info(tcpu)->numa_blade_id;
			}
nextsocket:
			socket++;
			socket_mask = (socket_mask >> 1);
		}
	}
	kfree(uvhub_descs);
	kfree(uvhub_mask);
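
	/* finally, seed every cpu's bau_control with the global tunables */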
	for_each_present_cpu(cpu) {
		bcp = &per_cpu(bau_control, cpu);
		bcp->baudisabled = 0;
		bcp->statp = &per_cpu(ptcstats, cpu);
		/* time interval to catch a hardware stay-busy bug */
		bcp->timeout_interval = microsec_2_cycles(2 * timeout_us);
		bcp->max_bau_concurrent = max_bau_concurrent;
		bcp->max_bau_concurrent_constant = max_bau_concurrent;
		bcp->plugged_delay = plugged_delay;
		bcp->plugsb4reset = plugsb4reset;
		bcp->timeoutsb4reset = timeoutsb4reset;
		bcp->ipi_reset_limit = ipi_reset_limit;
		bcp->complete_threshold = complete_threshold;
		bcp->congested_response_us = congested_response_us;
		bcp->congested_reps = congested_reps;
		bcp->congested_period = congested_period;
	}
	return 0;
}

/*
 * Initialization of BAU-related structures
 */
static int __init uv_bau_init(void)
{
	int uvhub;
	int pnode;
	int nuvhubs;
	int cur_cpu;
	int vector;
	unsigned long mmr;

	if (!is_uv_system())
		return 0;

	if (nobau)
		return 0;
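
	/* each cpu gets its own working copy of the cpumask to be flushed */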
	for_each_possible_cpu(cur_cpu)
		zalloc_cpumask_var_node(&per_cpu(uv_flush_tlb_mask, cur_cpu),
				GFP_KERNEL, cpu_to_node(cur_cpu));

	uv_nshift = uv_hub_info->m_val;
	uv_mmask = (1UL << uv_hub_info->m_val) - 1;
	nuvhubs = uv_num_possible_blades();
	spin_lock_init(&disable_lock);
	congested_cycles = microsec_2_cycles(congested_response_us);
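
	/* the partition base is the lowest pnode of any blade with cpus */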
	uv_partition_base_pnode = 0x7fffffff;
	for (uvhub = 0; uvhub < nuvhubs; uvhub++) {
		if (uv_blade_nr_possible_cpus(uvhub) &&
			(uv_blade_to_pnode(uvhub) < uv_partition_base_pnode))
			uv_partition_base_pnode = uv_blade_to_pnode(uvhub);
	}
	if (uv_init_per_cpu(nuvhubs, uv_partition_base_pnode)) {
		nobau = 1;
		return 0;
	}
	vector = UV_BAU_MESSAGE;
	for_each_possible_blade(uvhub)
		if (uv_blade_nr_possible_cpus(uvhub))
			uv_init_uvhub(uvhub, vector, uv_partition_base_pnode);

	uv_enable_timeouts();
	alloc_intr_gate(vector, uv_bau_message_intr1);

	for_each_possible_blade(uvhub) {
		if (uv_blade_nr_possible_cpus(uvhub)) {
			pnode = uv_blade_to_pnode(uvhub);
			/* INIT the bau */
			uv_write_global_mmr64(pnode,
					UVH_LB_BAU_SB_ACTIVATION_CONTROL,
					((unsigned long)1 << 63));
			mmr = 1; /* should be 1 to broadcast to both sockets */
			uv_write_global_mmr64(pnode, UVH_BAU_DATA_BROADCAST,
					mmr);
		}
	}

	return 0;
}
core_initcall(uv_bau_init);
fs_initcall(uv_ptc_init);