xpc_main.c

/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (c) 2004-2008 Silicon Graphics, Inc.  All Rights Reserved.
 */

/*
 * Cross Partition Communication (XPC) support - standard version.
 *
 *	XPC provides a message passing capability that crosses partition
 *	boundaries. This module is made up of two parts:
 *
 *	    partition	This part detects the presence/absence of other
 *			partitions. It provides a heartbeat and monitors
 *			the heartbeats of other partitions.
 *
 *	    channel	This part manages the channels and sends/receives
 *			messages across them to/from other partitions.
 *
 *	There are a couple of additional functions residing in XP, which
 *	provide an interface to XPC for its users.
 *
 *
 *	Caveats:
 *
 *	  . Currently on sn2, we have no way to determine which nasid an IRQ
 *	    came from. Thus, xpc_send_IRQ_sn2() does a remote amo write
 *	    followed by an IPI. The amo indicates where data is to be pulled
 *	    from, so after the IPI arrives, the remote partition checks the amo
 *	    word. The IPI can actually arrive before the amo however, so other
 *	    code must periodically check for this case. Also, remote amo
 *	    operations do not reliably time out. Thus we do a remote PIO read
 *	    solely to know whether the remote partition is down and whether we
 *	    should stop sending IPIs to it. This remote PIO read operation is
 *	    set up in a special nofault region so SAL knows to ignore (and
 *	    cleanup) any errors due to the remote amo write, PIO read, and/or
 *	    PIO write operations.
 *
 *	    If/when new hardware solves this IPI problem, we should abandon
 *	    the current approach.
 */
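
/*
 * Illustrative sketch only (not part of this driver): the amo+IPI handshake
 * described above could be pictured roughly as follows, where
 * remote_amo_write(), send_ipi() and poll_amo_word() are hypothetical helpers
 * standing in for the sn2-specific operations.
 *
 *	sender:				receiver (IRQ handler):
 *	remote_amo_write(amo, flags);	if (!poll_amo_word(amo))
 *	send_ipi(nasid, vector);		recheck_later();  /+ IPI beat amo +/
 *
 * Because the IPI can outrun the amo write, the receiver must tolerate an
 * empty amo word and recheck it later, as the caveats above note.
 */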

#include <linux/module.h>
#include <linux/sysctl.h>
#include <linux/device.h>
#include <linux/delay.h>
#include <linux/reboot.h>
#include <linux/kdebug.h>
#include <linux/kthread.h>
#include "xpc.h"

/* define two XPC debug device structures to be used with dev_dbg() et al */

struct device_driver xpc_dbg_name = {
	.name = "xpc"
};

struct device xpc_part_dbg_subname = {
	.bus_id = {0},		/* set to "part" at xpc_init() time */
	.driver = &xpc_dbg_name
};

struct device xpc_chan_dbg_subname = {
	.bus_id = {0},		/* set to "chan" at xpc_init() time */
	.driver = &xpc_dbg_name
};

struct device *xpc_part = &xpc_part_dbg_subname;
struct device *xpc_chan = &xpc_chan_dbg_subname;

static int xpc_kdebug_ignore;

/* systune related variables for /proc/sys directories */

static int xpc_hb_interval = XPC_HB_DEFAULT_INTERVAL;
static int xpc_hb_min_interval = 1;
static int xpc_hb_max_interval = 10;

static int xpc_hb_check_interval = XPC_HB_CHECK_DEFAULT_INTERVAL;
static int xpc_hb_check_min_interval = 10;
static int xpc_hb_check_max_interval = 120;

int xpc_disengage_timelimit = XPC_DISENGAGE_DEFAULT_TIMELIMIT;
static int xpc_disengage_min_timelimit;	/* = 0 */
static int xpc_disengage_max_timelimit = 120;

static ctl_table xpc_sys_xpc_hb_dir[] = {
	{
	 .ctl_name = CTL_UNNUMBERED,
	 .procname = "hb_interval",
	 .data = &xpc_hb_interval,
	 .maxlen = sizeof(int),
	 .mode = 0644,
	 .proc_handler = &proc_dointvec_minmax,
	 .strategy = &sysctl_intvec,
	 .extra1 = &xpc_hb_min_interval,
	 .extra2 = &xpc_hb_max_interval},
	{
	 .ctl_name = CTL_UNNUMBERED,
	 .procname = "hb_check_interval",
	 .data = &xpc_hb_check_interval,
	 .maxlen = sizeof(int),
	 .mode = 0644,
	 .proc_handler = &proc_dointvec_minmax,
	 .strategy = &sysctl_intvec,
	 .extra1 = &xpc_hb_check_min_interval,
	 .extra2 = &xpc_hb_check_max_interval},
	{}
};
static ctl_table xpc_sys_xpc_dir[] = {
	{
	 .ctl_name = CTL_UNNUMBERED,
	 .procname = "hb",
	 .mode = 0555,
	 .child = xpc_sys_xpc_hb_dir},
	{
	 .ctl_name = CTL_UNNUMBERED,
	 .procname = "disengage_timelimit",
	 .data = &xpc_disengage_timelimit,
	 .maxlen = sizeof(int),
	 .mode = 0644,
	 .proc_handler = &proc_dointvec_minmax,
	 .strategy = &sysctl_intvec,
	 .extra1 = &xpc_disengage_min_timelimit,
	 .extra2 = &xpc_disengage_max_timelimit},
	{}
};
static ctl_table xpc_sys_dir[] = {
	{
	 .ctl_name = CTL_UNNUMBERED,
	 .procname = "xpc",
	 .mode = 0555,
	 .child = xpc_sys_xpc_dir},
	{}
};
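
/*
 * Note: with the nesting above, registering xpc_sys_dir exposes these
 * tunables as /proc/sys/xpc/hb/hb_interval,
 * /proc/sys/xpc/hb/hb_check_interval and /proc/sys/xpc/disengage_timelimit,
 * each clamped by proc_dointvec_minmax() to the bounds in .extra1/.extra2.
 */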

static struct ctl_table_header *xpc_sysctl;

/* non-zero if any remote partition disengage was timed out */
int xpc_disengage_timedout;

/* #of activate IRQs received */
atomic_t xpc_activate_IRQ_rcvd = ATOMIC_INIT(0);

/* IRQ handler notifies this wait queue on receipt of an IRQ */
DECLARE_WAIT_QUEUE_HEAD(xpc_activate_IRQ_wq);

static unsigned long xpc_hb_check_timeout;
static struct timer_list xpc_hb_timer;
void *xpc_heartbeating_to_mask;

/* notification that the xpc_hb_checker thread has exited */
static DECLARE_COMPLETION(xpc_hb_checker_exited);

/* notification that the xpc_discovery thread has exited */
static DECLARE_COMPLETION(xpc_discovery_exited);

static void xpc_kthread_waitmsgs(struct xpc_partition *, struct xpc_channel *);

static int xpc_system_reboot(struct notifier_block *, unsigned long, void *);
static struct notifier_block xpc_reboot_notifier = {
	.notifier_call = xpc_system_reboot,
};

static int xpc_system_die(struct notifier_block *, unsigned long, void *);
static struct notifier_block xpc_die_notifier = {
	.notifier_call = xpc_system_die,
};

enum xp_retval (*xpc_get_partition_rsvd_page_pa) (void *buf, u64 *cookie,
						  unsigned long *rp_pa,
						  size_t *len);
enum xp_retval (*xpc_rsvd_page_init) (struct xpc_rsvd_page *rp);
void (*xpc_heartbeat_init) (void);
void (*xpc_heartbeat_exit) (void);
void (*xpc_increment_heartbeat) (void);
void (*xpc_offline_heartbeat) (void);
void (*xpc_online_heartbeat) (void);
void (*xpc_check_remote_hb) (void);

enum xp_retval (*xpc_make_first_contact) (struct xpc_partition *part);
void (*xpc_notify_senders_of_disconnect) (struct xpc_channel *ch);
u64 (*xpc_get_chctl_all_flags) (struct xpc_partition *part);
enum xp_retval (*xpc_allocate_msgqueues) (struct xpc_channel *ch);
void (*xpc_free_msgqueues) (struct xpc_channel *ch);
void (*xpc_process_msg_chctl_flags) (struct xpc_partition *part, int ch_number);
int (*xpc_n_of_deliverable_msgs) (struct xpc_channel *ch);
struct xpc_msg *(*xpc_get_deliverable_msg) (struct xpc_channel *ch);

void (*xpc_request_partition_activation) (struct xpc_rsvd_page *remote_rp,
					  unsigned long remote_rp_pa,
					  int nasid);
void (*xpc_request_partition_reactivation) (struct xpc_partition *part);
void (*xpc_request_partition_deactivation) (struct xpc_partition *part);
void (*xpc_cancel_partition_deactivation_request) (struct xpc_partition *part);

void (*xpc_process_activate_IRQ_rcvd) (int n_IRQs_expected);
enum xp_retval (*xpc_setup_infrastructure) (struct xpc_partition *part);
void (*xpc_teardown_infrastructure) (struct xpc_partition *part);

void (*xpc_indicate_partition_engaged) (struct xpc_partition *part);
int (*xpc_partition_engaged) (short partid);
int (*xpc_any_partition_engaged) (void);
void (*xpc_indicate_partition_disengaged) (struct xpc_partition *part);
void (*xpc_assume_partition_disengaged) (short partid);

void (*xpc_send_chctl_closerequest) (struct xpc_channel *ch,
				     unsigned long *irq_flags);
void (*xpc_send_chctl_closereply) (struct xpc_channel *ch,
				   unsigned long *irq_flags);
void (*xpc_send_chctl_openrequest) (struct xpc_channel *ch,
				    unsigned long *irq_flags);
void (*xpc_send_chctl_openreply) (struct xpc_channel *ch,
				  unsigned long *irq_flags);

enum xp_retval (*xpc_send_msg) (struct xpc_channel *ch, u32 flags,
				void *payload, u16 payload_size, u8 notify_type,
				xpc_notify_func func, void *key);
void (*xpc_received_msg) (struct xpc_channel *ch, struct xpc_msg *msg);
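
/*
 * The function pointers above form XPC's architecture-specific backend; they
 * are presumably filled in by xpc_init_sn2() or xpc_init_uv() (called from
 * xpc_init() below) with the sn2 or uv implementations respectively.
 */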

/*
 * Timer function to enforce the timelimit on the partition disengage.
 */
static void
xpc_timeout_partition_disengage(unsigned long data)
{
	struct xpc_partition *part = (struct xpc_partition *)data;

	DBUG_ON(time_is_after_jiffies(part->disengage_timeout));

	(void)xpc_partition_disengaged(part);

	DBUG_ON(part->disengage_timeout != 0);
	DBUG_ON(xpc_partition_engaged(XPC_PARTID(part)));
}

/*
 * Timer to produce the heartbeat. The timer structure's function is
 * already set when this is initially called. A tunable is used to
 * specify when the next timeout should occur.
 */
static void
xpc_hb_beater(unsigned long dummy)
{
	xpc_increment_heartbeat();

	if (time_is_before_eq_jiffies(xpc_hb_check_timeout))
		wake_up_interruptible(&xpc_activate_IRQ_wq);

	xpc_hb_timer.expires = jiffies + (xpc_hb_interval * HZ);
	add_timer(&xpc_hb_timer);
}
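
/*
 * Initialize the heartbeat machinery and arm the heartbeat timer; calling
 * xpc_hb_beater() directly both produces the first beat and schedules the
 * next one.
 */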
static void
xpc_start_hb_beater(void)
{
	xpc_heartbeat_init();
	init_timer(&xpc_hb_timer);
	xpc_hb_timer.function = xpc_hb_beater;
	xpc_hb_beater(0);
}
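
/* Stop the heartbeat timer and tear down the heartbeat machinery. */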
static void
xpc_stop_hb_beater(void)
{
	del_timer_sync(&xpc_hb_timer);
	xpc_heartbeat_exit();
}

/*
 * This thread is responsible for nearly all of the partition
 * activation/deactivation.
 */
static int
xpc_hb_checker(void *ignore)
{
	int last_IRQ_count = 0;
	int new_IRQ_count;
	int force_IRQ = 0;

	/* this thread was marked active by xpc_hb_init() */

	set_cpus_allowed_ptr(current, &cpumask_of_cpu(XPC_HB_CHECK_CPU));

	/* set our heartbeating to other partitions into motion */

	xpc_hb_check_timeout = jiffies + (xpc_hb_check_interval * HZ);
	xpc_start_hb_beater();

	while (!xpc_exiting) {

		dev_dbg(xpc_part, "woke up with %d ticks rem; %d IRQs have "
			"been received\n",
			(int)(xpc_hb_check_timeout - jiffies),
			atomic_read(&xpc_activate_IRQ_rcvd) - last_IRQ_count);

		/* checking of remote heartbeats is skewed by IRQ handling */

		if (time_is_before_eq_jiffies(xpc_hb_check_timeout)) {
			dev_dbg(xpc_part, "checking remote heartbeats\n");
			xpc_check_remote_hb();

			/*
			 * We need to periodically recheck to ensure no
			 * IRQ/amo pairs have been missed. That check
			 * must always reset xpc_hb_check_timeout.
			 */
			force_IRQ = 1;
		}

		/* check for outstanding IRQs */

		new_IRQ_count = atomic_read(&xpc_activate_IRQ_rcvd);
		if (last_IRQ_count < new_IRQ_count || force_IRQ != 0) {
			force_IRQ = 0;

			dev_dbg(xpc_part, "found an IRQ to process; will be "
				"resetting xpc_hb_check_timeout\n");

			xpc_process_activate_IRQ_rcvd(new_IRQ_count -
						      last_IRQ_count);
			last_IRQ_count = new_IRQ_count;

			xpc_hb_check_timeout = jiffies +
			    (xpc_hb_check_interval * HZ);
		}

		/* wait for IRQ or timeout */
		(void)wait_event_interruptible(xpc_activate_IRQ_wq,
					       (last_IRQ_count < atomic_read(
						&xpc_activate_IRQ_rcvd)
						|| time_is_before_eq_jiffies(
						xpc_hb_check_timeout) ||
						xpc_exiting));
	}

	xpc_stop_hb_beater();

	dev_dbg(xpc_part, "heartbeat checker is exiting\n");

	/* mark this thread as having exited */
	complete(&xpc_hb_checker_exited);
	return 0;
}

/*
 * This thread will attempt to discover other partitions to activate
 * based on info provided by SAL. This new thread is short lived and
 * will exit once discovery is complete.
 */
static int
xpc_initiate_discovery(void *ignore)
{
	xpc_discovery();

	dev_dbg(xpc_part, "discovery thread is exiting\n");

	/* mark this thread as having exited */
	complete(&xpc_discovery_exited);
	return 0;
}

/*
 * The first kthread assigned to a newly activated partition is the one
 * created by XPC HB with which it calls xpc_activating(). XPC hangs on to
 * that kthread until the partition is brought down, at which time that kthread
 * returns back to XPC HB. (The return of that kthread will signify to XPC HB
 * that XPC has dismantled all communication infrastructure for the associated
 * partition.) This kthread becomes the channel manager for that partition.
 *
 * Each active partition has a channel manager, who, besides connecting and
 * disconnecting channels, will ensure that each of the partition's connected
 * channels has the required number of assigned kthreads to get the work done.
 */
static void
xpc_channel_mgr(struct xpc_partition *part)
{
	while (part->act_state != XPC_P_DEACTIVATING ||
	       atomic_read(&part->nchannels_active) > 0 ||
	       !xpc_partition_disengaged(part)) {

		xpc_process_sent_chctl_flags(part);

		/*
		 * Wait until we've been requested to activate kthreads or
		 * all of the channel's message queues have been torn down or
		 * a signal is pending.
		 *
		 * The channel_mgr_requests is set to 1 after being awakened.
		 * This is done to prevent the channel mgr from making one pass
		 * through the loop for each request, since it will be
		 * servicing all the requests in one pass. The reason it's
		 * set to 1 instead of 0 is so that other kthreads will know
		 * that the channel mgr is running and won't bother trying to
		 * wake it up.
		 */
		atomic_dec(&part->channel_mgr_requests);
		(void)wait_event_interruptible(part->channel_mgr_wq,
				(atomic_read(&part->channel_mgr_requests) > 0 ||
				 part->chctl.all_flags != 0 ||
				 (part->act_state == XPC_P_DEACTIVATING &&
				 atomic_read(&part->nchannels_active) == 0 &&
				 xpc_partition_disengaged(part))));
		atomic_set(&part->channel_mgr_requests, 1);
	}
}

/*
 * When XPC HB determines that a partition has come up, it will create a new
 * kthread and that kthread will call this function to attempt to set up the
 * basic infrastructure used for Cross Partition Communication with the newly
 * upped partition.
 *
 * The kthread that was created by XPC HB and which set up the XPC
 * infrastructure will remain assigned to the partition, becoming the channel
 * manager for that partition until the partition is deactivating, at which
 * time the kthread will tear down the XPC infrastructure and then exit.
 */
static int
xpc_activating(void *__partid)
{
	short partid = (u64)__partid;
	struct xpc_partition *part = &xpc_partitions[partid];
	unsigned long irq_flags;

	DBUG_ON(partid < 0 || partid >= xp_max_npartitions);

	spin_lock_irqsave(&part->act_lock, irq_flags);

	if (part->act_state == XPC_P_DEACTIVATING) {
		part->act_state = XPC_P_INACTIVE;
		spin_unlock_irqrestore(&part->act_lock, irq_flags);
		part->remote_rp_pa = 0;
		return 0;
	}

	/* indicate the thread is activating */
	DBUG_ON(part->act_state != XPC_P_ACTIVATION_REQ);
	part->act_state = XPC_P_ACTIVATING;

	XPC_SET_REASON(part, 0, 0);
	spin_unlock_irqrestore(&part->act_lock, irq_flags);

	dev_dbg(xpc_part, "activating partition %d\n", partid);

	xpc_allow_hb(partid);

	if (xpc_setup_infrastructure(part) == xpSuccess) {
		(void)xpc_part_ref(part);	/* this will always succeed */

		if (xpc_make_first_contact(part) == xpSuccess) {
			xpc_mark_partition_active(part);
			xpc_channel_mgr(part);
			/* won't return until partition is deactivating */
		}

		xpc_part_deref(part);
		xpc_teardown_infrastructure(part);
	}

	xpc_disallow_hb(partid);
	xpc_mark_partition_inactive(part);

	if (part->reason == xpReactivating) {
		/* interrupting ourselves results in activating partition */
		xpc_request_partition_reactivation(part);
	}

	return 0;
}
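
/*
 * Request activation of an inactive partition: mark it activation-requested
 * and spawn the kthread that will run xpc_activating() on its behalf. If the
 * kthread cannot be created, the partition is returned to the inactive state.
 */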
void
xpc_activate_partition(struct xpc_partition *part)
{
	short partid = XPC_PARTID(part);
	unsigned long irq_flags;
	struct task_struct *kthread;

	spin_lock_irqsave(&part->act_lock, irq_flags);

	DBUG_ON(part->act_state != XPC_P_INACTIVE);

	part->act_state = XPC_P_ACTIVATION_REQ;
	XPC_SET_REASON(part, xpCloneKThread, __LINE__);

	spin_unlock_irqrestore(&part->act_lock, irq_flags);

	kthread = kthread_run(xpc_activating, (void *)((u64)partid), "xpc%02d",
			      partid);
	if (IS_ERR(kthread)) {
		spin_lock_irqsave(&part->act_lock, irq_flags);
		part->act_state = XPC_P_INACTIVE;
		XPC_SET_REASON(part, xpCloneKThreadFailed, __LINE__);
		spin_unlock_irqrestore(&part->act_lock, irq_flags);
	}
}
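
/*
 * Ensure a channel has enough kthreads to deliver its messages: wake up to
 * 'needed' idle kthreads, then create new ones for any remaining need, but
 * never exceed the channel's kthreads_assigned_limit.
 */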
void
xpc_activate_kthreads(struct xpc_channel *ch, int needed)
{
	int idle = atomic_read(&ch->kthreads_idle);
	int assigned = atomic_read(&ch->kthreads_assigned);
	int wakeup;

	DBUG_ON(needed <= 0);

	if (idle > 0) {
		wakeup = (needed > idle) ? idle : needed;
		needed -= wakeup;

		dev_dbg(xpc_chan, "wakeup %d idle kthreads, partid=%d, "
			"channel=%d\n", wakeup, ch->partid, ch->number);

		/* only wakeup the requested number of kthreads */
		wake_up_nr(&ch->idle_wq, wakeup);
	}

	if (needed <= 0)
		return;

	if (needed + assigned > ch->kthreads_assigned_limit) {
		needed = ch->kthreads_assigned_limit - assigned;
		if (needed <= 0)
			return;
	}

	dev_dbg(xpc_chan, "create %d new kthreads, partid=%d, channel=%d\n",
		needed, ch->partid, ch->number);

	xpc_create_kthreads(ch, needed, 0);
}

/*
 * This function is where XPC's kthreads wait for messages to deliver.
 */
static void
xpc_kthread_waitmsgs(struct xpc_partition *part, struct xpc_channel *ch)
{
	do {
		/* deliver messages to their intended recipients */

		while (xpc_n_of_deliverable_msgs(ch) > 0 &&
		       !(ch->flags & XPC_C_DISCONNECTING)) {
			xpc_deliver_msg(ch);
		}

		if (atomic_inc_return(&ch->kthreads_idle) >
		    ch->kthreads_idle_limit) {
			/* too many idle kthreads on this channel */
			atomic_dec(&ch->kthreads_idle);
			break;
		}

		dev_dbg(xpc_chan, "idle kthread calling "
			"wait_event_interruptible_exclusive()\n");

		(void)wait_event_interruptible_exclusive(ch->idle_wq,
				(xpc_n_of_deliverable_msgs(ch) > 0 ||
				 (ch->flags & XPC_C_DISCONNECTING)));

		atomic_dec(&ch->kthreads_idle);

	} while (!(ch->flags & XPC_C_DISCONNECTING));
}
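
/*
 * Entry point for the per-channel message-delivery kthreads created by
 * xpc_create_kthreads(): make the connected callout if it hasn't been made,
 * deliver messages until the channel disconnects, make the disconnecting
 * callout, and drop the references taken on this kthread's behalf.
 */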
static int
xpc_kthread_start(void *args)
{
	short partid = XPC_UNPACK_ARG1(args);
	u16 ch_number = XPC_UNPACK_ARG2(args);
	struct xpc_partition *part = &xpc_partitions[partid];
	struct xpc_channel *ch;
	int n_needed;
	unsigned long irq_flags;

	dev_dbg(xpc_chan, "kthread starting, partid=%d, channel=%d\n",
		partid, ch_number);

	ch = &part->channels[ch_number];

	if (!(ch->flags & XPC_C_DISCONNECTING)) {

		/* let registerer know that connection has been established */

		spin_lock_irqsave(&ch->lock, irq_flags);
		if (!(ch->flags & XPC_C_CONNECTEDCALLOUT)) {
			ch->flags |= XPC_C_CONNECTEDCALLOUT;
			spin_unlock_irqrestore(&ch->lock, irq_flags);

			xpc_connected_callout(ch);

			spin_lock_irqsave(&ch->lock, irq_flags);
			ch->flags |= XPC_C_CONNECTEDCALLOUT_MADE;
			spin_unlock_irqrestore(&ch->lock, irq_flags);

			/*
			 * It is possible that while the callout was being
			 * made that the remote partition sent some messages.
			 * If that is the case, we may need to activate
			 * additional kthreads to help deliver them. We only
			 * need one less than total #of messages to deliver.
			 */
			n_needed = xpc_n_of_deliverable_msgs(ch) - 1;
			if (n_needed > 0 && !(ch->flags & XPC_C_DISCONNECTING))
				xpc_activate_kthreads(ch, n_needed);

		} else {
			spin_unlock_irqrestore(&ch->lock, irq_flags);
		}

		xpc_kthread_waitmsgs(part, ch);
	}

	/* let registerer know that connection is disconnecting */

	spin_lock_irqsave(&ch->lock, irq_flags);
	if ((ch->flags & XPC_C_CONNECTEDCALLOUT_MADE) &&
	    !(ch->flags & XPC_C_DISCONNECTINGCALLOUT)) {
		ch->flags |= XPC_C_DISCONNECTINGCALLOUT;
		spin_unlock_irqrestore(&ch->lock, irq_flags);

		xpc_disconnect_callout(ch, xpDisconnecting);

		spin_lock_irqsave(&ch->lock, irq_flags);
		ch->flags |= XPC_C_DISCONNECTINGCALLOUT_MADE;
	}
	spin_unlock_irqrestore(&ch->lock, irq_flags);

	if (atomic_dec_return(&ch->kthreads_assigned) == 0 &&
	    atomic_dec_return(&part->nchannels_engaged) == 0) {
		xpc_indicate_partition_disengaged(part);
	}

	xpc_msgqueue_deref(ch);

	dev_dbg(xpc_chan, "kthread exiting, partid=%d, channel=%d\n",
		partid, ch_number);

	xpc_part_deref(part);
	return 0;
}

/*
 * For each partition that XPC has established communications with, there is
 * a minimum of one kernel thread assigned to perform any operation that
 * may potentially sleep or block (basically the callouts to the asynchronous
 * functions registered via xpc_connect()).
 *
 * Additional kthreads are created and destroyed by XPC as the workload
 * demands.
 *
 * A kthread is assigned to one of the active channels that exists for a given
 * partition.
 */
void
xpc_create_kthreads(struct xpc_channel *ch, int needed,
		    int ignore_disconnecting)
{
	unsigned long irq_flags;
	u64 args = XPC_PACK_ARGS(ch->partid, ch->number);
	struct xpc_partition *part = &xpc_partitions[ch->partid];
	struct task_struct *kthread;

	while (needed-- > 0) {

		/*
		 * The following is done on behalf of the newly created
		 * kthread. That kthread is responsible for doing the
		 * counterpart to the following before it exits.
		 */
		if (ignore_disconnecting) {
			if (!atomic_inc_not_zero(&ch->kthreads_assigned)) {
				/* kthreads assigned had gone to zero */
				BUG_ON(!(ch->flags &
					 XPC_C_DISCONNECTINGCALLOUT_MADE));
				break;
			}

		} else if (ch->flags & XPC_C_DISCONNECTING) {
			break;

		} else if (atomic_inc_return(&ch->kthreads_assigned) == 1 &&
			   atomic_inc_return(&part->nchannels_engaged) == 1) {
			xpc_indicate_partition_engaged(part);
		}
		(void)xpc_part_ref(part);
		xpc_msgqueue_ref(ch);

		kthread = kthread_run(xpc_kthread_start, (void *)args,
				      "xpc%02dc%d", ch->partid, ch->number);
		if (IS_ERR(kthread)) {
			/* the fork failed */

			/*
			 * NOTE: if (ignore_disconnecting &&
			 * !(ch->flags & XPC_C_DISCONNECTINGCALLOUT)) is true,
			 * then we'll deadlock if all other kthreads assigned
			 * to this channel are blocked in the channel's
			 * registerer, because the only thing that will unblock
			 * them is the xpDisconnecting callout that this
			 * failed kthread_run() would have made.
			 */

			if (atomic_dec_return(&ch->kthreads_assigned) == 0 &&
			    atomic_dec_return(&part->nchannels_engaged) == 0) {
				xpc_indicate_partition_disengaged(part);
			}
			xpc_msgqueue_deref(ch);
			xpc_part_deref(part);

			if (atomic_read(&ch->kthreads_assigned) <
			    ch->kthreads_idle_limit) {
				/*
				 * Flag this as an error only if we have an
				 * insufficient #of kthreads for the channel
				 * to function.
				 */
				spin_lock_irqsave(&ch->lock, irq_flags);
				XPC_DISCONNECT_CHANNEL(ch, xpLackOfResources,
						       &irq_flags);
				spin_unlock_irqrestore(&ch->lock, irq_flags);
			}
			break;
		}
	}
}
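
/*
 * Wait for the disconnect of channel ch_number to complete on every
 * partition, then clear the channel's XPC_C_WDISCONNECT flag and replay any
 * chctl flags that arrived while the disconnect was in progress.
 */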
void
xpc_disconnect_wait(int ch_number)
{
	unsigned long irq_flags;
	short partid;
	struct xpc_partition *part;
	struct xpc_channel *ch;
	int wakeup_channel_mgr;

	/* now wait for all callouts to the caller's function to cease */
	for (partid = 0; partid < xp_max_npartitions; partid++) {
		part = &xpc_partitions[partid];

		if (!xpc_part_ref(part))
			continue;

		ch = &part->channels[ch_number];

		if (!(ch->flags & XPC_C_WDISCONNECT)) {
			xpc_part_deref(part);
			continue;
		}

		wait_for_completion(&ch->wdisconnect_wait);

		spin_lock_irqsave(&ch->lock, irq_flags);
		DBUG_ON(!(ch->flags & XPC_C_DISCONNECTED));
		wakeup_channel_mgr = 0;

		if (ch->delayed_chctl_flags) {
			if (part->act_state != XPC_P_DEACTIVATING) {
				spin_lock(&part->chctl_lock);
				part->chctl.flags[ch->number] |=
				    ch->delayed_chctl_flags;
				spin_unlock(&part->chctl_lock);
				wakeup_channel_mgr = 1;
			}
			ch->delayed_chctl_flags = 0;
		}

		ch->flags &= ~XPC_C_WDISCONNECT;
		spin_unlock_irqrestore(&ch->lock, irq_flags);

		if (wakeup_channel_mgr)
			xpc_wakeup_channel_mgr(part);

		xpc_part_deref(part);
	}
}
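
/*
 * Tear down XPC: stop the heartbeat checker and discovery threads,
 * deactivate all partitions (waiting up to the disengage timelimit for
 * remote partitions to let go of our memory), invalidate our reserved page,
 * and unhook from the rest of the system.
 */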
static void
xpc_do_exit(enum xp_retval reason)
{
	short partid;
	int active_part_count, printed_waiting_msg = 0;
	struct xpc_partition *part;
	unsigned long printmsg_time, disengage_timeout = 0;

	/* a 'rmmod XPC' and a 'reboot' cannot both end up here together */
	DBUG_ON(xpc_exiting == 1);

	/*
	 * Let the heartbeat checker thread and the discovery thread
	 * (if one is running) know that they should exit. Also wake up
	 * the heartbeat checker thread in case it's sleeping.
	 */
	xpc_exiting = 1;
	wake_up_interruptible(&xpc_activate_IRQ_wq);

	/* wait for the discovery thread to exit */
	wait_for_completion(&xpc_discovery_exited);

	/* wait for the heartbeat checker thread to exit */
	wait_for_completion(&xpc_hb_checker_exited);

	/* sleep for 1/3 of a second or so */
	(void)msleep_interruptible(300);

	/* wait for all partitions to become inactive */

	printmsg_time = jiffies + (XPC_DEACTIVATE_PRINTMSG_INTERVAL * HZ);
	xpc_disengage_timedout = 0;

	do {
		active_part_count = 0;

		for (partid = 0; partid < xp_max_npartitions; partid++) {
			part = &xpc_partitions[partid];

			if (xpc_partition_disengaged(part) &&
			    part->act_state == XPC_P_INACTIVE) {
				continue;
			}

			active_part_count++;

			XPC_DEACTIVATE_PARTITION(part, reason);

			if (part->disengage_timeout > disengage_timeout)
				disengage_timeout = part->disengage_timeout;
		}

		if (xpc_any_partition_engaged()) {
			if (time_is_before_jiffies(printmsg_time)) {
				dev_info(xpc_part, "waiting for remote "
					 "partitions to deactivate, timeout in "
					 "%ld seconds\n", (disengage_timeout -
					 jiffies) / HZ);
				printmsg_time = jiffies +
				    (XPC_DEACTIVATE_PRINTMSG_INTERVAL * HZ);
				printed_waiting_msg = 1;
			}

		} else if (active_part_count > 0) {
			if (printed_waiting_msg) {
				dev_info(xpc_part, "waiting for local partition"
					 " to deactivate\n");
				printed_waiting_msg = 0;
			}

		} else {
			if (!xpc_disengage_timedout) {
				dev_info(xpc_part, "all partitions have "
					 "deactivated\n");
			}
			break;
		}

		/* sleep for 1/3 of a second or so */
		(void)msleep_interruptible(300);

	} while (1);

	DBUG_ON(xpc_any_partition_engaged());
	DBUG_ON(xpc_any_hbs_allowed() != 0);

	/* a zero timestamp indicates our rsvd page is not initialized */
	xpc_rsvd_page->ts_jiffies = 0;

	if (reason == xpUnloading) {
		(void)unregister_die_notifier(&xpc_die_notifier);
		(void)unregister_reboot_notifier(&xpc_reboot_notifier);
	}

	/* clear the interface to XPC's functions */
	xpc_clear_interface();

	if (xpc_sysctl)
		unregister_sysctl_table(xpc_sysctl);

	kfree(xpc_partitions);

	if (is_shub())
		xpc_exit_sn2();
	else
		xpc_exit_uv();
}

/*
 * This function is called when the system is being rebooted.
 */
static int
xpc_system_reboot(struct notifier_block *nb, unsigned long event, void *unused)
{
	enum xp_retval reason;

	switch (event) {
	case SYS_RESTART:
		reason = xpSystemReboot;
		break;
	case SYS_HALT:
		reason = xpSystemHalt;
		break;
	case SYS_POWER_OFF:
		reason = xpSystemPoweroff;
		break;
	default:
		reason = xpSystemGoingDown;
	}

	xpc_do_exit(reason);
	return NOTIFY_DONE;
}

/*
 * Notify other partitions to deactivate from us by first disengaging from all
 * references to our memory.
 */
static void
xpc_die_deactivate(void)
{
	struct xpc_partition *part;
	short partid;
	int any_engaged;
	long keep_waiting;
	long wait_to_print;

	/* keep xpc_hb_checker thread from doing anything (just in case) */
	xpc_exiting = 1;

	xpc_disallow_all_hbs();	/* indicate we're deactivated */

	for (partid = 0; partid < xp_max_npartitions; partid++) {
		part = &xpc_partitions[partid];

		if (xpc_partition_engaged(partid) ||
		    part->act_state != XPC_P_INACTIVE) {
			xpc_request_partition_deactivation(part);
			xpc_indicate_partition_disengaged(part);
		}
	}

	/*
	 * Though we requested that all other partitions deactivate from us,
	 * we only wait until they've all disengaged or we've reached the
	 * defined timelimit.
	 *
	 * Given that one iteration through the following while-loop takes
	 * approximately 200 microseconds, calculate the #of loops to take
	 * before bailing and the #of loops before printing a waiting message.
	 */
	keep_waiting = xpc_disengage_timelimit * 1000 * 5;
	wait_to_print = XPC_DEACTIVATE_PRINTMSG_INTERVAL * 1000 * 5;

	while (1) {
		any_engaged = xpc_any_partition_engaged();
		if (!any_engaged) {
			dev_info(xpc_part, "all partitions have deactivated\n");
			break;
		}

		if (!keep_waiting--) {
			for (partid = 0; partid < xp_max_npartitions;
			     partid++) {
				if (xpc_partition_engaged(partid)) {
					dev_info(xpc_part, "deactivate from "
						 "remote partition %d timed "
						 "out\n", partid);
				}
			}
			break;
		}

		if (!wait_to_print--) {
			dev_info(xpc_part, "waiting for remote partitions to "
				 "deactivate, timeout in %ld seconds\n",
				 keep_waiting / (1000 * 5));
			wait_to_print = XPC_DEACTIVATE_PRINTMSG_INTERVAL *
			    1000 * 5;
		}

		udelay(200);
	}
}

/*
 * This function is called when the system is being restarted or halted due
 * to some sort of system failure. If this is the case we need to notify the
 * other partitions to disengage from all references to our memory.
 * This function can also be called when our heartbeater may be offlined
 * for a time. In this case we need to notify other partitions to not worry
 * about the lack of a heartbeat.
 */
static int
xpc_system_die(struct notifier_block *nb, unsigned long event, void *unused)
{
#ifdef CONFIG_IA64		/* !!! temporary kludge */
	switch (event) {
	case DIE_MACHINE_RESTART:
	case DIE_MACHINE_HALT:
		xpc_die_deactivate();
		break;

	case DIE_KDEBUG_ENTER:
		/* Should lack of heartbeat be ignored by other partitions? */
		if (!xpc_kdebug_ignore)
			break;

		/* fall through */
	case DIE_MCA_MONARCH_ENTER:
	case DIE_INIT_MONARCH_ENTER:
		xpc_offline_heartbeat();
		break;

	case DIE_KDEBUG_LEAVE:
		/* Is lack of heartbeat being ignored by other partitions? */
		if (!xpc_kdebug_ignore)
			break;

		/* fall through */
	case DIE_MCA_MONARCH_LEAVE:
	case DIE_INIT_MONARCH_LEAVE:
		xpc_online_heartbeat();
		break;
	}
#else
	xpc_die_deactivate();
#endif

	return NOTIFY_DONE;
}
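
/*
 * Module entry point: pick the sn2 or uv backend, allocate and minimally
 * initialize xpc_partitions[], publish our reserved page, register the
 * reboot and die notifiers, start the heartbeat checker and discovery
 * threads, and finally hand XPC's entry points to XP via xpc_set_interface().
 */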
int __init
xpc_init(void)
{
	int ret;
	short partid;
	struct xpc_partition *part;
	struct task_struct *kthread;

	snprintf(xpc_part->bus_id, BUS_ID_SIZE, "part");
	snprintf(xpc_chan->bus_id, BUS_ID_SIZE, "chan");

	if (is_shub()) {
		/*
		 * The ia64-sn2 architecture supports at most 64 partitions.
		 * And the inability to unregister remote amos restricts us
		 * further to only support exactly 64 partitions on this
		 * architecture, no less.
		 */
		if (xp_max_npartitions != 64)
			return -EINVAL;

		ret = xpc_init_sn2();
		if (ret != 0)
			return ret;

	} else if (is_uv()) {
		xpc_init_uv();

	} else {
		return -ENODEV;
	}

	xpc_partitions = kzalloc(sizeof(struct xpc_partition) *
				 xp_max_npartitions, GFP_KERNEL);
	if (xpc_partitions == NULL) {
		dev_err(xpc_part, "can't get memory for partition structure\n");
		ret = -ENOMEM;
		goto out_1;
	}

	/*
	 * The first few fields of each entry of xpc_partitions[] need to
	 * be initialized now so that calls to xpc_connect() and
	 * xpc_disconnect() can be made prior to the activation of any remote
	 * partition. NOTE THAT NONE OF THE OTHER FIELDS BELONGING TO THESE
	 * ENTRIES ARE MEANINGFUL UNTIL AFTER AN ENTRY'S CORRESPONDING
	 * PARTITION HAS BEEN ACTIVATED.
	 */
	for (partid = 0; partid < xp_max_npartitions; partid++) {
		part = &xpc_partitions[partid];

		DBUG_ON((u64)part != L1_CACHE_ALIGN((u64)part));

		part->activate_IRQ_rcvd = 0;
		spin_lock_init(&part->act_lock);
		part->act_state = XPC_P_INACTIVE;
		XPC_SET_REASON(part, 0, 0);

		init_timer(&part->disengage_timer);
		part->disengage_timer.function =
		    xpc_timeout_partition_disengage;
		part->disengage_timer.data = (unsigned long)part;

		part->setup_state = XPC_P_UNSET;
		init_waitqueue_head(&part->teardown_wq);
		atomic_set(&part->references, 0);
	}

	xpc_sysctl = register_sysctl_table(xpc_sys_dir);

	/*
	 * Fill the partition reserved page with the information needed by
	 * other partitions to discover we are alive and establish initial
	 * communications.
	 */
	xpc_rsvd_page = xpc_setup_rsvd_page();
	if (xpc_rsvd_page == NULL) {
		dev_err(xpc_part, "can't setup our reserved page\n");
		ret = -EBUSY;
		goto out_2;
	}

	/* add ourselves to the reboot_notifier_list */
	ret = register_reboot_notifier(&xpc_reboot_notifier);
	if (ret != 0)
		dev_warn(xpc_part, "can't register reboot notifier\n");

	/* add ourselves to the die_notifier list */
	ret = register_die_notifier(&xpc_die_notifier);
	if (ret != 0)
		dev_warn(xpc_part, "can't register die notifier\n");

	/*
	 * The real work-horse behind xpc. This processes incoming
	 * interrupts and monitors remote heartbeats.
	 */
	kthread = kthread_run(xpc_hb_checker, NULL, XPC_HB_CHECK_THREAD_NAME);
	if (IS_ERR(kthread)) {
		dev_err(xpc_part, "failed while forking hb check thread\n");
		ret = -EBUSY;
		goto out_3;
	}

	/*
	 * Startup a thread that will attempt to discover other partitions to
	 * activate based on info provided by SAL. This new thread is short
	 * lived and will exit once discovery is complete.
	 */
	kthread = kthread_run(xpc_initiate_discovery, NULL,
			      XPC_DISCOVERY_THREAD_NAME);
	if (IS_ERR(kthread)) {
		dev_err(xpc_part, "failed while forking discovery thread\n");

		/* mark this new thread as a non-starter */
		complete(&xpc_discovery_exited);

		xpc_do_exit(xpUnloading);
		return -EBUSY;
	}

	/* set the interface to point at XPC's functions */
	xpc_set_interface(xpc_initiate_connect, xpc_initiate_disconnect,
			  xpc_initiate_send, xpc_initiate_send_notify,
			  xpc_initiate_received, xpc_initiate_partid_to_nasids);

	return 0;

	/* initialization was not successful */
out_3:
	/* a zero timestamp indicates our rsvd page is not initialized */
	xpc_rsvd_page->ts_jiffies = 0;

	(void)unregister_die_notifier(&xpc_die_notifier);
	(void)unregister_reboot_notifier(&xpc_reboot_notifier);
out_2:
	if (xpc_sysctl)
		unregister_sysctl_table(xpc_sysctl);

	kfree(xpc_partitions);
out_1:
	if (is_shub())
		xpc_exit_sn2();
	else
		xpc_exit_uv();
	return ret;
}

module_init(xpc_init);

void __exit
xpc_exit(void)
{
	xpc_do_exit(xpUnloading);
}

module_exit(xpc_exit);

MODULE_AUTHOR("Silicon Graphics, Inc.");
MODULE_DESCRIPTION("Cross Partition Communication (XPC) support");
MODULE_LICENSE("GPL");

module_param(xpc_hb_interval, int, 0);
MODULE_PARM_DESC(xpc_hb_interval, "Number of seconds between "
		 "heartbeat increments.");

module_param(xpc_hb_check_interval, int, 0);
MODULE_PARM_DESC(xpc_hb_check_interval, "Number of seconds between "
		 "heartbeat checks.");

module_param(xpc_disengage_timelimit, int, 0);
MODULE_PARM_DESC(xpc_disengage_timelimit, "Number of seconds to wait "
		 "for disengage to complete.");

module_param(xpc_kdebug_ignore, int, 0);
MODULE_PARM_DESC(xpc_kdebug_ignore, "Should lack of heartbeat be ignored by "
		 "other partitions when dropping into kdebug.");