@@ -6,7 +6,6 @@
  * Copyright (c) 2004-2008 Silicon Graphics, Inc. All Rights Reserved.
  */
 
-
 /*
  * Cross Partition Communication (XPC) support - standard version.
  *
@@ -44,7 +43,6 @@
  *
  */
 
-
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/init.h>
@@ -61,7 +59,6 @@
 #include <asm/uaccess.h>
 #include "xpc.h"
 
-
 /* define two XPC debug device structures to be used with dev_dbg() et al */
 
 struct device_driver xpc_dbg_name = {
@@ -81,10 +78,8 @@ struct device xpc_chan_dbg_subname = {
 struct device *xpc_part = &xpc_part_dbg_subname;
 struct device *xpc_chan = &xpc_chan_dbg_subname;
 
-
 static int xpc_kdebug_ignore;
 
-
 /* systune related variables for /proc/sys directories */
 
 static int xpc_hb_interval = XPC_HB_DEFAULT_INTERVAL;
@@ -101,56 +96,51 @@ static int xpc_disengage_request_max_timelimit = 120;
 
 static ctl_table xpc_sys_xpc_hb_dir[] = {
 	{
-		.ctl_name = CTL_UNNUMBERED,
-		.procname = "hb_interval",
-		.data = &xpc_hb_interval,
-		.maxlen = sizeof(int),
-		.mode = 0644,
-		.proc_handler = &proc_dointvec_minmax,
-		.strategy = &sysctl_intvec,
-		.extra1 = &xpc_hb_min_interval,
-		.extra2 = &xpc_hb_max_interval
-	},
+	 .ctl_name = CTL_UNNUMBERED,
+	 .procname = "hb_interval",
+	 .data = &xpc_hb_interval,
+	 .maxlen = sizeof(int),
+	 .mode = 0644,
+	 .proc_handler = &proc_dointvec_minmax,
+	 .strategy = &sysctl_intvec,
+	 .extra1 = &xpc_hb_min_interval,
+	 .extra2 = &xpc_hb_max_interval},
 	{
-		.ctl_name = CTL_UNNUMBERED,
-		.procname = "hb_check_interval",
-		.data = &xpc_hb_check_interval,
-		.maxlen = sizeof(int),
-		.mode = 0644,
-		.proc_handler = &proc_dointvec_minmax,
-		.strategy = &sysctl_intvec,
-		.extra1 = &xpc_hb_check_min_interval,
-		.extra2 = &xpc_hb_check_max_interval
-	},
+	 .ctl_name = CTL_UNNUMBERED,
+	 .procname = "hb_check_interval",
+	 .data = &xpc_hb_check_interval,
+	 .maxlen = sizeof(int),
+	 .mode = 0644,
+	 .proc_handler = &proc_dointvec_minmax,
+	 .strategy = &sysctl_intvec,
+	 .extra1 = &xpc_hb_check_min_interval,
+	 .extra2 = &xpc_hb_check_max_interval},
 	{}
 };
 static ctl_table xpc_sys_xpc_dir[] = {
 	{
-		.ctl_name = CTL_UNNUMBERED,
-		.procname = "hb",
-		.mode = 0555,
-		.child = xpc_sys_xpc_hb_dir
-	},
+	 .ctl_name = CTL_UNNUMBERED,
+	 .procname = "hb",
+	 .mode = 0555,
+	 .child = xpc_sys_xpc_hb_dir},
 	{
-		.ctl_name = CTL_UNNUMBERED,
-		.procname = "disengage_request_timelimit",
-		.data = &xpc_disengage_request_timelimit,
-		.maxlen = sizeof(int),
-		.mode = 0644,
-		.proc_handler = &proc_dointvec_minmax,
-		.strategy = &sysctl_intvec,
-		.extra1 = &xpc_disengage_request_min_timelimit,
-		.extra2 = &xpc_disengage_request_max_timelimit
-	},
+	 .ctl_name = CTL_UNNUMBERED,
+	 .procname = "disengage_request_timelimit",
+	 .data = &xpc_disengage_request_timelimit,
+	 .maxlen = sizeof(int),
+	 .mode = 0644,
+	 .proc_handler = &proc_dointvec_minmax,
+	 .strategy = &sysctl_intvec,
+	 .extra1 = &xpc_disengage_request_min_timelimit,
+	 .extra2 = &xpc_disengage_request_max_timelimit},
 	{}
 };
 static ctl_table xpc_sys_dir[] = {
 	{
-		.ctl_name = CTL_UNNUMBERED,
-		.procname = "xpc",
-		.mode = 0555,
-		.child = xpc_sys_xpc_dir
-	},
+	 .ctl_name = CTL_UNNUMBERED,
+	 .procname = "xpc",
+	 .mode = 0555,
+	 .child = xpc_sys_xpc_dir},
 	{}
 };
 static struct ctl_table_header *xpc_sysctl;
@@ -172,13 +162,10 @@ static DECLARE_COMPLETION(xpc_hb_checker_exited);
 /* notification that the xpc_discovery thread has exited */
 static DECLARE_COMPLETION(xpc_discovery_exited);
 
-
 static struct timer_list xpc_hb_timer;
 
-
 static void xpc_kthread_waitmsgs(struct xpc_partition *, struct xpc_channel *);
 
-
 static int xpc_system_reboot(struct notifier_block *, unsigned long, void *);
 static struct notifier_block xpc_reboot_notifier = {
 	.notifier_call = xpc_system_reboot,
@@ -189,25 +176,22 @@ static struct notifier_block xpc_die_notifier = {
 	.notifier_call = xpc_system_die,
 };
 
-
 /*
  * Timer function to enforce the timelimit on the partition disengage request.
 */
 static void
 xpc_timeout_partition_disengage_request(unsigned long data)
 {
-	struct xpc_partition *part = (struct xpc_partition *) data;
-
+	struct xpc_partition *part = (struct xpc_partition *)data;
 
 	DBUG_ON(time_before(jiffies, part->disengage_request_timeout));
 
-	(void) xpc_partition_disengaged(part);
+	(void)xpc_partition_disengaged(part);
 
 	DBUG_ON(part->disengage_request_timeout != 0);
 	DBUG_ON(xpc_partition_engaged(1UL << XPC_PARTID(part)) != 0);
 }
 
-
 /*
  * Notify the heartbeat check thread that an IRQ has been received.
 */
@@ -219,7 +203,6 @@ xpc_act_IRQ_handler(int irq, void *dev_id)
 	return IRQ_HANDLED;
 }
 
-
 /*
  * Timer to produce the heartbeat. The timer structures function is
  * already set when this is initially called. A tunable is used to
@@ -238,7 +221,6 @@ xpc_hb_beater(unsigned long dummy)
 	add_timer(&xpc_hb_timer);
 }
 
-
 /*
  * This thread is responsible for nearly all of the partition
 * activation/deactivation.
@@ -248,8 +230,7 @@ xpc_hb_checker(void *ignore)
 {
 	int last_IRQ_count = 0;
 	int new_IRQ_count;
-	int force_IRQ=0;
-
+	int force_IRQ = 0;
 
 	/* this thread was marked active by xpc_hb_init() */
 
@@ -261,14 +242,13 @@ xpc_hb_checker(void *ignore)
 	xpc_hb_check_timeout = jiffies + (xpc_hb_check_interval * HZ);
 	xpc_hb_beater(0);
 
-	while (!(volatile int) xpc_exiting) {
+	while (!(volatile int)xpc_exiting) {
 
 		dev_dbg(xpc_part, "woke up with %d ticks rem; %d IRQs have "
 			"been received\n",
-			(int) (xpc_hb_check_timeout - jiffies),
+			(int)(xpc_hb_check_timeout - jiffies),
 			atomic_read(&xpc_act_IRQ_rcvd) - last_IRQ_count);
 
-
 		/* checking of remote heartbeats is skewed by IRQ handling */
 		if (time_after_eq(jiffies, xpc_hb_check_timeout)) {
 			dev_dbg(xpc_part, "checking remote heartbeats\n");
@@ -282,7 +262,6 @@ xpc_hb_checker(void *ignore)
 			force_IRQ = 1;
 		}
 
-
 		/* check for outstanding IRQs */
 		new_IRQ_count = atomic_read(&xpc_act_IRQ_rcvd);
 		if (last_IRQ_count < new_IRQ_count || force_IRQ != 0) {
@@ -294,30 +273,30 @@ xpc_hb_checker(void *ignore)
 			last_IRQ_count += xpc_identify_act_IRQ_sender();
 			if (last_IRQ_count < new_IRQ_count) {
 				/* retry once to help avoid missing AMO */
-				(void) xpc_identify_act_IRQ_sender();
+				(void)xpc_identify_act_IRQ_sender();
 			}
 			last_IRQ_count = new_IRQ_count;
 
 			xpc_hb_check_timeout = jiffies +
-					(xpc_hb_check_interval * HZ);
+			    (xpc_hb_check_interval * HZ);
 		}
 
 		/* wait for IRQ or timeout */
-		(void) wait_event_interruptible(xpc_act_IRQ_wq,
-			(last_IRQ_count < atomic_read(&xpc_act_IRQ_rcvd) ||
-				time_after_eq(jiffies, xpc_hb_check_timeout) ||
-				(volatile int) xpc_exiting));
+		(void)wait_event_interruptible(xpc_act_IRQ_wq,
+					       (last_IRQ_count <
+						atomic_read(&xpc_act_IRQ_rcvd)
+						|| time_after_eq(jiffies,
+							xpc_hb_check_timeout) ||
+					       (volatile int)xpc_exiting));
 	}
 
 	dev_dbg(xpc_part, "heartbeat checker is exiting\n");
 
-
 	/* mark this thread as having exited */
 	complete(&xpc_hb_checker_exited);
 	return 0;
 }
 
-
 /*
  * This thread will attempt to discover other partitions to activate
  * based on info provided by SAL. This new thread is short lived and
@@ -337,7 +316,6 @@ xpc_initiate_discovery(void *ignore)
 	return 0;
 }
 
-
 /*
  * Establish first contact with the remote partititon. This involves pulling
  * the XPC per partition variables from the remote partition and waiting for
@@ -348,7 +326,6 @@ xpc_make_first_contact(struct xpc_partition *part)
 {
 	enum xpc_retval ret;
 
-
 	while ((ret = xpc_pull_remote_vars_part(part)) != xpcSuccess) {
 		if (ret != xpcRetry) {
 			XPC_DEACTIVATE_PARTITION(part, ret);
@@ -359,7 +336,7 @@ xpc_make_first_contact(struct xpc_partition *part)
 			"partition %d\n", XPC_PARTID(part));
 
 		/* wait a 1/4 of a second or so */
-		(void) msleep_interruptible(250);
+		(void)msleep_interruptible(250);
 
 		if (part->act_state == XPC_P_DEACTIVATING) {
 			return part->reason;
@@ -369,7 +346,6 @@ xpc_make_first_contact(struct xpc_partition *part)
 	return xpc_mark_partition_active(part);
 }
 
-
 /*
  * The first kthread assigned to a newly activated partition is the one
 * created by XPC HB with which it calls xpc_partition_up(). XPC hangs on to
@@ -386,12 +362,11 @@ static void
 xpc_channel_mgr(struct xpc_partition *part)
 {
 	while (part->act_state != XPC_P_DEACTIVATING ||
-			atomic_read(&part->nchannels_active) > 0 ||
-			!xpc_partition_disengaged(part)) {
+	       atomic_read(&part->nchannels_active) > 0 ||
+	       !xpc_partition_disengaged(part)) {
 
 		xpc_process_channel_activity(part);
 
-
 		/*
 		 * Wait until we've been requested to activate kthreads or
 		 * all of the channel's message queues have been torn down or
@@ -406,13 +381,19 @@ xpc_channel_mgr(struct xpc_partition *part)
 		 * wake him up.
 		 */
 		atomic_dec(&part->channel_mgr_requests);
-		(void) wait_event_interruptible(part->channel_mgr_wq,
-			(atomic_read(&part->channel_mgr_requests) > 0 ||
-			(volatile u64) part->local_IPI_amo != 0 ||
-			((volatile u8) part->act_state ==
-						XPC_P_DEACTIVATING &&
-			atomic_read(&part->nchannels_active) == 0 &&
-			xpc_partition_disengaged(part))));
+		(void)wait_event_interruptible(part->channel_mgr_wq,
+					       (atomic_read
+						(&part->channel_mgr_requests) >
+						0 ||
+						(volatile u64)part->
+						local_IPI_amo != 0 ||
+						((volatile u8)part->act_state ==
+						 XPC_P_DEACTIVATING &&
+						 atomic_read(&part->
+							     nchannels_active)
+						 == 0 &&
+						 xpc_partition_disengaged
+						 (part))));
 		atomic_set(&part->channel_mgr_requests, 1);
 
 		// >>> Does it need to wakeup periodically as well? In case we
@@ -420,7 +401,6 @@ xpc_channel_mgr(struct xpc_partition *part)
 	}
 }
 
-
 /*
  * When XPC HB determines that a partition has come up, it will create a new
 * kthread and that kthread will call this function to attempt to set up the
@@ -454,7 +434,7 @@ xpc_partition_up(struct xpc_partition *part)
 	 * has been dismantled.
 	 */
 
-	(void) xpc_part_ref(part);	/* this will always succeed */
+	(void)xpc_part_ref(part);	/* this will always succeed */
 
 	if (xpc_make_first_contact(part) == xpcSuccess) {
 		xpc_channel_mgr(part);
@@ -465,17 +445,15 @@ xpc_partition_up(struct xpc_partition *part)
 	xpc_teardown_infrastructure(part);
 }
 
-
 static int
 xpc_activating(void *__partid)
 {
-	partid_t partid = (u64) __partid;
+	partid_t partid = (u64)__partid;
 	struct xpc_partition *part = &xpc_partitions[partid];
 	unsigned long irq_flags;
-	struct sched_param param = { .sched_priority = MAX_RT_PRIO - 1 };
+	struct sched_param param = {.sched_priority = MAX_RT_PRIO - 1 };
 	int ret;
 
-
 	DBUG_ON(partid <= 0 || partid >= XP_MAX_PARTITIONS);
 
 	spin_lock_irqsave(&part->act_lock, irq_flags);
@@ -505,7 +483,7 @@ xpc_activating(void *__partid)
 	ret = sched_setscheduler(current, SCHED_FIFO, &param);
 	if (ret != 0) {
 		dev_warn(xpc_part, "unable to set pid %d to a realtime "
-			"priority, ret=%d\n", current->pid, ret);
+			 "priority, ret=%d\n", current->pid, ret);
 	}
 
 	/* allow this thread and its children to run on any CPU */
@@ -522,9 +500,9 @@ xpc_activating(void *__partid)
 	 * reloads and system reboots.
 	 */
 	if (sn_register_xp_addr_region(part->remote_amos_page_pa,
-					PAGE_SIZE, 1) < 0) {
+				       PAGE_SIZE, 1) < 0) {
 		dev_warn(xpc_part, "xpc_partition_up(%d) failed to register "
-			"xp_addr region\n", partid);
+			 "xp_addr region\n", partid);
 
 		spin_lock_irqsave(&part->act_lock, irq_flags);
 		part->act_state = XPC_P_INACTIVE;
@@ -537,12 +515,11 @@ xpc_activating(void *__partid)
 	xpc_allow_hb(partid, xpc_vars);
 	xpc_IPI_send_activated(part);
 
-
 	/*
 	 * xpc_partition_up() holds this thread and marks this partition as
 	 * XPC_P_ACTIVE by calling xpc_hb_mark_active().
 	 */
-	(void) xpc_partition_up(part);
+	(void)xpc_partition_up(part);
 
 	xpc_disallow_hb(partid, xpc_vars);
 	xpc_mark_partition_inactive(part);
@@ -555,7 +532,6 @@ xpc_activating(void *__partid)
 	return 0;
 }
 
-
 void
 xpc_activate_partition(struct xpc_partition *part)
 {
@@ -563,7 +539,6 @@ xpc_activate_partition(struct xpc_partition *part)
 	unsigned long irq_flags;
 	pid_t pid;
 
-
 	spin_lock_irqsave(&part->act_lock, irq_flags);
 
 	DBUG_ON(part->act_state != XPC_P_INACTIVE);
@@ -573,7 +548,7 @@ xpc_activate_partition(struct xpc_partition *part)
 
 	spin_unlock_irqrestore(&part->act_lock, irq_flags);
 
-	pid = kernel_thread(xpc_activating, (void *) ((u64) partid), 0);
+	pid = kernel_thread(xpc_activating, (void *)((u64)partid), 0);
 
 	if (unlikely(pid <= 0)) {
 		spin_lock_irqsave(&part->act_lock, irq_flags);
@@ -583,7 +558,6 @@ xpc_activate_partition(struct xpc_partition *part)
 	}
 }
 
-
 /*
  * Handle the receipt of a SGI_XPC_NOTIFY IRQ by seeing whether the specified
 * partition actually sent it. Since SGI_XPC_NOTIFY IRQs may be shared by more
@@ -603,10 +577,9 @@ xpc_activate_partition(struct xpc_partition *part)
 irqreturn_t
 xpc_notify_IRQ_handler(int irq, void *dev_id)
 {
-	partid_t partid = (partid_t) (u64) dev_id;
+	partid_t partid = (partid_t) (u64)dev_id;
 	struct xpc_partition *part = &xpc_partitions[partid];
 
-
 	DBUG_ON(partid <= 0 || partid >= XP_MAX_PARTITIONS);
 
 	if (xpc_part_ref(part)) {
@@ -617,7 +590,6 @@ xpc_notify_IRQ_handler(int irq, void *dev_id)
 	return IRQ_HANDLED;
 }
 
-
 /*
  * Check to see if xpc_notify_IRQ_handler() dropped any IPIs on the floor
 * because the write to their associated IPI amo completed after the IRQ/IPI
@@ -630,13 +602,12 @@ xpc_dropped_IPI_check(struct xpc_partition *part)
 		xpc_check_for_channel_activity(part);
 
 		part->dropped_IPI_timer.expires = jiffies +
-			XPC_P_DROPPED_IPI_WAIT;
+		    XPC_P_DROPPED_IPI_WAIT;
 		add_timer(&part->dropped_IPI_timer);
 		xpc_part_deref(part);
 	}
 }
 
-
 void
 xpc_activate_kthreads(struct xpc_channel *ch, int needed)
 {
@@ -644,7 +615,6 @@ xpc_activate_kthreads(struct xpc_channel *ch, int needed)
 	int assigned = atomic_read(&ch->kthreads_assigned);
 	int wakeup;
 
-
 	DBUG_ON(needed <= 0);
 
 	if (idle > 0) {
@@ -676,7 +646,6 @@ xpc_activate_kthreads(struct xpc_channel *ch, int needed)
 	xpc_create_kthreads(ch, needed, 0);
 }
 
-
 /*
  * This function is where XPC's kthreads wait for messages to deliver.
 */
@@ -686,15 +655,14 @@ xpc_kthread_waitmsgs(struct xpc_partition *part, struct xpc_channel *ch)
 	do {
 		/* deliver messages to their intended recipients */
 
-		while ((volatile s64) ch->w_local_GP.get <
-				(volatile s64) ch->w_remote_GP.put &&
-				!((volatile u32) ch->flags &
-					XPC_C_DISCONNECTING)) {
+		while ((volatile s64)ch->w_local_GP.get <
+		       (volatile s64)ch->w_remote_GP.put &&
+		       !((volatile u32)ch->flags & XPC_C_DISCONNECTING)) {
 			xpc_deliver_msg(ch);
 		}
 
 		if (atomic_inc_return(&ch->kthreads_idle) >
-				ch->kthreads_idle_limit) {
+		    ch->kthreads_idle_limit) {
 			/* too many idle kthreads on this channel */
 			atomic_dec(&ch->kthreads_idle);
 			break;
@@ -703,18 +671,20 @@ xpc_kthread_waitmsgs(struct xpc_partition *part, struct xpc_channel *ch)
 		dev_dbg(xpc_chan, "idle kthread calling "
 			"wait_event_interruptible_exclusive()\n");
 
-		(void) wait_event_interruptible_exclusive(ch->idle_wq,
-				((volatile s64) ch->w_local_GP.get <
-				(volatile s64) ch->w_remote_GP.put ||
-				((volatile u32) ch->flags &
-					XPC_C_DISCONNECTING)));
+		(void)wait_event_interruptible_exclusive(ch->idle_wq,
+							 ((volatile s64)ch->
+							  w_local_GP.get <
+							  (volatile s64)ch->
+							  w_remote_GP.put ||
+							  ((volatile u32)ch->
+							   flags &
+							   XPC_C_DISCONNECTING)));
 
 		atomic_dec(&ch->kthreads_idle);
 
-	} while (!((volatile u32) ch->flags & XPC_C_DISCONNECTING));
+	} while (!((volatile u32)ch->flags & XPC_C_DISCONNECTING));
 }
 
-
 static int
 xpc_daemonize_kthread(void *args)
 {
@@ -725,7 +695,6 @@ xpc_daemonize_kthread(void *args)
 	int n_needed;
 	unsigned long irq_flags;
 
-
 	daemonize("xpc%02dc%d", partid, ch_number);
 
 	dev_dbg(xpc_chan, "kthread starting, partid=%d, channel=%d\n",
@@ -756,8 +725,7 @@ xpc_daemonize_kthread(void *args)
 			 * need one less than total #of messages to deliver.
 			 */
 			n_needed = ch->w_remote_GP.put - ch->w_local_GP.get - 1;
-			if (n_needed > 0 &&
-					!(ch->flags & XPC_C_DISCONNECTING)) {
+			if (n_needed > 0 && !(ch->flags & XPC_C_DISCONNECTING)) {
 				xpc_activate_kthreads(ch, n_needed);
 			}
 		} else {
@@ -771,7 +739,7 @@ xpc_daemonize_kthread(void *args)
 
 	spin_lock_irqsave(&ch->lock, irq_flags);
 	if ((ch->flags & XPC_C_CONNECTEDCALLOUT_MADE) &&
-			!(ch->flags & XPC_C_DISCONNECTINGCALLOUT)) {
+	    !(ch->flags & XPC_C_DISCONNECTINGCALLOUT)) {
 		ch->flags |= XPC_C_DISCONNECTINGCALLOUT;
 		spin_unlock_irqrestore(&ch->lock, irq_flags);
 
@@ -798,7 +766,6 @@ xpc_daemonize_kthread(void *args)
 	return 0;
 }
 
-
 /*
  * For each partition that XPC has established communications with, there is
 * a minimum of one kernel thread assigned to perform any operation that
@@ -813,14 +780,13 @@ xpc_daemonize_kthread(void *args)
 */
 void
 xpc_create_kthreads(struct xpc_channel *ch, int needed,
-			int ignore_disconnecting)
+		    int ignore_disconnecting)
 {
 	unsigned long irq_flags;
 	pid_t pid;
 	u64 args = XPC_PACK_ARGS(ch->partid, ch->number);
 	struct xpc_partition *part = &xpc_partitions[ch->partid];
 
-
 	while (needed-- > 0) {
 
 		/*
@@ -832,7 +798,7 @@ xpc_create_kthreads(struct xpc_channel *ch, int needed,
 		if (!atomic_inc_not_zero(&ch->kthreads_assigned)) {
 			/* kthreads assigned had gone to zero */
 			BUG_ON(!(ch->flags &
-					XPC_C_DISCONNECTINGCALLOUT_MADE));
+				 XPC_C_DISCONNECTINGCALLOUT_MADE));
 			break;
 		}
 
@@ -843,10 +809,10 @@ xpc_create_kthreads(struct xpc_channel *ch, int needed,
 			if (atomic_inc_return(&part->nchannels_engaged) == 1)
 				xpc_mark_partition_engaged(part);
 		}
-		(void) xpc_part_ref(part);
+		(void)xpc_part_ref(part);
 		xpc_msgqueue_ref(ch);
 
-		pid = kernel_thread(xpc_daemonize_kthread, (void *) args, 0);
+		pid = kernel_thread(xpc_daemonize_kthread, (void *)args, 0);
 		if (pid < 0) {
 			/* the fork failed */
 
@@ -869,7 +835,7 @@ xpc_create_kthreads(struct xpc_channel *ch, int needed,
 			xpc_part_deref(part);
 
 			if (atomic_read(&ch->kthreads_assigned) <
-					ch->kthreads_idle_limit) {
+			    ch->kthreads_idle_limit) {
 				/*
 				 * Flag this as an error only if we have an
 				 * insufficient #of kthreads for the channel
@@ -877,7 +843,7 @@ xpc_create_kthreads(struct xpc_channel *ch, int needed,
 				 */
 				spin_lock_irqsave(&ch->lock, irq_flags);
 				XPC_DISCONNECT_CHANNEL(ch, xpcLackOfResources,
-						&irq_flags);
+						       &irq_flags);
 				spin_unlock_irqrestore(&ch->lock, irq_flags);
 			}
 			break;
@@ -887,7 +853,6 @@ xpc_create_kthreads(struct xpc_channel *ch, int needed,
 	}
 }
 
-
 void
 xpc_disconnect_wait(int ch_number)
 {
@@ -897,7 +862,6 @@ xpc_disconnect_wait(int ch_number)
 	struct xpc_channel *ch;
 	int wakeup_channel_mgr;
 
-
 	/* now wait for all callouts to the caller's function to cease */
 	for (partid = 1; partid < XP_MAX_PARTITIONS; partid++) {
 		part = &xpc_partitions[partid];
@@ -923,7 +887,8 @@ xpc_disconnect_wait(int ch_number)
 		if (part->act_state != XPC_P_DEACTIVATING) {
 			spin_lock(&part->IPI_lock);
 			XPC_SET_IPI_FLAGS(part->local_IPI_amo,
-					ch->number, ch->delayed_IPI_flags);
+					  ch->number,
+					  ch->delayed_IPI_flags);
 			spin_unlock(&part->IPI_lock);
 			wakeup_channel_mgr = 1;
 		}
@@ -941,7 +906,6 @@ xpc_disconnect_wait(int ch_number)
 	}
 }
 
-
 static void
 xpc_do_exit(enum xpc_retval reason)
 {
@@ -950,7 +914,6 @@ xpc_do_exit(enum xpc_retval reason)
 	struct xpc_partition *part;
 	unsigned long printmsg_time, disengage_request_timeout = 0;
 
-
 	/* a 'rmmod XPC' and a 'reboot' cannot both end up here together */
 	DBUG_ON(xpc_exiting == 1);
 
@@ -971,10 +934,8 @@ xpc_do_exit(enum xpc_retval reason)
 	/* wait for the heartbeat checker thread to exit */
 	wait_for_completion(&xpc_hb_checker_exited);
 
-
 	/* sleep for a 1/3 of a second or so */
-	(void) msleep_interruptible(300);
-
+	(void)msleep_interruptible(300);
 
 	/* wait for all partitions to become inactive */
 
@@ -988,7 +949,7 @@ xpc_do_exit(enum xpc_retval reason)
 			part = &xpc_partitions[partid];
 
 			if (xpc_partition_disengaged(part) &&
-					part->act_state == XPC_P_INACTIVE) {
+			    part->act_state == XPC_P_INACTIVE) {
 				continue;
 			}
 
@@ -997,47 +958,46 @@ xpc_do_exit(enum xpc_retval reason)
 			XPC_DEACTIVATE_PARTITION(part, reason);
 
 			if (part->disengage_request_timeout >
-					disengage_request_timeout) {
+			    disengage_request_timeout) {
 				disengage_request_timeout =
-					part->disengage_request_timeout;
+				    part->disengage_request_timeout;
 			}
 		}
 
 		if (xpc_partition_engaged(-1UL)) {
 			if (time_after(jiffies, printmsg_time)) {
 				dev_info(xpc_part, "waiting for remote "
-					"partitions to disengage, timeout in "
-					"%ld seconds\n",
-					(disengage_request_timeout - jiffies)
-					/ HZ);
+					 "partitions to disengage, timeout in "
+					 "%ld seconds\n",
+					 (disengage_request_timeout - jiffies)
+					 / HZ);
 				printmsg_time = jiffies +
-					(XPC_DISENGAGE_PRINTMSG_INTERVAL * HZ);
+				    (XPC_DISENGAGE_PRINTMSG_INTERVAL * HZ);
 				printed_waiting_msg = 1;
 			}
 
 		} else if (active_part_count > 0) {
 			if (printed_waiting_msg) {
 				dev_info(xpc_part, "waiting for local partition"
-					" to disengage\n");
+					 " to disengage\n");
 				printed_waiting_msg = 0;
 			}
 
 		} else {
 			if (!xpc_disengage_request_timedout) {
 				dev_info(xpc_part, "all partitions have "
-					"disengaged\n");
+					 "disengaged\n");
 			}
 			break;
 		}
 
 		/* sleep for a 1/3 of a second or so */
-		(void) msleep_interruptible(300);
+		(void)msleep_interruptible(300);
 
 	} while (1);
 
 	DBUG_ON(xpc_partition_engaged(-1UL));
 
-
 	/* indicate to others that our reserved page is uninitialized */
 	xpc_rsvd_page->vars_pa = 0;
 
@@ -1047,16 +1007,15 @@ xpc_do_exit(enum xpc_retval reason)
 
 	if (reason == xpcUnloading) {
 		/* take ourselves off of the reboot_notifier_list */
-		(void) unregister_reboot_notifier(&xpc_reboot_notifier);
+		(void)unregister_reboot_notifier(&xpc_reboot_notifier);
 
 		/* take ourselves off of the die_notifier list */
-		(void) unregister_die_notifier(&xpc_die_notifier);
+		(void)unregister_die_notifier(&xpc_die_notifier);
 	}
 
 	/* close down protections for IPI operations */
 	xpc_restrict_IPI_ops();
 
-
 	/* clear the interface to XPC's functions */
 	xpc_clear_interface();
 
@@ -1067,7 +1026,6 @@ xpc_do_exit(enum xpc_retval reason)
 	kfree(xpc_remote_copy_buffer_base);
 }
 
-
 /*
  * This function is called when the system is being rebooted.
 */
@@ -1076,7 +1034,6 @@ xpc_system_reboot(struct notifier_block *nb, unsigned long event, void *unused)
 {
 	enum xpc_retval reason;
 
-
 	switch (event) {
 	case SYS_RESTART:
 		reason = xpcSystemReboot;
@@ -1095,7 +1052,6 @@ xpc_system_reboot(struct notifier_block *nb, unsigned long event, void *unused)
 	return NOTIFY_DONE;
 }
 
-
 /*
  * Notify other partitions to disengage from all references to our memory.
 */
@@ -1107,17 +1063,15 @@ xpc_die_disengage(void)
 	unsigned long engaged;
 	long time, printmsg_time, disengage_request_timeout;
 
-
 	/* keep xpc_hb_checker thread from doing anything (just in case) */
 	xpc_exiting = 1;
 
-	xpc_vars->heartbeating_to_mask = 0;  /* indicate we're deactivated */
+	xpc_vars->heartbeating_to_mask = 0;	/* indicate we're deactivated */
 
 	for (partid = 1; partid < XP_MAX_PARTITIONS; partid++) {
 		part = &xpc_partitions[partid];
 
-		if (!XPC_SUPPORTS_DISENGAGE_REQUEST(part->
-							remote_vars_version)) {
+		if (!XPC_SUPPORTS_DISENGAGE_REQUEST(part->remote_vars_version)) {
 
 			/* just in case it was left set by an earlier XPC */
 			xpc_clear_partition_engaged(1UL << partid);
@@ -1125,7 +1079,7 @@ xpc_die_disengage(void)
 		}
 
 		if (xpc_partition_engaged(1UL << partid) ||
-				part->act_state != XPC_P_INACTIVE) {
+		    part->act_state != XPC_P_INACTIVE) {
 			xpc_request_partition_disengage(part);
 			xpc_mark_partition_disengaged(part);
 			xpc_IPI_send_disengage(part);
@@ -1134,9 +1088,9 @@ xpc_die_disengage(void)
 
 	time = rtc_time();
 	printmsg_time = time +
-		(XPC_DISENGAGE_PRINTMSG_INTERVAL * sn_rtc_cycles_per_second);
+	    (XPC_DISENGAGE_PRINTMSG_INTERVAL * sn_rtc_cycles_per_second);
 	disengage_request_timeout = time +
-		(xpc_disengage_request_timelimit * sn_rtc_cycles_per_second);
+	    (xpc_disengage_request_timelimit * sn_rtc_cycles_per_second);
 
 	/* wait for all other partitions to disengage from us */
 
@@ -1152,8 +1106,8 @@ xpc_die_disengage(void)
 			for (partid = 1; partid < XP_MAX_PARTITIONS; partid++) {
 				if (engaged & (1UL << partid)) {
 					dev_info(xpc_part, "disengage from "
-						"remote partition %d timed "
-						"out\n", partid);
+						 "remote partition %d timed "
+						 "out\n", partid);
 				}
 			}
 			break;
@@ -1161,17 +1115,16 @@ xpc_die_disengage(void)
 
 		if (time >= printmsg_time) {
 			dev_info(xpc_part, "waiting for remote partitions to "
-				"disengage, timeout in %ld seconds\n",
-				(disengage_request_timeout - time) /
-					sn_rtc_cycles_per_second);
+				 "disengage, timeout in %ld seconds\n",
+				 (disengage_request_timeout - time) /
+				 sn_rtc_cycles_per_second);
 			printmsg_time = time +
-				(XPC_DISENGAGE_PRINTMSG_INTERVAL *
-					sn_rtc_cycles_per_second);
+			    (XPC_DISENGAGE_PRINTMSG_INTERVAL *
+			     sn_rtc_cycles_per_second);
 		}
 	}
 }
 
-
 /*
  * This function is called when the system is being restarted or halted due
 * to some sort of system failure. If this is the case we need to notify the
@@ -1217,7 +1170,6 @@ xpc_system_die(struct notifier_block *nb, unsigned long event, void *unused)
 	return NOTIFY_DONE;
 }
 
-
 int __init
 xpc_init(void)
 {
@@ -1227,16 +1179,15 @@ xpc_init(void)
 	pid_t pid;
 	size_t buf_size;
 
-
 	if (!ia64_platform_is("sn2")) {
 		return -ENODEV;
 	}
 
-
 	buf_size = max(XPC_RP_VARS_SIZE,
-			XPC_RP_HEADER_SIZE + XP_NASID_MASK_BYTES);
+		       XPC_RP_HEADER_SIZE + XP_NASID_MASK_BYTES);
 	xpc_remote_copy_buffer = xpc_kmalloc_cacheline_aligned(buf_size,
-				GFP_KERNEL, &xpc_remote_copy_buffer_base);
+							       GFP_KERNEL,
+							       &xpc_remote_copy_buffer_base);
 	if (xpc_remote_copy_buffer == NULL)
 		return -ENOMEM;
 
@@ -1256,7 +1207,7 @@ xpc_init(void)
 	for (partid = 1; partid < XP_MAX_PARTITIONS; partid++) {
 		part = &xpc_partitions[partid];
 
-		DBUG_ON((u64) part != L1_CACHE_ALIGN((u64) part));
+		DBUG_ON((u64)part != L1_CACHE_ALIGN((u64)part));
 
 		part->act_IRQ_rcvd = 0;
 		spin_lock_init(&part->act_lock);
@@ -1265,8 +1216,8 @@ xpc_init(void)
 
 		init_timer(&part->disengage_request_timer);
 		part->disengage_request_timer.function =
-			xpc_timeout_partition_disengage_request;
-		part->disengage_request_timer.data = (unsigned long) part;
+		    xpc_timeout_partition_disengage_request;
+		part->disengage_request_timer.data = (unsigned long)part;
 
 		part->setup_state = XPC_P_UNSET;
 		init_waitqueue_head(&part->teardown_wq);
@@ -1292,7 +1243,7 @@ xpc_init(void)
 	 * but rather immediately process the interrupt.
 	 */
 	ret = request_irq(SGI_XPC_ACTIVATE, xpc_act_IRQ_handler, 0,
-				"xpc hb", NULL);
+			  "xpc hb", NULL);
 	if (ret != 0) {
 		dev_err(xpc_part, "can't register ACTIVATE IRQ handler, "
 			"errno=%d\n", -ret);
@@ -1327,7 +1278,6 @@ xpc_init(void)
 		return -EBUSY;
 	}
 
-
 	/* add ourselves to the reboot_notifier_list */
 	ret = register_reboot_notifier(&xpc_reboot_notifier);
 	if (ret != 0) {
@@ -1355,10 +1305,10 @@ xpc_init(void)
 		xpc_rsvd_page->vars_pa = 0;
 
 		/* take ourselves off of the reboot_notifier_list */
-		(void) unregister_reboot_notifier(&xpc_reboot_notifier);
+		(void)unregister_reboot_notifier(&xpc_reboot_notifier);
 
 		/* take ourselves off of the die_notifier list */
-		(void) unregister_die_notifier(&xpc_die_notifier);
+		(void)unregister_die_notifier(&xpc_die_notifier);
 
 		del_timer_sync(&xpc_hb_timer);
 		free_irq(SGI_XPC_ACTIVATE, NULL);
@@ -1372,7 +1322,6 @@ xpc_init(void)
 		return -EBUSY;
 	}
 
-
 	/*
 	 * Startup a thread that will attempt to discover other partitions to
 	 * activate based on info provided by SAL. This new thread is short
@@ -1389,7 +1338,6 @@ xpc_init(void)
 		return -EBUSY;
 	}
 
-
 	/* set the interface to point at XPC's functions */
 	xpc_set_interface(xpc_initiate_connect, xpc_initiate_disconnect,
 			  xpc_initiate_allocate, xpc_initiate_send,
@@ -1398,16 +1346,16 @@ xpc_init(void)
 
 	return 0;
 }
-module_init(xpc_init);
 
+module_init(xpc_init);
 
 void __exit
 xpc_exit(void)
 {
 	xpc_do_exit(xpcUnloading);
 }
-module_exit(xpc_exit);
 
+module_exit(xpc_exit);
 
 MODULE_AUTHOR("Silicon Graphics, Inc.");
 MODULE_DESCRIPTION("Cross Partition Communication (XPC) support");
@@ -1415,17 +1363,16 @@ MODULE_LICENSE("GPL");
 
 module_param(xpc_hb_interval, int, 0);
 MODULE_PARM_DESC(xpc_hb_interval, "Number of seconds between "
-		"heartbeat increments.");
+		 "heartbeat increments.");
 
 module_param(xpc_hb_check_interval, int, 0);
 MODULE_PARM_DESC(xpc_hb_check_interval, "Number of seconds between "
-		"heartbeat checks.");
+		 "heartbeat checks.");
 
 module_param(xpc_disengage_request_timelimit, int, 0);
 MODULE_PARM_DESC(xpc_disengage_request_timelimit, "Number of seconds to wait "
-		"for disengage request to complete.");
+		 "for disengage request to complete.");
 
 module_param(xpc_kdebug_ignore, int, 0);
 MODULE_PARM_DESC(xpc_kdebug_ignore, "Should lack of heartbeat be ignored by "
-		"other partitions when dropping into kdebug.");
-
+		 "other partitions when dropping into kdebug.");