@@ -169,68 +169,7 @@ static struct notifier_block xpc_die_notifier = {
 	.notifier_call = xpc_system_die,
 };
 
-int (*xpc_setup_partitions_sn) (void);
-void (*xpc_teardown_partitions_sn) (void);
-enum xp_retval (*xpc_get_partition_rsvd_page_pa) (void *buf, u64 *cookie,
-						  unsigned long *rp_pa,
-						  size_t *len);
-int (*xpc_setup_rsvd_page_sn) (struct xpc_rsvd_page *rp);
-
-void (*xpc_allow_hb) (short partid);
-void (*xpc_disallow_hb) (short partid);
-void (*xpc_disallow_all_hbs) (void);
-void (*xpc_heartbeat_init) (void);
-void (*xpc_heartbeat_exit) (void);
-void (*xpc_increment_heartbeat) (void);
-void (*xpc_offline_heartbeat) (void);
-void (*xpc_online_heartbeat) (void);
-enum xp_retval (*xpc_get_remote_heartbeat) (struct xpc_partition *part);
-
-enum xp_retval (*xpc_make_first_contact) (struct xpc_partition *part);
-void (*xpc_notify_senders_of_disconnect) (struct xpc_channel *ch);
-u64 (*xpc_get_chctl_all_flags) (struct xpc_partition *part);
-enum xp_retval (*xpc_setup_msg_structures) (struct xpc_channel *ch);
-void (*xpc_teardown_msg_structures) (struct xpc_channel *ch);
-void (*xpc_process_msg_chctl_flags) (struct xpc_partition *part, int ch_number);
-int (*xpc_n_of_deliverable_payloads) (struct xpc_channel *ch);
-void *(*xpc_get_deliverable_payload) (struct xpc_channel *ch);
-
-void (*xpc_request_partition_activation) (struct xpc_rsvd_page *remote_rp,
-					  unsigned long remote_rp_pa,
-					  int nasid);
-void (*xpc_request_partition_reactivation) (struct xpc_partition *part);
-void (*xpc_request_partition_deactivation) (struct xpc_partition *part);
-void (*xpc_cancel_partition_deactivation_request) (struct xpc_partition *part);
-
-void (*xpc_process_activate_IRQ_rcvd) (void);
-enum xp_retval (*xpc_setup_ch_structures_sn) (struct xpc_partition *part);
-void (*xpc_teardown_ch_structures_sn) (struct xpc_partition *part);
-
-void (*xpc_indicate_partition_engaged) (struct xpc_partition *part);
-int (*xpc_partition_engaged) (short partid);
-int (*xpc_any_partition_engaged) (void);
-void (*xpc_indicate_partition_disengaged) (struct xpc_partition *part);
-void (*xpc_assume_partition_disengaged) (short partid);
-
-void (*xpc_send_chctl_closerequest) (struct xpc_channel *ch,
-				     unsigned long *irq_flags);
-void (*xpc_send_chctl_closereply) (struct xpc_channel *ch,
-				   unsigned long *irq_flags);
-void (*xpc_send_chctl_openrequest) (struct xpc_channel *ch,
-				    unsigned long *irq_flags);
-void (*xpc_send_chctl_openreply) (struct xpc_channel *ch,
-				  unsigned long *irq_flags);
-void (*xpc_send_chctl_opencomplete) (struct xpc_channel *ch,
-				     unsigned long *irq_flags);
-
-enum xp_retval (*xpc_save_remote_msgqueue_pa) (struct xpc_channel *ch,
-					       unsigned long msgqueue_pa);
-
-enum xp_retval (*xpc_send_payload) (struct xpc_channel *ch, u32 flags,
-				    void *payload, u16 payload_size,
-				    u8 notify_type, xpc_notify_func func,
-				    void *key);
-void (*xpc_received_payload) (struct xpc_channel *ch, void *payload);
+struct xpc_arch_operations xpc_arch_ops;
 
 /*
  * Timer function to enforce the timelimit on the partition disengage.
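This first hunk collapses the file's five dozen global function pointers into a single ops table. The struct itself is defined elsewhere in the patch (in drivers/misc/sgi-xp/xpc.h, which this excerpt does not show), so the sketch below is not the verbatim header; it is reconstructed from the declarations deleted above and the xpc_arch_ops.* call sites in the hunks that follow, abridged to the members this file uses:

struct xpc_arch_operations {
	int (*setup_partitions) (void);
	void (*teardown_partitions) (void);
	void (*process_activate_IRQ_rcvd) (void);

	void (*allow_hb) (short partid);
	void (*disallow_hb) (short partid);
	void (*disallow_all_hbs) (void);
	void (*heartbeat_init) (void);
	void (*heartbeat_exit) (void);
	void (*increment_heartbeat) (void);
	void (*offline_heartbeat) (void);
	void (*online_heartbeat) (void);
	enum xp_retval (*get_remote_heartbeat) (struct xpc_partition *part);

	enum xp_retval (*make_first_contact) (struct xpc_partition *part);
	enum xp_retval (*setup_ch_structures) (struct xpc_partition *part);
	void (*teardown_ch_structures) (struct xpc_partition *part);
	int (*n_of_deliverable_payloads) (struct xpc_channel *ch);

	void (*request_partition_reactivation) (struct xpc_partition *part);
	void (*request_partition_deactivation) (struct xpc_partition *part);
	void (*indicate_partition_engaged) (struct xpc_partition *part);
	void (*indicate_partition_disengaged) (struct xpc_partition *part);
	int (*partition_engaged) (short partid);
	int (*any_partition_engaged) (void);

	/*
	 * The remaining members (send_chctl_*, send_payload, and so on)
	 * follow the same one-to-one mapping from the pointers deleted
	 * above and are elided here.
	 */
};

Each member keeps the signature of the pointer it replaces; only the xpc_ prefix moves from the symbol name to the struct instance, and the _sn suffix is dropped (xpc_setup_ch_structures_sn becomes .setup_ch_structures, as the later hunks confirm).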
@@ -245,7 +184,7 @@ xpc_timeout_partition_disengage(unsigned long data)
 	(void)xpc_partition_disengaged(part);
 
 	DBUG_ON(part->disengage_timeout != 0);
-	DBUG_ON(xpc_partition_engaged(XPC_PARTID(part)));
+	DBUG_ON(xpc_arch_ops.partition_engaged(XPC_PARTID(part)));
 }
 
 /*
@@ -256,7 +195,7 @@ xpc_timeout_partition_disengage(unsigned long data)
 static void
 xpc_hb_beater(unsigned long dummy)
 {
-	xpc_increment_heartbeat();
+	xpc_arch_ops.increment_heartbeat();
 
 	if (time_is_before_eq_jiffies(xpc_hb_check_timeout))
 		wake_up_interruptible(&xpc_activate_IRQ_wq);
@@ -268,7 +207,7 @@ xpc_hb_beater(unsigned long dummy)
 static void
 xpc_start_hb_beater(void)
 {
-	xpc_heartbeat_init();
+	xpc_arch_ops.heartbeat_init();
 	init_timer(&xpc_hb_timer);
 	xpc_hb_timer.function = xpc_hb_beater;
 	xpc_hb_beater(0);
@@ -278,7 +217,7 @@ static void
 xpc_stop_hb_beater(void)
 {
 	del_timer_sync(&xpc_hb_timer);
-	xpc_heartbeat_exit();
+	xpc_arch_ops.heartbeat_exit();
 }
 
 /*
@@ -307,7 +246,7 @@ xpc_check_remote_hb(void)
 			continue;
 		}
 
-		ret = xpc_get_remote_heartbeat(part);
+		ret = xpc_arch_ops.get_remote_heartbeat(part);
 		if (ret != xpSuccess)
 			XPC_DEACTIVATE_PARTITION(part, ret);
 	}
@@ -358,7 +297,7 @@ xpc_hb_checker(void *ignore)
 			force_IRQ = 0;
 			dev_dbg(xpc_part, "processing activate IRQs "
 				"received\n");
-			xpc_process_activate_IRQ_rcvd();
+			xpc_arch_ops.process_activate_IRQ_rcvd();
 		}
 
 		/* wait for IRQ or timeout */
@@ -533,7 +472,7 @@ xpc_setup_ch_structures(struct xpc_partition *part)
 		init_waitqueue_head(&ch->idle_wq);
 	}
 
-	ret = xpc_setup_ch_structures_sn(part);
+	ret = xpc_arch_ops.setup_ch_structures(part);
 	if (ret != xpSuccess)
 		goto out_2;
 
@@ -577,7 +516,7 @@ xpc_teardown_ch_structures(struct xpc_partition *part)
 
 	/* now we can begin tearing down the infrastructure */
 
-	xpc_teardown_ch_structures_sn(part);
+	xpc_arch_ops.teardown_ch_structures(part);
 
 	kfree(part->remote_openclose_args_base);
 	part->remote_openclose_args = NULL;
@@ -625,12 +564,12 @@ xpc_activating(void *__partid)
 
 	dev_dbg(xpc_part, "activating partition %d\n", partid);
 
-	xpc_allow_hb(partid);
+	xpc_arch_ops.allow_hb(partid);
 
 	if (xpc_setup_ch_structures(part) == xpSuccess) {
 		(void)xpc_part_ref(part);	/* this will always succeed */
 
-		if (xpc_make_first_contact(part) == xpSuccess) {
+		if (xpc_arch_ops.make_first_contact(part) == xpSuccess) {
 			xpc_mark_partition_active(part);
 			xpc_channel_mgr(part);
 			/* won't return until partition is deactivating */
@@ -640,12 +579,12 @@ xpc_activating(void *__partid)
 		xpc_teardown_ch_structures(part);
 	}
 
-	xpc_disallow_hb(partid);
+	xpc_arch_ops.disallow_hb(partid);
 	xpc_mark_partition_inactive(part);
 
 	if (part->reason == xpReactivating) {
 		/* interrupting ourselves results in activating partition */
-		xpc_request_partition_reactivation(part);
+		xpc_arch_ops.request_partition_reactivation(part);
 	}
 
 	return 0;
@@ -718,10 +657,13 @@ xpc_activate_kthreads(struct xpc_channel *ch, int needed)
 static void
 xpc_kthread_waitmsgs(struct xpc_partition *part, struct xpc_channel *ch)
 {
+	int (*n_of_deliverable_payloads) (struct xpc_channel *) =
+		xpc_arch_ops.n_of_deliverable_payloads;
+
 	do {
 		/* deliver messages to their intended recipients */
 
-		while (xpc_n_of_deliverable_payloads(ch) > 0 &&
+		while (n_of_deliverable_payloads(ch) > 0 &&
 		       !(ch->flags & XPC_C_DISCONNECTING)) {
 			xpc_deliver_payload(ch);
 		}
@@ -737,7 +679,7 @@ xpc_kthread_waitmsgs(struct xpc_partition *part, struct xpc_channel *ch)
 			"wait_event_interruptible_exclusive()\n");
 
 		(void)wait_event_interruptible_exclusive(ch->idle_wq,
-			(xpc_n_of_deliverable_payloads(ch) > 0 ||
+			(n_of_deliverable_payloads(ch) > 0 ||
 			 (ch->flags & XPC_C_DISCONNECTING)));
 
 		atomic_dec(&ch->kthreads_idle);
@@ -754,6 +696,8 @@ xpc_kthread_start(void *args)
 	struct xpc_channel *ch;
 	int n_needed;
 	unsigned long irq_flags;
+	int (*n_of_deliverable_payloads) (struct xpc_channel *) =
+		xpc_arch_ops.n_of_deliverable_payloads;
 
 	dev_dbg(xpc_chan, "kthread starting, partid=%d, channel=%d\n",
 		partid, ch_number);
@@ -782,7 +726,7 @@ xpc_kthread_start(void *args)
 			 * additional kthreads to help deliver them. We only
 			 * need one less than total #of messages to deliver.
 			 */
-			n_needed = xpc_n_of_deliverable_payloads(ch) - 1;
+			n_needed = n_of_deliverable_payloads(ch) - 1;
 			if (n_needed > 0 && !(ch->flags & XPC_C_DISCONNECTING))
 				xpc_activate_kthreads(ch, n_needed);
 
@@ -810,7 +754,7 @@ xpc_kthread_start(void *args)
 
 	if (atomic_dec_return(&ch->kthreads_assigned) == 0 &&
 	    atomic_dec_return(&part->nchannels_engaged) == 0) {
-		xpc_indicate_partition_disengaged(part);
+		xpc_arch_ops.indicate_partition_disengaged(part);
 	}
 
 	xpc_msgqueue_deref(ch);
@@ -842,6 +786,8 @@ xpc_create_kthreads(struct xpc_channel *ch, int needed,
 	u64 args = XPC_PACK_ARGS(ch->partid, ch->number);
 	struct xpc_partition *part = &xpc_partitions[ch->partid];
 	struct task_struct *kthread;
+	void (*indicate_partition_disengaged) (struct xpc_partition *) =
+		xpc_arch_ops.indicate_partition_disengaged;
 
 	while (needed-- > 0) {
 
@@ -863,7 +809,7 @@ xpc_create_kthreads(struct xpc_channel *ch, int needed,
 
 		} else if (atomic_inc_return(&ch->kthreads_assigned) == 1 &&
 			   atomic_inc_return(&part->nchannels_engaged) == 1) {
-			xpc_indicate_partition_engaged(part);
+			xpc_arch_ops.indicate_partition_engaged(part);
 		}
 		(void)xpc_part_ref(part);
 		xpc_msgqueue_ref(ch);
@@ -885,7 +831,7 @@ xpc_create_kthreads(struct xpc_channel *ch, int needed,
 
 			if (atomic_dec_return(&ch->kthreads_assigned) == 0 &&
 			    atomic_dec_return(&part->nchannels_engaged) == 0) {
-				xpc_indicate_partition_disengaged(part);
+				indicate_partition_disengaged(part);
 			}
 			xpc_msgqueue_deref(ch);
 			xpc_part_deref(part);
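A note on the functions touched above: in the kthread hot paths, rather than spelling out xpc_arch_ops.<member> at every call site, the patch copies the op into a local function pointer once per function (n_of_deliverable_payloads in xpc_kthread_waitmsgs() and xpc_kthread_start(), indicate_partition_disengaged in xpc_create_kthreads()). That keeps the heavily indented call sites inside the 80-column limit and reads the global ops table once rather than on every pass through the delivery loop. A minimal sketch of the same idiom; the helper name is hypothetical and not part of the patch:

/* hypothetical helper showing the local-pointer idiom used above */
static void
example_drain_channel(struct xpc_channel *ch)
{
	/* read the global ops table once, then call through the local */
	int (*n_of_deliverable_payloads) (struct xpc_channel *) =
		xpc_arch_ops.n_of_deliverable_payloads;

	while (n_of_deliverable_payloads(ch) > 0 &&
	       !(ch->flags & XPC_C_DISCONNECTING))
		xpc_deliver_payload(ch);
}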
@@ -998,13 +944,13 @@ xpc_setup_partitions(void)
 		atomic_set(&part->references, 0);
 	}
 
-	return xpc_setup_partitions_sn();
+	return xpc_arch_ops.setup_partitions();
 }
 
 static void
 xpc_teardown_partitions(void)
 {
-	xpc_teardown_partitions_sn();
+	xpc_arch_ops.teardown_partitions();
 	kfree(xpc_partitions);
 }
 
@@ -1060,7 +1006,7 @@ xpc_do_exit(enum xp_retval reason)
 				disengage_timeout = part->disengage_timeout;
 		}
 
-		if (xpc_any_partition_engaged()) {
+		if (xpc_arch_ops.any_partition_engaged()) {
 			if (time_is_before_jiffies(printmsg_time)) {
 				dev_info(xpc_part, "waiting for remote "
 					 "partitions to deactivate, timeout in "
@@ -1091,7 +1037,7 @@ xpc_do_exit(enum xp_retval reason)
 
 	} while (1);
 
-	DBUG_ON(xpc_any_partition_engaged());
+	DBUG_ON(xpc_arch_ops.any_partition_engaged());
 
 	xpc_teardown_rsvd_page();
 
@@ -1156,15 +1102,15 @@ xpc_die_deactivate(void)
 	/* keep xpc_hb_checker thread from doing anything (just in case) */
 	xpc_exiting = 1;
 
-	xpc_disallow_all_hbs();	/*indicate we're deactivated */
+	xpc_arch_ops.disallow_all_hbs();	/*indicate we're deactivated */
 
 	for (partid = 0; partid < xp_max_npartitions; partid++) {
 		part = &xpc_partitions[partid];
 
-		if (xpc_partition_engaged(partid) ||
+		if (xpc_arch_ops.partition_engaged(partid) ||
 		    part->act_state != XPC_P_AS_INACTIVE) {
-			xpc_request_partition_deactivation(part);
-			xpc_indicate_partition_disengaged(part);
+			xpc_arch_ops.request_partition_deactivation(part);
+			xpc_arch_ops.indicate_partition_disengaged(part);
 		}
 	}
 
@@ -1181,7 +1127,7 @@ xpc_die_deactivate(void)
 	wait_to_print = XPC_DEACTIVATE_PRINTMSG_INTERVAL * 1000 * 5;
 
 	while (1) {
-		any_engaged = xpc_any_partition_engaged();
+		any_engaged = xpc_arch_ops.any_partition_engaged();
 		if (!any_engaged) {
 			dev_info(xpc_part, "all partitions have deactivated\n");
 			break;
@@ -1190,7 +1136,7 @@ xpc_die_deactivate(void)
 		if (!keep_waiting--) {
 			for (partid = 0; partid < xp_max_npartitions;
 			     partid++) {
-				if (xpc_partition_engaged(partid)) {
+				if (xpc_arch_ops.partition_engaged(partid)) {
 					dev_info(xpc_part, "deactivate from "
 						 "remote partition %d timed "
 						 "out\n", partid);
@@ -1237,7 +1183,7 @@ xpc_system_die(struct notifier_block *nb, unsigned long event, void *unused)
 		/* fall through */
 	case DIE_MCA_MONARCH_ENTER:
 	case DIE_INIT_MONARCH_ENTER:
-		xpc_offline_heartbeat();
+		xpc_arch_ops.offline_heartbeat();
 		break;
 
 	case DIE_KDEBUG_LEAVE:
@@ -1248,7 +1194,7 @@ xpc_system_die(struct notifier_block *nb, unsigned long event, void *unused)
 		/* fall through */
 	case DIE_MCA_MONARCH_LEAVE:
 	case DIE_INIT_MONARCH_LEAVE:
-		xpc_online_heartbeat();
+		xpc_arch_ops.online_heartbeat();
 		break;
 	}
 #else
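Since xpc_main.c now calls through xpc_arch_ops unconditionally, each architecture backend must populate the table before any of these paths can run. This excerpt does not show that side of the patch; presumably each backend (sn2 or uv) fills in a static table and installs it from its init routine, along these lines (backend names and the initializer list are an assumption, abridged):

/* sketch, assuming a uv backend: one initializer per ops member */
static struct xpc_arch_operations xpc_arch_ops_uv = {
	.setup_partitions = xpc_setup_partitions_uv,
	.teardown_partitions = xpc_teardown_partitions_uv,
	.increment_heartbeat = xpc_increment_heartbeat_uv,
	.get_remote_heartbeat = xpc_get_remote_heartbeat_uv,
	.partition_engaged = xpc_partition_engaged_uv,
	.any_partition_engaged = xpc_any_partition_engaged_uv,
	/* ... */
};

int
xpc_init_uv(void)
{
	/* install the ops table before the timers and kthreads use it */
	xpc_arch_ops = xpc_arch_ops_uv;
	return 0;
}

A plain struct assignment suffices here because the table is written exactly once, at init time, before any reader exists.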