@@ -67,6 +67,11 @@ struct ip_vs_sync_conn_options {
 	struct ip_vs_seq        out_seq;        /* outgoing seq. struct */
 };
 
+struct ip_vs_sync_thread_data {
+	struct completion *startup;
+	int state;
+};
+
 #define IP_VS_SYNC_CONN_TIMEOUT (3*60*HZ)
 #define SIMPLE_CONN_SIZE  (sizeof(struct ip_vs_sync_conn))
 #define FULL_CONN_SIZE  \
@@ -751,6 +756,7 @@ static int sync_thread(void *startup)
 	mm_segment_t oldmm;
 	int state;
 	const char *name;
+	struct ip_vs_sync_thread_data *tinfo = startup;
 
 	/* increase the module use count */
 	ip_vs_use_count_inc();
@@ -789,7 +795,14 @@ static int sync_thread(void *startup)
 	add_wait_queue(&sync_wait, &wait);
 
 	set_sync_pid(state, current->pid);
-	complete((struct completion *)startup);
+	complete(tinfo->startup);
+
+	/*
+	 * once we call the completion queue above, we should
+	 * null out that reference, since its allocated on the
+	 * stack of the creating kernel thread
+	 */
+	tinfo->startup = NULL;
 
 	/* processing master/backup loop here */
 	if (state == IP_VS_STATE_MASTER)
@@ -801,6 +814,14 @@ static int sync_thread(void *startup)
 	remove_wait_queue(&sync_wait, &wait);
 
 	/* thread exits */
+
+	/*
+	 * If we weren't explicitly stopped, then we
+	 * exited in error, and should undo our state
+	 */
+	if ((!stop_master_sync) && (!stop_backup_sync))
+		ip_vs_sync_state -= tinfo->state;
+
 	set_sync_pid(state, 0);
 	IP_VS_INFO("sync thread stopped!\n");
 
@@ -812,6 +833,11 @@ static int sync_thread(void *startup)
 	set_stop_sync(state, 0);
 	wake_up(&stop_sync_wait);
 
+	/*
+	 * we need to free the structure that was allocated
+	 * for us in start_sync_thread
+	 */
+	kfree(tinfo);
 	return 0;
 }
 
@@ -838,11 +864,19 @@ int start_sync_thread(int state, char *mcast_ifn, __u8 syncid)
 {
 	DECLARE_COMPLETION_ONSTACK(startup);
 	pid_t pid;
+	struct ip_vs_sync_thread_data *tinfo;
 
 	if ((state == IP_VS_STATE_MASTER && sync_master_pid) ||
 	    (state == IP_VS_STATE_BACKUP && sync_backup_pid))
 		return -EEXIST;
 
+	/*
+	 * Note that tinfo will be freed in sync_thread on exit
+	 */
+	tinfo = kmalloc(sizeof(struct ip_vs_sync_thread_data), GFP_KERNEL);
+	if (!tinfo)
+		return -ENOMEM;
+
 	IP_VS_DBG(7, "%s: pid %d\n", __FUNCTION__, current->pid);
 	IP_VS_DBG(7, "Each ip_vs_sync_conn entry need %Zd bytes\n",
 		  sizeof(struct ip_vs_sync_conn));
@@ -858,8 +892,11 @@ int start_sync_thread(int state, char *mcast_ifn, __u8 syncid)
 		ip_vs_backup_syncid = syncid;
 	}
 
+	tinfo->state = state;
+	tinfo->startup = &startup;
+
   repeat:
-	if ((pid = kernel_thread(fork_sync_thread, &startup, 0)) < 0) {
+	if ((pid = kernel_thread(fork_sync_thread, tinfo, 0)) < 0) {
 		IP_VS_ERR("could not create fork_sync_thread due to %d... "
 			  "retrying.\n", pid);
 		msleep_interruptible(1000);
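
For reference only, not part of the patch: a minimal userspace sketch of the ownership rule the change introduces, written with pthreads and made-up names (sync_thread_data, sync_worker) standing in for kernel_thread() and the completion. The parent allocates tinfo on the heap and hands it to the thread; the thread signals that startup is done, drops every pointer into the parent's stack, and frees tinfo itself on exit, just as start_sync_thread() kmallocs tinfo and sync_thread() kfrees it.

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct sync_thread_data {
	pthread_mutex_t *lock;		/* guards *done; stands in for the completion */
	pthread_cond_t *started;
	int *done;
	int state;
};

static void *sync_worker(void *arg)
{
	struct sync_thread_data *tinfo = arg;

	/* tell the parent that startup finished... */
	pthread_mutex_lock(tinfo->lock);
	*tinfo->done = 1;
	pthread_cond_signal(tinfo->started);
	pthread_mutex_unlock(tinfo->lock);

	/*
	 * ...and drop the references into the parent's stack, the way
	 * sync_thread() nulls tinfo->startup after complete()
	 */
	tinfo->lock = NULL;
	tinfo->started = NULL;
	tinfo->done = NULL;

	printf("worker running, state %d\n", tinfo->state);

	free(tinfo);			/* the worker owns tinfo from here on */
	return NULL;
}

int main(void)
{
	pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
	pthread_cond_t started = PTHREAD_COND_INITIALIZER;
	int done = 0;
	pthread_t tid;
	struct sync_thread_data *tinfo;

	tinfo = malloc(sizeof(*tinfo));
	if (!tinfo)
		return 1;
	tinfo->lock = &lock;
	tinfo->started = &started;
	tinfo->done = &done;
	tinfo->state = 1;		/* e.g. IP_VS_STATE_MASTER */

	if (pthread_create(&tid, NULL, sync_worker, tinfo) != 0) {
		free(tinfo);		/* thread never ran, parent cleans up */
		return 1;
	}

	/* wait_for_completion() equivalent */
	pthread_mutex_lock(&lock);
	while (!done)
		pthread_cond_wait(&started, &lock);
	pthread_mutex_unlock(&lock);

	pthread_join(tid, NULL);
	return 0;
}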