@@ -117,21 +117,6 @@ static struct entry entries[MAX_ENTRIES];
 
 static atomic_t overflow_count;
 
-static void reset_entries(void)
-{
-	nr_entries = 0;
-	memset(entries, 0, sizeof(entries));
-	atomic_set(&overflow_count, 0);
-}
-
-static struct entry *alloc_entry(void)
-{
-	if (nr_entries >= MAX_ENTRIES)
-		return NULL;
-
-	return entries + nr_entries++;
-}
-
 /*
  * The entries are in a hash-table, for fast lookup:
  */
@@ -149,6 +134,22 @@ static struct entry *alloc_entry(void)
 
 static struct entry *tstat_hash_table[TSTAT_HASH_SIZE] __read_mostly;
 
+static void reset_entries(void)
+{
+	nr_entries = 0;
+	memset(entries, 0, sizeof(entries));
+	memset(tstat_hash_table, 0, sizeof(tstat_hash_table));
+	atomic_set(&overflow_count, 0);
+}
+
+static struct entry *alloc_entry(void)
+{
+	if (nr_entries >= MAX_ENTRIES)
+		return NULL;
+
+	return entries + nr_entries++;
+}
+
 static int match_entries(struct entry *entry1, struct entry *entry2)
 {
 	return entry1->timer == entry2->timer &&
@@ -202,12 +203,15 @@ static struct entry *tstat_lookup(struct entry *entry, char *comm)
 	if (curr) {
 		*curr = *entry;
 		curr->count = 0;
+		curr->next = NULL;
 		memcpy(curr->comm, comm, TASK_COMM_LEN);
+
+		smp_mb(); /* Ensure that curr is initialized before insert */
+
 		if (prev)
 			prev->next = curr;
 		else
 			*head = curr;
-		curr->next = NULL;
 	}
  out_unlock:
 	spin_unlock(&table_lock);
@@ -360,6 +364,7 @@ static ssize_t tstats_write(struct file *file, const char __user *buf,
 		if (!active) {
 			reset_entries();
 			time_start = ktime_get();
+			smp_mb();
 			active = 1;
 		}
 		break;