@@ -706,16 +706,11 @@ static void get_counters(const struct xt_table_info *t,
 	}
 }
 
-static int copy_entries_to_user(unsigned int total_size,
-				struct arpt_table *table,
-				void __user *userptr)
+static inline struct xt_counters *alloc_counters(struct arpt_table *table)
 {
-	unsigned int off, num, countersize;
-	struct arpt_entry *e;
+	unsigned int countersize;
 	struct xt_counters *counters;
 	struct xt_table_info *private = table->private;
-	int ret = 0;
-	void *loc_cpu_entry;
 
 	/* We need atomic snapshot of counters: rest doesn't change
 	 * (other than comefrom, which userspace doesn't care
@@ -725,13 +720,31 @@ static int copy_entries_to_user(unsigned int total_size,
 	counters = vmalloc_node(countersize, numa_node_id());
 
 	if (counters == NULL)
-		return -ENOMEM;
+		return ERR_PTR(-ENOMEM);
 
 	/* First, sum counters... */
 	write_lock_bh(&table->lock);
 	get_counters(private, counters);
 	write_unlock_bh(&table->lock);
 
+	return counters;
+}
+
+static int copy_entries_to_user(unsigned int total_size,
+				struct arpt_table *table,
+				void __user *userptr)
+{
+	unsigned int off, num;
+	struct arpt_entry *e;
+	struct xt_counters *counters;
+	struct xt_table_info *private = table->private;
+	int ret = 0;
+	void *loc_cpu_entry;
+
+	counters = alloc_counters(table);
+	if (IS_ERR(counters))
+		return PTR_ERR(counters);
+
 	loc_cpu_entry = private->entries[raw_smp_processor_id()];
 	/* ... then copy entire thing ... */
 	if (copy_to_user(userptr, loc_cpu_entry, total_size) != 0) {
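
For readers less familiar with the error-pointer convention this refactoring leans on, below is a minimal user-space sketch of the ERR_PTR()/IS_ERR()/PTR_ERR() idiom that lets the new alloc_counters() report -ENOMEM through its pointer return value. The simplified helpers, struct counters, and alloc_counters_demo() are illustrative stand-ins written for this note, not code from the patch or from the kernel's include/linux/err.h.

/*
 * Minimal user-space sketch (not from the patch) of the error-pointer
 * idiom used above: an allocator encodes a negative errno in its pointer
 * return value instead of returning NULL.  The helpers mirror the shape
 * of the kernel's ERR_PTR()/IS_ERR()/PTR_ERR(), but are simplified here.
 */
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

#define MAX_ERRNO 4095

static inline void *ERR_PTR(long error)	/* encode a -errno as a pointer */
{
	return (void *)error;
}

static inline long PTR_ERR(const void *ptr)	/* recover the -errno value */
{
	return (long)ptr;
}

static inline int IS_ERR(const void *ptr)	/* top MAX_ERRNO values are errors */
{
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

struct counters {			/* illustrative stand-in for xt_counters */
	unsigned long packets;
	unsigned long bytes;
};

/* Shaped like the new alloc_counters(): report failure via ERR_PTR(). */
static struct counters *alloc_counters_demo(size_t n)
{
	struct counters *c = calloc(n, sizeof(*c));

	if (c == NULL)
		return ERR_PTR(-ENOMEM);
	return c;
}

int main(void)
{
	struct counters *c = alloc_counters_demo(16);

	/* Mirrors the IS_ERR()/PTR_ERR() check added to copy_entries_to_user(). */
	if (IS_ERR(c))
		return (int)-PTR_ERR(c);

	c[0].packets = 1;
	printf("allocated, packets=%lu\n", c[0].packets);
	free(c);
	return 0;
}

The benefit over returning NULL is that the caller can propagate the exact errno, which is why the second hunk changes "return -ENOMEM" to "return ERR_PTR(-ENOMEM)" and has copy_entries_to_user() return PTR_ERR(counters) on failure.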