@@ -127,19 +127,15 @@
  *
  *      void add_input_randomness(unsigned int type, unsigned int code,
  *                              unsigned int value);
- *      void add_interrupt_randomness(int irq);
+ *      void add_interrupt_randomness(int irq, int irq_flags);
  *      void add_disk_randomness(struct gendisk *disk);
  *
  * add_input_randomness() uses the input layer interrupt timing, as well as
  * the event type information from the hardware.
  *
- * add_interrupt_randomness() uses the inter-interrupt timing as random
- * inputs to the entropy pool. Note that not all interrupts are good
- * sources of randomness! For example, the timer interrupts is not a
- * good choice, because the periodicity of the interrupts is too
- * regular, and hence predictable to an attacker. Network Interface
- * Controller interrupts are a better measure, since the timing of the
- * NIC interrupts are more unpredictable.
+ * add_interrupt_randomness() uses the interrupt timing as random
+ * inputs to the entropy pool. Using the cycle counters and the irq source
+ * as inputs, it feeds the randomness roughly once a second.
  *
  * add_disk_randomness() uses what amounts to the seek time of block
  * layer request events, on a per-disk_devt basis, as input to the
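
The rewritten comment above summarizes the design shift: every interrupt now feeds a small per-CPU scratch pool, and that pool is only folded onward roughly once a second, or after a large burst of events. The user-space toy below sketches just that throttling pattern under assumed names (scratch_pool, toy_interrupt_event, feed_main_pool); it is not the kernel implementation, which appears in the later hunks of this patch.

#include <stdint.h>
#include <stdio.h>
#include <time.h>

/* Toy per-CPU scratch pool; mirrors the idea, not the kernel layout. */
struct scratch_pool {
        uint32_t pool[4];
        time_t last;            /* when we last fed the main pool */
        unsigned count;         /* events mixed so far */
};

/* Stand-in for mixing into (and crediting) the main input pool. */
static void feed_main_pool(const struct scratch_pool *sp)
{
        printf("feeding main pool after %u events\n", sp->count);
}

/* Called once per simulated interrupt. */
static void toy_interrupt_event(struct scratch_pool *sp,
                                uint32_t cycles, uint32_t irq)
{
        time_t now = time(NULL);

        /* Always mix the cheap timing data into the scratch pool... */
        sp->pool[sp->count & 3] ^= cycles ^ irq;
        sp->count++;

        /* ...but only pass it on about once a second, or after a
         * large burst of events. */
        if ((sp->count & 1023) && now < sp->last + 1)
                return;

        sp->last = now;
        feed_main_pool(sp);
}

int main(void)
{
        struct scratch_pool sp = { { 0, 0, 0, 0 }, 0, 0 };
        uint32_t t;

        for (t = 0; t < 5000; t++)
                toy_interrupt_event(&sp, t * 2654435761u, 17);
        return 0;
}

The count check matches the shape used in the patch: when the low ten bits of the event count are zero, the scratch pool is spilled even if a second has not yet passed.
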
@@ -248,6 +244,7 @@
 #include <linux/percpu.h>
 #include <linux/cryptohash.h>
 #include <linux/fips.h>
+#include <linux/ptrace.h>
 
 #ifdef CONFIG_GENERIC_HARDIRQS
 # include <linux/irq.h>
@@ -256,6 +253,7 @@
 #include <asm/processor.h>
 #include <asm/uaccess.h>
 #include <asm/irq.h>
+#include <asm/irq_regs.h>
 #include <asm/io.h>
 
 /*
@@ -421,7 +419,9 @@ struct entropy_store {
         spinlock_t lock;
         unsigned add_ptr;
         int entropy_count;
+        int entropy_total;
         int input_rotate;
+        unsigned int initialized:1;
         __u8 last_data[EXTRACT_SIZE];
 };
 
@@ -454,6 +454,10 @@ static struct entropy_store nonblocking_pool = {
         .pool = nonblocking_pool_data
 };
 
+static __u32 const twist_table[8] = {
+        0x00000000, 0x3b6e20c8, 0x76dc4190, 0x4db26158,
+        0xedb88320, 0xd6d6a3e8, 0x9b64c2b0, 0xa00ae278 };
+
 /*
  * This function adds bytes into the entropy "pool". It does not
  * update the entropy estimate. The caller should call
@@ -467,9 +471,6 @@ static struct entropy_store nonblocking_pool = {
 static void mix_pool_bytes_extract(struct entropy_store *r, const void *in,
                                    int nbytes, __u8 out[64])
 {
-        static __u32 const twist_table[8] = {
-                0x00000000, 0x3b6e20c8, 0x76dc4190, 0x4db26158,
-                0xedb88320, 0xd6d6a3e8, 0x9b64c2b0, 0xa00ae278 };
         unsigned long i, j, tap1, tap2, tap3, tap4, tap5;
         int input_rotate;
         int wordmask = r->poolinfo->poolwords - 1;
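
The two hunks above only relocate the twist table from mix_pool_bytes_extract() to file scope, so the fast_mix() routine added in the next hunk can share it. Both mixers fold a word back into the pool the same way: shift right by three and XOR in the table entry selected by the three bits that would otherwise fall off the end. A stand-alone demonstration of that single step (the table is copied from the patch; the loop and starting value are only for the example):

#include <stdint.h>
#include <stdio.h>

/* Same eight constants as in the patch. */
static const uint32_t twist_table[8] = {
        0x00000000, 0x3b6e20c8, 0x76dc4190, 0x4db26158,
        0xedb88320, 0xd6d6a3e8, 0x9b64c2b0, 0xa00ae278 };

int main(void)
{
        uint32_t w = 0xdeadbeef;
        int i;

        /* The low three bits select which constant is folded back
         * into the shifted word. */
        for (i = 0; i < 4; i++) {
                w = (w >> 3) ^ twist_table[w & 7];
                printf("step %d: %08x\n", i + 1, (unsigned)w);
        }
        return 0;
}
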
@@ -528,6 +529,36 @@ static void mix_pool_bytes(struct entropy_store *r, const void *in, int bytes)
         mix_pool_bytes_extract(r, in, bytes, NULL);
 }
 
+struct fast_pool {
+        __u32 pool[4];
+        unsigned long last;
+        unsigned short count;
+        unsigned char rotate;
+        unsigned char last_timer_intr;
+};
+
+/*
+ * This is a fast mixing routine used by the interrupt randomness
+ * collector. It's hardcoded for an 128 bit pool and assumes that any
+ * locks that might be needed are taken by the caller.
+ */
+static void fast_mix(struct fast_pool *f, const void *in, int nbytes)
+{
+        const char *bytes = in;
+        __u32 w;
+        unsigned i = f->count;
+        unsigned input_rotate = f->rotate;
+
+        while (nbytes--) {
+                w = rol32(*bytes++, input_rotate & 31) ^ f->pool[i & 3] ^
+                        f->pool[(i + 1) & 3];
+                f->pool[i & 3] = (w >> 3) ^ twist_table[w & 7];
+                input_rotate += (i++ & 3) ? 7 : 14;
+        }
+        f->count = i;
+        f->rotate = input_rotate;
+}
+
 /*
  * Credit (or debit) the entropy store with n bits of entropy
  */
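
fast_mix() depends only on rol32() and the twist table, so it can be exercised outside the kernel. The harness below is a sketch: struct fast_pool, twist_table and fast_mix() are copied from this patch, rol32() is a local stand-in for the kernel helper from <linux/bitops.h>, and the sample input values are arbitrary.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

typedef uint32_t __u32;

/* Local stand-in for the kernel's rol32(). */
static inline __u32 rol32(__u32 word, unsigned int shift)
{
        return (word << shift) | (word >> ((32 - shift) & 31));
}

static const __u32 twist_table[8] = {
        0x00000000, 0x3b6e20c8, 0x76dc4190, 0x4db26158,
        0xedb88320, 0xd6d6a3e8, 0x9b64c2b0, 0xa00ae278 };

struct fast_pool {
        __u32 pool[4];
        unsigned long last;
        unsigned short count;
        unsigned char rotate;
        unsigned char last_timer_intr;
};

/* Copied from the patch, minus the kernel headers. */
static void fast_mix(struct fast_pool *f, const void *in, int nbytes)
{
        const char *bytes = in;
        __u32 w;
        unsigned i = f->count;
        unsigned input_rotate = f->rotate;

        while (nbytes--) {
                w = rol32(*bytes++, input_rotate & 31) ^ f->pool[i & 3] ^
                        f->pool[(i + 1) & 3];
                f->pool[i & 3] = (w >> 3) ^ twist_table[w & 7];
                input_rotate += (i++ & 3) ? 7 : 14;
        }
        f->count = i;
        f->rotate = input_rotate;
}

int main(void)
{
        struct fast_pool f;
        __u32 input[4] = { 0x12345678, 0x2a, 0xc0ffee00, 0 };
        int n;

        memset(&f, 0, sizeof(f));
        /* Mix the same 16-byte record three times, as the interrupt
         * path would on successive interrupts, and watch the pool. */
        for (n = 0; n < 3; n++) {
                fast_mix(&f, input, sizeof(input));
                printf("%08x %08x %08x %08x (count=%u rotate=%u)\n",
                       (unsigned)f.pool[0], (unsigned)f.pool[1],
                       (unsigned)f.pool[2], (unsigned)f.pool[3],
                       (unsigned)f.count, (unsigned)f.rotate);
        }
        return 0;
}

Each 16-byte record touches all four pool words, so the whole 128-bit pool churns on every interrupt; the entropy accounting happens only later, when the pool is spilled into an entropy_store.
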
@@ -551,6 +582,12 @@ static void credit_entropy_bits(struct entropy_store *r, int nbits)
                 entropy_count = r->poolinfo->POOLBITS;
         r->entropy_count = entropy_count;
 
+        if (!r->initialized && nbits > 0) {
+                r->entropy_total += nbits;
+                if (r->entropy_total > 128)
+                        r->initialized = 1;
+        }
+
         /* should we wake readers? */
         if (r == &input_pool && entropy_count >= random_read_wakeup_thresh) {
                 wake_up_interruptible(&random_read_wait);
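
This is where the two fields added to struct entropy_store earlier in the patch are used: credited bits accumulate in entropy_total until they exceed 128, after which the pool is flagged initialized, and add_interrupt_randomness() below consults nonblocking_pool.initialized to pick which pool to feed. A compilable toy model of just this bookkeeping, with hypothetical names:

#include <stdio.h>

/* Toy pool carrying only the new bookkeeping fields. */
struct toy_pool {
        int entropy_total;
        unsigned int initialized:1;
};

/* Mirrors the new branch in credit_entropy_bits(). */
static void toy_credit(struct toy_pool *p, int nbits)
{
        if (!p->initialized && nbits > 0) {
                p->entropy_total += nbits;
                if (p->entropy_total > 128)
                        p->initialized = 1;
        }
}

int main(void)
{
        struct toy_pool p = { 0, 0 };
        int n;

        /* One bit per credit, as the interrupt path grants. */
        for (n = 1; !p.initialized; n++)
                toy_credit(&p, 1);
        printf("initialized after %d one-bit credits\n", n - 1);
        return 0;
}

At one bit per spill and roughly one spill per second, the interrupt path alone would cross this threshold after a couple of minutes of uptime if nothing else credited the pool; other entropy sources can get there sooner.
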
@@ -700,17 +737,48 @@ void add_input_randomness(unsigned int type, unsigned int code,
 }
 EXPORT_SYMBOL_GPL(add_input_randomness);
 
-void add_interrupt_randomness(int irq)
+static DEFINE_PER_CPU(struct fast_pool, irq_randomness);
+
+void add_interrupt_randomness(int irq, int irq_flags)
 {
-        struct timer_rand_state *state;
+        struct entropy_store *r;
+        struct fast_pool *fast_pool = &__get_cpu_var(irq_randomness);
+        struct pt_regs *regs = get_irq_regs();
+        unsigned long now = jiffies;
+        __u32 input[4], cycles = get_cycles();
+
+        input[0] = cycles ^ jiffies;
+        input[1] = irq;
+        if (regs) {
+                __u64 ip = instruction_pointer(regs);
+                input[2] = ip;
+                input[3] = ip >> 32;
+        }
 
-        state = get_timer_rand_state(irq);
+        fast_mix(fast_pool, input, sizeof(input));
 
-        if (state == NULL)
+        if ((fast_pool->count & 1023) &&
+            !time_after(now, fast_pool->last + HZ))
                 return;
 
-        DEBUG_ENT("irq event %d\n", irq);
-        add_timer_randomness(state, 0x100 + irq);
+        fast_pool->last = now;
+
+        r = nonblocking_pool.initialized ? &input_pool : &nonblocking_pool;
+        mix_pool_bytes(r, &fast_pool->pool, sizeof(fast_pool->pool));
+        /*
+         * If we don't have a valid cycle counter, and we see
+         * back-to-back timer interrupts, then skip giving credit for
+         * any entropy.
+         */
+        if (cycles == 0) {
+                if (irq_flags & __IRQF_TIMER) {
+                        if (fast_pool->last_timer_intr)
+                                return;
+                        fast_pool->last_timer_intr = 1;
+                } else
+                        fast_pool->last_timer_intr = 0;
+        }
+        credit_entropy_bits(r, 1);
 }
 
 #ifdef CONFIG_BLOCK
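
The extra irq_flags argument exists so the callee can spot timer interrupts via __IRQF_TIMER even on machines without a cycle counter; the matching change to the generic interrupt code is not part of this section. The fragment below is purely a hypothetical illustration of the new calling convention (toy_handle_one_action and its arguments are invented for the example), not the actual kernel/irq change:

#include <linux/interrupt.h>
#include <linux/random.h>

/* Hypothetical caller: run one device handler, then feed this
 * interrupt's timing to the entropy code, passing the handler's
 * flags along so __IRQF_TIMER can be recognized. */
static irqreturn_t toy_handle_one_action(unsigned int irq,
                                         struct irqaction *action)
{
        irqreturn_t ret = action->handler(irq, action->dev_id);

        add_interrupt_randomness(irq, action->flags);
        return ret;
}
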
@@ -971,6 +1039,7 @@ static void init_std_data(struct entropy_store *r)
 
         spin_lock_irqsave(&r->lock, flags);
         r->entropy_count = 0;
+        r->entropy_total = 0;
         spin_unlock_irqrestore(&r->lock, flags);
 
         now = ktime_get_real();