/*
* Configuration information
@@ -341,7 +341,7 @@ static struct poolinfo {
* (See M. Matsumoto & Y. Kurita, 1992. Twisted GFSR generators. ACM
* Transactions on Modeling and Computer Simulation 2(3):179-194.
* Also see M. Matsumoto & Y. Kurita, 1994. Twisted GFSR generators
- * II. ACM Transactions on Mdeling and Computer Simulation 4:254-266)
+ * II. ACM Transactions on Modeling and Computer Simulation 4:254-266)
*
* Thanks to Colin Plumb for suggesting this.
*
@@ -625,23 +625,43 @@ static __u32 *batch_entropy_pool;
static int *batch_entropy_credit;
static int batch_max;
static int batch_head, batch_tail;
-static void batch_entropy_process(void *private_);
-static DECLARE_WORK(batch_work, batch_entropy_process, NULL);
+
+/*
+ * entropy_lock protects these 5 variables and the data they point to.
+ * It is acquired in interrupt context, so it must be acquired in process
+ * or BH context with interrupts disabled.
+ */
+static spinlock_t entropy_lock = SPIN_LOCK_UNLOCKED;
+
+static void batch_entropy_process(unsigned long private_);
+static DECLARE_TASKLET(batch_work, batch_entropy_process, 0);
+struct entropy_data {
+ __u32 entropy[2];
+ int credit;
+};
/* note: the size must be a power of 2 */
static int __init batch_entropy_init(int size, struct entropy_store *r)
{
- batch_entropy_pool = kmalloc(2*size*sizeof(__u32), GFP_KERNEL);
- if (!batch_entropy_pool)
+ void *pool, *credit;
+
+ pool = kmalloc(2*size*sizeof(__u32), GFP_KERNEL);
+ if (!pool)
return -1;
- batch_entropy_credit =kmalloc(size*sizeof(int), GFP_KERNEL);
- if (!batch_entropy_credit) {
- kfree(batch_entropy_pool);
+ credit = kmalloc(size*sizeof(int), GFP_KERNEL);
+ if (!credit) {
+ kfree(pool);
return -1;
}
+
+ spin_lock_irq(&entropy_lock);
+ batch_entropy_pool = pool;
+ batch_entropy_credit = credit;
batch_head = batch_tail = 0;
batch_max = size;
- batch_work.data = r;
+ batch_work.data = (unsigned long)r;
+ spin_unlock_irq(&entropy_lock);
+
return 0;
}
@@ -649,15 +669,21 @@ static int __init batch_entropy_init(int
* Changes to the entropy data is put into a queue rather than being added to
* the entropy counts directly. This is presumably to avoid doing heavy
* hashing calculations during an interrupt in add_timer_randomness().
- * Instead, the entropy is only added to the pool by keventd.
+ * Instead, the entropy is added to the pool next time we run the tasklets.
*/
void batch_entropy_store(u32 a, u32 b, int num)
{
int new;
+ unsigned long flags;
if (!batch_max)
return;
-
+
+ /*
+ * This function is _probably_ only called in irq context, but
+ * better safe than sorry.
+ */
+ spin_lock_irqsave(&entropy_lock, flags);
batch_entropy_pool[2*batch_head] = a;
batch_entropy_pool[(2*batch_head) + 1] = b;
batch_entropy_credit[batch_head] = num;
@@ -667,11 +693,12 @@ void batch_entropy_store(u32 a, u32 b, i
/*
* Schedule it for the next timer tick:
*/
- schedule_delayed_work(&batch_work, 1);
+ tasklet_schedule(&batch_work);
batch_head = new;
} else {
DEBUG_ENT("batch entropy buffer full\n");
}
+ spin_unlock_irqrestore(&entropy_lock, flags);
}
/*
@@ -679,25 +706,34 @@ void batch_entropy_store(u32 a, u32 b, i
* store (normally random_state). If that store has enough entropy, alternate
* between randomizing the data of the primary and secondary stores.
*/
-static void batch_entropy_process(void *private_)
+static void batch_entropy_process(unsigned long private_)
{
struct entropy_store *r = (struct entropy_store *) private_, *p;
int max_entropy = r->poolinfo.POOLBITS;
+ struct entropy_data cache;