@@ -2332,13 +2332,18 @@ static __always_inline void *slab_alloc_node(struct kmem_cache *s,
 
 	s = memcg_kmem_get_cache(s, gfpflags);
 redo:
-
 	/*
 	 * Must read kmem_cache cpu data via this cpu ptr. Preemption is
 	 * enabled. We may switch back and forth between cpus while
 	 * reading from one cpu area. That does not matter as long
 	 * as we end up on the original cpu again when doing the cmpxchg.
+	 *
+	 * Preemption is disabled for the retrieval of the tid because that
+	 * must occur from the current processor. We cannot allow rescheduling
+	 * on a different processor between the determination of the pointer
+	 * and the retrieval of the tid.
 	 */
+	preempt_disable();
 	c = __this_cpu_ptr(s->cpu_slab);
 
 	/*
@@ -2348,7 +2353,7 @@ redo:
 	 * linked list in between.
 	 */
 	tid = c->tid;
-	barrier();
+	preempt_enable();
 
 	object = c->freelist;
 	page = c->page;
@@ -2595,10 +2600,11 @@ redo:
 	 * data is retrieved via this pointer. If we are on the same cpu
 	 * during the cmpxchg then the free will succedd.
 	 */
+	preempt_disable();
 	c = __this_cpu_ptr(s->cpu_slab);
 
 	tid = c->tid;
-	barrier();
+	preempt_enable();
 
 	if (likely(page == c->page)) {
 		set_freepointer(s, object, c->freelist);
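For reference, with this change applied the allocation fast path looks roughly like the simplified sketch below. This is a hand-written sketch, not the literal mm/slub.c code: the slow path, node checks and failure accounting are omitted. The point it illustrates is that the per-cpu pointer and the tid are read as a pair under preempt_disable()/preempt_enable(), and this_cpu_cmpxchg_double() later rejects the transaction if the task was migrated or interrupted in between, which restarts the attempt from redo.

redo:
	/*
	 * Pin the task only for the window between fetching the per-cpu
	 * pointer and reading its tid, so both come from the same processor.
	 */
	preempt_disable();
	c = __this_cpu_ptr(s->cpu_slab);
	tid = c->tid;
	preempt_enable();

	object = c->freelist;

	/*
	 * If we were migrated, or an allocation/free ran on this cpu in the
	 * meantime, the tid no longer matches, the cmpxchg fails and we
	 * simply start over.
	 */
	if (!this_cpu_cmpxchg_double(s->cpu_slab->freelist, s->cpu_slab->tid,
				     object, tid,
				     get_freepointer_safe(s, object), next_tid(tid)))
		goto redo;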