@@ -72,6 +72,7 @@
 #include <asm/cacheflush.h>
 #include <asm/sections.h>
 #include <asm/tlbflush.h>
+#include <asm/io.h>

 #define PCPU_SLOT_BASE_SHIFT            5       /* 1-31 shares the same slot */
 #define PCPU_DFL_MAP_ALLOC              16      /* start a map with 16 ents */
@@ -151,7 +152,10 @@ static int pcpu_reserved_chunk_limit;
  *
  * During allocation, pcpu_alloc_mutex is kept locked all the time and
  * pcpu_lock is grabbed and released as necessary.  All actual memory
- * allocations are done using GFP_KERNEL with pcpu_lock released.
+ * allocations are done using GFP_KERNEL with pcpu_lock released.  In
+ * general, percpu memory can't be allocated with irq off but
+ * irqsave/restore are still used in alloc path so that it can be used
+ * from early init path - sched_init() specifically.
  *
  * Free path accesses and alters only the index data structures, so it
  * can be safely called from atomic context.  When memory needs to be
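The switch from the _irq spinlock variants to _irqsave/_irqrestore throughout this patch matters because spin_unlock_irq() unconditionally re-enables interrupts, which would be wrong for a caller that already runs with interrupts disabled, such as sched_init() during early boot. A minimal sketch of the difference, not part of the patch, using a hypothetical lock:

/* Sketch only, not part of the patch; hypothetical lock and functions. */
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(demo_lock);

static void demo_irq_variant(void)
{
        spin_lock_irq(&demo_lock);
        /* ... critical section ... */
        spin_unlock_irq(&demo_lock);    /* always re-enables IRQs on the way out */
}

static void demo_irqsave_variant(void)
{
        unsigned long flags;

        spin_lock_irqsave(&demo_lock, flags);
        /* ... critical section ... */
        spin_unlock_irqrestore(&demo_lock, flags);      /* restores the caller's IRQ state */
}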
@@ -350,63 +354,86 @@ static struct pcpu_chunk *pcpu_chunk_addr_search(void *addr)
 }

 /**
- * pcpu_extend_area_map - extend area map for allocation
- * @chunk: target chunk
+ * pcpu_need_to_extend - determine whether chunk area map needs to be extended
+ * @chunk: chunk of interest
  *
- * Extend area map of @chunk so that it can accommodate an allocation.
- * A single allocation can split an area into three areas, so this
- * function makes sure that @chunk->map has at least two extra slots.
+ * Determine whether area map of @chunk needs to be extended to
+ * accommodate a new allocation.
  *
  * CONTEXT:
- * pcpu_alloc_mutex, pcpu_lock.  pcpu_lock is released and reacquired
- * if area map is extended.
+ * pcpu_lock.
  *
  * RETURNS:
- * 0 if noop, 1 if successfully extended, -errno on failure.
+ * New target map allocation length if extension is necessary, 0
+ * otherwise.
  */
-static int pcpu_extend_area_map(struct pcpu_chunk *chunk)
-        __releases(lock) __acquires(lock)
+static int pcpu_need_to_extend(struct pcpu_chunk *chunk)
 {
         int new_alloc;
-        int *new;
-        size_t size;

-        /* has enough? */
         if (chunk->map_alloc >= chunk->map_used + 2)
                 return 0;

-        spin_unlock_irq(&pcpu_lock);
-
         new_alloc = PCPU_DFL_MAP_ALLOC;
         while (new_alloc < chunk->map_used + 2)
                 new_alloc *= 2;

-        new = pcpu_mem_alloc(new_alloc * sizeof(new[0]));
-        if (!new) {
-                spin_lock_irq(&pcpu_lock);
+        return new_alloc;
+}
+
+/**
+ * pcpu_extend_area_map - extend area map of a chunk
+ * @chunk: chunk of interest
+ * @new_alloc: new target allocation length of the area map
+ *
+ * Extend area map of @chunk to have @new_alloc entries.
+ *
+ * CONTEXT:
+ * Does GFP_KERNEL allocation.  Grabs and releases pcpu_lock.
+ *
+ * RETURNS:
+ * 0 on success, -errno on failure.
+ */
+static int pcpu_extend_area_map(struct pcpu_chunk *chunk, int new_alloc)
+{
+        int *old = NULL, *new = NULL;
+        size_t old_size = 0, new_size = new_alloc * sizeof(new[0]);
+        unsigned long flags;
+
+        new = pcpu_mem_alloc(new_size);
+        if (!new)
                 return -ENOMEM;
-        }

-        /*
-         * Acquire pcpu_lock and switch to new area map.  Only free
-         * could have happened inbetween, so map_used couldn't have
-         * grown.
-         */
-        spin_lock_irq(&pcpu_lock);
-        BUG_ON(new_alloc < chunk->map_used + 2);
+        /* acquire pcpu_lock and switch to new area map */
+        spin_lock_irqsave(&pcpu_lock, flags);

-        size = chunk->map_alloc * sizeof(chunk->map[0]);
-        memcpy(new, chunk->map, size);
+        if (new_alloc <= chunk->map_alloc)
+                goto out_unlock;
+
+        old_size = chunk->map_alloc * sizeof(chunk->map[0]);
+        memcpy(new, chunk->map, old_size);

         /*
          * map_alloc < PCPU_DFL_MAP_ALLOC indicates that the chunk is
          * one of the first chunks and still using static map.
          */
         if (chunk->map_alloc >= PCPU_DFL_MAP_ALLOC)
-                pcpu_mem_free(chunk->map, size);
+                old = chunk->map;

         chunk->map_alloc = new_alloc;
         chunk->map = new;
+        new = NULL;
+
+out_unlock:
+        spin_unlock_irqrestore(&pcpu_lock, flags);
+
+        /*
+         * pcpu_mem_free() might end up calling vfree() which uses
+         * IRQ-unsafe lock and thus can't be called under pcpu_lock.
+         */
+        pcpu_mem_free(old, old_size);
+        pcpu_mem_free(new, new_size);
+
         return 0;
 }

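The new pcpu_extend_area_map() follows a common resize-under-spinlock pattern: do the blocking GFP_KERNEL allocation with the lock dropped, retake the lock, recheck whether the resize is still needed (another extension may have happened in the meantime), swap the buffers, and free whichever buffer is now stale only after unlocking, since the free path (vfree()) takes IRQ-unsafe locks. A generic sketch of that pattern with hypothetical names, assuming kmalloc_array()/kfree() rather than pcpu_mem_alloc()/pcpu_mem_free():

/* Sketch only; hypothetical structure and helpers. */
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>

struct demo_table {
        spinlock_t lock;        /* assumed initialized with spin_lock_init() */
        int *entries;
        int nr_alloc;           /* allocated length */
        int nr_used;            /* used length */
};

static int demo_table_grow(struct demo_table *t, int new_alloc)
{
        int *new, *old = NULL;
        unsigned long flags;

        /* blocking allocation done while the lock is not held */
        new = kmalloc_array(new_alloc, sizeof(*new), GFP_KERNEL);
        if (!new)
                return -ENOMEM;

        spin_lock_irqsave(&t->lock, flags);
        if (new_alloc <= t->nr_alloc) {
                /* someone else already grew the table; nothing to do */
                spin_unlock_irqrestore(&t->lock, flags);
                kfree(new);
                return 0;
        }
        memcpy(new, t->entries, t->nr_alloc * sizeof(*new));
        old = t->entries;
        t->entries = new;
        t->nr_alloc = new_alloc;
        spin_unlock_irqrestore(&t->lock, flags);

        kfree(old);             /* free the stale buffer outside the lock */
        return 0;
}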
@@ -1045,7 +1072,8 @@ static void *pcpu_alloc(size_t size, size_t align, bool reserved)
         static int warn_limit = 10;
         struct pcpu_chunk *chunk;
         const char *err;
-        int slot, off;
+        int slot, off, new_alloc;
+        unsigned long flags;

         if (unlikely(!size || size > PCPU_MIN_UNIT_SIZE || align > PAGE_SIZE)) {
                 WARN(true, "illegal size (%zu) or align (%zu) for "
@@ -1054,19 +1082,30 @@ static void *pcpu_alloc(size_t size, size_t align, bool reserved)
         }

         mutex_lock(&pcpu_alloc_mutex);
-        spin_lock_irq(&pcpu_lock);
+        spin_lock_irqsave(&pcpu_lock, flags);

         /* serve reserved allocations from the reserved chunk if available */
         if (reserved && pcpu_reserved_chunk) {
                 chunk = pcpu_reserved_chunk;
-                if (size > chunk->contig_hint ||
-                    pcpu_extend_area_map(chunk) < 0) {
-                        err = "failed to extend area map of reserved chunk";
+
+                if (size > chunk->contig_hint) {
+                        err = "alloc from reserved chunk failed";
                         goto fail_unlock;
                 }
+
+                while ((new_alloc = pcpu_need_to_extend(chunk))) {
+                        spin_unlock_irqrestore(&pcpu_lock, flags);
+                        if (pcpu_extend_area_map(chunk, new_alloc) < 0) {
+                                err = "failed to extend area map of reserved chunk";
+                                goto fail_unlock_mutex;
+                        }
+                        spin_lock_irqsave(&pcpu_lock, flags);
+                }
+
                 off = pcpu_alloc_area(chunk, size, align);
                 if (off >= 0)
                         goto area_found;
+
                 err = "alloc from reserved chunk failed";
                 goto fail_unlock;
         }
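For reference, the target length returned by pcpu_need_to_extend() in the loop above is picked by doubling from PCPU_DFL_MAP_ALLOC until the map can hold the current usage plus the two extra slots a single allocation may need (one area can split into three). A standalone sketch of that calculation with a worked example; the helper name is hypothetical:

/* Sketch only; mirrors the doubling logic, not kernel code. */
#include <stdio.h>

#define PCPU_DFL_MAP_ALLOC      16

static int need_to_extend(int map_alloc, int map_used)
{
        int new_alloc;

        if (map_alloc >= map_used + 2)
                return 0;                       /* enough room already */

        new_alloc = PCPU_DFL_MAP_ALLOC;
        while (new_alloc < map_used + 2)
                new_alloc *= 2;
        return new_alloc;
}

int main(void)
{
        /* 16-entry map with 15 entries used -> needs 17 -> grows to 32 */
        printf("%d\n", need_to_extend(16, 15));         /* prints 32 */
        /* 32-entry map with 20 entries used -> no extension needed */
        printf("%d\n", need_to_extend(32, 20));         /* prints 0 */
        return 0;
}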
@@ -1078,14 +1117,20 @@ restart:
                 if (size > chunk->contig_hint)
                         continue;

-                switch (pcpu_extend_area_map(chunk)) {
-                case 0:
-                        break;
-                case 1:
-                        goto restart;   /* pcpu_lock dropped, restart */
-                default:
-                        err = "failed to extend area map";
-                        goto fail_unlock;
+                new_alloc = pcpu_need_to_extend(chunk);
+                if (new_alloc) {
+                        spin_unlock_irqrestore(&pcpu_lock, flags);
+                        if (pcpu_extend_area_map(chunk,
+                                                 new_alloc) < 0) {
+                                err = "failed to extend area map";
+                                goto fail_unlock_mutex;
+                        }
+                        spin_lock_irqsave(&pcpu_lock, flags);
+                        /*
+                         * pcpu_lock has been dropped, need to
+                         * restart cpu_slot list walking.
+                         */
+                        goto restart;
                 }

                 off = pcpu_alloc_area(chunk, size, align);
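The goto restart after a successful extension is the usual consequence of dropping a lock in the middle of a list walk: once pcpu_lock has been released, chunks may have been relocated between slots, so the scan position can no longer be trusted and the walk starts over. A generic sketch of that restart idiom with hypothetical names:

/* Sketch only; hypothetical list, item and blocking work. */
#include <linux/list.h>
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(demo_lock);
static LIST_HEAD(demo_list);

struct demo_item {
        struct list_head node;
        bool needs_prep;
};

static void demo_prepare(struct demo_item *item)
{
        item->needs_prep = false;       /* stand-in for blocking (sleeping) work */
}

static void demo_scan(void)
{
        struct demo_item *item;
        unsigned long flags;

restart:
        spin_lock_irqsave(&demo_lock, flags);
        list_for_each_entry(item, &demo_list, node) {
                if (!item->needs_prep)
                        continue;
                spin_unlock_irqrestore(&demo_lock, flags);
                demo_prepare(item);     /* lock dropped for the blocking work */
                /* the list may have changed while unlocked; start the walk over */
                goto restart;
        }
        spin_unlock_irqrestore(&demo_lock, flags);
}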
@@ -1095,7 +1140,7 @@ restart:
         }

         /* hmmm... no space left, create a new chunk */
-        spin_unlock_irq(&pcpu_lock);
+        spin_unlock_irqrestore(&pcpu_lock, flags);

         chunk = alloc_pcpu_chunk();
         if (!chunk) {
@@ -1103,16 +1148,16 @@ restart:
                 goto fail_unlock_mutex;
         }

-        spin_lock_irq(&pcpu_lock);
+        spin_lock_irqsave(&pcpu_lock, flags);
         pcpu_chunk_relocate(chunk, -1);
         goto restart;

 area_found:
-        spin_unlock_irq(&pcpu_lock);
+        spin_unlock_irqrestore(&pcpu_lock, flags);

         /* populate, map and clear the area */
         if (pcpu_populate_chunk(chunk, off, size)) {
-                spin_lock_irq(&pcpu_lock);
+                spin_lock_irqsave(&pcpu_lock, flags);
                 pcpu_free_area(chunk, off);
                 err = "failed to populate";
                 goto fail_unlock;
@@ -1124,7 +1169,7 @@ area_found:
         return __addr_to_pcpu_ptr(chunk->base_addr + off);

 fail_unlock:
-        spin_unlock_irq(&pcpu_lock);
+        spin_unlock_irqrestore(&pcpu_lock, flags);
 fail_unlock_mutex:
         mutex_unlock(&pcpu_alloc_mutex);
         if (warn_limit) {
@@ -1256,6 +1301,27 @@ void free_percpu(void *ptr)
 }
 EXPORT_SYMBOL_GPL(free_percpu);

+/**
+ * per_cpu_ptr_to_phys - convert translated percpu address to physical address
+ * @addr: the address to be converted to physical address
+ *
+ * Given @addr which is dereferenceable address obtained via one of
+ * percpu access macros, this function translates it into its physical
+ * address.  The caller is responsible for ensuring @addr stays valid
+ * until this function finishes.
+ *
+ * RETURNS:
+ * The physical address for @addr.
+ */
+phys_addr_t per_cpu_ptr_to_phys(void *addr)
+{
+        if ((unsigned long)addr < VMALLOC_START ||
+            (unsigned long)addr >= VMALLOC_END)
+                return __pa(addr);
+        else
+                return page_to_phys(vmalloc_to_page(addr));
+}
+
 static inline size_t pcpu_calc_fc_sizes(size_t static_size,
                                         size_t reserved_size,
                                         ssize_t *dyn_sizep)
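per_cpu_ptr_to_phys() only has to distinguish addresses in the vmalloc range (percpu chunks mapped there are resolved via vmalloc_to_page()) from directly mapped ones (resolved with __pa()). A hedged usage sketch; the per-CPU variable and its consumer below are hypothetical:

/* Sketch only; hypothetical per-CPU variable and consumer. */
#include <linux/percpu.h>

static DEFINE_PER_CPU(u64, demo_buffer);

/* Hand the physical address of one CPU's copy to e.g. firmware or a device. */
static phys_addr_t demo_buffer_phys(int cpu)
{
        u64 *vaddr = per_cpu_ptr(&demo_buffer, cpu);    /* translated, dereferenceable */

        return per_cpu_ptr_to_phys(vaddr);
}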