@@ -67,6 +67,25 @@ pte_t * pkmap_page_table;

static DECLARE_WAIT_QUEUE_HEAD(pkmap_map_wait);

+/*
+ * Most architectures have no use for kmap_high_get(), so let's abstract
+ * the disabling of IRQ out of the locking in that case to save on a
+ * potential useless overhead.
+ */
+#ifdef ARCH_NEEDS_KMAP_HIGH_GET
+#define lock_kmap() spin_lock_irq(&kmap_lock)
+#define unlock_kmap() spin_unlock_irq(&kmap_lock)
+#define lock_kmap_any(flags) spin_lock_irqsave(&kmap_lock, flags)
+#define unlock_kmap_any(flags) spin_unlock_irqrestore(&kmap_lock, flags)
+#else
+#define lock_kmap() spin_lock(&kmap_lock)
+#define unlock_kmap() spin_unlock(&kmap_lock)
+#define lock_kmap_any(flags) \
+ do { spin_lock(&kmap_lock); (void)(flags); } while (0)
+#define unlock_kmap_any(flags) \
+ do { spin_unlock(&kmap_lock); (void)(flags); } while (0)
+#endif
+
static void flush_all_zero_pkmaps(void)
{
int i;
@@ -113,9 +132,9 @@ static void flush_all_zero_pkmaps(void)
*/
void kmap_flush_unused(void)
{
- spin_lock(&kmap_lock);
+ lock_kmap();
flush_all_zero_pkmaps();
- spin_unlock(&kmap_lock);
+ unlock_kmap();
}

static inline unsigned long map_new_virtual(struct page *page)
@@ -145,10 +164,10 @@ start:

__set_current_state(TASK_UNINTERRUPTIBLE);
add_wait_queue(&pkmap_map_wait, &wait);
- spin_unlock(&kmap_lock);
+ unlock_kmap();
schedule();
remove_wait_queue(&pkmap_map_wait, &wait);
- spin_lock(&kmap_lock);
+ lock_kmap();

/* Somebody else might have mapped it while we slept */
if (page_address(page))
@@ -184,29 +203,59 @@ void *kmap_high(struct page *page)
* For highmem pages, we can't trust "virtual" until
* after we have the lock.
*/
- spin_lock(&kmap_lock);
+ lock_kmap();
vaddr = (unsigned long)page_address(page);
if (!vaddr)
vaddr = map_new_virtual(page);
pkmap_count[PKMAP_NR(vaddr)]++;
BUG_ON(pkmap_count[PKMAP_NR(vaddr)] < 2);
- spin_unlock(&kmap_lock);
+ unlock_kmap();
return (void*) vaddr;
}

EXPORT_SYMBOL(kmap_high);

+#ifdef ARCH_NEEDS_KMAP_HIGH_GET
+/**
+ * kmap_high_get - pin a highmem page into memory
+ * @page: &struct page to pin
+ *
+ * Returns the page's current virtual memory address, or NULL if no
+ * mapping exists. If and only if a non-NULL address is returned, a
+ * matching call to kunmap_high() is necessary.
+ *
+ * This can be called from any context.
+ */
+void *kmap_high_get(struct page *page)
+{
+ unsigned long vaddr, flags;
+
+ lock_kmap_any(flags);
+ vaddr = (unsigned long)page_address(page);
+ if (vaddr) {
+ BUG_ON(pkmap_count[PKMAP_NR(vaddr)] < 1);
+ pkmap_count[PKMAP_NR(vaddr)]++;
+ }
+ unlock_kmap_any(flags);
+ return (void*) vaddr;
+}
+#endif
+
/**
* kunmap_high - map a highmem page into memory
* @page: &struct page to unmap
+ *
+ * If ARCH_NEEDS_KMAP_HIGH_GET is not defined then this may be called
+ * only from user context.
*/
void kunmap_high(struct page *page)
{
unsigned long vaddr;
unsigned long nr;
+ unsigned long flags;
int need_wakeup;

- spin_lock(&kmap_lock);
+ lock_kmap_any(flags);
vaddr = (unsigned long)page_address(page);
BUG_ON(!vaddr);
nr = PKMAP_NR(vaddr);
@@ -232,7 +281,7 @@ void kunmap_high(struct page *page)
*/
need_wakeup = waitqueue_active(&pkmap_map_wait);
}
- spin_unlock(&kmap_lock);
+ unlock_kmap_any(flags);

/* do wake-up, if needed, race-free outside of the spin lock */
if (need_wakeup)
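
Not part of the patch: a sketch, under stated assumptions, of how the ARCH_NEEDS_KMAP_HIGH_GET test above is expected to be satisfied. An architecture that wants the atomic pinning interface would presumably define the macro in its <asm/highmem.h> (which mm/highmem.c picks up through <linux/highmem.h>) and declare the helper there, roughly:

/* Hypothetical <asm/highmem.h> fragment, for illustration only. */
#define ARCH_NEEDS_KMAP_HIGH_GET
extern void *kmap_high_get(struct page *page);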
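
A minimal caller-side sketch of the kmap_high_get() contract documented in the kerneldoc above; the caller and its flush helper are hypothetical, and the page is assumed to be a highmem page:

#include <linux/highmem.h>
#include <linux/mm.h>

/* Hypothetical caller, possibly running in atomic context. */
static void maybe_touch_kernel_mapping(struct page *page)
{
	/* Pin the existing pkmap entry, or get NULL if the page is unmapped. */
	void *vaddr = kmap_high_get(page);

	if (vaddr) {
		/* The entry is pinned and cannot be recycled until we unpin it. */
		my_flush_range(vaddr, PAGE_SIZE);	/* hypothetical helper */
		kunmap_high(page);			/* matching unpin is mandatory */
	}
	/* A NULL return means the page currently has no kernel mapping. */
}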