|
@@ -26,7 +26,7 @@ void kunmap(struct page *page)
|
|
|
* However when holding an atomic kmap it is not legal to sleep, so atomic
|
|
|
* kmaps are appropriate for short, tight code paths only.
|
|
|
*/
|
|
|
-void *kmap_atomic(struct page *page, enum km_type type)
|
|
|
+void *kmap_atomic_prot(struct page *page, enum km_type type, pgprot_t prot)
|
|
|
{
|
|
|
enum fixed_addresses idx;
|
|
|
unsigned long vaddr;
|
|
@@ -41,12 +41,17 @@ void *kmap_atomic(struct page *page, enum km_type type)
|
|
|
return page_address(page);
|
|
|
|
|
|
vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
|
|
|
- set_pte(kmap_pte-idx, mk_pte(page, kmap_prot));
|
|
|
+ set_pte(kmap_pte-idx, mk_pte(page, prot));
|
|
|
arch_flush_lazy_mmu_mode();
|
|
|
|
|
|
return (void*) vaddr;
|
|
|
}
|
|
|
|
|
|
+void *kmap_atomic(struct page *page, enum km_type type)
|
|
|
+{
|
|
|
+ return kmap_atomic_prot(page, type, kmap_prot);
|
|
|
+}
|
|
|
+
|
|
|
void kunmap_atomic(void *kvaddr, enum km_type type)
|
|
|
{
|
|
|
unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
|