@@ -15,8 +15,6 @@
 #include <asm/mmu_context.h>
 #include <asm/cacheflush.h>
 
-#define CACHE_ALIAS (current_cpu_data.dcache.alias_mask)
-
 #define kmap_get_fixmap_pte(vaddr) \
 	pte_offset_kernel(pmd_offset(pud_offset(pgd_offset_k(vaddr), (vaddr)), (vaddr)), (vaddr))
 
@@ -68,10 +66,9 @@ static inline void kunmap_coherent(struct page *page)
  */
 void clear_user_page(void *to, unsigned long address, struct page *page)
 {
-	__set_bit(PG_mapped, &page->flags);
-
 	clear_page(to);
-	if ((((address & PAGE_MASK) ^ (unsigned long)to) & CACHE_ALIAS))
+
+	if (pages_do_alias((unsigned long)to, address & PAGE_MASK))
 		__flush_wback_region(to, PAGE_SIZE);
 }
 
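For reference, the deleted open-coded CACHE_ALIAS test and the pages_do_alias() helper that replaces it compute the same predicate: whether two virtual addresses land in different cache colours, so that a write through one mapping can sit in dcache lines the other mapping will never see. A minimal sketch of the equivalent check, built from the alias mask the old macro expanded to (the real helper comes from the arch cache headers; the function name here is illustrative only):

	/* Sketch only: non-zero when addr1 and addr2 differ in the
	 * dcache alias bits and can therefore cache-alias on SH-4. */
	static inline unsigned long sh4_addrs_alias(unsigned long addr1,
						    unsigned long addr2)
	{
		return (addr1 ^ addr2) & current_cpu_data.dcache.alias_mask;
	}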
@@ -79,13 +76,14 @@ void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
 		       unsigned long vaddr, void *dst, const void *src,
 		       unsigned long len)
 {
-	void *vto;
-
-	__set_bit(PG_mapped, &page->flags);
-
-	vto = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
-	memcpy(vto, src, len);
-	kunmap_coherent(vto);
+	if (page_mapped(page) && !test_bit(PG_dcache_dirty, &page->flags)) {
+		void *vto = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
+		memcpy(vto, src, len);
+		kunmap_coherent(vto);
+	} else {
+		memcpy(dst, src, len);
+		set_bit(PG_dcache_dirty, &page->flags);
+	}
 
 	if (vma->vm_flags & VM_EXEC)
 		flush_cache_page(vma, vaddr, page_to_pfn(page));
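The new else branch is the lazy half of the scheme: when the page has no user mappings to stay coherent with, or a flush is already owed, the copy goes straight through the regular kernel mapping and PG_dcache_dirty records the debt. The debt is settled when the page is next wired into a user address space. A sketch of that consumer, in the spirit of the matching update_mmu_cache() change that accompanies this patch (the exact hook lives in the arch TLB/fault code, not in this file):

	/* Sketch: settle a deferred dcache flush before the user
	 * mapping goes live; 'address' is the faulting user vaddr. */
	if (page_mapping(page) &&
	    test_and_clear_bit(PG_dcache_dirty, &page->flags)) {
		unsigned long kaddr = (unsigned long)page_address(page);

		if (pages_do_alias(kaddr, address & PAGE_MASK))
			__flush_wback_region((void *)kaddr, PAGE_SIZE);
	}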
@@ -95,13 +93,14 @@ void copy_from_user_page(struct vm_area_struct *vma, struct page *page,
 		       unsigned long vaddr, void *dst, const void *src,
 		       unsigned long len)
 {
-	void *vfrom;
-
-	__set_bit(PG_mapped, &page->flags);
-
-	vfrom = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
-	memcpy(dst, vfrom, len);
-	kunmap_coherent(vfrom);
+	if (page_mapped(page) && !test_bit(PG_dcache_dirty, &page->flags)) {
+		void *vfrom = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
+		memcpy(dst, vfrom, len);
+		kunmap_coherent(vfrom);
+	} else {
+		memcpy(dst, src, len);
+		set_bit(PG_dcache_dirty, &page->flags);
+	}
 }
 
 void copy_user_highpage(struct page *to, struct page *from,
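copy_from_user_page() mirrors the write side, conservatively setting PG_dcache_dirty even though its fallback only reads through the plain kernel alias. Both helpers exist for callers such as ptrace, which touch another process's pages through a kernel mapping; the caller's shape below is condensed from the generic access_process_vm() pattern in mm/memory.c of this era, and shows why the arch gets to decide per page between the coherent mapping and the deferred flush:

	maddr = kmap(page);
	if (write) {
		copy_to_user_page(vma, page, addr,
				  maddr + offset, buf, bytes);
		set_page_dirty_lock(page);
	} else {
		copy_from_user_page(vma, page, addr,
				    buf, maddr + offset, bytes);
	}
	kunmap(page);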
@@ -109,14 +108,19 @@ void copy_user_highpage(struct page *to, struct page *from,
 {
 	void *vfrom, *vto;
 
-	__set_bit(PG_mapped, &to->flags);
-
 	vto = kmap_atomic(to, KM_USER1);
-	vfrom = kmap_coherent(from, vaddr);
-	copy_page(vto, vfrom);
-	kunmap_coherent(vfrom);
 
-	if (((vaddr ^ (unsigned long)vto) & CACHE_ALIAS))
+	if (page_mapped(from) && !test_bit(PG_dcache_dirty, &from->flags)) {
+		vfrom = kmap_coherent(from, vaddr);
+		copy_page(vto, vfrom);
+		kunmap_coherent(vfrom);
+	} else {
+		vfrom = kmap_atomic(from, KM_USER0);
+		copy_page(vto, vfrom);
+		kunmap_atomic(vfrom, KM_USER0);
+	}
+
+	if (pages_do_alias((unsigned long)vto, vaddr & PAGE_MASK))
 		__flush_wback_region(vto, PAGE_SIZE);
 
 	kunmap_atomic(vto, KM_USER1);
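copy_user_highpage() now only pays for kmap_coherent() when the source page actually has live, clean user mappings; otherwise a plain kmap_atomic() is enough, since there is no user view to stay coherent with. For context, the trick kmap_coherent() (defined earlier in this file) relies on is to pick a reserved kernel virtual address that shares the user mapping's cache colour, roughly sketched here with illustrative names:

	/* Sketch: index a bank of fixmap slots by the user address's
	 * alias bits, so the kernel-side mapping lands in the same
	 * cache colour and no flush is needed after the copy. */
	static unsigned long coherent_kvaddr(unsigned long uaddr)
	{
		unsigned long idx;

		idx = (uaddr & current_cpu_data.dcache.alias_mask) >> PAGE_SHIFT;
		return __fix_to_virt(FIX_CMAP_END - idx);
	}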
@@ -124,23 +128,3 @@
 	smp_wmb();
 }
 EXPORT_SYMBOL(copy_user_highpage);
-
-/*
- * For SH-4, we have our own implementation for ptep_get_and_clear
- */
-pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
-{
-	pte_t pte = *ptep;
-
-	pte_clear(mm, addr, ptep);
-	if (!pte_not_present(pte)) {
-		unsigned long pfn = pte_pfn(pte);
-		if (pfn_valid(pfn)) {
-			struct page *page = pfn_to_page(pfn);
-			struct address_space *mapping = page_mapping(page);
-			if (!mapping || !mapping_writably_mapped(mapping))
-				__clear_bit(PG_mapped, &page->flags);
-		}
-	}
-	return pte;
-}
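The SH-4 override of ptep_get_and_clear() existed solely to clear PG_mapped on PTE teardown; with the flag gone it has no remaining job, and the architecture falls back to the stock helper (the matching removal of the __HAVE_ARCH_PTEP_GET_AND_CLEAR override belongs in the arch headers). For reference, the generic definition this now resolves to, as found in include/asm-generic/pgtable.h of this era:

	#ifndef __HAVE_ARCH_PTEP_GET_AND_CLEAR
	static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
					       unsigned long address,
					       pte_t *ptep)
	{
		pte_t pte = *ptep;
		pte_clear(mm, address, ptep);
		return pte;
	}
	#endif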