@@ -774,25 +774,27 @@ static void dma_cache_maint_page(struct page *page, unsigned long offset,
 	size_t size, enum dma_data_direction dir,
 	void (*op)(const void *, size_t, int))
 {
+	unsigned long pfn;
+	size_t left = size;
+
+	pfn = page_to_pfn(page) + offset / PAGE_SIZE;
+	offset %= PAGE_SIZE;
+
 	/*
 	 * A single sg entry may refer to multiple physically contiguous
 	 * pages.  But we still need to process highmem pages individually.
 	 * If highmem is not configured then the bulk of this loop gets
 	 * optimized out.
 	 */
-	size_t left = size;
 	do {
 		size_t len = left;
 		void *vaddr;
 
+		page = pfn_to_page(pfn);
+
 		if (PageHighMem(page)) {
-			if (len + offset > PAGE_SIZE) {
-				if (offset >= PAGE_SIZE) {
-					page += offset / PAGE_SIZE;
-					offset %= PAGE_SIZE;
-				}
+			if (len + offset > PAGE_SIZE)
 				len = PAGE_SIZE - offset;
-			}
 			vaddr = kmap_high_get(page);
 			if (vaddr) {
 				vaddr += offset;
@@ -809,7 +811,7 @@ static void dma_cache_maint_page(struct page *page, unsigned long offset,
 			op(vaddr, len, dir);
 		}
 		offset = 0;
-		page++;
+		pfn++;
 		left -= len;
 	} while (left);
 }