@@ -941,15 +941,14 @@ static unsigned int copy_from_bounce_buffer(struct scatterlist *orig_sgl,
 	local_irq_save(flags);
 
 	for (i = 0; i < orig_sgl_count; i++) {
-		dest_addr = (unsigned long)kmap_atomic(sg_page((&orig_sgl[i])),
-					KM_IRQ0) + orig_sgl[i].offset;
+		dest_addr = (unsigned long)kmap_atomic(sg_page((&orig_sgl[i])))
+					+ orig_sgl[i].offset;
 		dest = dest_addr;
 		destlen = orig_sgl[i].length;
 
 		if (bounce_addr == 0)
 			bounce_addr =
-			(unsigned long)kmap_atomic(sg_page((&bounce_sgl[j])),
-						KM_IRQ0);
+			(unsigned long)kmap_atomic(sg_page((&bounce_sgl[j])));
 
 		while (destlen) {
 			src = bounce_addr + bounce_sgl[j].offset;
@@ -965,7 +964,7 @@ static unsigned int copy_from_bounce_buffer(struct scatterlist *orig_sgl,
 
 			if (bounce_sgl[j].offset == bounce_sgl[j].length) {
 				/* full */
-				kunmap_atomic((void *)bounce_addr, KM_IRQ0);
+				kunmap_atomic((void *)bounce_addr);
 				j++;
 
 				/*
@@ -990,15 +989,14 @@ static unsigned int copy_from_bounce_buffer(struct scatterlist *orig_sgl,
 				if (destlen || i != orig_sgl_count - 1)
 					bounce_addr =
 					(unsigned long)kmap_atomic(
-					sg_page((&bounce_sgl[j])), KM_IRQ0);
+					sg_page((&bounce_sgl[j])));
 			} else if (destlen == 0 && i == orig_sgl_count - 1) {
 				/* unmap the last bounce that is < PAGE_SIZE */
-				kunmap_atomic((void *)bounce_addr, KM_IRQ0);
+				kunmap_atomic((void *)bounce_addr);
 			}
 		}
 
-		kunmap_atomic((void *)(dest_addr - orig_sgl[i].offset),
-			      KM_IRQ0);
+		kunmap_atomic((void *)(dest_addr - orig_sgl[i].offset));
 	}
 
 	local_irq_restore(flags);
@@ -1024,15 +1022,14 @@ static unsigned int copy_to_bounce_buffer(struct scatterlist *orig_sgl,
 	local_irq_save(flags);
 
 	for (i = 0; i < orig_sgl_count; i++) {
-		src_addr = (unsigned long)kmap_atomic(sg_page((&orig_sgl[i])),
-				KM_IRQ0) + orig_sgl[i].offset;
+		src_addr = (unsigned long)kmap_atomic(sg_page((&orig_sgl[i])))
+				+ orig_sgl[i].offset;
 		src = src_addr;
 		srclen = orig_sgl[i].length;
 
 		if (bounce_addr == 0)
 			bounce_addr =
-			(unsigned long)kmap_atomic(sg_page((&bounce_sgl[j])),
-						KM_IRQ0);
+			(unsigned long)kmap_atomic(sg_page((&bounce_sgl[j])));
 
 		while (srclen) {
 			/* assume bounce offset always == 0 */
@@ -1049,22 +1046,22 @@ static unsigned int copy_to_bounce_buffer(struct scatterlist *orig_sgl,
 
 			if (bounce_sgl[j].length == PAGE_SIZE) {
 				/* full..move to next entry */
-				kunmap_atomic((void *)bounce_addr, KM_IRQ0);
+				kunmap_atomic((void *)bounce_addr);
 				j++;
 
 				/* if we need to use another bounce buffer */
 				if (srclen || i != orig_sgl_count - 1)
 					bounce_addr =
 					(unsigned long)kmap_atomic(
-					sg_page((&bounce_sgl[j])), KM_IRQ0);
+					sg_page((&bounce_sgl[j])));
 
 			} else if (srclen == 0 && i == orig_sgl_count - 1) {
 				/* unmap the last bounce that is < PAGE_SIZE */
-				kunmap_atomic((void *)bounce_addr, KM_IRQ0);
+				kunmap_atomic((void *)bounce_addr);
 			}
 		}
 
-		kunmap_atomic((void *)(src_addr - orig_sgl[i].offset), KM_IRQ0);
+		kunmap_atomic((void *)(src_addr - orig_sgl[i].offset));
 	}
 
 	local_irq_restore(flags);