View source

powerpc: rename iommu_num_pages function to iommu_nr_pages

This is a preparation patch for introducing a generic iommu_num_pages function.

Signed-off-by: Joerg Roedel <joerg.roedel@amd.com>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Paul Mackerras <paulus@samba.org>
Cc: FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp>
Cc: Muli Ben-Yehuda <muli@il.ibm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Joerg Roedel, 16 years ago
parent
commit
3400001c53

+ 6 - 6
arch/powerpc/kernel/iommu.c

@@ -51,7 +51,7 @@ static int protect4gb = 1;
 
 static void __iommu_free(struct iommu_table *, dma_addr_t, unsigned int);
 
-static inline unsigned long iommu_num_pages(unsigned long vaddr,
+static inline unsigned long iommu_nr_pages(unsigned long vaddr,
 					    unsigned long slen)
 {
 	unsigned long npages;
@@ -325,7 +325,7 @@ int iommu_map_sg(struct device *dev, struct iommu_table *tbl,
 		}
 		/* Allocate iommu entries for that segment */
 		vaddr = (unsigned long) sg_virt(s);
-		npages = iommu_num_pages(vaddr, slen);
+		npages = iommu_nr_pages(vaddr, slen);
 		align = 0;
 		if (IOMMU_PAGE_SHIFT < PAGE_SHIFT && slen >= PAGE_SIZE &&
 		    (vaddr & ~PAGE_MASK) == 0)
@@ -418,7 +418,7 @@ int iommu_map_sg(struct device *dev, struct iommu_table *tbl,
 			unsigned long vaddr, npages;
 
 			vaddr = s->dma_address & IOMMU_PAGE_MASK;
-			npages = iommu_num_pages(s->dma_address, s->dma_length);
+			npages = iommu_nr_pages(s->dma_address, s->dma_length);
 			__iommu_free(tbl, vaddr, npages);
 			s->dma_address = DMA_ERROR_CODE;
 			s->dma_length = 0;
@@ -452,7 +452,7 @@ void iommu_unmap_sg(struct iommu_table *tbl, struct scatterlist *sglist,
 
 		if (sg->dma_length == 0)
 			break;
-		npages = iommu_num_pages(dma_handle, sg->dma_length);
+		npages = iommu_nr_pages(dma_handle, sg->dma_length);
 		__iommu_free(tbl, dma_handle, npages);
 		sg = sg_next(sg);
 	}
@@ -584,7 +584,7 @@ dma_addr_t iommu_map_single(struct device *dev, struct iommu_table *tbl,
 	BUG_ON(direction == DMA_NONE);
 
 	uaddr = (unsigned long)vaddr;
-	npages = iommu_num_pages(uaddr, size);
+	npages = iommu_nr_pages(uaddr, size);
 
 	if (tbl) {
 		align = 0;
@@ -617,7 +617,7 @@ void iommu_unmap_single(struct iommu_table *tbl, dma_addr_t dma_handle,
 	BUG_ON(direction == DMA_NONE);
 
 	if (tbl) {
-		npages = iommu_num_pages(dma_handle, size);
+		npages = iommu_nr_pages(dma_handle, size);
		iommu_free(tbl, dma_handle, npages);
 	}
 }