Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/sparc-2.6

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/sparc-2.6:
  sparc64: remove unused calc_npages() in iommu_common.h
  sparc64: add the segment boundary checking to IOMMUs while merging SG entries
  [SPARC64]: Don't open-code {get,put}_cpu_var() in flush_tlb_pending().
Linus Torvalds, 17 years ago
parent
commit 4c61f72c72

arch/sparc64/kernel/iommu.c  +10 -2

@@ -516,9 +516,11 @@ static int dma_4u_map_sg(struct device *dev, struct scatterlist *sglist,
 	unsigned long flags, handle, prot, ctx;
 	dma_addr_t dma_next = 0, dma_addr;
 	unsigned int max_seg_size;
+	unsigned long seg_boundary_size;
 	int outcount, incount, i;
 	struct strbuf *strbuf;
 	struct iommu *iommu;
+	unsigned long base_shift;
 
 	BUG_ON(direction == DMA_NONE);
 
@@ -549,8 +551,11 @@ static int dma_4u_map_sg(struct device *dev, struct scatterlist *sglist,
 	outs->dma_length = 0;
 
 	max_seg_size = dma_get_max_seg_size(dev);
+	seg_boundary_size = ALIGN(dma_get_seg_boundary(dev) + 1,
+				  IO_PAGE_SIZE) >> IO_PAGE_SHIFT;
+	base_shift = iommu->page_table_map_base >> IO_PAGE_SHIFT;
 	for_each_sg(sglist, s, nelems, i) {
-		unsigned long paddr, npages, entry, slen;
+		unsigned long paddr, npages, entry, out_entry = 0, slen;
 		iopte_t *base;
 
 		slen = s->length;
@@ -593,7 +598,9 @@ static int dma_4u_map_sg(struct device *dev, struct scatterlist *sglist,
 			 * - allocated dma_addr isn't contiguous to previous allocation
 			 */
 			if ((dma_addr != dma_next) ||
-			    (outs->dma_length + s->length > max_seg_size)) {
+			    (outs->dma_length + s->length > max_seg_size) ||
+			    (is_span_boundary(out_entry, base_shift,
+					      seg_boundary_size, outs, s))) {
 				/* Can't merge: create a new segment */
 				segstart = s;
 				outcount++;
@@ -607,6 +614,7 @@ static int dma_4u_map_sg(struct device *dev, struct scatterlist *sglist,
 			/* This is a new segment, fill entries */
 			outs->dma_address = dma_addr;
 			outs->dma_length = slen;
+			out_entry = entry;
 		}
 
 		/* Calculate next page pointer for contiguous check */
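
The seg_boundary_size value introduced in both map_sg paths converts the device's DMA boundary mask into a count of IO pages. Below is a minimal standalone sketch of that arithmetic, not the kernel code itself: it assumes sparc64's 8 KB IO pages (IO_PAGE_SHIFT = 13) and a 64-bit unsigned long, reimplements ALIGN() locally, and uses 0xffffffff purely as an illustrative 4 GB boundary mask.

/* Hedged sketch: derive the segment-boundary size, in IO pages, from a
 * device boundary mask, mirroring the arithmetic added above. Assumes
 * IO_PAGE_SHIFT = 13 (sparc64's 8 KB IO pages); ALIGN() is redefined here
 * because this is not built against kernel headers. */
#include <stdio.h>

#define IO_PAGE_SHIFT	13UL
#define IO_PAGE_SIZE	(1UL << IO_PAGE_SHIFT)
#define ALIGN(x, a)	(((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
	/* Illustrative mask: a device that must not cross a 4 GB boundary. */
	unsigned long boundary_mask = 0xffffffffUL;

	unsigned long seg_boundary_size =
		ALIGN(boundary_mask + 1, IO_PAGE_SIZE) >> IO_PAGE_SHIFT;

	/* 0x100000000 bytes / 8 KB per IO page = 0x80000 IO pages. */
	printf("seg_boundary_size = %#lx IO pages\n", seg_boundary_size);
	return 0;
}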

arch/sparc64/kernel/iommu_common.h  +9 -9

@@ -12,6 +12,7 @@
 #include <linux/mm.h>
 #include <linux/scatterlist.h>
 #include <linux/device.h>
+#include <linux/iommu-helper.h>
 
 #include <asm/iommu.h>
 #include <asm/scatterlist.h>
@@ -45,17 +46,16 @@ static inline unsigned long iommu_num_pages(unsigned long vaddr,
 	return npages;
 }
 
-static inline unsigned long calc_npages(struct scatterlist *sglist, int nelems)
+static inline int is_span_boundary(unsigned long entry,
+				   unsigned long shift,
+				   unsigned long boundary_size,
+				   struct scatterlist *outs,
+				   struct scatterlist *sg)
 {
-	unsigned long i, npages = 0;
-	struct scatterlist *sg;
+	unsigned long paddr = SG_ENT_PHYS_ADDRESS(outs);
+	int nr = iommu_num_pages(paddr, outs->dma_length + sg->length);
 
-	for_each_sg(sglist, sg, nelems, i) {
-		unsigned long paddr = SG_ENT_PHYS_ADDRESS(sg);
-		npages += iommu_num_pages(paddr, sg->length);
-	}
-
-	return npages;
+	return iommu_is_span_boundary(entry, nr, shift, boundary_size);
 }
 
 extern unsigned long iommu_range_alloc(struct device *dev,
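
The new is_span_boundary() helper defers to iommu_is_span_boundary() to decide whether extending the current output segment by the next SG entry would make the DMA mapping straddle a segment boundary. A minimal standalone sketch of that crossing test follows; span_crosses_boundary() is a hypothetical stand-in that mirrors the check (boundary_size must be a power of two), not the kernel helper itself.

/* Hedged sketch: does a span of `nr` IO pages, starting at page-table entry
 * `entry` whose DMA address begins at page `shift + entry`, cross a multiple
 * of `boundary_size` pages? */
#include <stdio.h>

static int span_crosses_boundary(unsigned long entry, unsigned long nr,
				 unsigned long shift,
				 unsigned long boundary_size)
{
	/* Offset of the span's first page within its boundary window. */
	unsigned long offset = (shift + entry) & (boundary_size - 1);

	return offset + nr > boundary_size;
}

int main(void)
{
	/* With a boundary of 0x80000 pages (4 GB of 8 KB IO pages): a 4-page
	 * span ending exactly at the boundary is fine, one page later it
	 * crosses. */
	printf("%d\n", span_crosses_boundary(0x7fffc, 4, 0, 0x80000)); /* 0 */
	printf("%d\n", span_crosses_boundary(0x7fffd, 4, 0, 0x80000)); /* 1 */
	return 0;
}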

arch/sparc64/kernel/pci_sun4v.c  +10 -2

@@ -335,8 +335,10 @@ static int dma_4v_map_sg(struct device *dev, struct scatterlist *sglist,
 	unsigned long flags, handle, prot;
 	dma_addr_t dma_next = 0, dma_addr;
 	unsigned int max_seg_size;
+	unsigned long seg_boundary_size;
 	int outcount, incount, i;
 	struct iommu *iommu;
+	unsigned long base_shift;
 	long err;
 
 	BUG_ON(direction == DMA_NONE);
@@ -362,8 +364,11 @@ static int dma_4v_map_sg(struct device *dev, struct scatterlist *sglist,
 	iommu_batch_start(dev, prot, ~0UL);
 
 	max_seg_size = dma_get_max_seg_size(dev);
+	seg_boundary_size = ALIGN(dma_get_seg_boundary(dev) + 1,
+				  IO_PAGE_SIZE) >> IO_PAGE_SHIFT;
+	base_shift = iommu->page_table_map_base >> IO_PAGE_SHIFT;
 	for_each_sg(sglist, s, nelems, i) {
-		unsigned long paddr, npages, entry, slen;
+		unsigned long paddr, npages, entry, out_entry = 0, slen;
 
 		slen = s->length;
 		/* Sanity check */
@@ -406,7 +411,9 @@ static int dma_4v_map_sg(struct device *dev, struct scatterlist *sglist,
 			 * - allocated dma_addr isn't contiguous to previous allocation
 			 */
 			if ((dma_addr != dma_next) ||
-			    (outs->dma_length + s->length > max_seg_size)) {
+			    (outs->dma_length + s->length > max_seg_size) ||
+			    (is_span_boundary(out_entry, base_shift,
+					      seg_boundary_size, outs, s))) {
 				/* Can't merge: create a new segment */
 				segstart = s;
 				outcount++;
@@ -420,6 +427,7 @@ static int dma_4v_map_sg(struct device *dev, struct scatterlist *sglist,
 			/* This is a new segment, fill entries */
 			outs->dma_address = dma_addr;
 			outs->dma_length = slen;
+			out_entry = entry;
 		}
 
 		/* Calculate next page pointer for contiguous check */

arch/sparc64/mm/tlb.c  +2 -5

@@ -23,11 +23,8 @@ DEFINE_PER_CPU(struct mmu_gather, mmu_gathers) = { 0, };
 
 
 void flush_tlb_pending(void)
 {
-	struct mmu_gather *mp;
+	struct mmu_gather *mp = &get_cpu_var(mmu_gathers);
 
-	preempt_disable();
-
-	mp = &__get_cpu_var(mmu_gathers);
 	if (mp->tlb_nr) {
 		flush_tsb_user(mp);
 
@@ -43,7 +40,7 @@ void flush_tlb_pending(void)
 		mp->tlb_nr = 0;
 	}
 
-	preempt_enable();
+	put_cpu_var(mmu_gathers);
 }
 
 void tlb_batch_add(struct mm_struct *mm, unsigned long vaddr, pte_t *ptep, pte_t orig)
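
For context on the tlb.c change: in kernels of this era, get_cpu_var() disables preemption and evaluates to the calling CPU's copy of a per-CPU variable, and put_cpu_var() re-enables preemption, so the pair replaces the open-coded preempt_disable()/__get_cpu_var()/preempt_enable() sequence. The sketch below shows the two equivalent forms; it is kernel-context code rather than a standalone program, and demo_counter is a hypothetical per-CPU variable, not one from the patch.

/* Hedged sketch: both functions bump this CPU's copy of a per-CPU counter. */
#include <linux/percpu.h>

DEFINE_PER_CPU(unsigned long, demo_counter);

static void bump_open_coded(void)
{
	unsigned long *p;

	preempt_disable();			/* stay on this CPU  */
	p = &__get_cpu_var(demo_counter);	/* this CPU's copy   */
	(*p)++;
	preempt_enable();
}

static void bump_with_helpers(void)
{
	/* get_cpu_var() implies preempt_disable(); put_cpu_var() implies
	 * preempt_enable(). */
	unsigned long *p = &get_cpu_var(demo_counter);

	(*p)++;
	put_cpu_var(demo_counter);
}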