@@ -95,12 +95,14 @@ iommu_fill_pdir(struct ioc *ioc, struct scatterlist *startsg, int nents,
 */
 
 static inline unsigned int
-iommu_coalesce_chunks(struct ioc *ioc, struct scatterlist *startsg, int nents,
+iommu_coalesce_chunks(struct ioc *ioc, struct device *dev,
+		      struct scatterlist *startsg, int nents,
 		      int (*iommu_alloc_range)(struct ioc *, size_t))
 {
 	struct scatterlist *contig_sg;	/* contig chunk head */
 	unsigned long dma_offset, dma_len; /* start/len of DMA stream */
 	unsigned int n_mappings = 0;
+	unsigned int max_seg_size = dma_get_max_seg_size(dev);
 
 	while (nents > 0) {
 
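The signature change threads the struct device through to the coalescer so it can read the per-device DMA segment limit once, up front, via dma_get_max_seg_size(). The other side of that contract is a driver publishing its limit at probe time; a minimal sketch (the foo_* names and the helper are hypothetical, not part of this patch):

#include <linux/device.h>
#include <linux/dma-mapping.h>

struct foo_priv {			/* hypothetical driver state */
	struct device_dma_parameters dma_parms;
};

/* Publish a 64 KiB segment cap, which iommu_coalesce_chunks() will
 * now observe through dma_get_max_seg_size(dev). */
static int foo_probe_dma(struct device *dev, struct foo_priv *priv)
{
	/* dev->dma_parms must point at real storage before the cap is set */
	dev->dma_parms = &priv->dma_parms;
	return dma_set_max_seg_size(dev, 65536);
}

Devices that never set dma_parms fall back to dma_get_max_seg_size()'s default of 64 KiB, so the coalescer below always has a usable bound.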
@@ -142,6 +144,9 @@ iommu_coalesce_chunks(struct ioc *ioc, struct scatterlist *startsg, int nents,
 				IOVP_SIZE) > DMA_CHUNK_SIZE))
 			break;
 
+		if (startsg->length + dma_len > max_seg_size)
+			break;
+
 		/*
 		** Next see if we can append the next chunk (i.e.
 		** it must end on one page and begin on another
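The new test sits alongside the existing merge guards: before startsg is folded into the stream being built, the combined length is checked against the device cap. A condensed restatement of the guard's logic (standalone names are illustrative, not from the driver):

#include <linux/types.h>

/* A chunk may join the current DMA stream only while the running
 * total stays within the device's segment-size limit. */
static bool seg_fits(unsigned long dma_len, unsigned int chunk_len,
		     unsigned int max_seg_size)
{
	return chunk_len + dma_len <= max_seg_size;
}

Breaking rather than skipping matters here: leaving the inner append loop ends the current coalesced mapping, and the outer while (nents > 0) loop restarts with startsg as the head of a fresh contiguous run, so no scatterlist entry is dropped.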