|
@@ -156,6 +156,39 @@ EXPORT_SYMBOL(dma_mark_declared_memory_occupied);
|
|
|
/*
 * When > 0, dma_supported() rejects DMA masks wider than 32 bits,
 * i.e. double-address-cycle (DAC) addressing is forbidden as a
 * chipset-bug workaround (see via_no_dac below).
 */
int forbid_dac;

EXPORT_SYMBOL(forbid_dac);
|
|
|
|
|
|
+int
|
|
|
+dma_supported(struct device *dev, u64 mask)
|
|
|
+{
|
|
|
+ /*
|
|
|
+ * we fall back to GFP_DMA when the mask isn't all 1s,
|
|
|
+ * so we can't guarantee allocations that must be
|
|
|
+ * within a tighter range than GFP_DMA..
|
|
|
+ */
|
|
|
+ if (mask < 0x00ffffff)
|
|
|
+ return 0;
|
|
|
+
|
|
|
+ /* Work around chipset bugs */
|
|
|
+ if (forbid_dac > 0 && mask > 0xffffffffULL)
|
|
|
+ return 0;
|
|
|
+
|
|
|
+ if (dma_ops->dma_supported)
|
|
|
+ return dma_ops->dma_supported(dev, mask);
|
|
|
+
|
|
|
+ return 1;
|
|
|
+}
|
|
|
+
|
|
|
+int
|
|
|
+dma_set_mask(struct device *dev, u64 mask)
|
|
|
+{
|
|
|
+ if (!dev->dma_mask || !dma_supported(dev, mask))
|
|
|
+ return -EIO;
|
|
|
+
|
|
|
+ *dev->dma_mask = mask;
|
|
|
+
|
|
|
+ return 0;
|
|
|
+}
|
|
|
+
|
|
|
+
|
|
|
static __devinit void via_no_dac(struct pci_dev *dev)
|
|
|
{
|
|
|
if ((dev->class >> 8) == PCI_CLASS_BRIDGE_PCI && forbid_dac == 0) {
|