@@ -12,8 +12,8 @@ struct nouveau_sgdma_be {
 	struct drm_device *dev;
 
 	dma_addr_t *pages;
-	bool *ttm_alloced;
 	unsigned nr_pages;
+	bool unmap_pages;
 
 	u64 offset;
 	bool bound;
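With this hunk applied, the bookkeeping in struct nouveau_sgdma_be shrinks: the per-page ttm_alloced array is gone, replaced by a single unmap_pages flag, and (as the next hunk shows) pages now points at TTM's dma_addrs array instead of a private kmalloc'd copy. For reference, a sketch of the resulting structure; the leading backend member is not visible in this hunk and is assumed from the (struct nouveau_sgdma_be *)be casts below:

	struct nouveau_sgdma_be {
		struct ttm_backend backend;	/* assumed first member, so the casts
						 * from struct ttm_backend * are valid */
		struct drm_device *dev;

		dma_addr_t *pages;	/* borrowed from TTM's dma_addrs, not kmalloc'd */
		unsigned nr_pages;
		bool unmap_pages;	/* one flag replaces per-page ttm_alloced[] */

		u64 offset;
		bool bound;
	};
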
@@ -26,43 +26,28 @@ nouveau_sgdma_populate(struct ttm_backend *be, unsigned long num_pages,
 {
 	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
 	struct drm_device *dev = nvbe->dev;
+	int i;
 
 	NV_DEBUG(nvbe->dev, "num_pages = %ld\n", num_pages);
 
-	if (nvbe->pages)
-		return -EINVAL;
-
-	nvbe->pages = kmalloc(sizeof(dma_addr_t) * num_pages, GFP_KERNEL);
-	if (!nvbe->pages)
-		return -ENOMEM;
+	nvbe->pages = dma_addrs;
+	nvbe->nr_pages = num_pages;
+	nvbe->unmap_pages = true;
 
-	nvbe->ttm_alloced = kmalloc(sizeof(bool) * num_pages, GFP_KERNEL);
-	if (!nvbe->ttm_alloced) {
-		kfree(nvbe->pages);
-		nvbe->pages = NULL;
-		return -ENOMEM;
+	/* this code path isn't called and is incorrect anyways */
+	if (0) { /* dma_addrs[0] != DMA_ERROR_CODE */
+		nvbe->unmap_pages = false;
+		return 0;
 	}
 
-	nvbe->nr_pages = 0;
-	while (num_pages--) {
-		/* this code path isn't called and is incorrect anyways */
-		if (0) { /*dma_addrs[nvbe->nr_pages] != DMA_ERROR_CODE)*/
-			nvbe->pages[nvbe->nr_pages] =
-					dma_addrs[nvbe->nr_pages];
-			nvbe->ttm_alloced[nvbe->nr_pages] = true;
-		} else {
-			nvbe->pages[nvbe->nr_pages] =
-				pci_map_page(dev->pdev, pages[nvbe->nr_pages], 0,
-					     PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
-			if (pci_dma_mapping_error(dev->pdev,
-						  nvbe->pages[nvbe->nr_pages])) {
-				be->func->clear(be);
-				return -EFAULT;
-			}
-			nvbe->ttm_alloced[nvbe->nr_pages] = false;
+	for (i = 0; i < num_pages; i++) {
+		nvbe->pages[i] = pci_map_page(dev->pdev, pages[i], 0,
+					      PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
+		if (pci_dma_mapping_error(dev->pdev, nvbe->pages[i])) {
+			nvbe->nr_pages = i;
+			be->func->clear(be);
+			return -EFAULT;
 		}
-
-		nvbe->nr_pages++;
 	}
 
 	return 0;
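The rewritten populate() no longer allocates anything: pages simply aliases the dma_addrs array handed in by TTM, so the -ENOMEM paths disappear. The error path in the mapping loop relies on a contract with clear(): nvbe->nr_pages must hold the count of successfully mapped entries, because clear() unwinds with while (nvbe->nr_pages--). A minimal sketch of that map-then-unwind idiom, with hypothetical map_one()/is_error()/unmap_one() helpers standing in for pci_map_page()/pci_dma_mapping_error()/pci_unmap_page():

	/* Map n pages; on failure, unwind only the entries already mapped. */
	static int map_all(dma_addr_t *addr, struct page **page, int n)
	{
		int i;

		for (i = 0; i < n; i++) {
			addr[i] = map_one(page[i]);	/* hypothetical helper */
			if (is_error(addr[i])) {	/* hypothetical helper */
				while (i--)	/* same shape as clear()'s loop */
					unmap_one(addr[i]);
				return -EFAULT;
			}
		}
		return 0;
	}

When mapping of entry i fails, entries 0..i-1 are live, so the unwind count is exactly i; that is the value the error branch above stores in nvbe->nr_pages before delegating cleanup to be->func->clear(be).
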
@@ -72,25 +57,16 @@ static void
 nouveau_sgdma_clear(struct ttm_backend *be)
 {
 	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
-	struct drm_device *dev;
-
-	if (nvbe && nvbe->pages) {
-		dev = nvbe->dev;
-		NV_DEBUG(dev, "\n");
+	struct drm_device *dev = nvbe->dev;
 
-		if (nvbe->bound)
-			be->func->unbind(be);
+	if (nvbe->bound)
+		be->func->unbind(be);
 
+	if (nvbe->unmap_pages) {
 		while (nvbe->nr_pages--) {
-			if (!nvbe->ttm_alloced[nvbe->nr_pages])
-				pci_unmap_page(dev->pdev, nvbe->pages[nvbe->nr_pages],
+			pci_unmap_page(dev->pdev, nvbe->pages[nvbe->nr_pages],
 				       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
 		}
-		kfree(nvbe->pages);
-		kfree(nvbe->ttm_alloced);
-		nvbe->pages = NULL;
-		nvbe->ttm_alloced = NULL;
-		nvbe->nr_pages = 0;
 	}
 }
 
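The two functions now pair up around the unmap_pages flag: populate() sets it whenever it created the PCI mappings itself (and the disabled if (0) branch would clear it for DMA addresses supplied ready-made by TTM), while clear() only calls pci_unmap_page() when the flag is set. Since the backend no longer owns the pages array, all of the kfree() and pointer-reset bookkeeping is gone as well. As clear() reads with the patch applied (reassembled from the hunk above, including its "static void" context line):

	static void
	nouveau_sgdma_clear(struct ttm_backend *be)
	{
		struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
		struct drm_device *dev = nvbe->dev;

		if (nvbe->bound)
			be->func->unbind(be);

		/* only unmap what populate() mapped itself */
		if (nvbe->unmap_pages) {
			while (nvbe->nr_pages--) {
				pci_unmap_page(dev->pdev, nvbe->pages[nvbe->nr_pages],
					       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
			}
		}
	}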