@@ -33,17 +33,11 @@

#include <mach/timer.h>

-#include <linux/mm.h>
#include <linux/pfn.h>
#include <linux/atomic.h>
#include <linux/sched.h>
#include <mach/dma.h>

-/* I don't quite understand why dc4 fails when this is set to 1 and DMA is enabled */
-/* especially since dc4 doesn't use kmalloc'd memory. */
-
-#define ALLOW_MAP_OF_KMALLOC_MEMORY 0
-
/* ---- Public Variables ------------------------------------------------- */

/* ---- Private Constants and Types -------------------------------------- */
@@ -53,58 +47,18 @@
#define CONTROLLER_FROM_HANDLE(handle) (((handle) >> 4) & 0x0f)
#define CHANNEL_FROM_HANDLE(handle) ((handle) & 0x0f)

-#define DMA_MAP_DEBUG 0
-
-#if DMA_MAP_DEBUG
-# define DMA_MAP_PRINT(fmt, args...) printk("%s: " fmt, __func__, ## args)
-#else
-# define DMA_MAP_PRINT(fmt, args...)
-#endif

/* ---- Private Variables ------------------------------------------------ */

static DMA_Global_t gDMA;
static struct proc_dir_entry *gDmaDir;

-static atomic_t gDmaStatMemTypeKmalloc = ATOMIC_INIT(0);
-static atomic_t gDmaStatMemTypeVmalloc = ATOMIC_INIT(0);
-static atomic_t gDmaStatMemTypeUser = ATOMIC_INIT(0);
-static atomic_t gDmaStatMemTypeCoherent = ATOMIC_INIT(0);
-
#include "dma_device.c"

/* ---- Private Function Prototypes -------------------------------------- */

/* ---- Functions ------------------------------------------------------- */

-/****************************************************************************/
-/**
-* Displays information for /proc/dma/mem-type
-*/
-/****************************************************************************/
-
-static int dma_proc_read_mem_type(char *buf, char **start, off_t offset,
- int count, int *eof, void *data)
-{
- int len = 0;
-
- len += sprintf(buf + len, "dma_map_mem statistics\n");
- len +=
- sprintf(buf + len, "coherent: %d\n",
- atomic_read(&gDmaStatMemTypeCoherent));
- len +=
- sprintf(buf + len, "kmalloc: %d\n",
- atomic_read(&gDmaStatMemTypeKmalloc));
- len +=
- sprintf(buf + len, "vmalloc: %d\n",
- atomic_read(&gDmaStatMemTypeVmalloc));
- len +=
- sprintf(buf + len, "user: %d\n",
- atomic_read(&gDmaStatMemTypeUser));
-
- return len;
-}
-
/****************************************************************************/
/**
* Displays information for /proc/dma/channels
@@ -846,8 +800,6 @@ int dma_init(void)
dma_proc_read_channels, NULL);
create_proc_read_entry("devices", 0, gDmaDir,
dma_proc_read_devices, NULL);
- create_proc_read_entry("mem-type", 0, gDmaDir,
- dma_proc_read_mem_type, NULL);
}

out:
@@ -1565,767 +1517,3 @@ int dma_set_device_handler(DMA_Device_t dev, /* Device to set the callback for.
}

EXPORT_SYMBOL(dma_set_device_handler);
-
-/****************************************************************************/
-/**
-* Initializes a memory mapping structure
-*/
-/****************************************************************************/
-
-int dma_init_mem_map(DMA_MemMap_t *memMap)
-{
- memset(memMap, 0, sizeof(*memMap));
-
- sema_init(&memMap->lock, 1);
-
- return 0;
-}
-
-EXPORT_SYMBOL(dma_init_mem_map);
-
-/****************************************************************************/
-/**
-* Releases any memory currently being held by a memory mapping structure.
-*/
-/****************************************************************************/
-
-int dma_term_mem_map(DMA_MemMap_t *memMap)
-{
- down(&memMap->lock); /* Just being paranoid */
-
- /* Free up any allocated memory */
-
- up(&memMap->lock);
- memset(memMap, 0, sizeof(*memMap));
-
- return 0;
-}
-
-EXPORT_SYMBOL(dma_term_mem_map);
-
-/****************************************************************************/
-/**
-* Looks at a memory address and categorizes it.
-*
-* @return One of the values from the DMA_MemType_t enumeration.
-*/
-/****************************************************************************/
-
-DMA_MemType_t dma_mem_type(void *addr)
-{
- unsigned long addrVal = (unsigned long)addr;
-
- if (addrVal >= CONSISTENT_BASE) {
- /* NOTE: DMA virtual memory space starts at 0xFFxxxxxx */
-
- /* dma_alloc_xxx pages are physically and virtually contiguous */
-
- return DMA_MEM_TYPE_DMA;
- }
-
- /* Technically, we could add one more classification. Addresses between VMALLOC_END */
- /* and the beginning of the DMA virtual address could be considered to be I/O space. */
- /* Right now, nobody cares about this particular classification, so we ignore it. */
-
- if (is_vmalloc_addr(addr)) {
- /* Address comes from the vmalloc'd region. Pages are virtually */
- /* contiguous but NOT physically contiguous */
-
- return DMA_MEM_TYPE_VMALLOC;
- }
-
- if (addrVal >= PAGE_OFFSET) {
- /* PAGE_OFFSET is typically 0xC0000000 */
-
- /* kmalloc'd pages are physically contiguous */
-
- return DMA_MEM_TYPE_KMALLOC;
- }
-
- return DMA_MEM_TYPE_USER;
-}
-
-EXPORT_SYMBOL(dma_mem_type);
-
-/****************************************************************************/
-/**
-* Looks at a memory address and determines if we support DMA'ing to/from
-* that type of memory.
-*
-* @return boolean -
-* return value != 0 means dma supported
-* return value == 0 means dma not supported
-*/
-/****************************************************************************/
-
-int dma_mem_supports_dma(void *addr)
-{
- DMA_MemType_t memType = dma_mem_type(addr);
-
- return (memType == DMA_MEM_TYPE_DMA)
-#if ALLOW_MAP_OF_KMALLOC_MEMORY
- || (memType == DMA_MEM_TYPE_KMALLOC)
-#endif
- || (memType == DMA_MEM_TYPE_USER);
-}
-
-EXPORT_SYMBOL(dma_mem_supports_dma);
-
-/****************************************************************************/
-/**
-* Maps in a memory region such that it can be used for performing a DMA.
-*
-* @return
-*/
-/****************************************************************************/
-
-int dma_map_start(DMA_MemMap_t *memMap, /* Stores state information about the map */
- enum dma_data_direction dir /* Direction that the mapping will be going */
- ) {
- int rc;
-
- down(&memMap->lock);
-
- DMA_MAP_PRINT("memMap: %p\n", memMap);
-
- if (memMap->inUse) {
- printk(KERN_ERR "%s: memory map %p is already being used\n",
- __func__, memMap);
- rc = -EBUSY;
- goto out;
- }
-
- memMap->inUse = 1;
- memMap->dir = dir;
- memMap->numRegionsUsed = 0;
-
- rc = 0;
-
-out:
-
- DMA_MAP_PRINT("returning %d", rc);
-
- up(&memMap->lock);
-
- return rc;
-}
-
-EXPORT_SYMBOL(dma_map_start);
-
-/****************************************************************************/
-/**
-* Adds a segment of memory to a memory map. Each segment is both
-* physically and virtually contiguous.
-*
-* @return 0 on success, error code otherwise.
-*/
-/****************************************************************************/
-
-static int dma_map_add_segment(DMA_MemMap_t *memMap, /* Stores state information about the map */
- DMA_Region_t *region, /* Region that the segment belongs to */
- void *virtAddr, /* Virtual address of the segment being added */
- dma_addr_t physAddr, /* Physical address of the segment being added */
- size_t numBytes /* Number of bytes of the segment being added */
- ) {
- DMA_Segment_t *segment;
-
- DMA_MAP_PRINT("memMap:%p va:%p pa:0x%x #:%d\n", memMap, virtAddr,
- physAddr, numBytes);
-
- /* Sanity check */
-
- if (((unsigned long)virtAddr < (unsigned long)region->virtAddr)
- || (((unsigned long)virtAddr + numBytes)) >
- ((unsigned long)region->virtAddr + region->numBytes)) {
- printk(KERN_ERR
- "%s: virtAddr %p is outside region @ %p len: %d\n",
- __func__, virtAddr, region->virtAddr, region->numBytes);
- return -EINVAL;
- }
-
- if (region->numSegmentsUsed > 0) {
- /* Check to see if this segment is physically contiguous with the previous one */
-
- segment = &region->segment[region->numSegmentsUsed - 1];
-
- if ((segment->physAddr + segment->numBytes) == physAddr) {
- /* It is - just add on to the end */
-
- DMA_MAP_PRINT("appending %d bytes to last segment\n",
- numBytes);
-
- segment->numBytes += numBytes;
-
- return 0;
- }
- }
-
- /* Reallocate to hold more segments, if required. */
-
- if (region->numSegmentsUsed >= region->numSegmentsAllocated) {
- DMA_Segment_t *newSegment;
- size_t oldSize =
- region->numSegmentsAllocated * sizeof(*newSegment);
- int newAlloc = region->numSegmentsAllocated + 4;
- size_t newSize = newAlloc * sizeof(*newSegment);
-
- newSegment = kmalloc(newSize, GFP_KERNEL);
- if (newSegment == NULL) {
- return -ENOMEM;
- }
- memcpy(newSegment, region->segment, oldSize);
- memset(&((uint8_t *) newSegment)[oldSize], 0,
- newSize - oldSize);
- kfree(region->segment);
-
- region->numSegmentsAllocated = newAlloc;
- region->segment = newSegment;
- }
-
- segment = &region->segment[region->numSegmentsUsed];
- region->numSegmentsUsed++;
-
- segment->virtAddr = virtAddr;
- segment->physAddr = physAddr;
- segment->numBytes = numBytes;
-
- DMA_MAP_PRINT("returning success\n");
-
- return 0;
-}
-
-/****************************************************************************/
-/**
-* Adds a region of memory to a memory map. Each region is virtually
-* contiguous, but not necessarily physically contiguous.
-*
-* @return 0 on success, error code otherwise.
-*/
-/****************************************************************************/
-
-int dma_map_add_region(DMA_MemMap_t *memMap, /* Stores state information about the map */
- void *mem, /* Virtual address that we want to get a map of */
- size_t numBytes /* Number of bytes being mapped */
- ) {
- unsigned long addr = (unsigned long)mem;
- unsigned int offset;
- int rc = 0;
- DMA_Region_t *region;
- dma_addr_t physAddr;
-
- down(&memMap->lock);
-
- DMA_MAP_PRINT("memMap:%p va:%p #:%d\n", memMap, mem, numBytes);
-
- if (!memMap->inUse) {
- printk(KERN_ERR "%s: Make sure you call dma_map_start first\n",
- __func__);
- rc = -EINVAL;
- goto out;
- }
-
- /* Reallocate to hold more regions. */
-
- if (memMap->numRegionsUsed >= memMap->numRegionsAllocated) {
- DMA_Region_t *newRegion;
- size_t oldSize =
- memMap->numRegionsAllocated * sizeof(*newRegion);
- int newAlloc = memMap->numRegionsAllocated + 4;
- size_t newSize = newAlloc * sizeof(*newRegion);
-
- newRegion = kmalloc(newSize, GFP_KERNEL);
- if (newRegion == NULL) {
- rc = -ENOMEM;
- goto out;
- }
- memcpy(newRegion, memMap->region, oldSize);
- memset(&((uint8_t *) newRegion)[oldSize], 0, newSize - oldSize);
-
- kfree(memMap->region);
-
- memMap->numRegionsAllocated = newAlloc;
- memMap->region = newRegion;
- }
-
- region = &memMap->region[memMap->numRegionsUsed];
- memMap->numRegionsUsed++;
-
- offset = addr & ~PAGE_MASK;
-
- region->memType = dma_mem_type(mem);
- region->virtAddr = mem;
- region->numBytes = numBytes;
- region->numSegmentsUsed = 0;
- region->numLockedPages = 0;
- region->lockedPages = NULL;
-
- switch (region->memType) {
- case DMA_MEM_TYPE_VMALLOC:
- {
- atomic_inc(&gDmaStatMemTypeVmalloc);
-
- /* printk(KERN_ERR "%s: vmalloc'd pages are not supported\n", __func__); */
-
- /* vmalloc'd pages are not physically contiguous */
-
- rc = -EINVAL;
- break;
- }
-
- case DMA_MEM_TYPE_KMALLOC:
- {
- atomic_inc(&gDmaStatMemTypeKmalloc);
-
- /* kmalloc'd pages are physically contiguous, so they'll have exactly */
- /* one segment */
-
-#if ALLOW_MAP_OF_KMALLOC_MEMORY
- physAddr =
- dma_map_single(NULL, mem, numBytes, memMap->dir);
- rc = dma_map_add_segment(memMap, region, mem, physAddr,
- numBytes);
-#else
- rc = -EINVAL;
-#endif
- break;
- }
-
- case DMA_MEM_TYPE_DMA:
- {
- /* dma_alloc_xxx pages are physically contiguous */
-
- atomic_inc(&gDmaStatMemTypeCoherent);
-
- physAddr = (vmalloc_to_pfn(mem) << PAGE_SHIFT) + offset;
-
- dma_sync_single_for_cpu(NULL, physAddr, numBytes,
- memMap->dir);
- rc = dma_map_add_segment(memMap, region, mem, physAddr,
- numBytes);
- break;
- }
-
- case DMA_MEM_TYPE_USER:
- {
- size_t firstPageOffset;
- size_t firstPageSize;
- struct page **pages;
- struct task_struct *userTask;
-
- atomic_inc(&gDmaStatMemTypeUser);
-
-#if 1
- /* If the pages are user pages, then the dma_mem_map_set_user_task function */
- /* must have been previously called. */
-
- if (memMap->userTask == NULL) {
- printk(KERN_ERR
- "%s: must call dma_mem_map_set_user_task when using user-mode memory\n",
- __func__);
- return -EINVAL;
- }
-
- /* User pages need to be locked. */
-
- firstPageOffset =
- (unsigned long)region->virtAddr & (PAGE_SIZE - 1);
- firstPageSize = PAGE_SIZE - firstPageOffset;
-
- region->numLockedPages = (firstPageOffset
- + region->numBytes +
- PAGE_SIZE - 1) / PAGE_SIZE;
- pages =
- kmalloc(region->numLockedPages *
- sizeof(struct page *), GFP_KERNEL);
-
- if (pages == NULL) {
- region->numLockedPages = 0;
- return -ENOMEM;
- }
-
- userTask = memMap->userTask;
-
- down_read(&userTask->mm->mmap_sem);
- rc = get_user_pages(userTask, /* task */
- userTask->mm, /* mm */
- (unsigned long)region->virtAddr, /* start */
- region->numLockedPages, /* len */
- memMap->dir == DMA_FROM_DEVICE, /* write */
- 0, /* force */
- pages, /* pages (array of pointers to page) */
- NULL); /* vmas */
- up_read(&userTask->mm->mmap_sem);
-
- if (rc != region->numLockedPages) {
- kfree(pages);
- region->numLockedPages = 0;
-
- if (rc >= 0) {
- rc = -EINVAL;
- }
- } else {
- uint8_t *virtAddr = region->virtAddr;
- size_t bytesRemaining;
- int pageIdx;
-
- rc = 0; /* Since get_user_pages returns +ve number */
-
- region->lockedPages = pages;
-
- /* We've locked the user pages. Now we need to walk them and figure */
- /* out the physical addresses. */
-
- /* The first page may be partial */
-
- dma_map_add_segment(memMap,
- region,
- virtAddr,
- PFN_PHYS(page_to_pfn
- (pages[0])) +
- firstPageOffset,
- firstPageSize);
-
- virtAddr += firstPageSize;
- bytesRemaining =
- region->numBytes - firstPageSize;
-
- for (pageIdx = 1;
- pageIdx < region->numLockedPages;
- pageIdx++) {
- size_t bytesThisPage =
- (bytesRemaining >
- PAGE_SIZE ? PAGE_SIZE :
- bytesRemaining);
-
- DMA_MAP_PRINT
- ("pageIdx:%d pages[pageIdx]=%p pfn=%u phys=%u\n",
- pageIdx, pages[pageIdx],
- page_to_pfn(pages[pageIdx]),
- PFN_PHYS(page_to_pfn
- (pages[pageIdx])));
-
- dma_map_add_segment(memMap,
- region,
- virtAddr,
- PFN_PHYS(page_to_pfn
- (pages
- [pageIdx])),
- bytesThisPage);
-
- virtAddr += bytesThisPage;
- bytesRemaining -= bytesThisPage;
- }
- }
-#else
- printk(KERN_ERR
- "%s: User mode pages are not yet supported\n",
- __func__);
-
- /* user pages are not physically contiguous */
-
- rc = -EINVAL;
-#endif
- break;
- }
-
- default:
- {
- printk(KERN_ERR "%s: Unsupported memory type: %d\n",
- __func__, region->memType);
-
- rc = -EINVAL;
- break;
- }
- }
-
- if (rc != 0) {
- memMap->numRegionsUsed--;
- }
-
-out:
-
- DMA_MAP_PRINT("returning %d\n", rc);
-
- up(&memMap->lock);
-
- return rc;
-}
-
-EXPORT_SYMBOL(dma_map_add_segment);
-
-/****************************************************************************/
-/**
-* Maps in a memory region such that it can be used for performing a DMA.
-*
-* @return 0 on success, error code otherwise.
-*/
-/****************************************************************************/
-
-int dma_map_mem(DMA_MemMap_t *memMap, /* Stores state information about the map */
- void *mem, /* Virtual address that we want to get a map of */
- size_t numBytes, /* Number of bytes being mapped */
- enum dma_data_direction dir /* Direction that the mapping will be going */
- ) {
- int rc;
-
- rc = dma_map_start(memMap, dir);
- if (rc == 0) {
- rc = dma_map_add_region(memMap, mem, numBytes);
- if (rc < 0) {
- /* Since the add fails, this function will fail, and the caller won't */
- /* call unmap, so we need to do it here. */
-
- dma_unmap(memMap, 0);
- }
- }
-
- return rc;
-}
-
-EXPORT_SYMBOL(dma_map_mem);
-
-/****************************************************************************/
-/**
-* Setup a descriptor ring for a given memory map.
-*
-* It is assumed that the descriptor ring has already been initialized, and
-* this routine will only reallocate a new descriptor ring if the existing
-* one is too small.
-*
-* @return 0 on success, error code otherwise.
-*/
-/****************************************************************************/
-
-int dma_map_create_descriptor_ring(DMA_Device_t dev, /* DMA device (where the ring is stored) */
- DMA_MemMap_t *memMap, /* Memory map that will be used */
- dma_addr_t devPhysAddr /* Physical address of device */
- ) {
- int rc;
- int numDescriptors;
- DMA_DeviceAttribute_t *devAttr;
- DMA_Region_t *region;
- DMA_Segment_t *segment;
- dma_addr_t srcPhysAddr;
- dma_addr_t dstPhysAddr;
- int regionIdx;
- int segmentIdx;
-
- devAttr = &DMA_gDeviceAttribute[dev];
-
- down(&memMap->lock);
-
- /* Figure out how many descriptors we need */
-
- numDescriptors = 0;
- for (regionIdx = 0; regionIdx < memMap->numRegionsUsed; regionIdx++) {
- region = &memMap->region[regionIdx];
-
- for (segmentIdx = 0; segmentIdx < region->numSegmentsUsed;
- segmentIdx++) {
- segment = &region->segment[segmentIdx];
-
- if (memMap->dir == DMA_TO_DEVICE) {
- srcPhysAddr = segment->physAddr;
- dstPhysAddr = devPhysAddr;
- } else {
- srcPhysAddr = devPhysAddr;
- dstPhysAddr = segment->physAddr;
- }
-
- rc =
- dma_calculate_descriptor_count(dev, srcPhysAddr,
- dstPhysAddr,
- segment->
- numBytes);
- if (rc < 0) {
- printk(KERN_ERR
- "%s: dma_calculate_descriptor_count failed: %d\n",
- __func__, rc);
- goto out;
- }
- numDescriptors += rc;
- }
- }
-
- /* Adjust the size of the ring, if it isn't big enough */
-
- if (numDescriptors > devAttr->ring.descriptorsAllocated) {
- dma_free_descriptor_ring(&devAttr->ring);
- rc =
- dma_alloc_descriptor_ring(&devAttr->ring,
- numDescriptors);
- if (rc < 0) {
- printk(KERN_ERR
- "%s: dma_alloc_descriptor_ring failed: %d\n",
- __func__, rc);
- goto out;
- }
- } else {
- rc =
- dma_init_descriptor_ring(&devAttr->ring,
- numDescriptors);
- if (rc < 0) {
- printk(KERN_ERR
- "%s: dma_init_descriptor_ring failed: %d\n",
- __func__, rc);
- goto out;
- }
- }
-
- /* Populate the descriptors */
-
- for (regionIdx = 0; regionIdx < memMap->numRegionsUsed; regionIdx++) {
- region = &memMap->region[regionIdx];
-
- for (segmentIdx = 0; segmentIdx < region->numSegmentsUsed;
- segmentIdx++) {
- segment = &region->segment[segmentIdx];
-
- if (memMap->dir == DMA_TO_DEVICE) {
- srcPhysAddr = segment->physAddr;
- dstPhysAddr = devPhysAddr;
- } else {
- srcPhysAddr = devPhysAddr;
- dstPhysAddr = segment->physAddr;
- }
-
- rc =
- dma_add_descriptors(&devAttr->ring, dev,
- srcPhysAddr, dstPhysAddr,
- segment->numBytes);
- if (rc < 0) {
- printk(KERN_ERR
- "%s: dma_add_descriptors failed: %d\n",
- __func__, rc);
- goto out;
- }
- }
- }
-
- rc = 0;
-
-out:
-
- up(&memMap->lock);
- return rc;
-}
-
-EXPORT_SYMBOL(dma_map_create_descriptor_ring);
-
-/****************************************************************************/
-/**
-* Maps in a memory region such that it can be used for performing a DMA.
-*
-* @return
-*/
-/****************************************************************************/
-
-int dma_unmap(DMA_MemMap_t *memMap, /* Stores state information about the map */
- int dirtied /* non-zero if any of the pages were modified */
- ) {
-
- int rc = 0;
- int regionIdx;
- int segmentIdx;
- DMA_Region_t *region;
- DMA_Segment_t *segment;
-
- down(&memMap->lock);
-
- for (regionIdx = 0; regionIdx < memMap->numRegionsUsed; regionIdx++) {
- region = &memMap->region[regionIdx];
-
- for (segmentIdx = 0; segmentIdx < region->numSegmentsUsed;
- segmentIdx++) {
- segment = &region->segment[segmentIdx];
-
- switch (region->memType) {
- case DMA_MEM_TYPE_VMALLOC:
- {
- printk(KERN_ERR
- "%s: vmalloc'd pages are not yet supported\n",
- __func__);
- rc = -EINVAL;
- goto out;
- }
-
- case DMA_MEM_TYPE_KMALLOC:
- {
-#if ALLOW_MAP_OF_KMALLOC_MEMORY
- dma_unmap_single(NULL,
- segment->physAddr,
- segment->numBytes,
- memMap->dir);
-#endif
- break;
- }
-
- case DMA_MEM_TYPE_DMA:
- {
- dma_sync_single_for_cpu(NULL,
- segment->
- physAddr,
- segment->
- numBytes,
- memMap->dir);
- break;
- }
-
- case DMA_MEM_TYPE_USER:
- {
- /* Nothing to do here. */
-
- break;
- }
-
- default:
- {
- printk(KERN_ERR
- "%s: Unsupported memory type: %d\n",
- __func__, region->memType);
- rc = -EINVAL;
- goto out;
- }
- }
-
- segment->virtAddr = NULL;
- segment->physAddr = 0;
- segment->numBytes = 0;
- }
-
- if (region->numLockedPages > 0) {
- int pageIdx;
-
- /* Some user pages were locked. We need to go and unlock them now. */
-
- for (pageIdx = 0; pageIdx < region->numLockedPages;
- pageIdx++) {
- struct page *page =
- region->lockedPages[pageIdx];
-
- if (memMap->dir == DMA_FROM_DEVICE) {
- SetPageDirty(page);
- }
- page_cache_release(page);
- }
- kfree(region->lockedPages);
- region->numLockedPages = 0;
- region->lockedPages = NULL;
- }
-
- region->memType = DMA_MEM_TYPE_NONE;
- region->virtAddr = NULL;
- region->numBytes = 0;
- region->numSegmentsUsed = 0;
- }
- memMap->userTask = NULL;
- memMap->numRegionsUsed = 0;
- memMap->inUse = 0;
-
-out:
- up(&memMap->lock);
-
- return rc;
-}
-
-EXPORT_SYMBOL(dma_unmap);