- /*
- * Driver for the Atmel AHB DMA Controller (aka HDMA or DMAC on AT91 systems)
- *
- * Copyright (C) 2008 Atmel Corporation
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- *
- * This supports the Atmel AHB DMA Controller.
- *
- * The driver has currently been tested with the Atmel AT91SAM9RL
- * and AT91SAM9G45 series.
- */
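- /*
- * Rough client-side usage sketch (illustrative only, using the generic
- * dmaengine API; none of the names below are defined in this file):
- *
- *	chan = dma_request_channel(mask, filter_fn, &atslave);
- *	desc = chan->device->device_prep_slave_sg(chan, sgl, sg_len,
- *						   DMA_TO_DEVICE, flags);
- *	cookie = desc->tx_submit(desc);		-> atc_tx_submit()
- *	dma_async_issue_pending(chan);		-> atc_issue_pending()
- */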
- #include <linux/clk.h>
- #include <linux/dmaengine.h>
- #include <linux/dma-mapping.h>
- #include <linux/dmapool.h>
- #include <linux/interrupt.h>
- #include <linux/module.h>
- #include <linux/platform_device.h>
- #include <linux/slab.h>
- #include "at_hdmac_regs.h"
- /*
- * Glossary
- * --------
- *
- * at_hdmac : Name of the Atmel AHB DMA Controller
- * at_dma_ / atdma : Atmel DMA controller entity related
- * atc_ / atchan : Atmel DMA Channel entity related
- */
- #define ATC_DEFAULT_CFG (ATC_FIFOCFG_HALFFIFO)
- #define ATC_DEFAULT_CTRLA (0)
- #define ATC_DEFAULT_CTRLB (ATC_SIF(AT_DMA_MEM_IF) \
- |ATC_DIF(AT_DMA_MEM_IF))
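- /*
- * CFG is programmed once per channel in atc_alloc_chan_resources() and
- * defaults to the half-FIFO configuration; the CTRLA/CTRLB defaults are
- * the base used by memcpy transfers, which OR in their own direction and
- * width bits in the prep_* functions.
- */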
- /*
- * Initial number of descriptors to allocate for each channel. This can
- * be increased during DMA usage.
- */
- static unsigned int init_nr_desc_per_channel = 64;
- module_param(init_nr_desc_per_channel, uint, 0644);
- MODULE_PARM_DESC(init_nr_desc_per_channel,
- "initial descriptors per channel (default: 64)");
- /* prototypes */
- static dma_cookie_t atc_tx_submit(struct dma_async_tx_descriptor *tx);
- /*----------------------------------------------------------------------*/
- static struct at_desc *atc_first_active(struct at_dma_chan *atchan)
- {
- return list_first_entry(&atchan->active_list,
- struct at_desc, desc_node);
- }
- static struct at_desc *atc_first_queued(struct at_dma_chan *atchan)
- {
- return list_first_entry(&atchan->queue,
- struct at_desc, desc_node);
- }
- /**
- * atc_alloc_descriptor - allocate and return an initialized descriptor
- * @chan: the channel to allocate descriptors for
- * @gfp_flags: GFP allocation flags
- *
- * Note: The ack-bit is set in the descriptor's flags at creation time so
- * that the initial allocation is easier to handle. This bit will be cleared
- * and control will be given to the client at usage time (during the
- * preparation functions).
- */
- static struct at_desc *atc_alloc_descriptor(struct dma_chan *chan,
- gfp_t gfp_flags)
- {
- struct at_desc *desc = NULL;
- struct at_dma *atdma = to_at_dma(chan->device);
- dma_addr_t phys;
- desc = dma_pool_alloc(atdma->dma_desc_pool, gfp_flags, &phys);
- if (desc) {
- memset(desc, 0, sizeof(struct at_desc));
- INIT_LIST_HEAD(&desc->tx_list);
- dma_async_tx_descriptor_init(&desc->txd, chan);
- /* txd.flags will be overwritten in prep functions */
- desc->txd.flags = DMA_CTRL_ACK;
- desc->txd.tx_submit = atc_tx_submit;
- desc->txd.phys = phys;
- }
- return desc;
- }
- /**
- * atc_desc_get - get an unused descriptor from free_list
- * @atchan: channel we want a new descriptor for
- */
- static struct at_desc *atc_desc_get(struct at_dma_chan *atchan)
- {
- struct at_desc *desc, *_desc;
- struct at_desc *ret = NULL;
- unsigned long flags;
- unsigned int i = 0;
- LIST_HEAD(tmp_list);
- spin_lock_irqsave(&atchan->lock, flags);
- list_for_each_entry_safe(desc, _desc, &atchan->free_list, desc_node) {
- i++;
- if (async_tx_test_ack(&desc->txd)) {
- list_del(&desc->desc_node);
- ret = desc;
- break;
- }
- dev_dbg(chan2dev(&atchan->chan_common),
- "desc %p not ACKed\n", desc);
- }
- spin_unlock_irqrestore(&atchan->lock, flags);
- dev_vdbg(chan2dev(&atchan->chan_common),
- "scanned %u descriptors on freelist\n", i);
- /* no more descriptors available in the initial pool: allocate one more */
- if (!ret) {
- ret = atc_alloc_descriptor(&atchan->chan_common, GFP_ATOMIC);
- if (ret) {
- spin_lock_irqsave(&atchan->lock, flags);
- atchan->descs_allocated++;
- spin_unlock_irqrestore(&atchan->lock, flags);
- } else {
- dev_err(chan2dev(&atchan->chan_common),
- "not enough descriptors available\n");
- }
- }
- return ret;
- }
- /**
- * atc_desc_put - move a descriptor, including any children, to the free list
- * @atchan: channel we work on
- * @desc: descriptor, at the head of a chain, to move to free list
- */
- static void atc_desc_put(struct at_dma_chan *atchan, struct at_desc *desc)
- {
- if (desc) {
- struct at_desc *child;
- unsigned long flags;
- spin_lock_irqsave(&atchan->lock, flags);
- list_for_each_entry(child, &desc->tx_list, desc_node)
- dev_vdbg(chan2dev(&atchan->chan_common),
- "moving child desc %p to freelist\n",
- child);
- list_splice_init(&desc->tx_list, &atchan->free_list);
- dev_vdbg(chan2dev(&atchan->chan_common),
- "moving desc %p to freelist\n", desc);
- list_add(&desc->desc_node, &atchan->free_list);
- spin_unlock_irqrestore(&atchan->lock, flags);
- }
- }
- /**
- * atc_desc_chain - build chain by adding a descriptor
- * @first: address of the first descriptor of the chain
- * @prev: address of the previous descriptor of the chain
- * @desc: descriptor to queue
- *
- * Called from prep_* functions
- */
- static void atc_desc_chain(struct at_desc **first, struct at_desc **prev,
- struct at_desc *desc)
- {
- if (!(*first)) {
- *first = desc;
- } else {
- /* inform the HW lli about chaining */
- (*prev)->lli.dscr = desc->txd.phys;
- /* insert the link descriptor to the LD ring */
- list_add_tail(&desc->desc_node,
- &(*first)->tx_list);
- }
- *prev = desc;
- }
- /**
- * atc_assign_cookie - compute and assign new cookie
- * @atchan: channel we work on
- * @desc: descriptor to assign cookie for
- *
- * Called with atchan->lock held and bh disabled
- */
- static dma_cookie_t
- atc_assign_cookie(struct at_dma_chan *atchan, struct at_desc *desc)
- {
- dma_cookie_t cookie = atchan->chan_common.cookie;
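- /* cookies are positive and monotonically increasing; on overflow wrap
- * back to 1, since negative values are used as error codes (see the
- * -EBUSY marker set by the prep functions) */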
- if (++cookie < 0)
- cookie = 1;
- atchan->chan_common.cookie = cookie;
- desc->txd.cookie = cookie;
- return cookie;
- }
- /**
- * atc_dostart - starts the DMA engine for real
- * @atchan: the channel we want to start
- * @first: first descriptor in the list we want to begin with
- *
- * Called with atchan->lock held and bh disabled
- */
- static void atc_dostart(struct at_dma_chan *atchan, struct at_desc *first)
- {
- struct at_dma *atdma = to_at_dma(atchan->chan_common.device);
- /* ASSERT: channel is idle */
- if (atc_chan_is_enabled(atchan)) {
- dev_err(chan2dev(&atchan->chan_common),
- "BUG: Attempted to start non-idle channel\n");
- dev_err(chan2dev(&atchan->chan_common),
- " channel: s0x%x d0x%x ctrl0x%x:0x%x l0x%x\n",
- channel_readl(atchan, SADDR),
- channel_readl(atchan, DADDR),
- channel_readl(atchan, CTRLA),
- channel_readl(atchan, CTRLB),
- channel_readl(atchan, DSCR));
- /* The tasklet will hopefully advance the queue... */
- return;
- }
- vdbg_dump_regs(atchan);
- /* clear any pending interrupt */
- while (dma_readl(atdma, EBCISR))
- cpu_relax();
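- /* program the channel: the per-channel registers are cleared and DSCR
- * points at the first hardware linked-list item, so the controller
- * fetches SADDR/DADDR/CTRLA/CTRLB from the descriptor chain itself */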
- channel_writel(atchan, SADDR, 0);
- channel_writel(atchan, DADDR, 0);
- channel_writel(atchan, CTRLA, 0);
- channel_writel(atchan, CTRLB, 0);
- channel_writel(atchan, DSCR, first->txd.phys);
- dma_writel(atdma, CHER, atchan->mask);
- vdbg_dump_regs(atchan);
- }
- /**
- * atc_chain_complete - finish work for one transaction chain
- * @atchan: channel we work on
- * @desc: descriptor at the head of the chain we want to complete
- *
- * Called with atchan->lock held and bh disabled */
- static void
- atc_chain_complete(struct at_dma_chan *atchan, struct at_desc *desc)
- {
- struct dma_async_tx_descriptor *txd = &desc->txd;
- dev_vdbg(chan2dev(&atchan->chan_common),
- "descriptor %u complete\n", txd->cookie);
- atchan->completed_cookie = txd->cookie;
- /* move children to free_list */
- list_splice_init(&desc->tx_list, &atchan->free_list);
- /* move myself to free_list */
- list_move(&desc->desc_node, &atchan->free_list);
- /* unmap dma addresses (not on slave channels) */
- if (!atchan->chan_common.private) {
- struct device *parent = chan2parent(&atchan->chan_common);
- if (!(txd->flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
- if (txd->flags & DMA_COMPL_DEST_UNMAP_SINGLE)
- dma_unmap_single(parent,
- desc->lli.daddr,
- desc->len, DMA_FROM_DEVICE);
- else
- dma_unmap_page(parent,
- desc->lli.daddr,
- desc->len, DMA_FROM_DEVICE);
- }
- if (!(txd->flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
- if (txd->flags & DMA_COMPL_SRC_UNMAP_SINGLE)
- dma_unmap_single(parent,
- desc->lli.saddr,
- desc->len, DMA_TO_DEVICE);
- else
- dma_unmap_page(parent,
- desc->lli.saddr,
- desc->len, DMA_TO_DEVICE);
- }
- }
- /* for cyclic transfers,
- * there is no need to invoke the callback again while stopping */
- if (!atc_chan_is_cyclic(atchan)) {
- dma_async_tx_callback callback = txd->callback;
- void *param = txd->callback_param;
- /*
- * The API requires that no submissions are done from a
- * callback, so we don't need to drop the lock here
- */
- if (callback)
- callback(param);
- }
- dma_run_dependencies(txd);
- }
- /**
- * atc_complete_all - finish work for all transactions
- * @atchan: channel to complete transactions for
- *
- * Also submits queued descriptors, if any
- *
- * Assumes the channel is idle when this function is called
- * Called with atchan->lock held and bh disabled
- */
- static void atc_complete_all(struct at_dma_chan *atchan)
- {
- struct at_desc *desc, *_desc;
- LIST_HEAD(list);
- dev_vdbg(chan2dev(&atchan->chan_common), "complete all\n");
- BUG_ON(atc_chan_is_enabled(atchan));
- /*
- * Submit queued descriptors ASAP, i.e. before we go through
- * the completed ones.
- */
- if (!list_empty(&atchan->queue))
- atc_dostart(atchan, atc_first_queued(atchan));
- /* empty active_list now that it is completed */
- list_splice_init(&atchan->active_list, &list);
- /* empty queue list by moving descriptors (if any) to active_list */
- list_splice_init(&atchan->queue, &atchan->active_list);
- list_for_each_entry_safe(desc, _desc, &list, desc_node)
- atc_chain_complete(atchan, desc);
- }
- /**
- * atc_cleanup_descriptors - clean up finished descriptors in active_list
- * @atchan: channel to be cleaned up
- *
- * Called with atchan->lock held and bh disabled
- */
- static void atc_cleanup_descriptors(struct at_dma_chan *atchan)
- {
- struct at_desc *desc, *_desc;
- struct at_desc *child;
- dev_vdbg(chan2dev(&atchan->chan_common), "cleanup descriptors\n");
- list_for_each_entry_safe(desc, _desc, &atchan->active_list, desc_node) {
- if (!(desc->lli.ctrla & ATC_DONE))
- /* This one is currently in progress */
- return;
- list_for_each_entry(child, &desc->tx_list, desc_node)
- if (!(child->lli.ctrla & ATC_DONE))
- /* Currently in progress */
- return;
- /*
- * No descriptors so far seem to be in progress, i.e.
- * this chain must be done.
- */
- atc_chain_complete(atchan, desc);
- }
- }
- /**
- * atc_advance_work - at the end of a transaction, move forward
- * @atchan: channel where the transaction ended
- *
- * Called with atchan->lock held and bh disabled
- */
- static void atc_advance_work(struct at_dma_chan *atchan)
- {
- dev_vdbg(chan2dev(&atchan->chan_common), "advance_work\n");
- if (list_empty(&atchan->active_list) ||
- list_is_singular(&atchan->active_list)) {
- atc_complete_all(atchan);
- } else {
- atc_chain_complete(atchan, atc_first_active(atchan));
- /* advance work */
- atc_dostart(atchan, atc_first_active(atchan));
- }
- }
- /**
- * atc_handle_error - handle errors reported by DMA controller
- * @atchan: channel where error occurs
- *
- * Called with atchan->lock held and bh disabled
- */
- static void atc_handle_error(struct at_dma_chan *atchan)
- {
- struct at_desc *bad_desc;
- struct at_desc *child;
- /*
- * The descriptor currently at the head of the active list is
- * broken. Since we don't have any way to report errors, we'll
- * just have to scream loudly and try to carry on.
- */
- bad_desc = atc_first_active(atchan);
- list_del_init(&bad_desc->desc_node);
- /* As we are stopped, take advantage to push queued descriptors
- * in active_list */
- list_splice_init(&atchan->queue, atchan->active_list.prev);
- /* Try to restart the controller */
- if (!list_empty(&atchan->active_list))
- atc_dostart(atchan, atc_first_active(atchan));
- /*
- * KERN_CRIT may seem harsh, but since this only happens
- * when someone submits a bad physical address in a
- * descriptor, we should consider ourselves lucky that the
- * controller flagged an error instead of scribbling over
- * random memory locations.
- */
- dev_crit(chan2dev(&atchan->chan_common),
- "Bad descriptor submitted for DMA!\n");
- dev_crit(chan2dev(&atchan->chan_common),
- " cookie: %d\n", bad_desc->txd.cookie);
- atc_dump_lli(atchan, &bad_desc->lli);
- list_for_each_entry(child, &bad_desc->tx_list, desc_node)
- atc_dump_lli(atchan, &child->lli);
- /* Pretend the descriptor completed successfully */
- atc_chain_complete(atchan, bad_desc);
- }
- /**
- * atc_handle_cyclic - at the end of a period, run callback function
- * @atchan: channel used for cyclic operations
- *
- * Called with atchan->lock held and bh disabled
- */
- static void atc_handle_cyclic(struct at_dma_chan *atchan)
- {
- struct at_desc *first = atc_first_active(atchan);
- struct dma_async_tx_descriptor *txd = &first->txd;
- dma_async_tx_callback callback = txd->callback;
- void *param = txd->callback_param;
- dev_vdbg(chan2dev(&atchan->chan_common),
- "new cyclic period llp 0x%08x\n",
- channel_readl(atchan, DSCR));
- if (callback)
- callback(param);
- }
- /*-- IRQ & Tasklet ---------------------------------------------------*/
- static void atc_tasklet(unsigned long data)
- {
- struct at_dma_chan *atchan = (struct at_dma_chan *)data;
- unsigned long flags;
- spin_lock_irqsave(&atchan->lock, flags);
- if (test_and_clear_bit(ATC_IS_ERROR, &atchan->status))
- atc_handle_error(atchan);
- else if (atc_chan_is_cyclic(atchan))
- atc_handle_cyclic(atchan);
- else
- atc_advance_work(atchan);
- spin_unlock_irqrestore(&atchan->lock, flags);
- }
- static irqreturn_t at_dma_interrupt(int irq, void *dev_id)
- {
- struct at_dma *atdma = (struct at_dma *)dev_id;
- struct at_dma_chan *atchan;
- int i;
- u32 status, pending, imr;
- int ret = IRQ_NONE;
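- /* the status register is clear-on-read: each pass acknowledges the
- * events it observed, so keep looping until no enabled event is left */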
- do {
- imr = dma_readl(atdma, EBCIMR);
- status = dma_readl(atdma, EBCISR);
- pending = status & imr;
- if (!pending)
- break;
- dev_vdbg(atdma->dma_common.dev,
- "interrupt: status = 0x%08x, 0x%08x, 0x%08x\n",
- status, imr, pending);
- for (i = 0; i < atdma->dma_common.chancnt; i++) {
- atchan = &atdma->chan[i];
- if (pending & (AT_DMA_BTC(i) | AT_DMA_ERR(i))) {
- if (pending & AT_DMA_ERR(i)) {
- /* Disable channel on AHB error */
- dma_writel(atdma, CHDR,
- AT_DMA_RES(i) | atchan->mask);
- /* Give information to tasklet */
- set_bit(ATC_IS_ERROR, &atchan->status);
- }
- tasklet_schedule(&atchan->tasklet);
- ret = IRQ_HANDLED;
- }
- }
- } while (pending);
- return ret;
- }
- /*-- DMA Engine API --------------------------------------------------*/
- /**
- * atc_tx_submit - set the prepared descriptor(s) to be executed by the engine
- * @desc: descriptor at the head of the transaction chain
- *
- * Queue chain if DMA engine is working already
- *
- * Cookie increment and adding to active_list or queue must be atomic
- */
- static dma_cookie_t atc_tx_submit(struct dma_async_tx_descriptor *tx)
- {
- struct at_desc *desc = txd_to_at_desc(tx);
- struct at_dma_chan *atchan = to_at_dma_chan(tx->chan);
- dma_cookie_t cookie;
- unsigned long flags;
- spin_lock_irqsave(&atchan->lock, flags);
- cookie = atc_assign_cookie(atchan, desc);
- if (list_empty(&atchan->active_list)) {
- dev_vdbg(chan2dev(tx->chan), "tx_submit: started %u\n",
- desc->txd.cookie);
- atc_dostart(atchan, desc);
- list_add_tail(&desc->desc_node, &atchan->active_list);
- } else {
- dev_vdbg(chan2dev(tx->chan), "tx_submit: queued %u\n",
- desc->txd.cookie);
- list_add_tail(&desc->desc_node, &atchan->queue);
- }
- spin_unlock_irqrestore(&atchan->lock, flags);
- return cookie;
- }
- /**
- * atc_prep_dma_memcpy - prepare a memcpy operation
- * @chan: the channel to prepare operation on
- * @dest: operation virtual destination address
- * @src: operation virtual source address
- * @len: operation length
- * @flags: tx descriptor status flags
- */
- static struct dma_async_tx_descriptor *
- atc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
- size_t len, unsigned long flags)
- {
- struct at_dma_chan *atchan = to_at_dma_chan(chan);
- struct at_desc *desc = NULL;
- struct at_desc *first = NULL;
- struct at_desc *prev = NULL;
- size_t xfer_count;
- size_t offset;
- unsigned int src_width;
- unsigned int dst_width;
- u32 ctrla;
- u32 ctrlb;
- dev_vdbg(chan2dev(chan), "prep_dma_memcpy: d0x%x s0x%x l0x%zx f0x%lx\n",
- dest, src, len, flags);
- if (unlikely(!len)) {
- dev_dbg(chan2dev(chan), "prep_dma_memcpy: length is zero!\n");
- return NULL;
- }
- ctrla = ATC_DEFAULT_CTRLA;
- ctrlb = ATC_DEFAULT_CTRLB | ATC_IEN
- | ATC_SRC_ADDR_MODE_INCR
- | ATC_DST_ADDR_MODE_INCR
- | ATC_FC_MEM2MEM;
- /*
- * We can be a lot more clever here, but this should take care
- * of the most common optimization.
- */
- if (!((src | dest | len) & 3)) {
- ctrla |= ATC_SRC_WIDTH_WORD | ATC_DST_WIDTH_WORD;
- src_width = dst_width = 2;
- } else if (!((src | dest | len) & 1)) {
- ctrla |= ATC_SRC_WIDTH_HALFWORD | ATC_DST_WIDTH_HALFWORD;
- src_width = dst_width = 1;
- } else {
- ctrla |= ATC_SRC_WIDTH_BYTE | ATC_DST_WIDTH_BYTE;
- src_width = dst_width = 0;
- }
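- /*
- * src_width/dst_width encode the transfer width as a power of two
- * (0 = byte, 1 = half-word, 2 = word); xfer_count below is expressed
- * in units of that width, hence the shifts by src_width.
- */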
- for (offset = 0; offset < len; offset += xfer_count << src_width) {
- xfer_count = min_t(size_t, (len - offset) >> src_width,
- ATC_BTSIZE_MAX);
- desc = atc_desc_get(atchan);
- if (!desc)
- goto err_desc_get;
- desc->lli.saddr = src + offset;
- desc->lli.daddr = dest + offset;
- desc->lli.ctrla = ctrla | xfer_count;
- desc->lli.ctrlb = ctrlb;
- desc->txd.cookie = 0;
- atc_desc_chain(&first, &prev, desc);
- }
- /* First descriptor of the chain embeds additional information */
- first->txd.cookie = -EBUSY;
- first->len = len;
- /* set end-of-link to the last link descriptor of the list */
- set_desc_eol(desc);
- first->txd.flags = flags; /* client is in control of this ack */
- return &first->txd;
- err_desc_get:
- atc_desc_put(atchan, first);
- return NULL;
- }
- /**
- * atc_prep_slave_sg - prepare descriptors for a DMA_SLAVE transaction
- * @chan: DMA channel
- * @sgl: scatterlist to transfer to/from
- * @sg_len: number of entries in @sgl
- * @direction: DMA direction
- * @flags: tx descriptor status flags
- */
- static struct dma_async_tx_descriptor *
- atc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
- unsigned int sg_len, enum dma_data_direction direction,
- unsigned long flags)
- {
- struct at_dma_chan *atchan = to_at_dma_chan(chan);
- struct at_dma_slave *atslave = chan->private;
- struct at_desc *first = NULL;
- struct at_desc *prev = NULL;
- u32 ctrla;
- u32 ctrlb;
- dma_addr_t reg;
- unsigned int reg_width;
- unsigned int mem_width;
- unsigned int i;
- struct scatterlist *sg;
- size_t total_len = 0;
- dev_vdbg(chan2dev(chan), "prep_slave_sg (%d): %s f0x%lx\n",
- sg_len,
- direction == DMA_TO_DEVICE ? "TO DEVICE" : "FROM DEVICE",
- flags);
- if (unlikely(!atslave || !sg_len)) {
- dev_dbg(chan2dev(chan), "prep_dma_memcpy: length is zero!\n");
- return NULL;
- }
- reg_width = atslave->reg_width;
- ctrla = ATC_DEFAULT_CTRLA | atslave->ctrla;
- ctrlb = ATC_IEN;
- switch (direction) {
- case DMA_TO_DEVICE:
- ctrla |= ATC_DST_WIDTH(reg_width);
- ctrlb |= ATC_DST_ADDR_MODE_FIXED
- | ATC_SRC_ADDR_MODE_INCR
- | ATC_FC_MEM2PER
- | ATC_SIF(AT_DMA_MEM_IF) | ATC_DIF(AT_DMA_PER_IF);
- reg = atslave->tx_reg;
- for_each_sg(sgl, sg, sg_len, i) {
- struct at_desc *desc;
- u32 len;
- u32 mem;
- desc = atc_desc_get(atchan);
- if (!desc)
- goto err_desc_get;
- mem = sg_dma_address(sg);
- len = sg_dma_len(sg);
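- /* use 32-bit memory accesses unless the buffer address or
- * length forces us down to byte accesses */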
- mem_width = 2;
- if (unlikely(mem & 3 || len & 3))
- mem_width = 0;
- desc->lli.saddr = mem;
- desc->lli.daddr = reg;
- desc->lli.ctrla = ctrla
- | ATC_SRC_WIDTH(mem_width)
- | len >> mem_width;
- desc->lli.ctrlb = ctrlb;
- atc_desc_chain(&first, &prev, desc);
- total_len += len;
- }
- break;
- case DMA_FROM_DEVICE:
- ctrla |= ATC_SRC_WIDTH(reg_width);
- ctrlb |= ATC_DST_ADDR_MODE_INCR
- | ATC_SRC_ADDR_MODE_FIXED
- | ATC_FC_PER2MEM
- | ATC_SIF(AT_DMA_PER_IF) | ATC_DIF(AT_DMA_MEM_IF);
- reg = atslave->rx_reg;
- for_each_sg(sgl, sg, sg_len, i) {
- struct at_desc *desc;
- u32 len;
- u32 mem;
- desc = atc_desc_get(atchan);
- if (!desc)
- goto err_desc_get;
- mem = sg_dma_address(sg);
- len = sg_dma_len(sg);
- mem_width = 2;
- if (unlikely(mem & 3 || len & 3))
- mem_width = 0;
- desc->lli.saddr = reg;
- desc->lli.daddr = mem;
- desc->lli.ctrla = ctrla
- | ATC_DST_WIDTH(mem_width)
- | len >> reg_width;
- desc->lli.ctrlb = ctrlb;
- atc_desc_chain(&first, &prev, desc);
- total_len += len;
- }
- break;
- default:
- return NULL;
- }
- /* set end-of-link to the last link descriptor of the list */
- set_desc_eol(prev);
- /* First descriptor of the chain embeds additional information */
- first->txd.cookie = -EBUSY;
- first->len = total_len;
- /* first link descriptor of the list is responsible for the flags */
- first->txd.flags = flags; /* client is in control of this ack */
- return &first->txd;
- err_desc_get:
- dev_err(chan2dev(chan), "not enough descriptors available\n");
- atc_desc_put(atchan, first);
- return NULL;
- }
- /**
- * atc_dma_cyclic_check_values
- * Check for too big/unaligned periods and unaligned DMA buffer
- */
- static int
- atc_dma_cyclic_check_values(unsigned int reg_width, dma_addr_t buf_addr,
- size_t period_len, enum dma_data_direction direction)
- {
- if (period_len > (ATC_BTSIZE_MAX << reg_width))
- goto err_out;
- if (unlikely(period_len & ((1 << reg_width) - 1)))
- goto err_out;
- if (unlikely(buf_addr & ((1 << reg_width) - 1)))
- goto err_out;
- if (unlikely(!(direction & (DMA_TO_DEVICE | DMA_FROM_DEVICE))))
- goto err_out;
- return 0;
- err_out:
- return -EINVAL;
- }
- /**
- * atc_dma_cyclic_fill_desc - Fill one period descriptor
- */
- static int
- atc_dma_cyclic_fill_desc(struct at_dma_slave *atslave, struct at_desc *desc,
- unsigned int period_index, dma_addr_t buf_addr,
- size_t period_len, enum dma_data_direction direction)
- {
- u32 ctrla;
- unsigned int reg_width = atslave->reg_width;
- /* prepare common CTRLA value */
- ctrla = ATC_DEFAULT_CTRLA | atslave->ctrla
- | ATC_DST_WIDTH(reg_width)
- | ATC_SRC_WIDTH(reg_width)
- | period_len >> reg_width;
- switch (direction) {
- case DMA_TO_DEVICE:
- desc->lli.saddr = buf_addr + (period_len * period_index);
- desc->lli.daddr = atslave->tx_reg;
- desc->lli.ctrla = ctrla;
- desc->lli.ctrlb = ATC_DST_ADDR_MODE_FIXED
- | ATC_SRC_ADDR_MODE_INCR
- | ATC_FC_MEM2PER
- | ATC_SIF(AT_DMA_MEM_IF)
- | ATC_DIF(AT_DMA_PER_IF);
- break;
- case DMA_FROM_DEVICE:
- desc->lli.saddr = atslave->rx_reg;
- desc->lli.daddr = buf_addr + (period_len * period_index);
- desc->lli.ctrla = ctrla;
- desc->lli.ctrlb = ATC_DST_ADDR_MODE_INCR
- | ATC_SRC_ADDR_MODE_FIXED
- | ATC_FC_PER2MEM
- | ATC_SIF(AT_DMA_PER_IF)
- | ATC_DIF(AT_DMA_MEM_IF);
- break;
- default:
- return -EINVAL;
- }
- return 0;
- }
- /**
- * atc_prep_dma_cyclic - prepare the cyclic DMA transfer
- * @chan: the DMA channel to prepare
- * @buf_addr: physical DMA address where the buffer starts
- * @buf_len: total number of bytes for the entire buffer
- * @period_len: number of bytes for each period
- * @direction: transfer direction, to or from device
- */
- static struct dma_async_tx_descriptor *
- atc_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
- size_t period_len, enum dma_data_direction direction)
- {
- struct at_dma_chan *atchan = to_at_dma_chan(chan);
- struct at_dma_slave *atslave = chan->private;
- struct at_desc *first = NULL;
- struct at_desc *prev = NULL;
- unsigned long was_cyclic;
- unsigned int periods = buf_len / period_len;
- unsigned int i;
- dev_vdbg(chan2dev(chan), "prep_dma_cyclic: %s buf@0x%08x - %d (%d/%d)\n",
- direction == DMA_TO_DEVICE ? "TO DEVICE" : "FROM DEVICE",
- buf_addr,
- periods, buf_len, period_len);
- if (unlikely(!atslave || !buf_len || !period_len)) {
- dev_dbg(chan2dev(chan), "prep_dma_cyclic: length is zero!\n");
- return NULL;
- }
- was_cyclic = test_and_set_bit(ATC_IS_CYCLIC, &atchan->status);
- if (was_cyclic) {
- dev_dbg(chan2dev(chan), "prep_dma_cyclic: channel in use!\n");
- return NULL;
- }
- /* Check for too big/unaligned periods and unaligned DMA buffer */
- if (atc_dma_cyclic_check_values(atslave->reg_width, buf_addr,
- period_len, direction))
- goto err_out;
- /* build cyclic linked list */
- for (i = 0; i < periods; i++) {
- struct at_desc *desc;
- desc = atc_desc_get(atchan);
- if (!desc)
- goto err_desc_get;
- if (atc_dma_cyclic_fill_desc(atslave, desc, i, buf_addr,
- period_len, direction))
- goto err_desc_get;
- atc_desc_chain(&first, &prev, desc);
- }
- /* let's make the list cyclic */
- prev->lli.dscr = first->txd.phys;
- /* First descriptor of the chain embeds additional information */
- first->txd.cookie = -EBUSY;
- first->len = buf_len;
- return &first->txd;
- err_desc_get:
- dev_err(chan2dev(chan), "not enough descriptors available\n");
- atc_desc_put(atchan, first);
- err_out:
- clear_bit(ATC_IS_CYCLIC, &atchan->status);
- return NULL;
- }
- static int atc_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
- unsigned long arg)
- {
- struct at_dma_chan *atchan = to_at_dma_chan(chan);
- struct at_dma *atdma = to_at_dma(chan->device);
- int chan_id = atchan->chan_common.chan_id;
- unsigned long flags;
- LIST_HEAD(list);
- dev_vdbg(chan2dev(chan), "atc_control (%d)\n", cmd);
- if (cmd == DMA_PAUSE) {
- spin_lock_irqsave(&atchan->lock, flags);
- dma_writel(atdma, CHER, AT_DMA_SUSP(chan_id));
- set_bit(ATC_IS_PAUSED, &atchan->status);
- spin_unlock_irqrestore(&atchan->lock, flags);
- } else if (cmd == DMA_RESUME) {
- if (!atc_chan_is_paused(atchan))
- return 0;
- spin_lock_irqsave(&atchan->lock, flags);
- dma_writel(atdma, CHDR, AT_DMA_RES(chan_id));
- clear_bit(ATC_IS_PAUSED, &atchan->status);
- spin_unlock_irqrestore(&atchan->lock, flags);
- } else if (cmd == DMA_TERMINATE_ALL) {
- struct at_desc *desc, *_desc;
- /*
- * This is only called when something went wrong elsewhere, so
- * we don't really care about the data. Just disable the
- * channel. We still have to poll the channel enable bit due
- * to AHB/HSB limitations.
- */
- spin_lock_irqsave(&atchan->lock, flags);
- /* disabling channel: must also remove suspend state */
- dma_writel(atdma, CHDR, AT_DMA_RES(chan_id) | atchan->mask);
- /* confirm that this channel is disabled */
- while (dma_readl(atdma, CHSR) & atchan->mask)
- cpu_relax();
- /* active_list entries will end up before queued entries */
- list_splice_init(&atchan->queue, &list);
- list_splice_init(&atchan->active_list, &list);
- /* Flush all pending and queued descriptors */
- list_for_each_entry_safe(desc, _desc, &list, desc_node)
- atc_chain_complete(atchan, desc);
- clear_bit(ATC_IS_PAUSED, &atchan->status);
- /* if channel dedicated to cyclic operations, free it */
- clear_bit(ATC_IS_CYCLIC, &atchan->status);
- spin_unlock_irqrestore(&atchan->lock, flags);
- } else {
- return -ENXIO;
- }
- return 0;
- }
- /**
- * atc_tx_status - poll for transaction completion
- * @chan: DMA channel
- * @cookie: transaction identifier to check status of
- * @txstate: if not %NULL updated with transaction state
- *
- * If @txstate is passed in, upon return it reflects the driver's
- * internal state and can be used with dma_async_is_complete() to check
- * the status of multiple cookies without re-checking hardware state.
- */
- static enum dma_status
- atc_tx_status(struct dma_chan *chan,
- dma_cookie_t cookie,
- struct dma_tx_state *txstate)
- {
- struct at_dma_chan *atchan = to_at_dma_chan(chan);
- dma_cookie_t last_used;
- dma_cookie_t last_complete;
- unsigned long flags;
- enum dma_status ret;
- spin_lock_irqsave(&atchan->lock, flags);
- last_complete = atchan->completed_cookie;
- last_used = chan->cookie;
- ret = dma_async_is_complete(cookie, last_complete, last_used);
- if (ret != DMA_SUCCESS) {
- atc_cleanup_descriptors(atchan);
- last_complete = atchan->completed_cookie;
- last_used = chan->cookie;
- ret = dma_async_is_complete(cookie, last_complete, last_used);
- }
- spin_unlock_irqrestore(&atchan->lock, flags);
- if (ret != DMA_SUCCESS)
- dma_set_tx_state(txstate, last_complete, last_used,
- atc_first_active(atchan)->len);
- else
- dma_set_tx_state(txstate, last_complete, last_used, 0);
- if (atc_chan_is_paused(atchan))
- ret = DMA_PAUSED;
- dev_vdbg(chan2dev(chan), "tx_status %d: cookie = %d (d%d, u%d)\n",
- ret, cookie, last_complete ? last_complete : 0,
- last_used ? last_used : 0);
- return ret;
- }
- /**
- * atc_issue_pending - try to finish work
- * @chan: target DMA channel
- */
- static void atc_issue_pending(struct dma_chan *chan)
- {
- struct at_dma_chan *atchan = to_at_dma_chan(chan);
- unsigned long flags;
- dev_vdbg(chan2dev(chan), "issue_pending\n");
- /* Not needed for cyclic transfers */
- if (atc_chan_is_cyclic(atchan))
- return;
- spin_lock_irqsave(&atchan->lock, flags);
- if (!atc_chan_is_enabled(atchan)) {
- atc_advance_work(atchan);
- }
- spin_unlock_irqrestore(&atchan->lock, flags);
- }
- /**
- * atc_alloc_chan_resources - allocate resources for DMA channel
- * @chan: allocate descriptor resources for this channel
- *
- * return - the number of allocated descriptors
- */
- static int atc_alloc_chan_resources(struct dma_chan *chan)
- {
- struct at_dma_chan *atchan = to_at_dma_chan(chan);
- struct at_dma *atdma = to_at_dma(chan->device);
- struct at_desc *desc;
- struct at_dma_slave *atslave;
- unsigned long flags;
- int i;
- u32 cfg;
- LIST_HEAD(tmp_list);
- dev_vdbg(chan2dev(chan), "alloc_chan_resources\n");
- /* ASSERT: channel is idle */
- if (atc_chan_is_enabled(atchan)) {
- dev_dbg(chan2dev(chan), "DMA channel not idle ?\n");
- return -EIO;
- }
- cfg = ATC_DEFAULT_CFG;
- atslave = chan->private;
- if (atslave) {
- /*
- * We need controller-specific data to set up slave
- * transfers.
- */
- BUG_ON(!atslave->dma_dev || atslave->dma_dev != atdma->dma_common.dev);
- /* if a cfg configuration is specified, take it instead of the default */
- if (atslave->cfg)
- cfg = atslave->cfg;
- }
- /* have we already been set up?
- * reconfigure channel but no need to reallocate descriptors */
- if (!list_empty(&atchan->free_list))
- return atchan->descs_allocated;
- /* Allocate initial pool of descriptors */
- for (i = 0; i < init_nr_desc_per_channel; i++) {
- desc = atc_alloc_descriptor(chan, GFP_KERNEL);
- if (!desc) {
- dev_err(atdma->dma_common.dev,
- "Only %d initial descriptors\n", i);
- break;
- }
- list_add_tail(&desc->desc_node, &tmp_list);
- }
- spin_lock_irqsave(&atchan->lock, flags);
- atchan->descs_allocated = i;
- list_splice(&tmp_list, &atchan->free_list);
- atchan->completed_cookie = chan->cookie = 1;
- spin_unlock_irqrestore(&atchan->lock, flags);
- /* channel parameters */
- channel_writel(atchan, CFG, cfg);
- dev_dbg(chan2dev(chan),
- "alloc_chan_resources: allocated %d descriptors\n",
- atchan->descs_allocated);
- return atchan->descs_allocated;
- }
- /**
- * atc_free_chan_resources - free all channel resources
- * @chan: DMA channel
- */
- static void atc_free_chan_resources(struct dma_chan *chan)
- {
- struct at_dma_chan *atchan = to_at_dma_chan(chan);
- struct at_dma *atdma = to_at_dma(chan->device);
- struct at_desc *desc, *_desc;
- LIST_HEAD(list);
- dev_dbg(chan2dev(chan), "free_chan_resources: (descs allocated=%u)\n",
- atchan->descs_allocated);
- /* ASSERT: channel is idle */
- BUG_ON(!list_empty(&atchan->active_list));
- BUG_ON(!list_empty(&atchan->queue));
- BUG_ON(atc_chan_is_enabled(atchan));
- list_for_each_entry_safe(desc, _desc, &atchan->free_list, desc_node) {
- dev_vdbg(chan2dev(chan), " freeing descriptor %p\n", desc);
- list_del(&desc->desc_node);
- /* free link descriptor */
- dma_pool_free(atdma->dma_desc_pool, desc, desc->txd.phys);
- }
- list_splice_init(&atchan->free_list, &list);
- atchan->descs_allocated = 0;
- atchan->status = 0;
- dev_vdbg(chan2dev(chan), "free_chan_resources: done\n");
- }
- /*-- Module Management -----------------------------------------------*/
- /**
- * at_dma_off - disable DMA controller
- * @atdma: the Atmel HDMAC device
- */
- static void at_dma_off(struct at_dma *atdma)
- {
- dma_writel(atdma, EN, 0);
- /* disable all interrupts */
- dma_writel(atdma, EBCIDR, -1L);
- /* confirm that all channels are disabled */
- while (dma_readl(atdma, CHSR) & atdma->all_chan_mask)
- cpu_relax();
- }
- static int __init at_dma_probe(struct platform_device *pdev)
- {
- struct at_dma_platform_data *pdata;
- struct resource *io;
- struct at_dma *atdma;
- size_t size;
- int irq;
- int err;
- int i;
- /* get DMA Controller parameters from platform */
- pdata = pdev->dev.platform_data;
- if (!pdata || pdata->nr_channels > AT_DMA_MAX_NR_CHANNELS)
- return -EINVAL;
- io = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!io)
- return -EINVAL;
- irq = platform_get_irq(pdev, 0);
- if (irq < 0)
- return irq;
- size = sizeof(struct at_dma);
- size += pdata->nr_channels * sizeof(struct at_dma_chan);
- atdma = kzalloc(size, GFP_KERNEL);
- if (!atdma)
- return -ENOMEM;
- /* discover transaction capabilities from the platform data */
- atdma->dma_common.cap_mask = pdata->cap_mask;
- atdma->all_chan_mask = (1 << pdata->nr_channels) - 1;
- size = resource_size(io);
- if (!request_mem_region(io->start, size, pdev->dev.driver->name)) {
- err = -EBUSY;
- goto err_kfree;
- }
- atdma->regs = ioremap(io->start, size);
- if (!atdma->regs) {
- err = -ENOMEM;
- goto err_release_r;
- }
- atdma->clk = clk_get(&pdev->dev, "dma_clk");
- if (IS_ERR(atdma->clk)) {
- err = PTR_ERR(atdma->clk);
- goto err_clk;
- }
- clk_enable(atdma->clk);
- /* force dma off, just in case */
- at_dma_off(atdma);
- err = request_irq(irq, at_dma_interrupt, 0, "at_hdmac", atdma);
- if (err)
- goto err_irq;
- platform_set_drvdata(pdev, atdma);
- /* create a pool of consistent memory blocks for hardware descriptors */
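- /* the controller fetches these linked-list items directly over AHB,
- * hence the word alignment requested below */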
- atdma->dma_desc_pool = dma_pool_create("at_hdmac_desc_pool",
- &pdev->dev, sizeof(struct at_desc),
- 4 /* word alignment */, 0);
- if (!atdma->dma_desc_pool) {
- dev_err(&pdev->dev, "No memory for descriptors dma pool\n");
- err = -ENOMEM;
- goto err_pool_create;
- }
- /* clear any pending interrupt */
- while (dma_readl(atdma, EBCISR))
- cpu_relax();
- /* initialize channels related values */
- INIT_LIST_HEAD(&atdma->dma_common.channels);
- for (i = 0; i < pdata->nr_channels; i++) {
- struct at_dma_chan *atchan = &atdma->chan[i];
- atchan->chan_common.device = &atdma->dma_common;
- atchan->chan_common.cookie = atchan->completed_cookie = 1;
- list_add_tail(&atchan->chan_common.device_node,
- &atdma->dma_common.channels);
- atchan->ch_regs = atdma->regs + ch_regs(i);
- spin_lock_init(&atchan->lock);
- atchan->mask = 1 << i;
- INIT_LIST_HEAD(&atchan->active_list);
- INIT_LIST_HEAD(&atchan->queue);
- INIT_LIST_HEAD(&atchan->free_list);
- tasklet_init(&atchan->tasklet, atc_tasklet,
- (unsigned long)atchan);
- atc_enable_irq(atchan);
- }
- /* set base routines */
- atdma->dma_common.device_alloc_chan_resources = atc_alloc_chan_resources;
- atdma->dma_common.device_free_chan_resources = atc_free_chan_resources;
- atdma->dma_common.device_tx_status = atc_tx_status;
- atdma->dma_common.device_issue_pending = atc_issue_pending;
- atdma->dma_common.dev = &pdev->dev;
- /* set prep routines based on capability */
- if (dma_has_cap(DMA_MEMCPY, atdma->dma_common.cap_mask))
- atdma->dma_common.device_prep_dma_memcpy = atc_prep_dma_memcpy;
- if (dma_has_cap(DMA_SLAVE, atdma->dma_common.cap_mask)) {
- atdma->dma_common.device_prep_slave_sg = atc_prep_slave_sg;
- /* controller can do slave DMA: can trigger cyclic transfers */
- dma_cap_set(DMA_CYCLIC, atdma->dma_common.cap_mask);
- atdma->dma_common.device_prep_dma_cyclic = atc_prep_dma_cyclic;
- atdma->dma_common.device_control = atc_control;
- }
- dma_writel(atdma, EN, AT_DMA_ENABLE);
- dev_info(&pdev->dev, "Atmel AHB DMA Controller ( %s%s), %d channels\n",
- dma_has_cap(DMA_MEMCPY, atdma->dma_common.cap_mask) ? "cpy " : "",
- dma_has_cap(DMA_SLAVE, atdma->dma_common.cap_mask) ? "slave " : "",
- pdata->nr_channels);
- dma_async_device_register(&atdma->dma_common);
- return 0;
- err_pool_create:
- platform_set_drvdata(pdev, NULL);
- free_irq(platform_get_irq(pdev, 0), atdma);
- err_irq:
- clk_disable(atdma->clk);
- clk_put(atdma->clk);
- err_clk:
- iounmap(atdma->regs);
- atdma->regs = NULL;
- err_release_r:
- release_mem_region(io->start, size);
- err_kfree:
- kfree(atdma);
- return err;
- }
- static int __exit at_dma_remove(struct platform_device *pdev)
- {
- struct at_dma *atdma = platform_get_drvdata(pdev);
- struct dma_chan *chan, *_chan;
- struct resource *io;
- at_dma_off(atdma);
- dma_async_device_unregister(&atdma->dma_common);
- dma_pool_destroy(atdma->dma_desc_pool);
- platform_set_drvdata(pdev, NULL);
- free_irq(platform_get_irq(pdev, 0), atdma);
- list_for_each_entry_safe(chan, _chan, &atdma->dma_common.channels,
- device_node) {
- struct at_dma_chan *atchan = to_at_dma_chan(chan);
- /* Disable interrupts */
- atc_disable_irq(atchan);
- tasklet_disable(&atchan->tasklet);
- tasklet_kill(&atchan->tasklet);
- list_del(&chan->device_node);
- }
- clk_disable(atdma->clk);
- clk_put(atdma->clk);
- iounmap(atdma->regs);
- atdma->regs = NULL;
- io = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- release_mem_region(io->start, resource_size(io));
- kfree(atdma);
- return 0;
- }
- static void at_dma_shutdown(struct platform_device *pdev)
- {
- struct at_dma *atdma = platform_get_drvdata(pdev);
- at_dma_off(platform_get_drvdata(pdev));
- clk_disable(atdma->clk);
- }
- static int at_dma_prepare(struct device *dev)
- {
- struct platform_device *pdev = to_platform_device(dev);
- struct at_dma *atdma = platform_get_drvdata(pdev);
- struct dma_chan *chan, *_chan;
- list_for_each_entry_safe(chan, _chan, &atdma->dma_common.channels,
- device_node) {
- struct at_dma_chan *atchan = to_at_dma_chan(chan);
- /* wait for transaction completion (except in cyclic case) */
- if (atc_chan_is_enabled(atchan) && !atc_chan_is_cyclic(atchan))
- return -EAGAIN;
- }
- return 0;
- }
- static void atc_suspend_cyclic(struct at_dma_chan *atchan)
- {
- struct dma_chan *chan = &atchan->chan_common;
- /* The channel should have been paused by its user;
- * do it here anyway if that has not been done already */
- if (!atc_chan_is_paused(atchan)) {
- dev_warn(chan2dev(chan),
- "cyclic channel not paused, should be done by channel user\n");
- atc_control(chan, DMA_PAUSE, 0);
- }
- /* now preserve additional data for cyclic operations */
- /* next descriptor address in the cyclic list */
- atchan->save_dscr = channel_readl(atchan, DSCR);
- vdbg_dump_regs(atchan);
- }
- static int at_dma_suspend_noirq(struct device *dev)
- {
- struct platform_device *pdev = to_platform_device(dev);
- struct at_dma *atdma = platform_get_drvdata(pdev);
- struct dma_chan *chan, *_chan;
- /* preserve data */
- list_for_each_entry_safe(chan, _chan, &atdma->dma_common.channels,
- device_node) {
- struct at_dma_chan *atchan = to_at_dma_chan(chan);
- if (atc_chan_is_cyclic(atchan))
- atc_suspend_cyclic(atchan);
- atchan->save_cfg = channel_readl(atchan, CFG);
- }
- atdma->save_imr = dma_readl(atdma, EBCIMR);
- /* disable DMA controller */
- at_dma_off(atdma);
- clk_disable(atdma->clk);
- return 0;
- }
- static void atc_resume_cyclic(struct at_dma_chan *atchan)
- {
- struct at_dma *atdma = to_at_dma(atchan->chan_common.device);
- /* restore channel status for cyclic descriptors list:
- * next descriptor in the cyclic list at the time of suspend */
- channel_writel(atchan, SADDR, 0);
- channel_writel(atchan, DADDR, 0);
- channel_writel(atchan, CTRLA, 0);
- channel_writel(atchan, CTRLB, 0);
- channel_writel(atchan, DSCR, atchan->save_dscr);
- dma_writel(atdma, CHER, atchan->mask);
- /* the channel pause state should be cleared by the channel user;
- * we cannot take the initiative to do it here */
- vdbg_dump_regs(atchan);
- }
- static int at_dma_resume_noirq(struct device *dev)
- {
- struct platform_device *pdev = to_platform_device(dev);
- struct at_dma *atdma = platform_get_drvdata(pdev);
- struct dma_chan *chan, *_chan;
- /* bring back DMA controller */
- clk_enable(atdma->clk);
- dma_writel(atdma, EN, AT_DMA_ENABLE);
- /* clear any pending interrupt */
- while (dma_readl(atdma, EBCISR))
- cpu_relax();
- /* restore saved data */
- dma_writel(atdma, EBCIER, atdma->save_imr);
- list_for_each_entry_safe(chan, _chan, &atdma->dma_common.channels,
- device_node) {
- struct at_dma_chan *atchan = to_at_dma_chan(chan);
- channel_writel(atchan, CFG, atchan->save_cfg);
- if (atc_chan_is_cyclic(atchan))
- atc_resume_cyclic(atchan);
- }
- return 0;
- }
- static const struct dev_pm_ops at_dma_dev_pm_ops = {
- .prepare = at_dma_prepare,
- .suspend_noirq = at_dma_suspend_noirq,
- .resume_noirq = at_dma_resume_noirq,
- };
- static struct platform_driver at_dma_driver = {
- .remove = __exit_p(at_dma_remove),
- .shutdown = at_dma_shutdown,
- .driver = {
- .name = "at_hdmac",
- .pm = &at_dma_dev_pm_ops,
- },
- };
- static int __init at_dma_init(void)
- {
- return platform_driver_probe(&at_dma_driver, at_dma_probe);
- }
- subsys_initcall(at_dma_init);
- static void __exit at_dma_exit(void)
- {
- platform_driver_unregister(&at_dma_driver);
- }
- module_exit(at_dma_exit);
- MODULE_DESCRIPTION("Atmel AHB DMA Controller driver");
- MODULE_AUTHOR("Nicolas Ferre <nicolas.ferre@atmel.com>");
- MODULE_LICENSE("GPL");
- MODULE_ALIAS("platform:at_hdmac");