@@ -148,6 +148,24 @@ static struct variant_data variant_ux500v2 = {
 	.pwrreg_clkgate	= true,
 };
 
+/*
+ * Validate mmc prerequisites
+ */
+static int mmci_validate_data(struct mmci_host *host,
+			      struct mmc_data *data)
+{
+	if (!data)
+		return 0;
+
+	if (!is_power_of_2(data->blksz)) {
+		dev_err(mmc_dev(host->mmc),
+			"unsupported block size (%d bytes)\n", data->blksz);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
 /*
  * This must be called with host->lock held
  */
@@ -370,10 +388,33 @@ static inline void mmci_dma_release(struct mmci_host *host)
 	host->dma_rx_channel = host->dma_tx_channel = NULL;
 }
 
+static void mmci_dma_data_error(struct mmci_host *host)
+{
+	dev_err(mmc_dev(host->mmc), "error during DMA transfer!\n");
+	dmaengine_terminate_all(host->dma_current);
+	host->dma_current = NULL;
+	host->dma_desc_current = NULL;
+	host->data->host_cookie = 0;
+}
+
 static void mmci_dma_unmap(struct mmci_host *host, struct mmc_data *data)
 {
-	struct dma_chan *chan = host->dma_current;
+	struct dma_chan *chan;
 	enum dma_data_direction dir;
+
+	if (data->flags & MMC_DATA_READ) {
+		dir = DMA_FROM_DEVICE;
+		chan = host->dma_rx_channel;
+	} else {
+		dir = DMA_TO_DEVICE;
+		chan = host->dma_tx_channel;
+	}
+
+	dma_unmap_sg(chan->device->dev, data->sg, data->sg_len, dir);
+}
+
+static void mmci_dma_finalize(struct mmci_host *host, struct mmc_data *data)
+{
 	u32 status;
 	int i;
 
@@ -392,19 +433,13 @@ static void mmci_dma_unmap(struct mmci_host *host, struct mmc_data *data)
 	 * contiguous buffers. On TX, we'll get a FIFO underrun error.
 	 */
 	if (status & MCI_RXDATAAVLBLMASK) {
-		dmaengine_terminate_all(chan);
+		mmci_dma_data_error(host);
 		if (!data->error)
 			data->error = -EIO;
 	}
 
-	if (data->flags & MMC_DATA_WRITE) {
-		dir = DMA_TO_DEVICE;
-	} else {
-		dir = DMA_FROM_DEVICE;
-	}
-
 	if (!data->host_cookie)
-		dma_unmap_sg(chan->device->dev, data->sg, data->sg_len, dir);
+		mmci_dma_unmap(host, data);
 
 	/*
 	 * Use of DMA with scatter-gather is impossible.
@@ -414,16 +449,15 @@ static void mmci_dma_unmap(struct mmci_host *host, struct mmc_data *data)
 		dev_err(mmc_dev(host->mmc), "buggy DMA detected. Taking evasive action.\n");
 		mmci_dma_release(host);
 	}
-}
 
-static void mmci_dma_data_error(struct mmci_host *host)
-{
-	dev_err(mmc_dev(host->mmc), "error during DMA transfer!\n");
-	dmaengine_terminate_all(host->dma_current);
+	host->dma_current = NULL;
+	host->dma_desc_current = NULL;
 }
 
-static int mmci_dma_prep_data(struct mmci_host *host, struct mmc_data *data,
-			      struct mmci_host_next *next)
+/* prepares DMA channel and DMA descriptor, returns non-zero on failure */
+static int __mmci_dma_prep_data(struct mmci_host *host, struct mmc_data *data,
+				struct dma_chan **dma_chan,
+				struct dma_async_tx_descriptor **dma_desc)
 {
 	struct variant_data *variant = host->variant;
 	struct dma_slave_config conf = {
@@ -441,16 +475,6 @@ static int mmci_dma_prep_data(struct mmci_host *host, struct mmc_data *data,
 	enum dma_data_direction buffer_dirn;
 	int nr_sg;
 
-	/* Check if next job is already prepared */
-	if (data->host_cookie && !next &&
-	    host->dma_current && host->dma_desc_current)
-		return 0;
-
-	if (!next) {
-		host->dma_current = NULL;
-		host->dma_desc_current = NULL;
-	}
-
 	if (data->flags & MMC_DATA_READ) {
 		conf.direction = DMA_DEV_TO_MEM;
 		buffer_dirn = DMA_FROM_DEVICE;
@@ -480,29 +504,41 @@ static int mmci_dma_prep_data(struct mmci_host *host, struct mmc_data *data,
 	if (!desc)
 		goto unmap_exit;
 
-	if (next) {
-		next->dma_chan = chan;
-		next->dma_desc = desc;
-	} else {
-		host->dma_current = chan;
-		host->dma_desc_current = desc;
-	}
+	*dma_chan = chan;
+	*dma_desc = desc;
 
 	return 0;
 
 unmap_exit:
-	if (!next)
-		dmaengine_terminate_all(chan);
 	dma_unmap_sg(device->dev, data->sg, data->sg_len, buffer_dirn);
 	return -ENOMEM;
 }
 
+static inline int mmci_dma_prep_data(struct mmci_host *host,
+				     struct mmc_data *data)
+{
+	/* Check if next job is already prepared. */
+	if (host->dma_current && host->dma_desc_current)
+		return 0;
+
+	/* No job was prepared, thus do it now. */
+	return __mmci_dma_prep_data(host, data, &host->dma_current,
+				    &host->dma_desc_current);
+}
+
+static inline int mmci_dma_prep_next(struct mmci_host *host,
+				     struct mmc_data *data)
+{
+	struct mmci_host_next *nd = &host->next_data;
+	return __mmci_dma_prep_data(host, data, &nd->dma_chan, &nd->dma_desc);
+}
+
 static int mmci_dma_start_data(struct mmci_host *host, unsigned int datactrl)
 {
 	int ret;
 	struct mmc_data *data = host->data;
 
-	ret = mmci_dma_prep_data(host, host->data, NULL);
+	ret = mmci_dma_prep_data(host, host->data);
 	if (ret)
 		return ret;
 
@@ -532,19 +568,11 @@ static void mmci_get_next_data(struct mmci_host *host, struct mmc_data *data)
 {
 	struct mmci_host_next *next = &host->next_data;
 
-	if (data->host_cookie && data->host_cookie != next->cookie) {
-		pr_warning("[%s] invalid cookie: data->host_cookie %d"
-			   " host->next_data.cookie %d\n",
-			   __func__, data->host_cookie, host->next_data.cookie);
-		data->host_cookie = 0;
-	}
-
-	if (!data->host_cookie)
-		return;
+	WARN_ON(data->host_cookie && data->host_cookie != next->cookie);
+	WARN_ON(!data->host_cookie && (next->dma_desc || next->dma_chan));
 
 	host->dma_desc_current = next->dma_desc;
 	host->dma_current = next->dma_chan;
-
 	next->dma_desc = NULL;
 	next->dma_chan = NULL;
 }
@@ -559,19 +587,13 @@ static void mmci_pre_request(struct mmc_host *mmc, struct mmc_request *mrq,
 	if (!data)
 		return;
 
-	if (data->host_cookie) {
-		data->host_cookie = 0;
+	BUG_ON(data->host_cookie);
+
+	if (mmci_validate_data(host, data))
 		return;
-	}
 
-	/* if config for dma */
-	if (((data->flags & MMC_DATA_WRITE) && host->dma_tx_channel) ||
-	    ((data->flags & MMC_DATA_READ) && host->dma_rx_channel)) {
-		if (mmci_dma_prep_data(host, data, nd))
-			data->host_cookie = 0;
-		else
-			data->host_cookie = ++nd->cookie < 0 ? 1 : nd->cookie;
-	}
+	if (!mmci_dma_prep_next(host, data))
+		data->host_cookie = ++nd->cookie < 0 ? 1 : nd->cookie;
 }
 
 static void mmci_post_request(struct mmc_host *mmc, struct mmc_request *mrq,
@@ -579,29 +601,23 @@ static void mmci_post_request(struct mmc_host *mmc, struct mmc_request *mrq,
 {
 	struct mmci_host *host = mmc_priv(mmc);
 	struct mmc_data *data = mrq->data;
-	struct dma_chan *chan;
-	enum dma_data_direction dir;
 
-	if (!data)
+	if (!data || !data->host_cookie)
 		return;
 
-	if (data->flags & MMC_DATA_READ) {
-		dir = DMA_FROM_DEVICE;
-		chan = host->dma_rx_channel;
-	} else {
-		dir = DMA_TO_DEVICE;
-		chan = host->dma_tx_channel;
-	}
+	mmci_dma_unmap(host, data);
 
+	if (err) {
+		struct mmci_host_next *next = &host->next_data;
+		struct dma_chan *chan;
+		if (data->flags & MMC_DATA_READ)
+			chan = host->dma_rx_channel;
+		else
+			chan = host->dma_tx_channel;
+		dmaengine_terminate_all(chan);
 
-	/* if config for dma */
-	if (chan) {
-		if (err)
-			dmaengine_terminate_all(chan);
-		if (data->host_cookie)
-			dma_unmap_sg(mmc_dev(host->mmc), data->sg,
-				     data->sg_len, dir);
-		mrq->data->host_cookie = 0;
+		next->dma_desc = NULL;
+		next->dma_chan = NULL;
 	}
 }
 
@@ -622,6 +638,11 @@ static inline void mmci_dma_unmap(struct mmci_host *host, struct mmc_data *data)
 {
 }
 
+static inline void mmci_dma_finalize(struct mmci_host *host,
+				     struct mmc_data *data)
+{
+}
+
 static inline void mmci_dma_data_error(struct mmci_host *host)
 {
 }
@@ -772,8 +793,10 @@ mmci_data_irq(struct mmci_host *host, struct mmc_data *data,
 		u32 remain, success;
 
 		/* Terminate the DMA transfer */
-		if (dma_inprogress(host))
+		if (dma_inprogress(host)) {
 			mmci_dma_data_error(host);
+			mmci_dma_unmap(host, data);
+		}
 
 		/*
 		 * Calculate how far we are into the transfer. Note that
@@ -812,7 +835,7 @@ mmci_data_irq(struct mmci_host *host, struct mmc_data *data,
 
 	if (status & MCI_DATAEND || data->error) {
 		if (dma_inprogress(host))
-			mmci_dma_unmap(host, data);
+			mmci_dma_finalize(host, data);
 		mmci_stop_data(host);
 
 		if (!data->error)
@@ -849,8 +872,10 @@ mmci_cmd_irq(struct mmci_host *host, struct mmc_command *cmd,
 	if (!cmd->data || cmd->error) {
 		if (host->data) {
 			/* Terminate the DMA transfer */
-			if (dma_inprogress(host))
+			if (dma_inprogress(host)) {
 				mmci_dma_data_error(host);
+				mmci_dma_unmap(host, host->data);
+			}
 			mmci_stop_data(host);
 		}
 		mmci_request_end(host, cmd->mrq);
@@ -1076,10 +1101,8 @@ static void mmci_request(struct mmc_host *mmc, struct mmc_request *mrq)
 
 	WARN_ON(host->mrq != NULL);
 
-	if (mrq->data && !is_power_of_2(mrq->data->blksz)) {
-		dev_err(mmc_dev(mmc), "unsupported block size (%d bytes)\n",
-			mrq->data->blksz);
-		mrq->cmd->error = -EINVAL;
+	mrq->cmd->error = mmci_validate_data(host, mrq->data);
+	if (mrq->cmd->error) {
 		mmc_request_done(mmc, mrq);
 		return;
 	}
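
The heart of the patch is the split between preparing a DMA job (mmci_dma_prep_next, called from the mmc core's pre_request hook while the previous transfer is still running) and claiming it later (mmci_get_next_data), with the host_cookie telling the two sides whether they are talking about the same job. Below is a minimal user-space sketch of that hand-off pattern; all names (struct host, prep, pre_request, get_next) are hypothetical stand-ins, not the driver's or the mmc core's APIs, and no kernel interfaces are used.

#include <stdio.h>

/* Hypothetical stand-ins for a prepared DMA job and its owner. */
struct job { int id; };

struct host {
	struct job *current;	/* job claimed for the active request */
	struct job next_slot;	/* storage for the pre-prepared job */
	struct job *next;	/* non-NULL once pre_request() ran */
	int cookie;		/* monotonically increasing, never 0 */
};

/* Mirrors __mmci_dma_prep_data(): fill the slot the caller points at. */
static int prep(struct host *h, struct job **out, int id)
{
	h->next_slot.id = id;
	*out = &h->next_slot;
	return 0;		/* non-zero would mean "fall back to PIO" */
}

/* Mirrors mmci_pre_request(): prepare the *next* job, hand out a cookie. */
static int pre_request(struct host *h, int id)
{
	if (prep(h, &h->next, id))
		return 0;	/* no cookie: request runs unprepared */
	h->cookie = h->cookie + 1 < 0 ? 1 : h->cookie + 1;
	return h->cookie;
}

/* Mirrors mmci_get_next_data(): claim the prepared job for this request. */
static void get_next(struct host *h, int cookie)
{
	if (cookie && cookie == h->cookie) {
		h->current = h->next;
		h->next = NULL;
	}
}

int main(void)
{
	struct host h = { 0 };
	int c = pre_request(&h, 42);	/* prepare while previous I/O runs */
	get_next(&h, c);		/* later: claim it for the new request */
	printf("claimed job %d (cookie %d)\n", h.current->id, c);
	return 0;
}

The wrap-around in pre_request mirrors the driver's "++nd->cookie < 0 ? 1 : nd->cookie" so the cookie can never become 0, since 0 is reserved to mean "nothing was prepared"; that is also why the patch can replace the old pr_warning() recovery path with plain WARN_ON() checks.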