|
@@ -29,6 +29,7 @@
|
|
|
* and hooked into this driver.
|
|
|
*/
|
|
|
|
|
|
+
|
|
|
#if defined(CONFIG_SERIAL_AMBA_PL011_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
|
|
|
#define SUPPORT_SYSRQ
|
|
|
#endif
|
|
@@ -117,6 +118,12 @@ struct pl011_dmarx_data {
|
|
|
struct pl011_sgbuf sgbuf_b;
|
|
|
dma_cookie_t cookie;
|
|
|
bool running;
|
|
|
+ struct timer_list timer;
|
|
|
+ unsigned int last_residue;
|
|
|
+ unsigned long last_jiffies;
|
|
|
+ bool auto_poll_rate;
|
|
|
+ unsigned int poll_rate;
|
|
|
+ unsigned int poll_timeout;
|
|
|
};
|
|
|
|
|
|
struct pl011_dmatx_data {
|
|
@@ -223,16 +230,18 @@ static int pl011_fifo_to_tty(struct uart_amba_port *uap)
|
|
|
static int pl011_sgbuf_init(struct dma_chan *chan, struct pl011_sgbuf *sg,
|
|
|
enum dma_data_direction dir)
|
|
|
{
|
|
|
- sg->buf = kmalloc(PL011_DMA_BUFFER_SIZE, GFP_KERNEL);
|
|
|
+ dma_addr_t dma_addr;
|
|
|
+
|
|
|
+ sg->buf = dma_alloc_coherent(chan->device->dev,
|
|
|
+ PL011_DMA_BUFFER_SIZE, &dma_addr, GFP_KERNEL);
|
|
|
if (!sg->buf)
|
|
|
return -ENOMEM;
|
|
|
|
|
|
- sg_init_one(&sg->sg, sg->buf, PL011_DMA_BUFFER_SIZE);
|
|
|
+ sg_init_table(&sg->sg, 1);
|
|
|
+ sg_set_page(&sg->sg, phys_to_page(dma_addr),
|
|
|
+ PL011_DMA_BUFFER_SIZE, offset_in_page(dma_addr));
|
|
|
+ sg_dma_address(&sg->sg) = dma_addr;
|
|
|
|
|
|
- if (dma_map_sg(chan->device->dev, &sg->sg, 1, dir) != 1) {
|
|
|
- kfree(sg->buf);
|
|
|
- return -EINVAL;
|
|
|
- }
|
|
|
return 0;
|
|
|
}
|
|
|
|
|
@@ -240,8 +249,9 @@ static void pl011_sgbuf_free(struct dma_chan *chan, struct pl011_sgbuf *sg,
|
|
|
enum dma_data_direction dir)
|
|
|
{
|
|
|
if (sg->buf) {
|
|
|
- dma_unmap_sg(chan->device->dev, &sg->sg, 1, dir);
|
|
|
- kfree(sg->buf);
|
|
|
+ dma_free_coherent(chan->device->dev,
|
|
|
+ PL011_DMA_BUFFER_SIZE, sg->buf,
|
|
|
+ sg_dma_address(&sg->sg));
|
|
|
}
|
|
|
}
|
|
|
|
|
@@ -300,6 +310,29 @@ static void pl011_dma_probe_initcall(struct uart_amba_port *uap)
|
|
|
dmaengine_slave_config(chan, &rx_conf);
|
|
|
uap->dmarx.chan = chan;
|
|
|
|
|
|
+ if (plat->dma_rx_poll_enable) {
|
|
|
+ /* Set poll rate if specified. */
|
|
|
+ if (plat->dma_rx_poll_rate) {
|
|
|
+ uap->dmarx.auto_poll_rate = false;
|
|
|
+ uap->dmarx.poll_rate = plat->dma_rx_poll_rate;
|
|
|
+ } else {
|
|
|
+ /*
|
|
|
+ * The poll rate defaults to 100 ms if not
|
|
|
+ * specified. This will be adjusted with
|
|
|
+ * the baud rate at set_termios.
|
|
|
+ */
|
|
|
+ uap->dmarx.auto_poll_rate = true;
|
|
|
+ uap->dmarx.poll_rate = 100;
|
|
|
+ }
|
|
|
+ /* poll_timeout defaults to 3 secs if not specified. */
|
|
|
+ if (plat->dma_rx_poll_timeout)
|
|
|
+ uap->dmarx.poll_timeout =
|
|
|
+ plat->dma_rx_poll_timeout;
|
|
|
+ else
|
|
|
+ uap->dmarx.poll_timeout = 3000;
|
|
|
+ } else
|
|
|
+ uap->dmarx.auto_poll_rate = false;
|
|
|
+
|
|
|
dev_info(uap->port.dev, "DMA channel RX %s\n",
|
|
|
dma_chan_name(uap->dmarx.chan));
|
|
|
}
|
|
@@ -701,24 +734,30 @@ static void pl011_dma_rx_chars(struct uart_amba_port *uap,
|
|
|
struct tty_port *port = &uap->port.state->port;
|
|
|
struct pl011_sgbuf *sgbuf = use_buf_b ?
|
|
|
&uap->dmarx.sgbuf_b : &uap->dmarx.sgbuf_a;
|
|
|
- struct device *dev = uap->dmarx.chan->device->dev;
|
|
|
int dma_count = 0;
|
|
|
u32 fifotaken = 0; /* only used for vdbg() */
|
|
|
|
|
|
- /* Pick everything from the DMA first */
|
|
|
+ struct pl011_dmarx_data *dmarx = &uap->dmarx;
|
|
|
+ int dmataken = 0;
|
|
|
+
|
|
|
+ if (uap->dmarx.poll_rate) {
|
|
|
+ /* The data can be taken by polling */
|
|
|
+ dmataken = sgbuf->sg.length - dmarx->last_residue;
|
|
|
+ /* Recalculate the pending size */
|
|
|
+ if (pending >= dmataken)
|
|
|
+ pending -= dmataken;
|
|
|
+ }
|
|
|
+
|
|
|
+ /* Pick the remaining data from the DMA */
|
|
|
if (pending) {
|
|
|
- /* Sync in buffer */
|
|
|
- dma_sync_sg_for_cpu(dev, &sgbuf->sg, 1, DMA_FROM_DEVICE);
|
|
|
|
|
|
/*
|
|
|
* First take all chars in the DMA pipe, then look in the FIFO.
|
|
|
* Note that tty_insert_flip_buf() tries to take as many chars
|
|
|
* as it can.
|
|
|
*/
|
|
|
- dma_count = tty_insert_flip_string(port, sgbuf->buf, pending);
|
|
|
-
|
|
|
- /* Return buffer to device */
|
|
|
- dma_sync_sg_for_device(dev, &sgbuf->sg, 1, DMA_FROM_DEVICE);
|
|
|
+ dma_count = tty_insert_flip_string(port, sgbuf->buf + dmataken,
|
|
|
+ pending);
|
|
|
|
|
|
uap->port.icount.rx += dma_count;
|
|
|
if (dma_count < pending)
|
|
@@ -726,6 +765,10 @@ static void pl011_dma_rx_chars(struct uart_amba_port *uap,
|
|
|
"couldn't insert all characters (TTY is full?)\n");
|
|
|
}
|
|
|
|
|
|
+ /* Reset the last_residue for Rx DMA poll */
|
|
|
+ if (uap->dmarx.poll_rate)
|
|
|
+ dmarx->last_residue = sgbuf->sg.length;
|
|
|
+
|
|
|
/*
|
|
|
* Only continue with trying to read the FIFO if all DMA chars have
|
|
|
* been taken first.
|
|
@@ -865,6 +908,57 @@ static inline void pl011_dma_rx_stop(struct uart_amba_port *uap)
|
|
|
writew(uap->dmacr, uap->port.membase + UART011_DMACR);
|
|
|
}
|
|
|
|
|
|
+/*
|
|
|
+ * Timer handler for Rx DMA polling.
|
|
|
+ * On every poll, it checks the residue in the DMA buffer and transfers
|
|
|
+ * data to the tty. Also, last_residue is updated for the next poll.
|
|
|
+ */
|
|
|
+static void pl011_dma_rx_poll(unsigned long args)
|
|
|
+{
|
|
|
+ struct uart_amba_port *uap = (struct uart_amba_port *)args;
|
|
|
+ struct tty_port *port = &uap->port.state->port;
|
|
|
+ struct pl011_dmarx_data *dmarx = &uap->dmarx;
|
|
|
+ struct dma_chan *rxchan = uap->dmarx.chan;
|
|
|
+ unsigned long flags = 0;
|
|
|
+ unsigned int dmataken = 0;
|
|
|
+ unsigned int size = 0;
|
|
|
+ struct pl011_sgbuf *sgbuf;
|
|
|
+ int dma_count;
|
|
|
+ struct dma_tx_state state;
|
|
|
+
|
|
|
+ sgbuf = dmarx->use_buf_b ? &uap->dmarx.sgbuf_b : &uap->dmarx.sgbuf_a;
|
|
|
+ rxchan->device->device_tx_status(rxchan, dmarx->cookie, &state);
|
|
|
+ if (likely(state.residue < dmarx->last_residue)) {
|
|
|
+ dmataken = sgbuf->sg.length - dmarx->last_residue;
|
|
|
+ size = dmarx->last_residue - state.residue;
|
|
|
+ dma_count = tty_insert_flip_string(port, sgbuf->buf + dmataken,
|
|
|
+ size);
|
|
|
+ if (dma_count == size)
|
|
|
+ dmarx->last_residue = state.residue;
|
|
|
+ dmarx->last_jiffies = jiffies;
|
|
|
+ }
|
|
|
+ tty_flip_buffer_push(port);
|
|
|
+
|
|
|
+ /*
|
|
|
+ * If no data is received in poll_timeout, the driver will fall back
|
|
|
+ * to interrupt mode. We will retrigger DMA at the first interrupt.
|
|
|
+ */
|
|
|
+ if (jiffies_to_msecs(jiffies - dmarx->last_jiffies)
|
|
|
+ > uap->dmarx.poll_timeout) {
|
|
|
+
|
|
|
+ spin_lock_irqsave(&uap->port.lock, flags);
|
|
|
+ pl011_dma_rx_stop(uap);
|
|
|
+ spin_unlock_irqrestore(&uap->port.lock, flags);
|
|
|
+
|
|
|
+ uap->dmarx.running = false;
|
|
|
+ dmaengine_terminate_all(rxchan);
|
|
|
+ del_timer(&uap->dmarx.timer);
|
|
|
+ } else {
|
|
|
+ mod_timer(&uap->dmarx.timer,
|
|
|
+ jiffies + msecs_to_jiffies(uap->dmarx.poll_rate));
|
|
|
+ }
|
|
|
+}
|
|
|
+
|
|
|
static void pl011_dma_startup(struct uart_amba_port *uap)
|
|
|
{
|
|
|
int ret;
|
|
@@ -927,6 +1021,16 @@ skip_rx:
|
|
|
if (pl011_dma_rx_trigger_dma(uap))
|
|
|
dev_dbg(uap->port.dev, "could not trigger initial "
|
|
|
"RX DMA job, fall back to interrupt mode\n");
|
|
|
+ if (uap->dmarx.poll_rate) {
|
|
|
+ init_timer(&(uap->dmarx.timer));
|
|
|
+ uap->dmarx.timer.function = pl011_dma_rx_poll;
|
|
|
+ uap->dmarx.timer.data = (unsigned long)uap;
|
|
|
+ mod_timer(&uap->dmarx.timer,
|
|
|
+ jiffies +
|
|
|
+ msecs_to_jiffies(uap->dmarx.poll_rate));
|
|
|
+ uap->dmarx.last_residue = PL011_DMA_BUFFER_SIZE;
|
|
|
+ uap->dmarx.last_jiffies = jiffies;
|
|
|
+ }
|
|
|
}
|
|
|
}
|
|
|
|
|
@@ -962,6 +1066,8 @@ static void pl011_dma_shutdown(struct uart_amba_port *uap)
|
|
|
/* Clean up the RX DMA */
|
|
|
pl011_sgbuf_free(uap->dmarx.chan, &uap->dmarx.sgbuf_a, DMA_FROM_DEVICE);
|
|
|
pl011_sgbuf_free(uap->dmarx.chan, &uap->dmarx.sgbuf_b, DMA_FROM_DEVICE);
|
|
|
+ if (uap->dmarx.poll_rate)
|
|
|
+ del_timer_sync(&uap->dmarx.timer);
|
|
|
uap->using_rx_dma = false;
|
|
|
}
|
|
|
}
|
|
@@ -976,7 +1082,6 @@ static inline bool pl011_dma_rx_running(struct uart_amba_port *uap)
|
|
|
return uap->using_rx_dma && uap->dmarx.running;
|
|
|
}
|
|
|
|
|
|
-
|
|
|
#else
|
|
|
/* Blank functions if the DMA engine is not available */
|
|
|
static inline void pl011_dma_probe(struct uart_amba_port *uap)
|
|
@@ -1088,8 +1193,18 @@ static void pl011_rx_chars(struct uart_amba_port *uap)
|
|
|
dev_dbg(uap->port.dev, "could not trigger RX DMA job "
|
|
|
"fall back to interrupt mode again\n");
|
|
|
uap->im |= UART011_RXIM;
|
|
|
- } else
|
|
|
+ } else {
|
|
|
uap->im &= ~UART011_RXIM;
|
|
|
+ /* Start Rx DMA poll */
|
|
|
+ if (uap->dmarx.poll_rate) {
|
|
|
+ uap->dmarx.last_jiffies = jiffies;
|
|
|
+ uap->dmarx.last_residue = PL011_DMA_BUFFER_SIZE;
|
|
|
+ mod_timer(&uap->dmarx.timer,
|
|
|
+ jiffies +
|
|
|
+ msecs_to_jiffies(uap->dmarx.poll_rate));
|
|
|
+ }
|
|
|
+ }
|
|
|
+
|
|
|
writew(uap->im, uap->port.membase + UART011_IMSC);
|
|
|
}
|
|
|
spin_lock(&uap->port.lock);
|
|
@@ -1164,7 +1279,6 @@ static irqreturn_t pl011_int(int irq, void *dev_id)
|
|
|
unsigned int dummy_read;
|
|
|
|
|
|
spin_lock_irqsave(&uap->port.lock, flags);
|
|
|
-
|
|
|
status = readw(uap->port.membase + UART011_MIS);
|
|
|
if (status) {
|
|
|
do {
|
|
@@ -1551,6 +1665,11 @@ pl011_set_termios(struct uart_port *port, struct ktermios *termios,
|
|
|
*/
|
|
|
baud = uart_get_baud_rate(port, termios, old, 0,
|
|
|
port->uartclk / clkdiv);
|
|
|
+ /*
|
|
|
+ * Adjust the RX DMA polling rate with the baud rate if not explicitly specified.
|
|
|
+ */
|
|
|
+ if (uap->dmarx.auto_poll_rate)
|
|
|
+ uap->dmarx.poll_rate = DIV_ROUND_UP(10000000, baud);
|
|
|
|
|
|
if (baud > port->uartclk/16)
|
|
|
quot = DIV_ROUND_CLOSEST(port->uartclk * 8, baud);
|