/*
 * serial_tegra.c
 *
 * High-speed serial driver for NVIDIA Tegra SoCs
 *
 * Copyright (c) 2012-2013, NVIDIA CORPORATION. All rights reserved.
 *
 * Author: Laxman Dewangan <ldewangan@nvidia.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/clk.h>
#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/pagemap.h>
#include <linux/platform_device.h>
#include <linux/serial.h>
#include <linux/serial_8250.h>
#include <linux/serial_core.h>
#include <linux/serial_reg.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/termios.h>
#include <linux/tty.h>
#include <linux/tty_flip.h>
#include <linux/clk/tegra.h>

#define TEGRA_UART_TYPE			"TEGRA_UART"
#define TX_EMPTY_STATUS			(UART_LSR_TEMT | UART_LSR_THRE)
#define BYTES_TO_ALIGN(x)		((unsigned long)(x) & 0x3)

#define TEGRA_UART_RX_DMA_BUFFER_SIZE	4096
#define TEGRA_UART_LSR_TXFIFO_FULL	0x100
#define TEGRA_UART_IER_EORD		0x20
#define TEGRA_UART_MCR_RTS_EN		0x40
#define TEGRA_UART_MCR_CTS_EN		0x20
#define TEGRA_UART_LSR_ANY		(UART_LSR_OE | UART_LSR_BI | \
					 UART_LSR_PE | UART_LSR_FE)
#define TEGRA_UART_IRDA_CSR		0x08
#define TEGRA_UART_SIR_ENABLED		0x80

#define TEGRA_UART_TX_PIO		1
#define TEGRA_UART_TX_DMA		2
#define TEGRA_UART_MIN_DMA		16
#define TEGRA_UART_FIFO_SIZE		32

/*
 * The Tx fifo trigger level setting in the Tegra UART is the reverse of
 * a conventional UART.
 */
#define TEGRA_UART_TX_TRIG_16B		0x00
#define TEGRA_UART_TX_TRIG_8B		0x10
#define TEGRA_UART_TX_TRIG_4B		0x20
#define TEGRA_UART_TX_TRIG_1B		0x30

#define TEGRA_UART_MAXIMUM		5

/* Default UART setting when started: 115200 no parity, stop, 8 data bits */
#define TEGRA_UART_DEFAULT_BAUD		115200
#define TEGRA_UART_DEFAULT_LSR		UART_LCR_WLEN8

/* Tx transfer mode */
#define TEGRA_TX_PIO			1
#define TEGRA_TX_DMA			2

/**
 * tegra_uart_chip_data: SOC specific data.
 *
 * @tx_fifo_full_status: Status flag available for checking tx fifo full.
 * @allow_txfifo_reset_fifo_mode: allow Tx fifo reset with fifo mode or not.
 *			Tegra30 does not allow this.
 * @support_clk_src_div: Clock source supports the clock divider.
 */
struct tegra_uart_chip_data {
	bool tx_fifo_full_status;
	bool allow_txfifo_reset_fifo_mode;
	bool support_clk_src_div;
};

struct tegra_uart_port {
	struct uart_port uport;
	const struct tegra_uart_chip_data *cdata;

	struct clk *uart_clk;
	unsigned int current_baud;

	/* Register shadow */
	unsigned long fcr_shadow;
	unsigned long mcr_shadow;
	unsigned long lcr_shadow;
	unsigned long ier_shadow;
	bool rts_active;

	int tx_in_progress;
	unsigned int tx_bytes;

	bool enable_modem_interrupt;

	bool rx_timeout;
	int rx_in_progress;
	int symb_bit;
	int dma_req_sel;

	struct dma_chan *rx_dma_chan;
	struct dma_chan *tx_dma_chan;
	dma_addr_t rx_dma_buf_phys;
	dma_addr_t tx_dma_buf_phys;
	unsigned char *rx_dma_buf_virt;
	unsigned char *tx_dma_buf_virt;
	struct dma_async_tx_descriptor *tx_dma_desc;
	struct dma_async_tx_descriptor *rx_dma_desc;
	dma_cookie_t tx_cookie;
	dma_cookie_t rx_cookie;
	int tx_bytes_requested;
	int rx_bytes_requested;
};

static void tegra_uart_start_next_tx(struct tegra_uart_port *tup);
static int tegra_uart_start_rx_dma(struct tegra_uart_port *tup);

static inline unsigned long tegra_uart_read(struct tegra_uart_port *tup,
		unsigned long reg)
{
	return readl(tup->uport.membase + (reg << tup->uport.regshift));
}

static inline void tegra_uart_write(struct tegra_uart_port *tup, unsigned val,
		unsigned long reg)
{
	writel(val, tup->uport.membase + (reg << tup->uport.regshift));
}

static inline struct tegra_uart_port *to_tegra_uport(struct uart_port *u)
{
	return container_of(u, struct tegra_uart_port, uport);
}

static unsigned int tegra_uart_get_mctrl(struct uart_port *u)
{
	struct tegra_uart_port *tup = to_tegra_uport(u);

	/*
	 * RI - Ring detector is active.
	 * CD/DCD/CAR - Carrier detect is always active. For some reason
	 *	Linux has different names for carrier detect.
	 * DSR - Data Set Ready is reported active as the hardware doesn't
	 *	support it. Not sure whether Linux supports this yet.
	 * CTS - Clear To Send. Always set to active, as the hardware handles
	 *	CTS automatically.
	 */
	if (tup->enable_modem_interrupt)
		return TIOCM_RI | TIOCM_CD | TIOCM_DSR | TIOCM_CTS;
	return TIOCM_CTS;
}

static void set_rts(struct tegra_uart_port *tup, bool active)
{
	unsigned long mcr;

	mcr = tup->mcr_shadow;
	if (active)
		mcr |= TEGRA_UART_MCR_RTS_EN;
	else
		mcr &= ~TEGRA_UART_MCR_RTS_EN;
	if (mcr != tup->mcr_shadow) {
		tegra_uart_write(tup, mcr, UART_MCR);
		tup->mcr_shadow = mcr;
	}
	return;
}

static void set_dtr(struct tegra_uart_port *tup, bool active)
{
	unsigned long mcr;

	mcr = tup->mcr_shadow;
	if (active)
		mcr |= UART_MCR_DTR;
	else
		mcr &= ~UART_MCR_DTR;
	if (mcr != tup->mcr_shadow) {
		tegra_uart_write(tup, mcr, UART_MCR);
		tup->mcr_shadow = mcr;
	}
	return;
}

static void tegra_uart_set_mctrl(struct uart_port *u, unsigned int mctrl)
{
	struct tegra_uart_port *tup = to_tegra_uport(u);
	unsigned long mcr;
	int dtr_enable;

	mcr = tup->mcr_shadow;
	tup->rts_active = !!(mctrl & TIOCM_RTS);
	set_rts(tup, tup->rts_active);

	dtr_enable = !!(mctrl & TIOCM_DTR);
	set_dtr(tup, dtr_enable);
	return;
}

static void tegra_uart_break_ctl(struct uart_port *u, int break_ctl)
{
	struct tegra_uart_port *tup = to_tegra_uport(u);
	unsigned long lcr;

	lcr = tup->lcr_shadow;
	if (break_ctl)
		lcr |= UART_LCR_SBC;
	else
		lcr &= ~UART_LCR_SBC;
	tegra_uart_write(tup, lcr, UART_LCR);
	tup->lcr_shadow = lcr;
}

/* Wait for a symbol-time. */
static void tegra_uart_wait_sym_time(struct tegra_uart_port *tup,
		unsigned int syms)
{
	if (tup->current_baud)
		udelay(DIV_ROUND_UP(syms * tup->symb_bit * 1000000,
			tup->current_baud));
}
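
/*
 * Flush the Rx/Tx FIFOs selected by fcr_bits. On chips that do not allow
 * clearing the FIFOs while they are enabled, the FIFOs are briefly
 * disabled around the clear and then re-enabled.
 */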
static void tegra_uart_fifo_reset(struct tegra_uart_port *tup, u8 fcr_bits)
{
	unsigned long fcr = tup->fcr_shadow;

	if (tup->cdata->allow_txfifo_reset_fifo_mode) {
		fcr |= fcr_bits & (UART_FCR_CLEAR_RCVR | UART_FCR_CLEAR_XMIT);
		tegra_uart_write(tup, fcr, UART_FCR);
	} else {
		fcr &= ~UART_FCR_ENABLE_FIFO;
		tegra_uart_write(tup, fcr, UART_FCR);
		udelay(60);
		fcr |= fcr_bits & (UART_FCR_CLEAR_RCVR | UART_FCR_CLEAR_XMIT);
		tegra_uart_write(tup, fcr, UART_FCR);
		fcr |= UART_FCR_ENABLE_FIFO;
		tegra_uart_write(tup, fcr, UART_FCR);
	}

	/* Dummy read to ensure the write is posted */
	tegra_uart_read(tup, UART_SCR);

	/* Wait for the flush to propagate. */
	tegra_uart_wait_sym_time(tup, 1);
}
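
/*
 * Program the requested baud rate: either set the UART clock to 16x the
 * baud rate (divisor 1) or compute a classic 16550 divisor from the fixed
 * clock rate, then write the divisor latch via DLAB.
 */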
static int tegra_set_baudrate(struct tegra_uart_port *tup, unsigned int baud)
{
	unsigned long rate;
	unsigned int divisor;
	unsigned long lcr;
	int ret;

	if (tup->current_baud == baud)
		return 0;

	if (tup->cdata->support_clk_src_div) {
		rate = baud * 16;
		ret = clk_set_rate(tup->uart_clk, rate);
		if (ret < 0) {
			dev_err(tup->uport.dev,
				"clk_set_rate() failed for rate %lu\n", rate);
			return ret;
		}
		divisor = 1;
	} else {
		rate = clk_get_rate(tup->uart_clk);
		divisor = DIV_ROUND_CLOSEST(rate, baud * 16);
	}

	lcr = tup->lcr_shadow;
	lcr |= UART_LCR_DLAB;
	tegra_uart_write(tup, lcr, UART_LCR);

	tegra_uart_write(tup, divisor & 0xFF, UART_TX);
	tegra_uart_write(tup, ((divisor >> 8) & 0xFF), UART_IER);

	lcr &= ~UART_LCR_DLAB;
	tegra_uart_write(tup, lcr, UART_LCR);

	/* Dummy read to ensure the write is posted */
	tegra_uart_read(tup, UART_SCR);

	tup->current_baud = baud;

	/* wait two character intervals at new rate */
	tegra_uart_wait_sym_time(tup, 2);
	return 0;
}

static char tegra_uart_decode_rx_error(struct tegra_uart_port *tup,
			unsigned long lsr)
{
	char flag = TTY_NORMAL;

	if (unlikely(lsr & TEGRA_UART_LSR_ANY)) {
		if (lsr & UART_LSR_OE) {
			/* Overrun error */
			flag |= TTY_OVERRUN;
			tup->uport.icount.overrun++;
			dev_err(tup->uport.dev, "Got overrun errors\n");
		} else if (lsr & UART_LSR_PE) {
			/* Parity error */
			flag |= TTY_PARITY;
			tup->uport.icount.parity++;
			dev_err(tup->uport.dev, "Got Parity errors\n");
		} else if (lsr & UART_LSR_FE) {
			flag |= TTY_FRAME;
			tup->uport.icount.frame++;
			dev_err(tup->uport.dev, "Got frame errors\n");
		} else if (lsr & UART_LSR_BI) {
			dev_err(tup->uport.dev, "Got Break\n");
			tup->uport.icount.brk++;
			/* If FIFO read error without any data, reset Rx FIFO */
			if (!(lsr & UART_LSR_DR) && (lsr & UART_LSR_FIFOE))
				tegra_uart_fifo_reset(tup, UART_FCR_CLEAR_RCVR);
		}
	}
	return flag;
}

static int tegra_uart_request_port(struct uart_port *u)
{
	return 0;
}

static void tegra_uart_release_port(struct uart_port *u)
{
	/* Nothing to do here */
}
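
/*
 * Push up to max_bytes from the Tx circular buffer into the Tx FIFO,
 * stopping early if the chip can report (and does report) a full FIFO.
 */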
static void tegra_uart_fill_tx_fifo(struct tegra_uart_port *tup, int max_bytes)
{
	struct circ_buf *xmit = &tup->uport.state->xmit;
	int i;

	for (i = 0; i < max_bytes; i++) {
		BUG_ON(uart_circ_empty(xmit));
		if (tup->cdata->tx_fifo_full_status) {
			unsigned long lsr = tegra_uart_read(tup, UART_LSR);
			if ((lsr & TEGRA_UART_LSR_TXFIFO_FULL))
				break;
		}
		tegra_uart_write(tup, xmit->buf[xmit->tail], UART_TX);
		xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
		tup->uport.icount.tx++;
	}
}

static void tegra_uart_start_pio_tx(struct tegra_uart_port *tup,
		unsigned int bytes)
{
	if (bytes > TEGRA_UART_MIN_DMA)
		bytes = TEGRA_UART_MIN_DMA;

	tup->tx_in_progress = TEGRA_UART_TX_PIO;
	tup->tx_bytes = bytes;
	tup->ier_shadow |= UART_IER_THRI;
	tegra_uart_write(tup, tup->ier_shadow, UART_IER);
}
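
/*
 * Tx DMA completion callback: advance the circular buffer tail by the
 * number of bytes actually transferred and start the next transmission.
 */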
static void tegra_uart_tx_dma_complete(void *args)
{
	struct tegra_uart_port *tup = args;
	struct circ_buf *xmit = &tup->uport.state->xmit;
	struct dma_tx_state state;
	unsigned long flags;
	int count;

	dmaengine_tx_status(tup->tx_dma_chan, tup->tx_cookie, &state);
	count = tup->tx_bytes_requested - state.residue;
	async_tx_ack(tup->tx_dma_desc);
	spin_lock_irqsave(&tup->uport.lock, flags);
	xmit->tail = (xmit->tail + count) & (UART_XMIT_SIZE - 1);
	tup->tx_in_progress = 0;
	if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
		uart_write_wakeup(&tup->uport);
	tegra_uart_start_next_tx(tup);
	spin_unlock_irqrestore(&tup->uport.lock, flags);
}

static int tegra_uart_start_tx_dma(struct tegra_uart_port *tup,
		unsigned long count)
{
	struct circ_buf *xmit = &tup->uport.state->xmit;
	dma_addr_t tx_phys_addr;

	dma_sync_single_for_device(tup->uport.dev, tup->tx_dma_buf_phys,
				UART_XMIT_SIZE, DMA_TO_DEVICE);
	tup->tx_bytes = count & ~(0xF);
	tx_phys_addr = tup->tx_dma_buf_phys + xmit->tail;
	tup->tx_dma_desc = dmaengine_prep_slave_single(tup->tx_dma_chan,
				tx_phys_addr, tup->tx_bytes, DMA_MEM_TO_DEV,
				DMA_PREP_INTERRUPT);
	if (!tup->tx_dma_desc) {
		dev_err(tup->uport.dev, "Not able to get desc for Tx\n");
		return -EIO;
	}

	tup->tx_dma_desc->callback = tegra_uart_tx_dma_complete;
	tup->tx_dma_desc->callback_param = tup;
	tup->tx_in_progress = TEGRA_UART_TX_DMA;
	tup->tx_bytes_requested = tup->tx_bytes;
	tup->tx_cookie = dmaengine_submit(tup->tx_dma_desc);
	dma_async_issue_pending(tup->tx_dma_chan);
	return 0;
}
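
/*
 * Pick the next Tx transfer: small or unaligned chunks go out by PIO,
 * larger 4-byte-aligned runs are handed to DMA.
 */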
static void tegra_uart_start_next_tx(struct tegra_uart_port *tup)
{
	unsigned long tail;
	unsigned long count;
	struct circ_buf *xmit = &tup->uport.state->xmit;

	tail = (unsigned long)&xmit->buf[xmit->tail];
	count = CIRC_CNT_TO_END(xmit->head, xmit->tail, UART_XMIT_SIZE);
	if (!count)
		return;

	if (count < TEGRA_UART_MIN_DMA)
		tegra_uart_start_pio_tx(tup, count);
	else if (BYTES_TO_ALIGN(tail) > 0)
		tegra_uart_start_pio_tx(tup, BYTES_TO_ALIGN(tail));
	else
		tegra_uart_start_tx_dma(tup, count);
}

/* Called by serial core driver with u->lock taken. */
static void tegra_uart_start_tx(struct uart_port *u)
{
	struct tegra_uart_port *tup = to_tegra_uport(u);
	struct circ_buf *xmit = &u->state->xmit;

	if (!uart_circ_empty(xmit) && !tup->tx_in_progress)
		tegra_uart_start_next_tx(tup);
}

static unsigned int tegra_uart_tx_empty(struct uart_port *u)
{
	struct tegra_uart_port *tup = to_tegra_uport(u);
	unsigned int ret = 0;
	unsigned long flags;

	spin_lock_irqsave(&u->lock, flags);
	if (!tup->tx_in_progress) {
		unsigned long lsr = tegra_uart_read(tup, UART_LSR);
		if ((lsr & TX_EMPTY_STATUS) == TX_EMPTY_STATUS)
			ret = TIOCSER_TEMT;
	}
	spin_unlock_irqrestore(&u->lock, flags);
	return ret;
}

static void tegra_uart_stop_tx(struct uart_port *u)
{
	struct tegra_uart_port *tup = to_tegra_uport(u);
	struct circ_buf *xmit = &tup->uport.state->xmit;
	struct dma_tx_state state;
	int count;

	dmaengine_terminate_all(tup->tx_dma_chan);
	dmaengine_tx_status(tup->tx_dma_chan, tup->tx_cookie, &state);
	count = tup->tx_bytes_requested - state.residue;
	async_tx_ack(tup->tx_dma_desc);
	xmit->tail = (xmit->tail + count) & (UART_XMIT_SIZE - 1);
	tup->tx_in_progress = 0;
	return;
}

static void tegra_uart_handle_tx_pio(struct tegra_uart_port *tup)
{
	struct circ_buf *xmit = &tup->uport.state->xmit;

	tegra_uart_fill_tx_fifo(tup, tup->tx_bytes);
	tup->tx_in_progress = 0;
	if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
		uart_write_wakeup(&tup->uport);
	tegra_uart_start_next_tx(tup);
	return;
}

static void tegra_uart_handle_rx_pio(struct tegra_uart_port *tup,
		struct tty_port *tty)
{
	do {
		char flag = TTY_NORMAL;
		unsigned long lsr = 0;
		unsigned char ch;

		lsr = tegra_uart_read(tup, UART_LSR);
		if (!(lsr & UART_LSR_DR))
			break;

		flag = tegra_uart_decode_rx_error(tup, lsr);
		ch = (unsigned char) tegra_uart_read(tup, UART_RX);
		tup->uport.icount.rx++;

		if (!uart_handle_sysrq_char(&tup->uport, ch) && tty)
			tty_insert_flip_char(tty, ch, flag);
	} while (1);

	return;
}
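
/* Copy received bytes from the Rx DMA bounce buffer into the tty layer. */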
static void tegra_uart_copy_rx_to_tty(struct tegra_uart_port *tup,
		struct tty_port *tty, int count)
{
	int copied;

	tup->uport.icount.rx += count;
	if (!tty) {
		dev_err(tup->uport.dev, "No tty port\n");
		return;
	}
	dma_sync_single_for_cpu(tup->uport.dev, tup->rx_dma_buf_phys,
				TEGRA_UART_RX_DMA_BUFFER_SIZE, DMA_FROM_DEVICE);
	copied = tty_insert_flip_string(tty,
			((unsigned char *)(tup->rx_dma_buf_virt)), count);
	if (copied != count) {
		WARN_ON(1);
		dev_err(tup->uport.dev, "RxData copy to tty layer failed\n");
	}
	dma_sync_single_for_device(tup->uport.dev, tup->rx_dma_buf_phys,
				TEGRA_UART_RX_DMA_BUFFER_SIZE, DMA_TO_DEVICE);
}

static void tegra_uart_rx_dma_complete(void *args)
{
	struct tegra_uart_port *tup = args;
	struct uart_port *u = &tup->uport;
	int count = tup->rx_bytes_requested;
	struct tty_struct *tty = tty_port_tty_get(&tup->uport.state->port);
	struct tty_port *port = &u->state->port;
	unsigned long flags;

	async_tx_ack(tup->rx_dma_desc);
	spin_lock_irqsave(&u->lock, flags);

	/* Deactivate flow control to stop sender */
	if (tup->rts_active)
		set_rts(tup, false);

	/* If we are here, DMA is stopped */
	if (count)
		tegra_uart_copy_rx_to_tty(tup, port, count);

	tegra_uart_handle_rx_pio(tup, port);
	if (tty) {
		tty_flip_buffer_push(port);
		tty_kref_put(tty);
	}
	tegra_uart_start_rx_dma(tup);

	/* Activate flow control to start transfer */
	if (tup->rts_active)
		set_rts(tup, true);
	spin_unlock_irqrestore(&u->lock, flags);
}
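
/*
 * Stop the in-flight Rx DMA, push whatever it received (plus any bytes
 * still sitting in the FIFO) to the tty layer, then re-queue the Rx DMA.
 */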
static void tegra_uart_handle_rx_dma(struct tegra_uart_port *tup)
{
	struct dma_tx_state state;
	struct tty_struct *tty = tty_port_tty_get(&tup->uport.state->port);
	struct tty_port *port = &tup->uport.state->port;
	int count;

	/* Deactivate flow control to stop sender */
	if (tup->rts_active)
		set_rts(tup, false);

	dmaengine_terminate_all(tup->rx_dma_chan);
	dmaengine_tx_status(tup->rx_dma_chan, tup->rx_cookie, &state);
	count = tup->rx_bytes_requested - state.residue;

	/* If we are here, DMA is stopped */
	if (count)
		tegra_uart_copy_rx_to_tty(tup, port, count);

	tegra_uart_handle_rx_pio(tup, port);
	if (tty) {
		tty_flip_buffer_push(port);
		tty_kref_put(tty);
	}
	tegra_uart_start_rx_dma(tup);

	if (tup->rts_active)
		set_rts(tup, true);
}
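
/* Queue a full-buffer Rx DMA transfer and issue it on the channel. */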
static int tegra_uart_start_rx_dma(struct tegra_uart_port *tup)
{
	unsigned int count = TEGRA_UART_RX_DMA_BUFFER_SIZE;

	tup->rx_dma_desc = dmaengine_prep_slave_single(tup->rx_dma_chan,
				tup->rx_dma_buf_phys, count, DMA_DEV_TO_MEM,
				DMA_PREP_INTERRUPT);
	if (!tup->rx_dma_desc) {
		dev_err(tup->uport.dev, "Not able to get desc for Rx\n");
		return -EIO;
	}

	tup->rx_dma_desc->callback = tegra_uart_rx_dma_complete;
	tup->rx_dma_desc->callback_param = tup;
	dma_sync_single_for_device(tup->uport.dev, tup->rx_dma_buf_phys,
				count, DMA_TO_DEVICE);
	tup->rx_bytes_requested = count;
	tup->rx_cookie = dmaengine_submit(tup->rx_dma_desc);
	dma_async_issue_pending(tup->rx_dma_chan);
	return 0;
}

static void tegra_uart_handle_modem_signal_change(struct uart_port *u)
{
	struct tegra_uart_port *tup = to_tegra_uport(u);
	unsigned long msr;

	msr = tegra_uart_read(tup, UART_MSR);
	if (!(msr & UART_MSR_ANY_DELTA))
		return;

	if (msr & UART_MSR_TERI)
		tup->uport.icount.rng++;
	if (msr & UART_MSR_DDSR)
		tup->uport.icount.dsr++;

	/* We may only get DDCD when HW init and reset */
	if (msr & UART_MSR_DDCD)
		uart_handle_dcd_change(&tup->uport, msr & UART_MSR_DCD);

	/* Will start/stop_tx accordingly */
	if (msr & UART_MSR_DCTS)
		uart_handle_cts_change(&tup->uport, msr & UART_MSR_CTS);
	return;
}
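
/*
 * Main interrupt handler: drain the IIR until it reports no pending
 * interrupt, deferring the actual Rx DMA handling until the loop exits.
 */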
static irqreturn_t tegra_uart_isr(int irq, void *data)
{
	struct tegra_uart_port *tup = data;
	struct uart_port *u = &tup->uport;
	unsigned long iir;
	unsigned long ier;
	bool is_rx_int = false;
	unsigned long flags;

	spin_lock_irqsave(&u->lock, flags);
	while (1) {
		iir = tegra_uart_read(tup, UART_IIR);
		if (iir & UART_IIR_NO_INT) {
			if (is_rx_int) {
				tegra_uart_handle_rx_dma(tup);
				if (tup->rx_in_progress) {
					ier = tup->ier_shadow;
					ier |= (UART_IER_RLSI | UART_IER_RTOIE |
						TEGRA_UART_IER_EORD);
					tup->ier_shadow = ier;
					tegra_uart_write(tup, ier, UART_IER);
				}
			}
			spin_unlock_irqrestore(&u->lock, flags);
			return IRQ_HANDLED;
		}

		switch ((iir >> 1) & 0x7) {
		case 0: /* Modem signal change interrupt */
			tegra_uart_handle_modem_signal_change(u);
			break;

		case 1: /* Transmit interrupt only triggered when using PIO */
			tup->ier_shadow &= ~UART_IER_THRI;
			tegra_uart_write(tup, tup->ier_shadow, UART_IER);
			tegra_uart_handle_tx_pio(tup);
			break;

		case 4: /* End of data */
		case 6: /* Rx timeout */
		case 2: /* Receive */
			if (!is_rx_int) {
				is_rx_int = true;
				/* Disable Rx interrupts */
				ier = tup->ier_shadow;
				ier |= UART_IER_RDI;
				tegra_uart_write(tup, ier, UART_IER);
				ier &= ~(UART_IER_RDI | UART_IER_RLSI |
					UART_IER_RTOIE | TEGRA_UART_IER_EORD);
				tup->ier_shadow = ier;
				tegra_uart_write(tup, ier, UART_IER);
			}
			break;

		case 3: /* Receive error */
			tegra_uart_decode_rx_error(tup,
					tegra_uart_read(tup, UART_LSR));
			break;

		case 5: /* break nothing to handle */
		case 7: /* break nothing to handle */
			break;
		}
	}
}
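
/*
 * Stop reception: mask the Rx interrupts, terminate the Rx DMA and flush
 * any remaining data to the tty layer.
 */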
static void tegra_uart_stop_rx(struct uart_port *u)
{
	struct tegra_uart_port *tup = to_tegra_uport(u);
	struct tty_struct *tty = tty_port_tty_get(&tup->uport.state->port);
	struct tty_port *port = &u->state->port;
	struct dma_tx_state state;
	unsigned long ier;
	int count;

	if (tup->rts_active)
		set_rts(tup, false);

	if (!tup->rx_in_progress)
		return;

	tegra_uart_wait_sym_time(tup, 1); /* wait a character interval */

	ier = tup->ier_shadow;
	ier &= ~(UART_IER_RDI | UART_IER_RLSI | UART_IER_RTOIE |
					TEGRA_UART_IER_EORD);
	tup->ier_shadow = ier;
	tegra_uart_write(tup, ier, UART_IER);
	tup->rx_in_progress = 0;
	if (tup->rx_dma_chan) {
		dmaengine_terminate_all(tup->rx_dma_chan);
		dmaengine_tx_status(tup->rx_dma_chan, tup->rx_cookie, &state);
		async_tx_ack(tup->rx_dma_desc);
		count = tup->rx_bytes_requested - state.residue;
		tegra_uart_copy_rx_to_tty(tup, port, count);
		tegra_uart_handle_rx_pio(tup, port);
	} else {
		tegra_uart_handle_rx_pio(tup, port);
	}
	if (tty) {
		tty_flip_buffer_push(port);
		tty_kref_put(tty);
	}
	return;
}
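
/*
 * Shut the controller down: wait (bounded by the estimated FIFO drain
 * time) for the Tx FIFO to empty, reset both FIFOs and gate the clock.
 */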
static void tegra_uart_hw_deinit(struct tegra_uart_port *tup)
{
	unsigned long flags;
	unsigned long char_time = DIV_ROUND_UP(10000000, tup->current_baud);
	unsigned long fifo_empty_time = tup->uport.fifosize * char_time;
	unsigned long wait_time;
	unsigned long lsr;
	unsigned long msr;
	unsigned long mcr;

	/* Disable interrupts */
	tegra_uart_write(tup, 0, UART_IER);

	lsr = tegra_uart_read(tup, UART_LSR);
	if ((lsr & UART_LSR_TEMT) != UART_LSR_TEMT) {
		msr = tegra_uart_read(tup, UART_MSR);
		mcr = tegra_uart_read(tup, UART_MCR);
		if ((mcr & TEGRA_UART_MCR_CTS_EN) && (msr & UART_MSR_CTS))
			dev_err(tup->uport.dev,
				"Tx Fifo not empty, CTS disabled, waiting\n");

		/* Wait for Tx fifo to be empty */
		while ((lsr & UART_LSR_TEMT) != UART_LSR_TEMT) {
			wait_time = min(fifo_empty_time, 100lu);
			udelay(wait_time);
			fifo_empty_time -= wait_time;
			if (!fifo_empty_time) {
				msr = tegra_uart_read(tup, UART_MSR);
				mcr = tegra_uart_read(tup, UART_MCR);
				if ((mcr & TEGRA_UART_MCR_CTS_EN) &&
					(msr & UART_MSR_CTS))
					dev_err(tup->uport.dev,
						"Slave not ready\n");
				break;
			}
			lsr = tegra_uart_read(tup, UART_LSR);
		}
	}

	spin_lock_irqsave(&tup->uport.lock, flags);
	/* Reset the Rx and Tx FIFOs */
	tegra_uart_fifo_reset(tup, UART_FCR_CLEAR_XMIT | UART_FCR_CLEAR_RCVR);
	tup->current_baud = 0;
	spin_unlock_irqrestore(&tup->uport.lock, flags);

	clk_disable_unprepare(tup->uart_clk);
}

static int tegra_uart_hw_init(struct tegra_uart_port *tup)
{
	int ret;

	tup->fcr_shadow = 0;
	tup->mcr_shadow = 0;
	tup->lcr_shadow = 0;
	tup->ier_shadow = 0;
	tup->current_baud = 0;

	clk_prepare_enable(tup->uart_clk);

	/* Reset the UART controller to clear all previous status. */
	tegra_periph_reset_assert(tup->uart_clk);
	udelay(10);
	tegra_periph_reset_deassert(tup->uart_clk);

	tup->rx_in_progress = 0;
	tup->tx_in_progress = 0;

	/*
	 * Set the trigger level
	 *
	 * For PIO mode:
	 *
	 * For receive, this will interrupt the CPU after that many bytes are
	 * received; for the remaining bytes the receive timeout interrupt is
	 * received. Rx high watermark is set to 4.
	 *
	 * For transmit, if the transmit interrupt is enabled, this will
	 * interrupt the CPU when the number of entries in the FIFO reaches the
	 * low watermark. Tx low watermark is set to 16 bytes.
	 *
	 * For DMA mode:
	 *
	 * Set the Tx trigger to 16. This should match the DMA burst size that
	 * is programmed in the DMA registers.
	 */
	tup->fcr_shadow = UART_FCR_ENABLE_FIFO;
	tup->fcr_shadow |= UART_FCR_R_TRIG_01;
	tup->fcr_shadow |= TEGRA_UART_TX_TRIG_16B;
	tegra_uart_write(tup, tup->fcr_shadow, UART_FCR);

	/*
	 * Initialize the UART with default configuration
	 * (115200, N, 8, 1) so that the receive DMA buffer may be
	 * enqueued
	 */
	tup->lcr_shadow = TEGRA_UART_DEFAULT_LSR;
	tegra_set_baudrate(tup, TEGRA_UART_DEFAULT_BAUD);
	tup->fcr_shadow |= UART_FCR_DMA_SELECT;
	tegra_uart_write(tup, tup->fcr_shadow, UART_FCR);

	ret = tegra_uart_start_rx_dma(tup);
	if (ret < 0) {
		dev_err(tup->uport.dev, "Not able to start Rx DMA\n");
		return ret;
	}
	tup->rx_in_progress = 1;

	/*
	 * Enable IE_RXS for the receive status interrupts like line errors.
	 * Enable IE_RX_TIMEOUT to get the bytes which cannot be DMA'd.
	 *
	 * If using DMA mode, enable EORD instead of the receive interrupt,
	 * which will interrupt after the UART is done with the receive
	 * instead of the interrupt when the FIFO "threshold" is reached.
	 *
	 * EORD is a different interrupt than RX_TIMEOUT - RX_TIMEOUT occurs
	 * when the data is sitting in the FIFO and couldn't be transferred
	 * to the DMA as the DMA size alignment (4 bytes) is not met. EORD
	 * will be triggered when there is a pause of the incoming data
	 * stream for 4 characters long.
	 *
	 * For pauses in the data which are not aligned to 4 bytes, we get
	 * both the EORD as well as RX_TIMEOUT - SW sees RX_TIMEOUT first
	 * and then the EORD.
	 */
	tup->ier_shadow = UART_IER_RLSI | UART_IER_RTOIE | TEGRA_UART_IER_EORD;
	tegra_uart_write(tup, tup->ier_shadow, UART_IER);
	return 0;
}
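
/*
 * Allocate and configure a DMA channel for one direction. Rx uses a
 * dedicated coherent bounce buffer; Tx maps the circular transmit buffer.
 */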
static int tegra_uart_dma_channel_allocate(struct tegra_uart_port *tup,
		bool dma_to_memory)
{
	struct dma_chan *dma_chan;
	unsigned char *dma_buf;
	dma_addr_t dma_phys;
	int ret;
	struct dma_slave_config dma_sconfig;
	dma_cap_mask_t mask;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);
	dma_chan = dma_request_channel(mask, NULL, NULL);
	if (!dma_chan) {
		dev_err(tup->uport.dev,
			"Dma channel is not available, will try later\n");
		return -EPROBE_DEFER;
	}

	if (dma_to_memory) {
		dma_buf = dma_alloc_coherent(tup->uport.dev,
				TEGRA_UART_RX_DMA_BUFFER_SIZE,
				&dma_phys, GFP_KERNEL);
		if (!dma_buf) {
			dev_err(tup->uport.dev,
				"Not able to allocate the dma buffer\n");
			dma_release_channel(dma_chan);
			return -ENOMEM;
		}
	} else {
		dma_phys = dma_map_single(tup->uport.dev,
			tup->uport.state->xmit.buf, UART_XMIT_SIZE,
			DMA_TO_DEVICE);
		dma_buf = tup->uport.state->xmit.buf;
	}

	dma_sconfig.slave_id = tup->dma_req_sel;
	if (dma_to_memory) {
		dma_sconfig.src_addr = tup->uport.mapbase;
		dma_sconfig.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
		dma_sconfig.src_maxburst = 4;
	} else {
		dma_sconfig.dst_addr = tup->uport.mapbase;
		dma_sconfig.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
		dma_sconfig.dst_maxburst = 16;
	}

	ret = dmaengine_slave_config(dma_chan, &dma_sconfig);
	if (ret < 0) {
		dev_err(tup->uport.dev,
			"Dma slave config failed, err = %d\n", ret);
		goto scrub;
	}

	if (dma_to_memory) {
		tup->rx_dma_chan = dma_chan;
		tup->rx_dma_buf_virt = dma_buf;
		tup->rx_dma_buf_phys = dma_phys;
	} else {
		tup->tx_dma_chan = dma_chan;
		tup->tx_dma_buf_virt = dma_buf;
		tup->tx_dma_buf_phys = dma_phys;
	}
	return 0;

scrub:
	dma_release_channel(dma_chan);
	return ret;
}

static void tegra_uart_dma_channel_free(struct tegra_uart_port *tup,
		bool dma_to_memory)
{
	struct dma_chan *dma_chan;

	if (dma_to_memory) {
		dma_free_coherent(tup->uport.dev, TEGRA_UART_RX_DMA_BUFFER_SIZE,
				tup->rx_dma_buf_virt, tup->rx_dma_buf_phys);
		dma_chan = tup->rx_dma_chan;
		tup->rx_dma_chan = NULL;
		tup->rx_dma_buf_phys = 0;
		tup->rx_dma_buf_virt = NULL;
	} else {
		dma_unmap_single(tup->uport.dev, tup->tx_dma_buf_phys,
			UART_XMIT_SIZE, DMA_TO_DEVICE);
		dma_chan = tup->tx_dma_chan;
		tup->tx_dma_chan = NULL;
		tup->tx_dma_buf_phys = 0;
		tup->tx_dma_buf_virt = NULL;
	}
	dma_release_channel(dma_chan);
}

static int tegra_uart_startup(struct uart_port *u)
{
	struct tegra_uart_port *tup = to_tegra_uport(u);
	int ret;

	ret = tegra_uart_dma_channel_allocate(tup, false);
	if (ret < 0) {
		dev_err(u->dev, "Tx Dma allocation failed, err = %d\n", ret);
		return ret;
	}

	ret = tegra_uart_dma_channel_allocate(tup, true);
	if (ret < 0) {
		dev_err(u->dev, "Rx Dma allocation failed, err = %d\n", ret);
		goto fail_rx_dma;
	}

	ret = tegra_uart_hw_init(tup);
	if (ret < 0) {
		dev_err(u->dev, "Uart HW init failed, err = %d\n", ret);
		goto fail_hw_init;
	}

	ret = request_irq(u->irq, tegra_uart_isr, IRQF_DISABLED,
				dev_name(u->dev), tup);
	if (ret < 0) {
		dev_err(u->dev, "Failed to register ISR for IRQ %d\n", u->irq);
		goto fail_hw_init;
	}
	return 0;

fail_hw_init:
	tegra_uart_dma_channel_free(tup, true);
fail_rx_dma:
	tegra_uart_dma_channel_free(tup, false);
	return ret;
}

static void tegra_uart_shutdown(struct uart_port *u)
{
	struct tegra_uart_port *tup = to_tegra_uport(u);

	tegra_uart_hw_deinit(tup);

	tup->rx_in_progress = 0;
	tup->tx_in_progress = 0;

	tegra_uart_dma_channel_free(tup, true);
	tegra_uart_dma_channel_free(tup, false);
	free_irq(u->irq, tup);
}

static void tegra_uart_enable_ms(struct uart_port *u)
{
	struct tegra_uart_port *tup = to_tegra_uport(u);

	if (tup->enable_modem_interrupt) {
		tup->ier_shadow |= UART_IER_MSI;
		tegra_uart_write(tup, tup->ier_shadow, UART_IER);
	}
}

static void tegra_uart_set_termios(struct uart_port *u,
		struct ktermios *termios, struct ktermios *oldtermios)
{
	struct tegra_uart_port *tup = to_tegra_uport(u);
	unsigned int baud;
	unsigned long flags;
	unsigned int lcr;
	int symb_bit = 1;
	struct clk *parent_clk = clk_get_parent(tup->uart_clk);
	unsigned long parent_clk_rate = clk_get_rate(parent_clk);
	int max_divider = (tup->cdata->support_clk_src_div) ? 0x7FFF : 0xFFFF;

	max_divider *= 16;
	spin_lock_irqsave(&u->lock, flags);

	/* Changing configuration, it is safe to stop any rx now */
	if (tup->rts_active)
		set_rts(tup, false);

	/* Clear all interrupts as the configuration is going to be changed */
	tegra_uart_write(tup, tup->ier_shadow | UART_IER_RDI, UART_IER);
	tegra_uart_read(tup, UART_IER);
	tegra_uart_write(tup, 0, UART_IER);
	tegra_uart_read(tup, UART_IER);

	/* Parity */
	lcr = tup->lcr_shadow;
	lcr &= ~UART_LCR_PARITY;

	/* CMSPAR isn't supported by this driver */
	termios->c_cflag &= ~CMSPAR;

	if ((termios->c_cflag & PARENB) == PARENB) {
		symb_bit++;
		if (termios->c_cflag & PARODD) {
			lcr |= UART_LCR_PARITY;
			lcr &= ~UART_LCR_EPAR;
			lcr &= ~UART_LCR_SPAR;
		} else {
			lcr |= UART_LCR_PARITY;
			lcr |= UART_LCR_EPAR;
			lcr &= ~UART_LCR_SPAR;
		}
	}

	lcr &= ~UART_LCR_WLEN8;
	switch (termios->c_cflag & CSIZE) {
	case CS5:
		lcr |= UART_LCR_WLEN5;
		symb_bit += 5;
		break;
	case CS6:
		lcr |= UART_LCR_WLEN6;
		symb_bit += 6;
		break;
	case CS7:
		lcr |= UART_LCR_WLEN7;
		symb_bit += 7;
		break;
	default:
		lcr |= UART_LCR_WLEN8;
		symb_bit += 8;
		break;
	}

	/* Stop bits */
	if (termios->c_cflag & CSTOPB) {
		lcr |= UART_LCR_STOP;
		symb_bit += 2;
	} else {
		lcr &= ~UART_LCR_STOP;
		symb_bit++;
	}

	tegra_uart_write(tup, lcr, UART_LCR);
	tup->lcr_shadow = lcr;
	tup->symb_bit = symb_bit;

	/* Baud rate. */
	baud = uart_get_baud_rate(u, termios, oldtermios,
			parent_clk_rate/max_divider,
			parent_clk_rate/16);
	spin_unlock_irqrestore(&u->lock, flags);
	tegra_set_baudrate(tup, baud);
	if (tty_termios_baud_rate(termios))
		tty_termios_encode_baud_rate(termios, baud, baud);
	spin_lock_irqsave(&u->lock, flags);

	/* Flow control */
	if (termios->c_cflag & CRTSCTS) {
		tup->mcr_shadow |= TEGRA_UART_MCR_CTS_EN;
		tup->mcr_shadow &= ~TEGRA_UART_MCR_RTS_EN;
		tegra_uart_write(tup, tup->mcr_shadow, UART_MCR);
		/* if top layer has asked to set rts active then do so here */
		if (tup->rts_active)
			set_rts(tup, true);
	} else {
		tup->mcr_shadow &= ~TEGRA_UART_MCR_CTS_EN;
		tup->mcr_shadow &= ~TEGRA_UART_MCR_RTS_EN;
		tegra_uart_write(tup, tup->mcr_shadow, UART_MCR);
	}

	/* update the port timeout based on new settings */
	uart_update_timeout(u, termios->c_cflag, baud);

	/* Make sure all writes have completed */
	tegra_uart_read(tup, UART_IER);

	/* Re-enable interrupts */
	tegra_uart_write(tup, tup->ier_shadow, UART_IER);
	tegra_uart_read(tup, UART_IER);
	spin_unlock_irqrestore(&u->lock, flags);
	return;
}

/*
 * Flush any TX data submitted for DMA and PIO. Called when the
 * TX circular buffer is reset.
 */
static void tegra_uart_flush_buffer(struct uart_port *u)
{
	struct tegra_uart_port *tup = to_tegra_uport(u);

	tup->tx_bytes = 0;
	if (tup->tx_dma_chan)
		dmaengine_terminate_all(tup->tx_dma_chan);
	return;
}

static const char *tegra_uart_type(struct uart_port *u)
{
	return TEGRA_UART_TYPE;
}

static struct uart_ops tegra_uart_ops = {
	.tx_empty	= tegra_uart_tx_empty,
	.set_mctrl	= tegra_uart_set_mctrl,
	.get_mctrl	= tegra_uart_get_mctrl,
	.stop_tx	= tegra_uart_stop_tx,
	.start_tx	= tegra_uart_start_tx,
	.stop_rx	= tegra_uart_stop_rx,
	.flush_buffer	= tegra_uart_flush_buffer,
	.enable_ms	= tegra_uart_enable_ms,
	.break_ctl	= tegra_uart_break_ctl,
	.startup	= tegra_uart_startup,
	.shutdown	= tegra_uart_shutdown,
	.set_termios	= tegra_uart_set_termios,
	.type		= tegra_uart_type,
	.request_port	= tegra_uart_request_port,
	.release_port	= tegra_uart_release_port,
};

static struct uart_driver tegra_uart_driver = {
	.owner		= THIS_MODULE,
	.driver_name	= "tegra_hsuart",
	.dev_name	= "ttyTHS",
	.cons		= 0,
	.nr		= TEGRA_UART_MAXIMUM,
};
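
/*
 * Read the DMA request selector, the serial alias id and the
 * modem-interrupt flag from the device tree.
 */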
static int tegra_uart_parse_dt(struct platform_device *pdev,
	struct tegra_uart_port *tup)
{
	struct device_node *np = pdev->dev.of_node;
	u32 of_dma[2];
	int port;

	if (of_property_read_u32_array(np, "nvidia,dma-request-selector",
				of_dma, 2) >= 0) {
		tup->dma_req_sel = of_dma[1];
	} else {
		dev_err(&pdev->dev, "missing dma requestor in device tree\n");
		return -EINVAL;
	}

	port = of_alias_get_id(np, "serial");
	if (port < 0) {
		dev_err(&pdev->dev, "failed to get alias id, errno %d\n", port);
		return port;
	}
	tup->uport.line = port;

	tup->enable_modem_interrupt = of_property_read_bool(np,
					"nvidia,enable-modem-interrupt");
	return 0;
}

struct tegra_uart_chip_data tegra20_uart_chip_data = {
	.tx_fifo_full_status		= false,
	.allow_txfifo_reset_fifo_mode	= true,
	.support_clk_src_div		= false,
};

struct tegra_uart_chip_data tegra30_uart_chip_data = {
	.tx_fifo_full_status		= true,
	.allow_txfifo_reset_fifo_mode	= false,
	.support_clk_src_div		= true,
};

static struct of_device_id tegra_uart_of_match[] = {
	{
		.compatible	= "nvidia,tegra30-hsuart",
		.data		= &tegra30_uart_chip_data,
	}, {
		.compatible	= "nvidia,tegra20-hsuart",
		.data		= &tegra20_uart_chip_data,
	}, {
	},
};
MODULE_DEVICE_TABLE(of, tegra_uart_of_match);

static int tegra_uart_probe(struct platform_device *pdev)
{
	struct tegra_uart_port *tup;
	struct uart_port *u;
	struct resource *resource;
	int ret;
	const struct tegra_uart_chip_data *cdata;
	const struct of_device_id *match;

	match = of_match_device(tegra_uart_of_match, &pdev->dev);
	if (!match) {
		dev_err(&pdev->dev, "Error: No device match found\n");
		return -ENODEV;
	}
	cdata = match->data;

	tup = devm_kzalloc(&pdev->dev, sizeof(*tup), GFP_KERNEL);
	if (!tup) {
		dev_err(&pdev->dev, "Failed to allocate memory for tup\n");
		return -ENOMEM;
	}

	ret = tegra_uart_parse_dt(pdev, tup);
	if (ret < 0)
		return ret;

	u = &tup->uport;
	u->dev = &pdev->dev;
	u->ops = &tegra_uart_ops;
	u->type = PORT_TEGRA;
	u->fifosize = 32;
	tup->cdata = cdata;

	platform_set_drvdata(pdev, tup);
	resource = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!resource) {
		dev_err(&pdev->dev, "No IO memory resource\n");
		return -ENODEV;
	}

	u->mapbase = resource->start;
	u->membase = devm_request_and_ioremap(&pdev->dev, resource);
	if (!u->membase) {
		dev_err(&pdev->dev, "memregion/iomap address req failed\n");
		return -EADDRNOTAVAIL;
	}

	tup->uart_clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(tup->uart_clk)) {
		dev_err(&pdev->dev, "Couldn't get the clock\n");
		return PTR_ERR(tup->uart_clk);
	}

	u->iotype = UPIO_MEM32;
	u->irq = platform_get_irq(pdev, 0);
	u->regshift = 2;
	ret = uart_add_one_port(&tegra_uart_driver, u);
	if (ret < 0) {
		dev_err(&pdev->dev, "Failed to add uart port, err %d\n", ret);
		return ret;
	}
	return ret;
}

static int tegra_uart_remove(struct platform_device *pdev)
{
	struct tegra_uart_port *tup = platform_get_drvdata(pdev);
	struct uart_port *u = &tup->uport;

	uart_remove_one_port(&tegra_uart_driver, u);
	return 0;
}

#ifdef CONFIG_PM_SLEEP
static int tegra_uart_suspend(struct device *dev)
{
	struct tegra_uart_port *tup = dev_get_drvdata(dev);
	struct uart_port *u = &tup->uport;

	return uart_suspend_port(&tegra_uart_driver, u);
}

static int tegra_uart_resume(struct device *dev)
{
	struct tegra_uart_port *tup = dev_get_drvdata(dev);
	struct uart_port *u = &tup->uport;

	return uart_resume_port(&tegra_uart_driver, u);
}
#endif

static const struct dev_pm_ops tegra_uart_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(tegra_uart_suspend, tegra_uart_resume)
};

static struct platform_driver tegra_uart_platform_driver = {
	.probe		= tegra_uart_probe,
	.remove		= tegra_uart_remove,
	.driver		= {
		.name		= "serial-tegra",
		.of_match_table	= tegra_uart_of_match,
		.pm		= &tegra_uart_pm_ops,
	},
};

static int __init tegra_uart_init(void)
{
	int ret;

	ret = uart_register_driver(&tegra_uart_driver);
	if (ret < 0) {
		pr_err("Could not register %s driver\n",
			tegra_uart_driver.driver_name);
		return ret;
	}

	ret = platform_driver_register(&tegra_uart_platform_driver);
	if (ret < 0) {
		pr_err("Uart platform driver register failed, e = %d\n", ret);
		uart_unregister_driver(&tegra_uart_driver);
		return ret;
	}
	return 0;
}

static void __exit tegra_uart_exit(void)
{
	pr_info("Unloading tegra uart driver\n");
	platform_driver_unregister(&tegra_uart_platform_driver);
	uart_unregister_driver(&tegra_uart_driver);
}

module_init(tegra_uart_init);
module_exit(tegra_uart_exit);
MODULE_ALIAS("platform:serial-tegra");
MODULE_DESCRIPTION("High speed UART driver for tegra chipset");
MODULE_AUTHOR("Laxman Dewangan <ldewangan@nvidia.com>");
MODULE_LICENSE("GPL v2");