/*
 * serial_tegra.c
 *
 * High-speed serial driver for NVIDIA Tegra SoCs
 *
 * Copyright (c) 2012-2013, NVIDIA CORPORATION. All rights reserved.
 *
 * Author: Laxman Dewangan <ldewangan@nvidia.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/clk.h>
#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/pagemap.h>
#include <linux/platform_device.h>
#include <linux/serial.h>
#include <linux/serial_8250.h>
#include <linux/serial_core.h>
#include <linux/serial_reg.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/termios.h>
#include <linux/tty.h>
#include <linux/tty_flip.h>
#include <linux/clk/tegra.h>

#define TEGRA_UART_TYPE "TEGRA_UART"
#define TX_EMPTY_STATUS (UART_LSR_TEMT | UART_LSR_THRE)
#define BYTES_TO_ALIGN(x) ((unsigned long)(x) & 0x3)

#define TEGRA_UART_RX_DMA_BUFFER_SIZE 4096
#define TEGRA_UART_LSR_TXFIFO_FULL 0x100
#define TEGRA_UART_IER_EORD 0x20
#define TEGRA_UART_MCR_RTS_EN 0x40
#define TEGRA_UART_MCR_CTS_EN 0x20
#define TEGRA_UART_LSR_ANY (UART_LSR_OE | UART_LSR_BI | \
			UART_LSR_PE | UART_LSR_FE)
#define TEGRA_UART_IRDA_CSR 0x08
#define TEGRA_UART_SIR_ENABLED 0x80

#define TEGRA_UART_TX_PIO 1
#define TEGRA_UART_TX_DMA 2
#define TEGRA_UART_MIN_DMA 16
#define TEGRA_UART_FIFO_SIZE 32

/*
 * The Tx FIFO trigger level setting in the Tegra UART is the reverse of
 * a conventional UART.
 */
#define TEGRA_UART_TX_TRIG_16B 0x00
#define TEGRA_UART_TX_TRIG_8B 0x10
#define TEGRA_UART_TX_TRIG_4B 0x20
#define TEGRA_UART_TX_TRIG_1B 0x30

#define TEGRA_UART_MAXIMUM 5

/* Default UART setting when started: 115200, no parity, 1 stop bit, 8 data bits */
#define TEGRA_UART_DEFAULT_BAUD 115200
#define TEGRA_UART_DEFAULT_LSR UART_LCR_WLEN8

/* Tx transfer mode */
#define TEGRA_TX_PIO 1
#define TEGRA_TX_DMA 2

/**
 * tegra_uart_chip_data: SOC specific data.
 *
 * @tx_fifo_full_status: Status flag available for checking tx fifo full.
 * @allow_txfifo_reset_fifo_mode: allow TX FIFO reset with FIFO mode or not.
 *			Tegra30 does not allow this.
 * @support_clk_src_div: Clock source support the clock divider.
 */
struct tegra_uart_chip_data {
	bool tx_fifo_full_status;
	bool allow_txfifo_reset_fifo_mode;
	bool support_clk_src_div;
};

struct tegra_uart_port {
	struct uart_port uport;
	const struct tegra_uart_chip_data *cdata;

	struct clk *uart_clk;
	unsigned int current_baud;

	/* Register shadow */
	unsigned long fcr_shadow;
	unsigned long mcr_shadow;
	unsigned long lcr_shadow;
	unsigned long ier_shadow;
	bool rts_active;

	int tx_in_progress;
	unsigned int tx_bytes;

	bool enable_modem_interrupt;

	bool rx_timeout;
	int rx_in_progress;
	int symb_bit;
	int dma_req_sel;

	struct dma_chan *rx_dma_chan;
	struct dma_chan *tx_dma_chan;
	dma_addr_t rx_dma_buf_phys;
	dma_addr_t tx_dma_buf_phys;
	unsigned char *rx_dma_buf_virt;
	unsigned char *tx_dma_buf_virt;
	struct dma_async_tx_descriptor *tx_dma_desc;
	struct dma_async_tx_descriptor *rx_dma_desc;
	dma_cookie_t tx_cookie;
	dma_cookie_t rx_cookie;
	int tx_bytes_requested;
	int rx_bytes_requested;
};

static void tegra_uart_start_next_tx(struct tegra_uart_port *tup);
static int tegra_uart_start_rx_dma(struct tegra_uart_port *tup);

static inline unsigned long tegra_uart_read(struct tegra_uart_port *tup,
		unsigned long reg)
{
	return readl(tup->uport.membase + (reg << tup->uport.regshift));
}

static inline void tegra_uart_write(struct tegra_uart_port *tup, unsigned val,
		unsigned long reg)
{
	writel(val, tup->uport.membase + (reg << tup->uport.regshift));
}

static inline struct tegra_uart_port *to_tegra_uport(struct uart_port *u)
{
	return container_of(u, struct tegra_uart_port, uport);
}

static unsigned int tegra_uart_get_mctrl(struct uart_port *u)
{
	struct tegra_uart_port *tup = to_tegra_uport(u);

	/*
	 * RI - Ring detector is active.
	 * CD/DCD/CAR - Carrier detect is always active. For some reason
	 *	Linux has different names for carrier detect.
	 * DSR - Data Set Ready is always active as the hardware doesn't
	 *	support it. Not sure whether Linux supports this yet.
	 * CTS - Clear To Send. Always set to active, as the hardware handles
	 *	CTS automatically.
	 */
	if (tup->enable_modem_interrupt)
		return TIOCM_RI | TIOCM_CD | TIOCM_DSR | TIOCM_CTS;
	return TIOCM_CTS;
}

static void set_rts(struct tegra_uart_port *tup, bool active)
{
	unsigned long mcr;

	mcr = tup->mcr_shadow;
	if (active)
		mcr |= TEGRA_UART_MCR_RTS_EN;
	else
		mcr &= ~TEGRA_UART_MCR_RTS_EN;
	if (mcr != tup->mcr_shadow) {
		tegra_uart_write(tup, mcr, UART_MCR);
		tup->mcr_shadow = mcr;
	}
	return;
}

static void set_dtr(struct tegra_uart_port *tup, bool active)
{
	unsigned long mcr;

	mcr = tup->mcr_shadow;
	if (active)
		mcr |= UART_MCR_DTR;
	else
		mcr &= ~UART_MCR_DTR;
	if (mcr != tup->mcr_shadow) {
		tegra_uart_write(tup, mcr, UART_MCR);
		tup->mcr_shadow = mcr;
	}
	return;
}

static void tegra_uart_set_mctrl(struct uart_port *u, unsigned int mctrl)
{
	struct tegra_uart_port *tup = to_tegra_uport(u);
	unsigned long mcr;
	int dtr_enable;

	mcr = tup->mcr_shadow;
	tup->rts_active = !!(mctrl & TIOCM_RTS);
	set_rts(tup, tup->rts_active);

	dtr_enable = !!(mctrl & TIOCM_DTR);
	set_dtr(tup, dtr_enable);
	return;
}

static void tegra_uart_break_ctl(struct uart_port *u, int break_ctl)
{
	struct tegra_uart_port *tup = to_tegra_uport(u);
	unsigned long lcr;

	lcr = tup->lcr_shadow;
	if (break_ctl)
		lcr |= UART_LCR_SBC;
	else
		lcr &= ~UART_LCR_SBC;
	tegra_uart_write(tup, lcr, UART_LCR);
	tup->lcr_shadow = lcr;
}

/* Wait for a symbol-time. */
static void tegra_uart_wait_sym_time(struct tegra_uart_port *tup,
		unsigned int syms)
{
	if (tup->current_baud)
		udelay(DIV_ROUND_UP(syms * tup->symb_bit * 1000000,
			tup->current_baud));
}
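
/*
 * Flush the Rx and/or Tx FIFOs. On chips that do not allow clearing the
 * FIFOs while in FIFO mode (e.g. Tegra30), the FIFO is briefly disabled,
 * cleared and re-enabled instead.
 */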
static void tegra_uart_fifo_reset(struct tegra_uart_port *tup, u8 fcr_bits)
{
	unsigned long fcr = tup->fcr_shadow;

	if (tup->cdata->allow_txfifo_reset_fifo_mode) {
		fcr |= fcr_bits & (UART_FCR_CLEAR_RCVR | UART_FCR_CLEAR_XMIT);
		tegra_uart_write(tup, fcr, UART_FCR);
	} else {
		fcr &= ~UART_FCR_ENABLE_FIFO;
		tegra_uart_write(tup, fcr, UART_FCR);
		udelay(60);
		fcr |= fcr_bits & (UART_FCR_CLEAR_RCVR | UART_FCR_CLEAR_XMIT);
		tegra_uart_write(tup, fcr, UART_FCR);
		fcr |= UART_FCR_ENABLE_FIFO;
		tegra_uart_write(tup, fcr, UART_FCR);
	}

	/* Dummy read to ensure the write is posted */
	tegra_uart_read(tup, UART_SCR);

	/* Wait for the flush to propagate. */
	tegra_uart_wait_sym_time(tup, 1);
}
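
/*
 * Program the baud-rate divisor. When the SoC supports a dedicated clock
 * divider, the UART clock itself is set to 16x the requested baud rate and
 * the internal divisor is left at 1; otherwise the divisor is computed from
 * the current clock rate. The DLL/DLM registers are accessed through the
 * UART_TX/UART_IER addresses while UART_LCR_DLAB is set.
 */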
static int tegra_set_baudrate(struct tegra_uart_port *tup, unsigned int baud)
{
	unsigned long rate;
	unsigned int divisor;
	unsigned long lcr;
	int ret;

	if (tup->current_baud == baud)
		return 0;

	if (tup->cdata->support_clk_src_div) {
		rate = baud * 16;
		ret = clk_set_rate(tup->uart_clk, rate);
		if (ret < 0) {
			dev_err(tup->uport.dev,
				"clk_set_rate() failed for rate %lu\n", rate);
			return ret;
		}
		divisor = 1;
	} else {
		rate = clk_get_rate(tup->uart_clk);
		divisor = DIV_ROUND_CLOSEST(rate, baud * 16);
	}

	lcr = tup->lcr_shadow;
	lcr |= UART_LCR_DLAB;
	tegra_uart_write(tup, lcr, UART_LCR);

	tegra_uart_write(tup, divisor & 0xFF, UART_TX);
	tegra_uart_write(tup, ((divisor >> 8) & 0xFF), UART_IER);

	lcr &= ~UART_LCR_DLAB;
	tegra_uart_write(tup, lcr, UART_LCR);

	/* Dummy read to ensure the write is posted */
	tegra_uart_read(tup, UART_SCR);

	tup->current_baud = baud;

	/* wait two character intervals at new rate */
	tegra_uart_wait_sym_time(tup, 2);
	return 0;
}
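
/*
 * Decode the Rx error bits in the LSR value, update the error counters and
 * return the corresponding TTY flag for the character being received.
 */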
static char tegra_uart_decode_rx_error(struct tegra_uart_port *tup,
			unsigned long lsr)
{
	char flag = TTY_NORMAL;

	if (unlikely(lsr & TEGRA_UART_LSR_ANY)) {
		if (lsr & UART_LSR_OE) {
			/* Overrun error */
			flag |= TTY_OVERRUN;
			tup->uport.icount.overrun++;
			dev_err(tup->uport.dev, "Got overrun errors\n");
		} else if (lsr & UART_LSR_PE) {
			/* Parity error */
			flag |= TTY_PARITY;
			tup->uport.icount.parity++;
			dev_err(tup->uport.dev, "Got Parity errors\n");
		} else if (lsr & UART_LSR_FE) {
			/* Framing error */
			flag |= TTY_FRAME;
			tup->uport.icount.frame++;
			dev_err(tup->uport.dev, "Got frame errors\n");
		} else if (lsr & UART_LSR_BI) {
			dev_err(tup->uport.dev, "Got Break\n");
			tup->uport.icount.brk++;
			/* If FIFO read error without any data, reset Rx FIFO */
			if (!(lsr & UART_LSR_DR) && (lsr & UART_LSR_FIFOE))
				tegra_uart_fifo_reset(tup, UART_FCR_CLEAR_RCVR);
		}
	}
	return flag;
}

static int tegra_uart_request_port(struct uart_port *u)
{
	return 0;
}

static void tegra_uart_release_port(struct uart_port *u)
{
	/* Nothing to do here */
}
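
/*
 * Write up to max_bytes from the circular Tx buffer into the Tx FIFO,
 * stopping early if the chip reports the FIFO full (when supported).
 */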
static void tegra_uart_fill_tx_fifo(struct tegra_uart_port *tup, int max_bytes)
{
	struct circ_buf *xmit = &tup->uport.state->xmit;
	int i;

	for (i = 0; i < max_bytes; i++) {
		BUG_ON(uart_circ_empty(xmit));
		if (tup->cdata->tx_fifo_full_status) {
			unsigned long lsr = tegra_uart_read(tup, UART_LSR);
			if ((lsr & TEGRA_UART_LSR_TXFIFO_FULL))
				break;
		}
		tegra_uart_write(tup, xmit->buf[xmit->tail], UART_TX);
		xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
		tup->uport.icount.tx++;
	}
}

static void tegra_uart_start_pio_tx(struct tegra_uart_port *tup,
		unsigned int bytes)
{
	if (bytes > TEGRA_UART_MIN_DMA)
		bytes = TEGRA_UART_MIN_DMA;

	tup->tx_in_progress = TEGRA_UART_TX_PIO;
	tup->tx_bytes = bytes;
	tup->ier_shadow |= UART_IER_THRI;
	tegra_uart_write(tup, tup->ier_shadow, UART_IER);
}

static void tegra_uart_tx_dma_complete(void *args)
{
	struct tegra_uart_port *tup = args;
	struct circ_buf *xmit = &tup->uport.state->xmit;
	struct dma_tx_state state;
	unsigned long flags;
	int count;

	dmaengine_tx_status(tup->tx_dma_chan, tup->tx_cookie, &state);
	count = tup->tx_bytes_requested - state.residue;
	async_tx_ack(tup->tx_dma_desc);
	spin_lock_irqsave(&tup->uport.lock, flags);
	xmit->tail = (xmit->tail + count) & (UART_XMIT_SIZE - 1);
	tup->tx_in_progress = 0;
	if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
		uart_write_wakeup(&tup->uport);
	tegra_uart_start_next_tx(tup);
	spin_unlock_irqrestore(&tup->uport.lock, flags);
}
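
/*
 * Kick off a DMA transfer for the aligned part of the pending Tx data and
 * arrange for tegra_uart_tx_dma_complete() to run when it finishes.
 */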
static int tegra_uart_start_tx_dma(struct tegra_uart_port *tup,
		unsigned long count)
{
	struct circ_buf *xmit = &tup->uport.state->xmit;
	dma_addr_t tx_phys_addr;

	dma_sync_single_for_device(tup->uport.dev, tup->tx_dma_buf_phys,
				UART_XMIT_SIZE, DMA_TO_DEVICE);
	tup->tx_bytes = count & ~(0xF);
	tx_phys_addr = tup->tx_dma_buf_phys + xmit->tail;
	tup->tx_dma_desc = dmaengine_prep_slave_single(tup->tx_dma_chan,
				tx_phys_addr, tup->tx_bytes, DMA_MEM_TO_DEV,
				DMA_PREP_INTERRUPT);
	if (!tup->tx_dma_desc) {
		dev_err(tup->uport.dev, "Not able to get desc for Tx\n");
		return -EIO;
	}

	tup->tx_dma_desc->callback = tegra_uart_tx_dma_complete;
	tup->tx_dma_desc->callback_param = tup;
	tup->tx_in_progress = TEGRA_UART_TX_DMA;
	tup->tx_bytes_requested = tup->tx_bytes;
	tup->tx_cookie = dmaengine_submit(tup->tx_dma_desc);
	dma_async_issue_pending(tup->tx_dma_chan);
	return 0;
}
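
/*
 * Pick the transfer mode for the next chunk of Tx data: small or unaligned
 * chunks go out by PIO, anything else is handed to the DMA engine.
 */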
static void tegra_uart_start_next_tx(struct tegra_uart_port *tup)
{
	unsigned long tail;
	unsigned long count;
	struct circ_buf *xmit = &tup->uport.state->xmit;

	tail = (unsigned long)&xmit->buf[xmit->tail];
	count = CIRC_CNT_TO_END(xmit->head, xmit->tail, UART_XMIT_SIZE);
	if (!count)
		return;

	if (count < TEGRA_UART_MIN_DMA)
		tegra_uart_start_pio_tx(tup, count);
	else if (BYTES_TO_ALIGN(tail) > 0)
		tegra_uart_start_pio_tx(tup, BYTES_TO_ALIGN(tail));
	else
		tegra_uart_start_tx_dma(tup, count);
}

/* Called by serial core driver with u->lock taken. */
static void tegra_uart_start_tx(struct uart_port *u)
{
	struct tegra_uart_port *tup = to_tegra_uport(u);
	struct circ_buf *xmit = &u->state->xmit;

	if (!uart_circ_empty(xmit) && !tup->tx_in_progress)
		tegra_uart_start_next_tx(tup);
}

static unsigned int tegra_uart_tx_empty(struct uart_port *u)
{
	struct tegra_uart_port *tup = to_tegra_uport(u);
	unsigned int ret = 0;
	unsigned long flags;

	spin_lock_irqsave(&u->lock, flags);
	if (!tup->tx_in_progress) {
		unsigned long lsr = tegra_uart_read(tup, UART_LSR);
		if ((lsr & TX_EMPTY_STATUS) == TX_EMPTY_STATUS)
			ret = TIOCSER_TEMT;
	}
	spin_unlock_irqrestore(&u->lock, flags);
	return ret;
}

static void tegra_uart_stop_tx(struct uart_port *u)
{
	struct tegra_uart_port *tup = to_tegra_uport(u);
	struct circ_buf *xmit = &tup->uport.state->xmit;
	struct dma_tx_state state;
	int count;

	dmaengine_terminate_all(tup->tx_dma_chan);
	dmaengine_tx_status(tup->tx_dma_chan, tup->tx_cookie, &state);
	count = tup->tx_bytes_requested - state.residue;
	async_tx_ack(tup->tx_dma_desc);
	xmit->tail = (xmit->tail + count) & (UART_XMIT_SIZE - 1);
	tup->tx_in_progress = 0;
	return;
}

static void tegra_uart_handle_tx_pio(struct tegra_uart_port *tup)
{
	struct circ_buf *xmit = &tup->uport.state->xmit;

	tegra_uart_fill_tx_fifo(tup, tup->tx_bytes);
	tup->tx_in_progress = 0;
	if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
		uart_write_wakeup(&tup->uport);
	tegra_uart_start_next_tx(tup);
	return;
}
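
/*
 * Drain any characters still sitting in the Rx FIFO by PIO, pushing them
 * to the tty layer with the decoded error flag.
 */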
static void tegra_uart_handle_rx_pio(struct tegra_uart_port *tup,
		struct tty_port *tty)
{
	do {
		char flag = TTY_NORMAL;
		unsigned long lsr = 0;
		unsigned char ch;

		lsr = tegra_uart_read(tup, UART_LSR);
		if (!(lsr & UART_LSR_DR))
			break;

		flag = tegra_uart_decode_rx_error(tup, lsr);
		ch = (unsigned char) tegra_uart_read(tup, UART_RX);
		tup->uport.icount.rx++;

		if (!uart_handle_sysrq_char(&tup->uport, ch) && tty)
			tty_insert_flip_char(tty, ch, flag);
	} while (1);

	return;
}
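
/*
 * Copy count bytes from the Rx DMA bounce buffer into the tty flip buffer.
 * The buffer is synced for the CPU before the copy and handed back to the
 * device afterwards.
 */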
static void tegra_uart_copy_rx_to_tty(struct tegra_uart_port *tup,
		struct tty_port *tty, int count)
{
	int copied;

	tup->uport.icount.rx += count;
	if (!tty) {
		dev_err(tup->uport.dev, "No tty port\n");
		return;
	}
	dma_sync_single_for_cpu(tup->uport.dev, tup->rx_dma_buf_phys,
				TEGRA_UART_RX_DMA_BUFFER_SIZE, DMA_FROM_DEVICE);
	copied = tty_insert_flip_string(tty,
			((unsigned char *)(tup->rx_dma_buf_virt)), count);
	if (copied != count) {
		WARN_ON(1);
		dev_err(tup->uport.dev, "RxData copy to tty layer failed\n");
	}
	dma_sync_single_for_device(tup->uport.dev, tup->rx_dma_buf_phys,
				TEGRA_UART_RX_DMA_BUFFER_SIZE, DMA_TO_DEVICE);
}

static void tegra_uart_rx_dma_complete(void *args)
{
	struct tegra_uart_port *tup = args;
	struct uart_port *u = &tup->uport;
	int count = tup->rx_bytes_requested;
	struct tty_struct *tty = tty_port_tty_get(&tup->uport.state->port);
	struct tty_port *port = &u->state->port;
	unsigned long flags;

	async_tx_ack(tup->rx_dma_desc);
	spin_lock_irqsave(&u->lock, flags);

	/* Deactivate flow control to stop sender */
	if (tup->rts_active)
		set_rts(tup, false);

	/* If we are here, DMA is stopped */
	if (count)
		tegra_uart_copy_rx_to_tty(tup, port, count);

	tegra_uart_handle_rx_pio(tup, port);
	if (tty) {
		tty_flip_buffer_push(port);
		tty_kref_put(tty);
	}
	tegra_uart_start_rx_dma(tup);

	/* Activate flow control to start transfer */
	if (tup->rts_active)
		set_rts(tup, true);
	spin_unlock_irqrestore(&u->lock, flags);
}
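
/*
 * Handle Rx data signalled by the EORD/Rx-timeout interrupts: stop the
 * in-flight DMA transfer, push whatever it delivered plus any FIFO residue
 * to the tty layer, then re-queue the Rx DMA.
 */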
static void tegra_uart_handle_rx_dma(struct tegra_uart_port *tup)
{
	struct dma_tx_state state;
	struct tty_struct *tty = tty_port_tty_get(&tup->uport.state->port);
	struct tty_port *port = &tup->uport.state->port;
	int count;

	/* Deactivate flow control to stop sender */
	if (tup->rts_active)
		set_rts(tup, false);

	dmaengine_terminate_all(tup->rx_dma_chan);
	dmaengine_tx_status(tup->rx_dma_chan, tup->rx_cookie, &state);
	count = tup->rx_bytes_requested - state.residue;

	/* If we are here, DMA is stopped */
	if (count)
		tegra_uart_copy_rx_to_tty(tup, port, count);

	tegra_uart_handle_rx_pio(tup, port);
	if (tty) {
		tty_flip_buffer_push(port);
		tty_kref_put(tty);
	}
	tegra_uart_start_rx_dma(tup);

	if (tup->rts_active)
		set_rts(tup, true);
}
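
/*
 * Queue a new Rx DMA transfer covering the whole bounce buffer; the
 * completion callback or the interrupt handler will reap it later.
 */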
static int tegra_uart_start_rx_dma(struct tegra_uart_port *tup)
{
	unsigned int count = TEGRA_UART_RX_DMA_BUFFER_SIZE;

	tup->rx_dma_desc = dmaengine_prep_slave_single(tup->rx_dma_chan,
				tup->rx_dma_buf_phys, count, DMA_DEV_TO_MEM,
				DMA_PREP_INTERRUPT);
	if (!tup->rx_dma_desc) {
		dev_err(tup->uport.dev, "Not able to get desc for Rx\n");
		return -EIO;
	}

	tup->rx_dma_desc->callback = tegra_uart_rx_dma_complete;
	tup->rx_dma_desc->callback_param = tup;
	dma_sync_single_for_device(tup->uport.dev, tup->rx_dma_buf_phys,
				count, DMA_TO_DEVICE);
	tup->rx_bytes_requested = count;
	tup->rx_cookie = dmaengine_submit(tup->rx_dma_desc);
	dma_async_issue_pending(tup->rx_dma_chan);
	return 0;
}

static void tegra_uart_handle_modem_signal_change(struct uart_port *u)
{
	struct tegra_uart_port *tup = to_tegra_uport(u);
	unsigned long msr;

	msr = tegra_uart_read(tup, UART_MSR);
	if (!(msr & UART_MSR_ANY_DELTA))
		return;

	if (msr & UART_MSR_TERI)
		tup->uport.icount.rng++;
	if (msr & UART_MSR_DDSR)
		tup->uport.icount.dsr++;

	/* We may only get DDCD when HW init and reset */
	if (msr & UART_MSR_DDCD)
		uart_handle_dcd_change(&tup->uport, msr & UART_MSR_DCD);

	/* Will start/stop_tx accordingly */
	if (msr & UART_MSR_DCTS)
		uart_handle_cts_change(&tup->uport, msr & UART_MSR_CTS);
	return;
}
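
/*
 * Main interrupt handler: loop on IIR until no interrupt is pending.
 * Rx-related interrupts only mask the Rx sources here; the actual DMA
 * reap and re-arm happens once the controller reports no pending
 * interrupt, so the Rx work is batched per ISR invocation.
 */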
static irqreturn_t tegra_uart_isr(int irq, void *data)
{
	struct tegra_uart_port *tup = data;
	struct uart_port *u = &tup->uport;
	unsigned long iir;
	unsigned long ier;
	bool is_rx_int = false;
	unsigned long flags;

	spin_lock_irqsave(&u->lock, flags);
	while (1) {
		iir = tegra_uart_read(tup, UART_IIR);
		if (iir & UART_IIR_NO_INT) {
			if (is_rx_int) {
				tegra_uart_handle_rx_dma(tup);
				if (tup->rx_in_progress) {
					ier = tup->ier_shadow;
					ier |= (UART_IER_RLSI | UART_IER_RTOIE |
						TEGRA_UART_IER_EORD);
					tup->ier_shadow = ier;
					tegra_uart_write(tup, ier, UART_IER);
				}
			}
			spin_unlock_irqrestore(&u->lock, flags);
			return IRQ_HANDLED;
		}

		switch ((iir >> 1) & 0x7) {
		case 0: /* Modem signal change interrupt */
			tegra_uart_handle_modem_signal_change(u);
			break;

		case 1: /* Transmit interrupt only triggered when using PIO */
			tup->ier_shadow &= ~UART_IER_THRI;
			tegra_uart_write(tup, tup->ier_shadow, UART_IER);
			tegra_uart_handle_tx_pio(tup);
			break;

		case 4: /* End of data */
		case 6: /* Rx timeout */
		case 2: /* Receive */
			if (!is_rx_int) {
				is_rx_int = true;
				/* Disable Rx interrupts */
				ier = tup->ier_shadow;
				ier |= UART_IER_RDI;
				tegra_uart_write(tup, ier, UART_IER);
				ier &= ~(UART_IER_RDI | UART_IER_RLSI |
					UART_IER_RTOIE | TEGRA_UART_IER_EORD);
				tup->ier_shadow = ier;
				tegra_uart_write(tup, ier, UART_IER);
			}
			break;

		case 3: /* Receive error */
			tegra_uart_decode_rx_error(tup,
					tegra_uart_read(tup, UART_LSR));
			break;

		case 5: /* break nothing to handle */
		case 7: /* break nothing to handle */
			break;
		}
	}
}
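
/*
 * Stop reception: mask the Rx interrupt sources, terminate any Rx DMA in
 * flight and push the data already received to the tty layer.
 */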
static void tegra_uart_stop_rx(struct uart_port *u)
{
	struct tegra_uart_port *tup = to_tegra_uport(u);
	struct tty_struct *tty = tty_port_tty_get(&tup->uport.state->port);
	struct tty_port *port = &u->state->port;
	struct dma_tx_state state;
	unsigned long ier;
	int count;

	if (tup->rts_active)
		set_rts(tup, false);

	if (!tup->rx_in_progress)
		return;

	tegra_uart_wait_sym_time(tup, 1); /* wait a character interval */

	ier = tup->ier_shadow;
	ier &= ~(UART_IER_RDI | UART_IER_RLSI | UART_IER_RTOIE |
					TEGRA_UART_IER_EORD);
	tup->ier_shadow = ier;
	tegra_uart_write(tup, ier, UART_IER);
	tup->rx_in_progress = 0;
	if (tup->rx_dma_chan) {
		dmaengine_terminate_all(tup->rx_dma_chan);
		dmaengine_tx_status(tup->rx_dma_chan, tup->rx_cookie, &state);
		async_tx_ack(tup->rx_dma_desc);
		count = tup->rx_bytes_requested - state.residue;
		tegra_uart_copy_rx_to_tty(tup, port, count);
		tegra_uart_handle_rx_pio(tup, port);
	} else {
		tegra_uart_handle_rx_pio(tup, port);
	}
	if (tty) {
		tty_flip_buffer_push(port);
		tty_kref_put(tty);
	}
	return;
}
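
/*
 * Shut the hardware down: wait (bounded by the FIFO drain time) for the Tx
 * FIFO to empty, reset both FIFOs and gate the UART clock.
 */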
static void tegra_uart_hw_deinit(struct tegra_uart_port *tup)
{
	unsigned long flags;
	unsigned long char_time = DIV_ROUND_UP(10000000, tup->current_baud);
	unsigned long fifo_empty_time = tup->uport.fifosize * char_time;
	unsigned long wait_time;
	unsigned long lsr;
	unsigned long msr;
	unsigned long mcr;

	/* Disable interrupts */
	tegra_uart_write(tup, 0, UART_IER);

	lsr = tegra_uart_read(tup, UART_LSR);
	if ((lsr & UART_LSR_TEMT) != UART_LSR_TEMT) {
		msr = tegra_uart_read(tup, UART_MSR);
		mcr = tegra_uart_read(tup, UART_MCR);
		if ((mcr & TEGRA_UART_MCR_CTS_EN) && (msr & UART_MSR_CTS))
			dev_err(tup->uport.dev,
				"Tx Fifo not empty, CTS disabled, waiting\n");

		/* Wait for Tx fifo to be empty */
		while ((lsr & UART_LSR_TEMT) != UART_LSR_TEMT) {
			wait_time = min(fifo_empty_time, 100lu);
			udelay(wait_time);
			fifo_empty_time -= wait_time;
			if (!fifo_empty_time) {
				msr = tegra_uart_read(tup, UART_MSR);
				mcr = tegra_uart_read(tup, UART_MCR);
				if ((mcr & TEGRA_UART_MCR_CTS_EN) &&
					(msr & UART_MSR_CTS))
					dev_err(tup->uport.dev,
						"Slave not ready\n");
				break;
			}
			lsr = tegra_uart_read(tup, UART_LSR);
		}
	}

	spin_lock_irqsave(&tup->uport.lock, flags);
	/* Reset the Rx and Tx FIFOs */
	tegra_uart_fifo_reset(tup, UART_FCR_CLEAR_XMIT | UART_FCR_CLEAR_RCVR);
	tup->current_baud = 0;
	spin_unlock_irqrestore(&tup->uport.lock, flags);

	clk_disable_unprepare(tup->uart_clk);
}
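
/*
 * Bring the controller up: enable and reset the clock, program the FIFO
 * trigger levels, apply the default line settings, queue the first Rx DMA
 * and unmask the Rx/EORD interrupts.
 */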
static int tegra_uart_hw_init(struct tegra_uart_port *tup)
{
	int ret;

	tup->fcr_shadow = 0;
	tup->mcr_shadow = 0;
	tup->lcr_shadow = 0;
	tup->ier_shadow = 0;
	tup->current_baud = 0;

	clk_prepare_enable(tup->uart_clk);

	/* Reset the UART controller to clear all previous status. */
	tegra_periph_reset_assert(tup->uart_clk);
	udelay(10);
	tegra_periph_reset_deassert(tup->uart_clk);

	tup->rx_in_progress = 0;
	tup->tx_in_progress = 0;

	/*
	 * Set the trigger level
	 *
	 * For PIO mode:
	 *
	 * For receive, this will interrupt the CPU after that many bytes are
	 * received; for the remaining bytes the receive timeout interrupt is
	 * received. Rx high watermark is set to 4.
	 *
	 * For transmit, if the transmit interrupt is enabled, this will
	 * interrupt the CPU when the number of entries in the FIFO reaches the
	 * low watermark. Tx low watermark is set to 16 bytes.
	 *
	 * For DMA mode:
	 *
	 * Set the Tx trigger to 16. This should match the DMA burst size that
	 * is programmed in the DMA registers.
	 */
	tup->fcr_shadow = UART_FCR_ENABLE_FIFO;
	tup->fcr_shadow |= UART_FCR_R_TRIG_01;
	tup->fcr_shadow |= TEGRA_UART_TX_TRIG_16B;
	tegra_uart_write(tup, tup->fcr_shadow, UART_FCR);

	/*
	 * Initialize the UART with default configuration
	 * (115200, N, 8, 1) so that the receive DMA buffer may be
	 * enqueued
	 */
	tup->lcr_shadow = TEGRA_UART_DEFAULT_LSR;
	tegra_set_baudrate(tup, TEGRA_UART_DEFAULT_BAUD);
	tup->fcr_shadow |= UART_FCR_DMA_SELECT;
	tegra_uart_write(tup, tup->fcr_shadow, UART_FCR);

	ret = tegra_uart_start_rx_dma(tup);
	if (ret < 0) {
		dev_err(tup->uport.dev, "Not able to start Rx DMA\n");
		return ret;
	}
	tup->rx_in_progress = 1;

	/*
	 * Enable IE_RXS for the receive status interrupts like line errors.
	 * Enable IE_RX_TIMEOUT to get the bytes which cannot be DMA'd.
	 *
	 * If using DMA mode, enable EORD instead of receive interrupt which
	 * will interrupt after the UART is done with the receive instead of
	 * the interrupt when the FIFO "threshold" is reached.
	 *
	 * EORD is a different interrupt than RX_TIMEOUT - RX_TIMEOUT occurs
	 * when the data is sitting in the FIFO and couldn't be transferred
	 * to the DMA as the DMA size alignment (4 bytes) is not met. EORD
	 * will be triggered when there is a pause in the incoming data
	 * stream for 4 characters long.
	 *
	 * For pauses in the data which are not aligned to 4 bytes, we get
	 * both the EORD as well as RX_TIMEOUT - SW sees RX_TIMEOUT first
	 * then the EORD.
	 */
	tup->ier_shadow = UART_IER_RLSI | UART_IER_RTOIE | TEGRA_UART_IER_EORD;
	tegra_uart_write(tup, tup->ier_shadow, UART_IER);
	return 0;
}
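
/*
 * Allocate a DMA channel and buffer for either direction: the Rx side
 * gets a dedicated coherent bounce buffer, the Tx side maps the port's
 * circular transmit buffer for streaming DMA.
 */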
static int tegra_uart_dma_channel_allocate(struct tegra_uart_port *tup,
			bool dma_to_memory)
{
	struct dma_chan *dma_chan;
	unsigned char *dma_buf;
	dma_addr_t dma_phys;
	int ret;
	struct dma_slave_config dma_sconfig;
	dma_cap_mask_t mask;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);
	dma_chan = dma_request_channel(mask, NULL, NULL);
	if (!dma_chan) {
		dev_err(tup->uport.dev,
			"Dma channel is not available, will try later\n");
		return -EPROBE_DEFER;
	}

	if (dma_to_memory) {
		dma_buf = dma_alloc_coherent(tup->uport.dev,
				TEGRA_UART_RX_DMA_BUFFER_SIZE,
				&dma_phys, GFP_KERNEL);
		if (!dma_buf) {
			dev_err(tup->uport.dev,
				"Not able to allocate the dma buffer\n");
			dma_release_channel(dma_chan);
			return -ENOMEM;
		}
	} else {
		dma_phys = dma_map_single(tup->uport.dev,
			tup->uport.state->xmit.buf, UART_XMIT_SIZE,
			DMA_TO_DEVICE);
		dma_buf = tup->uport.state->xmit.buf;
	}

	dma_sconfig.slave_id = tup->dma_req_sel;
	if (dma_to_memory) {
		dma_sconfig.src_addr = tup->uport.mapbase;
		dma_sconfig.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
		dma_sconfig.src_maxburst = 4;
	} else {
		dma_sconfig.dst_addr = tup->uport.mapbase;
		dma_sconfig.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
		dma_sconfig.dst_maxburst = 16;
	}

	ret = dmaengine_slave_config(dma_chan, &dma_sconfig);
	if (ret < 0) {
		dev_err(tup->uport.dev,
			"Dma slave config failed, err = %d\n", ret);
		goto scrub;
	}

	if (dma_to_memory) {
		tup->rx_dma_chan = dma_chan;
		tup->rx_dma_buf_virt = dma_buf;
		tup->rx_dma_buf_phys = dma_phys;
	} else {
		tup->tx_dma_chan = dma_chan;
		tup->tx_dma_buf_virt = dma_buf;
		tup->tx_dma_buf_phys = dma_phys;
	}
	return 0;

scrub:
	dma_release_channel(dma_chan);
	return ret;
}

static void tegra_uart_dma_channel_free(struct tegra_uart_port *tup,
		bool dma_to_memory)
{
	struct dma_chan *dma_chan;

	if (dma_to_memory) {
		dma_free_coherent(tup->uport.dev, TEGRA_UART_RX_DMA_BUFFER_SIZE,
				tup->rx_dma_buf_virt, tup->rx_dma_buf_phys);
		dma_chan = tup->rx_dma_chan;
		tup->rx_dma_chan = NULL;
		tup->rx_dma_buf_phys = 0;
		tup->rx_dma_buf_virt = NULL;
	} else {
		dma_unmap_single(tup->uport.dev, tup->tx_dma_buf_phys,
			UART_XMIT_SIZE, DMA_TO_DEVICE);
		dma_chan = tup->tx_dma_chan;
		tup->tx_dma_chan = NULL;
		tup->tx_dma_buf_phys = 0;
		tup->tx_dma_buf_virt = NULL;
	}
	dma_release_channel(dma_chan);
}

static int tegra_uart_startup(struct uart_port *u)
{
	struct tegra_uart_port *tup = to_tegra_uport(u);
	int ret;

	ret = tegra_uart_dma_channel_allocate(tup, false);
	if (ret < 0) {
		dev_err(u->dev, "Tx Dma allocation failed, err = %d\n", ret);
		return ret;
	}

	ret = tegra_uart_dma_channel_allocate(tup, true);
	if (ret < 0) {
		dev_err(u->dev, "Rx Dma allocation failed, err = %d\n", ret);
		goto fail_rx_dma;
	}

	ret = tegra_uart_hw_init(tup);
	if (ret < 0) {
		dev_err(u->dev, "Uart HW init failed, err = %d\n", ret);
		goto fail_hw_init;
	}

	ret = request_irq(u->irq, tegra_uart_isr, IRQF_DISABLED,
				dev_name(u->dev), tup);
	if (ret < 0) {
		dev_err(u->dev, "Failed to register ISR for IRQ %d\n", u->irq);
		goto fail_hw_init;
	}
	return 0;

fail_hw_init:
	tegra_uart_dma_channel_free(tup, true);
fail_rx_dma:
	tegra_uart_dma_channel_free(tup, false);
	return ret;
}

static void tegra_uart_shutdown(struct uart_port *u)
{
	struct tegra_uart_port *tup = to_tegra_uport(u);

	tegra_uart_hw_deinit(tup);

	tup->rx_in_progress = 0;
	tup->tx_in_progress = 0;

	tegra_uart_dma_channel_free(tup, true);
	tegra_uart_dma_channel_free(tup, false);
	free_irq(u->irq, tup);
}

static void tegra_uart_enable_ms(struct uart_port *u)
{
	struct tegra_uart_port *tup = to_tegra_uport(u);

	if (tup->enable_modem_interrupt) {
		tup->ier_shadow |= UART_IER_MSI;
		tegra_uart_write(tup, tup->ier_shadow, UART_IER);
	}
}

static void tegra_uart_set_termios(struct uart_port *u,
		struct ktermios *termios, struct ktermios *oldtermios)
{
	struct tegra_uart_port *tup = to_tegra_uport(u);
	unsigned int baud;
	unsigned long flags;
	unsigned int lcr;
	int symb_bit = 1;
	struct clk *parent_clk = clk_get_parent(tup->uart_clk);
	unsigned long parent_clk_rate = clk_get_rate(parent_clk);
	int max_divider = (tup->cdata->support_clk_src_div) ? 0x7FFF : 0xFFFF;

	max_divider *= 16;
	spin_lock_irqsave(&u->lock, flags);

	/* Changing configuration, it is safe to stop any rx now */
	if (tup->rts_active)
		set_rts(tup, false);

	/* Clear all interrupts as configuration is going to be changed */
	tegra_uart_write(tup, tup->ier_shadow | UART_IER_RDI, UART_IER);
	tegra_uart_read(tup, UART_IER);
	tegra_uart_write(tup, 0, UART_IER);
	tegra_uart_read(tup, UART_IER);

	/* Parity */
	lcr = tup->lcr_shadow;
	lcr &= ~UART_LCR_PARITY;

	/* CMSPAR isn't supported by this driver */
	termios->c_cflag &= ~CMSPAR;

	if ((termios->c_cflag & PARENB) == PARENB) {
		symb_bit++;
		if (termios->c_cflag & PARODD) {
			lcr |= UART_LCR_PARITY;
			lcr &= ~UART_LCR_EPAR;
			lcr &= ~UART_LCR_SPAR;
		} else {
			lcr |= UART_LCR_PARITY;
			lcr |= UART_LCR_EPAR;
			lcr &= ~UART_LCR_SPAR;
		}
	}

	lcr &= ~UART_LCR_WLEN8;
	switch (termios->c_cflag & CSIZE) {
	case CS5:
		lcr |= UART_LCR_WLEN5;
		symb_bit += 5;
		break;
	case CS6:
		lcr |= UART_LCR_WLEN6;
		symb_bit += 6;
		break;
	case CS7:
		lcr |= UART_LCR_WLEN7;
		symb_bit += 7;
		break;
	default:
		lcr |= UART_LCR_WLEN8;
		symb_bit += 8;
		break;
	}

	/* Stop bits */
	if (termios->c_cflag & CSTOPB) {
		lcr |= UART_LCR_STOP;
		symb_bit += 2;
	} else {
		lcr &= ~UART_LCR_STOP;
		symb_bit++;
	}

	tegra_uart_write(tup, lcr, UART_LCR);
	tup->lcr_shadow = lcr;
	tup->symb_bit = symb_bit;

	/* Baud rate. */
	baud = uart_get_baud_rate(u, termios, oldtermios,
			parent_clk_rate/max_divider,
			parent_clk_rate/16);
	spin_unlock_irqrestore(&u->lock, flags);
	tegra_set_baudrate(tup, baud);
	if (tty_termios_baud_rate(termios))
		tty_termios_encode_baud_rate(termios, baud, baud);
	spin_lock_irqsave(&u->lock, flags);

	/* Flow control */
	if (termios->c_cflag & CRTSCTS) {
		tup->mcr_shadow |= TEGRA_UART_MCR_CTS_EN;
		tup->mcr_shadow &= ~TEGRA_UART_MCR_RTS_EN;
		tegra_uart_write(tup, tup->mcr_shadow, UART_MCR);
		/* if top layer has asked to set rts active then do so here */
		if (tup->rts_active)
			set_rts(tup, true);
	} else {
		tup->mcr_shadow &= ~TEGRA_UART_MCR_CTS_EN;
		tup->mcr_shadow &= ~TEGRA_UART_MCR_RTS_EN;
		tegra_uart_write(tup, tup->mcr_shadow, UART_MCR);
	}

	/* update the port timeout based on new settings */
	uart_update_timeout(u, termios->c_cflag, baud);

	/* Make sure all writes have completed */
	tegra_uart_read(tup, UART_IER);

	/* Re-enable interrupts */
	tegra_uart_write(tup, tup->ier_shadow, UART_IER);
	tegra_uart_read(tup, UART_IER);
	spin_unlock_irqrestore(&u->lock, flags);
	return;
}

/*
 * Flush any TX data submitted for DMA and PIO. Called when the
 * TX circular buffer is reset.
 */
static void tegra_uart_flush_buffer(struct uart_port *u)
{
	struct tegra_uart_port *tup = to_tegra_uport(u);

	tup->tx_bytes = 0;
	if (tup->tx_dma_chan)
		dmaengine_terminate_all(tup->tx_dma_chan);
	return;
}

static const char *tegra_uart_type(struct uart_port *u)
{
	return TEGRA_UART_TYPE;
}

static struct uart_ops tegra_uart_ops = {
	.tx_empty = tegra_uart_tx_empty,
	.set_mctrl = tegra_uart_set_mctrl,
	.get_mctrl = tegra_uart_get_mctrl,
	.stop_tx = tegra_uart_stop_tx,
	.start_tx = tegra_uart_start_tx,
	.stop_rx = tegra_uart_stop_rx,
	.flush_buffer = tegra_uart_flush_buffer,
	.enable_ms = tegra_uart_enable_ms,
	.break_ctl = tegra_uart_break_ctl,
	.startup = tegra_uart_startup,
	.shutdown = tegra_uart_shutdown,
	.set_termios = tegra_uart_set_termios,
	.type = tegra_uart_type,
	.request_port = tegra_uart_request_port,
	.release_port = tegra_uart_release_port,
};

static struct uart_driver tegra_uart_driver = {
	.owner = THIS_MODULE,
	.driver_name = "tegra_hsuart",
	.dev_name = "ttyTHS",
	.cons = 0,
	.nr = TEGRA_UART_MAXIMUM,
};
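
/*
 * Read the DMA request selector, the serial alias and the optional
 * modem-interrupt flag from the device tree node.
 */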
static int tegra_uart_parse_dt(struct platform_device *pdev,
	struct tegra_uart_port *tup)
{
	struct device_node *np = pdev->dev.of_node;
	u32 of_dma[2];
	int port;

	if (of_property_read_u32_array(np, "nvidia,dma-request-selector",
				of_dma, 2) >= 0) {
		tup->dma_req_sel = of_dma[1];
	} else {
		dev_err(&pdev->dev, "missing dma requestor in device tree\n");
		return -EINVAL;
	}

	port = of_alias_get_id(np, "serial");
	if (port < 0) {
		dev_err(&pdev->dev, "failed to get alias id, errno %d\n", port);
		return port;
	}
	tup->uport.line = port;

	tup->enable_modem_interrupt = of_property_read_bool(np,
					"nvidia,enable-modem-interrupt");
	return 0;
}

struct tegra_uart_chip_data tegra20_uart_chip_data = {
	.tx_fifo_full_status = false,
	.allow_txfifo_reset_fifo_mode = true,
	.support_clk_src_div = false,
};

struct tegra_uart_chip_data tegra30_uart_chip_data = {
	.tx_fifo_full_status = true,
	.allow_txfifo_reset_fifo_mode = false,
	.support_clk_src_div = true,
};

static struct of_device_id tegra_uart_of_match[] = {
	{
		.compatible = "nvidia,tegra30-hsuart",
		.data = &tegra30_uart_chip_data,
	}, {
		.compatible = "nvidia,tegra20-hsuart",
		.data = &tegra20_uart_chip_data,
	}, {
	},
};
MODULE_DEVICE_TABLE(of, tegra_uart_of_match);

static int tegra_uart_probe(struct platform_device *pdev)
{
	struct tegra_uart_port *tup;
	struct uart_port *u;
	struct resource *resource;
	int ret;
	const struct tegra_uart_chip_data *cdata;
	const struct of_device_id *match;

	match = of_match_device(tegra_uart_of_match, &pdev->dev);
	if (!match) {
		dev_err(&pdev->dev, "Error: No device match found\n");
		return -ENODEV;
	}
	cdata = match->data;

	tup = devm_kzalloc(&pdev->dev, sizeof(*tup), GFP_KERNEL);
	if (!tup) {
		dev_err(&pdev->dev, "Failed to allocate memory for tup\n");
		return -ENOMEM;
	}

	ret = tegra_uart_parse_dt(pdev, tup);
	if (ret < 0)
		return ret;

	u = &tup->uport;
	u->dev = &pdev->dev;
	u->ops = &tegra_uart_ops;
	u->type = PORT_TEGRA;
	u->fifosize = 32;
	tup->cdata = cdata;

	platform_set_drvdata(pdev, tup);
	resource = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!resource) {
		dev_err(&pdev->dev, "No IO memory resource\n");
		return -ENODEV;
	}

	u->mapbase = resource->start;
	u->membase = devm_ioremap_resource(&pdev->dev, resource);
	if (IS_ERR(u->membase))
		return PTR_ERR(u->membase);

	tup->uart_clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(tup->uart_clk)) {
		dev_err(&pdev->dev, "Couldn't get the clock\n");
		return PTR_ERR(tup->uart_clk);
	}

	u->iotype = UPIO_MEM32;
	u->irq = platform_get_irq(pdev, 0);
	u->regshift = 2;
	ret = uart_add_one_port(&tegra_uart_driver, u);
	if (ret < 0) {
		dev_err(&pdev->dev, "Failed to add uart port, err %d\n", ret);
		return ret;
	}
	return ret;
}

static int tegra_uart_remove(struct platform_device *pdev)
{
	struct tegra_uart_port *tup = platform_get_drvdata(pdev);
	struct uart_port *u = &tup->uport;

	uart_remove_one_port(&tegra_uart_driver, u);
	return 0;
}

#ifdef CONFIG_PM_SLEEP
static int tegra_uart_suspend(struct device *dev)
{
	struct tegra_uart_port *tup = dev_get_drvdata(dev);
	struct uart_port *u = &tup->uport;

	return uart_suspend_port(&tegra_uart_driver, u);
}

static int tegra_uart_resume(struct device *dev)
{
	struct tegra_uart_port *tup = dev_get_drvdata(dev);
	struct uart_port *u = &tup->uport;

	return uart_resume_port(&tegra_uart_driver, u);
}
#endif

static const struct dev_pm_ops tegra_uart_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(tegra_uart_suspend, tegra_uart_resume)
};

static struct platform_driver tegra_uart_platform_driver = {
	.probe = tegra_uart_probe,
	.remove = tegra_uart_remove,
	.driver = {
		.name = "serial-tegra",
		.of_match_table = tegra_uart_of_match,
		.pm = &tegra_uart_pm_ops,
	},
};

static int __init tegra_uart_init(void)
{
	int ret;

	ret = uart_register_driver(&tegra_uart_driver);
	if (ret < 0) {
		pr_err("Could not register %s driver\n",
			tegra_uart_driver.driver_name);
		return ret;
	}

	ret = platform_driver_register(&tegra_uart_platform_driver);
	if (ret < 0) {
		pr_err("Uart platform driver register failed, e = %d\n", ret);
		uart_unregister_driver(&tegra_uart_driver);
		return ret;
	}
	return 0;
}

static void __exit tegra_uart_exit(void)
{
	pr_info("Unloading tegra uart driver\n");
	platform_driver_unregister(&tegra_uart_platform_driver);
	uart_unregister_driver(&tegra_uart_driver);
}

module_init(tegra_uart_init);
module_exit(tegra_uart_exit);

MODULE_ALIAS("platform:serial-tegra");
MODULE_DESCRIPTION("High speed UART driver for tegra chipset");
MODULE_AUTHOR("Laxman Dewangan <ldewangan@nvidia.com>");
MODULE_LICENSE("GPL v2");