  1. /*
  2. * Driver for CSR SiRFprimaII onboard UARTs.
  3. *
  4. * Copyright (c) 2011 Cambridge Silicon Radio Limited, a CSR plc group company.
  5. *
  6. * Licensed under GPLv2 or later.
  7. */
  8. #include <linux/module.h>
  9. #include <linux/ioport.h>
  10. #include <linux/platform_device.h>
  11. #include <linux/init.h>
  12. #include <linux/sysrq.h>
  13. #include <linux/console.h>
  14. #include <linux/tty.h>
  15. #include <linux/tty_flip.h>
  16. #include <linux/serial_core.h>
  17. #include <linux/serial.h>
  18. #include <linux/clk.h>
  19. #include <linux/of.h>
  20. #include <linux/slab.h>
  21. #include <linux/io.h>
  22. #include <linux/of_gpio.h>
  23. #include <linux/dmaengine.h>
  24. #include <linux/dma-direction.h>
  25. #include <linux/dma-mapping.h>
  26. #include <linux/sirfsoc_dma.h>
  27. #include <asm/irq.h>
  28. #include <asm/mach/irq.h>
  29. #include "sirfsoc_uart.h"
/* Forward declarations: these helpers reference each other across the file. */
static unsigned int
sirfsoc_uart_pio_tx_chars(struct sirfsoc_uart_port *sirfport, int count);
static unsigned int
sirfsoc_uart_pio_rx_chars(struct uart_port *port, unsigned int max_rx_count);
static struct uart_driver sirfsoc_uart_drv;
static void sirfsoc_uart_tx_dma_complete_callback(void *param);
static void sirfsoc_uart_start_next_rx_dma(struct uart_port *port);
static void sirfsoc_uart_rx_dma_complete_callback(void *param);
/*
 * Baud rate -> raw clock-divisor register value lookup table, ordered from
 * highest to lowest rate.  The second field is the pre-computed value
 * written to the UART clock register for that rate (see where the table is
 * searched in set_termios).  NOTE(review): values are magic constants from
 * the vendor; derivation formula not visible here -- do not edit by hand.
 */
static const struct sirfsoc_baudrate_to_regv baudrate_to_regv[] = {
	{4000000, 2359296},
	{3500000, 1310721},
	{3000000, 1572865},
	{2500000, 1245186},
	{2000000, 1572866},
	{1500000, 1245188},
	{1152000, 1638404},
	{1000000, 1572869},
	{921600, 1114120},
	{576000, 1245196},
	{500000, 1245198},
	{460800, 1572876},
	{230400, 1310750},
	{115200, 1310781},
	{57600, 1310843},
	{38400, 1114328},
	{19200, 1114545},
	{9600, 1114979},
};
/*
 * Static per-port state, one entry per hardware UART instance.  Only the
 * serial-core identity fields are filled statically; everything else
 * (registers, IRQs, DMA channels) is filled in at probe time.
 */
static struct sirfsoc_uart_port sirfsoc_uart_ports[SIRFSOC_UART_NR] = {
	[0] = {
		.port = {
			.iotype		= UPIO_MEM,
			.flags		= UPF_BOOT_AUTOCONF,
			.line		= 0,
		},
	},
	[1] = {
		.port = {
			.iotype		= UPIO_MEM,
			.flags		= UPF_BOOT_AUTOCONF,
			.line		= 1,
		},
	},
	[2] = {
		.port = {
			.iotype		= UPIO_MEM,
			.flags		= UPF_BOOT_AUTOCONF,
			.line		= 2,
		},
	},
	[3] = {
		.port = {
			.iotype		= UPIO_MEM,
			.flags		= UPF_BOOT_AUTOCONF,
			.line		= 3,
		},
	},
	[4] = {
		.port = {
			.iotype		= UPIO_MEM,
			.flags		= UPF_BOOT_AUTOCONF,
			.line		= 4,
		},
	},
};
/* Map a serial-core uart_port back to its enclosing sirfsoc_uart_port. */
static inline struct sirfsoc_uart_port *to_sirfport(struct uart_port *port)
{
	return container_of(port, struct sirfsoc_uart_port, port);
}
/*
 * Serial-core tx_empty hook: sample the TX FIFO status register and report
 * TIOCSER_TEMT when this line's "fifo empty" bit is set, 0 otherwise.
 */
static inline unsigned int sirfsoc_uart_tx_empty(struct uart_port *port)
{
	unsigned long reg;
	struct sirfsoc_uart_port *sirfport = to_sirfport(port);
	struct sirfsoc_register *ureg = &sirfport->uart_reg->uart_reg;
	struct sirfsoc_fifo_status *ufifo_st = &sirfport->uart_reg->fifo_status;

	reg = rd_regl(port, ureg->sirfsoc_tx_fifo_status);
	return (reg & ufifo_st->ff_empty(port->line)) ? TIOCSER_TEMT : 0;
}
  108. static unsigned int sirfsoc_uart_get_mctrl(struct uart_port *port)
  109. {
  110. struct sirfsoc_uart_port *sirfport = to_sirfport(port);
  111. struct sirfsoc_register *ureg = &sirfport->uart_reg->uart_reg;
  112. if (!sirfport->hw_flow_ctrl || !sirfport->ms_enabled)
  113. goto cts_asserted;
  114. if (sirfport->uart_reg->uart_type == SIRF_REAL_UART) {
  115. if (!(rd_regl(port, ureg->sirfsoc_afc_ctrl) &
  116. SIRFUART_AFC_CTS_STATUS))
  117. goto cts_asserted;
  118. else
  119. goto cts_deasserted;
  120. } else {
  121. if (!gpio_get_value(sirfport->cts_gpio))
  122. goto cts_asserted;
  123. else
  124. goto cts_deasserted;
  125. }
  126. cts_deasserted:
  127. return TIOCM_CAR | TIOCM_DSR;
  128. cts_asserted:
  129. return TIOCM_CAR | TIOCM_DSR | TIOCM_CTS;
  130. }
  131. static void sirfsoc_uart_set_mctrl(struct uart_port *port, unsigned int mctrl)
  132. {
  133. struct sirfsoc_uart_port *sirfport = to_sirfport(port);
  134. struct sirfsoc_register *ureg = &sirfport->uart_reg->uart_reg;
  135. unsigned int assert = mctrl & TIOCM_RTS;
  136. unsigned int val = assert ? SIRFUART_AFC_CTRL_RX_THD : 0x0;
  137. unsigned int current_val;
  138. if (!sirfport->hw_flow_ctrl || !sirfport->ms_enabled)
  139. return;
  140. if (sirfport->uart_reg->uart_type == SIRF_REAL_UART) {
  141. current_val = rd_regl(port, ureg->sirfsoc_afc_ctrl) & ~0xFF;
  142. val |= current_val;
  143. wr_regl(port, ureg->sirfsoc_afc_ctrl, val);
  144. } else {
  145. if (!val)
  146. gpio_set_value(sirfport->rts_gpio, 1);
  147. else
  148. gpio_set_value(sirfport->rts_gpio, 0);
  149. }
  150. }
  151. static void sirfsoc_uart_stop_tx(struct uart_port *port)
  152. {
  153. struct sirfsoc_uart_port *sirfport = to_sirfport(port);
  154. struct sirfsoc_register *ureg = &sirfport->uart_reg->uart_reg;
  155. struct sirfsoc_int_en *uint_en = &sirfport->uart_reg->uart_int_en;
  156. if (IS_DMA_CHAN_VALID(sirfport->tx_dma_no)) {
  157. if (sirfport->tx_dma_state == TX_DMA_RUNNING) {
  158. dmaengine_pause(sirfport->tx_dma_chan);
  159. sirfport->tx_dma_state = TX_DMA_PAUSE;
  160. } else {
  161. if (!sirfport->is_marco)
  162. wr_regl(port, ureg->sirfsoc_int_en_reg,
  163. rd_regl(port, ureg->sirfsoc_int_en_reg) &
  164. ~uint_en->sirfsoc_txfifo_empty_en);
  165. else
  166. wr_regl(port, SIRFUART_INT_EN_CLR,
  167. uint_en->sirfsoc_txfifo_empty_en);
  168. }
  169. } else {
  170. if (!sirfport->is_marco)
  171. wr_regl(port, ureg->sirfsoc_int_en_reg,
  172. rd_regl(port, ureg->sirfsoc_int_en_reg) &
  173. ~uint_en->sirfsoc_txfifo_empty_en);
  174. else
  175. wr_regl(port, SIRFUART_INT_EN_CLR,
  176. uint_en->sirfsoc_txfifo_empty_en);
  177. }
  178. }
/*
 * Push pending TX data out via DMA, falling back to PIO for unaligned
 * head/tail bytes (see the alignment comment below).  Resumes a paused
 * transfer or bails out if one is already running.  Caller context:
 * start_tx and the TX DMA completion callback.
 */
static void sirfsoc_uart_tx_with_dma(struct sirfsoc_uart_port *sirfport)
{
	struct uart_port *port = &sirfport->port;
	struct sirfsoc_register *ureg = &sirfport->uart_reg->uart_reg;
	struct sirfsoc_int_en *uint_en = &sirfport->uart_reg->uart_int_en;
	struct circ_buf *xmit = &port->state->xmit;
	unsigned long tran_size;
	unsigned long tran_start;
	unsigned long pio_tx_size;

	/* contiguous bytes available up to the end of the circular buffer */
	tran_size = CIRC_CNT_TO_END(xmit->head, xmit->tail, UART_XMIT_SIZE);
	tran_start = (unsigned long)(xmit->buf + xmit->tail);
	if (uart_circ_empty(xmit) || uart_tx_stopped(port) ||
			!tran_size)
		return;
	/* a transfer paused by stop_tx just needs resuming */
	if (sirfport->tx_dma_state == TX_DMA_PAUSE) {
		dmaengine_resume(sirfport->tx_dma_chan);
		return;
	}
	if (sirfport->tx_dma_state == TX_DMA_RUNNING)
		return;
	/* mask the TX-FIFO-empty interrupt while DMA feeds the FIFO */
	if (!sirfport->is_marco)
		wr_regl(port, ureg->sirfsoc_int_en_reg,
				rd_regl(port, ureg->sirfsoc_int_en_reg)&
				~(uint_en->sirfsoc_txfifo_empty_en));
	else
		wr_regl(port, SIRFUART_INT_EN_CLR,
				uint_en->sirfsoc_txfifo_empty_en);
	/*
	 * DMA requires buffer address and buffer length are both aligned with
	 * 4 bytes, so we use PIO for
	 * 1. if address is not aligned with 4bytes, use PIO for the first 1~3
	 * bytes, and move to DMA for the left part aligned with 4bytes
	 * 2. if buffer length is not aligned with 4bytes, use DMA for aligned
	 * part first, move to PIO for the left 1~3 bytes
	 */
	if (tran_size < 4 || BYTES_TO_ALIGN(tran_start)) {
		/* PIO path: stop FIFO, switch the port to IO mode first */
		wr_regl(port, ureg->sirfsoc_tx_fifo_op, SIRFUART_FIFO_STOP);
		wr_regl(port, ureg->sirfsoc_tx_dma_io_ctrl,
			rd_regl(port, ureg->sirfsoc_tx_dma_io_ctrl)|
			SIRFUART_IO_MODE);
		if (BYTES_TO_ALIGN(tran_start)) {
			/* emit the 1~3 leading bytes before the aligned part */
			pio_tx_size = sirfsoc_uart_pio_tx_chars(sirfport,
				BYTES_TO_ALIGN(tran_start));
			tran_size -= pio_tx_size;
		}
		if (tran_size < 4)
			sirfsoc_uart_pio_tx_chars(sirfport, tran_size);
		/* re-enable TX-FIFO-empty so PIO keeps draining the buffer */
		if (!sirfport->is_marco)
			wr_regl(port, ureg->sirfsoc_int_en_reg,
				rd_regl(port, ureg->sirfsoc_int_en_reg)|
				uint_en->sirfsoc_txfifo_empty_en);
		else
			wr_regl(port, ureg->sirfsoc_int_en_reg,
				uint_en->sirfsoc_txfifo_empty_en);
		wr_regl(port, ureg->sirfsoc_tx_fifo_op, SIRFUART_FIFO_START);
	} else {
		/* tx transfer mode switch into dma mode */
		wr_regl(port, ureg->sirfsoc_tx_fifo_op, SIRFUART_FIFO_STOP);
		wr_regl(port, ureg->sirfsoc_tx_dma_io_ctrl,
			rd_regl(port, ureg->sirfsoc_tx_dma_io_ctrl)&
			~SIRFUART_IO_MODE);
		wr_regl(port, ureg->sirfsoc_tx_fifo_op, SIRFUART_FIFO_START);
		/* DMA only moves the 4-byte-aligned part of the data */
		tran_size &= ~(0x3);
		sirfport->tx_dma_addr = dma_map_single(port->dev,
			xmit->buf + xmit->tail,
			tran_size, DMA_TO_DEVICE);
		sirfport->tx_dma_desc = dmaengine_prep_slave_single(
			sirfport->tx_dma_chan, sirfport->tx_dma_addr,
			tran_size, DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT);
		if (!sirfport->tx_dma_desc) {
			dev_err(port->dev, "DMA prep slave single fail\n");
			return;
		}
		sirfport->tx_dma_desc->callback =
			sirfsoc_uart_tx_dma_complete_callback;
		sirfport->tx_dma_desc->callback_param = (void *)sirfport;
		/* remembered so the callback can advance xmit->tail */
		sirfport->transfer_size = tran_size;
		dmaengine_submit(sirfport->tx_dma_desc);
		dma_async_issue_pending(sirfport->tx_dma_chan);
		sirfport->tx_dma_state = TX_DMA_RUNNING;
	}
}
/*
 * Serial-core start_tx hook.  With a TX DMA channel configured, hand off
 * to the DMA path; otherwise prime the FIFO with one character by PIO and
 * unmask the TX-FIFO-empty interrupt to drive the rest from the ISR.
 */
static void sirfsoc_uart_start_tx(struct uart_port *port)
{
	struct sirfsoc_uart_port *sirfport = to_sirfport(port);
	struct sirfsoc_register *ureg = &sirfport->uart_reg->uart_reg;
	struct sirfsoc_int_en *uint_en = &sirfport->uart_reg->uart_int_en;

	if (IS_DMA_CHAN_VALID(sirfport->tx_dma_no))
		sirfsoc_uart_tx_with_dma(sirfport);
	else {
		sirfsoc_uart_pio_tx_chars(sirfport, 1);
		wr_regl(port, ureg->sirfsoc_tx_fifo_op, SIRFUART_FIFO_START);
		if (!sirfport->is_marco)
			wr_regl(port, ureg->sirfsoc_int_en_reg,
				rd_regl(port, ureg->sirfsoc_int_en_reg)|
				uint_en->sirfsoc_txfifo_empty_en);
		else
			wr_regl(port, ureg->sirfsoc_int_en_reg,
				uint_en->sirfsoc_txfifo_empty_en);
	}
}
/*
 * Serial-core stop_rx hook: halt the RX FIFO, then mask the RX interrupt
 * sources.  On a DMA-capable port also terminate the RX DMA channel; on a
 * PIO port just mask the IO-mode RX interrupts.
 */
static void sirfsoc_uart_stop_rx(struct uart_port *port)
{
	struct sirfsoc_uart_port *sirfport = to_sirfport(port);
	struct sirfsoc_register *ureg = &sirfport->uart_reg->uart_reg;
	struct sirfsoc_int_en *uint_en = &sirfport->uart_reg->uart_int_en;

	/* writing 0 to the fifo-op register stops the RX FIFO */
	wr_regl(port, ureg->sirfsoc_rx_fifo_op, 0);
	if (IS_DMA_CHAN_VALID(sirfport->rx_dma_no)) {
		if (!sirfport->is_marco)
			wr_regl(port, ureg->sirfsoc_int_en_reg,
				rd_regl(port, ureg->sirfsoc_int_en_reg) &
				~(SIRFUART_RX_DMA_INT_EN(port, uint_en) |
				uint_en->sirfsoc_rx_done_en));
		else
			/* marco-type parts clear enables via SIRFUART_INT_EN_CLR */
			wr_regl(port, SIRFUART_INT_EN_CLR,
				SIRFUART_RX_DMA_INT_EN(port, uint_en)|
				uint_en->sirfsoc_rx_done_en);
		dmaengine_terminate_all(sirfport->rx_dma_chan);
	} else {
		if (!sirfport->is_marco)
			wr_regl(port, ureg->sirfsoc_int_en_reg,
				rd_regl(port, ureg->sirfsoc_int_en_reg)&
				~(SIRFUART_RX_IO_INT_EN(port, uint_en)));
		else
			wr_regl(port, SIRFUART_INT_EN_CLR,
				SIRFUART_RX_IO_INT_EN(port, uint_en));
	}
}
/*
 * Disable modem-status (CTS) handling.  On a real UART this clears the
 * AFC control bits and masks the CTS interrupt; on a USP-based port it
 * disables the CTS GPIO interrupt.  No-op without hardware flow control.
 */
static void sirfsoc_uart_disable_ms(struct uart_port *port)
{
	struct sirfsoc_uart_port *sirfport = to_sirfport(port);
	struct sirfsoc_register *ureg = &sirfport->uart_reg->uart_reg;
	struct sirfsoc_int_en *uint_en = &sirfport->uart_reg->uart_int_en;

	if (!sirfport->hw_flow_ctrl)
		return;
	sirfport->ms_enabled = false;
	if (sirfport->uart_reg->uart_type == SIRF_REAL_UART) {
		/* clear flow-control enables and RX threshold (low 10 bits) */
		wr_regl(port, ureg->sirfsoc_afc_ctrl,
			rd_regl(port, ureg->sirfsoc_afc_ctrl) & ~0x3FF);
		if (!sirfport->is_marco)
			wr_regl(port, ureg->sirfsoc_int_en_reg,
				rd_regl(port, ureg->sirfsoc_int_en_reg)&
				~uint_en->sirfsoc_cts_en);
		else
			wr_regl(port, SIRFUART_INT_EN_CLR,
				uint_en->sirfsoc_cts_en);
	} else
		disable_irq(gpio_to_irq(sirfport->cts_gpio));
}
  328. static irqreturn_t sirfsoc_uart_usp_cts_handler(int irq, void *dev_id)
  329. {
  330. struct sirfsoc_uart_port *sirfport = (struct sirfsoc_uart_port *)dev_id;
  331. struct uart_port *port = &sirfport->port;
  332. if (gpio_is_valid(sirfport->cts_gpio) && sirfport->ms_enabled)
  333. uart_handle_cts_change(port,
  334. !gpio_get_value(sirfport->cts_gpio));
  335. return IRQ_HANDLED;
  336. }
/*
 * Enable modem-status (CTS) handling.  On a real UART this turns on the
 * AFC TX/RX flow-control bits and unmasks the CTS interrupt; on a
 * USP-based port it enables the CTS GPIO interrupt.  No-op without
 * hardware flow control.
 */
static void sirfsoc_uart_enable_ms(struct uart_port *port)
{
	struct sirfsoc_uart_port *sirfport = to_sirfport(port);
	struct sirfsoc_register *ureg = &sirfport->uart_reg->uart_reg;
	struct sirfsoc_int_en *uint_en = &sirfport->uart_reg->uart_int_en;

	if (!sirfport->hw_flow_ctrl)
		return;
	sirfport->ms_enabled = true;
	if (sirfport->uart_reg->uart_type == SIRF_REAL_UART) {
		wr_regl(port, ureg->sirfsoc_afc_ctrl,
				rd_regl(port, ureg->sirfsoc_afc_ctrl) |
				SIRFUART_AFC_TX_EN | SIRFUART_AFC_RX_EN);
		if (!sirfport->is_marco)
			wr_regl(port, ureg->sirfsoc_int_en_reg,
					rd_regl(port, ureg->sirfsoc_int_en_reg)
					| uint_en->sirfsoc_cts_en);
		else
			/* marco-type parts set enables by direct write */
			wr_regl(port, ureg->sirfsoc_int_en_reg,
					uint_en->sirfsoc_cts_en);
	} else
		enable_irq(gpio_to_irq(sirfport->cts_gpio));
}
  359. static void sirfsoc_uart_break_ctl(struct uart_port *port, int break_state)
  360. {
  361. struct sirfsoc_uart_port *sirfport = to_sirfport(port);
  362. struct sirfsoc_register *ureg = &sirfport->uart_reg->uart_reg;
  363. if (sirfport->uart_reg->uart_type == SIRF_REAL_UART) {
  364. unsigned long ulcon = rd_regl(port, ureg->sirfsoc_line_ctrl);
  365. if (break_state)
  366. ulcon |= SIRFUART_SET_BREAK;
  367. else
  368. ulcon &= ~SIRFUART_SET_BREAK;
  369. wr_regl(port, ureg->sirfsoc_line_ctrl, ulcon);
  370. }
  371. }
  372. static unsigned int
  373. sirfsoc_uart_pio_rx_chars(struct uart_port *port, unsigned int max_rx_count)
  374. {
  375. struct sirfsoc_uart_port *sirfport = to_sirfport(port);
  376. struct sirfsoc_register *ureg = &sirfport->uart_reg->uart_reg;
  377. struct sirfsoc_fifo_status *ufifo_st = &sirfport->uart_reg->fifo_status;
  378. unsigned int ch, rx_count = 0;
  379. struct tty_struct *tty;
  380. tty = tty_port_tty_get(&port->state->port);
  381. if (!tty)
  382. return -ENODEV;
  383. while (!(rd_regl(port, ureg->sirfsoc_rx_fifo_status) &
  384. ufifo_st->ff_empty(port->line))) {
  385. ch = rd_regl(port, ureg->sirfsoc_rx_fifo_data) |
  386. SIRFUART_DUMMY_READ;
  387. if (unlikely(uart_handle_sysrq_char(port, ch)))
  388. continue;
  389. uart_insert_char(port, 0, 0, ch, TTY_NORMAL);
  390. rx_count++;
  391. if (rx_count >= max_rx_count)
  392. break;
  393. }
  394. sirfport->rx_io_count += rx_count;
  395. port->icount.rx += rx_count;
  396. spin_unlock(&port->lock);
  397. tty_flip_buffer_push(&port->state->port);
  398. spin_lock(&port->lock);
  399. return rx_count;
  400. }
  401. static unsigned int
  402. sirfsoc_uart_pio_tx_chars(struct sirfsoc_uart_port *sirfport, int count)
  403. {
  404. struct uart_port *port = &sirfport->port;
  405. struct sirfsoc_register *ureg = &sirfport->uart_reg->uart_reg;
  406. struct sirfsoc_fifo_status *ufifo_st = &sirfport->uart_reg->fifo_status;
  407. struct circ_buf *xmit = &port->state->xmit;
  408. unsigned int num_tx = 0;
  409. while (!uart_circ_empty(xmit) &&
  410. !(rd_regl(port, ureg->sirfsoc_tx_fifo_status) &
  411. ufifo_st->ff_full(port->line)) &&
  412. count--) {
  413. wr_regl(port, ureg->sirfsoc_tx_fifo_data,
  414. xmit->buf[xmit->tail]);
  415. xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
  416. port->icount.tx++;
  417. num_tx++;
  418. }
  419. if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
  420. uart_write_wakeup(port);
  421. return num_tx;
  422. }
/*
 * TX DMA completion callback.  Advances the circular-buffer tail by the
 * size of the finished transfer, accounts it, wakes writers, unmaps the
 * DMA buffer, then marks the channel idle and tries to start the next
 * chunk under tx_lock.
 * NOTE(review): xmit->tail is updated here without holding port->lock;
 * this looks racy against concurrent writers -- confirm the locking rules
 * before relying on it.
 */
static void sirfsoc_uart_tx_dma_complete_callback(void *param)
{
	struct sirfsoc_uart_port *sirfport = (struct sirfsoc_uart_port *)param;
	struct uart_port *port = &sirfport->port;
	struct circ_buf *xmit = &port->state->xmit;
	unsigned long flags;

	xmit->tail = (xmit->tail + sirfport->transfer_size) &
				(UART_XMIT_SIZE - 1);
	port->icount.tx += sirfport->transfer_size;
	if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
		uart_write_wakeup(port);
	if (sirfport->tx_dma_addr)
		dma_unmap_single(port->dev, sirfport->tx_dma_addr,
				sirfport->transfer_size, DMA_TO_DEVICE);
	spin_lock_irqsave(&sirfport->tx_lock, flags);
	sirfport->tx_dma_state = TX_DMA_IDLE;
	sirfsoc_uart_tx_with_dma(sirfport);
	spin_unlock_irqrestore(&sirfport->tx_lock, flags);
}
  442. static void sirfsoc_uart_insert_rx_buf_to_tty(
  443. struct sirfsoc_uart_port *sirfport, int count)
  444. {
  445. struct uart_port *port = &sirfport->port;
  446. struct tty_port *tport = &port->state->port;
  447. int inserted;
  448. inserted = tty_insert_flip_string(tport,
  449. sirfport->rx_dma_items[sirfport->rx_completed].xmit.buf, count);
  450. port->icount.rx += inserted;
  451. tty_flip_buffer_push(tport);
  452. }
/*
 * (Re)arm one RX DMA loop buffer: reset its circ-buf indices, prepare a
 * DEV_TO_MEM slave descriptor covering the whole buffer, attach the RX
 * completion callback, submit it and kick the channel.
 */
static void sirfsoc_rx_submit_one_dma_desc(struct uart_port *port, int index)
{
	struct sirfsoc_uart_port *sirfport = to_sirfport(port);

	sirfport->rx_dma_items[index].xmit.tail =
		sirfport->rx_dma_items[index].xmit.head = 0;
	sirfport->rx_dma_items[index].desc =
		dmaengine_prep_slave_single(sirfport->rx_dma_chan,
		sirfport->rx_dma_items[index].dma_addr, SIRFSOC_RX_DMA_BUF_SIZE,
		DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT);
	if (!sirfport->rx_dma_items[index].desc) {
		dev_err(port->dev, "DMA slave single fail\n");
		return;
	}
	sirfport->rx_dma_items[index].desc->callback =
		sirfsoc_uart_rx_dma_complete_callback;
	sirfport->rx_dma_items[index].desc->callback_param = sirfport;
	/* cookie is kept so the timeout path can query this transfer */
	sirfport->rx_dma_items[index].cookie =
		dmaengine_submit(sirfport->rx_dma_items[index].desc);
	dma_async_issue_pending(sirfport->rx_dma_chan);
}
/*
 * Tasklet run after an RX timeout interrupt: flush everything received so
 * far to the tty, then collect the final straggler bytes by PIO.
 * Steps: drain all fully-completed loop buffers (resubmitting each),
 * flush the partial residue of the in-flight buffer, switch the port to
 * IO mode and PIO-read up to 4 remaining bytes.  Once 4 IO bytes have
 * been gathered, rx_done is masked and DMA reception restarts; otherwise
 * rx_done is (re)enabled to catch the rest.
 */
static void sirfsoc_rx_tmo_process_tl(unsigned long param)
{
	struct sirfsoc_uart_port *sirfport = (struct sirfsoc_uart_port *)param;
	struct uart_port *port = &sirfport->port;
	struct sirfsoc_register *ureg = &sirfport->uart_reg->uart_reg;
	struct sirfsoc_int_en *uint_en = &sirfport->uart_reg->uart_int_en;
	struct sirfsoc_int_status *uint_st = &sirfport->uart_reg->uart_int_st;
	unsigned int count;
	unsigned long flags;

	spin_lock_irqsave(&sirfport->rx_lock, flags);
	/* push every fully-completed loop buffer and rearm it */
	while (sirfport->rx_completed != sirfport->rx_issued) {
		sirfsoc_uart_insert_rx_buf_to_tty(sirfport,
				SIRFSOC_RX_DMA_BUF_SIZE);
		sirfsoc_rx_submit_one_dma_desc(port, sirfport->rx_completed++);
		sirfport->rx_completed %= SIRFSOC_RX_LOOP_BUF_CNT;
	}
	/* head was set from the DMA residue by the timeout handler */
	count = CIRC_CNT(sirfport->rx_dma_items[sirfport->rx_issued].xmit.head,
			sirfport->rx_dma_items[sirfport->rx_issued].xmit.tail,
			SIRFSOC_RX_DMA_BUF_SIZE);
	if (count > 0)
		sirfsoc_uart_insert_rx_buf_to_tty(sirfport, count);
	/* switch to IO mode to pick up the last few bytes by PIO */
	wr_regl(port, ureg->sirfsoc_rx_dma_io_ctrl,
			rd_regl(port, ureg->sirfsoc_rx_dma_io_ctrl) |
			SIRFUART_IO_MODE);
	sirfsoc_uart_pio_rx_chars(port, 4 - sirfport->rx_io_count);
	spin_unlock_irqrestore(&sirfport->rx_lock, flags);
	if (sirfport->rx_io_count == 4) {
		/* a full IO word gathered: go back to DMA reception */
		spin_lock_irqsave(&sirfport->rx_lock, flags);
		sirfport->rx_io_count = 0;
		wr_regl(port, ureg->sirfsoc_int_st_reg,
				uint_st->sirfsoc_rx_done);
		if (!sirfport->is_marco)
			wr_regl(port, ureg->sirfsoc_int_en_reg,
					rd_regl(port, ureg->sirfsoc_int_en_reg) &
					~(uint_en->sirfsoc_rx_done_en));
		else
			wr_regl(port, SIRFUART_INT_EN_CLR,
					uint_en->sirfsoc_rx_done_en);
		spin_unlock_irqrestore(&sirfport->rx_lock, flags);
		sirfsoc_uart_start_next_rx_dma(port);
	} else {
		/* still short of 4 bytes: wait for rx_done to fire */
		spin_lock_irqsave(&sirfport->rx_lock, flags);
		wr_regl(port, ureg->sirfsoc_int_st_reg,
				uint_st->sirfsoc_rx_done);
		if (!sirfport->is_marco)
			wr_regl(port, ureg->sirfsoc_int_en_reg,
					rd_regl(port, ureg->sirfsoc_int_en_reg) |
					(uint_en->sirfsoc_rx_done_en));
		else
			wr_regl(port, ureg->sirfsoc_int_en_reg,
					uint_en->sirfsoc_rx_done_en);
		spin_unlock_irqrestore(&sirfport->rx_lock, flags);
	}
}
/*
 * ISR-context handler for the RX timeout interrupt.  Queries the residue
 * of the in-flight RX descriptor to learn how many bytes actually landed,
 * stops the RX DMA channel, masks further timeout interrupts, and defers
 * the actual data push to the rx_tmo_process tasklet.
 */
static void sirfsoc_uart_handle_rx_tmo(struct sirfsoc_uart_port *sirfport)
{
	struct uart_port *port = &sirfport->port;
	struct sirfsoc_register *ureg = &sirfport->uart_reg->uart_reg;
	struct sirfsoc_int_en *uint_en = &sirfport->uart_reg->uart_int_en;
	struct dma_tx_state tx_state;

	spin_lock(&sirfport->rx_lock);
	dmaengine_tx_status(sirfport->rx_dma_chan,
		sirfport->rx_dma_items[sirfport->rx_issued].cookie, &tx_state);
	dmaengine_terminate_all(sirfport->rx_dma_chan);
	/* record how far DMA got: buffer size minus unwritten residue */
	sirfport->rx_dma_items[sirfport->rx_issued].xmit.head =
		SIRFSOC_RX_DMA_BUF_SIZE - tx_state.residue;
	if (!sirfport->is_marco)
		wr_regl(port, ureg->sirfsoc_int_en_reg,
			rd_regl(port, ureg->sirfsoc_int_en_reg) &
			~(uint_en->sirfsoc_rx_timeout_en));
	else
		wr_regl(port, SIRFUART_INT_EN_CLR,
			uint_en->sirfsoc_rx_timeout_en);
	spin_unlock(&sirfport->rx_lock);
	tasklet_schedule(&sirfport->rx_tmo_process_tasklet);
}
/*
 * ISR-context handler for the RX "done" interrupt used while the port is
 * temporarily in IO mode after a timeout.  PIO-reads the remaining bytes;
 * once a full 4-byte word has been gathered, masks rx_done, clears any
 * stale timeout status, and restarts DMA reception.
 */
static void sirfsoc_uart_handle_rx_done(struct sirfsoc_uart_port *sirfport)
{
	struct uart_port *port = &sirfport->port;
	struct sirfsoc_register *ureg = &sirfport->uart_reg->uart_reg;
	struct sirfsoc_int_en *uint_en = &sirfport->uart_reg->uart_int_en;
	struct sirfsoc_int_status *uint_st = &sirfport->uart_reg->uart_int_st;

	sirfsoc_uart_pio_rx_chars(port, 4 - sirfport->rx_io_count);
	if (sirfport->rx_io_count == 4) {
		sirfport->rx_io_count = 0;
		if (!sirfport->is_marco)
			wr_regl(port, ureg->sirfsoc_int_en_reg,
				rd_regl(port, ureg->sirfsoc_int_en_reg) &
				~(uint_en->sirfsoc_rx_done_en));
		else
			wr_regl(port, SIRFUART_INT_EN_CLR,
				uint_en->sirfsoc_rx_done_en);
		/* ack any pending timeout status before re-enabling DMA */
		wr_regl(port, ureg->sirfsoc_int_st_reg,
			uint_st->sirfsoc_rx_timeout);
		sirfsoc_uart_start_next_rx_dma(port);
	}
}
/*
 * Main UART interrupt handler.  Reads and acks all pending status bits,
 * keeps only the enabled ones, then dispatches in order: line errors,
 * CTS change, RX (DMA or PIO path), TX (DMA or PIO path).
 */
static irqreturn_t sirfsoc_uart_isr(int irq, void *dev_id)
{
	unsigned long intr_status;
	unsigned long cts_status;
	unsigned long flag = TTY_NORMAL;
	struct sirfsoc_uart_port *sirfport = (struct sirfsoc_uart_port *)dev_id;
	struct uart_port *port = &sirfport->port;
	struct sirfsoc_register *ureg = &sirfport->uart_reg->uart_reg;
	struct sirfsoc_fifo_status *ufifo_st = &sirfport->uart_reg->fifo_status;
	struct sirfsoc_int_status *uint_st = &sirfport->uart_reg->uart_int_st;
	struct sirfsoc_int_en *uint_en = &sirfport->uart_reg->uart_int_en;
	struct uart_state *state = port->state;
	struct circ_buf *xmit = &port->state->xmit;

	spin_lock(&port->lock);
	/* read, ack, then mask down to the currently enabled sources */
	intr_status = rd_regl(port, ureg->sirfsoc_int_st_reg);
	wr_regl(port, ureg->sirfsoc_int_st_reg, intr_status);
	intr_status &= rd_regl(port, ureg->sirfsoc_int_en_reg);
	if (unlikely(intr_status & (SIRFUART_ERR_INT_STAT(port, uint_st)))) {
		if (intr_status & uint_st->sirfsoc_rxd_brk) {
			port->icount.brk++;
			if (uart_handle_break(port))
				goto recv_char;
		}
		if (intr_status & uint_st->sirfsoc_rx_oflow)
			port->icount.overrun++;
		if (intr_status & uint_st->sirfsoc_frm_err) {
			port->icount.frame++;
			flag = TTY_FRAME;
		}
		if (intr_status & uint_st->sirfsoc_parity_err)
			flag = TTY_PARITY;
		/* on any line error: reset and restart the RX FIFO */
		wr_regl(port, ureg->sirfsoc_rx_fifo_op, SIRFUART_FIFO_RESET);
		wr_regl(port, ureg->sirfsoc_rx_fifo_op, 0);
		wr_regl(port, ureg->sirfsoc_rx_fifo_op, SIRFUART_FIFO_START);
		intr_status &= port->read_status_mask;
		uart_insert_char(port, intr_status,
				uint_en->sirfsoc_rx_oflow_en, 0, flag);
		tty_flip_buffer_push(&state->port);
	}
recv_char:
	if ((sirfport->uart_reg->uart_type == SIRF_REAL_UART) &&
			(intr_status & SIRFUART_CTS_INT_ST(uint_st)) &&
			!sirfport->tx_dma_state) {
		/* AFC status bit set means CTS deasserted: invert for core */
		cts_status = rd_regl(port, ureg->sirfsoc_afc_ctrl) &
					SIRFUART_AFC_CTS_STATUS;
		if (cts_status != 0)
			cts_status = 0;
		else
			cts_status = 1;
		uart_handle_cts_change(port, cts_status);
		wake_up_interruptible(&state->port.delta_msr_wait);
	}
	/* RX: DMA ports react to timeout/done, PIO ports drain the FIFO */
	if (IS_DMA_CHAN_VALID(sirfport->rx_dma_no)) {
		if (intr_status & uint_st->sirfsoc_rx_timeout)
			sirfsoc_uart_handle_rx_tmo(sirfport);
		if (intr_status & uint_st->sirfsoc_rx_done)
			sirfsoc_uart_handle_rx_done(sirfport);
	} else {
		if (intr_status & SIRFUART_RX_IO_INT_ST(uint_st))
			sirfsoc_uart_pio_rx_chars(port,
					SIRFSOC_UART_IO_RX_MAX_CNT);
	}
	/* TX: refill the FIFO, or stop once the buffer and FIFO are empty */
	if (intr_status & uint_st->sirfsoc_txfifo_empty) {
		if (IS_DMA_CHAN_VALID(sirfport->tx_dma_no))
			sirfsoc_uart_tx_with_dma(sirfport);
		else {
			if (uart_circ_empty(xmit) || uart_tx_stopped(port)) {
				spin_unlock(&port->lock);
				return IRQ_HANDLED;
			} else {
				sirfsoc_uart_pio_tx_chars(sirfport,
					SIRFSOC_UART_IO_TX_REASONABLE_CNT);
				if ((uart_circ_empty(xmit)) &&
				(rd_regl(port, ureg->sirfsoc_tx_fifo_status) &
				ufifo_st->ff_empty(port->line)))
					sirfsoc_uart_stop_tx(port);
			}
		}
	}
	spin_unlock(&port->lock);
	return IRQ_HANDLED;
}
/*
 * Tasklet run after each RX DMA buffer completes: push every loop buffer
 * between rx_completed and rx_issued to the tty and rearm it, advancing
 * rx_completed around the ring.  Runs under rx_lock.
 */
static void sirfsoc_uart_rx_dma_complete_tl(unsigned long param)
{
	struct sirfsoc_uart_port *sirfport = (struct sirfsoc_uart_port *)param;
	struct uart_port *port = &sirfport->port;
	unsigned long flags;

	spin_lock_irqsave(&sirfport->rx_lock, flags);
	while (sirfport->rx_completed != sirfport->rx_issued) {
		sirfsoc_uart_insert_rx_buf_to_tty(sirfport,
				SIRFSOC_RX_DMA_BUF_SIZE);
		sirfsoc_rx_submit_one_dma_desc(port, sirfport->rx_completed++);
		sirfport->rx_completed %= SIRFSOC_RX_LOOP_BUF_CNT;
	}
	spin_unlock_irqrestore(&sirfport->rx_lock, flags);
}
  666. static void sirfsoc_uart_rx_dma_complete_callback(void *param)
  667. {
  668. struct sirfsoc_uart_port *sirfport = (struct sirfsoc_uart_port *)param;
  669. spin_lock(&sirfport->rx_lock);
  670. sirfport->rx_issued++;
  671. sirfport->rx_issued %= SIRFSOC_RX_LOOP_BUF_CNT;
  672. spin_unlock(&sirfport->rx_lock);
  673. tasklet_schedule(&sirfport->rx_dma_complete_tasklet);
  674. }
/* submit rx dma task into dmaengine and unmask the rx DMA interrupts */
static void sirfsoc_uart_start_next_rx_dma(struct uart_port *port)
{
	struct sirfsoc_uart_port *sirfport = to_sirfport(port);
	struct sirfsoc_register *ureg = &sirfport->uart_reg->uart_reg;
	struct sirfsoc_int_en *uint_en = &sirfport->uart_reg->uart_int_en;
	unsigned long flags;
	int i;

	spin_lock_irqsave(&sirfport->rx_lock, flags);
	sirfport->rx_io_count = 0;
	/* clear IO mode: switch the rx path over to DMA */
	wr_regl(port, ureg->sirfsoc_rx_dma_io_ctrl,
		rd_regl(port, ureg->sirfsoc_rx_dma_io_ctrl) &
		~SIRFUART_IO_MODE);
	spin_unlock_irqrestore(&sirfport->rx_lock, flags);
	/* queue one descriptor per loop buffer */
	for (i = 0; i < SIRFSOC_RX_LOOP_BUF_CNT; i++)
		sirfsoc_rx_submit_one_dma_desc(port, i);
	/*
	 * NOTE(review): the ring indices are reset outside rx_lock and only
	 * after the descriptors were submitted; this assumes no completion
	 * callback can fire before the interrupt enables below — confirm.
	 */
	sirfport->rx_completed = sirfport->rx_issued = 0;
	spin_lock_irqsave(&sirfport->rx_lock, flags);
	/* marco variant writes the mask directly; others read-modify-write */
	if (!sirfport->is_marco)
		wr_regl(port, ureg->sirfsoc_int_en_reg,
			rd_regl(port, ureg->sirfsoc_int_en_reg) |
			SIRFUART_RX_DMA_INT_EN(port, uint_en));
	else
		wr_regl(port, ureg->sirfsoc_int_en_reg,
			SIRFUART_RX_DMA_INT_EN(port, uint_en));
	spin_unlock_irqrestore(&sirfport->rx_lock, flags);
}
  702. static void sirfsoc_uart_start_rx(struct uart_port *port)
  703. {
  704. struct sirfsoc_uart_port *sirfport = to_sirfport(port);
  705. struct sirfsoc_register *ureg = &sirfport->uart_reg->uart_reg;
  706. struct sirfsoc_int_en *uint_en = &sirfport->uart_reg->uart_int_en;
  707. sirfport->rx_io_count = 0;
  708. wr_regl(port, ureg->sirfsoc_rx_fifo_op, SIRFUART_FIFO_RESET);
  709. wr_regl(port, ureg->sirfsoc_rx_fifo_op, 0);
  710. wr_regl(port, ureg->sirfsoc_rx_fifo_op, SIRFUART_FIFO_START);
  711. if (IS_DMA_CHAN_VALID(sirfport->rx_dma_no))
  712. sirfsoc_uart_start_next_rx_dma(port);
  713. else {
  714. if (!sirfport->is_marco)
  715. wr_regl(port, ureg->sirfsoc_int_en_reg,
  716. rd_regl(port, ureg->sirfsoc_int_en_reg) |
  717. SIRFUART_RX_IO_INT_EN(port, uint_en));
  718. else
  719. wr_regl(port, ureg->sirfsoc_int_en_reg,
  720. SIRFUART_RX_IO_INT_EN(port, uint_en));
  721. }
  722. }
  723. static unsigned int
  724. sirfsoc_usp_calc_sample_div(unsigned long set_rate,
  725. unsigned long ioclk_rate, unsigned long *sample_reg)
  726. {
  727. unsigned long min_delta = ~0UL;
  728. unsigned short sample_div;
  729. unsigned long ioclk_div = 0;
  730. unsigned long temp_delta;
  731. for (sample_div = SIRF_MIN_SAMPLE_DIV;
  732. sample_div <= SIRF_MAX_SAMPLE_DIV; sample_div++) {
  733. temp_delta = ioclk_rate -
  734. (ioclk_rate + (set_rate * sample_div) / 2)
  735. / (set_rate * sample_div) * set_rate * sample_div;
  736. temp_delta = (temp_delta > 0) ? temp_delta : -temp_delta;
  737. if (temp_delta < min_delta) {
  738. ioclk_div = (2 * ioclk_rate /
  739. (set_rate * sample_div) + 1) / 2 - 1;
  740. if (ioclk_div > SIRF_IOCLK_DIV_MAX)
  741. continue;
  742. min_delta = temp_delta;
  743. *sample_reg = sample_div;
  744. if (!temp_delta)
  745. break;
  746. }
  747. }
  748. return ioclk_div;
  749. }
  750. static unsigned int
  751. sirfsoc_uart_calc_sample_div(unsigned long baud_rate,
  752. unsigned long ioclk_rate, unsigned long *set_baud)
  753. {
  754. unsigned long min_delta = ~0UL;
  755. unsigned short sample_div;
  756. unsigned int regv = 0;
  757. unsigned long ioclk_div;
  758. unsigned long baud_tmp;
  759. int temp_delta;
  760. for (sample_div = SIRF_MIN_SAMPLE_DIV;
  761. sample_div <= SIRF_MAX_SAMPLE_DIV; sample_div++) {
  762. ioclk_div = (ioclk_rate / (baud_rate * (sample_div + 1))) - 1;
  763. if (ioclk_div > SIRF_IOCLK_DIV_MAX)
  764. continue;
  765. baud_tmp = ioclk_rate / ((ioclk_div + 1) * (sample_div + 1));
  766. temp_delta = baud_tmp - baud_rate;
  767. temp_delta = (temp_delta > 0) ? temp_delta : -temp_delta;
  768. if (temp_delta < min_delta) {
  769. regv = regv & (~SIRF_IOCLK_DIV_MASK);
  770. regv = regv | ioclk_div;
  771. regv = regv & (~SIRF_SAMPLE_DIV_MASK);
  772. regv = regv | (sample_div << SIRF_SAMPLE_DIV_SHIFT);
  773. min_delta = temp_delta;
  774. *set_baud = baud_tmp;
  775. }
  776. }
  777. return regv;
  778. }
/*
 * Apply new termios settings: character size, stop bits, parity,
 * status/ignore masks, baud divisors, fifo thresholds and the rx
 * timeout, for both the real UART and the USP-based UART.  Finally
 * restarts the receiver and re-enables tx/rx.
 */
static void sirfsoc_uart_set_termios(struct uart_port *port,
				       struct ktermios *termios,
				       struct ktermios *old)
{
	struct sirfsoc_uart_port *sirfport = to_sirfport(port);
	struct sirfsoc_register *ureg = &sirfport->uart_reg->uart_reg;
	struct sirfsoc_int_en *uint_en = &sirfport->uart_reg->uart_int_en;
	unsigned long config_reg = 0;
	unsigned long baud_rate;
	unsigned long set_baud;
	unsigned long flags;
	unsigned long ic;
	unsigned int clk_div_reg = 0;
	unsigned long txfifo_op_reg, ioclk_rate;
	unsigned long rx_time_out;
	int threshold_div;
	u32 data_bit_len, stop_bit_len, len_val;
	unsigned long sample_div_reg = 0xf;

	ioclk_rate = port->uartclk;
	/* character size (CS5..CS8); unknown values fall back to 8 bits */
	switch (termios->c_cflag & CSIZE) {
	default:
	case CS8:
		data_bit_len = 8;
		config_reg |= SIRFUART_DATA_BIT_LEN_8;
		break;
	case CS7:
		data_bit_len = 7;
		config_reg |= SIRFUART_DATA_BIT_LEN_7;
		break;
	case CS6:
		data_bit_len = 6;
		config_reg |= SIRFUART_DATA_BIT_LEN_6;
		break;
	case CS5:
		data_bit_len = 5;
		config_reg |= SIRFUART_DATA_BIT_LEN_5;
		break;
	}
	/* one or two stop bits */
	if (termios->c_cflag & CSTOPB) {
		config_reg |= SIRFUART_STOP_BIT_LEN_2;
		stop_bit_len = 2;
	} else
		stop_bit_len = 1;

	spin_lock_irqsave(&port->lock, flags);
	/* build the read-status / ignore masks from the iflags */
	port->read_status_mask = uint_en->sirfsoc_rx_oflow_en;
	port->ignore_status_mask = 0;
	if (sirfport->uart_reg->uart_type == SIRF_REAL_UART) {
		if (termios->c_iflag & INPCK)
			port->read_status_mask |= uint_en->sirfsoc_frm_err_en |
				uint_en->sirfsoc_parity_err_en;
	} else {
		/* USP has no parity error interrupt */
		if (termios->c_iflag & INPCK)
			port->read_status_mask |= uint_en->sirfsoc_frm_err_en;
	}
	if (termios->c_iflag & (BRKINT | PARMRK))
		port->read_status_mask |= uint_en->sirfsoc_rxd_brk_en;
	if (sirfport->uart_reg->uart_type == SIRF_REAL_UART) {
		if (termios->c_iflag & IGNPAR)
			port->ignore_status_mask |=
				uint_en->sirfsoc_frm_err_en |
				uint_en->sirfsoc_parity_err_en;
		/* parity mode: stick (mark/space), odd or even */
		if (termios->c_cflag & PARENB) {
			if (termios->c_cflag & CMSPAR) {
				if (termios->c_cflag & PARODD)
					config_reg |= SIRFUART_STICK_BIT_MARK;
				else
					config_reg |= SIRFUART_STICK_BIT_SPACE;
			} else if (termios->c_cflag & PARODD) {
				config_reg |= SIRFUART_STICK_BIT_ODD;
			} else {
				config_reg |= SIRFUART_STICK_BIT_EVEN;
			}
		}
	} else {
		if (termios->c_iflag & IGNPAR)
			port->ignore_status_mask |=
				uint_en->sirfsoc_frm_err_en;
		if (termios->c_cflag & PARENB)
			dev_warn(port->dev,
					"USP-UART not support parity err\n");
	}
	if (termios->c_iflag & IGNBRK) {
		port->ignore_status_mask |=
			uint_en->sirfsoc_rxd_brk_en;
		if (termios->c_iflag & IGNPAR)
			port->ignore_status_mask |=
				uint_en->sirfsoc_rx_oflow_en;
	}
	if ((termios->c_cflag & CREAD) == 0)
		port->ignore_status_mask |= SIRFUART_DUMMY_READ;
	/* Hardware Flow Control Settings */
	if (UART_ENABLE_MS(port, termios->c_cflag)) {
		if (!sirfport->ms_enabled)
			sirfsoc_uart_enable_ms(port);
	} else {
		if (sirfport->ms_enabled)
			sirfsoc_uart_disable_ms(port);
	}
	baud_rate = uart_get_baud_rate(port, termios, old, 0, 4000000);
	/* 150 MHz ioclk has a precomputed divisor lookup table */
	if (ioclk_rate == 150000000) {
		for (ic = 0; ic < SIRF_BAUD_RATE_SUPPORT_NR; ic++)
			if (baud_rate == baudrate_to_regv[ic].baud_rate)
				clk_div_reg = baudrate_to_regv[ic].reg_val;
	}
	set_baud = baud_rate;
	if (sirfport->uart_reg->uart_type == SIRF_REAL_UART) {
		/* no table hit: derive the divisor arithmetically */
		if (unlikely(clk_div_reg == 0))
			clk_div_reg = sirfsoc_uart_calc_sample_div(baud_rate,
					ioclk_rate, &set_baud);
		wr_regl(port, ureg->sirfsoc_divisor, clk_div_reg);
	} else {
		clk_div_reg = sirfsoc_usp_calc_sample_div(baud_rate,
				ioclk_rate, &sample_div_reg);
		sample_div_reg--;
		set_baud = ((ioclk_rate / (clk_div_reg+1) - 1) /
				(sample_div_reg + 1));
		/* setting usp mode 2 */
		len_val = ((1 << SIRFSOC_USP_MODE2_RXD_DELAY_OFFSET) |
				(1 << SIRFSOC_USP_MODE2_TXD_DELAY_OFFSET));
		len_val |= ((clk_div_reg & SIRFSOC_USP_MODE2_CLK_DIVISOR_MASK)
				<< SIRFSOC_USP_MODE2_CLK_DIVISOR_OFFSET);
		wr_regl(port, ureg->sirfsoc_mode2, len_val);
	}
	/* report the rate we actually achieved back to the tty layer */
	if (tty_termios_baud_rate(termios))
		tty_termios_encode_baud_rate(termios, set_baud, set_baud);
	/* set receive timeout && data bits len */
	rx_time_out = SIRFSOC_UART_RX_TIMEOUT(set_baud, 20000);
	rx_time_out = SIRFUART_RECV_TIMEOUT_VALUE(rx_time_out);
	/* stop both fifos while reprogramming */
	txfifo_op_reg = rd_regl(port, ureg->sirfsoc_tx_fifo_op);
	wr_regl(port, ureg->sirfsoc_rx_fifo_op, SIRFUART_FIFO_STOP);
	wr_regl(port, ureg->sirfsoc_tx_fifo_op,
			(txfifo_op_reg & ~SIRFUART_FIFO_START));
	if (sirfport->uart_reg->uart_type == SIRF_REAL_UART) {
		config_reg |= SIRFUART_RECV_TIMEOUT(port, rx_time_out);
		wr_regl(port, ureg->sirfsoc_line_ctrl, config_reg);
	} else {
		/*tx frame ctrl*/
		len_val = (data_bit_len - 1) << SIRFSOC_USP_TX_DATA_LEN_OFFSET;
		len_val |= (data_bit_len + 1 + stop_bit_len - 1) <<
				SIRFSOC_USP_TX_FRAME_LEN_OFFSET;
		len_val |= ((data_bit_len - 1) <<
				SIRFSOC_USP_TX_SHIFTER_LEN_OFFSET);
		len_val |= (((clk_div_reg & 0xc00) >> 10) <<
				SIRFSOC_USP_TX_CLK_DIVISOR_OFFSET);
		wr_regl(port, ureg->sirfsoc_tx_frame_ctrl, len_val);
		/*rx frame ctrl*/
		len_val = (data_bit_len - 1) << SIRFSOC_USP_RX_DATA_LEN_OFFSET;
		len_val |= (data_bit_len + 1 + stop_bit_len - 1) <<
				SIRFSOC_USP_RX_FRAME_LEN_OFFSET;
		len_val |= (data_bit_len - 1) <<
				SIRFSOC_USP_RX_SHIFTER_LEN_OFFSET;
		len_val |= (((clk_div_reg & 0xf000) >> 12) <<
				SIRFSOC_USP_RX_CLK_DIVISOR_OFFSET);
		wr_regl(port, ureg->sirfsoc_rx_frame_ctrl, len_val);
		/*async param*/
		wr_regl(port, ureg->sirfsoc_async_param_reg,
				(SIRFUART_RECV_TIMEOUT(port, rx_time_out)) |
				(sample_div_reg & SIRFSOC_USP_ASYNC_DIV2_MASK) <<
				SIRFSOC_USP_ASYNC_DIV2_OFFSET);
	}
	/* select DMA or IO transfer mode per configured channel */
	if (IS_DMA_CHAN_VALID(sirfport->tx_dma_no))
		wr_regl(port, ureg->sirfsoc_tx_dma_io_ctrl, SIRFUART_DMA_MODE);
	else
		wr_regl(port, ureg->sirfsoc_tx_dma_io_ctrl, SIRFUART_IO_MODE);
	if (IS_DMA_CHAN_VALID(sirfport->rx_dma_no))
		wr_regl(port, ureg->sirfsoc_rx_dma_io_ctrl, SIRFUART_DMA_MODE);
	else
		wr_regl(port, ureg->sirfsoc_rx_dma_io_ctrl, SIRFUART_IO_MODE);
	/* Reset Rx/Tx FIFO Threshold level for proper baudrate */
	if (set_baud < 1000000)
		threshold_div = 1;
	else
		threshold_div = 2;
	wr_regl(port, ureg->sirfsoc_tx_fifo_ctrl,
				SIRFUART_FIFO_THD(port) / threshold_div);
	wr_regl(port, ureg->sirfsoc_rx_fifo_ctrl,
				SIRFUART_FIFO_THD(port) / threshold_div);
	/* restart the tx fifo with its previous op bits */
	txfifo_op_reg |= SIRFUART_FIFO_START;
	wr_regl(port, ureg->sirfsoc_tx_fifo_op, txfifo_op_reg);
	uart_update_timeout(port, termios->c_cflag, set_baud);
	sirfsoc_uart_start_rx(port);
	wr_regl(port, ureg->sirfsoc_tx_rx_en, SIRFUART_TX_EN | SIRFUART_RX_EN);
	spin_unlock_irqrestore(&port->lock, flags);
}
  963. static unsigned int sirfsoc_uart_init_tx_dma(struct uart_port *port)
  964. {
  965. struct sirfsoc_uart_port *sirfport = to_sirfport(port);
  966. dma_cap_mask_t dma_mask;
  967. struct dma_slave_config tx_slv_cfg = {
  968. .dst_maxburst = 2,
  969. };
  970. dma_cap_zero(dma_mask);
  971. dma_cap_set(DMA_SLAVE, dma_mask);
  972. sirfport->tx_dma_chan = dma_request_channel(dma_mask,
  973. (dma_filter_fn)sirfsoc_dma_filter_id,
  974. (void *)sirfport->tx_dma_no);
  975. if (!sirfport->tx_dma_chan) {
  976. dev_err(port->dev, "Uart Request Dma Channel Fail %d\n",
  977. sirfport->tx_dma_no);
  978. return -EPROBE_DEFER;
  979. }
  980. dmaengine_slave_config(sirfport->tx_dma_chan, &tx_slv_cfg);
  981. return 0;
  982. }
  983. static unsigned int sirfsoc_uart_init_rx_dma(struct uart_port *port)
  984. {
  985. struct sirfsoc_uart_port *sirfport = to_sirfport(port);
  986. dma_cap_mask_t dma_mask;
  987. int ret;
  988. int i, j;
  989. struct dma_slave_config slv_cfg = {
  990. .src_maxburst = 2,
  991. };
  992. dma_cap_zero(dma_mask);
  993. dma_cap_set(DMA_SLAVE, dma_mask);
  994. sirfport->rx_dma_chan = dma_request_channel(dma_mask,
  995. (dma_filter_fn)sirfsoc_dma_filter_id,
  996. (void *)sirfport->rx_dma_no);
  997. if (!sirfport->rx_dma_chan) {
  998. dev_err(port->dev, "Uart Request Dma Channel Fail %d\n",
  999. sirfport->rx_dma_no);
  1000. ret = -EPROBE_DEFER;
  1001. goto request_err;
  1002. }
  1003. for (i = 0; i < SIRFSOC_RX_LOOP_BUF_CNT; i++) {
  1004. sirfport->rx_dma_items[i].xmit.buf =
  1005. dma_alloc_coherent(port->dev, SIRFSOC_RX_DMA_BUF_SIZE,
  1006. &sirfport->rx_dma_items[i].dma_addr, GFP_KERNEL);
  1007. if (!sirfport->rx_dma_items[i].xmit.buf) {
  1008. dev_err(port->dev, "Uart alloc bufa failed\n");
  1009. ret = -ENOMEM;
  1010. goto alloc_coherent_err;
  1011. }
  1012. sirfport->rx_dma_items[i].xmit.head =
  1013. sirfport->rx_dma_items[i].xmit.tail = 0;
  1014. }
  1015. dmaengine_slave_config(sirfport->rx_dma_chan, &slv_cfg);
  1016. return 0;
  1017. alloc_coherent_err:
  1018. for (j = 0; j < i; j++)
  1019. dma_free_coherent(port->dev, SIRFSOC_RX_DMA_BUF_SIZE,
  1020. sirfport->rx_dma_items[j].xmit.buf,
  1021. sirfport->rx_dma_items[j].dma_addr);
  1022. dma_release_channel(sirfport->rx_dma_chan);
  1023. request_err:
  1024. return ret;
  1025. }
/* Abort any in-flight tx transfer and return the dma channel. */
static void sirfsoc_uart_uninit_tx_dma(struct sirfsoc_uart_port *sirfport)
{
	dmaengine_terminate_all(sirfport->tx_dma_chan);
	dma_release_channel(sirfport->tx_dma_chan);
}
  1031. static void sirfsoc_uart_uninit_rx_dma(struct sirfsoc_uart_port *sirfport)
  1032. {
  1033. int i;
  1034. struct uart_port *port = &sirfport->port;
  1035. dmaengine_terminate_all(sirfport->rx_dma_chan);
  1036. dma_release_channel(sirfport->rx_dma_chan);
  1037. for (i = 0; i < SIRFSOC_RX_LOOP_BUF_CNT; i++)
  1038. dma_free_coherent(port->dev, SIRFSOC_RX_DMA_BUF_SIZE,
  1039. sirfport->rx_dma_items[i].xmit.buf,
  1040. sirfport->rx_dma_items[i].dma_addr);
  1041. }
  1042. static int sirfsoc_uart_startup(struct uart_port *port)
  1043. {
  1044. struct sirfsoc_uart_port *sirfport = to_sirfport(port);
  1045. struct sirfsoc_register *ureg = &sirfport->uart_reg->uart_reg;
  1046. unsigned int index = port->line;
  1047. int ret;
  1048. set_irq_flags(port->irq, IRQF_VALID | IRQF_NOAUTOEN);
  1049. ret = request_irq(port->irq,
  1050. sirfsoc_uart_isr,
  1051. 0,
  1052. SIRFUART_PORT_NAME,
  1053. sirfport);
  1054. if (ret != 0) {
  1055. dev_err(port->dev, "UART%d request IRQ line (%d) failed.\n",
  1056. index, port->irq);
  1057. goto irq_err;
  1058. }
  1059. /* initial hardware settings */
  1060. wr_regl(port, ureg->sirfsoc_tx_dma_io_ctrl,
  1061. rd_regl(port, ureg->sirfsoc_tx_dma_io_ctrl) |
  1062. SIRFUART_IO_MODE);
  1063. wr_regl(port, ureg->sirfsoc_rx_dma_io_ctrl,
  1064. rd_regl(port, ureg->sirfsoc_rx_dma_io_ctrl) |
  1065. SIRFUART_IO_MODE);
  1066. wr_regl(port, ureg->sirfsoc_tx_dma_io_len, 0);
  1067. wr_regl(port, ureg->sirfsoc_rx_dma_io_len, 0);
  1068. wr_regl(port, ureg->sirfsoc_tx_rx_en, SIRFUART_RX_EN | SIRFUART_TX_EN);
  1069. if (sirfport->uart_reg->uart_type == SIRF_USP_UART)
  1070. wr_regl(port, ureg->sirfsoc_mode1,
  1071. SIRFSOC_USP_ENDIAN_CTRL_LSBF |
  1072. SIRFSOC_USP_EN);
  1073. wr_regl(port, ureg->sirfsoc_tx_fifo_op, SIRFUART_FIFO_RESET);
  1074. wr_regl(port, ureg->sirfsoc_tx_fifo_op, 0);
  1075. wr_regl(port, ureg->sirfsoc_rx_fifo_op, SIRFUART_FIFO_RESET);
  1076. wr_regl(port, ureg->sirfsoc_rx_fifo_op, 0);
  1077. wr_regl(port, ureg->sirfsoc_tx_fifo_ctrl, SIRFUART_FIFO_THD(port));
  1078. wr_regl(port, ureg->sirfsoc_rx_fifo_ctrl, SIRFUART_FIFO_THD(port));
  1079. if (IS_DMA_CHAN_VALID(sirfport->rx_dma_no)) {
  1080. ret = sirfsoc_uart_init_rx_dma(port);
  1081. if (ret)
  1082. goto init_rx_err;
  1083. wr_regl(port, ureg->sirfsoc_rx_fifo_level_chk,
  1084. SIRFUART_RX_FIFO_CHK_SC(port->line, 0x4) |
  1085. SIRFUART_RX_FIFO_CHK_LC(port->line, 0xe) |
  1086. SIRFUART_RX_FIFO_CHK_HC(port->line, 0x1b));
  1087. }
  1088. if (IS_DMA_CHAN_VALID(sirfport->tx_dma_no)) {
  1089. sirfsoc_uart_init_tx_dma(port);
  1090. sirfport->tx_dma_state = TX_DMA_IDLE;
  1091. wr_regl(port, ureg->sirfsoc_tx_fifo_level_chk,
  1092. SIRFUART_TX_FIFO_CHK_SC(port->line, 0x1b) |
  1093. SIRFUART_TX_FIFO_CHK_LC(port->line, 0xe) |
  1094. SIRFUART_TX_FIFO_CHK_HC(port->line, 0x4));
  1095. }
  1096. sirfport->ms_enabled = false;
  1097. if (sirfport->uart_reg->uart_type == SIRF_USP_UART &&
  1098. sirfport->hw_flow_ctrl) {
  1099. set_irq_flags(gpio_to_irq(sirfport->cts_gpio),
  1100. IRQF_VALID | IRQF_NOAUTOEN);
  1101. ret = request_irq(gpio_to_irq(sirfport->cts_gpio),
  1102. sirfsoc_uart_usp_cts_handler, IRQF_TRIGGER_FALLING |
  1103. IRQF_TRIGGER_RISING, "usp_cts_irq", sirfport);
  1104. if (ret != 0) {
  1105. dev_err(port->dev, "UART-USP:request gpio irq fail\n");
  1106. goto init_rx_err;
  1107. }
  1108. }
  1109. enable_irq(port->irq);
  1110. return 0;
  1111. init_rx_err:
  1112. free_irq(port->irq, sirfport);
  1113. irq_err:
  1114. return ret;
  1115. }
  1116. static void sirfsoc_uart_shutdown(struct uart_port *port)
  1117. {
  1118. struct sirfsoc_uart_port *sirfport = to_sirfport(port);
  1119. struct sirfsoc_register *ureg = &sirfport->uart_reg->uart_reg;
  1120. if (!sirfport->is_marco)
  1121. wr_regl(port, ureg->sirfsoc_int_en_reg, 0);
  1122. else
  1123. wr_regl(port, SIRFUART_INT_EN_CLR, ~0UL);
  1124. free_irq(port->irq, sirfport);
  1125. if (sirfport->ms_enabled)
  1126. sirfsoc_uart_disable_ms(port);
  1127. if (sirfport->uart_reg->uart_type == SIRF_USP_UART &&
  1128. sirfport->hw_flow_ctrl) {
  1129. gpio_set_value(sirfport->rts_gpio, 1);
  1130. free_irq(gpio_to_irq(sirfport->cts_gpio), sirfport);
  1131. }
  1132. if (IS_DMA_CHAN_VALID(sirfport->rx_dma_no))
  1133. sirfsoc_uart_uninit_rx_dma(sirfport);
  1134. if (IS_DMA_CHAN_VALID(sirfport->tx_dma_no)) {
  1135. sirfsoc_uart_uninit_tx_dma(sirfport);
  1136. sirfport->tx_dma_state = TX_DMA_IDLE;
  1137. }
  1138. }
  1139. static const char *sirfsoc_uart_type(struct uart_port *port)
  1140. {
  1141. return port->type == SIRFSOC_PORT_TYPE ? SIRFUART_PORT_NAME : NULL;
  1142. }
  1143. static int sirfsoc_uart_request_port(struct uart_port *port)
  1144. {
  1145. struct sirfsoc_uart_port *sirfport = to_sirfport(port);
  1146. struct sirfsoc_uart_param *uart_param = &sirfport->uart_reg->uart_param;
  1147. void *ret;
  1148. ret = request_mem_region(port->mapbase,
  1149. SIRFUART_MAP_SIZE, uart_param->port_name);
  1150. return ret ? 0 : -EBUSY;
  1151. }
/* Release the register window claimed by sirfsoc_uart_request_port(). */
static void sirfsoc_uart_release_port(struct uart_port *port)
{
	release_mem_region(port->mapbase, SIRFUART_MAP_SIZE);
}
  1156. static void sirfsoc_uart_config_port(struct uart_port *port, int flags)
  1157. {
  1158. if (flags & UART_CONFIG_TYPE) {
  1159. port->type = SIRFSOC_PORT_TYPE;
  1160. sirfsoc_uart_request_port(port);
  1161. }
  1162. }
  1163. static struct uart_ops sirfsoc_uart_ops = {
  1164. .tx_empty = sirfsoc_uart_tx_empty,
  1165. .get_mctrl = sirfsoc_uart_get_mctrl,
  1166. .set_mctrl = sirfsoc_uart_set_mctrl,
  1167. .stop_tx = sirfsoc_uart_stop_tx,
  1168. .start_tx = sirfsoc_uart_start_tx,
  1169. .stop_rx = sirfsoc_uart_stop_rx,
  1170. .enable_ms = sirfsoc_uart_enable_ms,
  1171. .break_ctl = sirfsoc_uart_break_ctl,
  1172. .startup = sirfsoc_uart_startup,
  1173. .shutdown = sirfsoc_uart_shutdown,
  1174. .set_termios = sirfsoc_uart_set_termios,
  1175. .type = sirfsoc_uart_type,
  1176. .release_port = sirfsoc_uart_release_port,
  1177. .request_port = sirfsoc_uart_request_port,
  1178. .config_port = sirfsoc_uart_config_port,
  1179. };
  1180. #ifdef CONFIG_SERIAL_SIRFSOC_CONSOLE
  1181. static int __init
  1182. sirfsoc_uart_console_setup(struct console *co, char *options)
  1183. {
  1184. unsigned int baud = 115200;
  1185. unsigned int bits = 8;
  1186. unsigned int parity = 'n';
  1187. unsigned int flow = 'n';
  1188. struct uart_port *port = &sirfsoc_uart_ports[co->index].port;
  1189. struct sirfsoc_uart_port *sirfport = to_sirfport(port);
  1190. struct sirfsoc_register *ureg = &sirfport->uart_reg->uart_reg;
  1191. if (co->index < 0 || co->index >= SIRFSOC_UART_NR)
  1192. return -EINVAL;
  1193. if (!port->mapbase)
  1194. return -ENODEV;
  1195. /* enable usp in mode1 register */
  1196. if (sirfport->uart_reg->uart_type == SIRF_USP_UART)
  1197. wr_regl(port, ureg->sirfsoc_mode1, SIRFSOC_USP_EN |
  1198. SIRFSOC_USP_ENDIAN_CTRL_LSBF);
  1199. if (options)
  1200. uart_parse_options(options, &baud, &parity, &bits, &flow);
  1201. port->cons = co;
  1202. /* default console tx/rx transfer using io mode */
  1203. sirfport->rx_dma_no = UNVALID_DMA_CHAN;
  1204. sirfport->tx_dma_no = UNVALID_DMA_CHAN;
  1205. return uart_set_options(port, co, baud, parity, bits, flow);
  1206. }
  1207. static void sirfsoc_uart_console_putchar(struct uart_port *port, int ch)
  1208. {
  1209. struct sirfsoc_uart_port *sirfport = to_sirfport(port);
  1210. struct sirfsoc_register *ureg = &sirfport->uart_reg->uart_reg;
  1211. struct sirfsoc_fifo_status *ufifo_st = &sirfport->uart_reg->fifo_status;
  1212. while (rd_regl(port,
  1213. ureg->sirfsoc_tx_fifo_status) & ufifo_st->ff_full(port->line))
  1214. cpu_relax();
  1215. wr_regb(port, ureg->sirfsoc_tx_fifo_data, ch);
  1216. }
/* Console write: emit the buffer one character at a time via putchar. */
static void sirfsoc_uart_console_write(struct console *co, const char *s,
							unsigned int count)
{
	struct uart_port *port = &sirfsoc_uart_ports[co->index].port;
	uart_console_write(port, s, count, sirfsoc_uart_console_putchar);
}
/* Console descriptor; index -1 lets the core pick the preferred port. */
static struct console sirfsoc_uart_console = {
	.name		= SIRFSOC_UART_NAME,
	.device		= uart_console_device,
	.flags		= CON_PRINTBUFFER,
	.index		= -1,
	.write		= sirfsoc_uart_console_write,
	.setup		= sirfsoc_uart_console_setup,
	.data		= &sirfsoc_uart_drv,
};
/* Early console registration, run via console_initcall below. */
static int __init sirfsoc_uart_console_init(void)
{
	register_console(&sirfsoc_uart_console);
	return 0;
}
  1237. console_initcall(sirfsoc_uart_console_init);
  1238. #endif
/* tty/serial-core driver description shared by all SiRF uart ports. */
static struct uart_driver sirfsoc_uart_drv = {
	.owner		= THIS_MODULE,
	.driver_name	= SIRFUART_PORT_NAME,
	.nr		= SIRFSOC_UART_NR,
	.dev_name	= SIRFSOC_UART_NAME,
	.major		= SIRFSOC_UART_MAJOR,
	.minor		= SIRFSOC_UART_MINOR,
#ifdef CONFIG_SERIAL_SIRFSOC_CONSOLE
	.cons		= &sirfsoc_uart_console,
#else
	.cons		= NULL,
#endif
};
  1252. static struct of_device_id sirfsoc_uart_ids[] = {
  1253. { .compatible = "sirf,prima2-uart", .data = &sirfsoc_uart,},
  1254. { .compatible = "sirf,marco-uart", .data = &sirfsoc_uart},
  1255. { .compatible = "sirf,prima2-usp-uart", .data = &sirfsoc_usp},
  1256. {}
  1257. };
  1258. MODULE_DEVICE_TABLE(of, sirfsoc_uart_ids);
/*
 * Platform probe: pick the port slot from the DT "cell-index", read the
 * variant-specific DMA-channel and flow-control properties, map the
 * register window, grab the irq and clock, then register the port with
 * the serial core.
 */
static int sirfsoc_uart_probe(struct platform_device *pdev)
{
	struct sirfsoc_uart_port *sirfport;
	struct uart_port *port;
	struct resource *res;
	int ret;
	const struct of_device_id *match;

	/*
	 * NOTE(review): match is dereferenced below without a NULL check —
	 * presumably probe only runs for nodes in sirfsoc_uart_ids; confirm.
	 */
	match = of_match_node(sirfsoc_uart_ids, pdev->dev.of_node);
	if (of_property_read_u32(pdev->dev.of_node, "cell-index", &pdev->id)) {
		dev_err(&pdev->dev,
			"Unable to find cell-index in uart node.\n");
		ret = -EFAULT;
		goto err;
	}
	/* USP ports live after the real uarts in the shared port array */
	if (of_device_is_compatible(pdev->dev.of_node, "sirf,prima2-usp-uart"))
		pdev->id += ((struct sirfsoc_uart_register *)
				match->data)->uart_param.register_uart_nr;
	sirfport = &sirfsoc_uart_ports[pdev->id];
	port = &sirfport->port;
	port->dev = &pdev->dev;
	port->private_data = sirfport;
	sirfport->uart_reg = (struct sirfsoc_uart_register *)match->data;

	sirfport->hw_flow_ctrl = of_property_read_bool(pdev->dev.of_node,
		"sirf,uart-has-rtscts");
	/* real uart: optional tx/rx dma channels */
	if (of_device_is_compatible(pdev->dev.of_node, "sirf,prima2-uart")) {
		sirfport->uart_reg->uart_type = SIRF_REAL_UART;
		if (of_property_read_u32(pdev->dev.of_node,
				"sirf,uart-dma-rx-channel",
				&sirfport->rx_dma_no))
			sirfport->rx_dma_no = UNVALID_DMA_CHAN;
		if (of_property_read_u32(pdev->dev.of_node,
				"sirf,uart-dma-tx-channel",
				&sirfport->tx_dma_no))
			sirfport->tx_dma_no = UNVALID_DMA_CHAN;
	}
	/* usp-based uart: dma channels plus optional cts/rts gpios */
	if (of_device_is_compatible(pdev->dev.of_node, "sirf,prima2-usp-uart")) {
		sirfport->uart_reg->uart_type = SIRF_USP_UART;
		if (of_property_read_u32(pdev->dev.of_node,
				"sirf,usp-dma-rx-channel",
				&sirfport->rx_dma_no))
			sirfport->rx_dma_no = UNVALID_DMA_CHAN;
		if (of_property_read_u32(pdev->dev.of_node,
				"sirf,usp-dma-tx-channel",
				&sirfport->tx_dma_no))
			sirfport->tx_dma_no = UNVALID_DMA_CHAN;
		if (!sirfport->hw_flow_ctrl)
			goto usp_no_flow_control;
		if (of_find_property(pdev->dev.of_node, "cts-gpios", NULL))
			sirfport->cts_gpio = of_get_named_gpio(
					pdev->dev.of_node, "cts-gpios", 0);
		else
			sirfport->cts_gpio = -1;
		if (of_find_property(pdev->dev.of_node, "rts-gpios", NULL))
			sirfport->rts_gpio = of_get_named_gpio(
					pdev->dev.of_node, "rts-gpios", 0);
		else
			sirfport->rts_gpio = -1;
		/* flow control on USP requires BOTH gpios to be valid */
		if ((!gpio_is_valid(sirfport->cts_gpio) ||
			 !gpio_is_valid(sirfport->rts_gpio))) {
			ret = -EINVAL;
			dev_err(&pdev->dev,
				"Usp flow control must have cts and rts gpio");
			goto err;
		}
		ret = devm_gpio_request(&pdev->dev, sirfport->cts_gpio,
				"usp-cts-gpio");
		if (ret) {
			dev_err(&pdev->dev, "Unable request cts gpio");
			goto err;
		}
		gpio_direction_input(sirfport->cts_gpio);
		ret = devm_gpio_request(&pdev->dev, sirfport->rts_gpio,
				"usp-rts-gpio");
		if (ret) {
			dev_err(&pdev->dev, "Unable request rts gpio");
			goto err;
		}
		gpio_direction_output(sirfport->rts_gpio, 1);
	}
usp_no_flow_control:
	if (of_device_is_compatible(pdev->dev.of_node, "sirf,marco-uart"))
		sirfport->is_marco = true;

	if (of_property_read_u32(pdev->dev.of_node,
			"fifosize",
			&port->fifosize)) {
		dev_err(&pdev->dev,
			"Unable to find fifosize in uart node.\n");
		ret = -EFAULT;
		goto err;
	}

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (res == NULL) {
		dev_err(&pdev->dev, "Insufficient resources.\n");
		ret = -EFAULT;
		goto err;
	}
	spin_lock_init(&sirfport->rx_lock);
	spin_lock_init(&sirfport->tx_lock);
	tasklet_init(&sirfport->rx_dma_complete_tasklet,
			sirfsoc_uart_rx_dma_complete_tl, (unsigned long)sirfport);
	tasklet_init(&sirfport->rx_tmo_process_tasklet,
			sirfsoc_rx_tmo_process_tl, (unsigned long)sirfport);
	port->mapbase = res->start;
	port->membase = devm_ioremap(&pdev->dev, res->start, resource_size(res));
	if (!port->membase) {
		dev_err(&pdev->dev, "Cannot remap resource.\n");
		ret = -ENOMEM;
		goto err;
	}
	res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
	if (res == NULL) {
		dev_err(&pdev->dev, "Insufficient resources.\n");
		ret = -EFAULT;
		goto err;
	}
	port->irq = res->start;

	sirfport->clk = clk_get(&pdev->dev, NULL);
	if (IS_ERR(sirfport->clk)) {
		ret = PTR_ERR(sirfport->clk);
		goto err;
	}
	clk_prepare_enable(sirfport->clk);
	port->uartclk = clk_get_rate(sirfport->clk);

	port->ops = &sirfsoc_uart_ops;
	spin_lock_init(&port->lock);

	platform_set_drvdata(pdev, sirfport);
	ret = uart_add_one_port(&sirfsoc_uart_drv, port);
	if (ret != 0) {
		dev_err(&pdev->dev, "Cannot add UART port(%d).\n", pdev->id);
		goto port_err;
	}

	return 0;

port_err:
	clk_disable_unprepare(sirfport->clk);
	clk_put(sirfport->clk);
err:
	return ret;
}
  1397. static int sirfsoc_uart_remove(struct platform_device *pdev)
  1398. {
  1399. struct sirfsoc_uart_port *sirfport = platform_get_drvdata(pdev);
  1400. struct uart_port *port = &sirfport->port;
  1401. clk_disable_unprepare(sirfport->clk);
  1402. clk_put(sirfport->clk);
  1403. uart_remove_one_port(&sirfsoc_uart_drv, port);
  1404. return 0;
  1405. }
/* Legacy platform-bus suspend: let the serial core quiesce the port. */
static int
sirfsoc_uart_suspend(struct platform_device *pdev, pm_message_t state)
{
	struct sirfsoc_uart_port *sirfport = platform_get_drvdata(pdev);
	struct uart_port *port = &sirfport->port;
	uart_suspend_port(&sirfsoc_uart_drv, port);
	return 0;
}
/* Legacy platform-bus resume: serial core restores the port state. */
static int sirfsoc_uart_resume(struct platform_device *pdev)
{
	struct sirfsoc_uart_port *sirfport = platform_get_drvdata(pdev);
	struct uart_port *port = &sirfport->port;
	uart_resume_port(&sirfsoc_uart_drv, port);
	return 0;
}
/* Platform driver glue; matching is driven by sirfsoc_uart_ids. */
static struct platform_driver sirfsoc_uart_driver = {
	.probe		= sirfsoc_uart_probe,
	.remove		= sirfsoc_uart_remove,
	.suspend	= sirfsoc_uart_suspend,
	.resume		= sirfsoc_uart_resume,
	.driver		= {
		.name	= SIRFUART_PORT_NAME,
		.owner	= THIS_MODULE,
		.of_match_table	= sirfsoc_uart_ids,
	},
};
  1432. static int __init sirfsoc_uart_init(void)
  1433. {
  1434. int ret = 0;
  1435. ret = uart_register_driver(&sirfsoc_uart_drv);
  1436. if (ret)
  1437. goto out;
  1438. ret = platform_driver_register(&sirfsoc_uart_driver);
  1439. if (ret)
  1440. uart_unregister_driver(&sirfsoc_uart_drv);
  1441. out:
  1442. return ret;
  1443. }
  1444. module_init(sirfsoc_uart_init);
/* Module exit: tear down in reverse registration order. */
static void __exit sirfsoc_uart_exit(void)
{
	platform_driver_unregister(&sirfsoc_uart_driver);
	uart_unregister_driver(&sirfsoc_uart_drv);
}
  1450. module_exit(sirfsoc_uart_exit);
  1451. MODULE_LICENSE("GPL v2");
  1452. MODULE_AUTHOR("Bin Shi <Bin.Shi@csr.com>, Rong Wang<Rong.Wang@csr.com>");
  1453. MODULE_DESCRIPTION("CSR SiRFprimaII Uart Driver");