  1. /*
  2. * Simple synchronous serial port driver for ETRAX FS and Artpec-3.
  3. *
  4. * Copyright (c) 2005 Axis Communications AB
  5. *
  6. * Author: Mikael Starvik
  7. *
  8. */
  9. #include <linux/module.h>
  10. #include <linux/kernel.h>
  11. #include <linux/types.h>
  12. #include <linux/errno.h>
  13. #include <linux/major.h>
  14. #include <linux/sched.h>
  15. #include <linux/smp_lock.h>
  16. #include <linux/interrupt.h>
  17. #include <linux/poll.h>
  18. #include <linux/init.h>
  19. #include <linux/timer.h>
  20. #include <linux/spinlock.h>
  21. #include <asm/io.h>
  22. #include <dma.h>
  23. #include <pinmux.h>
  24. #include <hwregs/reg_rdwr.h>
  25. #include <hwregs/sser_defs.h>
  26. #include <hwregs/dma_defs.h>
  27. #include <hwregs/dma.h>
  28. #include <hwregs/intr_vect_defs.h>
  29. #include <hwregs/intr_vect.h>
  30. #include <hwregs/reg_map.h>
  31. #include <asm/sync_serial.h>
  32. /* The receiver is a bit tricky because of the continuous stream of data.
  33.  *
  34.  * Three DMA descriptors are linked together. Each DMA descriptor is
  35.  * responsible for port->bufchunk of a common buffer.
  36.  *
  37.  *   +---------------------------------------------+
  38.  *   |  +----------+   +----------+   +----------+ |
  39.  *   +->| Descr[0] |-->| Descr[1] |-->| Descr[2] |-+
  40.  *      +----------+   +----------+   +----------+
  41.  *           |              |              |
  42.  *           v              v              v
  43.  *      +----------------------------------------+
  44.  *      |                 BUFFER                 |
  45.  *      +----------------------------------------+
  46.  *       |<- data_avail ->|
  47.  *       readp            writep
  48.  *
  49.  * If the application keeps up the pace readp will be right after writep.
  50.  * If the application can't keep the pace we have to throw away data.
  51.  * The idea is that readp should be ready with the data pointed out by
  52.  * Descr[i] when the DMA has filled in Descr[i+1]. Otherwise we will
  53.  * discard the rest of the data pointed out by Descr[i] and set readp
  54.  * to the start of Descr[i+1].
  55.  */
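/*
 * A minimal sketch of the wrap-around arithmetic described above, kept
 * under #if 0 so it serves purely as documentation.  The parameter names
 * mirror the readp/writep/in_buffer_size fields of struct sync_port
 * below; the driver's own sync_data_avail() implements the same idea.
 */
#if 0
static int example_data_avail(const unsigned char *readp,
			      const unsigned char *writep,
			      unsigned int in_buffer_size)
{
	if (writep >= readp)
		/* No wrap: the data lies between readp and writep. */
		return writep - readp;
	/* Wrapped: everything except the gap from writep up to readp. */
	return in_buffer_size - (readp - writep);
}
#endif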
  56. #define SYNC_SERIAL_MAJOR 125
  57. /* IN_BUFFER_SIZE should be a multiple of 6 to make sure that 24 bit */
  58. /* words can be handled */
  59. #define IN_BUFFER_SIZE 12288
  60. #define IN_DESCR_SIZE 256
  61. #define NBR_IN_DESCR (IN_BUFFER_SIZE/IN_DESCR_SIZE)
  62. #define OUT_BUFFER_SIZE 1024*8
  63. #define NBR_OUT_DESCR 8
  64. #define DEFAULT_FRAME_RATE 0
  65. #define DEFAULT_WORD_RATE 7
  66. /* NOTE: Enabling some debug will likely cause overrun or underrun,
  67. * especially if manual mode is used.
  68. */
  69. #define DEBUG(x)
  70. #define DEBUGREAD(x)
  71. #define DEBUGWRITE(x)
  72. #define DEBUGPOLL(x)
  73. #define DEBUGRXINT(x)
  74. #define DEBUGTXINT(x)
  75. #define DEBUGTRDMA(x)
  76. #define DEBUGOUTBUF(x)
  77. typedef struct sync_port
  78. {
  79. reg_scope_instances regi_sser;
  80. reg_scope_instances regi_dmain;
  81. reg_scope_instances regi_dmaout;
  82. char started; /* 1 if port has been started */
  83. char port_nbr; /* Port 0 or 1 */
  84. char busy; /* 1 if port is busy */
  85. char enabled; /* 1 if port is enabled */
  86. char use_dma; /* 1 if port uses dma */
  87. char tr_running;
  88. char init_irqs;
  89. int output;
  90. int input;
  91. /* Next byte to be read by application */
  92. volatile unsigned char *volatile readp;
  93. /* Next byte to be written by etrax */
  94. volatile unsigned char *volatile writep;
  95. unsigned int in_buffer_size;
  96. unsigned int inbufchunk;
  97. unsigned char out_buffer[OUT_BUFFER_SIZE] __attribute__ ((aligned(32)));
  98. unsigned char in_buffer[IN_BUFFER_SIZE]__attribute__ ((aligned(32)));
  99. unsigned char flip[IN_BUFFER_SIZE] __attribute__ ((aligned(32)));
  100. struct dma_descr_data* next_rx_desc;
  101. struct dma_descr_data* prev_rx_desc;
  102. /* Pointer to the first available descriptor in the ring,
  103. * unless active_tr_descr == catch_tr_descr and a dma
  104. * transfer is active */
  105. struct dma_descr_data *active_tr_descr;
  106. /* Pointer to the first allocated descriptor in the ring */
  107. struct dma_descr_data *catch_tr_descr;
  108. /* Pointer to the descriptor with the current end-of-list */
  109. struct dma_descr_data *prev_tr_descr;
  110. int full;
  111. /* Pointer to the first byte being read by DMA
  112. * or current position in out_buffer if not using DMA. */
  113. unsigned char *out_rd_ptr;
  114. /* Number of bytes currently locked for being read by DMA */
  115. int out_buf_count;
  116. dma_descr_data in_descr[NBR_IN_DESCR] __attribute__ ((__aligned__(16)));
  117. dma_descr_context in_context __attribute__ ((__aligned__(32)));
  118. dma_descr_data out_descr[NBR_OUT_DESCR]
  119. __attribute__ ((__aligned__(16)));
  120. dma_descr_context out_context __attribute__ ((__aligned__(32)));
  121. wait_queue_head_t out_wait_q;
  122. wait_queue_head_t in_wait_q;
  123. spinlock_t lock;
  124. } sync_port;
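/*
 * Note on the three large buffers in struct sync_port above: in_buffer
 * is the area the RX DMA descriptors point into, flip is the ring the
 * reader consumes from (bounded by readp/writep), and out_buffer holds
 * outgoing data that is either handed to the TX DMA descriptors or sent
 * word by word in manual mode.  rx_interrupt() copies each completed
 * inbufchunk from in_buffer into flip.
 */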
  125. static int etrax_sync_serial_init(void);
  126. static void initialize_port(int portnbr);
  127. static inline int sync_data_avail(struct sync_port *port);
  128. static int sync_serial_open(struct inode *, struct file*);
  129. static int sync_serial_release(struct inode*, struct file*);
  130. static unsigned int sync_serial_poll(struct file *filp, poll_table *wait);
  131. static long sync_serial_ioctl(struct file *,
  132. unsigned int cmd, unsigned long arg);
  133. static ssize_t sync_serial_write(struct file * file, const char * buf,
  134. size_t count, loff_t *ppos);
  135. static ssize_t sync_serial_read(struct file *file, char *buf,
  136. size_t count, loff_t *ppos);
  137. #if (defined(CONFIG_ETRAX_SYNCHRONOUS_SERIAL_PORT0) && \
  138. defined(CONFIG_ETRAX_SYNCHRONOUS_SERIAL0_DMA)) || \
  139. (defined(CONFIG_ETRAX_SYNCHRONOUS_SERIAL_PORT1) && \
  140. defined(CONFIG_ETRAX_SYNCHRONOUS_SERIAL1_DMA))
  141. #define SYNC_SER_DMA
  142. #endif
  143. static void send_word(sync_port* port);
  144. static void start_dma_out(struct sync_port *port, const char *data, int count);
  145. static void start_dma_in(sync_port* port);
  146. #ifdef SYNC_SER_DMA
  147. static irqreturn_t tr_interrupt(int irq, void *dev_id);
  148. static irqreturn_t rx_interrupt(int irq, void *dev_id);
  149. #endif
  150. #if (defined(CONFIG_ETRAX_SYNCHRONOUS_SERIAL_PORT0) && \
  151. !defined(CONFIG_ETRAX_SYNCHRONOUS_SERIAL0_DMA)) || \
  152. (defined(CONFIG_ETRAX_SYNCHRONOUS_SERIAL_PORT1) && \
  153. !defined(CONFIG_ETRAX_SYNCHRONOUS_SERIAL1_DMA))
  154. #define SYNC_SER_MANUAL
  155. #endif
  156. #ifdef SYNC_SER_MANUAL
  157. static irqreturn_t manual_interrupt(int irq, void *dev_id);
  158. #endif
  159. #ifdef CONFIG_ETRAXFS /* ETRAX FS */
  160. #define OUT_DMA_NBR 4
  161. #define IN_DMA_NBR 5
  162. #define PINMUX_SSER pinmux_sser0
  163. #define SYNCSER_INST regi_sser0
  164. #define SYNCSER_INTR_VECT SSER0_INTR_VECT
  165. #define OUT_DMA_INST regi_dma4
  166. #define IN_DMA_INST regi_dma5
  167. #define DMA_OUT_INTR_VECT DMA4_INTR_VECT
  168. #define DMA_IN_INTR_VECT DMA5_INTR_VECT
  169. #define REQ_DMA_SYNCSER dma_sser0
  170. #else /* Artpec-3 */
  171. #define OUT_DMA_NBR 6
  172. #define IN_DMA_NBR 7
  173. #define PINMUX_SSER pinmux_sser
  174. #define SYNCSER_INST regi_sser
  175. #define SYNCSER_INTR_VECT SSER_INTR_VECT
  176. #define OUT_DMA_INST regi_dma6
  177. #define IN_DMA_INST regi_dma7
  178. #define DMA_OUT_INTR_VECT DMA6_INTR_VECT
  179. #define DMA_IN_INTR_VECT DMA7_INTR_VECT
  180. #define REQ_DMA_SYNCSER dma_sser
  181. #endif
  182. /* The ports */
  183. static struct sync_port ports[]=
  184. {
  185. {
  186. .regi_sser = SYNCSER_INST,
  187. .regi_dmaout = OUT_DMA_INST,
  188. .regi_dmain = IN_DMA_INST,
  189. #if defined(CONFIG_ETRAX_SYNCHRONOUS_SERIAL0_DMA)
  190. .use_dma = 1,
  191. #else
  192. .use_dma = 0,
  193. #endif
  194. }
  195. #ifdef CONFIG_ETRAXFS
  196. ,
  197. {
  198. .regi_sser = regi_sser1,
  199. .regi_dmaout = regi_dma6,
  200. .regi_dmain = regi_dma7,
  201. #if defined(CONFIG_ETRAX_SYNCHRONOUS_SERIAL1_DMA)
  202. .use_dma = 1,
  203. #else
  204. .use_dma = 0,
  205. #endif
  206. }
  207. #endif
  208. };
  209. #define NBR_PORTS ARRAY_SIZE(ports)
  210. static const struct file_operations sync_serial_fops = {
  211. .owner = THIS_MODULE,
  212. .write = sync_serial_write,
  213. .read = sync_serial_read,
  214. .poll = sync_serial_poll,
  215. .unlocked_ioctl = sync_serial_ioctl,
  216. .open = sync_serial_open,
  217. .release = sync_serial_release
  218. };
  219. static int __init etrax_sync_serial_init(void)
  220. {
  221. ports[0].enabled = 0;
  222. #ifdef CONFIG_ETRAXFS
  223. ports[1].enabled = 0;
  224. #endif
  225. if (register_chrdev(SYNC_SERIAL_MAJOR, "sync serial",
  226. &sync_serial_fops) < 0) {
  227. printk(KERN_WARNING
  228. "Unable to get major for synchronous serial port\n");
  229. return -EBUSY;
  230. }
  231. /* Initialize Ports */
  232. #if defined(CONFIG_ETRAX_SYNCHRONOUS_SERIAL_PORT0)
  233. if (crisv32_pinmux_alloc_fixed(PINMUX_SSER)) {
  234. printk(KERN_WARNING
  235. "Unable to alloc pins for synchronous serial port 0\n");
  236. return -EIO;
  237. }
  238. ports[0].enabled = 1;
  239. initialize_port(0);
  240. #endif
  241. #if defined(CONFIG_ETRAX_SYNCHRONOUS_SERIAL_PORT1)
  242. if (crisv32_pinmux_alloc_fixed(pinmux_sser1)) {
  243. printk(KERN_WARNING
  244. "Unable to alloc pins for synchronous serial port 0\n");
  245. return -EIO;
  246. }
  247. ports[1].enabled = 1;
  248. initialize_port(1);
  249. #endif
  250. #ifdef CONFIG_ETRAXFS
  251. printk(KERN_INFO "ETRAX FS synchronous serial port driver\n");
  252. #else
  253. printk(KERN_INFO "Artpec-3 synchronous serial port driver\n");
  254. #endif
  255. return 0;
  256. }
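/*
 * Rough user-space usage sketch, kept under #if 0 so it is not built as
 * part of the driver.  It assumes a device node such as /dev/syncser0
 * has been created with major SYNC_SERIAL_MAJOR and a minor selecting
 * the port, and that SSP_MODE/MASTER_OUTPUT are picked up from the same
 * <asm/sync_serial.h> header included above; the node name and header
 * path are assumptions, not something this file guarantees.
 */
#if 0
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <asm/sync_serial.h>

int example_user(void)
{
	static const char msg[] = "hello";
	/* Hypothetical device node for port 0 (minor 0). */
	int fd = open("/dev/syncser0", O_RDWR);

	if (fd < 0)
		return -1;
	/* Make the port clock/frame master, output only. */
	if (ioctl(fd, SSP_MODE, MASTER_OUTPUT) < 0) {
		close(fd);
		return -1;
	}
	/* Data is copied into out_buffer and transmission is started. */
	write(fd, msg, sizeof(msg) - 1);
	close(fd);
	return 0;
}
#endif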
  257. static void __init initialize_port(int portnbr)
  258. {
  259. int __attribute__((unused)) i;
  260. struct sync_port *port = &ports[portnbr];
  261. reg_sser_rw_cfg cfg = {0};
  262. reg_sser_rw_frm_cfg frm_cfg = {0};
  263. reg_sser_rw_tr_cfg tr_cfg = {0};
  264. reg_sser_rw_rec_cfg rec_cfg = {0};
  265. DEBUG(printk(KERN_DEBUG "Init sync serial port %d\n", portnbr));
  266. port->port_nbr = portnbr;
  267. port->init_irqs = 1;
  268. port->out_rd_ptr = port->out_buffer;
  269. port->out_buf_count = 0;
  270. port->output = 1;
  271. port->input = 0;
  272. port->readp = port->flip;
  273. port->writep = port->flip;
  274. port->in_buffer_size = IN_BUFFER_SIZE;
  275. port->inbufchunk = IN_DESCR_SIZE;
  276. port->next_rx_desc = &port->in_descr[0];
  277. port->prev_rx_desc = &port->in_descr[NBR_IN_DESCR-1];
  278. port->prev_rx_desc->eol = 1;
  279. init_waitqueue_head(&port->out_wait_q);
  280. init_waitqueue_head(&port->in_wait_q);
  281. spin_lock_init(&port->lock);
  282. cfg.out_clk_src = regk_sser_intern_clk;
  283. cfg.out_clk_pol = regk_sser_pos;
  284. cfg.clk_od_mode = regk_sser_no;
  285. cfg.clk_dir = regk_sser_out;
  286. cfg.gate_clk = regk_sser_no;
  287. cfg.base_freq = regk_sser_f29_493;
  288. cfg.clk_div = 256;
  289. REG_WR(sser, port->regi_sser, rw_cfg, cfg);
  290. frm_cfg.wordrate = DEFAULT_WORD_RATE;
  291. frm_cfg.type = regk_sser_edge;
  292. frm_cfg.frame_pin_dir = regk_sser_out;
  293. frm_cfg.frame_pin_use = regk_sser_frm;
  294. frm_cfg.status_pin_dir = regk_sser_in;
  295. frm_cfg.status_pin_use = regk_sser_hold;
  296. frm_cfg.out_on = regk_sser_tr;
  297. frm_cfg.tr_delay = 1;
  298. REG_WR(sser, port->regi_sser, rw_frm_cfg, frm_cfg);
  299. tr_cfg.urun_stop = regk_sser_no;
  300. tr_cfg.sample_size = 7;
  301. tr_cfg.sh_dir = regk_sser_msbfirst;
  302. tr_cfg.use_dma = port->use_dma ? regk_sser_yes : regk_sser_no;
  303. #if 0
  304. tr_cfg.rate_ctrl = regk_sser_bulk;
  305. tr_cfg.data_pin_use = regk_sser_dout;
  306. #else
  307. tr_cfg.rate_ctrl = regk_sser_iso;
  308. tr_cfg.data_pin_use = regk_sser_dout;
  309. #endif
  310. tr_cfg.bulk_wspace = 1;
  311. REG_WR(sser, port->regi_sser, rw_tr_cfg, tr_cfg);
  312. rec_cfg.sample_size = 7;
  313. rec_cfg.sh_dir = regk_sser_msbfirst;
  314. rec_cfg.use_dma = port->use_dma ? regk_sser_yes : regk_sser_no;
  315. rec_cfg.fifo_thr = regk_sser_inf;
  316. REG_WR(sser, port->regi_sser, rw_rec_cfg, rec_cfg);
  317. #ifdef SYNC_SER_DMA
  318. /* Setup the descriptor ring for dma out/transmit. */
  319. for (i = 0; i < NBR_OUT_DESCR; i++) {
  320. port->out_descr[i].wait = 0;
  321. port->out_descr[i].intr = 1;
  322. port->out_descr[i].eol = 0;
  323. port->out_descr[i].out_eop = 0;
  324. port->out_descr[i].next =
  325. (dma_descr_data *)virt_to_phys(&port->out_descr[i+1]);
  326. }
  327. /* Create a ring from the list. */
  328. port->out_descr[NBR_OUT_DESCR-1].next =
  329. (dma_descr_data *)virt_to_phys(&port->out_descr[0]);
  330. /* Setup context for traversing the ring. */
  331. port->active_tr_descr = &port->out_descr[0];
  332. port->prev_tr_descr = &port->out_descr[NBR_OUT_DESCR-1];
  333. port->catch_tr_descr = &port->out_descr[0];
  334. #endif
  335. }
  336. static inline int sync_data_avail(struct sync_port *port)
  337. {
  338. int avail;
  339. unsigned char *start;
  340. unsigned char *end;
  341. start = (unsigned char*)port->readp; /* cast away volatile */
  342. end = (unsigned char*)port->writep; /* cast away volatile */
  343. /* 0123456789   0123456789
  344.  *  -----       -    -----
  345.  *  ^rp  ^wp     ^wp ^rp
  346.  */
  347. if (end >= start)
  348. avail = end - start;
  349. else
  350. avail = port->in_buffer_size - (start - end);
  351. return avail;
  352. }
  353. static inline int sync_data_avail_to_end(struct sync_port *port)
  354. {
  355. int avail;
  356. unsigned char *start;
  357. unsigned char *end;
  358. start = (unsigned char*)port->readp; /* cast away volatile */
  359. end = (unsigned char*)port->writep; /* cast away volatile */
  360. /* 0123456789   0123456789
  361.  *  -----            -----
  362.  *  ^rp  ^wp     ^wp ^rp
  363.  */
  364. if (end >= start)
  365. avail = end - start;
  366. else
  367. avail = port->flip + port->in_buffer_size - start;
  368. return avail;
  369. }
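/*
 * Worked example for the two helpers above, assuming in_buffer_size is
 * 10: with readp at offset 7 and writep at offset 2 the data has
 * wrapped, so sync_data_avail() returns 10 - (7 - 2) = 5 bytes in
 * total, while sync_data_avail_to_end() returns only the 10 - 7 = 3
 * bytes that can be read before hitting the end of the flip buffer.
 */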
  370. static int sync_serial_open(struct inode *inode, struct file *file)
  371. {
  372. int dev = iminor(inode);
  373. int ret = -EBUSY;
  374. sync_port *port;
  375. reg_dma_rw_cfg cfg = {.en = regk_dma_yes};
  376. reg_dma_rw_intr_mask intr_mask = {.data = regk_dma_yes};
  377. lock_kernel();
  378. DEBUG(printk(KERN_DEBUG "Open sync serial port %d\n", dev));
  379. if (dev < 0 || dev >= NBR_PORTS || !ports[dev].enabled)
  380. {
  381. DEBUG(printk(KERN_DEBUG "Invalid minor %d\n", dev));
  382. ret = -ENODEV;
  383. goto out;
  384. }
  385. port = &ports[dev];
  386. /* Allow the device to be opened twice (assuming one reader and one writer) */
  387. if (port->busy == 2)
  388. {
  389. DEBUG(printk(KERN_DEBUG "Device is busy.. \n"));
  390. goto out;
  391. }
  392. if (port->init_irqs) {
  393. if (port->use_dma) {
  394. if (port == &ports[0]) {
  395. #ifdef SYNC_SER_DMA
  396. if (request_irq(DMA_OUT_INTR_VECT,
  397. tr_interrupt,
  398. 0,
  399. "synchronous serial 0 dma tr",
  400. &ports[0])) {
  401. printk(KERN_CRIT "Can't allocate sync serial port 0 IRQ");
  402. goto out;
  403. } else if (request_irq(DMA_IN_INTR_VECT,
  404. rx_interrupt,
  405. 0,
  406. "synchronous serial 1 dma rx",
  407. &ports[0])) {
  408. free_irq(DMA_OUT_INTR_VECT, &port[0]);
  409. printk(KERN_CRIT "Can't allocate sync serial port 0 IRQ");
  410. goto out;
  411. } else if (crisv32_request_dma(OUT_DMA_NBR,
  412. "synchronous serial 0 dma tr",
  413. DMA_VERBOSE_ON_ERROR,
  414. 0,
  415. REQ_DMA_SYNCSER)) {
  416. free_irq(DMA_OUT_INTR_VECT, &port[0]);
  417. free_irq(DMA_IN_INTR_VECT, &port[0]);
  418. printk(KERN_CRIT "Can't allocate sync serial port 0 TX DMA channel");
  419. goto out;
  420. } else if (crisv32_request_dma(IN_DMA_NBR,
  421. "synchronous serial 0 dma rec",
  422. DMA_VERBOSE_ON_ERROR,
  423. 0,
  424. REQ_DMA_SYNCSER)) {
  425. crisv32_free_dma(OUT_DMA_NBR);
  426. free_irq(DMA_OUT_INTR_VECT, &port[0]);
  427. free_irq(DMA_IN_INTR_VECT, &port[0]);
  428. printk(KERN_CRIT "Can't allocate sync serial port 0 RX DMA channel");
  429. goto out;
  430. }
  431. #endif
  432. }
  433. #ifdef CONFIG_ETRAXFS
  434. else if (port == &ports[1]) {
  435. #ifdef SYNC_SER_DMA
  436. if (request_irq(DMA6_INTR_VECT,
  437. tr_interrupt,
  438. 0,
  439. "synchronous serial 1 dma tr",
  440. &ports[1])) {
  441. printk(KERN_CRIT "Can't allocate sync serial port 1 IRQ");
  442. goto out;
  443. } else if (request_irq(DMA7_INTR_VECT,
  444. rx_interrupt,
  445. 0,
  446. "synchronous serial 1 dma rx",
  447. &ports[1])) {
  448. free_irq(DMA6_INTR_VECT, &ports[1]);
  449. printk(KERN_CRIT "Can't allocate sync serial port 1 IRQ");
  450. goto out;
  451. } else if (crisv32_request_dma(
  452. SYNC_SER1_TX_DMA_NBR,
  453. "synchronous serial 1 dma tr",
  454. DMA_VERBOSE_ON_ERROR,
  455. 0,
  456. dma_sser1)) {
  457. free_irq(DMA6_INTR_VECT, &ports[1]);
  458. free_irq(DMA7_INTR_VECT, &ports[1]);
  459. printk(KERN_CRIT "Can't allocate sync serial port 1 TX DMA channel");
  460. goto out;
  461. } else if (crisv32_request_dma(
  462. SYNC_SER1_RX_DMA_NBR,
  463. "synchronous serial 3 dma rec",
  464. DMA_VERBOSE_ON_ERROR,
  465. 0,
  466. dma_sser1)) {
  467. crisv32_free_dma(SYNC_SER1_TX_DMA_NBR);
  468. free_irq(DMA6_INTR_VECT, &ports[1]);
  469. free_irq(DMA7_INTR_VECT, &ports[1]);
  470. printk(KERN_CRIT "Can't allocate sync serial port 1 RX DMA channel");
  471. goto out;
  472. }
  473. #endif
  474. }
  475. #endif
  476. /* Enable DMAs */
  477. REG_WR(dma, port->regi_dmain, rw_cfg, cfg);
  478. REG_WR(dma, port->regi_dmaout, rw_cfg, cfg);
  479. /* Enable DMA IRQs */
  480. REG_WR(dma, port->regi_dmain, rw_intr_mask, intr_mask);
  481. REG_WR(dma, port->regi_dmaout, rw_intr_mask, intr_mask);
  482. /* Set up wordsize = 1 for DMAs. */
  483. DMA_WR_CMD (port->regi_dmain, regk_dma_set_w_size1);
  484. DMA_WR_CMD (port->regi_dmaout, regk_dma_set_w_size1);
  485. start_dma_in(port);
  486. port->init_irqs = 0;
  487. } else { /* !port->use_dma */
  488. #ifdef SYNC_SER_MANUAL
  489. if (port == &ports[0]) {
  490. if (request_irq(SYNCSER_INTR_VECT,
  491. manual_interrupt,
  492. 0,
  493. "synchronous serial manual irq",
  494. &ports[0])) {
  495. printk("Can't allocate sync serial manual irq");
  496. goto out;
  497. }
  498. }
  499. #ifdef CONFIG_ETRAXFS
  500. else if (port == &ports[1]) {
  501. if (request_irq(SSER1_INTR_VECT,
  502. manual_interrupt,
  503. 0,
  504. "synchronous serial manual irq",
  505. &ports[1])) {
  506. printk(KERN_CRIT "Can't allocate sync serial manual irq");
  507. goto out;
  508. }
  509. }
  510. #endif
  511. port->init_irqs = 0;
  512. #else
  513. panic("sync_serial: Manual mode not supported.\n");
  514. #endif /* SYNC_SER_MANUAL */
  515. }
  516. } /* port->init_irqs */
  517. port->busy++;
  518. ret = 0;
  519. out:
  520. unlock_kernel();
  521. return ret;
  522. }
  523. static int sync_serial_release(struct inode *inode, struct file *file)
  524. {
  525. int dev = iminor(inode);
  526. sync_port *port;
  527. if (dev < 0 || dev >= NBR_PORTS || !ports[dev].enabled)
  528. {
  529. DEBUG(printk("Invalid minor %d\n", dev));
  530. return -ENODEV;
  531. }
  532. port = &ports[dev];
  533. if (port->busy)
  534. port->busy--;
  535. if (!port->busy)
  536. /* XXX */ ;
  537. return 0;
  538. }
  539. static unsigned int sync_serial_poll(struct file *file, poll_table *wait)
  540. {
  541. int dev = iminor(file->f_path.dentry->d_inode);
  542. unsigned int mask = 0;
  543. sync_port *port;
  544. DEBUGPOLL( static unsigned int prev_mask = 0; );
  545. port = &ports[dev];
  546. if (!port->started) {
  547. reg_sser_rw_cfg cfg = REG_RD(sser, port->regi_sser, rw_cfg);
  548. reg_sser_rw_rec_cfg rec_cfg =
  549. REG_RD(sser, port->regi_sser, rw_rec_cfg);
  550. cfg.en = regk_sser_yes;
  551. rec_cfg.rec_en = port->input;
  552. REG_WR(sser, port->regi_sser, rw_cfg, cfg);
  553. REG_WR(sser, port->regi_sser, rw_rec_cfg, rec_cfg);
  554. port->started = 1;
  555. }
  556. poll_wait(file, &port->out_wait_q, wait);
  557. poll_wait(file, &port->in_wait_q, wait);
  558. /* No active transfer, descriptors are available */
  559. if (port->output && !port->tr_running)
  560. mask |= POLLOUT | POLLWRNORM;
  561. /* Descriptor and buffer space available. */
  562. if (port->output &&
  563. port->active_tr_descr != port->catch_tr_descr &&
  564. port->out_buf_count < OUT_BUFFER_SIZE)
  565. mask |= POLLOUT | POLLWRNORM;
  566. /* At least an inbufchunk of data */
  567. if (port->input && sync_data_avail(port) >= port->inbufchunk)
  568. mask |= POLLIN | POLLRDNORM;
  569. DEBUGPOLL(if (mask != prev_mask)
  570. printk("sync_serial_poll: mask 0x%08X %s %s\n", mask,
  571. mask&POLLOUT?"POLLOUT":"", mask&POLLIN?"POLLIN":"");
  572. prev_mask = mask;
  573. );
  574. return mask;
  575. }
  576. static int sync_serial_ioctl_unlocked(struct file *file,
  577. unsigned int cmd, unsigned long arg)
  578. {
  579. int return_val = 0;
  580. int dma_w_size = regk_dma_set_w_size1;
  581. int dev = iminor(file->f_path.dentry->d_inode);
  582. sync_port *port;
  583. reg_sser_rw_tr_cfg tr_cfg;
  584. reg_sser_rw_rec_cfg rec_cfg;
  585. reg_sser_rw_frm_cfg frm_cfg;
  586. reg_sser_rw_cfg gen_cfg;
  587. reg_sser_rw_intr_mask intr_mask;
  588. if (dev < 0 || dev >= NBR_PORTS || !ports[dev].enabled)
  589. {
  590. DEBUG(printk("Invalid minor %d\n", dev));
  591. return -ENODEV;
  592. }
  593. port = &ports[dev];
  594. spin_lock_irq(&port->lock);
  595. tr_cfg = REG_RD(sser, port->regi_sser, rw_tr_cfg);
  596. rec_cfg = REG_RD(sser, port->regi_sser, rw_rec_cfg);
  597. frm_cfg = REG_RD(sser, port->regi_sser, rw_frm_cfg);
  598. gen_cfg = REG_RD(sser, port->regi_sser, rw_cfg);
  599. intr_mask = REG_RD(sser, port->regi_sser, rw_intr_mask);
  600. switch(cmd)
  601. {
  602. case SSP_SPEED:
  603. if (GET_SPEED(arg) == CODEC)
  604. {
  605. unsigned int freq;
  606. gen_cfg.base_freq = regk_sser_f32;
  607. /* Clock divider will internally be
  608. * gen_cfg.clk_div + 1.
  609. */
  610. freq = GET_FREQ(arg);
  611. switch (freq) {
  612. case FREQ_32kHz:
  613. case FREQ_64kHz:
  614. case FREQ_128kHz:
  615. case FREQ_256kHz:
  616. gen_cfg.clk_div = 125 *
  617. (1 << (freq - FREQ_256kHz)) - 1;
  618. break;
  619. case FREQ_512kHz:
  620. gen_cfg.clk_div = 62;
  621. break;
  622. case FREQ_1MHz:
  623. case FREQ_2MHz:
  624. case FREQ_4MHz:
  625. gen_cfg.clk_div = 8 * (1 << freq) - 1;
  626. break;
  627. }
  628. } else {
  629. gen_cfg.base_freq = regk_sser_f29_493;
  630. switch (GET_SPEED(arg)) {
  631. case SSP150:
  632. gen_cfg.clk_div = 29493000 / (150 * 8) - 1;
  633. break;
  634. case SSP300:
  635. gen_cfg.clk_div = 29493000 / (300 * 8) - 1;
  636. break;
  637. case SSP600:
  638. gen_cfg.clk_div = 29493000 / (600 * 8) - 1;
  639. break;
  640. case SSP1200:
  641. gen_cfg.clk_div = 29493000 / (1200 * 8) - 1;
  642. break;
  643. case SSP2400:
  644. gen_cfg.clk_div = 29493000 / (2400 * 8) - 1;
  645. break;
  646. case SSP4800:
  647. gen_cfg.clk_div = 29493000 / (4800 * 8) - 1;
  648. break;
  649. case SSP9600:
  650. gen_cfg.clk_div = 29493000 / (9600 * 8) - 1;
  651. break;
  652. case SSP19200:
  653. gen_cfg.clk_div = 29493000 / (19200 * 8) - 1;
  654. break;
  655. case SSP28800:
  656. gen_cfg.clk_div = 29493000 / (28800 * 8) - 1;
  657. break;
  658. case SSP57600:
  659. gen_cfg.clk_div = 29493000 / (57600 * 8) - 1;
  660. break;
  661. case SSP115200:
  662. gen_cfg.clk_div = 29493000 / (115200 * 8) - 1;
  663. break;
  664. case SSP230400:
  665. gen_cfg.clk_div = 29493000 / (230400 * 8) - 1;
  666. break;
  667. case SSP460800:
  668. gen_cfg.clk_div = 29493000 / (460800 * 8) - 1;
  669. break;
  670. case SSP921600:
  671. gen_cfg.clk_div = 29493000 / (921600 * 8) - 1;
  672. break;
  673. case SSP3125000:
  674. gen_cfg.base_freq = regk_sser_f100;
  675. gen_cfg.clk_div = 100000000 / (3125000 * 8) - 1;
  676. break;
  677. }
  678. }
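/* Example for the divider values above: SSP9600 gives
 * clk_div = 29493000 / (9600 * 8) - 1 = 383, i.e. the 29.493 MHz
 * base clock divided by 384, which is roughly 8 * 9600 Hz.
 */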
  679. frm_cfg.wordrate = GET_WORD_RATE(arg);
  680. break;
  681. case SSP_MODE:
  682. switch(arg)
  683. {
  684. case MASTER_OUTPUT:
  685. port->output = 1;
  686. port->input = 0;
  687. frm_cfg.out_on = regk_sser_tr;
  688. frm_cfg.frame_pin_dir = regk_sser_out;
  689. gen_cfg.clk_dir = regk_sser_out;
  690. break;
  691. case SLAVE_OUTPUT:
  692. port->output = 1;
  693. port->input = 0;
  694. frm_cfg.frame_pin_dir = regk_sser_in;
  695. gen_cfg.clk_dir = regk_sser_in;
  696. break;
  697. case MASTER_INPUT:
  698. port->output = 0;
  699. port->input = 1;
  700. frm_cfg.frame_pin_dir = regk_sser_out;
  701. frm_cfg.out_on = regk_sser_intern_tb;
  702. gen_cfg.clk_dir = regk_sser_out;
  703. break;
  704. case SLAVE_INPUT:
  705. port->output = 0;
  706. port->input = 1;
  707. frm_cfg.frame_pin_dir = regk_sser_in;
  708. gen_cfg.clk_dir = regk_sser_in;
  709. break;
  710. case MASTER_BIDIR:
  711. port->output = 1;
  712. port->input = 1;
  713. frm_cfg.frame_pin_dir = regk_sser_out;
  714. frm_cfg.out_on = regk_sser_intern_tb;
  715. gen_cfg.clk_dir = regk_sser_out;
  716. break;
  717. case SLAVE_BIDIR:
  718. port->output = 1;
  719. port->input = 1;
  720. frm_cfg.frame_pin_dir = regk_sser_in;
  721. gen_cfg.clk_dir = regk_sser_in;
  722. break;
  723. default:
  724. spin_unlock_irq(&port->lock);
  725. return -EINVAL;
  726. }
  727. if (!port->use_dma || (arg == MASTER_OUTPUT || arg == SLAVE_OUTPUT))
  728. intr_mask.rdav = regk_sser_yes;
  729. break;
  730. case SSP_FRAME_SYNC:
  731. if (arg & NORMAL_SYNC) {
  732. frm_cfg.rec_delay = 1;
  733. frm_cfg.tr_delay = 1;
  734. }
  735. else if (arg & EARLY_SYNC)
  736. frm_cfg.rec_delay = frm_cfg.tr_delay = 0;
  737. else if (arg & SECOND_WORD_SYNC) {
  738. frm_cfg.rec_delay = 7;
  739. frm_cfg.tr_delay = 1;
  740. }
  741. tr_cfg.bulk_wspace = frm_cfg.tr_delay;
  742. frm_cfg.early_wend = regk_sser_yes;
  743. if (arg & BIT_SYNC)
  744. frm_cfg.type = regk_sser_edge;
  745. else if (arg & WORD_SYNC)
  746. frm_cfg.type = regk_sser_level;
  747. else if (arg & EXTENDED_SYNC)
  748. frm_cfg.early_wend = regk_sser_no;
  749. if (arg & SYNC_ON)
  750. frm_cfg.frame_pin_use = regk_sser_frm;
  751. else if (arg & SYNC_OFF)
  752. frm_cfg.frame_pin_use = regk_sser_gio0;
  753. dma_w_size = regk_dma_set_w_size2;
  754. if (arg & WORD_SIZE_8) {
  755. rec_cfg.sample_size = tr_cfg.sample_size = 7;
  756. dma_w_size = regk_dma_set_w_size1;
  757. } else if (arg & WORD_SIZE_12)
  758. rec_cfg.sample_size = tr_cfg.sample_size = 11;
  759. else if (arg & WORD_SIZE_16)
  760. rec_cfg.sample_size = tr_cfg.sample_size = 15;
  761. else if (arg & WORD_SIZE_24)
  762. rec_cfg.sample_size = tr_cfg.sample_size = 23;
  763. else if (arg & WORD_SIZE_32)
  764. rec_cfg.sample_size = tr_cfg.sample_size = 31;
  765. if (arg & BIT_ORDER_MSB)
  766. rec_cfg.sh_dir = tr_cfg.sh_dir = regk_sser_msbfirst;
  767. else if (arg & BIT_ORDER_LSB)
  768. rec_cfg.sh_dir = tr_cfg.sh_dir = regk_sser_lsbfirst;
  769. if (arg & FLOW_CONTROL_ENABLE) {
  770. frm_cfg.status_pin_use = regk_sser_frm;
  771. rec_cfg.fifo_thr = regk_sser_thr16;
  772. } else if (arg & FLOW_CONTROL_DISABLE) {
  773. frm_cfg.status_pin_use = regk_sser_gio0;
  774. rec_cfg.fifo_thr = regk_sser_inf;
  775. }
  776. if (arg & CLOCK_NOT_GATED)
  777. gen_cfg.gate_clk = regk_sser_no;
  778. else if (arg & CLOCK_GATED)
  779. gen_cfg.gate_clk = regk_sser_yes;
  780. break;
  781. case SSP_IPOLARITY:
  782. /* NOTE!! negedge is considered NORMAL */
  783. if (arg & CLOCK_NORMAL)
  784. rec_cfg.clk_pol = regk_sser_neg;
  785. else if (arg & CLOCK_INVERT)
  786. rec_cfg.clk_pol = regk_sser_pos;
  787. if (arg & FRAME_NORMAL)
  788. frm_cfg.level = regk_sser_pos_hi;
  789. else if (arg & FRAME_INVERT)
  790. frm_cfg.level = regk_sser_neg_lo;
  791. if (arg & STATUS_NORMAL)
  792. gen_cfg.hold_pol = regk_sser_pos;
  793. else if (arg & STATUS_INVERT)
  794. gen_cfg.hold_pol = regk_sser_neg;
  795. break;
  796. case SSP_OPOLARITY:
  797. if (arg & CLOCK_NORMAL)
  798. gen_cfg.out_clk_pol = regk_sser_pos;
  799. else if (arg & CLOCK_INVERT)
  800. gen_cfg.out_clk_pol = regk_sser_neg;
  801. if (arg & FRAME_NORMAL)
  802. frm_cfg.level = regk_sser_pos_hi;
  803. else if (arg & FRAME_INVERT)
  804. frm_cfg.level = regk_sser_neg_lo;
  805. if (arg & STATUS_NORMAL)
  806. gen_cfg.hold_pol = regk_sser_pos;
  807. else if (arg & STATUS_INVERT)
  808. gen_cfg.hold_pol = regk_sser_neg;
  809. break;
  810. case SSP_SPI:
  811. rec_cfg.fifo_thr = regk_sser_inf;
  812. rec_cfg.sh_dir = tr_cfg.sh_dir = regk_sser_msbfirst;
  813. rec_cfg.sample_size = tr_cfg.sample_size = 7;
  814. frm_cfg.frame_pin_use = regk_sser_frm;
  815. frm_cfg.type = regk_sser_level;
  816. frm_cfg.tr_delay = 1;
  817. frm_cfg.level = regk_sser_neg_lo;
  818. if (arg & SPI_SLAVE)
  819. {
  820. rec_cfg.clk_pol = regk_sser_neg;
  821. gen_cfg.clk_dir = regk_sser_in;
  822. port->input = 1;
  823. port->output = 0;
  824. }
  825. else
  826. {
  827. gen_cfg.out_clk_pol = regk_sser_pos;
  828. port->input = 0;
  829. port->output = 1;
  830. gen_cfg.clk_dir = regk_sser_out;
  831. }
  832. break;
  833. case SSP_INBUFCHUNK:
  834. break;
  835. default:
  836. return_val = -1;
  837. }
  838. if (port->started) {
  839. rec_cfg.rec_en = port->input;
  840. gen_cfg.en = (port->output | port->input);
  841. }
  842. REG_WR(sser, port->regi_sser, rw_tr_cfg, tr_cfg);
  843. REG_WR(sser, port->regi_sser, rw_rec_cfg, rec_cfg);
  844. REG_WR(sser, port->regi_sser, rw_frm_cfg, frm_cfg);
  845. REG_WR(sser, port->regi_sser, rw_intr_mask, intr_mask);
  846. REG_WR(sser, port->regi_sser, rw_cfg, gen_cfg);
  847. if (cmd == SSP_FRAME_SYNC && (arg & (WORD_SIZE_8 | WORD_SIZE_12 |
  848. WORD_SIZE_16 | WORD_SIZE_24 | WORD_SIZE_32))) {
  849. int en = gen_cfg.en;
  850. gen_cfg.en = 0;
  851. REG_WR(sser, port->regi_sser, rw_cfg, gen_cfg);
  852. /* ##### Should DMA be stopped before we change the DMA size? */
  853. DMA_WR_CMD(port->regi_dmain, dma_w_size);
  854. DMA_WR_CMD(port->regi_dmaout, dma_w_size);
  855. gen_cfg.en = en;
  856. REG_WR(sser, port->regi_sser, rw_cfg, gen_cfg);
  857. }
  858. spin_unlock_irq(&port->lock);
  859. return return_val;
  860. }
  861. static long sync_serial_ioctl(struct file *file,
  862. unsigned int cmd, unsigned long arg)
  863. {
  864. long ret;
  865. lock_kernel();
  866. ret = sync_serial_ioctl_unlocked(file, cmd, arg);
  867. unlock_kernel();
  868. return ret;
  869. }
  870. /* NOTE: sync_serial_write does not support concurrency */
  871. static ssize_t sync_serial_write(struct file *file, const char *buf,
  872. size_t count, loff_t *ppos)
  873. {
  874. int dev = iminor(file->f_path.dentry->d_inode);
  875. DECLARE_WAITQUEUE(wait, current);
  876. struct sync_port *port;
  877. int trunc_count;
  878. unsigned long flags;
  879. int bytes_free;
  880. int out_buf_count;
  881. unsigned char *rd_ptr; /* First allocated byte in the buffer */
  882. unsigned char *wr_ptr; /* First free byte in the buffer */
  883. unsigned char *buf_stop_ptr; /* Last byte + 1 */
  884. if (dev < 0 || dev >= NBR_PORTS || !ports[dev].enabled) {
  885. DEBUG(printk("Invalid minor %d\n", dev));
  886. return -ENODEV;
  887. }
  888. port = &ports[dev];
  889. /* |<-                   OUT_BUFFER_SIZE                   ->|
  890.  *            |<-  out_buf_count ->|
  891.  *                                 |<- trunc_count ->|  ...->|
  892.  *  _________________________________________________________
  893.  * |   free   |        data        |          free           |
  894.  * |__________|____________________|_________________________|
  895.  *            ^ rd_ptr             ^ wr_ptr
  896.  */
  897. DEBUGWRITE(printk(KERN_DEBUG "W d%d c %lu a: %p c: %p\n",
  898. port->port_nbr, count, port->active_tr_descr,
  899. port->catch_tr_descr));
  900. /* Read variables that may be updated by interrupts */
  901. spin_lock_irqsave(&port->lock, flags);
  902. rd_ptr = port->out_rd_ptr;
  903. out_buf_count = port->out_buf_count;
  904. spin_unlock_irqrestore(&port->lock, flags);
  905. /* Check if resources are available */
  906. if (port->tr_running &&
  907. ((port->use_dma && port->active_tr_descr == port->catch_tr_descr) ||
  908. out_buf_count >= OUT_BUFFER_SIZE)) {
  909. DEBUGWRITE(printk(KERN_DEBUG "sser%d full\n", dev));
  910. return -EAGAIN;
  911. }
  912. buf_stop_ptr = port->out_buffer + OUT_BUFFER_SIZE;
  913. /* Determine pointer to the first free byte, before copying. */
  914. wr_ptr = rd_ptr + out_buf_count;
  915. if (wr_ptr >= buf_stop_ptr)
  916. wr_ptr -= OUT_BUFFER_SIZE;
  917. /* If we wrap the ring buffer, let the user space program handle it by
  918. * truncating the data. This could be more elegant, small buffer
  919. * fragments may occur.
  920. */
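/*
 * Example with the 8 KB out_buffer: if rd_ptr is at offset 8100 and
 * out_buf_count is 50, wr_ptr ends up at offset 8150; bytes_free is
 * first 8192 - 50 = 8142 and is then clipped to 8192 - 8150 = 42 so
 * the copy cannot run past buf_stop_ptr, i.e. a 100-byte write is
 * truncated to 42 bytes.
 */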
  921. bytes_free = OUT_BUFFER_SIZE - out_buf_count;
  922. if (wr_ptr + bytes_free > buf_stop_ptr)
  923. bytes_free = buf_stop_ptr - wr_ptr;
  924. trunc_count = (count < bytes_free) ? count : bytes_free;
  925. if (copy_from_user(wr_ptr, buf, trunc_count))
  926. return -EFAULT;
  927. DEBUGOUTBUF(printk(KERN_DEBUG "%-4d + %-4d = %-4d %p %p %p\n",
  928. out_buf_count, trunc_count,
  929. port->out_buf_count, port->out_buffer,
  930. wr_ptr, buf_stop_ptr));
  931. /* Make sure transmitter/receiver is running */
  932. if (!port->started) {
  933. reg_sser_rw_cfg cfg = REG_RD(sser, port->regi_sser, rw_cfg);
  934. reg_sser_rw_rec_cfg rec_cfg = REG_RD(sser, port->regi_sser, rw_rec_cfg);
  935. cfg.en = regk_sser_yes;
  936. rec_cfg.rec_en = port->input;
  937. REG_WR(sser, port->regi_sser, rw_cfg, cfg);
  938. REG_WR(sser, port->regi_sser, rw_rec_cfg, rec_cfg);
  939. port->started = 1;
  940. }
  941. /* Setup wait if blocking */
  942. if (!(file->f_flags & O_NONBLOCK)) {
  943. add_wait_queue(&port->out_wait_q, &wait);
  944. set_current_state(TASK_INTERRUPTIBLE);
  945. }
  946. spin_lock_irqsave(&port->lock, flags);
  947. port->out_buf_count += trunc_count;
  948. if (port->use_dma) {
  949. start_dma_out(port, wr_ptr, trunc_count);
  950. } else if (!port->tr_running) {
  951. reg_sser_rw_intr_mask intr_mask;
  952. intr_mask = REG_RD(sser, port->regi_sser, rw_intr_mask);
  953. /* Start sender by writing data */
  954. send_word(port);
  955. /* and enable transmitter ready IRQ */
  956. intr_mask.trdy = 1;
  957. REG_WR(sser, port->regi_sser, rw_intr_mask, intr_mask);
  958. }
  959. spin_unlock_irqrestore(&port->lock, flags);
  960. /* Exit if non blocking */
  961. if (file->f_flags & O_NONBLOCK) {
  962. DEBUGWRITE(printk(KERN_DEBUG "w d%d c %lu %08x\n",
  963. port->port_nbr, trunc_count,
  964. REG_RD_INT(dma, port->regi_dmaout, r_intr)));
  965. return trunc_count;
  966. }
  967. schedule();
  968. set_current_state(TASK_RUNNING);
  969. remove_wait_queue(&port->out_wait_q, &wait);
  970. if (signal_pending(current))
  971. return -EINTR;
  972. DEBUGWRITE(printk(KERN_DEBUG "w d%d c %lu\n",
  973. port->port_nbr, trunc_count));
  974. return trunc_count;
  975. }
  976. static ssize_t sync_serial_read(struct file * file, char * buf,
  977. size_t count, loff_t *ppos)
  978. {
  979. int dev = iminor(file->f_path.dentry->d_inode);
  980. int avail;
  981. sync_port *port;
  982. unsigned char* start;
  983. unsigned char* end;
  984. unsigned long flags;
  985. if (dev < 0 || dev >= NBR_PORTS || !ports[dev].enabled)
  986. {
  987. DEBUG(printk("Invalid minor %d\n", dev));
  988. return -ENODEV;
  989. }
  990. port = &ports[dev];
  991. DEBUGREAD(printk("R%d c %d ri %lu wi %lu /%lu\n", dev, count, port->readp - port->flip, port->writep - port->flip, port->in_buffer_size));
  992. if (!port->started)
  993. {
  994. reg_sser_rw_cfg cfg = REG_RD(sser, port->regi_sser, rw_cfg);
  995. reg_sser_rw_tr_cfg tr_cfg = REG_RD(sser, port->regi_sser, rw_tr_cfg);
  996. reg_sser_rw_rec_cfg rec_cfg = REG_RD(sser, port->regi_sser, rw_rec_cfg);
  997. cfg.en = regk_sser_yes;
  998. tr_cfg.tr_en = regk_sser_yes;
  999. rec_cfg.rec_en = regk_sser_yes;
  1000. REG_WR(sser, port->regi_sser, rw_cfg, cfg);
  1001. REG_WR(sser, port->regi_sser, rw_tr_cfg, tr_cfg);
  1002. REG_WR(sser, port->regi_sser, rw_rec_cfg, rec_cfg);
  1003. port->started = 1;
  1004. }
  1005. /* Calculate number of available bytes */
  1006. /* Save pointers to avoid that they are modified by interrupt */
  1007. spin_lock_irqsave(&port->lock, flags);
  1008. start = (unsigned char*)port->readp; /* cast away volatile */
  1009. end = (unsigned char*)port->writep; /* cast away volatile */
  1010. spin_unlock_irqrestore(&port->lock, flags);
  1011. while ((start == end) && !port->full) /* No data */
  1012. {
  1013. DEBUGREAD(printk(KERN_DEBUG "&"));
  1014. if (file->f_flags & O_NONBLOCK)
  1015. return -EAGAIN;
  1016. interruptible_sleep_on(&port->in_wait_q);
  1017. if (signal_pending(current))
  1018. return -EINTR;
  1019. spin_lock_irqsave(&port->lock, flags);
  1020. start = (unsigned char*)port->readp; /* cast away volatile */
  1021. end = (unsigned char*)port->writep; /* cast away volatile */
  1022. spin_unlock_irqrestore(&port->lock, flags);
  1023. }
  1024. /* Lazy read, never return wrapped data. */
  1025. if (port->full)
  1026. avail = port->in_buffer_size;
  1027. else if (end > start)
  1028. avail = end - start;
  1029. else
  1030. avail = port->flip + port->in_buffer_size - start;
  1031. count = count > avail ? avail : count;
  1032. if (copy_to_user(buf, start, count))
  1033. return -EFAULT;
  1034. /* Disable interrupts while updating readp */
  1035. spin_lock_irqsave(&port->lock, flags);
  1036. port->readp += count;
  1037. if (port->readp >= port->flip + port->in_buffer_size) /* Wrap? */
  1038. port->readp = port->flip;
  1039. port->full = 0;
  1040. spin_unlock_irqrestore(&port->lock, flags);
  1041. DEBUGREAD(printk("r %d\n", count));
  1042. return count;
  1043. }
  1044. static void send_word(sync_port* port)
  1045. {
  1046. reg_sser_rw_tr_cfg tr_cfg = REG_RD(sser, port->regi_sser, rw_tr_cfg);
  1047. reg_sser_rw_tr_data tr_data = {0};
  1048. switch(tr_cfg.sample_size)
  1049. {
  1050. case 8:
  1051. port->out_buf_count--;
  1052. tr_data.data = *port->out_rd_ptr++;
  1053. REG_WR(sser, port->regi_sser, rw_tr_data, tr_data);
  1054. if (port->out_rd_ptr >= port->out_buffer + OUT_BUFFER_SIZE)
  1055. port->out_rd_ptr = port->out_buffer;
  1056. break;
  1057. case 12:
  1058. {
  1059. int data = (*port->out_rd_ptr++) << 8;
  1060. data |= *port->out_rd_ptr++;
  1061. port->out_buf_count -= 2;
  1062. tr_data.data = data;
  1063. REG_WR(sser, port->regi_sser, rw_tr_data, tr_data);
  1064. if (port->out_rd_ptr >= port->out_buffer + OUT_BUFFER_SIZE)
  1065. port->out_rd_ptr = port->out_buffer;
  1066. }
  1067. break;
  1068. case 16:
  1069. port->out_buf_count -= 2;
  1070. tr_data.data = *(unsigned short *)port->out_rd_ptr;
  1071. REG_WR(sser, port->regi_sser, rw_tr_data, tr_data);
  1072. port->out_rd_ptr += 2;
  1073. if (port->out_rd_ptr >= port->out_buffer + OUT_BUFFER_SIZE)
  1074. port->out_rd_ptr = port->out_buffer;
  1075. break;
  1076. case 24:
  1077. port->out_buf_count -= 3;
  1078. tr_data.data = *(unsigned short *)port->out_rd_ptr;
  1079. REG_WR(sser, port->regi_sser, rw_tr_data, tr_data);
  1080. port->out_rd_ptr += 2;
  1081. tr_data.data = *port->out_rd_ptr++;
  1082. REG_WR(sser, port->regi_sser, rw_tr_data, tr_data);
  1083. if (port->out_rd_ptr >= port->out_buffer + OUT_BUFFER_SIZE)
  1084. port->out_rd_ptr = port->out_buffer;
  1085. break;
  1086. case 32:
  1087. port->out_buf_count -= 4;
  1088. tr_data.data = *(unsigned short *)port->out_rd_ptr;
  1089. REG_WR(sser, port->regi_sser, rw_tr_data, tr_data);
  1090. port->out_rd_ptr += 2;
  1091. tr_data.data = *(unsigned short *)port->out_rd_ptr;
  1092. REG_WR(sser, port->regi_sser, rw_tr_data, tr_data);
  1093. port->out_rd_ptr += 2;
  1094. if (port->out_rd_ptr >= port->out_buffer + OUT_BUFFER_SIZE)
  1095. port->out_rd_ptr = port->out_buffer;
  1096. break;
  1097. }
  1098. }
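/*
 * start_dma_out() queues one chunk of out_buffer on the TX descriptor
 * ring: the chunk is attached to active_tr_descr, the end-of-list flag
 * is moved from the previous descriptor to this one, and the DMA is
 * either given a fresh context (first transfer) or told to continue.
 */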
  1099. static void start_dma_out(struct sync_port *port,
  1100. const char *data, int count)
  1101. {
  1102. port->active_tr_descr->buf = (char *) virt_to_phys((char *) data);
  1103. port->active_tr_descr->after = port->active_tr_descr->buf + count;
  1104. port->active_tr_descr->intr = 1;
  1105. port->active_tr_descr->eol = 1;
  1106. port->prev_tr_descr->eol = 0;
  1107. DEBUGTRDMA(printk(KERN_DEBUG "Inserting eolr:%p eol@:%p\n",
  1108. port->prev_tr_descr, port->active_tr_descr));
  1109. port->prev_tr_descr = port->active_tr_descr;
  1110. port->active_tr_descr = phys_to_virt((int) port->active_tr_descr->next);
  1111. if (!port->tr_running) {
  1112. reg_sser_rw_tr_cfg tr_cfg = REG_RD(sser, port->regi_sser,
  1113. rw_tr_cfg);
  1114. port->out_context.next = 0;
  1115. port->out_context.saved_data =
  1116. (dma_descr_data *)virt_to_phys(port->prev_tr_descr);
  1117. port->out_context.saved_data_buf = port->prev_tr_descr->buf;
  1118. DMA_START_CONTEXT(port->regi_dmaout,
  1119. virt_to_phys((char *)&port->out_context));
  1120. tr_cfg.tr_en = regk_sser_yes;
  1121. REG_WR(sser, port->regi_sser, rw_tr_cfg, tr_cfg);
  1122. DEBUGTRDMA(printk(KERN_DEBUG "dma s\n"););
  1123. } else {
  1124. DMA_CONTINUE_DATA(port->regi_dmaout);
  1125. DEBUGTRDMA(printk(KERN_DEBUG "dma c\n"););
  1126. }
  1127. port->tr_running = 1;
  1128. }
  1129. static void start_dma_in(sync_port *port)
  1130. {
  1131. int i;
  1132. char *buf;
  1133. port->writep = port->flip;
  1134. if (port->writep > port->flip + port->in_buffer_size) {
  1135. panic("Offset too large in sync serial driver\n");
  1136. return;
  1137. }
  1138. buf = (char*)virt_to_phys(port->in_buffer);
  1139. for (i = 0; i < NBR_IN_DESCR; i++) {
  1140. port->in_descr[i].buf = buf;
  1141. port->in_descr[i].after = buf + port->inbufchunk;
  1142. port->in_descr[i].intr = 1;
  1143. port->in_descr[i].next = (dma_descr_data*)virt_to_phys(&port->in_descr[i+1]);
  1144. port->in_descr[i].buf = buf;
  1145. buf += port->inbufchunk;
  1146. }
  1147. /* Link the last descriptor to the first */
  1148. port->in_descr[i-1].next = (dma_descr_data*)virt_to_phys(&port->in_descr[0]);
  1149. port->in_descr[i-1].eol = regk_sser_yes;
  1150. port->next_rx_desc = &port->in_descr[0];
  1151. port->prev_rx_desc = &port->in_descr[NBR_IN_DESCR - 1];
  1152. port->in_context.saved_data = (dma_descr_data*)virt_to_phys(&port->in_descr[0]);
  1153. port->in_context.saved_data_buf = port->in_descr[0].buf;
  1154. DMA_START_CONTEXT(port->regi_dmain, virt_to_phys(&port->in_context));
  1155. }
  1156. #ifdef SYNC_SER_DMA
  1157. static irqreturn_t tr_interrupt(int irq, void *dev_id)
  1158. {
  1159. reg_dma_r_masked_intr masked;
  1160. reg_dma_rw_ack_intr ack_intr = {.data = regk_dma_yes};
  1161. reg_dma_rw_stat stat;
  1162. int i;
  1163. int found = 0;
  1164. int stop_sser = 0;
  1165. for (i = 0; i < NBR_PORTS; i++) {
  1166. sync_port *port = &ports[i];
  1167. if (!port->enabled || !port->use_dma)
  1168. continue;
  1169. /* IRQ active for the port? */
  1170. masked = REG_RD(dma, port->regi_dmaout, r_masked_intr);
  1171. if (!masked.data)
  1172. continue;
  1173. found = 1;
  1174. /* Check if we should stop the DMA transfer */
  1175. stat = REG_RD(dma, port->regi_dmaout, rw_stat);
  1176. if (stat.list_state == regk_dma_data_at_eol)
  1177. stop_sser = 1;
  1178. /* Clear IRQ */
  1179. REG_WR(dma, port->regi_dmaout, rw_ack_intr, ack_intr);
  1180. if (!stop_sser) {
  1181. /* The DMA has completed a descriptor, EOL was not
  1182. * encountered, so step relevant descriptor and
  1183. * datapointers forward. */
  1184. int sent;
  1185. sent = port->catch_tr_descr->after -
  1186. port->catch_tr_descr->buf;
  1187. DEBUGTXINT(printk(KERN_DEBUG "%-4d - %-4d = %-4d\t"
  1188. "in descr %p (ac: %p)\n",
  1189. port->out_buf_count, sent,
  1190. port->out_buf_count - sent,
  1191. port->catch_tr_descr,
  1192. port->active_tr_descr););
  1193. port->out_buf_count -= sent;
  1194. port->catch_tr_descr =
  1195. phys_to_virt((int) port->catch_tr_descr->next);
  1196. port->out_rd_ptr =
  1197. phys_to_virt((int) port->catch_tr_descr->buf);
  1198. } else {
  1199. int i, sent;
  1200. /* EOL handler.
  1201. * Note that if an EOL was encountered during the irq
  1202. * locked section of sync_ser_write the DMA will be
  1203. * restarted and the eol flag will be cleared.
  1204. * The remaining descriptors will be traversed by
  1205. * the descriptor interrupts as usual.
  1206. */
  1207. i = 0;
  1208. while (!port->catch_tr_descr->eol) {
  1209. sent = port->catch_tr_descr->after -
  1210. port->catch_tr_descr->buf;
  1211. DEBUGOUTBUF(printk(KERN_DEBUG
  1212. "traversing descr %p -%d (%d)\n",
  1213. port->catch_tr_descr,
  1214. sent,
  1215. port->out_buf_count));
  1216. port->out_buf_count -= sent;
  1217. port->catch_tr_descr = phys_to_virt(
  1218. (int)port->catch_tr_descr->next);
  1219. i++;
  1220. if (i >= NBR_OUT_DESCR) {
  1221. /* TODO: Reset and recover */
  1222. panic("sync_serial: missing eol");
  1223. }
  1224. }
  1225. sent = port->catch_tr_descr->after -
  1226. port->catch_tr_descr->buf;
  1227. DEBUGOUTBUF(printk(KERN_DEBUG
  1228. "eol at descr %p -%d (%d)\n",
  1229. port->catch_tr_descr,
  1230. sent,
  1231. port->out_buf_count));
  1232. port->out_buf_count -= sent;
  1233. /* Update read pointer to first free byte, we
  1234. * may already be writing data there. */
  1235. port->out_rd_ptr =
  1236. phys_to_virt((int) port->catch_tr_descr->after);
  1237. if (port->out_rd_ptr > port->out_buffer +
  1238. OUT_BUFFER_SIZE)
  1239. port->out_rd_ptr = port->out_buffer;
  1240. reg_sser_rw_tr_cfg tr_cfg =
  1241. REG_RD(sser, port->regi_sser, rw_tr_cfg);
  1242. DEBUGTXINT(printk(KERN_DEBUG
  1243. "tr_int DMA stop %d, set catch @ %p\n",
  1244. port->out_buf_count,
  1245. port->active_tr_descr));
  1246. if (port->out_buf_count != 0)
  1247. printk(KERN_CRIT "sync_ser: buffer not "
  1248. "empty after eol.\n");
  1249. port->catch_tr_descr = port->active_tr_descr;
  1250. port->tr_running = 0;
  1251. tr_cfg.tr_en = regk_sser_no;
  1252. REG_WR(sser, port->regi_sser, rw_tr_cfg, tr_cfg);
  1253. }
  1254. /* wake up the waiting process */
  1255. wake_up_interruptible(&port->out_wait_q);
  1256. }
  1257. return IRQ_RETVAL(found);
  1258. } /* tr_interrupt */
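/*
 * rx_interrupt() runs once per completed RX descriptor: each finished
 * inbufchunk is copied from the DMA in_buffer into the flip ring at
 * writep (wrapping when needed and flagging 'full' on overrun), the
 * end-of-list marker is advanced so the ring keeps running, and any
 * reader sleeping in sync_serial_read() is woken up.
 */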
  1259. static irqreturn_t rx_interrupt(int irq, void *dev_id)
  1260. {
  1261. reg_dma_r_masked_intr masked;
  1262. reg_dma_rw_ack_intr ack_intr = {.data = regk_dma_yes};
  1263. int i;
  1264. int found = 0;
  1265. for (i = 0; i < NBR_PORTS; i++)
  1266. {
  1267. sync_port *port = &ports[i];
  1268. if (!port->enabled || !port->use_dma )
  1269. continue;
  1270. masked = REG_RD(dma, port->regi_dmain, r_masked_intr);
  1271. if (masked.data) /* Descriptor interrupt */
  1272. {
  1273. found = 1;
  1274. while (REG_RD(dma, port->regi_dmain, rw_data) !=
  1275. virt_to_phys(port->next_rx_desc)) {
  1276. DEBUGRXINT(printk(KERN_DEBUG "!"));
  1277. if (port->writep + port->inbufchunk > port->flip + port->in_buffer_size) {
  1278. int first_size = port->flip + port->in_buffer_size - port->writep;
  1279. memcpy((char*)port->writep, phys_to_virt((unsigned)port->next_rx_desc->buf), first_size);
  1280. memcpy(port->flip, phys_to_virt((unsigned)port->next_rx_desc->buf+first_size), port->inbufchunk - first_size);
  1281. port->writep = port->flip + port->inbufchunk - first_size;
  1282. } else {
  1283. memcpy((char*)port->writep,
  1284. phys_to_virt((unsigned)port->next_rx_desc->buf),
  1285. port->inbufchunk);
  1286. port->writep += port->inbufchunk;
  1287. if (port->writep >= port->flip + port->in_buffer_size)
  1288. port->writep = port->flip;
  1289. }
  1290. if (port->writep == port->readp)
  1291. {
  1292. port->full = 1;
  1293. }
  1294. port->next_rx_desc->eol = 1;
  1295. port->prev_rx_desc->eol = 0;
  1296. /* Cache bug workaround */
  1297. flush_dma_descr(port->prev_rx_desc, 0);
  1298. port->prev_rx_desc = port->next_rx_desc;
  1299. port->next_rx_desc = phys_to_virt((unsigned)port->next_rx_desc->next);
  1300. /* Cache bug workaround */
  1301. flush_dma_descr(port->prev_rx_desc, 1);
  1302. /* wake up the waiting process */
  1303. wake_up_interruptible(&port->in_wait_q);
  1304. DMA_CONTINUE(port->regi_dmain);
  1305. REG_WR(dma, port->regi_dmain, rw_ack_intr, ack_intr);
  1306. }
  1307. }
  1308. }
  1309. return IRQ_RETVAL(found);
  1310. } /* rx_interrupt */
  1311. #endif /* SYNC_SER_DMA */
  1312. #ifdef SYNC_SER_MANUAL
  1313. static irqreturn_t manual_interrupt(int irq, void *dev_id)
  1314. {
  1315. int i;
  1316. int found = 0;
  1317. reg_sser_r_masked_intr masked;
  1318. for (i = 0; i < NBR_PORTS; i++)
  1319. {
  1320. sync_port *port = &ports[i];
  1321. if (!port->enabled || port->use_dma)
  1322. {
  1323. continue;
  1324. }
  1325. masked = REG_RD(sser, port->regi_sser, r_masked_intr);
  1326. if (masked.rdav) /* Data received? */
  1327. {
  1328. reg_sser_rw_rec_cfg rec_cfg = REG_RD(sser, port->regi_sser, rw_rec_cfg);
  1329. reg_sser_r_rec_data data = REG_RD(sser, port->regi_sser, r_rec_data);
  1330. found = 1;
  1331. /* Read data */
  1332. switch(rec_cfg.sample_size)
  1333. {
  1334. case 8:
  1335. *port->writep++ = data.data & 0xff;
  1336. break;
  1337. case 12:
  1338. *port->writep = (data.data & 0x0ff0) >> 4;
  1339. *(port->writep + 1) = data.data & 0x0f;
  1340. port->writep+=2;
  1341. break;
  1342. case 16:
  1343. *(unsigned short*)port->writep = data.data;
  1344. port->writep+=2;
  1345. break;
  1346. case 24:
  1347. *(unsigned int*)port->writep = data.data;
  1348. port->writep+=3;
  1349. break;
  1350. case 32:
  1351. *(unsigned int*)port->writep = data.data;
  1352. port->writep+=4;
  1353. break;
  1354. }
  1355. if (port->writep >= port->flip + port->in_buffer_size) /* Wrap? */
  1356. port->writep = port->flip;
  1357. if (port->writep == port->readp) {
  1358. /* receive buffer overrun, discard oldest data
  1359. */
  1360. port->readp++;
  1361. if (port->readp >= port->flip + port->in_buffer_size) /* Wrap? */
  1362. port->readp = port->flip;
  1363. }
  1364. if (sync_data_avail(port) >= port->inbufchunk)
  1365. wake_up_interruptible(&port->in_wait_q); /* Wake up application */
  1366. }
  1367. if (masked.trdy) /* Transmitter ready? */
  1368. {
  1369. found = 1;
  1370. if (port->out_buf_count > 0) /* More data to send */
  1371. send_word(port);
  1372. else /* transmission finished */
  1373. {
  1374. reg_sser_rw_intr_mask intr_mask;
  1375. intr_mask = REG_RD(sser, port->regi_sser, rw_intr_mask);
  1376. intr_mask.trdy = 0;
  1377. REG_WR(sser, port->regi_sser, rw_intr_mask, intr_mask);
  1378. wake_up_interruptible(&port->out_wait_q); /* Wake up application */
  1379. }
  1380. }
  1381. }
  1382. return IRQ_RETVAL(found);
  1383. }
  1384. #endif
  1385. module_init(etrax_sync_serial_init);