sync_serial.c

  1. /*
  2. * Simple synchronous serial port driver for ETRAX FS and Artpec-3.
  3. *
  4. * Copyright (c) 2005 Axis Communications AB
  5. *
  6. * Author: Mikael Starvik
  7. *
  8. */
  9. #include <linux/module.h>
  10. #include <linux/kernel.h>
  11. #include <linux/types.h>
  12. #include <linux/errno.h>
  13. #include <linux/major.h>
  14. #include <linux/sched.h>
  15. #include <linux/slab.h>
  16. #include <linux/smp_lock.h>
  17. #include <linux/interrupt.h>
  18. #include <linux/poll.h>
  19. #include <linux/init.h>
  20. #include <linux/timer.h>
  21. #include <linux/spinlock.h>
  22. #include <asm/io.h>
  23. #include <dma.h>
  24. #include <pinmux.h>
  25. #include <hwregs/reg_rdwr.h>
  26. #include <hwregs/sser_defs.h>
  27. #include <hwregs/dma_defs.h>
  28. #include <hwregs/dma.h>
  29. #include <hwregs/intr_vect_defs.h>
  30. #include <hwregs/intr_vect.h>
  31. #include <hwregs/reg_map.h>
  32. #include <asm/sync_serial.h>
  33. /* The receiver is a bit tricky because of the continuous stream of data.*/
  34. /* */
  35. /* Three DMA descriptors are linked together. Each DMA descriptor is */
  36. /* responsible for port->bufchunk of a common buffer. */
  37. /* */
  38. /*   +------------------------------------------------+  */
  39. /*   |   +----------+   +----------+   +----------+   |  */
  40. /*   +-> | Descr[0] |-->| Descr[1] |-->| Descr[2] |---+  */
  41. /*       +----------+   +----------+   +----------+      */
  42. /*            |              |              |            */
  43. /*            v              v              v            */
  44. /*       +----------------------------------------+      */
  45. /*       |                 BUFFER                 |      */
  46. /*       +----------------------------------------+      */
  47. /*       |<- data_avail ->|                              */
  48. /*       readp            writep                         */
  49. /* */
  50. /* If the application keeps up the pace readp will be right after writep.*/
  51. /* If the application can't keep the pace we have to throw away data. */
  52. /* The idea is that readp should be ready with the data pointed out by */
  53. /* Descr[i] when the DMA has filled in Descr[i+1]. */
  54. /* Otherwise we will discard */
  55. /* the rest of the data pointed out by Descr1 and set readp to the start */
  56. /* of Descr2 */
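/* A worked example of the bookkeeping above (numbers purely illustrative):  */
/* with an in_buffer_size of 12288, readp at offset 100 and writep at        */
/* offset 612, data_avail is 512 bytes; if writep has wrapped around to      */
/* offset 12, data_avail is 12288 - (100 - 12) = 12200 bytes. See            */
/* sync_data_avail() below for the exact computation.                        */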
  57. #define SYNC_SERIAL_MAJOR 125
  58. /* IN_BUFFER_SIZE should be a multiple of 6 to make sure that 24 bit */
  59. /* words can be handled */
  60. #define IN_BUFFER_SIZE 12288
  61. #define IN_DESCR_SIZE 256
  62. #define NBR_IN_DESCR (IN_BUFFER_SIZE/IN_DESCR_SIZE)
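/* With the sizes above this yields 12288 / 256 = 48 receive descriptors,    */
/* each covering one port->inbufchunk slice of the receive buffer.           */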
  63. #define OUT_BUFFER_SIZE (1024 * 8)
  64. #define NBR_OUT_DESCR 8
  65. #define DEFAULT_FRAME_RATE 0
  66. #define DEFAULT_WORD_RATE 7
  67. /* NOTE: Enabling some debug will likely cause overrun or underrun,
  68. * especially if manual mode is used.
  69. */
  70. #define DEBUG(x)
  71. #define DEBUGREAD(x)
  72. #define DEBUGWRITE(x)
  73. #define DEBUGPOLL(x)
  74. #define DEBUGRXINT(x)
  75. #define DEBUGTXINT(x)
  76. #define DEBUGTRDMA(x)
  77. #define DEBUGOUTBUF(x)
  78. typedef struct sync_port
  79. {
  80. reg_scope_instances regi_sser;
  81. reg_scope_instances regi_dmain;
  82. reg_scope_instances regi_dmaout;
  83. char started; /* 1 if port has been started */
  84. char port_nbr; /* Port 0 or 1 */
  85. char busy; /* 1 if port is busy */
  86. char enabled; /* 1 if port is enabled */
  87. char use_dma; /* 1 if port uses dma */
  88. char tr_running;
  89. char init_irqs;
  90. int output;
  91. int input;
  92. /* Next byte to be read by application */
  93. volatile unsigned char *volatile readp;
  94. /* Next byte to be written by etrax */
  95. volatile unsigned char *volatile writep;
  96. unsigned int in_buffer_size;
  97. unsigned int inbufchunk;
  98. unsigned char out_buffer[OUT_BUFFER_SIZE] __attribute__ ((aligned(32)));
  99. unsigned char in_buffer[IN_BUFFER_SIZE]__attribute__ ((aligned(32)));
  100. unsigned char flip[IN_BUFFER_SIZE] __attribute__ ((aligned(32)));
  101. struct dma_descr_data* next_rx_desc;
  102. struct dma_descr_data* prev_rx_desc;
  103. /* Pointer to the first available descriptor in the ring,
  104. * unless active_tr_descr == catch_tr_descr and a dma
  105. * transfer is active */
  106. struct dma_descr_data *active_tr_descr;
  107. /* Pointer to the first allocated descriptor in the ring */
  108. struct dma_descr_data *catch_tr_descr;
  109. /* Pointer to the descriptor with the current end-of-list */
  110. struct dma_descr_data *prev_tr_descr;
  111. int full;
  112. /* Pointer to the first byte being read by DMA
  113. * or current position in out_buffer if not using DMA. */
  114. unsigned char *out_rd_ptr;
  115. /* Number of bytes currently locked for being read by DMA */
  116. int out_buf_count;
  117. dma_descr_data in_descr[NBR_IN_DESCR] __attribute__ ((__aligned__(16)));
  118. dma_descr_context in_context __attribute__ ((__aligned__(32)));
  119. dma_descr_data out_descr[NBR_OUT_DESCR]
  120. __attribute__ ((__aligned__(16)));
  121. dma_descr_context out_context __attribute__ ((__aligned__(32)));
  122. wait_queue_head_t out_wait_q;
  123. wait_queue_head_t in_wait_q;
  124. spinlock_t lock;
  125. } sync_port;
  126. static int etrax_sync_serial_init(void);
  127. static void initialize_port(int portnbr);
  128. static inline int sync_data_avail(struct sync_port *port);
  129. static int sync_serial_open(struct inode *, struct file*);
  130. static int sync_serial_release(struct inode*, struct file*);
  131. static unsigned int sync_serial_poll(struct file *filp, poll_table *wait);
  132. static int sync_serial_ioctl(struct inode*, struct file*,
  133. unsigned int cmd, unsigned long arg);
  134. static ssize_t sync_serial_write(struct file * file, const char * buf,
  135. size_t count, loff_t *ppos);
  136. static ssize_t sync_serial_read(struct file *file, char *buf,
  137. size_t count, loff_t *ppos);
  138. #if (defined(CONFIG_ETRAX_SYNCHRONOUS_SERIAL_PORT0) && \
  139. defined(CONFIG_ETRAX_SYNCHRONOUS_SERIAL0_DMA)) || \
  140. (defined(CONFIG_ETRAX_SYNCHRONOUS_SERIAL_PORT1) && \
  141. defined(CONFIG_ETRAX_SYNCHRONOUS_SERIAL1_DMA))
  142. #define SYNC_SER_DMA
  143. #endif
  144. static void send_word(sync_port* port);
  145. static void start_dma_out(struct sync_port *port, const char *data, int count);
  146. static void start_dma_in(sync_port* port);
  147. #ifdef SYNC_SER_DMA
  148. static irqreturn_t tr_interrupt(int irq, void *dev_id);
  149. static irqreturn_t rx_interrupt(int irq, void *dev_id);
  150. #endif
  151. #if (defined(CONFIG_ETRAX_SYNCHRONOUS_SERIAL_PORT0) && \
  152. !defined(CONFIG_ETRAX_SYNCHRONOUS_SERIAL0_DMA)) || \
  153. (defined(CONFIG_ETRAX_SYNCHRONOUS_SERIAL_PORT1) && \
  154. !defined(CONFIG_ETRAX_SYNCHRONOUS_SERIAL1_DMA))
  155. #define SYNC_SER_MANUAL
  156. #endif
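/* Build-time summary: SYNC_SER_DMA pulls in the DMA descriptor handling and */
/* the tr/rx DMA interrupt handlers when at least one enabled port uses DMA; */
/* SYNC_SER_MANUAL pulls in the per-word (register-level) interrupt handler  */
/* when at least one enabled port runs without DMA. Both may be built in.    */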
  157. #ifdef SYNC_SER_MANUAL
  158. static irqreturn_t manual_interrupt(int irq, void *dev_id);
  159. #endif
  160. #ifdef CONFIG_ETRAXFS /* ETRAX FS */
  161. #define OUT_DMA_NBR 4
  162. #define IN_DMA_NBR 5
  163. #define PINMUX_SSER pinmux_sser0
  164. #define SYNCSER_INST regi_sser0
  165. #define SYNCSER_INTR_VECT SSER0_INTR_VECT
  166. #define OUT_DMA_INST regi_dma4
  167. #define IN_DMA_INST regi_dma5
  168. #define DMA_OUT_INTR_VECT DMA4_INTR_VECT
  169. #define DMA_IN_INTR_VECT DMA5_INTR_VECT
  170. #define REQ_DMA_SYNCSER dma_sser0
  171. #else /* Artpec-3 */
  172. #define OUT_DMA_NBR 6
  173. #define IN_DMA_NBR 7
  174. #define PINMUX_SSER pinmux_sser
  175. #define SYNCSER_INST regi_sser
  176. #define SYNCSER_INTR_VECT SSER_INTR_VECT
  177. #define OUT_DMA_INST regi_dma6
  178. #define IN_DMA_INST regi_dma7
  179. #define DMA_OUT_INTR_VECT DMA6_INTR_VECT
  180. #define DMA_IN_INTR_VECT DMA7_INTR_VECT
  181. #define REQ_DMA_SYNCSER dma_sser
  182. #endif
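/* On ETRAX FS the first port is wired to DMA channels 4 (out) and 5 (in);   */
/* on Artpec-3 the single sser uses channels 6 and 7, per the defines above. */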
  183. /* The ports */
  184. static struct sync_port ports[]=
  185. {
  186. {
  187. .regi_sser = SYNCSER_INST,
  188. .regi_dmaout = OUT_DMA_INST,
  189. .regi_dmain = IN_DMA_INST,
  190. #if defined(CONFIG_ETRAX_SYNCHRONOUS_SERIAL0_DMA)
  191. .use_dma = 1,
  192. #else
  193. .use_dma = 0,
  194. #endif
  195. }
  196. #ifdef CONFIG_ETRAXFS
  197. ,
  198. {
  199. .regi_sser = regi_sser1,
  200. .regi_dmaout = regi_dma6,
  201. .regi_dmain = regi_dma7,
  202. #if defined(CONFIG_ETRAX_SYNCHRONOUS_SERIAL1_DMA)
  203. .use_dma = 1,
  204. #else
  205. .use_dma = 0,
  206. #endif
  207. }
  208. #endif
  209. };
  210. #define NBR_PORTS ARRAY_SIZE(ports)
  211. static const struct file_operations sync_serial_fops = {
  212. .owner = THIS_MODULE,
  213. .write = sync_serial_write,
  214. .read = sync_serial_read,
  215. .poll = sync_serial_poll,
  216. .ioctl = sync_serial_ioctl,
  217. .open = sync_serial_open,
  218. .release = sync_serial_release
  219. };
  220. static int __init etrax_sync_serial_init(void)
  221. {
  222. ports[0].enabled = 0;
  223. #ifdef CONFIG_ETRAXFS
  224. ports[1].enabled = 0;
  225. #endif
  226. if (register_chrdev(SYNC_SERIAL_MAJOR, "sync serial",
  227. &sync_serial_fops) < 0) {
  228. printk(KERN_WARNING
  229. "Unable to get major for synchronous serial port\n");
  230. return -EBUSY;
  231. }
  232. /* Initialize Ports */
  233. #if defined(CONFIG_ETRAX_SYNCHRONOUS_SERIAL_PORT0)
  234. if (crisv32_pinmux_alloc_fixed(PINMUX_SSER)) {
  235. printk(KERN_WARNING
  236. "Unable to alloc pins for synchronous serial port 0\n");
  237. return -EIO;
  238. }
  239. ports[0].enabled = 1;
  240. initialize_port(0);
  241. #endif
  242. #if defined(CONFIG_ETRAX_SYNCHRONOUS_SERIAL_PORT1)
  243. if (crisv32_pinmux_alloc_fixed(pinmux_sser1)) {
  244. printk(KERN_WARNING
  245. "Unable to alloc pins for synchronous serial port 0\n");
  246. return -EIO;
  247. }
  248. ports[1].enabled = 1;
  249. initialize_port(1);
  250. #endif
  251. #ifdef CONFIG_ETRAXFS
  252. printk(KERN_INFO "ETRAX FS synchronous serial port driver\n");
  253. #else
  254. printk(KERN_INFO "Artpec-3 synchronous serial port driver\n");
  255. #endif
  256. return 0;
  257. }
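/* Userspace access sketch. The device node name below is only an example    */
/* (the driver fixes the major number but does not create any nodes):        */
/*                                                                            */
/*   mknod /dev/syncser0 c 125 0          SYNC_SERIAL_MAJOR, minor = port 0   */
/*   fd = open("/dev/syncser0", O_RDWR);                                      */
/*   ioctl(fd, SSP_MODE, MASTER_OUTPUT);  then write()/read() as usual        */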
  258. static void __init initialize_port(int portnbr)
  259. {
  260. int __attribute__((unused)) i;
  261. struct sync_port *port = &ports[portnbr];
  262. reg_sser_rw_cfg cfg = {0};
  263. reg_sser_rw_frm_cfg frm_cfg = {0};
  264. reg_sser_rw_tr_cfg tr_cfg = {0};
  265. reg_sser_rw_rec_cfg rec_cfg = {0};
  266. DEBUG(printk(KERN_DEBUG "Init sync serial port %d\n", portnbr));
  267. port->port_nbr = portnbr;
  268. port->init_irqs = 1;
  269. port->out_rd_ptr = port->out_buffer;
  270. port->out_buf_count = 0;
  271. port->output = 1;
  272. port->input = 0;
  273. port->readp = port->flip;
  274. port->writep = port->flip;
  275. port->in_buffer_size = IN_BUFFER_SIZE;
  276. port->inbufchunk = IN_DESCR_SIZE;
  277. port->next_rx_desc = &port->in_descr[0];
  278. port->prev_rx_desc = &port->in_descr[NBR_IN_DESCR-1];
  279. port->prev_rx_desc->eol = 1;
  280. init_waitqueue_head(&port->out_wait_q);
  281. init_waitqueue_head(&port->in_wait_q);
  282. spin_lock_init(&port->lock);
  283. cfg.out_clk_src = regk_sser_intern_clk;
  284. cfg.out_clk_pol = regk_sser_pos;
  285. cfg.clk_od_mode = regk_sser_no;
  286. cfg.clk_dir = regk_sser_out;
  287. cfg.gate_clk = regk_sser_no;
  288. cfg.base_freq = regk_sser_f29_493;
  289. cfg.clk_div = 256;
  290. REG_WR(sser, port->regi_sser, rw_cfg, cfg);
  291. frm_cfg.wordrate = DEFAULT_WORD_RATE;
  292. frm_cfg.type = regk_sser_edge;
  293. frm_cfg.frame_pin_dir = regk_sser_out;
  294. frm_cfg.frame_pin_use = regk_sser_frm;
  295. frm_cfg.status_pin_dir = regk_sser_in;
  296. frm_cfg.status_pin_use = regk_sser_hold;
  297. frm_cfg.out_on = regk_sser_tr;
  298. frm_cfg.tr_delay = 1;
  299. REG_WR(sser, port->regi_sser, rw_frm_cfg, frm_cfg);
  300. tr_cfg.urun_stop = regk_sser_no;
  301. tr_cfg.sample_size = 7;
  302. tr_cfg.sh_dir = regk_sser_msbfirst;
  303. tr_cfg.use_dma = port->use_dma ? regk_sser_yes : regk_sser_no;
  304. #if 0
  305. tr_cfg.rate_ctrl = regk_sser_bulk;
  306. tr_cfg.data_pin_use = regk_sser_dout;
  307. #else
  308. tr_cfg.rate_ctrl = regk_sser_iso;
  309. tr_cfg.data_pin_use = regk_sser_dout;
  310. #endif
  311. tr_cfg.bulk_wspace = 1;
  312. REG_WR(sser, port->regi_sser, rw_tr_cfg, tr_cfg);
  313. rec_cfg.sample_size = 7;
  314. rec_cfg.sh_dir = regk_sser_msbfirst;
  315. rec_cfg.use_dma = port->use_dma ? regk_sser_yes : regk_sser_no;
  316. rec_cfg.fifo_thr = regk_sser_inf;
  317. REG_WR(sser, port->regi_sser, rw_rec_cfg, rec_cfg);
  318. #ifdef SYNC_SER_DMA
  319. /* Setup the descriptor ring for dma out/transmit. */
  320. for (i = 0; i < NBR_OUT_DESCR; i++) {
  321. port->out_descr[i].wait = 0;
  322. port->out_descr[i].intr = 1;
  323. port->out_descr[i].eol = 0;
  324. port->out_descr[i].out_eop = 0;
  325. port->out_descr[i].next =
  326. (dma_descr_data *)virt_to_phys(&port->out_descr[i+1]);
  327. }
  328. /* Create a ring from the list. */
  329. port->out_descr[NBR_OUT_DESCR-1].next =
  330. (dma_descr_data *)virt_to_phys(&port->out_descr[0]);
  331. /* Setup context for traversing the ring. */
  332. port->active_tr_descr = &port->out_descr[0];
  333. port->prev_tr_descr = &port->out_descr[NBR_OUT_DESCR-1];
  334. port->catch_tr_descr = &port->out_descr[0];
  335. #endif
  336. }
  337. static inline int sync_data_avail(struct sync_port *port)
  338. {
  339. int avail;
  340. unsigned char *start;
  341. unsigned char *end;
  342. start = (unsigned char*)port->readp; /* cast away volatile */
  343. end = (unsigned char*)port->writep; /* cast away volatile */
  344. /* 0123456789          0123456789
  345.  *   -----             ---    ---
  346.  *   ^rp  ^wp             ^wp ^rp
  347.  */
  348. if (end >= start)
  349. avail = end - start;
  350. else
  351. avail = port->in_buffer_size - (start - end);
  352. return avail;
  353. }
  354. static inline int sync_data_avail_to_end(struct sync_port *port)
  355. {
  356. int avail;
  357. unsigned char *start;
  358. unsigned char *end;
  359. start = (unsigned char*)port->readp; /* cast away volatile */
  360. end = (unsigned char*)port->writep; /* cast away volatile */
  361. /* 0123456789          0123456789
  362.  *   -----                    ---
  363.  *   ^rp  ^wp             ^wp ^rp
  364.  */
  365. if (end >= start)
  366. avail = end - start;
  367. else
  368. avail = port->flip + port->in_buffer_size - start;
  369. return avail;
  370. }
  371. static int sync_serial_open(struct inode *inode, struct file *file)
  372. {
  373. int dev = iminor(inode);
  374. int ret = -EBUSY;
  375. sync_port *port;
  376. reg_dma_rw_cfg cfg = {.en = regk_dma_yes};
  377. reg_dma_rw_intr_mask intr_mask = {.data = regk_dma_yes};
  378. lock_kernel();
  379. DEBUG(printk(KERN_DEBUG "Open sync serial port %d\n", dev));
  380. if (dev < 0 || dev >= NBR_PORTS || !ports[dev].enabled)
  381. {
  382. DEBUG(printk(KERN_DEBUG "Invalid minor %d\n", dev));
  383. ret = -ENODEV;
  384. goto out;
  385. }
  386. port = &ports[dev];
  387. /* Allow opening this device twice (assuming one reader and one writer). */
  388. if (port->busy == 2)
  389. {
  390. DEBUG(printk(KERN_DEBUG "Device is busy.. \n"));
  391. goto out;
  392. }
  393. if (port->init_irqs) {
  394. if (port->use_dma) {
  395. if (port == &ports[0]) {
  396. #ifdef SYNC_SER_DMA
  397. if (request_irq(DMA_OUT_INTR_VECT,
  398. tr_interrupt,
  399. 0,
  400. "synchronous serial 0 dma tr",
  401. &ports[0])) {
  402. printk(KERN_CRIT "Can't allocate sync serial port 0 IRQ");
  403. goto out;
  404. } else if (request_irq(DMA_IN_INTR_VECT,
  405. rx_interrupt,
  406. 0,
  407. "synchronous serial 1 dma rx",
  408. &ports[0])) {
  409. free_irq(DMA_OUT_INTR_VECT, &port[0]);
  410. printk(KERN_CRIT "Can't allocate sync serial port 0 IRQ");
  411. goto out;
  412. } else if (crisv32_request_dma(OUT_DMA_NBR,
  413. "synchronous serial 0 dma tr",
  414. DMA_VERBOSE_ON_ERROR,
  415. 0,
  416. REQ_DMA_SYNCSER)) {
  417. free_irq(DMA_OUT_INTR_VECT, &port[0]);
  418. free_irq(DMA_IN_INTR_VECT, &port[0]);
  419. printk(KERN_CRIT "Can't allocate sync serial port 0 TX DMA channel");
  420. goto out;
  421. } else if (crisv32_request_dma(IN_DMA_NBR,
  422. "synchronous serial 0 dma rec",
  423. DMA_VERBOSE_ON_ERROR,
  424. 0,
  425. REQ_DMA_SYNCSER)) {
  426. crisv32_free_dma(OUT_DMA_NBR);
  427. free_irq(DMA_OUT_INTR_VECT, &port[0]);
  428. free_irq(DMA_IN_INTR_VECT, &port[0]);
  429. printk(KERN_CRIT "Can't allocate sync serial port 0 RX DMA channel");
  430. goto out;
  431. }
  432. #endif
  433. }
  434. #ifdef CONFIG_ETRAXFS
  435. else if (port == &ports[1]) {
  436. #ifdef SYNC_SER_DMA
  437. if (request_irq(DMA6_INTR_VECT,
  438. tr_interrupt,
  439. 0,
  440. "synchronous serial 1 dma tr",
  441. &ports[1])) {
  442. printk(KERN_CRIT "Can't allocate sync serial port 1 IRQ");
  443. goto out;
  444. } else if (request_irq(DMA7_INTR_VECT,
  445. rx_interrupt,
  446. 0,
  447. "synchronous serial 1 dma rx",
  448. &ports[1])) {
  449. free_irq(DMA6_INTR_VECT, &ports[1]);
  450. printk(KERN_CRIT "Can't allocate sync serial port 1 IRQ");
  451. goto out;
  452. } else if (crisv32_request_dma(
  453. SYNC_SER1_TX_DMA_NBR,
  454. "synchronous serial 1 dma tr",
  455. DMA_VERBOSE_ON_ERROR,
  456. 0,
  457. dma_sser1)) {
  458. free_irq(DMA6_INTR_VECT, &ports[1]);
  459. free_irq(DMA7_INTR_VECT, &ports[1]);
  460. printk(KERN_CRIT "Can't allocate sync serial port 1 TX DMA channel");
  461. goto out;
  462. } else if (crisv32_request_dma(
  463. SYNC_SER1_RX_DMA_NBR,
  464. "synchronous serial 3 dma rec",
  465. DMA_VERBOSE_ON_ERROR,
  466. 0,
  467. dma_sser1)) {
  468. crisv32_free_dma(SYNC_SER1_TX_DMA_NBR);
  469. free_irq(DMA6_INTR_VECT, &ports[1]);
  470. free_irq(DMA7_INTR_VECT, &ports[1]);
  471. printk(KERN_CRIT "Can't allocate sync serial port 1 RX DMA channel");
  472. goto out;
  473. }
  474. #endif
  475. }
  476. #endif
  477. /* Enable DMAs */
  478. REG_WR(dma, port->regi_dmain, rw_cfg, cfg);
  479. REG_WR(dma, port->regi_dmaout, rw_cfg, cfg);
  480. /* Enable DMA IRQs */
  481. REG_WR(dma, port->regi_dmain, rw_intr_mask, intr_mask);
  482. REG_WR(dma, port->regi_dmaout, rw_intr_mask, intr_mask);
  483. /* Set up wordsize = 1 for DMAs. */
  484. DMA_WR_CMD (port->regi_dmain, regk_dma_set_w_size1);
  485. DMA_WR_CMD (port->regi_dmaout, regk_dma_set_w_size1);
  486. start_dma_in(port);
  487. port->init_irqs = 0;
  488. } else { /* !port->use_dma */
  489. #ifdef SYNC_SER_MANUAL
  490. if (port == &ports[0]) {
  491. if (request_irq(SYNCSER_INTR_VECT,
  492. manual_interrupt,
  493. 0,
  494. "synchronous serial manual irq",
  495. &ports[0])) {
  496. printk(KERN_CRIT "Can't allocate sync serial manual irq");
  497. goto out;
  498. }
  499. }
  500. #ifdef CONFIG_ETRAXFS
  501. else if (port == &ports[1]) {
  502. if (request_irq(SSER1_INTR_VECT,
  503. manual_interrupt,
  504. 0,
  505. "synchronous serial manual irq",
  506. &ports[1])) {
  507. printk(KERN_CRIT "Can't allocate sync serial manual irq");
  508. goto out;
  509. }
  510. }
  511. #endif
  512. port->init_irqs = 0;
  513. #else
  514. panic("sync_serial: Manual mode not supported.\n");
  515. #endif /* SYNC_SER_MANUAL */
  516. }
  517. } /* port->init_irqs */
  518. port->busy++;
  519. ret = 0;
  520. out:
  521. unlock_kernel();
  522. return ret;
  523. }
  524. static int sync_serial_release(struct inode *inode, struct file *file)
  525. {
  526. int dev = iminor(inode);
  527. sync_port *port;
  528. if (dev < 0 || dev >= NBR_PORTS || !ports[dev].enabled)
  529. {
  530. DEBUG(printk("Invalid minor %d\n", dev));
  531. return -ENODEV;
  532. }
  533. port = &ports[dev];
  534. if (port->busy)
  535. port->busy--;
  536. if (!port->busy)
  537. /* XXX */ ;
  538. return 0;
  539. }
  540. static unsigned int sync_serial_poll(struct file *file, poll_table *wait)
  541. {
  542. int dev = iminor(file->f_path.dentry->d_inode);
  543. unsigned int mask = 0;
  544. sync_port *port;
  545. DEBUGPOLL( static unsigned int prev_mask = 0; );
  546. port = &ports[dev];
  547. if (!port->started) {
  548. reg_sser_rw_cfg cfg = REG_RD(sser, port->regi_sser, rw_cfg);
  549. reg_sser_rw_rec_cfg rec_cfg =
  550. REG_RD(sser, port->regi_sser, rw_rec_cfg);
  551. cfg.en = regk_sser_yes;
  552. rec_cfg.rec_en = port->input;
  553. REG_WR(sser, port->regi_sser, rw_cfg, cfg);
  554. REG_WR(sser, port->regi_sser, rw_rec_cfg, rec_cfg);
  555. port->started = 1;
  556. }
  557. poll_wait(file, &port->out_wait_q, wait);
  558. poll_wait(file, &port->in_wait_q, wait);
  559. /* No active transfer, descriptors are available */
  560. if (port->output && !port->tr_running)
  561. mask |= POLLOUT | POLLWRNORM;
  562. /* Descriptor and buffer space available. */
  563. if (port->output &&
  564. port->active_tr_descr != port->catch_tr_descr &&
  565. port->out_buf_count < OUT_BUFFER_SIZE)
  566. mask |= POLLOUT | POLLWRNORM;
  567. /* At least an inbufchunk of data */
  568. if (port->input && sync_data_avail(port) >= port->inbufchunk)
  569. mask |= POLLIN | POLLRDNORM;
  570. DEBUGPOLL(if (mask != prev_mask)
  571. printk("sync_serial_poll: mask 0x%08X %s %s\n", mask,
  572. mask&POLLOUT?"POLLOUT":"", mask&POLLIN?"POLLIN":"");
  573. prev_mask = mask;
  574. );
  575. return mask;
  576. }
  577. static int sync_serial_ioctl(struct inode *inode, struct file *file,
  578. unsigned int cmd, unsigned long arg)
  579. {
  580. int return_val = 0;
  581. int dma_w_size = regk_dma_set_w_size1;
  582. int dev = iminor(file->f_path.dentry->d_inode);
  583. sync_port *port;
  584. reg_sser_rw_tr_cfg tr_cfg;
  585. reg_sser_rw_rec_cfg rec_cfg;
  586. reg_sser_rw_frm_cfg frm_cfg;
  587. reg_sser_rw_cfg gen_cfg;
  588. reg_sser_rw_intr_mask intr_mask;
  589. if (dev < 0 || dev >= NBR_PORTS || !ports[dev].enabled)
  590. {
  591. DEBUG(printk("Invalid minor %d\n", dev));
  592. return -1;
  593. }
  594. port = &ports[dev];
  595. spin_lock_irq(&port->lock);
  596. tr_cfg = REG_RD(sser, port->regi_sser, rw_tr_cfg);
  597. rec_cfg = REG_RD(sser, port->regi_sser, rw_rec_cfg);
  598. frm_cfg = REG_RD(sser, port->regi_sser, rw_frm_cfg);
  599. gen_cfg = REG_RD(sser, port->regi_sser, rw_cfg);
  600. intr_mask = REG_RD(sser, port->regi_sser, rw_intr_mask);
  601. switch(cmd)
  602. {
  603. case SSP_SPEED:
  604. if (GET_SPEED(arg) == CODEC)
  605. {
  606. unsigned int freq;
  607. gen_cfg.base_freq = regk_sser_f32;
  608. /* Clock divider will internally be
  609. * gen_cfg.clk_div + 1.
  610. */
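/* Example: FREQ_256kHz gives clk_div = 125 - 1 = 124, i.e. a divide-by-125  */
/* of the regk_sser_f32 base clock (apparently ~32 MHz, cf. the 29.493 MHz   */
/* and 100 MHz cases below): 32 MHz / 125 = 256 kHz.                          */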
  611. freq = GET_FREQ(arg);
  612. switch (freq) {
  613. case FREQ_32kHz:
  614. case FREQ_64kHz:
  615. case FREQ_128kHz:
  616. case FREQ_256kHz:
  617. gen_cfg.clk_div = 125 *
  618. (1 << (freq - FREQ_256kHz)) - 1;
  619. break;
  620. case FREQ_512kHz:
  621. gen_cfg.clk_div = 62;
  622. break;
  623. case FREQ_1MHz:
  624. case FREQ_2MHz:
  625. case FREQ_4MHz:
  626. gen_cfg.clk_div = 8 * (1 << freq) - 1;
  627. break;
  628. }
  629. } else {
  630. gen_cfg.base_freq = regk_sser_f29_493;
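/* The divisors below follow clk_div = 29493000 / (rate * 8) - 1, so the     */
/* generated clock runs at roughly eight times the nominal rate; e.g.        */
/* SSP9600 gives 29493000 / (9600 * 8) - 1 = 383.                            */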
  631. switch (GET_SPEED(arg)) {
  632. case SSP150:
  633. gen_cfg.clk_div = 29493000 / (150 * 8) - 1;
  634. break;
  635. case SSP300:
  636. gen_cfg.clk_div = 29493000 / (300 * 8) - 1;
  637. break;
  638. case SSP600:
  639. gen_cfg.clk_div = 29493000 / (600 * 8) - 1;
  640. break;
  641. case SSP1200:
  642. gen_cfg.clk_div = 29493000 / (1200 * 8) - 1;
  643. break;
  644. case SSP2400:
  645. gen_cfg.clk_div = 29493000 / (2400 * 8) - 1;
  646. break;
  647. case SSP4800:
  648. gen_cfg.clk_div = 29493000 / (4800 * 8) - 1;
  649. break;
  650. case SSP9600:
  651. gen_cfg.clk_div = 29493000 / (9600 * 8) - 1;
  652. break;
  653. case SSP19200:
  654. gen_cfg.clk_div = 29493000 / (19200 * 8) - 1;
  655. break;
  656. case SSP28800:
  657. gen_cfg.clk_div = 29493000 / (28800 * 8) - 1;
  658. break;
  659. case SSP57600:
  660. gen_cfg.clk_div = 29493000 / (57600 * 8) - 1;
  661. break;
  662. case SSP115200:
  663. gen_cfg.clk_div = 29493000 / (115200 * 8) - 1;
  664. break;
  665. case SSP230400:
  666. gen_cfg.clk_div = 29493000 / (230400 * 8) - 1;
  667. break;
  668. case SSP460800:
  669. gen_cfg.clk_div = 29493000 / (460800 * 8) - 1;
  670. break;
  671. case SSP921600:
  672. gen_cfg.clk_div = 29493000 / (921600 * 8) - 1;
  673. break;
  674. case SSP3125000:
  675. gen_cfg.base_freq = regk_sser_f100;
  676. gen_cfg.clk_div = 100000000 / (3125000 * 8) - 1;
  677. break;
  678. }
  679. }
  680. frm_cfg.wordrate = GET_WORD_RATE(arg);
  681. break;
  682. case SSP_MODE:
  683. switch(arg)
  684. {
  685. case MASTER_OUTPUT:
  686. port->output = 1;
  687. port->input = 0;
  688. frm_cfg.out_on = regk_sser_tr;
  689. frm_cfg.frame_pin_dir = regk_sser_out;
  690. gen_cfg.clk_dir = regk_sser_out;
  691. break;
  692. case SLAVE_OUTPUT:
  693. port->output = 1;
  694. port->input = 0;
  695. frm_cfg.frame_pin_dir = regk_sser_in;
  696. gen_cfg.clk_dir = regk_sser_in;
  697. break;
  698. case MASTER_INPUT:
  699. port->output = 0;
  700. port->input = 1;
  701. frm_cfg.frame_pin_dir = regk_sser_out;
  702. frm_cfg.out_on = regk_sser_intern_tb;
  703. gen_cfg.clk_dir = regk_sser_out;
  704. break;
  705. case SLAVE_INPUT:
  706. port->output = 0;
  707. port->input = 1;
  708. frm_cfg.frame_pin_dir = regk_sser_in;
  709. gen_cfg.clk_dir = regk_sser_in;
  710. break;
  711. case MASTER_BIDIR:
  712. port->output = 1;
  713. port->input = 1;
  714. frm_cfg.frame_pin_dir = regk_sser_out;
  715. frm_cfg.out_on = regk_sser_intern_tb;
  716. gen_cfg.clk_dir = regk_sser_out;
  717. break;
  718. case SLAVE_BIDIR:
  719. port->output = 1;
  720. port->input = 1;
  721. frm_cfg.frame_pin_dir = regk_sser_in;
  722. gen_cfg.clk_dir = regk_sser_in;
  723. break;
  724. default:
  725. spin_unlock_irq(&port->lock);
  726. return -EINVAL;
  727. }
  728. if (!port->use_dma || (arg == MASTER_OUTPUT || arg == SLAVE_OUTPUT))
  729. intr_mask.rdav = regk_sser_yes;
  730. break;
  731. case SSP_FRAME_SYNC:
  732. if (arg & NORMAL_SYNC) {
  733. frm_cfg.rec_delay = 1;
  734. frm_cfg.tr_delay = 1;
  735. }
  736. else if (arg & EARLY_SYNC)
  737. frm_cfg.rec_delay = frm_cfg.tr_delay = 0;
  738. else if (arg & SECOND_WORD_SYNC) {
  739. frm_cfg.rec_delay = 7;
  740. frm_cfg.tr_delay = 1;
  741. }
  742. tr_cfg.bulk_wspace = frm_cfg.tr_delay;
  743. frm_cfg.early_wend = regk_sser_yes;
  744. if (arg & BIT_SYNC)
  745. frm_cfg.type = regk_sser_edge;
  746. else if (arg & WORD_SYNC)
  747. frm_cfg.type = regk_sser_level;
  748. else if (arg & EXTENDED_SYNC)
  749. frm_cfg.early_wend = regk_sser_no;
  750. if (arg & SYNC_ON)
  751. frm_cfg.frame_pin_use = regk_sser_frm;
  752. else if (arg & SYNC_OFF)
  753. frm_cfg.frame_pin_use = regk_sser_gio0;
  754. dma_w_size = regk_dma_set_w_size2;
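/* sample_size is programmed as the word length minus one (7 = 8 bits, ...,  */
/* 31 = 32 bits). Only 8-bit words keep 8-bit DMA accesses (w_size1); all    */
/* larger word sizes switch the DMAs to 16-bit accesses (w_size2).           */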
  755. if (arg & WORD_SIZE_8) {
  756. rec_cfg.sample_size = tr_cfg.sample_size = 7;
  757. dma_w_size = regk_dma_set_w_size1;
  758. } else if (arg & WORD_SIZE_12)
  759. rec_cfg.sample_size = tr_cfg.sample_size = 11;
  760. else if (arg & WORD_SIZE_16)
  761. rec_cfg.sample_size = tr_cfg.sample_size = 15;
  762. else if (arg & WORD_SIZE_24)
  763. rec_cfg.sample_size = tr_cfg.sample_size = 23;
  764. else if (arg & WORD_SIZE_32)
  765. rec_cfg.sample_size = tr_cfg.sample_size = 31;
  766. if (arg & BIT_ORDER_MSB)
  767. rec_cfg.sh_dir = tr_cfg.sh_dir = regk_sser_msbfirst;
  768. else if (arg & BIT_ORDER_LSB)
  769. rec_cfg.sh_dir = tr_cfg.sh_dir = regk_sser_lsbfirst;
  770. if (arg & FLOW_CONTROL_ENABLE) {
  771. frm_cfg.status_pin_use = regk_sser_frm;
  772. rec_cfg.fifo_thr = regk_sser_thr16;
  773. } else if (arg & FLOW_CONTROL_DISABLE) {
  774. frm_cfg.status_pin_use = regk_sser_gio0;
  775. rec_cfg.fifo_thr = regk_sser_inf;
  776. }
  777. if (arg & CLOCK_NOT_GATED)
  778. gen_cfg.gate_clk = regk_sser_no;
  779. else if (arg & CLOCK_GATED)
  780. gen_cfg.gate_clk = regk_sser_yes;
  781. break;
  782. case SSP_IPOLARITY:
  783. /* NOTE!! negedge is considered NORMAL */
  784. if (arg & CLOCK_NORMAL)
  785. rec_cfg.clk_pol = regk_sser_neg;
  786. else if (arg & CLOCK_INVERT)
  787. rec_cfg.clk_pol = regk_sser_pos;
  788. if (arg & FRAME_NORMAL)
  789. frm_cfg.level = regk_sser_pos_hi;
  790. else if (arg & FRAME_INVERT)
  791. frm_cfg.level = regk_sser_neg_lo;
  792. if (arg & STATUS_NORMAL)
  793. gen_cfg.hold_pol = regk_sser_pos;
  794. else if (arg & STATUS_INVERT)
  795. gen_cfg.hold_pol = regk_sser_neg;
  796. break;
  797. case SSP_OPOLARITY:
  798. if (arg & CLOCK_NORMAL)
  799. gen_cfg.out_clk_pol = regk_sser_pos;
  800. else if (arg & CLOCK_INVERT)
  801. gen_cfg.out_clk_pol = regk_sser_neg;
  802. if (arg & FRAME_NORMAL)
  803. frm_cfg.level = regk_sser_pos_hi;
  804. else if (arg & FRAME_INVERT)
  805. frm_cfg.level = regk_sser_neg_lo;
  806. if (arg & STATUS_NORMAL)
  807. gen_cfg.hold_pol = regk_sser_pos;
  808. else if (arg & STATUS_INVERT)
  809. gen_cfg.hold_pol = regk_sser_neg;
  810. break;
  811. case SSP_SPI:
  812. rec_cfg.fifo_thr = regk_sser_inf;
  813. rec_cfg.sh_dir = tr_cfg.sh_dir = regk_sser_msbfirst;
  814. rec_cfg.sample_size = tr_cfg.sample_size = 7;
  815. frm_cfg.frame_pin_use = regk_sser_frm;
  816. frm_cfg.type = regk_sser_level;
  817. frm_cfg.tr_delay = 1;
  818. frm_cfg.level = regk_sser_neg_lo;
  819. if (arg & SPI_SLAVE)
  820. {
  821. rec_cfg.clk_pol = regk_sser_neg;
  822. gen_cfg.clk_dir = regk_sser_in;
  823. port->input = 1;
  824. port->output = 0;
  825. }
  826. else
  827. {
  828. gen_cfg.out_clk_pol = regk_sser_pos;
  829. port->input = 0;
  830. port->output = 1;
  831. gen_cfg.clk_dir = regk_sser_out;
  832. }
  833. break;
  834. case SSP_INBUFCHUNK:
  835. break;
  836. default:
  837. return_val = -1;
  838. }
  839. if (port->started) {
  840. rec_cfg.rec_en = port->input;
  841. gen_cfg.en = (port->output | port->input);
  842. }
  843. REG_WR(sser, port->regi_sser, rw_tr_cfg, tr_cfg);
  844. REG_WR(sser, port->regi_sser, rw_rec_cfg, rec_cfg);
  845. REG_WR(sser, port->regi_sser, rw_frm_cfg, frm_cfg);
  846. REG_WR(sser, port->regi_sser, rw_intr_mask, intr_mask);
  847. REG_WR(sser, port->regi_sser, rw_cfg, gen_cfg);
  848. if (cmd == SSP_FRAME_SYNC && (arg & (WORD_SIZE_8 | WORD_SIZE_12 |
  849. WORD_SIZE_16 | WORD_SIZE_24 | WORD_SIZE_32))) {
  850. int en = gen_cfg.en;
  851. gen_cfg.en = 0;
  852. REG_WR(sser, port->regi_sser, rw_cfg, gen_cfg);
  853. /* ##### Should DMA be stopped before we change dma size? */
  854. DMA_WR_CMD(port->regi_dmain, dma_w_size);
  855. DMA_WR_CMD(port->regi_dmaout, dma_w_size);
  856. gen_cfg.en = en;
  857. REG_WR(sser, port->regi_sser, rw_cfg, gen_cfg);
  858. }
  859. spin_unlock_irq(&port->lock);
  860. return return_val;
  861. }
  862. /* NOTE: sync_serial_write does not support concurrency */
  863. static ssize_t sync_serial_write(struct file *file, const char *buf,
  864. size_t count, loff_t *ppos)
  865. {
  866. int dev = iminor(file->f_path.dentry->d_inode);
  867. DECLARE_WAITQUEUE(wait, current);
  868. struct sync_port *port;
  869. int trunc_count;
  870. unsigned long flags;
  871. int bytes_free;
  872. int out_buf_count;
  873. unsigned char *rd_ptr; /* First allocated byte in the buffer */
  874. unsigned char *wr_ptr; /* First free byte in the buffer */
  875. unsigned char *buf_stop_ptr; /* Last byte + 1 */
  876. if (dev < 0 || dev >= NBR_PORTS || !ports[dev].enabled) {
  877. DEBUG(printk("Invalid minor %d\n", dev));
  878. return -ENODEV;
  879. }
  880. port = &ports[dev];
  881. /* |<- OUT_BUFFER_SIZE ->|
  882. * |<- out_buf_count ->|
  883. * |<- trunc_count ->| ...->|
  884. * ______________________________________________________
  885. * | free | data | free |
  886. * |_________|___________________|________________________|
  887. * ^ rd_ptr ^ wr_ptr
  888. */
  889. DEBUGWRITE(printk(KERN_DEBUG "W d%d c %lu a: %p c: %p\n",
  890. port->port_nbr, count, port->active_tr_descr,
  891. port->catch_tr_descr));
  892. /* Read variables that may be updated by interrupts */
  893. spin_lock_irqsave(&port->lock, flags);
  894. rd_ptr = port->out_rd_ptr;
  895. out_buf_count = port->out_buf_count;
  896. spin_unlock_irqrestore(&port->lock, flags);
  897. /* Check if resources are available */
  898. if (port->tr_running &&
  899. ((port->use_dma && port->active_tr_descr == port->catch_tr_descr) ||
  900. out_buf_count >= OUT_BUFFER_SIZE)) {
  901. DEBUGWRITE(printk(KERN_DEBUG "sser%d full\n", dev));
  902. return -EAGAIN;
  903. }
  904. buf_stop_ptr = port->out_buffer + OUT_BUFFER_SIZE;
  905. /* Determine pointer to the first free byte, before copying. */
  906. wr_ptr = rd_ptr + out_buf_count;
  907. if (wr_ptr >= buf_stop_ptr)
  908. wr_ptr -= OUT_BUFFER_SIZE;
  909. /* If we wrap the ring buffer, let the user space program handle it by
  910. * truncating the data. This could be made more elegant; small buffer
  911. * fragments may occur.
  912. */
  913. bytes_free = OUT_BUFFER_SIZE - out_buf_count;
  914. if (wr_ptr + bytes_free > buf_stop_ptr)
  915. bytes_free = buf_stop_ptr - wr_ptr;
  916. trunc_count = (count < bytes_free) ? count : bytes_free;
  917. if (copy_from_user(wr_ptr, buf, trunc_count))
  918. return -EFAULT;
  919. DEBUGOUTBUF(printk(KERN_DEBUG "%-4d + %-4d = %-4d %p %p %p\n",
  920. out_buf_count, trunc_count,
  921. port->out_buf_count, port->out_buffer,
  922. wr_ptr, buf_stop_ptr));
  923. /* Make sure transmitter/receiver is running */
  924. if (!port->started) {
  925. reg_sser_rw_cfg cfg = REG_RD(sser, port->regi_sser, rw_cfg);
  926. reg_sser_rw_rec_cfg rec_cfg = REG_RD(sser, port->regi_sser, rw_rec_cfg);
  927. cfg.en = regk_sser_yes;
  928. rec_cfg.rec_en = port->input;
  929. REG_WR(sser, port->regi_sser, rw_cfg, cfg);
  930. REG_WR(sser, port->regi_sser, rw_rec_cfg, rec_cfg);
  931. port->started = 1;
  932. }
  933. /* Setup wait if blocking */
  934. if (!(file->f_flags & O_NONBLOCK)) {
  935. add_wait_queue(&port->out_wait_q, &wait);
  936. set_current_state(TASK_INTERRUPTIBLE);
  937. }
  938. spin_lock_irqsave(&port->lock, flags);
  939. port->out_buf_count += trunc_count;
  940. if (port->use_dma) {
  941. start_dma_out(port, wr_ptr, trunc_count);
  942. } else if (!port->tr_running) {
  943. reg_sser_rw_intr_mask intr_mask;
  944. intr_mask = REG_RD(sser, port->regi_sser, rw_intr_mask);
  945. /* Start sender by writing data */
  946. send_word(port);
  947. /* and enable transmitter ready IRQ */
  948. intr_mask.trdy = 1;
  949. REG_WR(sser, port->regi_sser, rw_intr_mask, intr_mask);
  950. }
  951. spin_unlock_irqrestore(&port->lock, flags);
  952. /* Exit if non blocking */
  953. if (file->f_flags & O_NONBLOCK) {
  954. DEBUGWRITE(printk(KERN_DEBUG "w d%d c %lu %08x\n",
  955. port->port_nbr, trunc_count,
  956. REG_RD_INT(dma, port->regi_dmaout, r_intr)));
  957. return trunc_count;
  958. }
  959. schedule();
  960. set_current_state(TASK_RUNNING);
  961. remove_wait_queue(&port->out_wait_q, &wait);
  962. if (signal_pending(current))
  963. return -EINTR;
  964. DEBUGWRITE(printk(KERN_DEBUG "w d%d c %lu\n",
  965. port->port_nbr, trunc_count));
  966. return trunc_count;
  967. }
  968. static ssize_t sync_serial_read(struct file * file, char * buf,
  969. size_t count, loff_t *ppos)
  970. {
  971. int dev = iminor(file->f_path.dentry->d_inode);
  972. int avail;
  973. sync_port *port;
  974. unsigned char* start;
  975. unsigned char* end;
  976. unsigned long flags;
  977. if (dev < 0 || dev >= NBR_PORTS || !ports[dev].enabled)
  978. {
  979. DEBUG(printk("Invalid minor %d\n", dev));
  980. return -ENODEV;
  981. }
  982. port = &ports[dev];
  983. DEBUGREAD(printk("R%d c %d ri %lu wi %lu /%lu\n", dev, count, port->readp - port->flip, port->writep - port->flip, port->in_buffer_size));
  984. if (!port->started)
  985. {
  986. reg_sser_rw_cfg cfg = REG_RD(sser, port->regi_sser, rw_cfg);
  987. reg_sser_rw_tr_cfg tr_cfg = REG_RD(sser, port->regi_sser, rw_tr_cfg);
  988. reg_sser_rw_rec_cfg rec_cfg = REG_RD(sser, port->regi_sser, rw_rec_cfg);
  989. cfg.en = regk_sser_yes;
  990. tr_cfg.tr_en = regk_sser_yes;
  991. rec_cfg.rec_en = regk_sser_yes;
  992. REG_WR(sser, port->regi_sser, rw_cfg, cfg);
  993. REG_WR(sser, port->regi_sser, rw_tr_cfg, tr_cfg);
  994. REG_WR(sser, port->regi_sser, rw_rec_cfg, rec_cfg);
  995. port->started = 1;
  996. }
  997. /* Calculate number of available bytes */
  998. /* Save pointers so that they are not modified by an interrupt meanwhile */
  999. spin_lock_irqsave(&port->lock, flags);
  1000. start = (unsigned char*)port->readp; /* cast away volatile */
  1001. end = (unsigned char*)port->writep; /* cast away volatile */
  1002. spin_unlock_irqrestore(&port->lock, flags);
  1003. while ((start == end) && !port->full) /* No data */
  1004. {
  1005. DEBUGREAD(printk(KERN_DEBUG "&"));
  1006. if (file->f_flags & O_NONBLOCK)
  1007. return -EAGAIN;
  1008. interruptible_sleep_on(&port->in_wait_q);
  1009. if (signal_pending(current))
  1010. return -EINTR;
  1011. spin_lock_irqsave(&port->lock, flags);
  1012. start = (unsigned char*)port->readp; /* cast away volatile */
  1013. end = (unsigned char*)port->writep; /* cast away volatile */
  1014. spin_unlock_irqrestore(&port->lock, flags);
  1015. }
  1016. /* Lazy read, never return wrapped data. */
  1017. if (port->full)
  1018. avail = port->in_buffer_size;
  1019. else if (end > start)
  1020. avail = end - start;
  1021. else
  1022. avail = port->flip + port->in_buffer_size - start;
  1023. count = count > avail ? avail : count;
  1024. if (copy_to_user(buf, start, count))
  1025. return -EFAULT;
  1026. /* Disable interrupts while updating readp */
  1027. spin_lock_irqsave(&port->lock, flags);
  1028. port->readp += count;
  1029. if (port->readp >= port->flip + port->in_buffer_size) /* Wrap? */
  1030. port->readp = port->flip;
  1031. port->full = 0;
  1032. spin_unlock_irqrestore(&port->lock, flags);
  1033. DEBUGREAD(printk("r %d\n", count));
  1034. return count;
  1035. }
  1036. static void send_word(sync_port* port)
  1037. {
  1038. reg_sser_rw_tr_cfg tr_cfg = REG_RD(sser, port->regi_sser, rw_tr_cfg);
  1039. reg_sser_rw_tr_data tr_data = {0};
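/* Note: the case labels below are raw word lengths (8..32), while           */
/* tr_cfg.sample_size is written as length - 1 (7, 11, 15, 23, 31) elsewhere */
/* in this driver, so this switch may not match as configured; kept as-is.   */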
  1040. switch(tr_cfg.sample_size)
  1041. {
  1042. case 8:
  1043. port->out_buf_count--;
  1044. tr_data.data = *port->out_rd_ptr++;
  1045. REG_WR(sser, port->regi_sser, rw_tr_data, tr_data);
  1046. if (port->out_rd_ptr >= port->out_buffer + OUT_BUFFER_SIZE)
  1047. port->out_rd_ptr = port->out_buffer;
  1048. break;
  1049. case 12:
  1050. {
  1051. int data = (*port->out_rd_ptr++) << 8;
  1052. data |= *port->out_rd_ptr++;
  1053. port->out_buf_count -= 2;
  1054. tr_data.data = data;
  1055. REG_WR(sser, port->regi_sser, rw_tr_data, tr_data);
  1056. if (port->out_rd_ptr >= port->out_buffer + OUT_BUFFER_SIZE)
  1057. port->out_rd_ptr = port->out_buffer;
  1058. }
  1059. break;
  1060. case 16:
  1061. port->out_buf_count -= 2;
  1062. tr_data.data = *(unsigned short *)port->out_rd_ptr;
  1063. REG_WR(sser, port->regi_sser, rw_tr_data, tr_data);
  1064. port->out_rd_ptr += 2;
  1065. if (port->out_rd_ptr >= port->out_buffer + OUT_BUFFER_SIZE)
  1066. port->out_rd_ptr = port->out_buffer;
  1067. break;
  1068. case 24:
  1069. port->out_buf_count -= 3;
  1070. tr_data.data = *(unsigned short *)port->out_rd_ptr;
  1071. REG_WR(sser, port->regi_sser, rw_tr_data, tr_data);
  1072. port->out_rd_ptr += 2;
  1073. tr_data.data = *port->out_rd_ptr++;
  1074. REG_WR(sser, port->regi_sser, rw_tr_data, tr_data);
  1075. if (port->out_rd_ptr >= port->out_buffer + OUT_BUFFER_SIZE)
  1076. port->out_rd_ptr = port->out_buffer;
  1077. break;
  1078. case 32:
  1079. port->out_buf_count -= 4;
  1080. tr_data.data = *(unsigned short *)port->out_rd_ptr;
  1081. REG_WR(sser, port->regi_sser, rw_tr_data, tr_data);
  1082. port->out_rd_ptr += 2;
  1083. tr_data.data = *(unsigned short *)port->out_rd_ptr;
  1084. REG_WR(sser, port->regi_sser, rw_tr_data, tr_data);
  1085. port->out_rd_ptr += 2;
  1086. if (port->out_rd_ptr >= port->out_buffer + OUT_BUFFER_SIZE)
  1087. port->out_rd_ptr = port->out_buffer;
  1088. break;
  1089. }
  1090. }
  1091. static void start_dma_out(struct sync_port *port,
  1092. const char *data, int count)
  1093. {
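/* Queue 'count' bytes starting at 'data': the freshly filled descriptor     */
/* becomes the new end-of-list (eol = 1) and the previous eol is cleared, so */
/* the list always terminates at the most recently queued data; the DMA is   */
/* then either started with a fresh context or told to continue.             */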
  1094. port->active_tr_descr->buf = (char *) virt_to_phys((char *) data);
  1095. port->active_tr_descr->after = port->active_tr_descr->buf + count;
  1096. port->active_tr_descr->intr = 1;
  1097. port->active_tr_descr->eol = 1;
  1098. port->prev_tr_descr->eol = 0;
  1099. DEBUGTRDMA(printk(KERN_DEBUG "Inserting eolr:%p eol@:%p\n",
  1100. port->prev_tr_descr, port->active_tr_descr));
  1101. port->prev_tr_descr = port->active_tr_descr;
  1102. port->active_tr_descr = phys_to_virt((int) port->active_tr_descr->next);
  1103. if (!port->tr_running) {
  1104. reg_sser_rw_tr_cfg tr_cfg = REG_RD(sser, port->regi_sser,
  1105. rw_tr_cfg);
  1106. port->out_context.next = 0;
  1107. port->out_context.saved_data =
  1108. (dma_descr_data *)virt_to_phys(port->prev_tr_descr);
  1109. port->out_context.saved_data_buf = port->prev_tr_descr->buf;
  1110. DMA_START_CONTEXT(port->regi_dmaout,
  1111. virt_to_phys((char *)&port->out_context));
  1112. tr_cfg.tr_en = regk_sser_yes;
  1113. REG_WR(sser, port->regi_sser, rw_tr_cfg, tr_cfg);
  1114. DEBUGTRDMA(printk(KERN_DEBUG "dma s\n"););
  1115. } else {
  1116. DMA_CONTINUE_DATA(port->regi_dmaout);
  1117. DEBUGTRDMA(printk(KERN_DEBUG "dma c\n"););
  1118. }
  1119. port->tr_running = 1;
  1120. }
  1121. static void start_dma_in(sync_port *port)
  1122. {
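/* Set up the circular receive list: each of the NBR_IN_DESCR descriptors    */
/* covers one inbufchunk slice of in_buffer, the last descriptor links back  */
/* to the first, and the DMA context is started at descriptor 0.             */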
  1123. int i;
  1124. char *buf;
  1125. port->writep = port->flip;
  1126. if (port->writep > port->flip + port->in_buffer_size) {
  1127. panic("Offset too large in sync serial driver\n");
  1128. return;
  1129. }
  1130. buf = (char*)virt_to_phys(port->in_buffer);
  1131. for (i = 0; i < NBR_IN_DESCR; i++) {
  1132. port->in_descr[i].buf = buf;
  1133. port->in_descr[i].after = buf + port->inbufchunk;
  1134. port->in_descr[i].intr = 1;
  1135. port->in_descr[i].next = (dma_descr_data*)virt_to_phys(&port->in_descr[i+1]);
  1136. port->in_descr[i].buf = buf;
  1137. buf += port->inbufchunk;
  1138. }
  1139. /* Link the last descriptor to the first */
  1140. port->in_descr[i-1].next = (dma_descr_data*)virt_to_phys(&port->in_descr[0]);
  1141. port->in_descr[i-1].eol = regk_sser_yes;
  1142. port->next_rx_desc = &port->in_descr[0];
  1143. port->prev_rx_desc = &port->in_descr[NBR_IN_DESCR - 1];
  1144. port->in_context.saved_data = (dma_descr_data*)virt_to_phys(&port->in_descr[0]);
  1145. port->in_context.saved_data_buf = port->in_descr[0].buf;
  1146. DMA_START_CONTEXT(port->regi_dmain, virt_to_phys(&port->in_context));
  1147. }
  1148. #ifdef SYNC_SER_DMA
  1149. static irqreturn_t tr_interrupt(int irq, void *dev_id)
  1150. {
  1151. reg_dma_r_masked_intr masked;
  1152. reg_dma_rw_ack_intr ack_intr = {.data = regk_dma_yes};
  1153. reg_dma_rw_stat stat;
  1154. int i;
  1155. int found = 0;
  1156. int stop_sser = 0;
  1157. for (i = 0; i < NBR_PORTS; i++) {
  1158. sync_port *port = &ports[i];
  1159. if (!port->enabled || !port->use_dma)
  1160. continue;
  1161. /* IRQ active for the port? */
  1162. masked = REG_RD(dma, port->regi_dmaout, r_masked_intr);
  1163. if (!masked.data)
  1164. continue;
  1165. found = 1;
  1166. /* Check if we should stop the DMA transfer */
  1167. stat = REG_RD(dma, port->regi_dmaout, rw_stat);
  1168. if (stat.list_state == regk_dma_data_at_eol)
  1169. stop_sser = 1;
  1170. /* Clear IRQ */
  1171. REG_WR(dma, port->regi_dmaout, rw_ack_intr, ack_intr);
  1172. if (!stop_sser) {
  1173. /* The DMA has completed a descriptor, EOL was not
  1174. * encountered, so step relevant descriptor and
  1175. * datapointers forward. */
  1176. int sent;
  1177. sent = port->catch_tr_descr->after -
  1178. port->catch_tr_descr->buf;
  1179. DEBUGTXINT(printk(KERN_DEBUG "%-4d - %-4d = %-4d\t"
  1180. "in descr %p (ac: %p)\n",
  1181. port->out_buf_count, sent,
  1182. port->out_buf_count - sent,
  1183. port->catch_tr_descr,
  1184. port->active_tr_descr););
  1185. port->out_buf_count -= sent;
  1186. port->catch_tr_descr =
  1187. phys_to_virt((int) port->catch_tr_descr->next);
  1188. port->out_rd_ptr =
  1189. phys_to_virt((int) port->catch_tr_descr->buf);
  1190. } else {
  1191. int i, sent;
  1192. /* EOL handler.
  1193. * Note that if an EOL was encountered during the irq
  1194. * locked section of sync_ser_write the DMA will be
  1195. * restarted and the eol flag will be cleared.
  1196. * The remaining descriptors will be traversed by
  1197. * the descriptor interrupts as usual.
  1198. */
  1199. i = 0;
  1200. while (!port->catch_tr_descr->eol) {
  1201. sent = port->catch_tr_descr->after -
  1202. port->catch_tr_descr->buf;
  1203. DEBUGOUTBUF(printk(KERN_DEBUG
  1204. "traversing descr %p -%d (%d)\n",
  1205. port->catch_tr_descr,
  1206. sent,
  1207. port->out_buf_count));
  1208. port->out_buf_count -= sent;
  1209. port->catch_tr_descr = phys_to_virt(
  1210. (int)port->catch_tr_descr->next);
  1211. i++;
  1212. if (i >= NBR_OUT_DESCR) {
  1213. /* TODO: Reset and recover */
  1214. panic("sync_serial: missing eol");
  1215. }
  1216. }
  1217. sent = port->catch_tr_descr->after -
  1218. port->catch_tr_descr->buf;
  1219. DEBUGOUTBUF(printk(KERN_DEBUG
  1220. "eol at descr %p -%d (%d)\n",
  1221. port->catch_tr_descr,
  1222. sent,
  1223. port->out_buf_count));
  1224. port->out_buf_count -= sent;
  1225. /* Update read pointer to first free byte, we
  1226. * may already be writing data there. */
  1227. port->out_rd_ptr =
  1228. phys_to_virt((int) port->catch_tr_descr->after);
  1229. if (port->out_rd_ptr > port->out_buffer +
  1230. OUT_BUFFER_SIZE)
  1231. port->out_rd_ptr = port->out_buffer;
  1232. reg_sser_rw_tr_cfg tr_cfg =
  1233. REG_RD(sser, port->regi_sser, rw_tr_cfg);
  1234. DEBUGTXINT(printk(KERN_DEBUG
  1235. "tr_int DMA stop %d, set catch @ %p\n",
  1236. port->out_buf_count,
  1237. port->active_tr_descr));
  1238. if (port->out_buf_count != 0)
  1239. printk(KERN_CRIT "sync_ser: buffer not "
  1240. "empty after eol.\n");
  1241. port->catch_tr_descr = port->active_tr_descr;
  1242. port->tr_running = 0;
  1243. tr_cfg.tr_en = regk_sser_no;
  1244. REG_WR(sser, port->regi_sser, rw_tr_cfg, tr_cfg);
  1245. }
  1246. /* wake up the waiting process */
  1247. wake_up_interruptible(&port->out_wait_q);
  1248. }
  1249. return IRQ_RETVAL(found);
  1250. } /* tr_interrupt */
  1251. static irqreturn_t rx_interrupt(int irq, void *dev_id)
  1252. {
  1253. reg_dma_r_masked_intr masked;
  1254. reg_dma_rw_ack_intr ack_intr = {.data = regk_dma_yes};
  1255. int i;
  1256. int found = 0;
  1257. for (i = 0; i < NBR_PORTS; i++)
  1258. {
  1259. sync_port *port = &ports[i];
  1260. if (!port->enabled || !port->use_dma )
  1261. continue;
  1262. masked = REG_RD(dma, port->regi_dmain, r_masked_intr);
  1263. if (masked.data) /* Descriptor interrupt */
  1264. {
  1265. found = 1;
  1266. while (REG_RD(dma, port->regi_dmain, rw_data) !=
  1267. virt_to_phys(port->next_rx_desc)) {
  1268. DEBUGRXINT(printk(KERN_DEBUG "!"));
  1269. if (port->writep + port->inbufchunk > port->flip + port->in_buffer_size) {
  1270. int first_size = port->flip + port->in_buffer_size - port->writep;
  1271. memcpy((char*)port->writep, phys_to_virt((unsigned)port->next_rx_desc->buf), first_size);
  1272. memcpy(port->flip, phys_to_virt((unsigned)port->next_rx_desc->buf+first_size), port->inbufchunk - first_size);
  1273. port->writep = port->flip + port->inbufchunk - first_size;
  1274. } else {
  1275. memcpy((char*)port->writep,
  1276. phys_to_virt((unsigned)port->next_rx_desc->buf),
  1277. port->inbufchunk);
  1278. port->writep += port->inbufchunk;
  1279. if (port->writep >= port->flip + port->in_buffer_size)
  1280. port->writep = port->flip;
  1281. }
  1282. if (port->writep == port->readp)
  1283. {
  1284. port->full = 1;
  1285. }
  1286. port->next_rx_desc->eol = 1;
  1287. port->prev_rx_desc->eol = 0;
  1288. /* Cache bug workaround */
  1289. flush_dma_descr(port->prev_rx_desc, 0);
  1290. port->prev_rx_desc = port->next_rx_desc;
  1291. port->next_rx_desc = phys_to_virt((unsigned)port->next_rx_desc->next);
  1292. /* Cache bug workaround */
  1293. flush_dma_descr(port->prev_rx_desc, 1);
  1294. /* wake up the waiting process */
  1295. wake_up_interruptible(&port->in_wait_q);
  1296. DMA_CONTINUE(port->regi_dmain);
  1297. REG_WR(dma, port->regi_dmain, rw_ack_intr, ack_intr);
  1298. }
  1299. }
  1300. }
  1301. return IRQ_RETVAL(found);
  1302. } /* rx_interrupt */
  1303. #endif /* SYNC_SER_DMA */
  1304. #ifdef SYNC_SER_MANUAL
  1305. static irqreturn_t manual_interrupt(int irq, void *dev_id)
  1306. {
  1307. int i;
  1308. int found = 0;
  1309. reg_sser_r_masked_intr masked;
  1310. for (i = 0; i < NBR_PORTS; i++)
  1311. {
  1312. sync_port *port = &ports[i];
  1313. if (!port->enabled || port->use_dma)
  1314. {
  1315. continue;
  1316. }
  1317. masked = REG_RD(sser, port->regi_sser, r_masked_intr);
  1318. if (masked.rdav) /* Data received? */
  1319. {
  1320. reg_sser_rw_rec_cfg rec_cfg = REG_RD(sser, port->regi_sser, rw_rec_cfg);
  1321. reg_sser_r_rec_data data = REG_RD(sser, port->regi_sser, r_rec_data);
  1322. found = 1;
  1323. /* Read data */
  1324. switch(rec_cfg.sample_size)
  1325. {
  1326. case 8:
  1327. *port->writep++ = data.data & 0xff;
  1328. break;
  1329. case 12:
  1330. *port->writep = (data.data & 0x0ff0) >> 4;
  1331. *(port->writep + 1) = data.data & 0x0f;
  1332. port->writep+=2;
  1333. break;
  1334. case 16:
  1335. *(unsigned short*)port->writep = data.data;
  1336. port->writep+=2;
  1337. break;
  1338. case 24:
  1339. *(unsigned int*)port->writep = data.data;
  1340. port->writep+=3;
  1341. break;
  1342. case 32:
  1343. *(unsigned int*)port->writep = data.data;
  1344. port->writep+=4;
  1345. break;
  1346. }
  1347. if (port->writep >= port->flip + port->in_buffer_size) /* Wrap? */
  1348. port->writep = port->flip;
  1349. if (port->writep == port->readp) {
  1350. /* receive buffer overrun, discard oldest data
  1351. */
  1352. port->readp++;
  1353. if (port->readp >= port->flip + port->in_buffer_size) /* Wrap? */
  1354. port->readp = port->flip;
  1355. }
  1356. if (sync_data_avail(port) >= port->inbufchunk)
  1357. wake_up_interruptible(&port->in_wait_q); /* Wake up application */
  1358. }
  1359. if (masked.trdy) /* Transmitter ready? */
  1360. {
  1361. found = 1;
  1362. if (port->out_buf_count > 0) /* More data to send */
  1363. send_word(port);
  1364. else /* transmission finished */
  1365. {
  1366. reg_sser_rw_intr_mask intr_mask;
  1367. intr_mask = REG_RD(sser, port->regi_sser, rw_intr_mask);
  1368. intr_mask.trdy = 0;
  1369. REG_WR(sser, port->regi_sser, rw_intr_mask, intr_mask);
  1370. wake_up_interruptible(&port->out_wait_q); /* Wake up application */
  1371. }
  1372. }
  1373. }
  1374. return IRQ_RETVAL(found);
  1375. }
  1376. #endif
  1377. module_init(etrax_sync_serial_init);