pxaficp_ir.c

/*
 * linux/drivers/net/irda/pxaficp_ir.c
 *
 * Based on sa1100_ir.c by Russell King
 *
 * Changes copyright (C) 2003-2005 MontaVista Software, Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * Infra-red driver (SIR/FIR) for the PXA2xx embedded microprocessor
 *
 */
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/platform_device.h>
#include <linux/clk.h>

#include <net/irda/irda.h>
#include <net/irda/irmod.h>
#include <net/irda/wrapper.h>
#include <net/irda/irda_device.h>

#include <asm/dma.h>
#include <mach/irda.h>
#include <mach/pxa-regs.h>

#define IrSR_RXPL_NEG_IS_ZERO	(1<<4)
#define IrSR_RXPL_POS_IS_ZERO	0x0
#define IrSR_TXPL_NEG_IS_ZERO	(1<<3)
#define IrSR_TXPL_POS_IS_ZERO	0x0
#define IrSR_XMODE_PULSE_1_6	(1<<2)
#define IrSR_XMODE_PULSE_3_16	0x0
#define IrSR_RCVEIR_IR_MODE	(1<<1)
#define IrSR_RCVEIR_UART_MODE	0x0
#define IrSR_XMITIR_IR_MODE	(1<<0)
#define IrSR_XMITIR_UART_MODE	0x0

#define IrSR_IR_RECEIVE_ON (\
		IrSR_RXPL_NEG_IS_ZERO | \
		IrSR_TXPL_POS_IS_ZERO | \
		IrSR_XMODE_PULSE_3_16 | \
		IrSR_RCVEIR_IR_MODE   | \
		IrSR_XMITIR_UART_MODE)

#define IrSR_IR_TRANSMIT_ON (\
		IrSR_RXPL_NEG_IS_ZERO | \
		IrSR_TXPL_POS_IS_ZERO | \
		IrSR_XMODE_PULSE_3_16 | \
		IrSR_RCVEIR_UART_MODE | \
		IrSR_XMITIR_IR_MODE)

struct pxa_irda {
	int			speed;
	int			newspeed;
	unsigned long		last_oscr;

	unsigned char		*dma_rx_buff;
	unsigned char		*dma_tx_buff;
	dma_addr_t		dma_rx_buff_phy;
	dma_addr_t		dma_tx_buff_phy;
	unsigned int		dma_tx_buff_len;
	int			txdma;
	int			rxdma;

	struct net_device_stats	stats;
	struct irlap_cb		*irlap;
	struct qos_info		qos;

	iobuff_t		tx_buff;
	iobuff_t		rx_buff;

	struct device		*dev;
	struct pxaficp_platform_data *pdata;
	struct clk		*fir_clk;
	struct clk		*sir_clk;
	struct clk		*cur_clk;
};

static inline void pxa_irda_disable_clk(struct pxa_irda *si)
{
	if (si->cur_clk)
		clk_disable(si->cur_clk);
	si->cur_clk = NULL;
}

static inline void pxa_irda_enable_firclk(struct pxa_irda *si)
{
	si->cur_clk = si->fir_clk;
	clk_enable(si->fir_clk);
}

static inline void pxa_irda_enable_sirclk(struct pxa_irda *si)
{
	si->cur_clk = si->sir_clk;
	clk_enable(si->sir_clk);
}

#define IS_FIR(si)		((si)->speed >= 4000000)
#define IRDA_FRAME_SIZE_LIMIT	2047
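
/*
 * The two helpers below program PXA DMA channels in no-descriptor-fetch
 * mode: byte-wide transfers (DCMD_WIDTH1) in 32-byte bursts, paced by the
 * FICP data requests (DCMD_FLOWSRC on receive, DCMD_FLOWTRG on transmit),
 * moving data between the ICDR FIFO register and the coherent buffers
 * allocated in pxa_irda_start().  Only the transmit channel enables an
 * end-of-transfer interrupt (DCMD_ENDIRQEN); receive completion is
 * detected from the FICP end-of-frame status instead.
 */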
static inline void pxa_irda_fir_dma_rx_start(struct pxa_irda *si)
{
	DCSR(si->rxdma)  = DCSR_NODESC;
	DSADR(si->rxdma) = __PREG(ICDR);
	DTADR(si->rxdma) = si->dma_rx_buff_phy;
	DCMD(si->rxdma) = DCMD_INCTRGADDR | DCMD_FLOWSRC | DCMD_WIDTH1 | DCMD_BURST32 | IRDA_FRAME_SIZE_LIMIT;
	DCSR(si->rxdma) |= DCSR_RUN;
}

static inline void pxa_irda_fir_dma_tx_start(struct pxa_irda *si)
{
	DCSR(si->txdma)  = DCSR_NODESC;
	DSADR(si->txdma) = si->dma_tx_buff_phy;
	DTADR(si->txdma) = __PREG(ICDR);
	DCMD(si->txdma) = DCMD_INCSRCADDR | DCMD_FLOWTRG | DCMD_ENDIRQEN | DCMD_WIDTH1 | DCMD_BURST32 | si->dma_tx_buff_len;
	DCSR(si->txdma) |= DCSR_RUN;
}

/*
 * Set the IrDA communications speed.
 */
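/*
 * SIR baud rates are derived from the 14.7456 MHz STUART clock as
 * divisor = 14745600 / (16 * baudrate); for example 9600 baud uses a
 * divisor of 96 and 115200 baud a divisor of 8.  The only FIR rate
 * supported is 4 Mbps.
 */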
static int pxa_irda_set_speed(struct pxa_irda *si, int speed)
{
	unsigned long flags;
	unsigned int divisor;

	switch (speed) {
	case 9600:	case 19200:	case 38400:
	case 57600:	case 115200:

		/* refer to PXA250/210 Developer's Manual 10-7 */
		/* BaudRate = 14.7456 MHz / (16*Divisor) */
		divisor = 14745600 / (16 * speed);

		local_irq_save(flags);

		if (IS_FIR(si)) {
			/* stop RX DMA */
			DCSR(si->rxdma) &= ~DCSR_RUN;
			/* disable FICP */
			ICCR0 = 0;
			pxa_irda_disable_clk(si);

			/* set board transceiver to SIR mode */
			si->pdata->transceiver_mode(si->dev, IR_SIRMODE);

			/* enable the STUART clock */
			pxa_irda_enable_sirclk(si);
		}

		/* disable STUART first */
		STIER = 0;

		/* access DLL & DLH */
		STLCR |= LCR_DLAB;
		STDLL = divisor & 0xff;
		STDLH = divisor >> 8;
		STLCR &= ~LCR_DLAB;

		si->speed = speed;
		STISR = IrSR_IR_RECEIVE_ON | IrSR_XMODE_PULSE_1_6;
		STIER = IER_UUE | IER_RLSE | IER_RAVIE | IER_RTIOE;

		local_irq_restore(flags);
		break;

	case 4000000:
		local_irq_save(flags);

		/* disable STUART */
		STIER = 0;
		STISR = 0;
		pxa_irda_disable_clk(si);

		/* disable FICP first */
		ICCR0 = 0;

		/* set board transceiver to FIR mode */
		si->pdata->transceiver_mode(si->dev, IR_FIRMODE);

		/* enable the FICP clock */
		pxa_irda_enable_firclk(si);

		si->speed = speed;
		pxa_irda_fir_dma_rx_start(si);
		ICCR0 = ICCR0_ITR | ICCR0_RXE;

		local_irq_restore(flags);
		break;

	default:
		return -EINVAL;
	}

	return 0;
}
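
/*
 * In SIR mode the STUART only moves bytes; IrDA framing (BOF/EOF markers,
 * byte stuffing, FCS) is handled in software through the irda_device
 * async wrapper: async_unwrap_char() on receive and async_wrap_skb() on
 * transmit.
 */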
/* SIR interrupt service routine. */
static irqreturn_t pxa_irda_sir_irq(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct pxa_irda *si = netdev_priv(dev);
	int iir, lsr, data;

	iir = STIIR;

	switch (iir & 0x0F) {
	case 0x06: /* Receiver Line Status */
		lsr = STLSR;
		while (lsr & LSR_FIFOE) {
			data = STRBR;
			if (lsr & (LSR_OE | LSR_PE | LSR_FE | LSR_BI)) {
				printk(KERN_DEBUG "pxa_ir: sir receiving error\n");
				si->stats.rx_errors++;
				if (lsr & LSR_FE)
					si->stats.rx_frame_errors++;
				if (lsr & LSR_OE)
					si->stats.rx_fifo_errors++;
			} else {
				si->stats.rx_bytes++;
				async_unwrap_char(dev, &si->stats, &si->rx_buff, data);
			}
			lsr = STLSR;
		}
		si->last_oscr = OSCR;
		break;

	case 0x04: /* Received Data Available */
		/* fall through */

	case 0x0C: /* Character Timeout Indication */
		do {
			si->stats.rx_bytes++;
			async_unwrap_char(dev, &si->stats, &si->rx_buff, STRBR);
		} while (STLSR & LSR_DR);
		si->last_oscr = OSCR;
		break;

	case 0x02: /* Transmit FIFO Data Request */
		while ((si->tx_buff.len) && (STLSR & LSR_TDRQ)) {
			STTHR = *si->tx_buff.data++;
			si->tx_buff.len -= 1;
		}

		if (si->tx_buff.len == 0) {
			si->stats.tx_packets++;
			si->stats.tx_bytes += si->tx_buff.data - si->tx_buff.head;

			/* We need to ensure that the transmitter has finished. */
			while ((STLSR & LSR_TEMT) == 0)
				cpu_relax();
			si->last_oscr = OSCR;

			/*
			 * Ok, we've finished transmitting.  Now enable
			 * the receiver.  Sometimes we get a receive IRQ
			 * immediately after a transmit...
			 */
			if (si->newspeed) {
				pxa_irda_set_speed(si, si->newspeed);
				si->newspeed = 0;
			} else {
				/* enable IR Receiver, disable IR Transmitter */
				STISR = IrSR_IR_RECEIVE_ON | IrSR_XMODE_PULSE_1_6;
				/* enable STUART and receive interrupts */
				STIER = IER_UUE | IER_RLSE | IER_RAVIE | IER_RTIOE;
			}
			/* I'm hungry! */
			netif_wake_queue(dev);
		}
		break;
	}

	return IRQ_HANDLED;
}
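
/*
 * The receive DMA channel is started without DCMD_ENDIRQEN, so the handler
 * below is only expected to run on a DMA error; completed frames are picked
 * up through the FICP EIF/EOF path in pxa_irda_fir_irq() instead.
 */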
/* FIR Receive DMA interrupt handler */
static void pxa_irda_fir_dma_rx_irq(int channel, void *data)
{
	int dcsr = DCSR(channel);

	DCSR(channel) = dcsr & ~DCSR_RUN;

	printk(KERN_DEBUG "pxa_ir: fir rx dma bus error %#x\n", dcsr);
}

/* FIR Transmit DMA interrupt handler */
static void pxa_irda_fir_dma_tx_irq(int channel, void *data)
{
	struct net_device *dev = data;
	struct pxa_irda *si = netdev_priv(dev);
	int dcsr;

	dcsr = DCSR(channel);
	DCSR(channel) = dcsr & ~DCSR_RUN;

	if (dcsr & DCSR_ENDINTR) {
		si->stats.tx_packets++;
		si->stats.tx_bytes += si->dma_tx_buff_len;
	} else {
		si->stats.tx_errors++;
	}

	while (ICSR1 & ICSR1_TBY)
		cpu_relax();
	si->last_oscr = OSCR;

	/*
	 * HACK: It looks like the TBY bit is dropped too soon.
	 * Without this delay things break.
	 */
	udelay(120);

	if (si->newspeed) {
		pxa_irda_set_speed(si, si->newspeed);
		si->newspeed = 0;
	} else {
		int i = 64;

		ICCR0 = 0;
		pxa_irda_fir_dma_rx_start(si);
		while ((ICSR1 & ICSR1_RNE) && i--)
			(void)ICDR;
		ICCR0 = ICCR0_ITR | ICCR0_RXE;

		if (i < 0)
			printk(KERN_ERR "pxa_ir: cannot clear Rx FIFO!\n");
	}
	netif_wake_queue(dev);
}
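
/*
 * When the end-of-frame (or FIFO error) interrupt arrives, part of the
 * frame has already been written to dma_rx_buff by DMA; the byte count so
 * far is recovered from the DMA target address register, and any bytes
 * still sitting in the FICP receive FIFO are appended by programmed I/O
 * before the frame is handed to the stack.
 */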
/* EIF(Error in FIFO/End in Frame) handler for FIR */
static void pxa_irda_fir_irq_eif(struct pxa_irda *si, struct net_device *dev, int icsr0)
{
	unsigned int len, stat, data;

	/* Get the current data position. */
	len = DTADR(si->rxdma) - si->dma_rx_buff_phy;

	do {
		/* Read Status, and then Data. */
		stat = ICSR1;
		rmb();
		data = ICDR;

		if (stat & (ICSR1_CRE | ICSR1_ROR)) {
			si->stats.rx_errors++;
			if (stat & ICSR1_CRE) {
				printk(KERN_DEBUG "pxa_ir: fir receive CRC error\n");
				si->stats.rx_crc_errors++;
			}
			if (stat & ICSR1_ROR) {
				printk(KERN_DEBUG "pxa_ir: fir receive overrun\n");
				si->stats.rx_over_errors++;
			}
		} else {
			si->dma_rx_buff[len++] = data;
		}
		/* If we hit the end of frame, there's no point in continuing. */
		if (stat & ICSR1_EOF)
			break;
	} while (ICSR0 & ICSR0_EIF);

	if (stat & ICSR1_EOF) {
		/* end of frame. */
		struct sk_buff *skb;

		if (icsr0 & ICSR0_FRE) {
			printk(KERN_ERR "pxa_ir: dropping erroneous frame\n");
			si->stats.rx_dropped++;
			return;
		}

		skb = alloc_skb(len + 1, GFP_ATOMIC);
		if (!skb) {
			printk(KERN_ERR "pxa_ir: fir out of memory for receive skb\n");
			si->stats.rx_dropped++;
			return;
		}

		/* Align IP header to 20 bytes */
		skb_reserve(skb, 1);
		skb_copy_to_linear_data(skb, si->dma_rx_buff, len);
		skb_put(skb, len);

		/* Feed it to IrLAP */
		skb->dev = dev;
		skb_reset_mac_header(skb);
		skb->protocol = htons(ETH_P_IRDA);
		netif_rx(skb);

		si->stats.rx_packets++;
		si->stats.rx_bytes += len;
	}
}

/* FIR interrupt handler */
static irqreturn_t pxa_irda_fir_irq(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct pxa_irda *si = netdev_priv(dev);
	int icsr0, i = 64;

	/* stop RX DMA */
	DCSR(si->rxdma) &= ~DCSR_RUN;
	si->last_oscr = OSCR;
	icsr0 = ICSR0;

	if (icsr0 & (ICSR0_FRE | ICSR0_RAB)) {
		if (icsr0 & ICSR0_FRE) {
			printk(KERN_DEBUG "pxa_ir: fir receive frame error\n");
			si->stats.rx_frame_errors++;
		} else {
			printk(KERN_DEBUG "pxa_ir: fir receive abort\n");
			si->stats.rx_errors++;
		}
		ICSR0 = icsr0 & (ICSR0_FRE | ICSR0_RAB);
	}

	if (icsr0 & ICSR0_EIF) {
		/* An error in FIFO occurred, or there is an end of frame */
		pxa_irda_fir_irq_eif(si, dev, icsr0);
	}

	ICCR0 = 0;
	pxa_irda_fir_dma_rx_start(si);
	while ((ICSR1 & ICSR1_RNE) && i--)
		(void)ICDR;
	ICCR0 = ICCR0_ITR | ICCR0_RXE;

	if (i < 0)
		printk(KERN_ERR "pxa_ir: cannot clear Rx FIFO!\n");

	return IRQ_HANDLED;
}

/* hard_xmit interface of irda device */
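/*
 * Before a FIR transmission the minimum turnaround time requested by the
 * peer (irda_get_mtt(), in microseconds) is enforced by busy-waiting on
 * the OS timer: OSCR counts at 3.6864 MHz on the PXA2xx, so dividing the
 * elapsed tick count by four gives an approximate, slightly conservative
 * microsecond value.
 */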
static int pxa_irda_hard_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct pxa_irda *si = netdev_priv(dev);
	int speed = irda_get_next_speed(skb);

	/*
	 * Does this packet contain a request to change the interface
	 * speed?  If so, remember it until we complete the transmission
	 * of this frame.
	 */
	if (speed != si->speed && speed != -1)
		si->newspeed = speed;

	/*
	 * If this is an empty frame, we can bypass a lot.
	 */
	if (skb->len == 0) {
		if (si->newspeed) {
			si->newspeed = 0;
			pxa_irda_set_speed(si, speed);
		}
		dev_kfree_skb(skb);
		return 0;
	}

	netif_stop_queue(dev);

	if (!IS_FIR(si)) {
		si->tx_buff.data = si->tx_buff.head;
		si->tx_buff.len  = async_wrap_skb(skb, si->tx_buff.data, si->tx_buff.truesize);

		/* Disable STUART interrupts and switch to transmit mode. */
		STIER = 0;
		STISR = IrSR_IR_TRANSMIT_ON | IrSR_XMODE_PULSE_1_6;

		/* enable STUART and transmit interrupts */
		STIER = IER_UUE | IER_TIE;
	} else {
		unsigned long mtt = irda_get_mtt(skb);

		si->dma_tx_buff_len = skb->len;
		skb_copy_from_linear_data(skb, si->dma_tx_buff, skb->len);

		if (mtt)
			while ((unsigned)(OSCR - si->last_oscr)/4 < mtt)
				cpu_relax();

		/* stop RX DMA, disable FICP */
		DCSR(si->rxdma) &= ~DCSR_RUN;
		ICCR0 = 0;

		pxa_irda_fir_dma_tx_start(si);
		ICCR0 = ICCR0_ITR | ICCR0_TXE;
	}

	dev_kfree_skb(skb);
	dev->trans_start = jiffies;
	return 0;
}

static int pxa_irda_ioctl(struct net_device *dev, struct ifreq *ifreq, int cmd)
{
	struct if_irda_req *rq = (struct if_irda_req *)ifreq;
	struct pxa_irda *si = netdev_priv(dev);
	int ret;

	switch (cmd) {
	case SIOCSBANDWIDTH:
		ret = -EPERM;
		if (capable(CAP_NET_ADMIN)) {
			/*
			 * We are unable to set the speed if the
			 * device is not running.
			 */
			if (netif_running(dev)) {
				ret = pxa_irda_set_speed(si, rq->ifr_baudrate);
			} else {
				printk(KERN_INFO "pxa_ir: SIOCSBANDWIDTH: !netif_running\n");
				ret = 0;
			}
		}
		break;

	case SIOCSMEDIABUSY:
		ret = -EPERM;
		if (capable(CAP_NET_ADMIN)) {
			irda_device_set_media_busy(dev, TRUE);
			ret = 0;
		}
		break;

	case SIOCGRECEIVING:
		ret = 0;
		rq->ifr_receiving = IS_FIR(si) ? 0
					: si->rx_buff.state != OUTSIDE_FRAME;
		break;

	default:
		ret = -EOPNOTSUPP;
		break;
	}

	return ret;
}

static struct net_device_stats *pxa_irda_stats(struct net_device *dev)
{
	struct pxa_irda *si = netdev_priv(dev);
	return &si->stats;
}
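
/*
 * Bring the STUART, FICP and DMA request routing into a known state.
 * DRCMR(17) and DRCMR(18) map the FICP receive and transmit DMA requests
 * onto the channels allocated in pxa_irda_start().  Setting si->speed to
 * 4000000 before requesting 9600 baud forces pxa_irda_set_speed() through
 * its FIR-to-SIR branch, so the transceiver and clocks are fully
 * reprogrammed.
 */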
static void pxa_irda_startup(struct pxa_irda *si)
{
	/* Disable STUART interrupts */
	STIER = 0;
	/* enable STUART interrupt to the processor */
	STMCR = MCR_OUT2;
	/* configure SIR frame format: StartBit - Data 7 ... Data 0 - Stop Bit */
	STLCR = LCR_WLS0 | LCR_WLS1;
	/* enable FIFO, we use FIFO to improve performance */
	STFCR = FCR_TRFIFOE | FCR_ITL_32;

	/* disable FICP */
	ICCR0 = 0;
	/* configure FICP ICCR2 */
	ICCR2 = ICCR2_TXP | ICCR2_TRIG_32;

	/* configure DMAC */
	DRCMR(17) = si->rxdma | DRCMR_MAPVLD;
	DRCMR(18) = si->txdma | DRCMR_MAPVLD;

	/* force SIR reinitialization */
	si->speed = 4000000;
	pxa_irda_set_speed(si, 9600);

	printk(KERN_DEBUG "pxa_ir: irda startup\n");
}

static void pxa_irda_shutdown(struct pxa_irda *si)
{
	unsigned long flags;

	local_irq_save(flags);

	/* disable STUART and interrupt */
	STIER = 0;
	/* disable STUART SIR mode */
	STISR = 0;

	/* disable DMA */
	DCSR(si->txdma) &= ~DCSR_RUN;
	DCSR(si->rxdma) &= ~DCSR_RUN;
	/* disable FICP */
	ICCR0 = 0;

	/* disable the STUART or FICP clocks */
	pxa_irda_disable_clk(si);

	DRCMR(17) = 0;
	DRCMR(18) = 0;

	local_irq_restore(flags);

	/* power off board transceiver */
	si->pdata->transceiver_mode(si->dev, IR_OFF);

	printk(KERN_DEBUG "pxa_ir: irda shutdown\n");
}
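
/*
 * net_device open: claim the STUART and ICP interrupts and two DMA
 * channels, allocate the coherent FIR DMA buffers, program the hardware
 * for 9600 baud SIR and open an IrLAP instance.  Each failure point
 * unwinds everything acquired before it via the err_* labels below.
 */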
static int pxa_irda_start(struct net_device *dev)
{
	struct pxa_irda *si = netdev_priv(dev);
	int err;

	si->speed = 9600;

	err = request_irq(IRQ_STUART, pxa_irda_sir_irq, 0, dev->name, dev);
	if (err)
		goto err_irq1;

	err = request_irq(IRQ_ICP, pxa_irda_fir_irq, 0, dev->name, dev);
	if (err)
		goto err_irq2;

	/*
	 * The interrupt must remain disabled for now.
	 */
	disable_irq(IRQ_STUART);
	disable_irq(IRQ_ICP);

	err = -EBUSY;
	si->rxdma = pxa_request_dma("FICP_RX", DMA_PRIO_LOW, pxa_irda_fir_dma_rx_irq, dev);
	if (si->rxdma < 0)
		goto err_rx_dma;

	si->txdma = pxa_request_dma("FICP_TX", DMA_PRIO_LOW, pxa_irda_fir_dma_tx_irq, dev);
	if (si->txdma < 0)
		goto err_tx_dma;

	err = -ENOMEM;
	si->dma_rx_buff = dma_alloc_coherent(si->dev, IRDA_FRAME_SIZE_LIMIT,
					     &si->dma_rx_buff_phy, GFP_KERNEL);
	if (!si->dma_rx_buff)
		goto err_dma_rx_buff;

	si->dma_tx_buff = dma_alloc_coherent(si->dev, IRDA_FRAME_SIZE_LIMIT,
					     &si->dma_tx_buff_phy, GFP_KERNEL);
	if (!si->dma_tx_buff)
		goto err_dma_tx_buff;

	/* Setup the serial port for the initial speed. */
	pxa_irda_startup(si);

	/*
	 * Open a new IrLAP layer instance.
	 */
	si->irlap = irlap_open(dev, &si->qos, "pxa");
	err = -ENOMEM;
	if (!si->irlap)
		goto err_irlap;

	/*
	 * Now enable the interrupt and start the queue
	 */
	enable_irq(IRQ_STUART);
	enable_irq(IRQ_ICP);
	netif_start_queue(dev);

	printk(KERN_DEBUG "pxa_ir: irda driver opened\n");

	return 0;

err_irlap:
	pxa_irda_shutdown(si);
	dma_free_coherent(si->dev, IRDA_FRAME_SIZE_LIMIT, si->dma_tx_buff, si->dma_tx_buff_phy);
err_dma_tx_buff:
	dma_free_coherent(si->dev, IRDA_FRAME_SIZE_LIMIT, si->dma_rx_buff, si->dma_rx_buff_phy);
err_dma_rx_buff:
	pxa_free_dma(si->txdma);
err_tx_dma:
	pxa_free_dma(si->rxdma);
err_rx_dma:
	free_irq(IRQ_ICP, dev);
err_irq2:
	free_irq(IRQ_STUART, dev);
err_irq1:
	return err;
}

static int pxa_irda_stop(struct net_device *dev)
{
	struct pxa_irda *si = netdev_priv(dev);

	netif_stop_queue(dev);
	pxa_irda_shutdown(si);

	/* Stop IrLAP */
	if (si->irlap) {
		irlap_close(si->irlap);
		si->irlap = NULL;
	}

	free_irq(IRQ_STUART, dev);
	free_irq(IRQ_ICP, dev);

	pxa_free_dma(si->rxdma);
	pxa_free_dma(si->txdma);

	if (si->dma_tx_buff)
		dma_free_coherent(si->dev, IRDA_FRAME_SIZE_LIMIT, si->dma_tx_buff, si->dma_tx_buff_phy);
	if (si->dma_rx_buff)
		dma_free_coherent(si->dev, IRDA_FRAME_SIZE_LIMIT, si->dma_rx_buff, si->dma_rx_buff_phy);

	printk(KERN_DEBUG "pxa_ir: irda driver closed\n");
	return 0;
}

static int pxa_irda_suspend(struct platform_device *_dev, pm_message_t state)
{
	struct net_device *dev = platform_get_drvdata(_dev);
	struct pxa_irda *si;

	if (dev && netif_running(dev)) {
		si = netdev_priv(dev);
		netif_device_detach(dev);
		pxa_irda_shutdown(si);
	}

	return 0;
}

static int pxa_irda_resume(struct platform_device *_dev)
{
	struct net_device *dev = platform_get_drvdata(_dev);
	struct pxa_irda *si;

	if (dev && netif_running(dev)) {
		si = netdev_priv(dev);
		pxa_irda_startup(si);
		netif_device_attach(dev);
		netif_wake_queue(dev);
	}

	return 0;
}

static int pxa_irda_init_iobuf(iobuff_t *io, int size)
{
	io->head = kmalloc(size, GFP_KERNEL | GFP_DMA);
	if (io->head != NULL) {
		io->truesize = size;
		io->in_frame = FALSE;
		io->state    = OUTSIDE_FRAME;
		io->data     = io->head;
	}
	return io->head ? 0 : -ENOMEM;
}

static int pxa_irda_probe(struct platform_device *pdev)
{
	struct net_device *dev;
	struct pxa_irda *si;
	unsigned int baudrate_mask;
	int err;

	if (!pdev->dev.platform_data)
		return -ENODEV;

	err = request_mem_region(__PREG(STUART), 0x24, "IrDA") ? 0 : -EBUSY;
	if (err)
		goto err_mem_1;

	err = request_mem_region(__PREG(FICP), 0x1c, "IrDA") ? 0 : -EBUSY;
	if (err)
		goto err_mem_2;

	err = -ENOMEM;
	dev = alloc_irdadev(sizeof(struct pxa_irda));
	if (!dev)
		goto err_mem_3;

	si = netdev_priv(dev);
	si->dev = &pdev->dev;
	si->pdata = pdev->dev.platform_data;

	si->sir_clk = clk_get(&pdev->dev, "UARTCLK");
	si->fir_clk = clk_get(&pdev->dev, "FICPCLK");
	if (IS_ERR(si->sir_clk) || IS_ERR(si->fir_clk)) {
		err = PTR_ERR(IS_ERR(si->sir_clk) ? si->sir_clk : si->fir_clk);
		goto err_mem_4;
	}

	/*
	 * Initialise the SIR buffers
	 */
	err = pxa_irda_init_iobuf(&si->rx_buff, 14384);
	if (err)
		goto err_mem_4;
	err = pxa_irda_init_iobuf(&si->tx_buff, 4000);
	if (err)
		goto err_mem_5;

	if (si->pdata->startup)
		err = si->pdata->startup(si->dev);
	if (err)
		goto err_startup;

	dev->hard_start_xmit	= pxa_irda_hard_xmit;
	dev->open		= pxa_irda_start;
	dev->stop		= pxa_irda_stop;
	dev->do_ioctl		= pxa_irda_ioctl;
	dev->get_stats		= pxa_irda_stats;

	irda_init_max_qos_capabilies(&si->qos);

	baudrate_mask = 0;
	if (si->pdata->transceiver_cap & IR_SIRMODE)
		baudrate_mask |= IR_9600|IR_19200|IR_38400|IR_57600|IR_115200;
	if (si->pdata->transceiver_cap & IR_FIRMODE)
		baudrate_mask |= IR_4000000 << 8;

	si->qos.baud_rate.bits &= baudrate_mask;
	si->qos.min_turn_time.bits = 7;  /* 1ms or more */

	irda_qos_bits_to_value(&si->qos);

	err = register_netdev(dev);

	if (err == 0)
		dev_set_drvdata(&pdev->dev, dev);

	if (err) {
		if (si->pdata->shutdown)
			si->pdata->shutdown(si->dev);
err_startup:
		kfree(si->tx_buff.head);
err_mem_5:
		kfree(si->rx_buff.head);
err_mem_4:
		if (si->sir_clk && !IS_ERR(si->sir_clk))
			clk_put(si->sir_clk);
		if (si->fir_clk && !IS_ERR(si->fir_clk))
			clk_put(si->fir_clk);
		free_netdev(dev);
err_mem_3:
		release_mem_region(__PREG(FICP), 0x1c);
err_mem_2:
		release_mem_region(__PREG(STUART), 0x24);
	}
err_mem_1:
	return err;
}

static int pxa_irda_remove(struct platform_device *_dev)
{
	struct net_device *dev = platform_get_drvdata(_dev);

	if (dev) {
		struct pxa_irda *si = netdev_priv(dev);
		unregister_netdev(dev);
		if (si->pdata->shutdown)
			si->pdata->shutdown(si->dev);
		kfree(si->tx_buff.head);
		kfree(si->rx_buff.head);
		clk_put(si->fir_clk);
		clk_put(si->sir_clk);
		free_netdev(dev);
	}

	release_mem_region(__PREG(STUART), 0x24);
	release_mem_region(__PREG(FICP), 0x1c);

	return 0;
}

static struct platform_driver pxa_ir_driver = {
	.driver		= {
		.name	= "pxa2xx-ir",
		.owner	= THIS_MODULE,
	},
	.probe		= pxa_irda_probe,
	.remove		= pxa_irda_remove,
	.suspend	= pxa_irda_suspend,
	.resume		= pxa_irda_resume,
};

static int __init pxa_irda_init(void)
{
	return platform_driver_register(&pxa_ir_driver);
}

static void __exit pxa_irda_exit(void)
{
	platform_driver_unregister(&pxa_ir_driver);
}

module_init(pxa_irda_init);
module_exit(pxa_irda_exit);

MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:pxa2xx-ir");
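
/*
 * Usage sketch (not part of this file; assumed board-support code): a
 * machine file registers a "pxa2xx-ir" platform device whose platform_data
 * supplies the capabilities and callbacks used above, roughly:
 *
 *	static struct pxaficp_platform_data board_ficp_info = {
 *		.transceiver_cap  = IR_SIRMODE | IR_FIRMODE | IR_OFF,
 *		.transceiver_mode = board_irda_transceiver_mode,
 *	};
 *
 *	pxa_set_ficp_info(&board_ficp_info);
 *
 * board_irda_transceiver_mode() is a hypothetical board callback that
 * switches the IrDA transceiver between SIR, FIR and powered-off states.
 */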