pxaficp_ir.c 22 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893894895896897898899900901902903904905906907908909910911912913914915916917918919920921922923924925926927928929930931932933934935936937938939940941942943944945946947948949950951952953
  1. /*
  2. * linux/drivers/net/irda/pxaficp_ir.c
  3. *
  4. * Based on sa1100_ir.c by Russell King
  5. *
  6. * Changes copyright (C) 2003-2005 MontaVista Software, Inc.
  7. *
  8. * This program is free software; you can redistribute it and/or modify
  9. * it under the terms of the GNU General Public License version 2 as
  10. * published by the Free Software Foundation.
  11. *
  12. * Infra-red driver (SIR/FIR) for the PXA2xx embedded microprocessor
  13. *
  14. */
  15. #include <linux/module.h>
  16. #include <linux/netdevice.h>
  17. #include <linux/platform_device.h>
  18. #include <linux/clk.h>
  19. #include <net/irda/irda.h>
  20. #include <net/irda/irmod.h>
  21. #include <net/irda/wrapper.h>
  22. #include <net/irda/irda_device.h>
  23. #include <mach/dma.h>
  24. #include <mach/irda.h>
  25. #include <mach/hardware.h>
  26. #include <mach/pxa-regs.h>
  27. #include <mach/regs-uart.h>
#define FICP		__REG(0x40800000)  /* Start of FICP area */
#define ICCR0		__REG(0x40800000)  /* ICP Control Register 0 */
#define ICCR1		__REG(0x40800004)  /* ICP Control Register 1 */
#define ICCR2		__REG(0x40800008)  /* ICP Control Register 2 */
#define ICDR		__REG(0x4080000c)  /* ICP Data Register */
#define ICSR0		__REG(0x40800014)  /* ICP Status Register 0 */
#define ICSR1		__REG(0x40800018)  /* ICP Status Register 1 */

#define ICCR0_AME	(1 << 7)	/* Address match enable */
#define ICCR0_TIE	(1 << 6)	/* Transmit FIFO interrupt enable */
#define ICCR0_RIE	(1 << 5)	/* Receive FIFO interrupt enable */
#define ICCR0_RXE	(1 << 4)	/* Receive enable */
#define ICCR0_TXE	(1 << 3)	/* Transmit enable */
#define ICCR0_TUS	(1 << 2)	/* Transmit FIFO underrun select */
#define ICCR0_LBM	(1 << 1)	/* Loopback mode */
#define ICCR0_ITR	(1 << 0)	/* IrDA transmission */

#define ICCR2_RXP	(1 << 3)	/* Receive Pin Polarity select */
#define ICCR2_TXP	(1 << 2)	/* Transmit Pin Polarity select */
#define ICCR2_TRIG	(3 << 0)	/* Receive FIFO Trigger threshold */
#define ICCR2_TRIG_8	(0 << 0)	/* >= 8 bytes */
#define ICCR2_TRIG_16	(1 << 0)	/* >= 16 bytes */
#define ICCR2_TRIG_32	(2 << 0)	/* >= 32 bytes */

#ifdef CONFIG_PXA27x
#define ICSR0_EOC	(1 << 6)	/* DMA End of Descriptor Chain */
#endif
#define ICSR0_FRE	(1 << 5)	/* Framing error */
#define ICSR0_RFS	(1 << 4)	/* Receive FIFO service request */
#define ICSR0_TFS	(1 << 3)	/* Transmit FIFO service request */
#define ICSR0_RAB	(1 << 2)	/* Receiver abort */
#define ICSR0_TUR	(1 << 1)	/* Transmit FIFO underrun */
#define ICSR0_EIF	(1 << 0)	/* End/Error in FIFO */

/* NOTE(review): ROR is treated as an overrun by the EIF handler
 * (rx_over_errors), so "overrun" here, not "underrun". */
#define ICSR1_ROR	(1 << 6)	/* Receiver FIFO overrun */
#define ICSR1_CRE	(1 << 5)	/* CRC error */
#define ICSR1_EOF	(1 << 4)	/* End of frame */
#define ICSR1_TNF	(1 << 3)	/* Transmit FIFO not full */
#define ICSR1_RNE	(1 << 2)	/* Receive FIFO not empty */
#define ICSR1_TBY	(1 << 1)	/* Transmitter busy flag */
#define ICSR1_RSY	(1 << 0)	/* Receiver synchronized flag */

/* STUART Infrared Selection Register (STISR) bit values. */
#define IrSR_RXPL_NEG_IS_ZERO	(1<<4)
#define IrSR_RXPL_POS_IS_ZERO	0x0
#define IrSR_TXPL_NEG_IS_ZERO	(1<<3)
#define IrSR_TXPL_POS_IS_ZERO	0x0
#define IrSR_XMODE_PULSE_1_6	(1<<2)
#define IrSR_XMODE_PULSE_3_16	0x0
#define IrSR_RCVEIR_IR_MODE	(1<<1)
#define IrSR_RCVEIR_UART_MODE	0x0
#define IrSR_XMITIR_IR_MODE	(1<<0)
#define IrSR_XMITIR_UART_MODE	0x0

/* STISR value for SIR receive: IR decoder on, transmitter in UART mode. */
#define IrSR_IR_RECEIVE_ON (\
		IrSR_RXPL_NEG_IS_ZERO | \
		IrSR_TXPL_POS_IS_ZERO | \
		IrSR_XMODE_PULSE_3_16 | \
		IrSR_RCVEIR_IR_MODE   | \
		IrSR_XMITIR_UART_MODE)

/* STISR value for SIR transmit: IR encoder on, receiver in UART mode. */
#define IrSR_IR_TRANSMIT_ON (\
		IrSR_RXPL_NEG_IS_ZERO | \
		IrSR_TXPL_POS_IS_ZERO | \
		IrSR_XMODE_PULSE_3_16 | \
		IrSR_RCVEIR_UART_MODE | \
		IrSR_XMITIR_IR_MODE)
/* Per-port driver state for one PXA FICP/STUART IrDA interface. */
struct pxa_irda {
	int			speed;		/* current link speed (bps) */
	int			newspeed;	/* pending speed, applied after TX completes (0 = none) */
	unsigned long		last_oscr;	/* OSCR snapshot of last TX/RX activity */

	unsigned char		*dma_rx_buff;	/* FIR RX coherent DMA buffer (virtual) */
	unsigned char		*dma_tx_buff;	/* FIR TX coherent DMA buffer (virtual) */
	dma_addr_t		dma_rx_buff_phy;	/* bus address of dma_rx_buff */
	dma_addr_t		dma_tx_buff_phy;	/* bus address of dma_tx_buff */
	unsigned int		dma_tx_buff_len;	/* bytes staged in dma_tx_buff */
	int			txdma;		/* PXA DMA channel for FIR TX */
	int			rxdma;		/* PXA DMA channel for FIR RX */

	struct net_device_stats	stats;
	struct irlap_cb		*irlap;		/* IrLAP instance (NULL when closed) */
	struct qos_info		qos;

	iobuff_t		tx_buff;	/* SIR async-wrap staging buffer */
	iobuff_t		rx_buff;	/* SIR async-unwrap staging buffer */

	struct device		*dev;
	struct pxaficp_platform_data *pdata;	/* board hooks (transceiver_mode etc.) */
	struct clk		*fir_clk;	/* FICP clock */
	struct clk		*sir_clk;	/* STUART clock */
	struct clk		*cur_clk;	/* whichever of the two is enabled, or NULL */
};
  109. static inline void pxa_irda_disable_clk(struct pxa_irda *si)
  110. {
  111. if (si->cur_clk)
  112. clk_disable(si->cur_clk);
  113. si->cur_clk = NULL;
  114. }
  115. static inline void pxa_irda_enable_firclk(struct pxa_irda *si)
  116. {
  117. si->cur_clk = si->fir_clk;
  118. clk_enable(si->fir_clk);
  119. }
  120. static inline void pxa_irda_enable_sirclk(struct pxa_irda *si)
  121. {
  122. si->cur_clk = si->sir_clk;
  123. clk_enable(si->sir_clk);
  124. }
  125. #define IS_FIR(si) ((si)->speed >= 4000000)
  126. #define IRDA_FRAME_SIZE_LIMIT 2047
  127. inline static void pxa_irda_fir_dma_rx_start(struct pxa_irda *si)
  128. {
  129. DCSR(si->rxdma) = DCSR_NODESC;
  130. DSADR(si->rxdma) = __PREG(ICDR);
  131. DTADR(si->rxdma) = si->dma_rx_buff_phy;
  132. DCMD(si->rxdma) = DCMD_INCTRGADDR | DCMD_FLOWSRC | DCMD_WIDTH1 | DCMD_BURST32 | IRDA_FRAME_SIZE_LIMIT;
  133. DCSR(si->rxdma) |= DCSR_RUN;
  134. }
  135. inline static void pxa_irda_fir_dma_tx_start(struct pxa_irda *si)
  136. {
  137. DCSR(si->txdma) = DCSR_NODESC;
  138. DSADR(si->txdma) = si->dma_tx_buff_phy;
  139. DTADR(si->txdma) = __PREG(ICDR);
  140. DCMD(si->txdma) = DCMD_INCSRCADDR | DCMD_FLOWTRG | DCMD_ENDIRQEN | DCMD_WIDTH1 | DCMD_BURST32 | si->dma_tx_buff_len;
  141. DCSR(si->txdma) |= DCSR_RUN;
  142. }
/*
 * Set the IrDA communications speed.
 *
 * SIR rates (9600..115200) run over the STUART; 4 Mbps runs over the
 * FICP with DMA.  Interrupts are disabled around each transition so the
 * transceiver mode, clocks and registers switch atomically.  Returns 0
 * on success, -EINVAL for an unsupported rate.
 */
static int pxa_irda_set_speed(struct pxa_irda *si, int speed)
{
	unsigned long flags;
	unsigned int divisor;

	switch (speed) {
	case 9600: case 19200: case 38400:
	case 57600: case 115200:

		/* refer to PXA250/210 Developer's Manual 10-7 */
		/* BaudRate = 14.7456 MHz / (16*Divisor) */
		divisor = 14745600 / (16 * speed);

		local_irq_save(flags);

		if (IS_FIR(si)) {
			/* Currently in FIR mode: stop RX DMA ... */
			DCSR(si->rxdma) &= ~DCSR_RUN;
			/* ... disable FICP ... */
			ICCR0 = 0;
			pxa_irda_disable_clk(si);

			/* ... set board transceiver to SIR mode ... */
			si->pdata->transceiver_mode(si->dev, IR_SIRMODE);

			/* ... and enable the STUART clock. */
			pxa_irda_enable_sirclk(si);
		}

		/* disable STUART first */
		STIER = 0;

		/* set DLAB to access the divisor latch (DLL & DLH) */
		STLCR |= LCR_DLAB;
		STDLL = divisor & 0xff;
		STDLH = divisor >> 8;
		STLCR &= ~LCR_DLAB;

		si->speed = speed;

		/* IR receiver on, then re-enable STUART receive interrupts */
		STISR = IrSR_IR_RECEIVE_ON | IrSR_XMODE_PULSE_1_6;
		STIER = IER_UUE | IER_RLSE | IER_RAVIE | IER_RTIOE;

		local_irq_restore(flags);
		break;

	case 4000000:
		local_irq_save(flags);

		/* disable STUART */
		STIER = 0;
		STISR = 0;
		pxa_irda_disable_clk(si);

		/* disable FICP first */
		ICCR0 = 0;

		/* set board transceiver to FIR mode */
		si->pdata->transceiver_mode(si->dev, IR_FIRMODE);

		/* enable the FICP clock */
		pxa_irda_enable_firclk(si);

		si->speed = speed;
		/* arm RX DMA before enabling the receiver */
		pxa_irda_fir_dma_rx_start(si);
		ICCR0 = ICCR0_ITR | ICCR0_RXE;

		local_irq_restore(flags);
		break;

	default:
		return -EINVAL;
	}

	return 0;
}
/*
 * SIR interrupt service routine (STUART IRQ).  Dispatches on the
 * interrupt identification register: line-status errors, received data,
 * and transmit-FIFO service requests.
 */
static irqreturn_t pxa_irda_sir_irq(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct pxa_irda *si = netdev_priv(dev);
	int iir, lsr, data;

	iir = STIIR;

	switch (iir & 0x0F) {
	case 0x06: /* Receiver Line Status */
		lsr = STLSR;
		/* Drain the FIFO while it reports at least one error byte. */
		while (lsr & LSR_FIFOE) {
			data = STRBR;
			if (lsr & (LSR_OE | LSR_PE | LSR_FE | LSR_BI)) {
				printk(KERN_DEBUG "pxa_ir: sir receiving error\n");
				si->stats.rx_errors++;
				if (lsr & LSR_FE)
					si->stats.rx_frame_errors++;
				if (lsr & LSR_OE)
					si->stats.rx_fifo_errors++;
			} else {
				/* Good byte: feed the SIR unwrapper. */
				si->stats.rx_bytes++;
				async_unwrap_char(dev, &si->stats, &si->rx_buff, data);
			}
			lsr = STLSR;
		}
		dev->last_rx = jiffies;
		si->last_oscr = OSCR;
		break;

	case 0x04: /* Received Data Available */
		/* fall through */

	case 0x0C: /* Character Timeout Indication */
		/* Pull bytes while Data Ready is set. */
		do {
			si->stats.rx_bytes++;
			async_unwrap_char(dev, &si->stats, &si->rx_buff, STRBR);
		} while (STLSR & LSR_DR);
		dev->last_rx = jiffies;
		si->last_oscr = OSCR;
		break;

	case 0x02: /* Transmit FIFO Data Request */
		/* Refill the TX FIFO from the wrapped frame buffer. */
		while ((si->tx_buff.len) && (STLSR & LSR_TDRQ)) {
			STTHR = *si->tx_buff.data++;
			si->tx_buff.len -= 1;
		}

		if (si->tx_buff.len == 0) {
			si->stats.tx_packets++;
			si->stats.tx_bytes += si->tx_buff.data - si->tx_buff.head;

			/* We need to ensure that the transmitter has finished. */
			while ((STLSR & LSR_TEMT) == 0)
				cpu_relax();
			si->last_oscr = OSCR;

			/*
			 * Ok, we've finished transmitting.  Now enable
			 * the receiver.  Sometimes we get a receive IRQ
			 * immediately after a transmit...
			 */
			if (si->newspeed) {
				pxa_irda_set_speed(si, si->newspeed);
				si->newspeed = 0;
			} else {
				/* enable IR Receiver, disable IR Transmitter */
				STISR = IrSR_IR_RECEIVE_ON | IrSR_XMODE_PULSE_1_6;
				/* enable STUART and receive interrupts */
				STIER = IER_UUE | IER_RLSE | IER_RAVIE | IER_RTIOE;
			}
			/* I'm hungry! */
			netif_wake_queue(dev);
		}
		break;
	}
	return IRQ_HANDLED;
}
  274. /* FIR Receive DMA interrupt handler */
  275. static void pxa_irda_fir_dma_rx_irq(int channel, void *data)
  276. {
  277. int dcsr = DCSR(channel);
  278. DCSR(channel) = dcsr & ~DCSR_RUN;
  279. printk(KERN_DEBUG "pxa_ir: fir rx dma bus error %#x\n", dcsr);
  280. }
/*
 * FIR Transmit DMA interrupt handler.  Runs when the TX DMA descriptor
 * finishes (or errors), accounts the frame, waits for the FICP
 * transmitter to drain, then either applies a pending speed change or
 * re-arms the receiver.
 */
static void pxa_irda_fir_dma_tx_irq(int channel, void *data)
{
	struct net_device *dev = data;
	struct pxa_irda *si = netdev_priv(dev);
	int dcsr;

	dcsr = DCSR(channel);
	DCSR(channel) = dcsr & ~DCSR_RUN;

	/* ENDINTR set = descriptor completed normally; otherwise count an error. */
	if (dcsr & DCSR_ENDINTR) {
		si->stats.tx_packets++;
		si->stats.tx_bytes += si->dma_tx_buff_len;
	} else {
		si->stats.tx_errors++;
	}

	/* Wait for the FICP transmitter to go idle before touching ICCR0. */
	while (ICSR1 & ICSR1_TBY)
		cpu_relax();
	si->last_oscr = OSCR;

	/*
	 * HACK: It looks like the TBY bit is dropped too soon.
	 * Without this delay things break.
	 */
	udelay(120);

	if (si->newspeed) {
		pxa_irda_set_speed(si, si->newspeed);
		si->newspeed = 0;
	} else {
		int i = 64;

		/* Disable FICP, re-arm RX DMA, and flush any stale RX bytes. */
		ICCR0 = 0;
		pxa_irda_fir_dma_rx_start(si);
		while ((ICSR1 & ICSR1_RNE) && i--)
			(void)ICDR;
		ICCR0 = ICCR0_ITR | ICCR0_RXE;

		if (i < 0)
			printk(KERN_ERR "pxa_ir: cannot clear Rx FIFO!\n");
	}
	netif_wake_queue(dev);
}
/*
 * EIF (Error in FIFO / End of Frame) handler for FIR receive.  Drains
 * the tail of the frame byte-by-byte from the RX FIFO (the bulk having
 * arrived via DMA), then hands a completed frame to the network stack.
 */
static void pxa_irda_fir_irq_eif(struct pxa_irda *si, struct net_device *dev, int icsr0)
{
	unsigned int len, stat, data;

	/* Get the current data position: bytes DMA already wrote. */
	len = DTADR(si->rxdma) - si->dma_rx_buff_phy;

	do {
		/* Read Status, and then Data.  The rmb() keeps that order. */
		stat = ICSR1;
		rmb();
		data = ICDR;

		if (stat & (ICSR1_CRE | ICSR1_ROR)) {
			si->stats.rx_errors++;
			if (stat & ICSR1_CRE) {
				printk(KERN_DEBUG "pxa_ir: fir receive CRC error\n");
				si->stats.rx_crc_errors++;
			}
			if (stat & ICSR1_ROR) {
				printk(KERN_DEBUG "pxa_ir: fir receive overrun\n");
				si->stats.rx_over_errors++;
			}
		} else {
			/* Good byte: append after the DMA'd portion. */
			si->dma_rx_buff[len++] = data;
		}
		/* If we hit the end of frame, there's no point in continuing. */
		if (stat & ICSR1_EOF)
			break;
	} while (ICSR0 & ICSR0_EIF);

	if (stat & ICSR1_EOF) {
		/* end of frame. */
		struct sk_buff *skb;

		if (icsr0 & ICSR0_FRE) {
			printk(KERN_ERR "pxa_ir: dropping erroneous frame\n");
			si->stats.rx_dropped++;
			return;
		}

		skb = alloc_skb(len+1,GFP_ATOMIC);
		if (!skb)  {
			printk(KERN_ERR "pxa_ir: fir out of memory for receive skb\n");
			si->stats.rx_dropped++;
			return;
		}

		/* Align IP header to 20 bytes */
		skb_reserve(skb, 1);
		skb_copy_to_linear_data(skb, si->dma_rx_buff, len);
		skb_put(skb, len);

		/* Feed it to IrLAP */
		skb->dev = dev;
		skb_reset_mac_header(skb);
		skb->protocol = htons(ETH_P_IRDA);
		netif_rx(skb);

		si->stats.rx_packets++;
		si->stats.rx_bytes += len;

		dev->last_rx = jiffies;
	}
}
/*
 * FIR interrupt handler (FICP IRQ).  Stops RX DMA, processes any
 * frame/abort errors and EIF completion, then flushes the RX FIFO and
 * re-arms DMA reception for the next frame.
 */
static irqreturn_t pxa_irda_fir_irq(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct pxa_irda *si = netdev_priv(dev);
	int icsr0, i = 64;

	/* stop RX DMA */
	DCSR(si->rxdma) &= ~DCSR_RUN;
	si->last_oscr = OSCR;
	icsr0 = ICSR0;

	if (icsr0 & (ICSR0_FRE | ICSR0_RAB)) {
		if (icsr0 & ICSR0_FRE) {
			printk(KERN_DEBUG "pxa_ir: fir receive frame error\n");
			si->stats.rx_frame_errors++;
		} else {
			printk(KERN_DEBUG "pxa_ir: fir receive abort\n");
			si->stats.rx_errors++;
		}
		/* Writing the bits back acknowledges them. */
		ICSR0 = icsr0 & (ICSR0_FRE | ICSR0_RAB);
	}

	if (icsr0 & ICSR0_EIF) {
		/* An error in FIFO occurred, or there is an end of frame */
		pxa_irda_fir_irq_eif(si, dev, icsr0);
	}

	/* Disable FICP, re-arm RX DMA, drain stale FIFO bytes, re-enable. */
	ICCR0 = 0;
	pxa_irda_fir_dma_rx_start(si);
	while ((ICSR1 & ICSR1_RNE) && i--)
		(void)ICDR;
	ICCR0 = ICCR0_ITR | ICCR0_RXE;

	if (i < 0)
		printk(KERN_ERR "pxa_ir: cannot clear Rx FIFO!\n");

	return IRQ_HANDLED;
}
/*
 * hard_xmit interface of irda device.  Wraps and queues one frame:
 * via the STUART in SIR mode, or via TX DMA in FIR mode.  The skb is
 * always consumed.
 */
static int pxa_irda_hard_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct pxa_irda *si = netdev_priv(dev);
	int speed = irda_get_next_speed(skb);

	/*
	 * Does this packet contain a request to change the interface
	 * speed?  If so, remember it until we complete the transmission
	 * of this frame.
	 */
	if (speed != si->speed && speed != -1)
		si->newspeed = speed;

	/*
	 * If this is an empty frame, we can bypass a lot.
	 */
	if (skb->len == 0) {
		if (si->newspeed) {
			si->newspeed = 0;
			pxa_irda_set_speed(si, speed);
		}
		dev_kfree_skb(skb);
		return 0;
	}

	netif_stop_queue(dev);

	if (!IS_FIR(si)) {
		/* SIR: async-wrap into tx_buff; the STUART TX IRQ drains it. */
		si->tx_buff.data = si->tx_buff.head;
		si->tx_buff.len  = async_wrap_skb(skb, si->tx_buff.data, si->tx_buff.truesize);

		/* Disable STUART interrupts and switch to transmit mode. */
		STIER = 0;
		STISR = IrSR_IR_TRANSMIT_ON | IrSR_XMODE_PULSE_1_6;

		/* enable STUART and transmit interrupts */
		STIER = IER_UUE | IER_TIE;
	} else {
		unsigned long mtt = irda_get_mtt(skb);

		si->dma_tx_buff_len = skb->len;
		skb_copy_from_linear_data(skb, si->dma_tx_buff, skb->len);

		/*
		 * Honour the minimum turnaround time: busy-wait on OSCR.
		 * NOTE(review): OSCR/4 is used as a microsecond count here —
		 * confirm against the OS timer clock rate.
		 */
		if (mtt)
			while ((unsigned)(OSCR - si->last_oscr)/4 < mtt)
				cpu_relax();

		/* stop RX DMA,  disable FICP */
		DCSR(si->rxdma) &= ~DCSR_RUN;
		ICCR0 = 0;

		pxa_irda_fir_dma_tx_start(si);
		ICCR0 = ICCR0_ITR | ICCR0_TXE;
	}

	dev_kfree_skb(skb);
	dev->trans_start = jiffies;
	return 0;
}
  456. static int pxa_irda_ioctl(struct net_device *dev, struct ifreq *ifreq, int cmd)
  457. {
  458. struct if_irda_req *rq = (struct if_irda_req *)ifreq;
  459. struct pxa_irda *si = netdev_priv(dev);
  460. int ret;
  461. switch (cmd) {
  462. case SIOCSBANDWIDTH:
  463. ret = -EPERM;
  464. if (capable(CAP_NET_ADMIN)) {
  465. /*
  466. * We are unable to set the speed if the
  467. * device is not running.
  468. */
  469. if (netif_running(dev)) {
  470. ret = pxa_irda_set_speed(si,
  471. rq->ifr_baudrate);
  472. } else {
  473. printk(KERN_INFO "pxa_ir: SIOCSBANDWIDTH: !netif_running\n");
  474. ret = 0;
  475. }
  476. }
  477. break;
  478. case SIOCSMEDIABUSY:
  479. ret = -EPERM;
  480. if (capable(CAP_NET_ADMIN)) {
  481. irda_device_set_media_busy(dev, TRUE);
  482. ret = 0;
  483. }
  484. break;
  485. case SIOCGRECEIVING:
  486. ret = 0;
  487. rq->ifr_receiving = IS_FIR(si) ? 0
  488. : si->rx_buff.state != OUTSIDE_FRAME;
  489. break;
  490. default:
  491. ret = -EOPNOTSUPP;
  492. break;
  493. }
  494. return ret;
  495. }
  496. static struct net_device_stats *pxa_irda_stats(struct net_device *dev)
  497. {
  498. struct pxa_irda *si = netdev_priv(dev);
  499. return &si->stats;
  500. }
/*
 * Bring the STUART and FICP to a known state and force the link to
 * 9600 baud SIR.  si->speed is primed to 4000000 first so that
 * pxa_irda_set_speed() takes its FIR->SIR transition path.
 */
static void pxa_irda_startup(struct pxa_irda *si)
{
	/* Disable STUART interrupts */
	STIER = 0;
	/* enable STUART interrupt to the processor */
	STMCR = MCR_OUT2;
	/* configure SIR frame format: StartBit - Data 7 ... Data 0 - Stop Bit */
	STLCR = LCR_WLS0 | LCR_WLS1;
	/* enable FIFO, we use FIFO to improve performance */
	STFCR = FCR_TRFIFOE | FCR_ITL_32;

	/* disable FICP */
	ICCR0 = 0;
	/* configure FICP ICCR2 */
	ICCR2 = ICCR2_TXP | ICCR2_TRIG_32;

	/* configure DMAC: map FICP RX/TX requests onto our channels */
	DRCMR(17) = si->rxdma | DRCMR_MAPVLD;
	DRCMR(18) = si->txdma | DRCMR_MAPVLD;

	/* force SIR reinitialization */
	si->speed = 4000000;
	pxa_irda_set_speed(si, 9600);

	printk(KERN_DEBUG "pxa_ir: irda startup\n");
}
/*
 * Quiesce the port: stop the STUART, both DMA channels and the FICP,
 * gate the clocks, unmap the DMA requests, and power off the board
 * transceiver.  Registers are touched with IRQs disabled.
 */
static void pxa_irda_shutdown(struct pxa_irda *si)
{
	unsigned long flags;

	local_irq_save(flags);

	/* disable STUART and interrupt */
	STIER = 0;
	/* disable STUART SIR mode */
	STISR = 0;

	/* disable DMA */
	DCSR(si->txdma) &= ~DCSR_RUN;
	DCSR(si->rxdma) &= ~DCSR_RUN;
	/* disable FICP */
	ICCR0 = 0;

	/* disable the STUART or FICP clocks */
	pxa_irda_disable_clk(si);

	/* unmap the FICP DMA requests */
	DRCMR(17) = 0;
	DRCMR(18) = 0;

	local_irq_restore(flags);

	/* power off board transceiver */
	si->pdata->transceiver_mode(si->dev, IR_OFF);

	printk(KERN_DEBUG "pxa_ir: irda shutdown\n");
}
/*
 * net_device open hook.  Acquires both IRQs, both DMA channels and the
 * coherent FIR buffers, initialises the hardware, opens an IrLAP
 * instance, then enables interrupts and the TX queue.  Resources are
 * released in reverse order via the goto-unwind chain on failure.
 */
static int pxa_irda_start(struct net_device *dev)
{
	struct pxa_irda *si = netdev_priv(dev);
	int err;

	si->speed = 9600;

	err = request_irq(IRQ_STUART, pxa_irda_sir_irq, 0, dev->name, dev);
	if (err)
		goto err_irq1;

	err = request_irq(IRQ_ICP, pxa_irda_fir_irq, 0, dev->name, dev);
	if (err)
		goto err_irq2;

	/*
	 * The interrupt must remain disabled for now.
	 */
	disable_irq(IRQ_STUART);
	disable_irq(IRQ_ICP);

	err = -EBUSY;
	si->rxdma = pxa_request_dma("FICP_RX",DMA_PRIO_LOW, pxa_irda_fir_dma_rx_irq, dev);
	if (si->rxdma < 0)
		goto err_rx_dma;

	si->txdma = pxa_request_dma("FICP_TX",DMA_PRIO_LOW, pxa_irda_fir_dma_tx_irq, dev);
	if (si->txdma < 0)
		goto err_tx_dma;

	err = -ENOMEM;
	si->dma_rx_buff = dma_alloc_coherent(si->dev, IRDA_FRAME_SIZE_LIMIT,
					     &si->dma_rx_buff_phy, GFP_KERNEL );
	if (!si->dma_rx_buff)
		goto err_dma_rx_buff;

	si->dma_tx_buff = dma_alloc_coherent(si->dev, IRDA_FRAME_SIZE_LIMIT,
					     &si->dma_tx_buff_phy, GFP_KERNEL );
	if (!si->dma_tx_buff)
		goto err_dma_tx_buff;

	/* Setup the serial port for the initial speed. */
	pxa_irda_startup(si);

	/*
	 * Open a new IrLAP layer instance.
	 */
	si->irlap = irlap_open(dev, &si->qos, "pxa");
	err = -ENOMEM;
	if (!si->irlap)
		goto err_irlap;

	/*
	 * Now enable the interrupt and start the queue
	 */
	enable_irq(IRQ_STUART);
	enable_irq(IRQ_ICP);
	netif_start_queue(dev);

	printk(KERN_DEBUG "pxa_ir: irda driver opened\n");

	return 0;

err_irlap:
	pxa_irda_shutdown(si);
	dma_free_coherent(si->dev, IRDA_FRAME_SIZE_LIMIT, si->dma_tx_buff, si->dma_tx_buff_phy);
err_dma_tx_buff:
	dma_free_coherent(si->dev, IRDA_FRAME_SIZE_LIMIT, si->dma_rx_buff, si->dma_rx_buff_phy);
err_dma_rx_buff:
	pxa_free_dma(si->txdma);
err_tx_dma:
	pxa_free_dma(si->rxdma);
err_rx_dma:
	free_irq(IRQ_ICP, dev);
err_irq2:
	free_irq(IRQ_STUART, dev);
err_irq1:
	return err;
}
  610. static int pxa_irda_stop(struct net_device *dev)
  611. {
  612. struct pxa_irda *si = netdev_priv(dev);
  613. netif_stop_queue(dev);
  614. pxa_irda_shutdown(si);
  615. /* Stop IrLAP */
  616. if (si->irlap) {
  617. irlap_close(si->irlap);
  618. si->irlap = NULL;
  619. }
  620. free_irq(IRQ_STUART, dev);
  621. free_irq(IRQ_ICP, dev);
  622. pxa_free_dma(si->rxdma);
  623. pxa_free_dma(si->txdma);
  624. if (si->dma_rx_buff)
  625. dma_free_coherent(si->dev, IRDA_FRAME_SIZE_LIMIT, si->dma_tx_buff, si->dma_tx_buff_phy);
  626. if (si->dma_tx_buff)
  627. dma_free_coherent(si->dev, IRDA_FRAME_SIZE_LIMIT, si->dma_rx_buff, si->dma_rx_buff_phy);
  628. printk(KERN_DEBUG "pxa_ir: irda driver closed\n");
  629. return 0;
  630. }
  631. static int pxa_irda_suspend(struct platform_device *_dev, pm_message_t state)
  632. {
  633. struct net_device *dev = platform_get_drvdata(_dev);
  634. struct pxa_irda *si;
  635. if (dev && netif_running(dev)) {
  636. si = netdev_priv(dev);
  637. netif_device_detach(dev);
  638. pxa_irda_shutdown(si);
  639. }
  640. return 0;
  641. }
  642. static int pxa_irda_resume(struct platform_device *_dev)
  643. {
  644. struct net_device *dev = platform_get_drvdata(_dev);
  645. struct pxa_irda *si;
  646. if (dev && netif_running(dev)) {
  647. si = netdev_priv(dev);
  648. pxa_irda_startup(si);
  649. netif_device_attach(dev);
  650. netif_wake_queue(dev);
  651. }
  652. return 0;
  653. }
  654. static int pxa_irda_init_iobuf(iobuff_t *io, int size)
  655. {
  656. io->head = kmalloc(size, GFP_KERNEL | GFP_DMA);
  657. if (io->head != NULL) {
  658. io->truesize = size;
  659. io->in_frame = FALSE;
  660. io->state = OUTSIDE_FRAME;
  661. io->data = io->head;
  662. }
  663. return io->head ? 0 : -ENOMEM;
  664. }
  665. static int pxa_irda_probe(struct platform_device *pdev)
  666. {
  667. struct net_device *dev;
  668. struct pxa_irda *si;
  669. unsigned int baudrate_mask;
  670. int err;
  671. if (!pdev->dev.platform_data)
  672. return -ENODEV;
  673. err = request_mem_region(__PREG(STUART), 0x24, "IrDA") ? 0 : -EBUSY;
  674. if (err)
  675. goto err_mem_1;
  676. err = request_mem_region(__PREG(FICP), 0x1c, "IrDA") ? 0 : -EBUSY;
  677. if (err)
  678. goto err_mem_2;
  679. dev = alloc_irdadev(sizeof(struct pxa_irda));
  680. if (!dev)
  681. goto err_mem_3;
  682. si = netdev_priv(dev);
  683. si->dev = &pdev->dev;
  684. si->pdata = pdev->dev.platform_data;
  685. si->sir_clk = clk_get(&pdev->dev, "UARTCLK");
  686. si->fir_clk = clk_get(&pdev->dev, "FICPCLK");
  687. if (IS_ERR(si->sir_clk) || IS_ERR(si->fir_clk)) {
  688. err = PTR_ERR(IS_ERR(si->sir_clk) ? si->sir_clk : si->fir_clk);
  689. goto err_mem_4;
  690. }
  691. /*
  692. * Initialise the SIR buffers
  693. */
  694. err = pxa_irda_init_iobuf(&si->rx_buff, 14384);
  695. if (err)
  696. goto err_mem_4;
  697. err = pxa_irda_init_iobuf(&si->tx_buff, 4000);
  698. if (err)
  699. goto err_mem_5;
  700. if (si->pdata->startup)
  701. err = si->pdata->startup(si->dev);
  702. if (err)
  703. goto err_startup;
  704. dev->hard_start_xmit = pxa_irda_hard_xmit;
  705. dev->open = pxa_irda_start;
  706. dev->stop = pxa_irda_stop;
  707. dev->do_ioctl = pxa_irda_ioctl;
  708. dev->get_stats = pxa_irda_stats;
  709. irda_init_max_qos_capabilies(&si->qos);
  710. baudrate_mask = 0;
  711. if (si->pdata->transceiver_cap & IR_SIRMODE)
  712. baudrate_mask |= IR_9600|IR_19200|IR_38400|IR_57600|IR_115200;
  713. if (si->pdata->transceiver_cap & IR_FIRMODE)
  714. baudrate_mask |= IR_4000000 << 8;
  715. si->qos.baud_rate.bits &= baudrate_mask;
  716. si->qos.min_turn_time.bits = 7; /* 1ms or more */
  717. irda_qos_bits_to_value(&si->qos);
  718. err = register_netdev(dev);
  719. if (err == 0)
  720. dev_set_drvdata(&pdev->dev, dev);
  721. if (err) {
  722. if (si->pdata->shutdown)
  723. si->pdata->shutdown(si->dev);
  724. err_startup:
  725. kfree(si->tx_buff.head);
  726. err_mem_5:
  727. kfree(si->rx_buff.head);
  728. err_mem_4:
  729. if (si->sir_clk && !IS_ERR(si->sir_clk))
  730. clk_put(si->sir_clk);
  731. if (si->fir_clk && !IS_ERR(si->fir_clk))
  732. clk_put(si->fir_clk);
  733. free_netdev(dev);
  734. err_mem_3:
  735. release_mem_region(__PREG(FICP), 0x1c);
  736. err_mem_2:
  737. release_mem_region(__PREG(STUART), 0x24);
  738. }
  739. err_mem_1:
  740. return err;
  741. }
/*
 * Platform remove: tear down in reverse of probe.  unregister_netdev()
 * runs first so that pxa_irda_stop() has already released IRQs, DMA
 * channels and buffers for a running interface before the board
 * shutdown hook and clocks are dropped.
 */
static int pxa_irda_remove(struct platform_device *_dev)
{
	struct net_device *dev = platform_get_drvdata(_dev);

	if (dev) {
		struct pxa_irda *si = netdev_priv(dev);
		unregister_netdev(dev);
		if (si->pdata->shutdown)
			si->pdata->shutdown(si->dev);
		kfree(si->tx_buff.head);
		kfree(si->rx_buff.head);
		clk_put(si->fir_clk);
		clk_put(si->sir_clk);
		free_netdev(dev);
	}

	release_mem_region(__PREG(STUART), 0x24);
	release_mem_region(__PREG(FICP), 0x1c);

	return 0;
}
/* Platform glue: binds this driver to the "pxa2xx-ir" platform device. */
static struct platform_driver pxa_ir_driver = {
	.driver		= {
		.name	= "pxa2xx-ir",
		.owner	= THIS_MODULE,
	},
	.probe		= pxa_irda_probe,
	.remove		= pxa_irda_remove,
	.suspend	= pxa_irda_suspend,
	.resume		= pxa_irda_resume,
};
/* Module init: register the platform driver. */
static int __init pxa_irda_init(void)
{
	return platform_driver_register(&pxa_ir_driver);
}

/* Module exit: unregister the platform driver. */
static void __exit pxa_irda_exit(void)
{
	platform_driver_unregister(&pxa_ir_driver);
}

module_init(pxa_irda_init);
module_exit(pxa_irda_exit);

MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:pxa2xx-ir");