sh_irda.c
/*
 * SuperH IrDA Driver
 *
 * Copyright (C) 2010 Renesas Solutions Corp.
 * Kuninori Morimoto <morimoto.kuninori@renesas.com>
 *
 * Based on sh_sir.c
 * Copyright (C) 2009 Renesas Solutions Corp.
 * Copyright 2006-2009 Analog Devices Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
/*
 * CAUTION
 *
 * This driver is intentionally simple; the following are not supported yet:
 *	- MIR/FIR support
 *	- DMA transfer support
 *	- FIFO mode support
 */
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/clk.h>
#include <net/irda/wrapper.h>
#include <net/irda/irda_device.h>

#define DRIVER_NAME "sh_irda"

#if defined(CONFIG_ARCH_SH7367) || defined(CONFIG_ARCH_SH7377)
#define __IRDARAM_LEN	0x13FF
#else
#define __IRDARAM_LEN	0x1039
#endif
#define IRTMR		0x1F00 /* Transfer mode */
#define IRCFR		0x1F02 /* Configuration */
#define IRCTR		0x1F04 /* IR control */
#define IRTFLR		0x1F20 /* Transmit frame length */
#define IRTCTR		0x1F22 /* Transmit control */
#define IRRFLR		0x1F40 /* Receive frame length */
#define IRRCTR		0x1F42 /* Receive control */
#define SIRISR		0x1F60 /* SIR-UART mode interrupt source */
#define SIRIMR		0x1F62 /* SIR-UART mode interrupt mask */
#define SIRICR		0x1F64 /* SIR-UART mode interrupt clear */
#define SIRBCR		0x1F68 /* SIR-UART mode baud rate count */
#define MFIRISR		0x1F70 /* MIR/FIR mode interrupt source */
#define MFIRIMR		0x1F72 /* MIR/FIR mode interrupt mask */
#define MFIRICR		0x1F74 /* MIR/FIR mode interrupt clear */
#define CRCCTR		0x1F80 /* CRC engine control */
#define CRCIR		0x1F86 /* CRC engine input data */
#define CRCCR		0x1F8A /* CRC engine calculation */
#define CRCOR		0x1F8E /* CRC engine output data */
#define FIFOCP		0x1FC0 /* FIFO current pointer */
#define FIFOFP		0x1FC2 /* FIFO follow pointer */
#define FIFORSMSK	0x1FC4 /* FIFO receive status mask */
#define FIFORSOR	0x1FC6 /* FIFO receive status OR */
#define FIFOSEL		0x1FC8 /* FIFO select */
#define FIFORS		0x1FCA /* FIFO receive status */
#define FIFORFL		0x1FCC /* FIFO receive frame length */
#define FIFORAMCP	0x1FCE /* FIFO RAM current pointer */
#define FIFORAMFP	0x1FD0 /* FIFO RAM follow pointer */
#define BIFCTL		0x1FD2 /* BUS interface control */
#define IRDARAM		0x0000 /* IrDA buffer RAM */
#define IRDARAM_LEN	__IRDARAM_LEN /* - 8/16/32 (read-only for 32) */
/* IRTMR */
#define TMD_MASK	(0x3 << 14) /* Transfer Mode */
#define TMD_SIR		(0x0 << 14)
#define TMD_MIR		(0x3 << 14)
#define TMD_FIR		(0x2 << 14)
#define FIFORIM		(1 << 8) /* FIFO receive interrupt mask */
#define MIM		(1 << 4) /* MIR/FIR Interrupt Mask */
#define SIM		(1 << 0) /* SIR Interrupt Mask */
#define xIM_MASK	(FIFORIM | MIM | SIM)

/* IRCFR */
#define RTO_SHIFT	8 /* shift for Receive Timeout */
#define RTO		(0x3 << RTO_SHIFT)

/* IRTCTR */
#define ARMOD		(1 << 15) /* Auto-Receive Mode */
#define TE		(1 <<  0) /* Transmit Enable */

/* IRRFLR */
#define RFL_MASK	(0x1FFF) /* mask for Receive Frame Length */

/* IRRCTR */
#define RE		(1 <<  0) /* Receive Enable */

/*
 * SIRISR,  SIRIMR,  SIRICR,
 * MFIRISR, MFIRIMR, MFIRICR
 */
#define FRE		(1 << 15) /* Frame Receive End */
#define TROV		(1 << 11) /* Transfer Area Overflow */
#define xIR_9		(1 << 9)
#define TOT		xIR_9 /* for SIR     Timeout */
#define ABTD		xIR_9 /* for MIR/FIR Abort Detection */
#define xIR_8		(1 << 8)
#define FER		xIR_8 /* for SIR     Framing Error */
#define CRCER		xIR_8 /* for MIR/FIR CRC error */
#define FTE		(1 << 7) /* Frame Transmit End */
#define xIR_MASK	(FRE | TROV | xIR_9 | xIR_8 | FTE)

/* SIRBCR */
#define BRC_MASK	(0x3F) /* mask for Baud Rate Count */

/* CRCCTR */
#define CRC_RST		(1 << 15) /* CRC Engine Reset */
#define CRC_CT_MASK	0x0FFF /* mask for CRC Engine Input Data Count */

/* CRCIR */
#define CRC_IN_MASK	0x0FFF /* mask for CRC Engine Input Data */
/************************************************************************
			enum / structure
 ************************************************************************/
enum sh_irda_mode {
	SH_IRDA_NONE = 0,
	SH_IRDA_SIR,
	SH_IRDA_MIR,
	SH_IRDA_FIR,
};

struct sh_irda_self;
struct sh_irda_xir_func {
	int (*xir_fre)	(struct sh_irda_self *self);
	int (*xir_trov)	(struct sh_irda_self *self);
	int (*xir_9)	(struct sh_irda_self *self);
	int (*xir_8)	(struct sh_irda_self *self);
	int (*xir_fte)	(struct sh_irda_self *self);
};

struct sh_irda_self {
	void __iomem		*membase;
	unsigned int		irq;
	struct clk		*clk;

	struct net_device	*ndev;

	struct irlap_cb		*irlap;
	struct qos_info		qos;

	iobuff_t		tx_buff;
	iobuff_t		rx_buff;

	enum sh_irda_mode	mode;
	spinlock_t		lock;

	struct sh_irda_xir_func	*xir_func;
};
/************************************************************************
			common function
 ************************************************************************/
static void sh_irda_write(struct sh_irda_self *self, u32 offset, u16 data)
{
	unsigned long flags;

	spin_lock_irqsave(&self->lock, flags);
	iowrite16(data, self->membase + offset);
	spin_unlock_irqrestore(&self->lock, flags);
}

static u16 sh_irda_read(struct sh_irda_self *self, u32 offset)
{
	unsigned long flags;
	u16 ret;

	spin_lock_irqsave(&self->lock, flags);
	ret = ioread16(self->membase + offset);
	spin_unlock_irqrestore(&self->lock, flags);

	return ret;
}

static void sh_irda_update_bits(struct sh_irda_self *self, u32 offset,
				u16 mask, u16 data)
{
	unsigned long flags;
	u16 old, new;

	spin_lock_irqsave(&self->lock, flags);
	old = ioread16(self->membase + offset);
	new = (old & ~mask) | data;
	if (old != new)
		iowrite16(new, self->membase + offset);
	spin_unlock_irqrestore(&self->lock, flags);
}
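/*
 * Usage sketch (illustration only, mirroring calls made later in this
 * file): sh_irda_update_bits() is a locked read-modify-write helper.
 * For example, sh_irda_rcv_ctrl() below enables the receiver with
 *
 *	sh_irda_update_bits(self, IRRCTR, RE, RE);
 *
 * and disables it with
 *
 *	sh_irda_update_bits(self, IRRCTR, RE, 0);
 *
 * leaving every other IRRCTR bit untouched.
 */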
/************************************************************************
			mode function
 ************************************************************************/
/*=====================================
 *
 *		common
 *
 *=====================================*/
static void sh_irda_rcv_ctrl(struct sh_irda_self *self, int enable)
{
	struct device *dev = &self->ndev->dev;

	sh_irda_update_bits(self, IRRCTR, RE, enable ? RE : 0);
	dev_dbg(dev, "recv %s\n", enable ? "enable" : "disable");
}

static int sh_irda_set_timeout(struct sh_irda_self *self, int interval)
{
	struct device *dev = &self->ndev->dev;

	if (SH_IRDA_SIR != self->mode)
		interval = 0;

	if (interval < 0 || interval > 2) {
		dev_err(dev, "unsupported timeout interval\n");
		return -EINVAL;
	}

	sh_irda_update_bits(self, IRCFR, RTO, interval << RTO_SHIFT);
	return 0;
}
static int sh_irda_set_baudrate(struct sh_irda_self *self, int baudrate)
{
	struct device *dev = &self->ndev->dev;
	u16 val;

	if (baudrate < 0)
		return 0;

	if (SH_IRDA_SIR != self->mode) {
		dev_err(dev, "baud rate can only be set in SIR mode\n");
		return -EINVAL;
	}

	/*
	 * Baud rate (bits/s) =
	 *	(48 MHz / 26) / ((baud rate counter value + 1) x 16)
	 */
	val = (48000000 / 26 / 16 / baudrate) - 1;
	dev_dbg(dev, "baudrate = %d, val = 0x%02x\n", baudrate, val);

	sh_irda_update_bits(self, SIRBCR, BRC_MASK, val);

	return 0;
}
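/*
 * Worked example (arithmetic check, not from a datasheet): for the
 * 9600 bit/s rate used throughout this driver,
 *
 *	val = (48000000 / 26 / 16 / 9600) - 1 = 11
 *
 * which gives (48 MHz / 26) / ((11 + 1) * 16) ~= 9615 bit/s, about
 * 0.2% above nominal; 11 also fits easily in BRC_MASK (0x3F).
 */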
static int xir_get_rcv_length(struct sh_irda_self *self)
{
	return RFL_MASK & sh_irda_read(self, IRRFLR);
}
/*=====================================
 *
 *		NONE MODE
 *
 *=====================================*/
static int xir_fre(struct sh_irda_self *self)
{
	struct device *dev = &self->ndev->dev;
	dev_err(dev, "none mode: frame recv\n");
	return 0;
}

static int xir_trov(struct sh_irda_self *self)
{
	struct device *dev = &self->ndev->dev;
	dev_err(dev, "none mode: buffer ram over\n");
	return 0;
}

static int xir_9(struct sh_irda_self *self)
{
	struct device *dev = &self->ndev->dev;
	dev_err(dev, "none mode: time over\n");
	return 0;
}

static int xir_8(struct sh_irda_self *self)
{
	struct device *dev = &self->ndev->dev;
	dev_err(dev, "none mode: framing error\n");
	return 0;
}

static int xir_fte(struct sh_irda_self *self)
{
	struct device *dev = &self->ndev->dev;
	dev_err(dev, "none mode: frame transmit end\n");
	return 0;
}

static struct sh_irda_xir_func xir_func = {
	.xir_fre	= xir_fre,
	.xir_trov	= xir_trov,
	.xir_9		= xir_9,
	.xir_8		= xir_8,
	.xir_fte	= xir_fte,
};
/*=====================================
 *
 *		MIR/FIR MODE
 *
 * MIR/FIR are not supported yet
 *=====================================*/
static struct sh_irda_xir_func mfir_func = {
	.xir_fre	= xir_fre,
	.xir_trov	= xir_trov,
	.xir_9		= xir_9,
	.xir_8		= xir_8,
	.xir_fte	= xir_fte,
};
/*=====================================
 *
 *		SIR MODE
 *
 *=====================================*/
static int sir_fre(struct sh_irda_self *self)
{
	struct device *dev = &self->ndev->dev;
	u16 data16;
	u8  *data = (u8 *)&data16;
	int len = xir_get_rcv_length(self);
	int i, j;

	if (len > IRDARAM_LEN)
		len = IRDARAM_LEN;

	dev_dbg(dev, "frame recv length = %d\n", len);

	/*
	 * IRDARAM is read 16 bits at a time: fetch a fresh word on each
	 * even byte offset, then unwrap both bytes.  Indexing data[j]
	 * depends on the CPU byte order matching the IRDARAM byte layout.
	 */
	for (i = 0; i < len; i++) {
		j = i % 2;
		if (!j)
			data16 = sh_irda_read(self, IRDARAM + i);

		async_unwrap_char(self->ndev, &self->ndev->stats,
				  &self->rx_buff, data[j]);
	}
	self->ndev->last_rx = jiffies;

	sh_irda_rcv_ctrl(self, 1);

	return 0;
}
static int sir_trov(struct sh_irda_self *self)
{
	struct device *dev = &self->ndev->dev;

	dev_err(dev, "buffer ram over\n");
	sh_irda_rcv_ctrl(self, 1);
	return 0;
}

static int sir_tot(struct sh_irda_self *self)
{
	struct device *dev = &self->ndev->dev;

	dev_err(dev, "time over\n");
	sh_irda_set_baudrate(self, 9600);
	sh_irda_rcv_ctrl(self, 1);
	return 0;
}

static int sir_fer(struct sh_irda_self *self)
{
	struct device *dev = &self->ndev->dev;

	dev_err(dev, "framing error\n");
	sh_irda_rcv_ctrl(self, 1);
	return 0;
}

static int sir_fte(struct sh_irda_self *self)
{
	struct device *dev = &self->ndev->dev;

	dev_dbg(dev, "frame transmit end\n");
	netif_wake_queue(self->ndev);
	return 0;
}

static struct sh_irda_xir_func sir_func = {
	.xir_fre	= sir_fre,
	.xir_trov	= sir_trov,
	.xir_9		= sir_tot,
	.xir_8		= sir_fer,
	.xir_fte	= sir_fte,
};
static void sh_irda_set_mode(struct sh_irda_self *self, enum sh_irda_mode mode)
{
	struct device *dev = &self->ndev->dev;
	struct sh_irda_xir_func	*func;
	const char *name;
	u16 data;

	switch (mode) {
	case SH_IRDA_SIR:
		name	= "SIR";
		data	= TMD_SIR;
		func	= &sir_func;
		break;
	case SH_IRDA_MIR:
		name	= "MIR";
		data	= TMD_MIR;
		func	= &mfir_func;
		break;
	case SH_IRDA_FIR:
		name	= "FIR";
		data	= TMD_FIR;
		func	= &mfir_func;
		break;
	default:
		name	= "NONE";
		data	= 0;
		func	= &xir_func;
		break;
	}

	self->mode = mode;
	self->xir_func = func;
	sh_irda_update_bits(self, IRTMR, TMD_MASK, data);

	dev_dbg(dev, "switch to %s mode\n", name);
}
/************************************************************************
			irq function
 ************************************************************************/
static void sh_irda_set_irq_mask(struct sh_irda_self *self)
{
	u16 tmr_hole;
	u16 xir_reg;

	/* set all masks */
	sh_irda_update_bits(self, IRTMR,   xIM_MASK, xIM_MASK);
	sh_irda_update_bits(self, SIRIMR,  xIR_MASK, xIR_MASK);
	sh_irda_update_bits(self, MFIRIMR, xIR_MASK, xIR_MASK);

	/* clear irq */
	sh_irda_update_bits(self, SIRICR,  xIR_MASK, xIR_MASK);
	sh_irda_update_bits(self, MFIRICR, xIR_MASK, xIR_MASK);

	switch (self->mode) {
	case SH_IRDA_SIR:
		tmr_hole	= SIM;
		xir_reg		= SIRIMR;
		break;
	case SH_IRDA_MIR:
	case SH_IRDA_FIR:
		tmr_hole	= MIM;
		xir_reg		= MFIRIMR;
		break;
	default:
		tmr_hole	= 0;
		xir_reg		= 0;
		break;
	}

	/* open the mask for the current mode only */
	if (xir_reg) {
		sh_irda_update_bits(self, IRTMR, tmr_hole, 0);
		sh_irda_update_bits(self, xir_reg, xIR_MASK, 0);
	}
}
static irqreturn_t sh_irda_irq(int irq, void *dev_id)
{
	struct sh_irda_self *self = dev_id;
	struct sh_irda_xir_func	*func = self->xir_func;
	u16 isr = sh_irda_read(self, SIRISR); /* only SIR interrupts are serviced (see CAUTION above) */

	/* clear irq */
	sh_irda_write(self, SIRICR, isr);

	if (isr & FRE)
		func->xir_fre(self);
	if (isr & TROV)
		func->xir_trov(self);
	if (isr & xIR_9)
		func->xir_9(self);
	if (isr & xIR_8)
		func->xir_8(self);
	if (isr & FTE)
		func->xir_fte(self);

	return IRQ_HANDLED;
}
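/*
 * Design note: the handler above stays mode-agnostic by dispatching
 * through self->xir_func, the per-mode ops table that
 * sh_irda_set_mode() installs.  Adding real MIR/FIR support would
 * largely amount to replacing the stub handlers in mfir_func and
 * servicing MFIRISR here as well.
 */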
/************************************************************************
			CRC function
 ************************************************************************/
static void sh_irda_crc_reset(struct sh_irda_self *self)
{
	sh_irda_write(self, CRCCTR, CRC_RST);
}

static void sh_irda_crc_add(struct sh_irda_self *self, u16 data)
{
	sh_irda_write(self, CRCIR, data & CRC_IN_MASK);
}

static u16 sh_irda_crc_cnt(struct sh_irda_self *self)
{
	return CRC_CT_MASK & sh_irda_read(self, CRCCTR);
}

static u16 sh_irda_crc_out(struct sh_irda_self *self)
{
	return sh_irda_read(self, CRCOR);
}

static int sh_irda_crc_init(struct sh_irda_self *self)
{
	struct device *dev = &self->ndev->dev;
	int ret = -EIO;
	u16 val;

	sh_irda_crc_reset(self);

	sh_irda_crc_add(self, 0xCC);
	sh_irda_crc_add(self, 0xF5);
	sh_irda_crc_add(self, 0xF1);
	sh_irda_crc_add(self, 0xA7);

	val = sh_irda_crc_cnt(self);
	if (4 != val) {
		dev_err(dev, "CRC count error %x\n", val);
		goto crc_init_out;
	}

	val = sh_irda_crc_out(self);
	if (0x51DF != val) {
		dev_err(dev, "CRC result error %x\n", val);
		goto crc_init_out;
	}

	ret = 0;

crc_init_out:
	sh_irda_crc_reset(self);
	return ret;
}
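/*
 * Note: sh_irda_crc_init() is a known-answer self-test of the CRC
 * engine, not part of the data path.  It feeds the fixed bytes
 * CC F5 F1 A7, then requires an input count of 4 and an output of
 * 0x51DF (the expected values hard-coded above).  sh_irda_open()
 * refuses to bring the interface up if the engine disagrees.
 */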
/************************************************************************
			iobuf function
 ************************************************************************/
static void sh_irda_remove_iobuf(struct sh_irda_self *self)
{
	kfree(self->rx_buff.head);

	self->tx_buff.head = NULL;
	self->tx_buff.data = NULL;
	self->rx_buff.head = NULL;
	self->rx_buff.data = NULL;
}

static int sh_irda_init_iobuf(struct sh_irda_self *self, int rxsize, int txsize)
{
	if (self->rx_buff.head ||
	    self->tx_buff.head) {
		dev_err(&self->ndev->dev, "iobuf already exists\n");
		return -EINVAL;
	}

	/* rx_buff */
	self->rx_buff.head = kmalloc(rxsize, GFP_KERNEL);
	if (!self->rx_buff.head)
		return -ENOMEM;

	self->rx_buff.truesize	= rxsize;
	self->rx_buff.in_frame	= FALSE;
	self->rx_buff.state	= OUTSIDE_FRAME;
	self->rx_buff.data	= self->rx_buff.head;

	/* tx_buff */
	self->tx_buff.head	= self->membase + IRDARAM;
	self->tx_buff.truesize	= IRDARAM_LEN;

	return 0;
}
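/*
 * Note the asymmetry above: rx_buff is an ordinary kmalloc'd buffer
 * that sir_fre() fills from IRDARAM, while tx_buff.head points
 * straight at the IRDARAM window itself, so async_wrap_skb() in
 * sh_irda_hard_xmit() writes the wrapped frame directly into device
 * memory.  tx_buff.truesize is therefore fixed at IRDARAM_LEN and the
 * txsize argument is currently unused.
 */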
/************************************************************************
			net_device_ops function
 ************************************************************************/
static int sh_irda_hard_xmit(struct sk_buff *skb, struct net_device *ndev)
{
	struct sh_irda_self *self = netdev_priv(ndev);
	struct device *dev = &self->ndev->dev;
	int speed = irda_get_next_speed(skb);
	int ret;

	dev_dbg(dev, "hard xmit\n");

	netif_stop_queue(ndev);
	sh_irda_rcv_ctrl(self, 0);

	ret = sh_irda_set_baudrate(self, speed);
	if (ret < 0)
		goto sh_irda_hard_xmit_end;

	self->tx_buff.len = 0;
	if (skb->len) {
		unsigned long flags;

		spin_lock_irqsave(&self->lock, flags);
		self->tx_buff.len = async_wrap_skb(skb,
						   self->tx_buff.head,
						   self->tx_buff.truesize);
		spin_unlock_irqrestore(&self->lock, flags);

		if (self->tx_buff.len > self->tx_buff.truesize)
			self->tx_buff.len = self->tx_buff.truesize;

		sh_irda_write(self, IRTFLR, self->tx_buff.len);
		sh_irda_write(self, IRTCTR, ARMOD | TE);
	} else
		goto sh_irda_hard_xmit_end;

	dev_kfree_skb(skb);

	return 0;

sh_irda_hard_xmit_end:
	sh_irda_set_baudrate(self, 9600);
	netif_wake_queue(self->ndev);
	sh_irda_rcv_ctrl(self, 1);
	dev_kfree_skb(skb);

	return ret;
}
static int sh_irda_ioctl(struct net_device *ndev, struct ifreq *ifreq, int cmd)
{
	/*
	 * FIXME
	 *
	 * This function is required by the IrDA framework,
	 * but there is nothing to do here yet.
	 */
	return 0;
}

static struct net_device_stats *sh_irda_stats(struct net_device *ndev)
{
	struct sh_irda_self *self = netdev_priv(ndev);

	return &self->ndev->stats;
}
static int sh_irda_open(struct net_device *ndev)
{
	struct sh_irda_self *self = netdev_priv(ndev);
	int err;

	clk_enable(self->clk);
	err = sh_irda_crc_init(self);
	if (err)
		goto open_err;

	sh_irda_set_mode(self, SH_IRDA_SIR);
	sh_irda_set_timeout(self, 2);
	sh_irda_set_baudrate(self, 9600);

	self->irlap = irlap_open(ndev, &self->qos, DRIVER_NAME);
	if (!self->irlap) {
		err = -ENODEV;
		goto open_err;
	}

	netif_start_queue(ndev);
	sh_irda_rcv_ctrl(self, 1);
	sh_irda_set_irq_mask(self);

	dev_info(&ndev->dev, "opened\n");

	return 0;

open_err:
	clk_disable(self->clk);

	return err;
}

static int sh_irda_stop(struct net_device *ndev)
{
	struct sh_irda_self *self = netdev_priv(ndev);

	/* Stop IrLAP */
	if (self->irlap) {
		irlap_close(self->irlap);
		self->irlap = NULL;
	}

	netif_stop_queue(ndev);
	dev_info(&ndev->dev, "stopped\n");

	return 0;
}
static const struct net_device_ops sh_irda_ndo = {
	.ndo_open		= sh_irda_open,
	.ndo_stop		= sh_irda_stop,
	.ndo_start_xmit		= sh_irda_hard_xmit,
	.ndo_do_ioctl		= sh_irda_ioctl,
	.ndo_get_stats		= sh_irda_stats,
};
/************************************************************************
			platform_driver function
 ************************************************************************/
static int __devinit sh_irda_probe(struct platform_device *pdev)
{
	struct net_device *ndev;
	struct sh_irda_self *self;
	struct resource *res;
	int irq;
	int err = -ENOMEM;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	irq = platform_get_irq(pdev, 0);
	if (!res || irq < 0) {
		dev_err(&pdev->dev, "Not enough platform resources.\n");
		goto exit;
	}

	ndev = alloc_irdadev(sizeof(*self));
	if (!ndev)
		goto exit;

	self = netdev_priv(ndev);
	self->membase = ioremap_nocache(res->start, resource_size(res));
	if (!self->membase) {
		err = -ENXIO;
		dev_err(&pdev->dev, "Unable to ioremap.\n");
		goto err_mem_1;
	}

	err = sh_irda_init_iobuf(self, IRDA_SKB_MAX_MTU, IRDA_SIR_MAX_FRAME);
	if (err)
		goto err_mem_2;

	self->clk = clk_get(&pdev->dev, NULL);
	if (IS_ERR(self->clk)) {
		dev_err(&pdev->dev, "cannot get irda clock\n");
		err = PTR_ERR(self->clk);
		goto err_mem_3;
	}

	irda_init_max_qos_capabilies(&self->qos);

	ndev->netdev_ops	= &sh_irda_ndo;
	ndev->irq		= irq;

	self->ndev			= ndev;
	self->irq			= irq;
	self->qos.baud_rate.bits	&= IR_9600; /* FIXME */
	self->qos.min_turn_time.bits	= 1; /* 10 ms or more */

	spin_lock_init(&self->lock);

	irda_qos_bits_to_value(&self->qos);

	err = register_netdev(ndev);
	if (err)
		goto err_mem_4;

	platform_set_drvdata(pdev, ndev);

	err = request_irq(irq, sh_irda_irq, IRQF_DISABLED, "sh_irda", self);
	if (err) {
		dev_warn(&pdev->dev, "Unable to attach sh_irda interrupt\n");
		goto err_mem_5;
	}

	dev_info(&pdev->dev, "SuperH IrDA probed\n");
	goto exit;

err_mem_5:
	unregister_netdev(ndev);
err_mem_4:
	clk_put(self->clk);
err_mem_3:
	sh_irda_remove_iobuf(self);
err_mem_2:
	iounmap(self->membase);
err_mem_1:
	free_netdev(ndev);
exit:
	return err;
}
static int __devexit sh_irda_remove(struct platform_device *pdev)
{
	struct net_device *ndev = platform_get_drvdata(pdev);
	struct sh_irda_self *self = netdev_priv(ndev);

	if (!self)
		return 0;

	unregister_netdev(ndev);
	free_irq(self->irq, self);
	clk_put(self->clk);
	sh_irda_remove_iobuf(self);
	iounmap(self->membase);
	free_netdev(ndev);
	platform_set_drvdata(pdev, NULL);

	return 0;
}
static struct platform_driver sh_irda_driver = {
	.probe	= sh_irda_probe,
	.remove	= __devexit_p(sh_irda_remove),
	.driver	= {
		.name	= DRIVER_NAME,
	},
};

static int __init sh_irda_init(void)
{
	return platform_driver_register(&sh_irda_driver);
}

static void __exit sh_irda_exit(void)
{
	platform_driver_unregister(&sh_irda_driver);
}

module_init(sh_irda_init);
module_exit(sh_irda_exit);

MODULE_AUTHOR("Kuninori Morimoto <morimoto.kuninori@renesas.com>");
MODULE_DESCRIPTION("SuperH IrDA driver");
MODULE_LICENSE("GPL");