sh_irda.c

/*
 * SuperH IrDA Driver
 *
 * Copyright (C) 2010 Renesas Solutions Corp.
 * Kuninori Morimoto <morimoto.kuninori@renesas.com>
 *
 * Based on sh_sir.c
 * Copyright (C) 2009 Renesas Solutions Corp.
 * Copyright 2006-2009 Analog Devices Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
/*
 * CAUTION
 *
 * This driver is intentionally simple, so the following features are
 * not supported yet:
 *	- MIR/FIR mode
 *	- DMA transfer
 *	- FIFO mode
 */
#include <linux/io.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/clk.h>
#include <net/irda/wrapper.h>
#include <net/irda/irda_device.h>

#define DRIVER_NAME "sh_irda"

#if defined(CONFIG_ARCH_SH7367) || defined(CONFIG_ARCH_SH7377)
#define __IRDARAM_LEN	0x13FF
#else
#define __IRDARAM_LEN	0x1039
#endif
#define IRTMR		0x1F00 /* Transfer mode */
#define IRCFR		0x1F02 /* Configuration */
#define IRCTR		0x1F04 /* IR control */
#define IRTFLR		0x1F20 /* Transmit frame length */
#define IRTCTR		0x1F22 /* Transmit control */
#define IRRFLR		0x1F40 /* Receive frame length */
#define IRRCTR		0x1F42 /* Receive control */
#define SIRISR		0x1F60 /* SIR-UART mode interrupt source */
#define SIRIMR		0x1F62 /* SIR-UART mode interrupt mask */
#define SIRICR		0x1F64 /* SIR-UART mode interrupt clear */
#define SIRBCR		0x1F68 /* SIR-UART mode baud rate count */
#define MFIRISR		0x1F70 /* MIR/FIR mode interrupt source */
#define MFIRIMR		0x1F72 /* MIR/FIR mode interrupt mask */
#define MFIRICR		0x1F74 /* MIR/FIR mode interrupt clear */
#define CRCCTR		0x1F80 /* CRC engine control */
#define CRCIR		0x1F86 /* CRC engine input data */
#define CRCCR		0x1F8A /* CRC engine calculation */
#define CRCOR		0x1F8E /* CRC engine output data */
#define FIFOCP		0x1FC0 /* FIFO current pointer */
#define FIFOFP		0x1FC2 /* FIFO follow pointer */
#define FIFORSMSK	0x1FC4 /* FIFO receive status mask */
#define FIFORSOR	0x1FC6 /* FIFO receive status OR */
#define FIFOSEL		0x1FC8 /* FIFO select */
#define FIFORS		0x1FCA /* FIFO receive status */
#define FIFORFL		0x1FCC /* FIFO receive frame length */
#define FIFORAMCP	0x1FCE /* FIFO RAM current pointer */
#define FIFORAMFP	0x1FD0 /* FIFO RAM follow pointer */
#define BIFCTL		0x1FD2 /* BUS interface control */
#define IRDARAM		0x0000 /* IrDA buffer RAM */
#define IRDARAM_LEN	__IRDARAM_LEN /* - 8/16/32 (read-only for 32) */
/* IRTMR */
#define TMD_MASK	(0x3 << 14) /* Transfer Mode */
#define TMD_SIR		(0x0 << 14)
#define TMD_MIR		(0x3 << 14)
#define TMD_FIR		(0x2 << 14)
#define FIFORIM		(1 << 8) /* FIFO receive interrupt mask */
#define MIM		(1 << 4) /* MIR/FIR Interrupt Mask */
#define SIM		(1 << 0) /* SIR Interrupt Mask */
#define xIM_MASK	(FIFORIM | MIM | SIM)

/* IRCFR */
#define RTO_SHIFT	8 /* shift for Receive Timeout */
#define RTO		(0x3 << RTO_SHIFT)

/* IRTCTR */
#define ARMOD		(1 << 15) /* Auto-Receive Mode */
#define TE		(1 <<  0) /* Transmit Enable */

/* IRRFLR */
#define RFL_MASK	(0x1FFF) /* mask for Receive Frame Length */

/* IRRCTR */
#define RE		(1 << 0) /* Receive Enable */

/*
 * SIRISR,  SIRIMR,  SIRICR,
 * MFIRISR, MFIRIMR, MFIRICR
 */
#define FRE		(1 << 15) /* Frame Receive End */
#define TROV		(1 << 11) /* Transfer Area Overflow */
#define xIR_9		(1 << 9)
#define TOT		xIR_9 /* for SIR: Timeout */
#define ABTD		xIR_9 /* for MIR/FIR: Abort Detection */
#define xIR_8		(1 << 8)
#define FER		xIR_8 /* for SIR: Framing Error */
#define CRCER		xIR_8 /* for MIR/FIR: CRC error */
#define FTE		(1 << 7) /* Frame Transmit End */
#define xIR_MASK	(FRE | TROV | xIR_9 | xIR_8 | FTE)

/* SIRBCR */
#define BRC_MASK	(0x3F) /* mask for Baud Rate Count */

/* CRCCTR */
#define CRC_RST		(1 << 15) /* CRC Engine Reset */
#define CRC_CT_MASK	0x0FFF    /* mask for CRC Engine Input Data Count */

/* CRCIR */
#define CRC_IN_MASK	0x0FFF    /* mask for CRC Engine Input Data */
/************************************************************************
			enum / structure
************************************************************************/
enum sh_irda_mode {
	SH_IRDA_NONE = 0,
	SH_IRDA_SIR,
	SH_IRDA_MIR,
	SH_IRDA_FIR,
};

struct sh_irda_self;
struct sh_irda_xir_func {
	int (*xir_fre)	(struct sh_irda_self *self);
	int (*xir_trov)	(struct sh_irda_self *self);
	int (*xir_9)	(struct sh_irda_self *self);
	int (*xir_8)	(struct sh_irda_self *self);
	int (*xir_fte)	(struct sh_irda_self *self);
};
struct sh_irda_self {
	void __iomem		*membase;
	unsigned int		 irq;
	struct clk		*clk;

	struct net_device	*ndev;

	struct irlap_cb		*irlap;
	struct qos_info		 qos;

	iobuff_t		 tx_buff;
	iobuff_t		 rx_buff;

	enum sh_irda_mode	 mode;
	spinlock_t		 lock;

	struct sh_irda_xir_func	*xir_func;
};
/************************************************************************
			common function
************************************************************************/
static void sh_irda_write(struct sh_irda_self *self, u32 offset, u16 data)
{
	unsigned long flags;

	spin_lock_irqsave(&self->lock, flags);
	iowrite16(data, self->membase + offset);
	spin_unlock_irqrestore(&self->lock, flags);
}

static u16 sh_irda_read(struct sh_irda_self *self, u32 offset)
{
	unsigned long flags;
	u16 ret;

	spin_lock_irqsave(&self->lock, flags);
	ret = ioread16(self->membase + offset);
	spin_unlock_irqrestore(&self->lock, flags);

	return ret;
}
static void sh_irda_update_bits(struct sh_irda_self *self, u32 offset,
				u16 mask, u16 data)
{
	unsigned long flags;
	u16 old, new;

	spin_lock_irqsave(&self->lock, flags);
	old = ioread16(self->membase + offset);
	new = (old & ~mask) | data;
	if (old != new)
		iowrite16(new, self->membase + offset);
	spin_unlock_irqrestore(&self->lock, flags);
}
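
/*
 * Usage sketch: sh_irda_update_bits() is a masked read-modify-write,
 * so only the bits covered by "mask" can change.  For example,
 * toggling just the receive-enable bit of IRRCTR (exactly what
 * sh_irda_rcv_ctrl() below does) leaves the other bits intact:
 *
 *	sh_irda_update_bits(self, IRRCTR, RE, RE);	// set RE only
 *	sh_irda_update_bits(self, IRRCTR, RE, 0);	// clear RE only
 */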
/************************************************************************
			mode function
************************************************************************/
/*=====================================
 *
 *		common
 *
 *=====================================*/
static void sh_irda_rcv_ctrl(struct sh_irda_self *self, int enable)
{
	struct device *dev = &self->ndev->dev;

	sh_irda_update_bits(self, IRRCTR, RE, enable ? RE : 0);
	dev_dbg(dev, "recv %s\n", enable ? "enable" : "disable");
}

static int sh_irda_set_timeout(struct sh_irda_self *self, int interval)
{
	struct device *dev = &self->ndev->dev;

	if (SH_IRDA_SIR != self->mode)
		interval = 0;

	if (interval < 0 || interval > 2) {
		dev_err(dev, "unsupported timeout interval\n");
		return -EINVAL;
	}

	sh_irda_update_bits(self, IRCFR, RTO, interval << RTO_SHIFT);
	return 0;
}
static int sh_irda_set_baudrate(struct sh_irda_self *self, int baudrate)
{
	struct device *dev = &self->ndev->dev;
	u16 val;

	if (baudrate < 0)
		return 0;

	if (SH_IRDA_SIR != self->mode) {
		dev_err(dev, "baudrate setting is only valid in SIR mode\n");
		return -EINVAL;
	}

	/*
	 * Baud rate (bits/s) =
	 *	(48 MHz / 26) / ((baud rate counter value + 1) * 16)
	 */
	val = (48000000 / 26 / 16 / baudrate) - 1;
	dev_dbg(dev, "baudrate = %d, val = 0x%02x\n", baudrate, val);
	sh_irda_update_bits(self, SIRBCR, BRC_MASK, val);

	return 0;
}
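
/*
 * Worked example for the formula above: at 9600 bps,
 *
 *	val = 48000000 / 26 / 16 / 9600 - 1 = 11
 *
 * which programs an actual rate of (48 MHz / 26) / ((11 + 1) * 16)
 * = ~9615 bps, about 0.16% fast of nominal.
 */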
static int sh_irda_get_rcv_length(struct sh_irda_self *self)
{
	return RFL_MASK & sh_irda_read(self, IRRFLR);
}
/*=====================================
 *
 *		NONE MODE
 *
 *=====================================*/
static int sh_irda_xir_fre(struct sh_irda_self *self)
{
	struct device *dev = &self->ndev->dev;

	dev_err(dev, "none mode: frame recv\n");
	return 0;
}

static int sh_irda_xir_trov(struct sh_irda_self *self)
{
	struct device *dev = &self->ndev->dev;

	dev_err(dev, "none mode: buffer ram over\n");
	return 0;
}

static int sh_irda_xir_9(struct sh_irda_self *self)
{
	struct device *dev = &self->ndev->dev;

	dev_err(dev, "none mode: time over\n");
	return 0;
}

static int sh_irda_xir_8(struct sh_irda_self *self)
{
	struct device *dev = &self->ndev->dev;

	dev_err(dev, "none mode: framing error\n");
	return 0;
}

static int sh_irda_xir_fte(struct sh_irda_self *self)
{
	struct device *dev = &self->ndev->dev;

	dev_err(dev, "none mode: frame transmit end\n");
	return 0;
}

static struct sh_irda_xir_func sh_irda_xir_func = {
	.xir_fre	= sh_irda_xir_fre,
	.xir_trov	= sh_irda_xir_trov,
	.xir_9		= sh_irda_xir_9,
	.xir_8		= sh_irda_xir_8,
	.xir_fte	= sh_irda_xir_fte,
};
/*=====================================
 *
 *		MIR/FIR MODE
 *
 * MIR/FIR are not supported yet; the NONE
 * mode stubs above serve as placeholders.
 *=====================================*/
static struct sh_irda_xir_func sh_irda_mfir_func = {
	.xir_fre	= sh_irda_xir_fre,
	.xir_trov	= sh_irda_xir_trov,
	.xir_9		= sh_irda_xir_9,
	.xir_8		= sh_irda_xir_8,
	.xir_fte	= sh_irda_xir_fte,
};
/*=====================================
 *
 *		SIR MODE
 *
 *=====================================*/
static int sh_irda_sir_fre(struct sh_irda_self *self)
{
	struct device *dev = &self->ndev->dev;
	u16 data16;
	u8  *data = (u8 *)&data16;
	int len = sh_irda_get_rcv_length(self);
	int i, j;

	if (len > IRDARAM_LEN)
		len = IRDARAM_LEN;

	dev_dbg(dev, "frame recv length = %d\n", len);

	/* IRDARAM is read 16 bits at a time, then unwrapped bytewise */
	for (i = 0; i < len; i++) {
		j = i % 2;
		if (!j)
			data16 = sh_irda_read(self, IRDARAM + i);

		async_unwrap_char(self->ndev, &self->ndev->stats,
				  &self->rx_buff, data[j]);
	}
	self->ndev->last_rx = jiffies;

	sh_irda_rcv_ctrl(self, 1);

	return 0;
}
static int sh_irda_sir_trov(struct sh_irda_self *self)
{
	struct device *dev = &self->ndev->dev;

	dev_err(dev, "buffer ram over\n");
	sh_irda_rcv_ctrl(self, 1);
	return 0;
}

static int sh_irda_sir_tot(struct sh_irda_self *self)
{
	struct device *dev = &self->ndev->dev;

	dev_err(dev, "time over\n");
	sh_irda_set_baudrate(self, 9600);
	sh_irda_rcv_ctrl(self, 1);
	return 0;
}

static int sh_irda_sir_fer(struct sh_irda_self *self)
{
	struct device *dev = &self->ndev->dev;

	dev_err(dev, "framing error\n");
	sh_irda_rcv_ctrl(self, 1);
	return 0;
}

static int sh_irda_sir_fte(struct sh_irda_self *self)
{
	struct device *dev = &self->ndev->dev;

	dev_dbg(dev, "frame transmit end\n");
	netif_wake_queue(self->ndev);
	return 0;
}

static struct sh_irda_xir_func sh_irda_sir_func = {
	.xir_fre	= sh_irda_sir_fre,
	.xir_trov	= sh_irda_sir_trov,
	.xir_9		= sh_irda_sir_tot,
	.xir_8		= sh_irda_sir_fer,
	.xir_fte	= sh_irda_sir_fte,
};
static void sh_irda_set_mode(struct sh_irda_self *self, enum sh_irda_mode mode)
{
	struct device *dev = &self->ndev->dev;
	struct sh_irda_xir_func	*func;
	const char *name;
	u16 data;

	switch (mode) {
	case SH_IRDA_SIR:
		name = "SIR";
		data = TMD_SIR;
		func = &sh_irda_sir_func;
		break;
	case SH_IRDA_MIR:
		name = "MIR";
		data = TMD_MIR;
		func = &sh_irda_mfir_func;
		break;
	case SH_IRDA_FIR:
		name = "FIR";
		data = TMD_FIR;
		func = &sh_irda_mfir_func;
		break;
	default:
		name = "NONE";
		data = 0;
		func = &sh_irda_xir_func;
		break;
	}

	self->mode = mode;
	self->xir_func = func;
	sh_irda_update_bits(self, IRTMR, TMD_MASK, data);

	dev_dbg(dev, "switch to %s mode\n", name);
}
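
/*
 * Dispatch sketch: sh_irda_set_mode() installs the per-mode handler
 * table, so a Frame Receive End interrupt taken in SIR mode runs
 *
 *	self->xir_func->xir_fre(self);	// -> sh_irda_sir_fre()
 *
 * while the same bit in NONE mode only logs an error through
 * sh_irda_xir_fre().  See sh_irda_irq() below.
 */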
/************************************************************************
			irq function
************************************************************************/
static void sh_irda_set_irq_mask(struct sh_irda_self *self)
{
	u16 tmr_hole;
	u16 xir_reg;

	/* mask all interrupts */
	sh_irda_update_bits(self, IRTMR,   xIM_MASK, xIM_MASK);
	sh_irda_update_bits(self, SIRIMR,  xIR_MASK, xIR_MASK);
	sh_irda_update_bits(self, MFIRIMR, xIR_MASK, xIR_MASK);

	/* clear pending irqs */
	sh_irda_update_bits(self, SIRICR,  xIR_MASK, xIR_MASK);
	sh_irda_update_bits(self, MFIRICR, xIR_MASK, xIR_MASK);

	switch (self->mode) {
	case SH_IRDA_SIR:
		tmr_hole	= SIM;
		xir_reg		= SIRIMR;
		break;
	case SH_IRDA_MIR:
	case SH_IRDA_FIR:
		tmr_hole	= MIM;
		xir_reg		= MFIRIMR;
		break;
	default:
		tmr_hole	= 0;
		xir_reg		= 0;
		break;
	}

	/* unmask for the current mode */
	if (xir_reg) {
		sh_irda_update_bits(self, IRTMR, tmr_hole, 0);
		sh_irda_update_bits(self, xir_reg, xIR_MASK, 0);
	}
}
static irqreturn_t sh_irda_irq(int irq, void *dev_id)
{
	struct sh_irda_self *self = dev_id;
	struct sh_irda_xir_func *func = self->xir_func;
	u16 isr = sh_irda_read(self, SIRISR);

	/* clear irq */
	sh_irda_write(self, SIRICR, isr);

	if (isr & FRE)
		func->xir_fre(self);
	if (isr & TROV)
		func->xir_trov(self);
	if (isr & xIR_9)
		func->xir_9(self);
	if (isr & xIR_8)
		func->xir_8(self);
	if (isr & FTE)
		func->xir_fte(self);

	return IRQ_HANDLED;
}
/************************************************************************
			CRC function
************************************************************************/
static void sh_irda_crc_reset(struct sh_irda_self *self)
{
	sh_irda_write(self, CRCCTR, CRC_RST);
}

static void sh_irda_crc_add(struct sh_irda_self *self, u16 data)
{
	sh_irda_write(self, CRCIR, data & CRC_IN_MASK);
}

static u16 sh_irda_crc_cnt(struct sh_irda_self *self)
{
	return CRC_CT_MASK & sh_irda_read(self, CRCCTR);
}

static u16 sh_irda_crc_out(struct sh_irda_self *self)
{
	return sh_irda_read(self, CRCOR);
}
static int sh_irda_crc_init(struct sh_irda_self *self)
{
	struct device *dev = &self->ndev->dev;
	int ret = -EIO;
	u16 val;

	/* known-answer self test of the CRC engine */
	sh_irda_crc_reset(self);

	sh_irda_crc_add(self, 0xCC);
	sh_irda_crc_add(self, 0xF5);
	sh_irda_crc_add(self, 0xF1);
	sh_irda_crc_add(self, 0xA7);

	val = sh_irda_crc_cnt(self);
	if (4 != val) {
		dev_err(dev, "CRC count error %x\n", val);
		goto crc_init_out;
	}

	val = sh_irda_crc_out(self);
	if (0x51DF != val) {
		dev_err(dev, "CRC result error %x\n", val);
		goto crc_init_out;
	}

	ret = 0;

crc_init_out:
	sh_irda_crc_reset(self);
	return ret;
}
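
/*
 * Illustrative sketch only (nothing in this driver calls it, and the
 * helper name is hypothetical): how an arbitrary buffer could be
 * pushed through the CRC engine using the helpers above.
 */
static inline u16 sh_irda_crc_calc(struct sh_irda_self *self,
				   const u8 *buf, int len)
{
	int i;

	sh_irda_crc_reset(self);
	for (i = 0; i < len; i++)
		sh_irda_crc_add(self, buf[i]);

	return sh_irda_crc_out(self);
}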
/************************************************************************
			iobuf function
************************************************************************/
static void sh_irda_remove_iobuf(struct sh_irda_self *self)
{
	/*
	 * tx_buff.head points into the chip's IRDARAM, so only the
	 * kmalloc'ed rx buffer is freed here.
	 */
	kfree(self->rx_buff.head);

	self->tx_buff.head = NULL;
	self->tx_buff.data = NULL;
	self->rx_buff.head = NULL;
	self->rx_buff.data = NULL;
}
static int sh_irda_init_iobuf(struct sh_irda_self *self, int rxsize, int txsize)
{
	if (self->rx_buff.head ||
	    self->tx_buff.head) {
		dev_err(&self->ndev->dev, "iobuff already exists\n");
		return -EINVAL;
	}

	/* rx_buff */
	self->rx_buff.head = kmalloc(rxsize, GFP_KERNEL);
	if (!self->rx_buff.head)
		return -ENOMEM;

	self->rx_buff.truesize	= rxsize;
	self->rx_buff.in_frame	= FALSE;
	self->rx_buff.state	= OUTSIDE_FRAME;
	self->rx_buff.data	= self->rx_buff.head;

	/*
	 * tx_buff: wrapped frames are built directly in the chip's
	 * IRDARAM, so no separate allocation is needed (txsize unused).
	 */
	self->tx_buff.head	= self->membase + IRDARAM;
	self->tx_buff.truesize	= IRDARAM_LEN;

	return 0;
}
/************************************************************************
			net_device_ops function
************************************************************************/
static int sh_irda_hard_xmit(struct sk_buff *skb, struct net_device *ndev)
{
	struct sh_irda_self *self = netdev_priv(ndev);
	struct device *dev = &self->ndev->dev;
	int speed = irda_get_next_speed(skb);
	int ret;

	dev_dbg(dev, "hard xmit\n");

	netif_stop_queue(ndev);
	sh_irda_rcv_ctrl(self, 0);

	ret = sh_irda_set_baudrate(self, speed);
	if (ret < 0)
		goto sh_irda_hard_xmit_end;

	self->tx_buff.len = 0;
	if (skb->len) {
		unsigned long flags;

		spin_lock_irqsave(&self->lock, flags);
		self->tx_buff.len = async_wrap_skb(skb,
						   self->tx_buff.head,
						   self->tx_buff.truesize);
		spin_unlock_irqrestore(&self->lock, flags);

		if (self->tx_buff.len > self->tx_buff.truesize)
			self->tx_buff.len = self->tx_buff.truesize;

		sh_irda_write(self, IRTFLR, self->tx_buff.len);
		sh_irda_write(self, IRTCTR, ARMOD | TE);
	} else
		goto sh_irda_hard_xmit_end;

	dev_kfree_skb(skb);

	return 0;

sh_irda_hard_xmit_end:
	sh_irda_set_baudrate(self, 9600);
	netif_wake_queue(self->ndev);
	sh_irda_rcv_ctrl(self, 1);
	dev_kfree_skb(skb);

	return ret;
}
static int sh_irda_ioctl(struct net_device *ndev, struct ifreq *ifreq, int cmd)
{
	/*
	 * FIXME
	 *
	 * The IrDA framework requires this hook,
	 * but there is nothing to do here yet.
	 */
	return 0;
}

static struct net_device_stats *sh_irda_stats(struct net_device *ndev)
{
	struct sh_irda_self *self = netdev_priv(ndev);

	return &self->ndev->stats;
}
static int sh_irda_open(struct net_device *ndev)
{
	struct sh_irda_self *self = netdev_priv(ndev);
	int err;

	clk_enable(self->clk);
	err = sh_irda_crc_init(self);
	if (err)
		goto open_err;

	sh_irda_set_mode(self, SH_IRDA_SIR);
	sh_irda_set_timeout(self, 2);
	sh_irda_set_baudrate(self, 9600);

	self->irlap = irlap_open(ndev, &self->qos, DRIVER_NAME);
	if (!self->irlap) {
		err = -ENODEV;
		goto open_err;
	}

	netif_start_queue(ndev);
	sh_irda_rcv_ctrl(self, 1);
	sh_irda_set_irq_mask(self);

	dev_info(&ndev->dev, "opened\n");

	return 0;

open_err:
	clk_disable(self->clk);

	return err;
}
static int sh_irda_stop(struct net_device *ndev)
{
	struct sh_irda_self *self = netdev_priv(ndev);

	/* Stop IrLAP */
	if (self->irlap) {
		irlap_close(self->irlap);
		self->irlap = NULL;
	}

	netif_stop_queue(ndev);
	dev_info(&ndev->dev, "stopped\n");

	return 0;
}

static const struct net_device_ops sh_irda_ndo = {
	.ndo_open		= sh_irda_open,
	.ndo_stop		= sh_irda_stop,
	.ndo_start_xmit		= sh_irda_hard_xmit,
	.ndo_do_ioctl		= sh_irda_ioctl,
	.ndo_get_stats		= sh_irda_stats,
};
/************************************************************************
			platform_driver function
************************************************************************/
static int __devinit sh_irda_probe(struct platform_device *pdev)
{
	struct net_device *ndev;
	struct sh_irda_self *self;
	struct resource *res;
	int irq;
	int err = -ENOMEM;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	irq = platform_get_irq(pdev, 0);
	if (!res || irq < 0) {
		dev_err(&pdev->dev, "Not enough platform resources.\n");
		goto exit;
	}

	ndev = alloc_irdadev(sizeof(*self));
	if (!ndev)
		goto exit;

	self = netdev_priv(ndev);
	self->membase = ioremap_nocache(res->start, resource_size(res));
	if (!self->membase) {
		err = -ENXIO;
		dev_err(&pdev->dev, "Unable to ioremap.\n");
		goto err_mem_1;
	}

	err = sh_irda_init_iobuf(self, IRDA_SKB_MAX_MTU, IRDA_SIR_MAX_FRAME);
	if (err)
		goto err_mem_2;

	self->clk = clk_get(&pdev->dev, NULL);
	if (IS_ERR(self->clk)) {
		dev_err(&pdev->dev, "cannot get irda clock\n");
		err = PTR_ERR(self->clk);
		goto err_mem_3;
	}

	irda_init_max_qos_capabilies(&self->qos);

	ndev->netdev_ops	= &sh_irda_ndo;
	ndev->irq		= irq;

	self->ndev			= ndev;
	self->qos.baud_rate.bits	&= IR_9600; /* FIXME */
	self->qos.min_turn_time.bits	= 1; /* 10 ms or more */

	spin_lock_init(&self->lock);

	irda_qos_bits_to_value(&self->qos);

	err = register_netdev(ndev);
	if (err)
		goto err_mem_4;

	platform_set_drvdata(pdev, ndev);

	err = request_irq(irq, sh_irda_irq, IRQF_DISABLED, "sh_irda", self);
	if (err) {
		dev_warn(&pdev->dev, "Unable to attach sh_irda interrupt\n");
		unregister_netdev(ndev);
		goto err_mem_4;
	}

	dev_info(&pdev->dev, "SuperH IrDA probed\n");

	goto exit;

err_mem_4:
	clk_put(self->clk);
err_mem_3:
	sh_irda_remove_iobuf(self);
err_mem_2:
	iounmap(self->membase);
err_mem_1:
	free_netdev(ndev);
exit:
	return err;
}
static int __devexit sh_irda_remove(struct platform_device *pdev)
{
	struct net_device *ndev = platform_get_drvdata(pdev);
	struct sh_irda_self *self = netdev_priv(ndev);

	if (!self)
		return 0;

	unregister_netdev(ndev);
	free_irq(ndev->irq, self);
	clk_put(self->clk);
	sh_irda_remove_iobuf(self);
	iounmap(self->membase);
	free_netdev(ndev);
	platform_set_drvdata(pdev, NULL);

	return 0;
}

static struct platform_driver sh_irda_driver = {
	.probe	= sh_irda_probe,
	.remove	= __devexit_p(sh_irda_remove),
	.driver	= {
		.name	= DRIVER_NAME,
	},
};
static int __init sh_irda_init(void)
{
	return platform_driver_register(&sh_irda_driver);
}

static void __exit sh_irda_exit(void)
{
	platform_driver_unregister(&sh_irda_driver);
}

module_init(sh_irda_init);
module_exit(sh_irda_exit);

MODULE_AUTHOR("Kuninori Morimoto <morimoto.kuninori@renesas.com>");
MODULE_DESCRIPTION("SuperH IrDA driver");
MODULE_LICENSE("GPL");