nuvoton-cir.c
/*
 * Driver for Nuvoton Technology Corporation w83667hg/w83677hg-i CIR
 *
 * Copyright (C) 2010 Jarod Wilson <jarod@redhat.com>
 * Copyright (C) 2009 Nuvoton PS Team
 *
 * Special thanks to Nuvoton for providing hardware, spec sheets and
 * sample code upon which portions of this driver are based. Indirect
 * thanks also to Maxim Levitsky, whose ene_ir driver this driver is
 * modeled after.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 of the
 * License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
 * USA
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pnp.h>
#include <linux/io.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/input.h>
#include <media/ir-core.h>
#include <linux/pci_ids.h>

#include "nuvoton-cir.h"

static char *chip_id = "w836x7hg";

/* write val to config reg */
static inline void nvt_cr_write(struct nvt_dev *nvt, u8 val, u8 reg)
{
	outb(reg, nvt->cr_efir);
	outb(val, nvt->cr_efdr);
}

/* read val from config reg */
static inline u8 nvt_cr_read(struct nvt_dev *nvt, u8 reg)
{
	outb(reg, nvt->cr_efir);
	return inb(nvt->cr_efdr);
}

/* update config register bit without changing other bits */
static inline void nvt_set_reg_bit(struct nvt_dev *nvt, u8 val, u8 reg)
{
	u8 tmp = nvt_cr_read(nvt, reg) | val;
	nvt_cr_write(nvt, tmp, reg);
}

/* clear config register bit without changing other bits */
static inline void nvt_clear_reg_bit(struct nvt_dev *nvt, u8 val, u8 reg)
{
	u8 tmp = nvt_cr_read(nvt, reg) & ~val;
	nvt_cr_write(nvt, tmp, reg);
}

/* enter extended function mode */
static inline void nvt_efm_enable(struct nvt_dev *nvt)
{
	/* Enabling Extended Function Mode explicitly requires writing 2x */
	outb(EFER_EFM_ENABLE, nvt->cr_efir);
	outb(EFER_EFM_ENABLE, nvt->cr_efir);
}

/* exit extended function mode */
static inline void nvt_efm_disable(struct nvt_dev *nvt)
{
	outb(EFER_EFM_DISABLE, nvt->cr_efir);
}

/*
 * When you want to address a specific logical device, write its logical
 * device number to CR_LOGICAL_DEV_SEL, then enable/disable by writing
 * 0x1/0x0 respectively to CR_LOGICAL_DEV_EN.
 */
static inline void nvt_select_logical_dev(struct nvt_dev *nvt, u8 ldev)
{
	outb(CR_LOGICAL_DEV_SEL, nvt->cr_efir);
	outb(ldev, nvt->cr_efdr);
}

/* write val to cir config register */
static inline void nvt_cir_reg_write(struct nvt_dev *nvt, u8 val, u8 offset)
{
	outb(val, nvt->cir_addr + offset);
}

/* read val from cir config register */
static u8 nvt_cir_reg_read(struct nvt_dev *nvt, u8 offset)
{
	u8 val;

	val = inb(nvt->cir_addr + offset);

	return val;
}

/* write val to cir wake register */
static inline void nvt_cir_wake_reg_write(struct nvt_dev *nvt,
					  u8 val, u8 offset)
{
	outb(val, nvt->cir_wake_addr + offset);
}

/* read val from cir wake config register */
static u8 nvt_cir_wake_reg_read(struct nvt_dev *nvt, u8 offset)
{
	u8 val;

	val = inb(nvt->cir_wake_addr + offset);

	return val;
}

/* dump current cir register contents */
static void cir_dump_regs(struct nvt_dev *nvt)
{
	nvt_efm_enable(nvt);
	nvt_select_logical_dev(nvt, LOGICAL_DEV_CIR);

	printk("%s: Dump CIR logical device registers:\n", NVT_DRIVER_NAME);
	printk(" * CR CIR ACTIVE : 0x%x\n",
	       nvt_cr_read(nvt, CR_LOGICAL_DEV_EN));
	printk(" * CR CIR BASE ADDR: 0x%x\n",
	       (nvt_cr_read(nvt, CR_CIR_BASE_ADDR_HI) << 8) |
	       nvt_cr_read(nvt, CR_CIR_BASE_ADDR_LO));
	printk(" * CR CIR IRQ NUM: 0x%x\n",
	       nvt_cr_read(nvt, CR_CIR_IRQ_RSRC));

	nvt_efm_disable(nvt);

	printk("%s: Dump CIR registers:\n", NVT_DRIVER_NAME);
	printk(" * IRCON: 0x%x\n", nvt_cir_reg_read(nvt, CIR_IRCON));
	printk(" * IRSTS: 0x%x\n", nvt_cir_reg_read(nvt, CIR_IRSTS));
	printk(" * IREN: 0x%x\n", nvt_cir_reg_read(nvt, CIR_IREN));
	printk(" * RXFCONT: 0x%x\n", nvt_cir_reg_read(nvt, CIR_RXFCONT));
	printk(" * CP: 0x%x\n", nvt_cir_reg_read(nvt, CIR_CP));
	printk(" * CC: 0x%x\n", nvt_cir_reg_read(nvt, CIR_CC));
	printk(" * SLCH: 0x%x\n", nvt_cir_reg_read(nvt, CIR_SLCH));
	printk(" * SLCL: 0x%x\n", nvt_cir_reg_read(nvt, CIR_SLCL));
	printk(" * FIFOCON: 0x%x\n", nvt_cir_reg_read(nvt, CIR_FIFOCON));
	printk(" * IRFIFOSTS: 0x%x\n", nvt_cir_reg_read(nvt, CIR_IRFIFOSTS));
	printk(" * SRXFIFO: 0x%x\n", nvt_cir_reg_read(nvt, CIR_SRXFIFO));
	printk(" * TXFCONT: 0x%x\n", nvt_cir_reg_read(nvt, CIR_TXFCONT));
	printk(" * STXFIFO: 0x%x\n", nvt_cir_reg_read(nvt, CIR_STXFIFO));
	printk(" * FCCH: 0x%x\n", nvt_cir_reg_read(nvt, CIR_FCCH));
	printk(" * FCCL: 0x%x\n", nvt_cir_reg_read(nvt, CIR_FCCL));
	printk(" * IRFSM: 0x%x\n", nvt_cir_reg_read(nvt, CIR_IRFSM));
}

/* dump current cir wake register contents */
static void cir_wake_dump_regs(struct nvt_dev *nvt)
{
	u8 i, fifo_len;

	nvt_efm_enable(nvt);
	nvt_select_logical_dev(nvt, LOGICAL_DEV_CIR_WAKE);

	printk("%s: Dump CIR WAKE logical device registers:\n",
	       NVT_DRIVER_NAME);
	printk(" * CR CIR WAKE ACTIVE : 0x%x\n",
	       nvt_cr_read(nvt, CR_LOGICAL_DEV_EN));
	printk(" * CR CIR WAKE BASE ADDR: 0x%x\n",
	       (nvt_cr_read(nvt, CR_CIR_BASE_ADDR_HI) << 8) |
	       nvt_cr_read(nvt, CR_CIR_BASE_ADDR_LO));
	printk(" * CR CIR WAKE IRQ NUM: 0x%x\n",
	       nvt_cr_read(nvt, CR_CIR_IRQ_RSRC));

	nvt_efm_disable(nvt);

	printk("%s: Dump CIR WAKE registers\n", NVT_DRIVER_NAME);
	printk(" * IRCON: 0x%x\n",
	       nvt_cir_wake_reg_read(nvt, CIR_WAKE_IRCON));
	printk(" * IRSTS: 0x%x\n",
	       nvt_cir_wake_reg_read(nvt, CIR_WAKE_IRSTS));
	printk(" * IREN: 0x%x\n",
	       nvt_cir_wake_reg_read(nvt, CIR_WAKE_IREN));
	printk(" * FIFO CMP DEEP: 0x%x\n",
	       nvt_cir_wake_reg_read(nvt, CIR_WAKE_FIFO_CMP_DEEP));
	printk(" * FIFO CMP TOL: 0x%x\n",
	       nvt_cir_wake_reg_read(nvt, CIR_WAKE_FIFO_CMP_TOL));
	printk(" * FIFO COUNT: 0x%x\n",
	       nvt_cir_wake_reg_read(nvt, CIR_WAKE_FIFO_COUNT));
	printk(" * SLCH: 0x%x\n",
	       nvt_cir_wake_reg_read(nvt, CIR_WAKE_SLCH));
	printk(" * SLCL: 0x%x\n",
	       nvt_cir_wake_reg_read(nvt, CIR_WAKE_SLCL));
	printk(" * FIFOCON: 0x%x\n",
	       nvt_cir_wake_reg_read(nvt, CIR_WAKE_FIFOCON));
	printk(" * SRXFSTS: 0x%x\n",
	       nvt_cir_wake_reg_read(nvt, CIR_WAKE_SRXFSTS));
	printk(" * SAMPLE RX FIFO: 0x%x\n",
	       nvt_cir_wake_reg_read(nvt, CIR_WAKE_SAMPLE_RX_FIFO));
	printk(" * WR FIFO DATA: 0x%x\n",
	       nvt_cir_wake_reg_read(nvt, CIR_WAKE_WR_FIFO_DATA));
	printk(" * RD FIFO ONLY: 0x%x\n",
	       nvt_cir_wake_reg_read(nvt, CIR_WAKE_RD_FIFO_ONLY));
	printk(" * RD FIFO ONLY IDX: 0x%x\n",
	       nvt_cir_wake_reg_read(nvt, CIR_WAKE_RD_FIFO_ONLY_IDX));
	printk(" * FIFO IGNORE: 0x%x\n",
	       nvt_cir_wake_reg_read(nvt, CIR_WAKE_FIFO_IGNORE));
	printk(" * IRFSM: 0x%x\n",
	       nvt_cir_wake_reg_read(nvt, CIR_WAKE_IRFSM));

	fifo_len = nvt_cir_wake_reg_read(nvt, CIR_WAKE_FIFO_COUNT);
	printk("%s: Dump CIR WAKE FIFO (len %d)\n", NVT_DRIVER_NAME, fifo_len);
	printk("* Contents = ");
	for (i = 0; i < fifo_len; i++)
		printk("%02x ",
		       nvt_cir_wake_reg_read(nvt, CIR_WAKE_RD_FIFO_ONLY));
	printk("\n");
}

/* detect hardware features */
static int nvt_hw_detect(struct nvt_dev *nvt)
{
	unsigned long flags;
	u8 chip_major, chip_minor;
	int ret = 0;

	nvt_efm_enable(nvt);

	/* Check if we're wired for the alternate EFER setup */
	chip_major = nvt_cr_read(nvt, CR_CHIP_ID_HI);
	if (chip_major == 0xff) {
		nvt->cr_efir = CR_EFIR2;
		nvt->cr_efdr = CR_EFDR2;
		nvt_efm_enable(nvt);
		chip_major = nvt_cr_read(nvt, CR_CHIP_ID_HI);
	}

	chip_minor = nvt_cr_read(nvt, CR_CHIP_ID_LO);
	nvt_dbg("%s: chip id: 0x%02x 0x%02x", chip_id, chip_major, chip_minor);

	/* fail if this isn't one of the chip ids we know about */
	if (chip_major != CHIP_ID_HIGH ||
	    (chip_minor != CHIP_ID_LOW && chip_minor != CHIP_ID_LOW2))
		ret = -ENODEV;

	nvt_efm_disable(nvt);

	spin_lock_irqsave(&nvt->nvt_lock, flags);
	nvt->chip_major = chip_major;
	nvt->chip_minor = chip_minor;
	spin_unlock_irqrestore(&nvt->nvt_lock, flags);

	return ret;
}

static void nvt_cir_ldev_init(struct nvt_dev *nvt)
{
	u8 val;

	/* output pin selection (Pin95=CIRRX, Pin96=CIRTX1), WB enabled */
	val = nvt_cr_read(nvt, CR_OUTPUT_PIN_SEL);
	val &= OUTPUT_PIN_SEL_MASK;
	val |= (OUTPUT_ENABLE_CIR | OUTPUT_ENABLE_CIRWB);
	nvt_cr_write(nvt, val, CR_OUTPUT_PIN_SEL);

	/* Select CIR logical device and enable */
	nvt_select_logical_dev(nvt, LOGICAL_DEV_CIR);
	nvt_cr_write(nvt, LOGICAL_DEV_ENABLE, CR_LOGICAL_DEV_EN);

	nvt_cr_write(nvt, nvt->cir_addr >> 8, CR_CIR_BASE_ADDR_HI);
	nvt_cr_write(nvt, nvt->cir_addr & 0xff, CR_CIR_BASE_ADDR_LO);

	nvt_cr_write(nvt, nvt->cir_irq, CR_CIR_IRQ_RSRC);

	nvt_dbg("CIR initialized, base io port address: 0x%lx, irq: %d",
		nvt->cir_addr, nvt->cir_irq);
}

static void nvt_cir_wake_ldev_init(struct nvt_dev *nvt)
{
	/* Select ACPI logical device, enable it and CIR Wake */
	nvt_select_logical_dev(nvt, LOGICAL_DEV_ACPI);
	nvt_cr_write(nvt, LOGICAL_DEV_ENABLE, CR_LOGICAL_DEV_EN);

	/* Enable CIR Wake via PSOUT# (Pin60) */
	nvt_set_reg_bit(nvt, CIR_WAKE_ENABLE_BIT, CR_ACPI_CIR_WAKE);

	/* enable cir interrupt of mouse/keyboard IRQ event */
	nvt_set_reg_bit(nvt, CIR_INTR_MOUSE_IRQ_BIT, CR_ACPI_IRQ_EVENTS);

	/* enable pme interrupt of cir wakeup event */
	nvt_set_reg_bit(nvt, PME_INTR_CIR_PASS_BIT, CR_ACPI_IRQ_EVENTS2);

	/* Select CIR Wake logical device and enable */
	nvt_select_logical_dev(nvt, LOGICAL_DEV_CIR_WAKE);
	nvt_cr_write(nvt, LOGICAL_DEV_ENABLE, CR_LOGICAL_DEV_EN);

	nvt_cr_write(nvt, nvt->cir_wake_addr >> 8, CR_CIR_BASE_ADDR_HI);
	nvt_cr_write(nvt, nvt->cir_wake_addr & 0xff, CR_CIR_BASE_ADDR_LO);

	nvt_cr_write(nvt, nvt->cir_wake_irq, CR_CIR_IRQ_RSRC);

	nvt_dbg("CIR Wake initialized, base io port address: 0x%lx, irq: %d",
		nvt->cir_wake_addr, nvt->cir_wake_irq);
}

/* clear out the hardware's cir rx fifo */
static void nvt_clear_cir_fifo(struct nvt_dev *nvt)
{
	u8 val;

	val = nvt_cir_reg_read(nvt, CIR_FIFOCON);
	nvt_cir_reg_write(nvt, val | CIR_FIFOCON_RXFIFOCLR, CIR_FIFOCON);
}

/* clear out the hardware's cir wake rx fifo */
static void nvt_clear_cir_wake_fifo(struct nvt_dev *nvt)
{
	u8 val;

	val = nvt_cir_wake_reg_read(nvt, CIR_WAKE_FIFOCON);
	nvt_cir_wake_reg_write(nvt, val | CIR_WAKE_FIFOCON_RXFIFOCLR,
			       CIR_WAKE_FIFOCON);
}

/* clear out the hardware's cir tx fifo */
static void nvt_clear_tx_fifo(struct nvt_dev *nvt)
{
	u8 val;

	val = nvt_cir_reg_read(nvt, CIR_FIFOCON);
	nvt_cir_reg_write(nvt, val | CIR_FIFOCON_TXFIFOCLR, CIR_FIFOCON);
}

static void nvt_cir_regs_init(struct nvt_dev *nvt)
{
	/* set sample limit count (PE interrupt raised when reached) */
	nvt_cir_reg_write(nvt, CIR_RX_LIMIT_COUNT >> 8, CIR_SLCH);
	nvt_cir_reg_write(nvt, CIR_RX_LIMIT_COUNT & 0xff, CIR_SLCL);

	/* set fifo irq trigger levels */
	nvt_cir_reg_write(nvt, CIR_FIFOCON_TX_TRIGGER_LEV |
			  CIR_FIFOCON_RX_TRIGGER_LEV, CIR_FIFOCON);

	/*
	 * Enable TX and RX, specify carrier on = low, off = high, and set
	 * sample period (currently 50us)
	 */
	nvt_cir_reg_write(nvt, CIR_IRCON_TXEN | CIR_IRCON_RXEN |
			  CIR_IRCON_RXINV | CIR_IRCON_SAMPLE_PERIOD_SEL,
			  CIR_IRCON);

	/* clear hardware rx and tx fifos */
	nvt_clear_cir_fifo(nvt);
	nvt_clear_tx_fifo(nvt);

	/* clear any and all stray interrupts */
	nvt_cir_reg_write(nvt, 0xff, CIR_IRSTS);

	/* and finally, enable RX Trigger Level Reach and Packet End interrupts */
	nvt_cir_reg_write(nvt, CIR_IREN_RTR | CIR_IREN_PE, CIR_IREN);
}

static void nvt_cir_wake_regs_init(struct nvt_dev *nvt)
{
	/* set number of bytes needed for wake key comparison (default 67) */
	nvt_cir_wake_reg_write(nvt, CIR_WAKE_FIFO_LEN, CIR_WAKE_FIFO_CMP_DEEP);

	/* set tolerance/variance allowed per byte during wake compare */
	nvt_cir_wake_reg_write(nvt, CIR_WAKE_CMP_TOLERANCE,
			       CIR_WAKE_FIFO_CMP_TOL);

	/* set sample limit count (PE interrupt raised when reached) */
	nvt_cir_wake_reg_write(nvt, CIR_RX_LIMIT_COUNT >> 8, CIR_WAKE_SLCH);
	nvt_cir_wake_reg_write(nvt, CIR_RX_LIMIT_COUNT & 0xff, CIR_WAKE_SLCL);

	/* set cir wake fifo rx trigger level (currently 67) */
	nvt_cir_wake_reg_write(nvt, CIR_WAKE_FIFOCON_RX_TRIGGER_LEV,
			       CIR_WAKE_FIFOCON);

	/*
	 * Enable TX and RX, specify carrier on = low, off = high, and set
	 * sample period (currently 50us)
	 */
	nvt_cir_wake_reg_write(nvt, CIR_WAKE_IRCON_MODE0 | CIR_WAKE_IRCON_RXEN |
			       CIR_WAKE_IRCON_R | CIR_WAKE_IRCON_RXINV |
			       CIR_WAKE_IRCON_SAMPLE_PERIOD_SEL,
			       CIR_WAKE_IRCON);

	/* clear cir wake rx fifo */
	nvt_clear_cir_wake_fifo(nvt);

	/* clear any and all stray interrupts */
	nvt_cir_wake_reg_write(nvt, 0xff, CIR_WAKE_IRSTS);
}

static void nvt_enable_wake(struct nvt_dev *nvt)
{
	nvt_efm_enable(nvt);

	nvt_select_logical_dev(nvt, LOGICAL_DEV_ACPI);
	nvt_set_reg_bit(nvt, CIR_WAKE_ENABLE_BIT, CR_ACPI_CIR_WAKE);
	nvt_set_reg_bit(nvt, CIR_INTR_MOUSE_IRQ_BIT, CR_ACPI_IRQ_EVENTS);
	nvt_set_reg_bit(nvt, PME_INTR_CIR_PASS_BIT, CR_ACPI_IRQ_EVENTS2);

	nvt_select_logical_dev(nvt, LOGICAL_DEV_CIR_WAKE);
	nvt_cr_write(nvt, LOGICAL_DEV_ENABLE, CR_LOGICAL_DEV_EN);

	nvt_efm_disable(nvt);

	nvt_cir_wake_reg_write(nvt, CIR_WAKE_IRCON_MODE0 | CIR_WAKE_IRCON_RXEN |
			       CIR_WAKE_IRCON_R | CIR_WAKE_IRCON_RXINV |
			       CIR_WAKE_IRCON_SAMPLE_PERIOD_SEL, CIR_WAKE_IRCON);
	nvt_cir_wake_reg_write(nvt, 0xff, CIR_WAKE_IRSTS);
	nvt_cir_wake_reg_write(nvt, 0, CIR_WAKE_IREN);
}

/* rx carrier detect only works in learning mode, must be called w/nvt_lock */
static u32 nvt_rx_carrier_detect(struct nvt_dev *nvt)
{
	u32 count, carrier, duration = 0;
	int i;

	count = nvt_cir_reg_read(nvt, CIR_FCCL) |
		nvt_cir_reg_read(nvt, CIR_FCCH) << 8;

	for (i = 0; i < nvt->pkts; i++) {
		if (nvt->buf[i] & BUF_PULSE_BIT)
			duration += nvt->buf[i] & BUF_LEN_MASK;
	}

	duration *= SAMPLE_PERIOD;

	if (!count || !duration) {
		nvt_pr(KERN_NOTICE, "Unable to determine carrier! (c:%u, d:%u)",
		       count, duration);
		return 0;
	}

	carrier = (count * 1000000) / duration;

	if ((carrier > MAX_CARRIER) || (carrier < MIN_CARRIER))
		nvt_dbg("WTF? Carrier frequency out of range!");

	nvt_dbg("Carrier frequency: %u (count %u, duration %u)",
		carrier, count, duration);

	return carrier;
}
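
/*
 * Illustrative numbers only (not from the spec sheet): if the hardware
 * counted 1900 carrier cycles over 50000us worth of accumulated pulse
 * samples, nvt_rx_carrier_detect() above reports
 * (1900 * 1000000) / 50000 = 38000Hz, i.e. a typical 38kHz IR carrier.
 */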

/*
 * set carrier frequency
 *
 * set carrier on 2 registers: CP & CC
 * always set CP as 0x81
 * set CC by SPEC, CC = 3MHz/carrier - 1
 */
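
/*
 * Worked example (illustrative values): with the integer math used in
 * nvt_set_tx_carrier() below, a requested 38000Hz carrier gives
 * CC = 3000000 / 38000 - 1 = 77 (0x4d), and a 36000Hz carrier gives
 * CC = 3000000 / 36000 - 1 = 82 (0x52).
 */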

static int nvt_set_tx_carrier(void *data, u32 carrier)
{
	struct nvt_dev *nvt = data;
	u16 val;

	nvt_cir_reg_write(nvt, 1, CIR_CP);
	val = 3000000 / (carrier) - 1;
	nvt_cir_reg_write(nvt, val & 0xff, CIR_CC);

	nvt_dbg("cp: 0x%x cc: 0x%x\n",
		nvt_cir_reg_read(nvt, CIR_CP), nvt_cir_reg_read(nvt, CIR_CC));

	return 0;
}

/*
 * nvt_tx_ir
 *
 * 1) clean TX fifo first (handled by AP)
 * 2) copy data from user space
 * 3) disable RX interrupts, enable TX interrupts: TTR & TFU
 * 4) send 9 packets to TX FIFO to open TTR
 * in interrupt_handler:
 * 5) send all data out
 * go back to write():
 * 6) disable TX interrupts, re-enable RX interrupts
 *
 * The key problem of this function is that user space data may be larger
 * than the driver's data buf length. So nvt_tx_ir() only copies TX_BUF_LEN
 * bytes into buf, and tracks its progress through that buffer with
 * cur_buf_num. But the driver's buffer count may also be larger than
 * TXFCONT (0xff), so the interrupt handler has to keep setting TXFCONT
 * to 0xff until buf_count is less than 0xff.
 */

static int nvt_tx_ir(void *priv, int *txbuf, u32 n)
{
	struct nvt_dev *nvt = priv;
	unsigned long flags;
	size_t cur_count;
	unsigned int i;
	u8 iren;
	int ret;

	spin_lock_irqsave(&nvt->tx.lock, flags);

	if (n >= TX_BUF_LEN) {
		nvt->tx.buf_count = cur_count = TX_BUF_LEN;
		ret = TX_BUF_LEN;
	} else {
		nvt->tx.buf_count = cur_count = n;
		ret = n;
	}

	memcpy(nvt->tx.buf, txbuf, nvt->tx.buf_count);

	nvt->tx.cur_buf_num = 0;

	/* save currently enabled interrupts */
	iren = nvt_cir_reg_read(nvt, CIR_IREN);

	/* now disable all interrupts, save TFU & TTR */
	nvt_cir_reg_write(nvt, CIR_IREN_TFU | CIR_IREN_TTR, CIR_IREN);

	nvt->tx.tx_state = ST_TX_REPLY;

	nvt_cir_reg_write(nvt, CIR_FIFOCON_TX_TRIGGER_LEV_8 |
			  CIR_FIFOCON_RXFIFOCLR, CIR_FIFOCON);

	/* trigger TTR interrupt by writing out ones, (yes, it's ugly) */
	for (i = 0; i < 9; i++)
		nvt_cir_reg_write(nvt, 0x01, CIR_STXFIFO);

	spin_unlock_irqrestore(&nvt->tx.lock, flags);

	wait_event(nvt->tx.queue, nvt->tx.tx_state == ST_TX_REQUEST);

	spin_lock_irqsave(&nvt->tx.lock, flags);
	nvt->tx.tx_state = ST_TX_NONE;
	spin_unlock_irqrestore(&nvt->tx.lock, flags);

	/* restore enabled interrupts to prior state */
	nvt_cir_reg_write(nvt, iren, CIR_IREN);

	return ret;
}

/* dump contents of the last rx buffer we got from the hw rx fifo */
static void nvt_dump_rx_buf(struct nvt_dev *nvt)
{
	int i;

	printk("%s (len %d): ", __func__, nvt->pkts);
	for (i = 0; (i < nvt->pkts) && (i < RX_BUF_LEN); i++)
		printk("0x%02x ", nvt->buf[i]);
	printk("\n");
}

/*
 * Process raw data in rx driver buffer, store it in raw IR event kfifo,
 * trigger decode when appropriate.
 *
 * We get IR data samples one byte at a time. If the msb is set, it's a
 * pulse, otherwise it's a space. The lower 7 bits are the count of
 * SAMPLE_PERIOD (default 50us) intervals for that pulse/space. A discrete
 * signal is followed by a series of 0x7f packets, then either
 * 0x7<something> or 0x80 to signal more IR coming (repeats) or end of IR,
 * respectively. We store sample data in the raw event kfifo until we see
 * 0x7<something> (except f) or 0x80, at which time, we trigger a decode
 * operation.
 */
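
/*
 * For example (illustrative values only, applying the scheme above): a
 * sample byte of 0xa3 has the pulse bit (msb) set and a count of 0x23
 * (35), i.e. a 35 * 50us = 1750us pulse, while a plain 0x23 would be a
 * 1750us space. A run of 0x7f bytes extends the current pulse/space, and
 * a trailing 0x80 marks end of IR.
 */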

static void nvt_process_rx_ir_data(struct nvt_dev *nvt)
{
	struct ir_raw_event rawir = { .pulse = false, .duration = 0 };
	unsigned int count;
	u32 carrier;
	u8 sample;
	int i;

	nvt_dbg_verbose("%s firing", __func__);

	if (debug)
		nvt_dump_rx_buf(nvt);

	if (nvt->carrier_detect_enabled)
		carrier = nvt_rx_carrier_detect(nvt);

	count = nvt->pkts;
	nvt_dbg_verbose("Processing buffer of len %d", count);

	for (i = 0; i < count; i++) {
		nvt->pkts--;
		sample = nvt->buf[i];

		rawir.pulse = ((sample & BUF_PULSE_BIT) != 0);
		rawir.duration = (sample & BUF_LEN_MASK)
				 * SAMPLE_PERIOD * 1000;

		if ((sample & BUF_LEN_MASK) == BUF_LEN_MASK) {
			if (nvt->rawir.pulse == rawir.pulse)
				nvt->rawir.duration += rawir.duration;
			else {
				nvt->rawir.duration = rawir.duration;
				nvt->rawir.pulse = rawir.pulse;
			}
			continue;
		}

		rawir.duration += nvt->rawir.duration;

		nvt->rawir.duration = 0;
		nvt->rawir.pulse = rawir.pulse;

		if (sample == BUF_PULSE_BIT)
			rawir.pulse = false;

		if (rawir.duration) {
			nvt_dbg("Storing %s with duration %d",
				rawir.pulse ? "pulse" : "space",
				rawir.duration);

			ir_raw_event_store(nvt->rdev, &rawir);
		}

		/*
		 * BUF_PULSE_BIT indicates end of IR data, BUF_REPEAT_BYTE
		 * indicates end of IR signal, but new data incoming. In both
		 * cases, it means we're ready to call ir_raw_event_handle
		 */
		if (sample == BUF_PULSE_BIT || ((sample != BUF_LEN_MASK) &&
		    (sample & BUF_REPEAT_MASK) == BUF_REPEAT_BYTE))
			ir_raw_event_handle(nvt->rdev);
	}

	if (nvt->pkts) {
		nvt_dbg("Odd, pkts should be 0 now... (its %u)", nvt->pkts);
		nvt->pkts = 0;
	}

	nvt_dbg_verbose("%s done", __func__);
}

/* copy data from hardware rx fifo into driver buffer */
static void nvt_get_rx_ir_data(struct nvt_dev *nvt)
{
	unsigned long flags;
	u8 fifocount, val;
	unsigned int b_idx;
	int i;

	/* Get count of how many bytes to read from RX FIFO */
	fifocount = nvt_cir_reg_read(nvt, CIR_RXFCONT);
	/* if we get 0xff, probably means the logical dev is disabled */
	if (fifocount == 0xff)
		return;
	/* this would suggest a fifo overrun, not good... */
	else if (fifocount > RX_BUF_LEN) {
		nvt_pr(KERN_WARNING, "fifocount %d over fifo len (%d)!",
		       fifocount, RX_BUF_LEN);
		return;
	}

	nvt_dbg("attempting to fetch %u bytes from hw rx fifo", fifocount);

	spin_lock_irqsave(&nvt->nvt_lock, flags);

	b_idx = nvt->pkts;

	/* This should never happen, but let's check anyway... */
	if (b_idx + fifocount > RX_BUF_LEN) {
		nvt_process_rx_ir_data(nvt);
		b_idx = 0;
	}

	/* Read fifocount bytes from CIR Sample RX FIFO register */
	for (i = 0; i < fifocount; i++) {
		val = nvt_cir_reg_read(nvt, CIR_SRXFIFO);
		nvt->buf[b_idx + i] = val;
	}

	nvt->pkts += fifocount;
	nvt_dbg("%s: pkts now %d", __func__, nvt->pkts);

	nvt_process_rx_ir_data(nvt);

	spin_unlock_irqrestore(&nvt->nvt_lock, flags);
}

static void nvt_cir_log_irqs(u8 status, u8 iren)
{
	nvt_pr(KERN_INFO, "IRQ 0x%02x (IREN 0x%02x) :%s%s%s%s%s%s%s%s%s",
		status, iren,
		status & CIR_IRSTS_RDR ? " RDR" : "",
		status & CIR_IRSTS_RTR ? " RTR" : "",
		status & CIR_IRSTS_PE ? " PE" : "",
		status & CIR_IRSTS_RFO ? " RFO" : "",
		status & CIR_IRSTS_TE ? " TE" : "",
		status & CIR_IRSTS_TTR ? " TTR" : "",
		status & CIR_IRSTS_TFU ? " TFU" : "",
		status & CIR_IRSTS_GH ? " GH" : "",
		status & ~(CIR_IRSTS_RDR | CIR_IRSTS_RTR | CIR_IRSTS_PE |
			   CIR_IRSTS_RFO | CIR_IRSTS_TE | CIR_IRSTS_TTR |
			   CIR_IRSTS_TFU | CIR_IRSTS_GH) ? " ?" : "");
}

static bool nvt_cir_tx_inactive(struct nvt_dev *nvt)
{
	unsigned long flags;
	bool tx_inactive;
	u8 tx_state;

	spin_lock_irqsave(&nvt->tx.lock, flags);
	tx_state = nvt->tx.tx_state;
	spin_unlock_irqrestore(&nvt->tx.lock, flags);

	tx_inactive = (tx_state == ST_TX_NONE);

	return tx_inactive;
}

/* interrupt service routine for incoming and outgoing CIR data */
static irqreturn_t nvt_cir_isr(int irq, void *data)
{
	struct nvt_dev *nvt = data;
	u8 status, iren, cur_state;
	unsigned long flags;

	nvt_dbg_verbose("%s firing", __func__);

	nvt_efm_enable(nvt);
	nvt_select_logical_dev(nvt, LOGICAL_DEV_CIR);
	nvt_efm_disable(nvt);

	/*
	 * Get IR Status register contents. Write 1 to ack/clear
	 *
	 * bit: reg name - description
	 *   7: CIR_IRSTS_RDR - RX Data Ready
	 *   6: CIR_IRSTS_RTR - RX FIFO Trigger Level Reach
	 *   5: CIR_IRSTS_PE - Packet End
	 *   4: CIR_IRSTS_RFO - RX FIFO Overrun (RDR will also be set)
	 *   3: CIR_IRSTS_TE - TX FIFO Empty
	 *   2: CIR_IRSTS_TTR - TX FIFO Trigger Level Reach
	 *   1: CIR_IRSTS_TFU - TX FIFO Underrun
	 *   0: CIR_IRSTS_GH - Min Length Detected
	 */
	status = nvt_cir_reg_read(nvt, CIR_IRSTS);
	if (!status) {
		nvt_dbg_verbose("%s exiting, IRSTS 0x0", __func__);
		nvt_cir_reg_write(nvt, 0xff, CIR_IRSTS);
		return IRQ_RETVAL(IRQ_NONE);
	}

	/* ack/clear all irq flags we've got */
	nvt_cir_reg_write(nvt, status, CIR_IRSTS);
	nvt_cir_reg_write(nvt, 0, CIR_IRSTS);

	/* Interrupt may be shared with CIR Wake, bail if CIR not enabled */
	iren = nvt_cir_reg_read(nvt, CIR_IREN);
	if (!iren) {
		nvt_dbg_verbose("%s exiting, CIR not enabled", __func__);
		return IRQ_RETVAL(IRQ_NONE);
	}

	if (debug)
		nvt_cir_log_irqs(status, iren);

	if (status & CIR_IRSTS_RTR) {
		/* FIXME: add code for study/learn mode */
		/* We only do rx if not tx'ing */
		if (nvt_cir_tx_inactive(nvt))
			nvt_get_rx_ir_data(nvt);
	}

	if (status & CIR_IRSTS_PE) {
		if (nvt_cir_tx_inactive(nvt))
			nvt_get_rx_ir_data(nvt);

		spin_lock_irqsave(&nvt->nvt_lock, flags);
		cur_state = nvt->study_state;
		spin_unlock_irqrestore(&nvt->nvt_lock, flags);

		if (cur_state == ST_STUDY_NONE)
			nvt_clear_cir_fifo(nvt);
	}

	if (status & CIR_IRSTS_TE)
		nvt_clear_tx_fifo(nvt);

	if (status & CIR_IRSTS_TTR) {
		unsigned int pos, count;
		u8 tmp;

		spin_lock_irqsave(&nvt->tx.lock, flags);

		pos = nvt->tx.cur_buf_num;
		count = nvt->tx.buf_count;

		/* Write data into the hardware tx fifo while pos < count */
		if (pos < count) {
			nvt_cir_reg_write(nvt, nvt->tx.buf[pos], CIR_STXFIFO);
			nvt->tx.cur_buf_num++;
		/* Disable TX FIFO Trigger Level Reach (TTR) interrupt */
		} else {
			tmp = nvt_cir_reg_read(nvt, CIR_IREN);
			nvt_cir_reg_write(nvt, tmp & ~CIR_IREN_TTR, CIR_IREN);
		}

		spin_unlock_irqrestore(&nvt->tx.lock, flags);
	}

	if (status & CIR_IRSTS_TFU) {
		spin_lock_irqsave(&nvt->tx.lock, flags);
		if (nvt->tx.tx_state == ST_TX_REPLY) {
			nvt->tx.tx_state = ST_TX_REQUEST;
			wake_up(&nvt->tx.queue);
		}
		spin_unlock_irqrestore(&nvt->tx.lock, flags);
	}

	nvt_dbg_verbose("%s done", __func__);
	return IRQ_RETVAL(IRQ_HANDLED);
}

/* Interrupt service routine for CIR Wake */
static irqreturn_t nvt_cir_wake_isr(int irq, void *data)
{
	u8 status, iren, val;
	struct nvt_dev *nvt = data;
	unsigned long flags;

	nvt_dbg_wake("%s firing", __func__);

	status = nvt_cir_wake_reg_read(nvt, CIR_WAKE_IRSTS);
	if (!status)
		return IRQ_RETVAL(IRQ_NONE);

	if (status & CIR_WAKE_IRSTS_IR_PENDING)
		nvt_clear_cir_wake_fifo(nvt);

	nvt_cir_wake_reg_write(nvt, status, CIR_WAKE_IRSTS);
	nvt_cir_wake_reg_write(nvt, 0, CIR_WAKE_IRSTS);

	/* Interrupt may be shared with CIR, bail if Wake not enabled */
	iren = nvt_cir_wake_reg_read(nvt, CIR_WAKE_IREN);
	if (!iren) {
		nvt_dbg_wake("%s exiting, wake not enabled", __func__);
		return IRQ_RETVAL(IRQ_HANDLED);
	}

	if ((status & CIR_WAKE_IRSTS_PE) &&
	    (nvt->wake_state == ST_WAKE_START)) {
		while (nvt_cir_wake_reg_read(nvt, CIR_WAKE_RD_FIFO_ONLY_IDX)) {
			val = nvt_cir_wake_reg_read(nvt, CIR_WAKE_RD_FIFO_ONLY);
			nvt_dbg("setting wake up key: 0x%x", val);
		}

		nvt_cir_wake_reg_write(nvt, 0, CIR_WAKE_IREN);
		spin_lock_irqsave(&nvt->nvt_lock, flags);
		nvt->wake_state = ST_WAKE_FINISH;
		spin_unlock_irqrestore(&nvt->nvt_lock, flags);
	}

	nvt_dbg_wake("%s done", __func__);
	return IRQ_RETVAL(IRQ_HANDLED);
}

static void nvt_enable_cir(struct nvt_dev *nvt)
{
	/* set function enable flags */
	nvt_cir_reg_write(nvt, CIR_IRCON_TXEN | CIR_IRCON_RXEN |
			  CIR_IRCON_RXINV | CIR_IRCON_SAMPLE_PERIOD_SEL,
			  CIR_IRCON);

	nvt_efm_enable(nvt);

	/* enable the CIR logical device */
	nvt_select_logical_dev(nvt, LOGICAL_DEV_CIR);
	nvt_cr_write(nvt, LOGICAL_DEV_ENABLE, CR_LOGICAL_DEV_EN);

	nvt_efm_disable(nvt);

	/* clear all pending interrupts */
	nvt_cir_reg_write(nvt, 0xff, CIR_IRSTS);

	/* enable interrupts */
	nvt_cir_reg_write(nvt, CIR_IREN_RTR | CIR_IREN_PE, CIR_IREN);
}

static void nvt_disable_cir(struct nvt_dev *nvt)
{
	/* disable CIR interrupts */
	nvt_cir_reg_write(nvt, 0, CIR_IREN);

	/* clear any and all pending interrupts */
	nvt_cir_reg_write(nvt, 0xff, CIR_IRSTS);

	/* clear all function enable flags */
	nvt_cir_reg_write(nvt, 0, CIR_IRCON);

	/* clear hardware rx and tx fifos */
	nvt_clear_cir_fifo(nvt);
	nvt_clear_tx_fifo(nvt);

	nvt_efm_enable(nvt);

	/* disable the CIR logical device */
	nvt_select_logical_dev(nvt, LOGICAL_DEV_CIR);
	nvt_cr_write(nvt, LOGICAL_DEV_DISABLE, CR_LOGICAL_DEV_EN);

	nvt_efm_disable(nvt);
}

static int nvt_open(void *data)
{
	struct nvt_dev *nvt = (struct nvt_dev *)data;
	unsigned long flags;

	spin_lock_irqsave(&nvt->nvt_lock, flags);
	nvt->in_use = true;
	nvt_enable_cir(nvt);
	spin_unlock_irqrestore(&nvt->nvt_lock, flags);

	return 0;
}

static void nvt_close(void *data)
{
	struct nvt_dev *nvt = (struct nvt_dev *)data;
	unsigned long flags;

	spin_lock_irqsave(&nvt->nvt_lock, flags);
	nvt->in_use = false;
	nvt_disable_cir(nvt);
	spin_unlock_irqrestore(&nvt->nvt_lock, flags);
}

/* Allocate memory, probe hardware, and initialize everything */
static int nvt_probe(struct pnp_dev *pdev, const struct pnp_device_id *dev_id)
{
	struct nvt_dev *nvt = NULL;
	struct input_dev *rdev = NULL;
	struct ir_dev_props *props = NULL;
	int ret = -ENOMEM;

	nvt = kzalloc(sizeof(struct nvt_dev), GFP_KERNEL);
	if (!nvt)
		return ret;

	props = kzalloc(sizeof(struct ir_dev_props), GFP_KERNEL);
	if (!props)
		goto failure;

	/* input device for IR remote (and tx) */
	rdev = input_allocate_device();
	if (!rdev)
		goto failure;

	ret = -ENODEV;
	/* validate pnp resources */
	if (!pnp_port_valid(pdev, 0) ||
	    pnp_port_len(pdev, 0) < CIR_IOREG_LENGTH) {
		dev_err(&pdev->dev, "IR PNP Port not valid!\n");
		goto failure;
	}

	if (!pnp_irq_valid(pdev, 0)) {
		dev_err(&pdev->dev, "PNP IRQ not valid!\n");
		goto failure;
	}

	if (!pnp_port_valid(pdev, 1) ||
	    pnp_port_len(pdev, 1) < CIR_IOREG_LENGTH) {
		dev_err(&pdev->dev, "Wake PNP Port not valid!\n");
		goto failure;
	}

	nvt->cir_addr = pnp_port_start(pdev, 0);
	nvt->cir_irq = pnp_irq(pdev, 0);

	nvt->cir_wake_addr = pnp_port_start(pdev, 1);
	/* irq is always shared between cir and cir wake */
	nvt->cir_wake_irq = nvt->cir_irq;

	nvt->cr_efir = CR_EFIR;
	nvt->cr_efdr = CR_EFDR;

	spin_lock_init(&nvt->nvt_lock);
	spin_lock_init(&nvt->tx.lock);

	ret = -EBUSY;
	/* now claim resources */
	if (!request_region(nvt->cir_addr,
			    CIR_IOREG_LENGTH, NVT_DRIVER_NAME))
		goto failure;

	if (request_irq(nvt->cir_irq, nvt_cir_isr, IRQF_SHARED,
			NVT_DRIVER_NAME, (void *)nvt))
		goto failure;

	if (!request_region(nvt->cir_wake_addr,
			    CIR_IOREG_LENGTH, NVT_DRIVER_NAME))
		goto failure;

	if (request_irq(nvt->cir_wake_irq, nvt_cir_wake_isr, IRQF_SHARED,
			NVT_DRIVER_NAME, (void *)nvt))
		goto failure;

	pnp_set_drvdata(pdev, nvt);
	nvt->pdev = pdev;

	init_waitqueue_head(&nvt->tx.queue);

	ret = nvt_hw_detect(nvt);
	if (ret)
		goto failure;

	/* Initialize CIR & CIR Wake Logical Devices */
	nvt_efm_enable(nvt);
	nvt_cir_ldev_init(nvt);
	nvt_cir_wake_ldev_init(nvt);
	nvt_efm_disable(nvt);

	/* Initialize CIR & CIR Wake Config Registers */
	nvt_cir_regs_init(nvt);
	nvt_cir_wake_regs_init(nvt);

	/* Set up ir-core props */
	props->priv = nvt;
	props->driver_type = RC_DRIVER_IR_RAW;
	props->allowed_protos = IR_TYPE_ALL;
	props->open = nvt_open;
	props->close = nvt_close;
#if 0
	props->min_timeout = XYZ;
	props->max_timeout = XYZ;
	props->timeout = XYZ;
	/* rx resolution is hardwired to 50us atm, 1, 25, 100 also possible */
	props->rx_resolution = XYZ;
	/* tx bits */
	props->tx_resolution = XYZ;
#endif
	props->tx_ir = nvt_tx_ir;
	props->s_tx_carrier = nvt_set_tx_carrier;

	rdev->name = "Nuvoton w836x7hg Infrared Remote Transceiver";
	rdev->id.bustype = BUS_HOST;
	rdev->id.vendor = PCI_VENDOR_ID_WINBOND2;
	rdev->id.product = nvt->chip_major;
	rdev->id.version = nvt->chip_minor;

	nvt->props = props;
	nvt->rdev = rdev;

	device_set_wakeup_capable(&pdev->dev, 1);
	device_set_wakeup_enable(&pdev->dev, 1);

	ret = ir_input_register(rdev, RC_MAP_RC6_MCE, props, NVT_DRIVER_NAME);
	if (ret)
		goto failure;

	nvt_pr(KERN_NOTICE, "driver has been successfully loaded\n");
	if (debug) {
		cir_dump_regs(nvt);
		cir_wake_dump_regs(nvt);
	}

	return 0;

failure:
	if (nvt->cir_irq)
		free_irq(nvt->cir_irq, nvt);
	if (nvt->cir_addr)
		release_region(nvt->cir_addr, CIR_IOREG_LENGTH);

	if (nvt->cir_wake_irq)
		free_irq(nvt->cir_wake_irq, nvt);
	if (nvt->cir_wake_addr)
		release_region(nvt->cir_wake_addr, CIR_IOREG_LENGTH);

	input_free_device(rdev);
	kfree(props);
	kfree(nvt);

	return ret;
}

static void __devexit nvt_remove(struct pnp_dev *pdev)
{
	struct nvt_dev *nvt = pnp_get_drvdata(pdev);
	unsigned long flags;

	spin_lock_irqsave(&nvt->nvt_lock, flags);
	/* disable CIR */
	nvt_cir_reg_write(nvt, 0, CIR_IREN);
	nvt_disable_cir(nvt);
	/* enable CIR Wake (for IR power-on) */
	nvt_enable_wake(nvt);
	spin_unlock_irqrestore(&nvt->nvt_lock, flags);

	/* free resources */
	free_irq(nvt->cir_irq, nvt);
	free_irq(nvt->cir_wake_irq, nvt);
	release_region(nvt->cir_addr, CIR_IOREG_LENGTH);
	release_region(nvt->cir_wake_addr, CIR_IOREG_LENGTH);

	ir_input_unregister(nvt->rdev);

	kfree(nvt->props);
	kfree(nvt);
}

static int nvt_suspend(struct pnp_dev *pdev, pm_message_t state)
{
	struct nvt_dev *nvt = pnp_get_drvdata(pdev);
	unsigned long flags;

	nvt_dbg("%s called", __func__);

	/* zero out misc state tracking */
	spin_lock_irqsave(&nvt->nvt_lock, flags);
	nvt->study_state = ST_STUDY_NONE;
	nvt->wake_state = ST_WAKE_NONE;
	spin_unlock_irqrestore(&nvt->nvt_lock, flags);

	spin_lock_irqsave(&nvt->tx.lock, flags);
	nvt->tx.tx_state = ST_TX_NONE;
	spin_unlock_irqrestore(&nvt->tx.lock, flags);

	/* disable all CIR interrupts */
	nvt_cir_reg_write(nvt, 0, CIR_IREN);

	nvt_efm_enable(nvt);

	/* disable cir logical dev */
	nvt_select_logical_dev(nvt, LOGICAL_DEV_CIR);
	nvt_cr_write(nvt, LOGICAL_DEV_DISABLE, CR_LOGICAL_DEV_EN);

	nvt_efm_disable(nvt);

	/* make sure wake is enabled */
	nvt_enable_wake(nvt);

	return 0;
}

static int nvt_resume(struct pnp_dev *pdev)
{
	int ret = 0;
	struct nvt_dev *nvt = pnp_get_drvdata(pdev);

	nvt_dbg("%s called", __func__);

	/* open interrupt */
	nvt_cir_reg_write(nvt, CIR_IREN_RTR | CIR_IREN_PE, CIR_IREN);

	/* Enable CIR logical device */
	nvt_efm_enable(nvt);
	nvt_select_logical_dev(nvt, LOGICAL_DEV_CIR);
	nvt_cr_write(nvt, LOGICAL_DEV_ENABLE, CR_LOGICAL_DEV_EN);

	nvt_efm_disable(nvt);

	nvt_cir_regs_init(nvt);
	nvt_cir_wake_regs_init(nvt);

	return ret;
}

static void nvt_shutdown(struct pnp_dev *pdev)
{
	struct nvt_dev *nvt = pnp_get_drvdata(pdev);
	nvt_enable_wake(nvt);
}

static const struct pnp_device_id nvt_ids[] = {
	{ "WEC0530", 0 },   /* CIR */
	{ "NTN0530", 0 },   /* CIR for new chip's pnp id */
	{ "", 0 },
};

static struct pnp_driver nvt_driver = {
	.name		= NVT_DRIVER_NAME,
	.id_table	= nvt_ids,
	.flags		= PNP_DRIVER_RES_DO_NOT_CHANGE,
	.probe		= nvt_probe,
	.remove		= __devexit_p(nvt_remove),
	.suspend	= nvt_suspend,
	.resume		= nvt_resume,
	.shutdown	= nvt_shutdown,
};

int nvt_init(void)
{
	return pnp_register_driver(&nvt_driver);
}

void nvt_exit(void)
{
	pnp_unregister_driver(&nvt_driver);
}

module_param(debug, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(debug, "Enable debugging output");

MODULE_DEVICE_TABLE(pnp, nvt_ids);
MODULE_DESCRIPTION("Nuvoton W83667HG-A & W83677HG-I CIR driver");
MODULE_AUTHOR("Jarod Wilson <jarod@redhat.com>");
MODULE_LICENSE("GPL");

module_init(nvt_init);
module_exit(nvt_exit);