  1. /*
  2. * Driver for Nuvoton Technology Corporation w83667hg/w83677hg-i CIR
  3. *
  4. * Copyright (C) 2010 Jarod Wilson <jarod@redhat.com>
  5. * Copyright (C) 2009 Nuvoton PS Team
  6. *
  7. * Special thanks to Nuvoton for providing hardware, spec sheets and
  8. * sample code upon which portions of this driver are based. Indirect
  9. * thanks also to Maxim Levitsky, whose ene_ir driver this driver is
  10. * modeled after.
  11. *
  12. * This program is free software; you can redistribute it and/or
  13. * modify it under the terms of the GNU General Public License as
  14. * published by the Free Software Foundation; either version 2 of the
  15. * License, or (at your option) any later version.
  16. *
  17. * This program is distributed in the hope that it will be useful, but
  18. * WITHOUT ANY WARRANTY; without even the implied warranty of
  19. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  20. * General Public License for more details.
  21. *
  22. * You should have received a copy of the GNU General Public License
  23. * along with this program; if not, write to the Free Software
  24. * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
  25. * USA
  26. */
  27. #include <linux/kernel.h>
  28. #include <linux/module.h>
  29. #include <linux/pnp.h>
  30. #include <linux/io.h>
  31. #include <linux/interrupt.h>
  32. #include <linux/sched.h>
  33. #include <linux/slab.h>
  34. #include <media/rc-core.h>
  35. #include <linux/pci_ids.h>
  36. #include "nuvoton-cir.h"
/* write val to config reg */
static inline void nvt_cr_write(struct nvt_dev *nvt, u8 val, u8 reg)
{
	/* index/data pair: select the register first, then write its value */
	outb(reg, nvt->cr_efir);
	outb(val, nvt->cr_efdr);
}
/* read val from config reg */
static inline u8 nvt_cr_read(struct nvt_dev *nvt, u8 reg)
{
	/* index/data pair: select the register first, then read its value */
	outb(reg, nvt->cr_efir);
	return inb(nvt->cr_efdr);
}
  49. /* update config register bit without changing other bits */
  50. static inline void nvt_set_reg_bit(struct nvt_dev *nvt, u8 val, u8 reg)
  51. {
  52. u8 tmp = nvt_cr_read(nvt, reg) | val;
  53. nvt_cr_write(nvt, tmp, reg);
  54. }
  55. /* clear config register bit without changing other bits */
  56. static inline void nvt_clear_reg_bit(struct nvt_dev *nvt, u8 val, u8 reg)
  57. {
  58. u8 tmp = nvt_cr_read(nvt, reg) & ~val;
  59. nvt_cr_write(nvt, tmp, reg);
  60. }
/* enter extended function mode */
static inline void nvt_efm_enable(struct nvt_dev *nvt)
{
	/* Enabling Extended Function Mode explicitly requires writing 2x */
	outb(EFER_EFM_ENABLE, nvt->cr_efir);
	outb(EFER_EFM_ENABLE, nvt->cr_efir);
}
/* exit extended function mode */
static inline void nvt_efm_disable(struct nvt_dev *nvt)
{
	/* a single write to the index port drops the chip out of EFM */
	outb(EFER_EFM_DISABLE, nvt->cr_efir);
}
  73. /*
  74. * When you want to address a specific logical device, write its logical
  75. * device number to CR_LOGICAL_DEV_SEL, then enable/disable by writing
  76. * 0x1/0x0 respectively to CR_LOGICAL_DEV_EN.
  77. */
  78. static inline void nvt_select_logical_dev(struct nvt_dev *nvt, u8 ldev)
  79. {
  80. outb(CR_LOGICAL_DEV_SEL, nvt->cr_efir);
  81. outb(ldev, nvt->cr_efdr);
  82. }
/* write val to cir config register */
static inline void nvt_cir_reg_write(struct nvt_dev *nvt, u8 val, u8 offset)
{
	/* CIR runtime registers sit at offsets from the logical dev I/O base */
	outb(val, nvt->cir_addr + offset);
}
  88. /* read val from cir config register */
  89. static u8 nvt_cir_reg_read(struct nvt_dev *nvt, u8 offset)
  90. {
  91. u8 val;
  92. val = inb(nvt->cir_addr + offset);
  93. return val;
  94. }
/* write val to cir wake register */
static inline void nvt_cir_wake_reg_write(struct nvt_dev *nvt,
					  u8 val, u8 offset)
{
	/* CIR-wake registers sit at offsets from their own I/O base */
	outb(val, nvt->cir_wake_addr + offset);
}
  101. /* read val from cir wake config register */
  102. static u8 nvt_cir_wake_reg_read(struct nvt_dev *nvt, u8 offset)
  103. {
  104. u8 val;
  105. val = inb(nvt->cir_wake_addr + offset);
  106. return val;
  107. }
/* info-level printk prefixed with the module name, for register dumps */
#define pr_reg(text, ...) \
	printk(KERN_INFO KBUILD_MODNAME ": " text, ## __VA_ARGS__)
/* dump current cir register contents */
static void cir_dump_regs(struct nvt_dev *nvt)
{
	/* the CR_* config registers are only reachable in extended fn mode */
	nvt_efm_enable(nvt);
	nvt_select_logical_dev(nvt, LOGICAL_DEV_CIR);

	pr_reg("%s: Dump CIR logical device registers:\n", NVT_DRIVER_NAME);
	pr_reg(" * CR CIR ACTIVE : 0x%x\n",
	       nvt_cr_read(nvt, CR_LOGICAL_DEV_EN));
	pr_reg(" * CR CIR BASE ADDR: 0x%x\n",
	       (nvt_cr_read(nvt, CR_CIR_BASE_ADDR_HI) << 8) |
		nvt_cr_read(nvt, CR_CIR_BASE_ADDR_LO));
	pr_reg(" * CR CIR IRQ NUM: 0x%x\n",
	       nvt_cr_read(nvt, CR_CIR_IRQ_RSRC));

	nvt_efm_disable(nvt);

	/* runtime CIR registers are plain port I/O at the CIR base address */
	pr_reg("%s: Dump CIR registers:\n", NVT_DRIVER_NAME);
	pr_reg(" * IRCON:     0x%x\n", nvt_cir_reg_read(nvt, CIR_IRCON));
	pr_reg(" * IRSTS:     0x%x\n", nvt_cir_reg_read(nvt, CIR_IRSTS));
	pr_reg(" * IREN:      0x%x\n", nvt_cir_reg_read(nvt, CIR_IREN));
	pr_reg(" * RXFCONT:   0x%x\n", nvt_cir_reg_read(nvt, CIR_RXFCONT));
	pr_reg(" * CP:        0x%x\n", nvt_cir_reg_read(nvt, CIR_CP));
	pr_reg(" * CC:        0x%x\n", nvt_cir_reg_read(nvt, CIR_CC));
	pr_reg(" * SLCH:      0x%x\n", nvt_cir_reg_read(nvt, CIR_SLCH));
	pr_reg(" * SLCL:      0x%x\n", nvt_cir_reg_read(nvt, CIR_SLCL));
	pr_reg(" * FIFOCON:   0x%x\n", nvt_cir_reg_read(nvt, CIR_FIFOCON));
	pr_reg(" * IRFIFOSTS: 0x%x\n", nvt_cir_reg_read(nvt, CIR_IRFIFOSTS));
	pr_reg(" * SRXFIFO:   0x%x\n", nvt_cir_reg_read(nvt, CIR_SRXFIFO));
	pr_reg(" * TXFCONT:   0x%x\n", nvt_cir_reg_read(nvt, CIR_TXFCONT));
	pr_reg(" * STXFIFO:   0x%x\n", nvt_cir_reg_read(nvt, CIR_STXFIFO));
	pr_reg(" * FCCH:      0x%x\n", nvt_cir_reg_read(nvt, CIR_FCCH));
	pr_reg(" * FCCL:      0x%x\n", nvt_cir_reg_read(nvt, CIR_FCCL));
	pr_reg(" * IRFSM:     0x%x\n", nvt_cir_reg_read(nvt, CIR_IRFSM));
}
/* dump current cir wake register contents */
static void cir_wake_dump_regs(struct nvt_dev *nvt)
{
	u8 i, fifo_len;

	/* the CR_* config registers are only reachable in extended fn mode */
	nvt_efm_enable(nvt);
	nvt_select_logical_dev(nvt, LOGICAL_DEV_CIR_WAKE);

	pr_reg("%s: Dump CIR WAKE logical device registers:\n",
	       NVT_DRIVER_NAME);
	pr_reg(" * CR CIR WAKE ACTIVE : 0x%x\n",
	       nvt_cr_read(nvt, CR_LOGICAL_DEV_EN));
	pr_reg(" * CR CIR WAKE BASE ADDR: 0x%x\n",
	       (nvt_cr_read(nvt, CR_CIR_BASE_ADDR_HI) << 8) |
		nvt_cr_read(nvt, CR_CIR_BASE_ADDR_LO));
	pr_reg(" * CR CIR WAKE IRQ NUM: 0x%x\n",
	       nvt_cr_read(nvt, CR_CIR_IRQ_RSRC));

	nvt_efm_disable(nvt);

	/* runtime CIR-wake registers are port I/O at the wake base address */
	pr_reg("%s: Dump CIR WAKE registers\n", NVT_DRIVER_NAME);
	pr_reg(" * IRCON:          0x%x\n",
	       nvt_cir_wake_reg_read(nvt, CIR_WAKE_IRCON));
	pr_reg(" * IRSTS:          0x%x\n",
	       nvt_cir_wake_reg_read(nvt, CIR_WAKE_IRSTS));
	pr_reg(" * IREN:           0x%x\n",
	       nvt_cir_wake_reg_read(nvt, CIR_WAKE_IREN));
	pr_reg(" * FIFO CMP DEEP:  0x%x\n",
	       nvt_cir_wake_reg_read(nvt, CIR_WAKE_FIFO_CMP_DEEP));
	pr_reg(" * FIFO CMP TOL:   0x%x\n",
	       nvt_cir_wake_reg_read(nvt, CIR_WAKE_FIFO_CMP_TOL));
	pr_reg(" * FIFO COUNT:     0x%x\n",
	       nvt_cir_wake_reg_read(nvt, CIR_WAKE_FIFO_COUNT));
	pr_reg(" * SLCH:           0x%x\n",
	       nvt_cir_wake_reg_read(nvt, CIR_WAKE_SLCH));
	pr_reg(" * SLCL:           0x%x\n",
	       nvt_cir_wake_reg_read(nvt, CIR_WAKE_SLCL));
	pr_reg(" * FIFOCON:        0x%x\n",
	       nvt_cir_wake_reg_read(nvt, CIR_WAKE_FIFOCON));
	pr_reg(" * SRXFSTS:        0x%x\n",
	       nvt_cir_wake_reg_read(nvt, CIR_WAKE_SRXFSTS));
	pr_reg(" * SAMPLE RX FIFO: 0x%x\n",
	       nvt_cir_wake_reg_read(nvt, CIR_WAKE_SAMPLE_RX_FIFO));
	pr_reg(" * WR FIFO DATA:   0x%x\n",
	       nvt_cir_wake_reg_read(nvt, CIR_WAKE_WR_FIFO_DATA));
	pr_reg(" * RD FIFO ONLY:   0x%x\n",
	       nvt_cir_wake_reg_read(nvt, CIR_WAKE_RD_FIFO_ONLY));
	pr_reg(" * RD FIFO ONLY IDX: 0x%x\n",
	       nvt_cir_wake_reg_read(nvt, CIR_WAKE_RD_FIFO_ONLY_IDX));
	pr_reg(" * FIFO IGNORE:    0x%x\n",
	       nvt_cir_wake_reg_read(nvt, CIR_WAKE_FIFO_IGNORE));
	pr_reg(" * IRFSM:          0x%x\n",
	       nvt_cir_wake_reg_read(nvt, CIR_WAKE_IRFSM));

	/* each RD_FIFO_ONLY read pops the next byte of the wake fifo */
	fifo_len = nvt_cir_wake_reg_read(nvt, CIR_WAKE_FIFO_COUNT);
	pr_reg("%s: Dump CIR WAKE FIFO (len %d)\n", NVT_DRIVER_NAME, fifo_len);
	pr_reg("* Contents = ");
	for (i = 0; i < fifo_len; i++)
		printk(KERN_CONT "%02x ",
		       nvt_cir_wake_reg_read(nvt, CIR_WAKE_RD_FIFO_ONLY));
	printk(KERN_CONT "\n");
}
  199. /* detect hardware features */
  200. static int nvt_hw_detect(struct nvt_dev *nvt)
  201. {
  202. unsigned long flags;
  203. u8 chip_major, chip_minor;
  204. int ret = 0;
  205. char chip_id[12];
  206. bool chip_unknown = false;
  207. nvt_efm_enable(nvt);
  208. /* Check if we're wired for the alternate EFER setup */
  209. chip_major = nvt_cr_read(nvt, CR_CHIP_ID_HI);
  210. if (chip_major == 0xff) {
  211. nvt->cr_efir = CR_EFIR2;
  212. nvt->cr_efdr = CR_EFDR2;
  213. nvt_efm_enable(nvt);
  214. chip_major = nvt_cr_read(nvt, CR_CHIP_ID_HI);
  215. }
  216. chip_minor = nvt_cr_read(nvt, CR_CHIP_ID_LO);
  217. /* these are the known working chip revisions... */
  218. switch (chip_major) {
  219. case CHIP_ID_HIGH_667:
  220. strcpy(chip_id, "w83667hg\0");
  221. if (chip_minor != CHIP_ID_LOW_667)
  222. chip_unknown = true;
  223. break;
  224. case CHIP_ID_HIGH_677B:
  225. strcpy(chip_id, "w83677hg\0");
  226. if (chip_minor != CHIP_ID_LOW_677B2 &&
  227. chip_minor != CHIP_ID_LOW_677B3)
  228. chip_unknown = true;
  229. break;
  230. case CHIP_ID_HIGH_677C:
  231. strcpy(chip_id, "w83677hg-c\0");
  232. if (chip_minor != CHIP_ID_LOW_677C)
  233. chip_unknown = true;
  234. break;
  235. default:
  236. strcpy(chip_id, "w836x7hg\0");
  237. chip_unknown = true;
  238. break;
  239. }
  240. /* warn, but still let the driver load, if we don't know this chip */
  241. if (chip_unknown)
  242. nvt_pr(KERN_WARNING, "%s: unknown chip, id: 0x%02x 0x%02x, "
  243. "it may not work...", chip_id, chip_major, chip_minor);
  244. else
  245. nvt_dbg("%s: chip id: 0x%02x 0x%02x",
  246. chip_id, chip_major, chip_minor);
  247. nvt_efm_disable(nvt);
  248. spin_lock_irqsave(&nvt->nvt_lock, flags);
  249. nvt->chip_major = chip_major;
  250. nvt->chip_minor = chip_minor;
  251. spin_unlock_irqrestore(&nvt->nvt_lock, flags);
  252. return ret;
  253. }
/* set up the CIR logical device: pin muxing, enable, I/O base and IRQ */
static void nvt_cir_ldev_init(struct nvt_dev *nvt)
{
	u8 val, psreg, psmask, psval;

	/* the w83667hg muxes the CIR pins via a different register set */
	if (nvt->chip_major == CHIP_ID_HIGH_667) {
		psreg = CR_MULTIFUNC_PIN_SEL;
		psmask = MULTIFUNC_PIN_SEL_MASK;
		psval = MULTIFUNC_ENABLE_CIR | MULTIFUNC_ENABLE_CIRWB;
	} else {
		psreg = CR_OUTPUT_PIN_SEL;
		psmask = OUTPUT_PIN_SEL_MASK;
		psval = OUTPUT_ENABLE_CIR | OUTPUT_ENABLE_CIRWB;
	}

	/* output pin selection: enable CIR, with WB sensor enabled */
	val = nvt_cr_read(nvt, psreg);
	val &= psmask;
	val |= psval;
	nvt_cr_write(nvt, val, psreg);

	/* Select CIR logical device and enable */
	nvt_select_logical_dev(nvt, LOGICAL_DEV_CIR);
	nvt_cr_write(nvt, LOGICAL_DEV_ENABLE, CR_LOGICAL_DEV_EN);

	/* program the 16-bit I/O base (split hi/lo) and the IRQ line */
	nvt_cr_write(nvt, nvt->cir_addr >> 8, CR_CIR_BASE_ADDR_HI);
	nvt_cr_write(nvt, nvt->cir_addr & 0xff, CR_CIR_BASE_ADDR_LO);
	nvt_cr_write(nvt, nvt->cir_irq, CR_CIR_IRQ_RSRC);

	nvt_dbg("CIR initialized, base io port address: 0x%lx, irq: %d",
		nvt->cir_addr, nvt->cir_irq);
}
/* set up the CIR-wake logical device and its ACPI event routing */
static void nvt_cir_wake_ldev_init(struct nvt_dev *nvt)
{
	/* Select ACPI logical device, enable it and CIR Wake */
	nvt_select_logical_dev(nvt, LOGICAL_DEV_ACPI);
	nvt_cr_write(nvt, LOGICAL_DEV_ENABLE, CR_LOGICAL_DEV_EN);

	/* Enable CIR Wake via PSOUT# (Pin60) */
	nvt_set_reg_bit(nvt, CIR_WAKE_ENABLE_BIT, CR_ACPI_CIR_WAKE);

	/* enable cir interrupt of mouse/keyboard IRQ event */
	nvt_set_reg_bit(nvt, CIR_INTR_MOUSE_IRQ_BIT, CR_ACPI_IRQ_EVENTS);

	/* enable pme interrupt of cir wakeup event */
	nvt_set_reg_bit(nvt, PME_INTR_CIR_PASS_BIT, CR_ACPI_IRQ_EVENTS2);

	/* Select CIR Wake logical device and enable */
	nvt_select_logical_dev(nvt, LOGICAL_DEV_CIR_WAKE);
	nvt_cr_write(nvt, LOGICAL_DEV_ENABLE, CR_LOGICAL_DEV_EN);

	/* program the 16-bit I/O base (split hi/lo) and the IRQ line */
	nvt_cr_write(nvt, nvt->cir_wake_addr >> 8, CR_CIR_BASE_ADDR_HI);
	nvt_cr_write(nvt, nvt->cir_wake_addr & 0xff, CR_CIR_BASE_ADDR_LO);
	nvt_cr_write(nvt, nvt->cir_wake_irq, CR_CIR_IRQ_RSRC);

	nvt_dbg("CIR Wake initialized, base io port address: 0x%lx, irq: %d",
		nvt->cir_wake_addr, nvt->cir_wake_irq);
}
  300. /* clear out the hardware's cir rx fifo */
  301. static void nvt_clear_cir_fifo(struct nvt_dev *nvt)
  302. {
  303. u8 val;
  304. val = nvt_cir_reg_read(nvt, CIR_FIFOCON);
  305. nvt_cir_reg_write(nvt, val | CIR_FIFOCON_RXFIFOCLR, CIR_FIFOCON);
  306. }
  307. /* clear out the hardware's cir wake rx fifo */
  308. static void nvt_clear_cir_wake_fifo(struct nvt_dev *nvt)
  309. {
  310. u8 val;
  311. val = nvt_cir_wake_reg_read(nvt, CIR_WAKE_FIFOCON);
  312. nvt_cir_wake_reg_write(nvt, val | CIR_WAKE_FIFOCON_RXFIFOCLR,
  313. CIR_WAKE_FIFOCON);
  314. }
  315. /* clear out the hardware's cir tx fifo */
  316. static void nvt_clear_tx_fifo(struct nvt_dev *nvt)
  317. {
  318. u8 val;
  319. val = nvt_cir_reg_read(nvt, CIR_FIFOCON);
  320. nvt_cir_reg_write(nvt, val | CIR_FIFOCON_TXFIFOCLR, CIR_FIFOCON);
  321. }
  322. /* enable RX Trigger Level Reach and Packet End interrupts */
  323. static void nvt_set_cir_iren(struct nvt_dev *nvt)
  324. {
  325. u8 iren;
  326. iren = CIR_IREN_RTR | CIR_IREN_PE;
  327. nvt_cir_reg_write(nvt, iren, CIR_IREN);
  328. }
/* program the CIR runtime registers to their operational defaults */
static void nvt_cir_regs_init(struct nvt_dev *nvt)
{
	/* set sample limit count (PE interrupt raised when reached) */
	nvt_cir_reg_write(nvt, CIR_RX_LIMIT_COUNT >> 8, CIR_SLCH);
	nvt_cir_reg_write(nvt, CIR_RX_LIMIT_COUNT & 0xff, CIR_SLCL);

	/* set fifo irq trigger levels */
	nvt_cir_reg_write(nvt, CIR_FIFOCON_TX_TRIGGER_LEV |
			  CIR_FIFOCON_RX_TRIGGER_LEV, CIR_FIFOCON);

	/*
	 * Enable TX and RX, specify carrier on = low, off = high, and set
	 * sample period (currently 50us)
	 */
	nvt_cir_reg_write(nvt,
			  CIR_IRCON_TXEN | CIR_IRCON_RXEN |
			  CIR_IRCON_RXINV | CIR_IRCON_SAMPLE_PERIOD_SEL,
			  CIR_IRCON);

	/* clear hardware rx and tx fifos */
	nvt_clear_cir_fifo(nvt);
	nvt_clear_tx_fifo(nvt);

	/* clear any and all stray interrupts */
	nvt_cir_reg_write(nvt, 0xff, CIR_IRSTS);

	/* and finally, enable interrupts */
	nvt_set_cir_iren(nvt);
}
/* program the CIR-wake runtime registers to their operational defaults */
static void nvt_cir_wake_regs_init(struct nvt_dev *nvt)
{
	/* set number of bytes needed for wake from s3 (default 65) */
	nvt_cir_wake_reg_write(nvt, CIR_WAKE_FIFO_CMP_BYTES,
			       CIR_WAKE_FIFO_CMP_DEEP);

	/* set tolerance/variance allowed per byte during wake compare */
	nvt_cir_wake_reg_write(nvt, CIR_WAKE_CMP_TOLERANCE,
			       CIR_WAKE_FIFO_CMP_TOL);

	/* set sample limit count (PE interrupt raised when reached) */
	nvt_cir_wake_reg_write(nvt, CIR_RX_LIMIT_COUNT >> 8, CIR_WAKE_SLCH);
	nvt_cir_wake_reg_write(nvt, CIR_RX_LIMIT_COUNT & 0xff, CIR_WAKE_SLCL);

	/* set cir wake fifo rx trigger level (currently 67) */
	nvt_cir_wake_reg_write(nvt, CIR_WAKE_FIFOCON_RX_TRIGGER_LEV,
			       CIR_WAKE_FIFOCON);

	/*
	 * Enable TX and RX, specify carrier on = low, off = high, and set
	 * sample period (currently 50us)
	 */
	nvt_cir_wake_reg_write(nvt, CIR_WAKE_IRCON_MODE0 | CIR_WAKE_IRCON_RXEN |
			       CIR_WAKE_IRCON_R | CIR_WAKE_IRCON_RXINV |
			       CIR_WAKE_IRCON_SAMPLE_PERIOD_SEL,
			       CIR_WAKE_IRCON);

	/* clear cir wake rx fifo */
	nvt_clear_cir_wake_fifo(nvt);

	/* clear any and all stray interrupts */
	nvt_cir_wake_reg_write(nvt, 0xff, CIR_WAKE_IRSTS);
}
/* arm IR wake-from-suspend: wake routing on, receiver on, interrupts masked */
static void nvt_enable_wake(struct nvt_dev *nvt)
{
	nvt_efm_enable(nvt);

	/* the ACPI logical device gates CIR wake and PME event routing */
	nvt_select_logical_dev(nvt, LOGICAL_DEV_ACPI);
	nvt_set_reg_bit(nvt, CIR_WAKE_ENABLE_BIT, CR_ACPI_CIR_WAKE);
	nvt_set_reg_bit(nvt, CIR_INTR_MOUSE_IRQ_BIT, CR_ACPI_IRQ_EVENTS);
	nvt_set_reg_bit(nvt, PME_INTR_CIR_PASS_BIT, CR_ACPI_IRQ_EVENTS2);

	nvt_select_logical_dev(nvt, LOGICAL_DEV_CIR_WAKE);
	nvt_cr_write(nvt, LOGICAL_DEV_ENABLE, CR_LOGICAL_DEV_EN);

	nvt_efm_disable(nvt);

	/* receiver enabled, input inverted, mode0, standard sample period */
	nvt_cir_wake_reg_write(nvt, CIR_WAKE_IRCON_MODE0 | CIR_WAKE_IRCON_RXEN |
			       CIR_WAKE_IRCON_R | CIR_WAKE_IRCON_RXINV |
			       CIR_WAKE_IRCON_SAMPLE_PERIOD_SEL,
			       CIR_WAKE_IRCON);
	/* clear stale status, then mask all wake interrupts */
	nvt_cir_wake_reg_write(nvt, 0xff, CIR_WAKE_IRSTS);
	nvt_cir_wake_reg_write(nvt, 0, CIR_WAKE_IREN);
}
/* rx carrier detect only works in learning mode, must be called w/nvt_lock */
static u32 nvt_rx_carrier_detect(struct nvt_dev *nvt)
{
	u32 count, carrier, duration = 0;
	int i;

	/* 16-bit carrier cycle count from the frequency check registers */
	count = nvt_cir_reg_read(nvt, CIR_FCCL) |
		nvt_cir_reg_read(nvt, CIR_FCCH) << 8;

	/* total pulse time seen, in sample periods (low 7 bits per byte) */
	for (i = 0; i < nvt->pkts; i++) {
		if (nvt->buf[i] & BUF_PULSE_BIT)
			duration += nvt->buf[i] & BUF_LEN_MASK;
	}

	duration *= SAMPLE_PERIOD;

	if (!count || !duration) {
		nvt_pr(KERN_NOTICE, "Unable to determine carrier! (c:%u, d:%u)",
		       count, duration);
		return 0;
	}

	/*
	 * carrier = cycles / pulse time
	 * NOTE(review): the MS_TO_NS(count) scaling presumably converts the
	 * cycle count so the quotient comes out in Hz — verify the units
	 * against SAMPLE_PERIOD's definition.
	 */
	carrier = MS_TO_NS(count) / duration;

	if ((carrier > MAX_CARRIER) || (carrier < MIN_CARRIER))
		nvt_dbg("WTF? Carrier frequency out of range!");

	nvt_dbg("Carrier frequency: %u (count %u, duration %u)",
		carrier, count, duration);

	return carrier;
}
  421. /*
  422. * set carrier frequency
  423. *
  424. * set carrier on 2 registers: CP & CC
  425. * always set CP as 0x81
  426. * set CC by SPEC, CC = 3MHz/carrier - 1
  427. */
  428. static int nvt_set_tx_carrier(struct rc_dev *dev, u32 carrier)
  429. {
  430. struct nvt_dev *nvt = dev->priv;
  431. u16 val;
  432. nvt_cir_reg_write(nvt, 1, CIR_CP);
  433. val = 3000000 / (carrier) - 1;
  434. nvt_cir_reg_write(nvt, val & 0xff, CIR_CC);
  435. nvt_dbg("cp: 0x%x cc: 0x%x\n",
  436. nvt_cir_reg_read(nvt, CIR_CP), nvt_cir_reg_read(nvt, CIR_CC));
  437. return 0;
  438. }
/*
 * nvt_tx_ir
 *
 * 1) clean TX fifo first (handled by AP)
 * 2) copy data from user space
 * 3) disable RX interrupts, enable TX interrupts: TTR & TFU
 * 4) send 9 packets to TX FIFO to open TTR
 * in interrupt_handler:
 * 5) send all data out
 * go back to write():
 * 6) disable TX interrupts, re-enable RX interrupts
 *
 * The key problem of this function is user space data may larger than
 * driver's data buf length. So nvt_tx_ir() will only copy TX_BUF_LEN data to
 * buf, and keep current copied data buf num in cur_buf_num. But driver's buf
 * number may larger than TXFCONT (0xff). So in interrupt_handler, it has to
 * set TXFCONT as 0xff, until buf_count less than 0xff.
 */
static int nvt_tx_ir(struct rc_dev *dev, int *txbuf, u32 n)
{
	struct nvt_dev *nvt = dev->priv;
	unsigned long flags;
	size_t cur_count;
	unsigned int i;
	u8 iren;
	int ret;

	spin_lock_irqsave(&nvt->tx.lock, flags);

	/* clamp the copy to the driver's tx buffer size; return bytes taken */
	if (n >= TX_BUF_LEN) {
		nvt->tx.buf_count = cur_count = TX_BUF_LEN;
		ret = TX_BUF_LEN;
	} else {
		nvt->tx.buf_count = cur_count = n;
		ret = n;
	}

	/*
	 * NOTE(review): buf_count is used as a byte count here although
	 * txbuf is declared int * — confirm callers pass n in bytes.
	 */
	memcpy(nvt->tx.buf, txbuf, nvt->tx.buf_count);

	nvt->tx.cur_buf_num = 0;

	/* save currently enabled interrupts */
	iren = nvt_cir_reg_read(nvt, CIR_IREN);

	/* now disable all interrupts, save TFU & TTR */
	nvt_cir_reg_write(nvt, CIR_IREN_TFU | CIR_IREN_TTR, CIR_IREN);

	nvt->tx.tx_state = ST_TX_REPLY;

	nvt_cir_reg_write(nvt, CIR_FIFOCON_TX_TRIGGER_LEV_8 |
			  CIR_FIFOCON_RXFIFOCLR, CIR_FIFOCON);

	/* trigger TTR interrupt by writing out ones, (yes, it's ugly) */
	for (i = 0; i < 9; i++)
		nvt_cir_reg_write(nvt, 0x01, CIR_STXFIFO);

	spin_unlock_irqrestore(&nvt->tx.lock, flags);

	/* the ISR drains tx.buf and flips tx_state to ST_TX_REQUEST when done */
	wait_event(nvt->tx.queue, nvt->tx.tx_state == ST_TX_REQUEST);

	spin_lock_irqsave(&nvt->tx.lock, flags);
	nvt->tx.tx_state = ST_TX_NONE;
	spin_unlock_irqrestore(&nvt->tx.lock, flags);

	/* restore enabled interrupts to prior state */
	nvt_cir_reg_write(nvt, iren, CIR_IREN);

	return ret;
}
  494. /* dump contents of the last rx buffer we got from the hw rx fifo */
  495. static void nvt_dump_rx_buf(struct nvt_dev *nvt)
  496. {
  497. int i;
  498. printk(KERN_DEBUG "%s (len %d): ", __func__, nvt->pkts);
  499. for (i = 0; (i < nvt->pkts) && (i < RX_BUF_LEN); i++)
  500. printk(KERN_CONT "0x%02x ", nvt->buf[i]);
  501. printk(KERN_CONT "\n");
  502. }
/*
 * Process raw data in rx driver buffer, store it in raw IR event kfifo,
 * trigger decode when appropriate.
 *
 * We get IR data samples one byte at a time. If the msb is set, its a pulse,
 * otherwise its a space. The lower 7 bits are the count of SAMPLE_PERIOD
 * (default 50us) intervals for that pulse/space. A discrete signal is
 * followed by a series of 0x7f packets, then either 0x7<something> or 0x80
 * to signal more IR coming (repeats) or end of IR, respectively. We store
 * sample data in the raw event kfifo until we see 0x7<something> (except f)
 * or 0x80, at which time, we trigger a decode operation.
 */
static void nvt_process_rx_ir_data(struct nvt_dev *nvt)
{
	DEFINE_IR_RAW_EVENT(rawir);
	unsigned int count;
	u32 carrier;
	u8 sample;
	int i;

	nvt_dbg_verbose("%s firing", __func__);

	if (debug)
		nvt_dump_rx_buf(nvt);

	if (nvt->carrier_detect_enabled)
		carrier = nvt_rx_carrier_detect(nvt);

	count = nvt->pkts;
	nvt_dbg_verbose("Processing buffer of len %d", count);

	init_ir_raw_event(&rawir);

	for (i = 0; i < count; i++) {
		nvt->pkts--;
		sample = nvt->buf[i];

		/* msb = pulse/space flag, low 7 bits = sample-period count */
		rawir.pulse = ((sample & BUF_PULSE_BIT) != 0);
		rawir.duration = US_TO_NS((sample & BUF_LEN_MASK)
				 * SAMPLE_PERIOD);

		/*
		 * A saturated length field (0x7f) means this pulse/space
		 * continues into the next byte: accumulate the time in
		 * nvt->rawir and wait for the terminating sample.
		 */
		if ((sample & BUF_LEN_MASK) == BUF_LEN_MASK) {
			if (nvt->rawir.pulse == rawir.pulse)
				nvt->rawir.duration += rawir.duration;
			else {
				nvt->rawir.duration = rawir.duration;
				nvt->rawir.pulse = rawir.pulse;
			}
			continue;
		}

		/* fold any accumulated continuation time into this sample */
		rawir.duration += nvt->rawir.duration;

		/* reset the accumulator for the next run of 0x7f bytes */
		init_ir_raw_event(&nvt->rawir);
		nvt->rawir.duration = 0;
		nvt->rawir.pulse = rawir.pulse;

		/* a lone 0x80 marks end of IR data, not a real pulse */
		if (sample == BUF_PULSE_BIT)
			rawir.pulse = false;

		if (rawir.duration) {
			nvt_dbg("Storing %s with duration %d",
				rawir.pulse ? "pulse" : "space",
				rawir.duration);

			ir_raw_event_store_with_filter(nvt->rdev, &rawir);
		}

		/*
		 * BUF_PULSE_BIT indicates end of IR data, BUF_REPEAT_BYTE
		 * indicates end of IR signal, but new data incoming. In both
		 * cases, it means we're ready to call ir_raw_event_handle
		 */
		if ((sample == BUF_PULSE_BIT) && nvt->pkts) {
			nvt_dbg("Calling ir_raw_event_handle (signal end)\n");
			ir_raw_event_handle(nvt->rdev);
		}
	}

	nvt_dbg("Calling ir_raw_event_handle (buffer empty)\n");
	ir_raw_event_handle(nvt->rdev);

	if (nvt->pkts) {
		nvt_dbg("Odd, pkts should be 0 now... (its %u)", nvt->pkts);
		nvt->pkts = 0;
	}

	nvt_dbg_verbose("%s done", __func__);
}
/* on RX FIFO overrun the sampled data is suspect: discard everything */
static void nvt_handle_rx_fifo_overrun(struct nvt_dev *nvt)
{
	nvt_pr(KERN_WARNING, "RX FIFO overrun detected, flushing data!");

	/* drop buffered samples, flush the hw fifo and reset the decoder */
	nvt->pkts = 0;
	nvt_clear_cir_fifo(nvt);
	ir_raw_event_reset(nvt->rdev);
}
/* copy data from hardware rx fifo into driver buffer */
static void nvt_get_rx_ir_data(struct nvt_dev *nvt)
{
	unsigned long flags;
	u8 fifocount, val;
	unsigned int b_idx;
	bool overrun = false;
	int i;

	/* Get count of how many bytes to read from RX FIFO */
	fifocount = nvt_cir_reg_read(nvt, CIR_RXFCONT);
	/* if we get 0xff, probably means the logical dev is disabled */
	if (fifocount == 0xff)
		return;
	/* watch out for a fifo overrun condition */
	else if (fifocount > RX_BUF_LEN) {
		overrun = true;
		fifocount = RX_BUF_LEN;
	}

	nvt_dbg("attempting to fetch %u bytes from hw rx fifo", fifocount);

	spin_lock_irqsave(&nvt->nvt_lock, flags);

	/* append after any samples left from the previous interrupt */
	b_idx = nvt->pkts;

	/* This should never happen, but lets check anyway... */
	if (b_idx + fifocount > RX_BUF_LEN) {
		nvt_process_rx_ir_data(nvt);
		b_idx = 0;
	}

	/* Read fifocount bytes from CIR Sample RX FIFO register */
	for (i = 0; i < fifocount; i++) {
		val = nvt_cir_reg_read(nvt, CIR_SRXFIFO);
		nvt->buf[b_idx + i] = val;
	}

	nvt->pkts += fifocount;
	nvt_dbg("%s: pkts now %d", __func__, nvt->pkts);

	nvt_process_rx_ir_data(nvt);

	/* an overrun invalidates what we just processed; flush everything */
	if (overrun)
		nvt_handle_rx_fifo_overrun(nvt);

	spin_unlock_irqrestore(&nvt->nvt_lock, flags);
}
/* log a decoded view of the IRQ status bits alongside the enable mask */
static void nvt_cir_log_irqs(u8 status, u8 iren)
{
	/* the trailing " ?" flags any status bit not covered by the list */
	nvt_pr(KERN_INFO, "IRQ 0x%02x (IREN 0x%02x) :%s%s%s%s%s%s%s%s%s",
		status, iren,
		status & CIR_IRSTS_RDR	? " RDR"	: "",
		status & CIR_IRSTS_RTR	? " RTR"	: "",
		status & CIR_IRSTS_PE	? " PE"		: "",
		status & CIR_IRSTS_RFO	? " RFO"	: "",
		status & CIR_IRSTS_TE	? " TE"		: "",
		status & CIR_IRSTS_TTR	? " TTR"	: "",
		status & CIR_IRSTS_TFU	? " TFU"	: "",
		status & CIR_IRSTS_GH	? " GH"		: "",
		status & ~(CIR_IRSTS_RDR | CIR_IRSTS_RTR | CIR_IRSTS_PE |
			   CIR_IRSTS_RFO | CIR_IRSTS_TE | CIR_IRSTS_TTR |
			   CIR_IRSTS_TFU | CIR_IRSTS_GH) ? " ?" : "");
}
  636. static bool nvt_cir_tx_inactive(struct nvt_dev *nvt)
  637. {
  638. unsigned long flags;
  639. bool tx_inactive;
  640. u8 tx_state;
  641. spin_lock_irqsave(&nvt->tx.lock, flags);
  642. tx_state = nvt->tx.tx_state;
  643. spin_unlock_irqrestore(&nvt->tx.lock, flags);
  644. tx_inactive = (tx_state == ST_TX_NONE);
  645. return tx_inactive;
  646. }
/* interrupt service routine for incoming and outgoing CIR data */
static irqreturn_t nvt_cir_isr(int irq, void *data)
{
	struct nvt_dev *nvt = data;
	u8 status, iren, cur_state;
	unsigned long flags;

	nvt_dbg_verbose("%s firing", __func__);

	/* re-select the CIR logical device before touching its registers */
	nvt_efm_enable(nvt);
	nvt_select_logical_dev(nvt, LOGICAL_DEV_CIR);
	nvt_efm_disable(nvt);

	/*
	 * Get IR Status register contents. Write 1 to ack/clear
	 *
	 * bit: reg name - description
	 * 7: CIR_IRSTS_RDR - RX Data Ready
	 * 6: CIR_IRSTS_RTR - RX FIFO Trigger Level Reach
	 * 5: CIR_IRSTS_PE - Packet End
	 * 4: CIR_IRSTS_RFO - RX FIFO Overrun (RDR will also be set)
	 * 3: CIR_IRSTS_TE - TX FIFO Empty
	 * 2: CIR_IRSTS_TTR - TX FIFO Trigger Level Reach
	 * 1: CIR_IRSTS_TFU - TX FIFO Underrun
	 * 0: CIR_IRSTS_GH - Min Length Detected
	 */
	status = nvt_cir_reg_read(nvt, CIR_IRSTS);
	if (!status) {
		nvt_dbg_verbose("%s exiting, IRSTS 0x0", __func__);
		nvt_cir_reg_write(nvt, 0xff, CIR_IRSTS);
		return IRQ_RETVAL(IRQ_NONE);
	}

	/* ack/clear all irq flags we've got */
	nvt_cir_reg_write(nvt, status, CIR_IRSTS);
	nvt_cir_reg_write(nvt, 0, CIR_IRSTS);

	/* Interrupt may be shared with CIR Wake, bail if CIR not enabled */
	iren = nvt_cir_reg_read(nvt, CIR_IREN);
	if (!iren) {
		nvt_dbg_verbose("%s exiting, CIR not enabled", __func__);
		return IRQ_RETVAL(IRQ_NONE);
	}

	if (debug)
		nvt_cir_log_irqs(status, iren);

	if (status & CIR_IRSTS_RTR) {
		/* FIXME: add code for study/learn mode */
		/* We only do rx if not tx'ing */
		if (nvt_cir_tx_inactive(nvt))
			nvt_get_rx_ir_data(nvt);
	}

	if (status & CIR_IRSTS_PE) {
		if (nvt_cir_tx_inactive(nvt))
			nvt_get_rx_ir_data(nvt);

		/* sample study_state under the device lock */
		spin_lock_irqsave(&nvt->nvt_lock, flags);
		cur_state = nvt->study_state;
		spin_unlock_irqrestore(&nvt->nvt_lock, flags);

		/* outside of study mode, drop whatever is left in the fifo */
		if (cur_state == ST_STUDY_NONE)
			nvt_clear_cir_fifo(nvt);
	}

	if (status & CIR_IRSTS_TE)
		nvt_clear_tx_fifo(nvt);

	if (status & CIR_IRSTS_TTR) {
		unsigned int pos, count;
		u8 tmp;

		/* feed the hw tx fifo one byte per TTR interrupt */
		spin_lock_irqsave(&nvt->tx.lock, flags);

		pos = nvt->tx.cur_buf_num;
		count = nvt->tx.buf_count;

		/* Write data into the hardware tx fifo while pos < count */
		if (pos < count) {
			nvt_cir_reg_write(nvt, nvt->tx.buf[pos], CIR_STXFIFO);
			nvt->tx.cur_buf_num++;
		/* Disable TX FIFO Trigger Level Reach (TTR) interrupt */
		} else {
			tmp = nvt_cir_reg_read(nvt, CIR_IREN);
			nvt_cir_reg_write(nvt, tmp & ~CIR_IREN_TTR, CIR_IREN);
		}

		spin_unlock_irqrestore(&nvt->tx.lock, flags);
	}

	if (status & CIR_IRSTS_TFU) {
		/* underrun means tx is drained: wake the sleeping writer */
		spin_lock_irqsave(&nvt->tx.lock, flags);
		if (nvt->tx.tx_state == ST_TX_REPLY) {
			nvt->tx.tx_state = ST_TX_REQUEST;
			wake_up(&nvt->tx.queue);
		}
		spin_unlock_irqrestore(&nvt->tx.lock, flags);
	}

	nvt_dbg_verbose("%s done", __func__);
	return IRQ_RETVAL(IRQ_HANDLED);
}
/* Interrupt service routine for CIR Wake */
static irqreturn_t nvt_cir_wake_isr(int irq, void *data)
{
	u8 status, iren, val;
	struct nvt_dev *nvt = data;
	unsigned long flags;

	nvt_dbg_wake("%s firing", __func__);

	status = nvt_cir_wake_reg_read(nvt, CIR_WAKE_IRSTS);
	if (!status)
		return IRQ_RETVAL(IRQ_NONE);

	if (status & CIR_WAKE_IRSTS_IR_PENDING)
		nvt_clear_cir_wake_fifo(nvt);

	/* ack/clear the flags we saw (write-1-to-clear), then write zero */
	nvt_cir_wake_reg_write(nvt, status, CIR_WAKE_IRSTS);
	nvt_cir_wake_reg_write(nvt, 0, CIR_WAKE_IRSTS);

	/* Interrupt may be shared with CIR, bail if Wake not enabled */
	iren = nvt_cir_wake_reg_read(nvt, CIR_WAKE_IREN);
	if (!iren) {
		nvt_dbg_wake("%s exiting, wake not enabled", __func__);
		return IRQ_RETVAL(IRQ_HANDLED);
	}

	/*
	 * NOTE(review): wake_state is read here without holding nvt_lock,
	 * though it is written under the lock below -- confirm this torn-read
	 * window is acceptable.
	 */
	if ((status & CIR_WAKE_IRSTS_PE) &&
	    (nvt->wake_state == ST_WAKE_START)) {
		/* drain the wake FIFO, logging each byte of the wake key */
		while (nvt_cir_wake_reg_read(nvt, CIR_WAKE_RD_FIFO_ONLY_IDX)) {
			val = nvt_cir_wake_reg_read(nvt, CIR_WAKE_RD_FIFO_ONLY);
			nvt_dbg("setting wake up key: 0x%x", val);
		}

		/* mask all wake interrupts and mark the sequence finished */
		nvt_cir_wake_reg_write(nvt, 0, CIR_WAKE_IREN);
		spin_lock_irqsave(&nvt->nvt_lock, flags);
		nvt->wake_state = ST_WAKE_FINISH;
		spin_unlock_irqrestore(&nvt->nvt_lock, flags);
	}

	nvt_dbg_wake("%s done", __func__);
	return IRQ_RETVAL(IRQ_HANDLED);
}
/*
 * Bring the CIR function up: program the control register, enable the
 * logical device, clear stale status bits, then unmask interrupts (last,
 * so no IRQ fires against a half-configured device).
 */
static void nvt_enable_cir(struct nvt_dev *nvt)
{
	/* set function enable flags */
	nvt_cir_reg_write(nvt, CIR_IRCON_TXEN | CIR_IRCON_RXEN |
			  CIR_IRCON_RXINV | CIR_IRCON_SAMPLE_PERIOD_SEL,
			  CIR_IRCON);

	nvt_efm_enable(nvt);

	/* enable the CIR logical device */
	nvt_select_logical_dev(nvt, LOGICAL_DEV_CIR);
	nvt_cr_write(nvt, LOGICAL_DEV_ENABLE, CR_LOGICAL_DEV_EN);

	nvt_efm_disable(nvt);

	/* clear all pending interrupts */
	nvt_cir_reg_write(nvt, 0xff, CIR_IRSTS);

	/* enable interrupts */
	nvt_set_cir_iren(nvt);
}
/*
 * Take the CIR function down: mask interrupts first (so nothing fires
 * mid-teardown), clear status and control registers, drain both FIFOs,
 * then disable the logical device.
 */
static void nvt_disable_cir(struct nvt_dev *nvt)
{
	/* disable CIR interrupts */
	nvt_cir_reg_write(nvt, 0, CIR_IREN);

	/* clear any and all pending interrupts */
	nvt_cir_reg_write(nvt, 0xff, CIR_IRSTS);

	/* clear all function enable flags */
	nvt_cir_reg_write(nvt, 0, CIR_IRCON);

	/* clear hardware rx and tx fifos */
	nvt_clear_cir_fifo(nvt);
	nvt_clear_tx_fifo(nvt);

	nvt_efm_enable(nvt);

	/* disable the CIR logical device */
	nvt_select_logical_dev(nvt, LOGICAL_DEV_CIR);
	nvt_cr_write(nvt, LOGICAL_DEV_DISABLE, CR_LOGICAL_DEV_EN);

	nvt_efm_disable(nvt);
}
/*
 * rc_dev open callback: power up the CIR function.
 *
 * NOTE(review): nvt_enable_cir() performs EFM/config-register I/O while
 * nvt_lock is held with interrupts disabled -- confirm none of that path
 * can sleep.
 */
static int nvt_open(struct rc_dev *dev)
{
	struct nvt_dev *nvt = dev->priv;
	unsigned long flags;

	spin_lock_irqsave(&nvt->nvt_lock, flags);
	nvt_enable_cir(nvt);
	spin_unlock_irqrestore(&nvt->nvt_lock, flags);

	return 0;
}
/* rc_dev close callback: power down the CIR function under nvt_lock */
static void nvt_close(struct rc_dev *dev)
{
	struct nvt_dev *nvt = dev->priv;
	unsigned long flags;

	spin_lock_irqsave(&nvt->nvt_lock, flags);
	nvt_disable_cir(nvt);
	spin_unlock_irqrestore(&nvt->nvt_lock, flags);
}
  816. /* Allocate memory, probe hardware, and initialize everything */
  817. static int nvt_probe(struct pnp_dev *pdev, const struct pnp_device_id *dev_id)
  818. {
  819. struct nvt_dev *nvt;
  820. struct rc_dev *rdev;
  821. int ret = -ENOMEM;
  822. nvt = kzalloc(sizeof(struct nvt_dev), GFP_KERNEL);
  823. if (!nvt)
  824. return ret;
  825. /* input device for IR remote (and tx) */
  826. rdev = rc_allocate_device();
  827. if (!rdev)
  828. goto failure;
  829. ret = -ENODEV;
  830. /* validate pnp resources */
  831. if (!pnp_port_valid(pdev, 0) ||
  832. pnp_port_len(pdev, 0) < CIR_IOREG_LENGTH) {
  833. dev_err(&pdev->dev, "IR PNP Port not valid!\n");
  834. goto failure;
  835. }
  836. if (!pnp_irq_valid(pdev, 0)) {
  837. dev_err(&pdev->dev, "PNP IRQ not valid!\n");
  838. goto failure;
  839. }
  840. if (!pnp_port_valid(pdev, 1) ||
  841. pnp_port_len(pdev, 1) < CIR_IOREG_LENGTH) {
  842. dev_err(&pdev->dev, "Wake PNP Port not valid!\n");
  843. goto failure;
  844. }
  845. nvt->cir_addr = pnp_port_start(pdev, 0);
  846. nvt->cir_irq = pnp_irq(pdev, 0);
  847. nvt->cir_wake_addr = pnp_port_start(pdev, 1);
  848. /* irq is always shared between cir and cir wake */
  849. nvt->cir_wake_irq = nvt->cir_irq;
  850. nvt->cr_efir = CR_EFIR;
  851. nvt->cr_efdr = CR_EFDR;
  852. spin_lock_init(&nvt->nvt_lock);
  853. spin_lock_init(&nvt->tx.lock);
  854. init_ir_raw_event(&nvt->rawir);
  855. ret = -EBUSY;
  856. /* now claim resources */
  857. if (!request_region(nvt->cir_addr,
  858. CIR_IOREG_LENGTH, NVT_DRIVER_NAME))
  859. goto failure;
  860. if (request_irq(nvt->cir_irq, nvt_cir_isr, IRQF_SHARED,
  861. NVT_DRIVER_NAME, (void *)nvt))
  862. goto failure;
  863. if (!request_region(nvt->cir_wake_addr,
  864. CIR_IOREG_LENGTH, NVT_DRIVER_NAME))
  865. goto failure;
  866. if (request_irq(nvt->cir_wake_irq, nvt_cir_wake_isr, IRQF_SHARED,
  867. NVT_DRIVER_NAME, (void *)nvt))
  868. goto failure;
  869. pnp_set_drvdata(pdev, nvt);
  870. nvt->pdev = pdev;
  871. init_waitqueue_head(&nvt->tx.queue);
  872. ret = nvt_hw_detect(nvt);
  873. if (ret)
  874. goto failure;
  875. /* Initialize CIR & CIR Wake Logical Devices */
  876. nvt_efm_enable(nvt);
  877. nvt_cir_ldev_init(nvt);
  878. nvt_cir_wake_ldev_init(nvt);
  879. nvt_efm_disable(nvt);
  880. /* Initialize CIR & CIR Wake Config Registers */
  881. nvt_cir_regs_init(nvt);
  882. nvt_cir_wake_regs_init(nvt);
  883. /* Set up the rc device */
  884. rdev->priv = nvt;
  885. rdev->driver_type = RC_DRIVER_IR_RAW;
  886. rdev->allowed_protos = RC_TYPE_ALL;
  887. rdev->open = nvt_open;
  888. rdev->close = nvt_close;
  889. rdev->tx_ir = nvt_tx_ir;
  890. rdev->s_tx_carrier = nvt_set_tx_carrier;
  891. rdev->input_name = "Nuvoton w836x7hg Infrared Remote Transceiver";
  892. rdev->input_phys = "nuvoton/cir0";
  893. rdev->input_id.bustype = BUS_HOST;
  894. rdev->input_id.vendor = PCI_VENDOR_ID_WINBOND2;
  895. rdev->input_id.product = nvt->chip_major;
  896. rdev->input_id.version = nvt->chip_minor;
  897. rdev->dev.parent = &pdev->dev;
  898. rdev->driver_name = NVT_DRIVER_NAME;
  899. rdev->map_name = RC_MAP_RC6_MCE;
  900. rdev->timeout = MS_TO_NS(100);
  901. /* rx resolution is hardwired to 50us atm, 1, 25, 100 also possible */
  902. rdev->rx_resolution = US_TO_NS(CIR_SAMPLE_PERIOD);
  903. #if 0
  904. rdev->min_timeout = XYZ;
  905. rdev->max_timeout = XYZ;
  906. /* tx bits */
  907. rdev->tx_resolution = XYZ;
  908. #endif
  909. ret = rc_register_device(rdev);
  910. if (ret)
  911. goto failure;
  912. device_init_wakeup(&pdev->dev, true);
  913. nvt->rdev = rdev;
  914. nvt_pr(KERN_NOTICE, "driver has been successfully loaded\n");
  915. if (debug) {
  916. cir_dump_regs(nvt);
  917. cir_wake_dump_regs(nvt);
  918. }
  919. return 0;
  920. failure:
  921. if (nvt->cir_irq)
  922. free_irq(nvt->cir_irq, nvt);
  923. if (nvt->cir_addr)
  924. release_region(nvt->cir_addr, CIR_IOREG_LENGTH);
  925. if (nvt->cir_wake_irq)
  926. free_irq(nvt->cir_wake_irq, nvt);
  927. if (nvt->cir_wake_addr)
  928. release_region(nvt->cir_wake_addr, CIR_IOREG_LENGTH);
  929. rc_free_device(rdev);
  930. kfree(nvt);
  931. return ret;
  932. }
/*
 * PNP remove callback: quiesce the CIR side, leave CIR Wake armed so the
 * remote can still power the machine on, then release all resources.
 * Two free_irq() calls are needed because probe requested the shared IRQ
 * twice (once per ISR), even though cir_irq == cir_wake_irq.
 */
static void __devexit nvt_remove(struct pnp_dev *pdev)
{
	struct nvt_dev *nvt = pnp_get_drvdata(pdev);
	unsigned long flags;

	spin_lock_irqsave(&nvt->nvt_lock, flags);
	/* disable CIR */
	nvt_cir_reg_write(nvt, 0, CIR_IREN);
	nvt_disable_cir(nvt);
	/* enable CIR Wake (for IR power-on) */
	nvt_enable_wake(nvt);
	spin_unlock_irqrestore(&nvt->nvt_lock, flags);

	/* free resources */
	free_irq(nvt->cir_irq, nvt);
	free_irq(nvt->cir_wake_irq, nvt);
	release_region(nvt->cir_addr, CIR_IOREG_LENGTH);
	release_region(nvt->cir_wake_addr, CIR_IOREG_LENGTH);

	rc_unregister_device(nvt->rdev);

	kfree(nvt);
}
/*
 * PNP suspend hook: reset the driver's software state machines, mask CIR
 * interrupts, disable the CIR logical device, and arm CIR Wake so an IR
 * press can resume/power-on the system. Always returns 0.
 */
static int nvt_suspend(struct pnp_dev *pdev, pm_message_t state)
{
	struct nvt_dev *nvt = pnp_get_drvdata(pdev);
	unsigned long flags;

	nvt_dbg("%s called", __func__);

	/* zero out misc state tracking */
	spin_lock_irqsave(&nvt->nvt_lock, flags);
	nvt->study_state = ST_STUDY_NONE;
	nvt->wake_state = ST_WAKE_NONE;
	spin_unlock_irqrestore(&nvt->nvt_lock, flags);

	/* tx state is guarded by its own lock */
	spin_lock_irqsave(&nvt->tx.lock, flags);
	nvt->tx.tx_state = ST_TX_NONE;
	spin_unlock_irqrestore(&nvt->tx.lock, flags);

	/* disable all CIR interrupts */
	nvt_cir_reg_write(nvt, 0, CIR_IREN);

	nvt_efm_enable(nvt);

	/* disable cir logical dev */
	nvt_select_logical_dev(nvt, LOGICAL_DEV_CIR);
	nvt_cr_write(nvt, LOGICAL_DEV_DISABLE, CR_LOGICAL_DEV_EN);

	nvt_efm_disable(nvt);

	/* make sure wake is enabled */
	nvt_enable_wake(nvt);

	return 0;
}
/*
 * PNP resume hook: unmask CIR interrupts, re-enable the CIR logical
 * device, and reprogram the CIR and CIR Wake config registers that may
 * have been lost across the power transition. Always returns 0.
 */
static int nvt_resume(struct pnp_dev *pdev)
{
	int ret = 0;
	struct nvt_dev *nvt = pnp_get_drvdata(pdev);

	nvt_dbg("%s called", __func__);

	/* open interrupt */
	nvt_set_cir_iren(nvt);

	/* Enable CIR logical device */
	nvt_efm_enable(nvt);
	nvt_select_logical_dev(nvt, LOGICAL_DEV_CIR);
	nvt_cr_write(nvt, LOGICAL_DEV_ENABLE, CR_LOGICAL_DEV_EN);

	nvt_efm_disable(nvt);

	/* restore register state torn down at suspend */
	nvt_cir_regs_init(nvt);
	nvt_cir_wake_regs_init(nvt);

	return ret;
}
  992. static void nvt_shutdown(struct pnp_dev *pdev)
  993. {
  994. struct nvt_dev *nvt = pnp_get_drvdata(pdev);
  995. nvt_enable_wake(nvt);
  996. }
/* PNP IDs this driver binds to; the empty entry terminates the table */
static const struct pnp_device_id nvt_ids[] = {
	{ "WEC0530", 0 },   /* CIR */
	{ "NTN0530", 0 },   /* CIR for new chip's pnp id*/
	{ "", 0 },
};
/*
 * PNP driver glue. RES_DO_NOT_CHANGE tells the PNP core not to
 * reconfigure the device's resources before probe.
 */
static struct pnp_driver nvt_driver = {
	.name		= NVT_DRIVER_NAME,
	.id_table	= nvt_ids,
	.flags		= PNP_DRIVER_RES_DO_NOT_CHANGE,
	.probe		= nvt_probe,
	.remove		= __devexit_p(nvt_remove),
	.suspend	= nvt_suspend,
	.resume		= nvt_resume,
	.shutdown	= nvt_shutdown,
};
/*
 * Module entry point: register the PNP driver.
 * NOTE(review): not static and not __init-annotated -- presumably
 * referenced from a header elsewhere; confirm, otherwise
 * `static int __init` would be conventional.
 */
int nvt_init(void)
{
	return pnp_register_driver(&nvt_driver);
}
/* Module exit point: unregister the PNP driver (see note on nvt_init) */
void nvt_exit(void)
{
	pnp_unregister_driver(&nvt_driver);
}
/* "debug" module parameter: readable by all, writable by root (0644-ish) */
module_param(debug, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(debug, "Enable debugging output");

/* export the PNP ID table for module autoloading */
MODULE_DEVICE_TABLE(pnp, nvt_ids);
MODULE_DESCRIPTION("Nuvoton W83667HG-A & W83677HG-I CIR driver");

MODULE_AUTHOR("Jarod Wilson <jarod@redhat.com>");
MODULE_LICENSE("GPL");

module_init(nvt_init);
module_exit(nvt_exit);