ezx-pcap.c

/*
 * Driver for Motorola PCAP2 as present in EZX phones
 *
 * Copyright (C) 2006 Harald Welte <laforge@openezx.org>
 * Copyright (C) 2009 Daniel Ribeiro <drwyrm@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/mfd/ezx-pcap.h>
#include <linux/spi/spi.h>
#include <linux/gpio.h>
#include <linux/slab.h>

#define PCAP_ADC_MAXQ		8

struct pcap_adc_request {
	u8 bank;
	u8 ch[2];
	u32 flags;
	void (*callback)(void *, u16[]);
	void *data;
};

struct pcap_adc_sync_request {
	u16 res[2];
	struct completion completion;
};

struct pcap_chip {
	struct spi_device *spi;

	/* IO */
	u32 buf;
	struct mutex io_mutex;

	/* IRQ */
	unsigned int irq_base;
	u32 msr;
	struct work_struct isr_work;
	struct work_struct msr_work;
	struct workqueue_struct *workqueue;

	/* ADC */
	struct pcap_adc_request *adc_queue[PCAP_ADC_MAXQ];
	u8 adc_head;
	u8 adc_tail;
	struct mutex adc_mutex;
};

/* IO */
static int ezx_pcap_putget(struct pcap_chip *pcap, u32 *data)
{
	struct spi_transfer t;
	struct spi_message m;
	int status;

	memset(&t, 0, sizeof t);
	spi_message_init(&m);
	t.len = sizeof(u32);
	spi_message_add_tail(&t, &m);

	pcap->buf = *data;
	t.tx_buf = (u8 *) &pcap->buf;
	t.rx_buf = (u8 *) &pcap->buf;
	status = spi_sync(pcap->spi, &m);

	if (status == 0)
		*data = pcap->buf;

	return status;
}
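
/*
 * Note added for clarity (inferred from the accessors below and the masks
 * in <linux/mfd/ezx-pcap.h>): every PCAP access is a single full-duplex
 * 32-bit SPI frame.  The accessors build the frame by OR-ing the
 * read/write opcode bit and the register number (shifted by
 * PCAP_REGISTER_ADDRESS_SHIFT) into a payload limited to
 * PCAP_REGISTER_VALUE_MASK; ezx_pcap_putget() then clocks the frame out
 * and reads the chip's reply back into the same buffer.
 */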

int ezx_pcap_write(struct pcap_chip *pcap, u8 reg_num, u32 value)
{
	int ret;

	mutex_lock(&pcap->io_mutex);
	value &= PCAP_REGISTER_VALUE_MASK;
	value |= PCAP_REGISTER_WRITE_OP_BIT
		| (reg_num << PCAP_REGISTER_ADDRESS_SHIFT);
	ret = ezx_pcap_putget(pcap, &value);
	mutex_unlock(&pcap->io_mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(ezx_pcap_write);

int ezx_pcap_read(struct pcap_chip *pcap, u8 reg_num, u32 *value)
{
	int ret;

	mutex_lock(&pcap->io_mutex);
	*value = PCAP_REGISTER_READ_OP_BIT
		| (reg_num << PCAP_REGISTER_ADDRESS_SHIFT);
	ret = ezx_pcap_putget(pcap, value);
	mutex_unlock(&pcap->io_mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(ezx_pcap_read);

int ezx_pcap_set_bits(struct pcap_chip *pcap, u8 reg_num, u32 mask, u32 val)
{
	int ret;
	u32 tmp = PCAP_REGISTER_READ_OP_BIT |
		(reg_num << PCAP_REGISTER_ADDRESS_SHIFT);

	mutex_lock(&pcap->io_mutex);
	ret = ezx_pcap_putget(pcap, &tmp);
	if (ret)
		goto out_unlock;

	tmp &= (PCAP_REGISTER_VALUE_MASK & ~mask);
	tmp |= (val & mask) | PCAP_REGISTER_WRITE_OP_BIT |
		(reg_num << PCAP_REGISTER_ADDRESS_SHIFT);

	ret = ezx_pcap_putget(pcap, &tmp);
out_unlock:
	mutex_unlock(&pcap->io_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(ezx_pcap_set_bits);
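
/*
 * Usage sketch (not part of the original file): a subdevice driver that
 * has been handed the struct pcap_chip pointer can access registers with
 * the accessors above.  PCAP_REG_MSR is only used as a convenient example
 * register, and error handling is elided:
 *
 *	u32 val;
 *
 *	if (!ezx_pcap_read(pcap, PCAP_REG_MSR, &val))
 *		ezx_pcap_write(pcap, PCAP_REG_MSR, val | BIT(0));
 *
 *	ezx_pcap_set_bits(pcap, PCAP_REG_MSR, BIT(1) | BIT(0), BIT(0));
 */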

/* IRQ */
int irq_to_pcap(struct pcap_chip *pcap, int irq)
{
	return irq - pcap->irq_base;
}
EXPORT_SYMBOL_GPL(irq_to_pcap);

int pcap_to_irq(struct pcap_chip *pcap, int irq)
{
	return pcap->irq_base + irq;
}
EXPORT_SYMBOL_GPL(pcap_to_irq);
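
/*
 * Usage sketch (not part of the original file): subdevice drivers are
 * expected to translate a PCAP interrupt number into a Linux irq with
 * pcap_to_irq() before requesting it, much like the ADC path below does:
 *
 *	int irq = pcap_to_irq(pcap, PCAP_IRQ_ADCDONE);
 *	int err = request_irq(irq, my_handler, 0, "my-pcap-user", priv);
 *
 * my_handler, "my-pcap-user" and priv are placeholders; irq_to_pcap()
 * performs the reverse mapping.
 */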

static void pcap_mask_irq(unsigned int irq)
{
	struct pcap_chip *pcap = get_irq_chip_data(irq);

	pcap->msr |= 1 << irq_to_pcap(pcap, irq);
	queue_work(pcap->workqueue, &pcap->msr_work);
}

static void pcap_unmask_irq(unsigned int irq)
{
	struct pcap_chip *pcap = get_irq_chip_data(irq);

	pcap->msr &= ~(1 << irq_to_pcap(pcap, irq));
	queue_work(pcap->workqueue, &pcap->msr_work);
}

static struct irq_chip pcap_irq_chip = {
	.name	= "pcap",
	.mask	= pcap_mask_irq,
	.unmask	= pcap_unmask_irq,
};

static void pcap_msr_work(struct work_struct *work)
{
	struct pcap_chip *pcap = container_of(work, struct pcap_chip, msr_work);

	ezx_pcap_write(pcap, PCAP_REG_MSR, pcap->msr);
}

static void pcap_isr_work(struct work_struct *work)
{
	struct pcap_chip *pcap = container_of(work, struct pcap_chip, isr_work);
	struct pcap_platform_data *pdata = pcap->spi->dev.platform_data;
	u32 msr, isr, int_sel, service;
	int irq;

	do {
		ezx_pcap_read(pcap, PCAP_REG_MSR, &msr);
		ezx_pcap_read(pcap, PCAP_REG_ISR, &isr);

		/* We can't service/ack IRQs that are assigned to port 2 */
		if (!(pdata->config & PCAP_SECOND_PORT)) {
			ezx_pcap_read(pcap, PCAP_REG_INT_SEL, &int_sel);
			isr &= ~int_sel;
		}

		/* mask the interrupts being serviced and ack them */
		ezx_pcap_write(pcap, PCAP_REG_MSR, isr | msr);
		ezx_pcap_write(pcap, PCAP_REG_ISR, isr);

		local_irq_disable();
		service = isr & ~msr;
		for (irq = pcap->irq_base; service; service >>= 1, irq++) {
			if (service & 1) {
				struct irq_desc *desc = irq_to_desc(irq);

				if (WARN(!desc, KERN_WARNING
						"Invalid PCAP IRQ %d\n", irq))
					break;

				if (desc->status & IRQ_DISABLED)
					note_interrupt(irq, desc, IRQ_NONE);
				else
					desc->handle_irq(irq, desc);
			}
		}
		local_irq_enable();
		ezx_pcap_write(pcap, PCAP_REG_MSR, pcap->msr);
	} while (gpio_get_value(irq_to_gpio(pcap->spi->irq)));
}

static void pcap_irq_handler(unsigned int irq, struct irq_desc *desc)
{
	struct pcap_chip *pcap = get_irq_data(irq);

	desc->chip->ack(irq);
	queue_work(pcap->workqueue, &pcap->isr_work);
	return;
}

/* ADC */
void pcap_set_ts_bits(struct pcap_chip *pcap, u32 bits)
{
	u32 tmp;

	mutex_lock(&pcap->adc_mutex);
	ezx_pcap_read(pcap, PCAP_REG_ADC, &tmp);
	tmp &= ~(PCAP_ADC_TS_M_MASK | PCAP_ADC_TS_REF_LOWPWR);
	tmp |= bits & (PCAP_ADC_TS_M_MASK | PCAP_ADC_TS_REF_LOWPWR);
	ezx_pcap_write(pcap, PCAP_REG_ADC, tmp);
	mutex_unlock(&pcap->adc_mutex);
}
EXPORT_SYMBOL_GPL(pcap_set_ts_bits);
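
/*
 * Usage sketch (not part of the original file): a touchscreen driver can
 * switch the touchscreen interface mode without disturbing the rest of
 * the ADC register, e.g.:
 *
 *	pcap_set_ts_bits(pcap, mode_bits & PCAP_ADC_TS_M_MASK);
 *
 * mode_bits is a placeholder for board/driver specific TS_M settings.
 */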

static void pcap_disable_adc(struct pcap_chip *pcap)
{
	u32 tmp;

	ezx_pcap_read(pcap, PCAP_REG_ADC, &tmp);
	tmp &= ~(PCAP_ADC_ADEN|PCAP_ADC_BATT_I_ADC|PCAP_ADC_BATT_I_POLARITY);
	ezx_pcap_write(pcap, PCAP_REG_ADC, tmp);
}

static void pcap_adc_trigger(struct pcap_chip *pcap)
{
	u32 tmp;
	u8 head;

	mutex_lock(&pcap->adc_mutex);
	head = pcap->adc_head;
	if (!pcap->adc_queue[head]) {
		/* queue is empty, save power */
		pcap_disable_adc(pcap);
		mutex_unlock(&pcap->adc_mutex);
		return;
	}
	/* start conversion on requested bank, save TS_M bits */
	ezx_pcap_read(pcap, PCAP_REG_ADC, &tmp);
	tmp &= (PCAP_ADC_TS_M_MASK | PCAP_ADC_TS_REF_LOWPWR);
	tmp |= pcap->adc_queue[head]->flags | PCAP_ADC_ADEN;

	if (pcap->adc_queue[head]->bank == PCAP_ADC_BANK_1)
		tmp |= PCAP_ADC_AD_SEL1;

	ezx_pcap_write(pcap, PCAP_REG_ADC, tmp);
	mutex_unlock(&pcap->adc_mutex);
	ezx_pcap_write(pcap, PCAP_REG_ADR, PCAP_ADR_ASC);
}

static irqreturn_t pcap_adc_irq(int irq, void *_pcap)
{
	struct pcap_chip *pcap = _pcap;
	struct pcap_adc_request *req;
	u16 res[2];
	u32 tmp;

	mutex_lock(&pcap->adc_mutex);
	req = pcap->adc_queue[pcap->adc_head];

	if (WARN(!req, KERN_WARNING "adc irq without pending request\n")) {
		mutex_unlock(&pcap->adc_mutex);
		return IRQ_HANDLED;
	}

	/* read the requested channels' results */
	ezx_pcap_read(pcap, PCAP_REG_ADC, &tmp);
	tmp &= ~(PCAP_ADC_ADA1_MASK | PCAP_ADC_ADA2_MASK);
	tmp |= (req->ch[0] << PCAP_ADC_ADA1_SHIFT);
	tmp |= (req->ch[1] << PCAP_ADC_ADA2_SHIFT);
	ezx_pcap_write(pcap, PCAP_REG_ADC, tmp);
	ezx_pcap_read(pcap, PCAP_REG_ADR, &tmp);
	res[0] = (tmp & PCAP_ADR_ADD1_MASK) >> PCAP_ADR_ADD1_SHIFT;
	res[1] = (tmp & PCAP_ADR_ADD2_MASK) >> PCAP_ADR_ADD2_SHIFT;

	pcap->adc_queue[pcap->adc_head] = NULL;
	pcap->adc_head = (pcap->adc_head + 1) & (PCAP_ADC_MAXQ - 1);
	mutex_unlock(&pcap->adc_mutex);

	/* pass the results and release memory */
	req->callback(req->data, res);
	kfree(req);

	/* trigger next conversion (if any) on queue */
	pcap_adc_trigger(pcap);

	return IRQ_HANDLED;
}

int pcap_adc_async(struct pcap_chip *pcap, u8 bank, u32 flags, u8 ch[],
						void *callback, void *data)
{
	struct pcap_adc_request *req;

	/* This will be freed after we have a result */
	req = kmalloc(sizeof(struct pcap_adc_request), GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	req->bank = bank;
	req->flags = flags;
	req->ch[0] = ch[0];
	req->ch[1] = ch[1];
	req->callback = callback;
	req->data = data;

	mutex_lock(&pcap->adc_mutex);
	if (pcap->adc_queue[pcap->adc_tail]) {
		mutex_unlock(&pcap->adc_mutex);
		kfree(req);
		return -EBUSY;
	}
	pcap->adc_queue[pcap->adc_tail] = req;
	pcap->adc_tail = (pcap->adc_tail + 1) & (PCAP_ADC_MAXQ - 1);
	mutex_unlock(&pcap->adc_mutex);

	/* start conversion */
	pcap_adc_trigger(pcap);

	return 0;
}
EXPORT_SYMBOL_GPL(pcap_adc_async);
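
/*
 * Usage sketch (not part of the original file): queue a two-channel
 * conversion and collect both results in a callback.  The bank constant
 * and channel numbers are illustrative only:
 *
 *	static void my_adc_done(void *data, u16 res[])
 *	{
 *		pr_info("ch0=%u ch1=%u\n", res[0], res[1]);
 *	}
 *
 *	u8 ch[2] = { 0, 1 };
 *	pcap_adc_async(pcap, PCAP_ADC_BANK_1, 0, ch, my_adc_done, NULL);
 *
 * The request structure is freed by pcap_adc_irq() after the callback runs.
 */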

static void pcap_adc_sync_cb(void *param, u16 res[])
{
	struct pcap_adc_sync_request *req = param;

	req->res[0] = res[0];
	req->res[1] = res[1];
	complete(&req->completion);
}

int pcap_adc_sync(struct pcap_chip *pcap, u8 bank, u32 flags, u8 ch[],
								u16 res[])
{
	struct pcap_adc_sync_request sync_data;
	int ret;

	init_completion(&sync_data.completion);
	ret = pcap_adc_async(pcap, bank, flags, ch, pcap_adc_sync_cb,
								&sync_data);
	if (ret)
		return ret;
	wait_for_completion(&sync_data.completion);
	res[0] = sync_data.res[0];
	res[1] = sync_data.res[1];

	return 0;
}
EXPORT_SYMBOL_GPL(pcap_adc_sync);
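
/*
 * Usage sketch (not part of the original file): the synchronous variant
 * sleeps until the conversion completes, so it may only be called from
 * process context:
 *
 *	u8 ch[2] = { 0, 1 };
 *	u16 res[2];
 *
 *	if (!pcap_adc_sync(pcap, PCAP_ADC_BANK_1, 0, ch, res))
 *		pr_info("res0=%u res1=%u\n", res[0], res[1]);
 */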

/* subdevs */
static int pcap_remove_subdev(struct device *dev, void *unused)
{
	platform_device_unregister(to_platform_device(dev));
	return 0;
}

static int __devinit pcap_add_subdev(struct pcap_chip *pcap,
						struct pcap_subdev *subdev)
{
	struct platform_device *pdev;
	int ret;

	pdev = platform_device_alloc(subdev->name, subdev->id);
	if (!pdev)
		return -ENOMEM;

	pdev->dev.parent = &pcap->spi->dev;
	pdev->dev.platform_data = subdev->platform_data;

	ret = platform_device_add(pdev);
	if (ret)
		platform_device_put(pdev);

	return ret;
}

static int __devexit ezx_pcap_remove(struct spi_device *spi)
{
	struct pcap_chip *pcap = dev_get_drvdata(&spi->dev);
	struct pcap_platform_data *pdata = spi->dev.platform_data;
	int i, adc_irq;

	/* remove all registered subdevs */
	device_for_each_child(&spi->dev, NULL, pcap_remove_subdev);

	/* cleanup ADC */
	adc_irq = pcap_to_irq(pcap, (pdata->config & PCAP_SECOND_PORT) ?
				PCAP_IRQ_ADCDONE2 : PCAP_IRQ_ADCDONE);
	free_irq(adc_irq, pcap);
	mutex_lock(&pcap->adc_mutex);
	for (i = 0; i < PCAP_ADC_MAXQ; i++)
		kfree(pcap->adc_queue[i]);
	mutex_unlock(&pcap->adc_mutex);

	/* cleanup irqchip */
	for (i = pcap->irq_base; i < (pcap->irq_base + PCAP_NIRQS); i++)
		set_irq_chip_and_handler(i, NULL, NULL);

	destroy_workqueue(pcap->workqueue);

	kfree(pcap);

	return 0;
}

static int __devinit ezx_pcap_probe(struct spi_device *spi)
{
	struct pcap_platform_data *pdata = spi->dev.platform_data;
	struct pcap_chip *pcap;
	int i, adc_irq;
	int ret = -ENODEV;

	/* platform data is required */
	if (!pdata)
		goto ret;

	pcap = kzalloc(sizeof(*pcap), GFP_KERNEL);
	if (!pcap) {
		ret = -ENOMEM;
		goto ret;
	}

	mutex_init(&pcap->io_mutex);
	mutex_init(&pcap->adc_mutex);
	INIT_WORK(&pcap->isr_work, pcap_isr_work);
	INIT_WORK(&pcap->msr_work, pcap_msr_work);
	dev_set_drvdata(&spi->dev, pcap);

	/* setup spi */
	spi->bits_per_word = 32;
	spi->mode = SPI_MODE_0 | (pdata->config & PCAP_CS_AH ? SPI_CS_HIGH : 0);
	ret = spi_setup(spi);
	if (ret)
		goto free_pcap;

	pcap->spi = spi;

	/* setup irq */
	pcap->irq_base = pdata->irq_base;
	pcap->workqueue = create_singlethread_workqueue("pcapd");
	if (!pcap->workqueue) {
		ret = -ENOMEM;
		dev_err(&spi->dev, "can't create pcap thread\n");
		goto free_pcap;
	}

	/* redirect interrupts to AP, except adcdone2 */
	if (!(pdata->config & PCAP_SECOND_PORT))
		ezx_pcap_write(pcap, PCAP_REG_INT_SEL,
					(1 << PCAP_IRQ_ADCDONE2));

	/* setup irq chip */
	for (i = pcap->irq_base; i < (pcap->irq_base + PCAP_NIRQS); i++) {
		set_irq_chip_and_handler(i, &pcap_irq_chip, handle_simple_irq);
		set_irq_chip_data(i, pcap);
#ifdef CONFIG_ARM
		set_irq_flags(i, IRQF_VALID);
#else
		set_irq_noprobe(i);
#endif
	}

	/* mask/ack all PCAP interrupts */
	ezx_pcap_write(pcap, PCAP_REG_MSR, PCAP_MASK_ALL_INTERRUPT);
	ezx_pcap_write(pcap, PCAP_REG_ISR, PCAP_CLEAR_INTERRUPT_REGISTER);
	pcap->msr = PCAP_MASK_ALL_INTERRUPT;

	set_irq_type(spi->irq, IRQ_TYPE_EDGE_RISING);
	set_irq_data(spi->irq, pcap);
	set_irq_chained_handler(spi->irq, pcap_irq_handler);
	set_irq_wake(spi->irq, 1);

	/* ADC */
	adc_irq = pcap_to_irq(pcap, (pdata->config & PCAP_SECOND_PORT) ?
					PCAP_IRQ_ADCDONE2 : PCAP_IRQ_ADCDONE);

	ret = request_irq(adc_irq, pcap_adc_irq, 0, "ADC", pcap);
	if (ret)
		goto free_irqchip;

	/* setup subdevs */
	for (i = 0; i < pdata->num_subdevs; i++) {
		ret = pcap_add_subdev(pcap, &pdata->subdevs[i]);
		if (ret)
			goto remove_subdevs;
	}

	/* board specific quirks */
	if (pdata->init)
		pdata->init(pcap);

	return 0;

remove_subdevs:
	device_for_each_child(&spi->dev, NULL, pcap_remove_subdev);
/* free_adc: */
	free_irq(adc_irq, pcap);
free_irqchip:
	for (i = pcap->irq_base; i < (pcap->irq_base + PCAP_NIRQS); i++)
		set_irq_chip_and_handler(i, NULL, NULL);
/* destroy_workqueue: */
	destroy_workqueue(pcap->workqueue);
free_pcap:
	kfree(pcap);
ret:
	return ret;
}

static struct spi_driver ezxpcap_driver = {
	.probe	= ezx_pcap_probe,
	.remove	= __devexit_p(ezx_pcap_remove),
	.driver	= {
		.name	= "ezx-pcap",
		.owner	= THIS_MODULE,
	},
};

static int __init ezx_pcap_init(void)
{
	return spi_register_driver(&ezxpcap_driver);
}

static void __exit ezx_pcap_exit(void)
{
	spi_unregister_driver(&ezxpcap_driver);
}

subsys_initcall(ezx_pcap_init);
module_exit(ezx_pcap_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Daniel Ribeiro / Harald Welte");
MODULE_DESCRIPTION("Motorola PCAP2 ASIC Driver");
MODULE_ALIAS("spi:ezx-pcap");