/*
 * Driver for Motorola PCAP2 as present in EZX phones
 *
 * Copyright (C) 2006 Harald Welte <laforge@openezx.org>
 * Copyright (C) 2009 Daniel Ribeiro <drwyrm@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/mfd/ezx-pcap.h>
#include <linux/spi/spi.h>
#include <linux/gpio.h>
#include <linux/slab.h>

#define PCAP_ADC_MAXQ		8

struct pcap_adc_request {
	u8 bank;
	u8 ch[2];
	u32 flags;
	void (*callback)(void *, u16[]);
	void *data;
};

struct pcap_adc_sync_request {
	u16 res[2];
	struct completion completion;
};

struct pcap_chip {
	struct spi_device *spi;

	/* IO */
	u32 buf;
	struct mutex io_mutex;

	/* IRQ */
	unsigned int irq_base;
	u32 msr;
	struct work_struct isr_work;
	struct work_struct msr_work;
	struct workqueue_struct *workqueue;

	/* ADC */
	struct pcap_adc_request *adc_queue[PCAP_ADC_MAXQ];
	u8 adc_head;
	u8 adc_tail;
	struct mutex adc_mutex;
};

/* IO */
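/*
 * Each register access is a single full-duplex 32-bit SPI transfer: the
 * word shifted out carries the read/write flag, the register address and
 * (for writes) the value.  TX and RX share the same buffer, so on success
 * the reply overwrites the command word.
 */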
static int ezx_pcap_putget(struct pcap_chip *pcap, u32 *data)
{
	struct spi_transfer t;
	struct spi_message m;
	int status;

	memset(&t, 0, sizeof t);
	spi_message_init(&m);
	t.len = sizeof(u32);
	spi_message_add_tail(&t, &m);

	pcap->buf = *data;
	t.tx_buf = (u8 *) &pcap->buf;
	t.rx_buf = (u8 *) &pcap->buf;
	status = spi_sync(pcap->spi, &m);

	if (status == 0)
		*data = pcap->buf;

	return status;
}

int ezx_pcap_write(struct pcap_chip *pcap, u8 reg_num, u32 value)
{
	int ret;

	mutex_lock(&pcap->io_mutex);
	value &= PCAP_REGISTER_VALUE_MASK;
	value |= PCAP_REGISTER_WRITE_OP_BIT
		| (reg_num << PCAP_REGISTER_ADDRESS_SHIFT);
	ret = ezx_pcap_putget(pcap, &value);
	mutex_unlock(&pcap->io_mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(ezx_pcap_write);

int ezx_pcap_read(struct pcap_chip *pcap, u8 reg_num, u32 *value)
{
	int ret;

	mutex_lock(&pcap->io_mutex);
	*value = PCAP_REGISTER_READ_OP_BIT
		| (reg_num << PCAP_REGISTER_ADDRESS_SHIFT);
	ret = ezx_pcap_putget(pcap, value);
	mutex_unlock(&pcap->io_mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(ezx_pcap_read);

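/*
 * Read-modify-write helper: fetch the register, replace only the bits
 * selected by @mask with the corresponding bits of @val, and write the
 * result back, all under the same io_mutex as the plain read/write paths.
 */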
int ezx_pcap_set_bits(struct pcap_chip *pcap, u8 reg_num, u32 mask, u32 val)
{
	int ret;
	u32 tmp = PCAP_REGISTER_READ_OP_BIT |
		(reg_num << PCAP_REGISTER_ADDRESS_SHIFT);

	mutex_lock(&pcap->io_mutex);
	ret = ezx_pcap_putget(pcap, &tmp);
	if (ret)
		goto out_unlock;

	tmp &= (PCAP_REGISTER_VALUE_MASK & ~mask);
	tmp |= (val & mask) | PCAP_REGISTER_WRITE_OP_BIT |
		(reg_num << PCAP_REGISTER_ADDRESS_SHIFT);

	ret = ezx_pcap_putget(pcap, &tmp);
out_unlock:
	mutex_unlock(&pcap->io_mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(ezx_pcap_set_bits);

/* IRQ */
int irq_to_pcap(struct pcap_chip *pcap, int irq)
{
	return irq - pcap->irq_base;
}
EXPORT_SYMBOL_GPL(irq_to_pcap);

int pcap_to_irq(struct pcap_chip *pcap, int irq)
{
	return pcap->irq_base + irq;
}
EXPORT_SYMBOL_GPL(pcap_to_irq);

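/*
 * Masking/unmasking only updates the cached MSR value; the actual register
 * write is deferred to the workqueue, where the (possibly sleeping) SPI
 * transfer can be issued safely.
 */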
static void pcap_mask_irq(struct irq_data *d)
{
	struct pcap_chip *pcap = irq_data_get_irq_chip_data(d);

	pcap->msr |= 1 << irq_to_pcap(pcap, d->irq);
	queue_work(pcap->workqueue, &pcap->msr_work);
}

static void pcap_unmask_irq(struct irq_data *d)
{
	struct pcap_chip *pcap = irq_data_get_irq_chip_data(d);

	pcap->msr &= ~(1 << irq_to_pcap(pcap, d->irq));
	queue_work(pcap->workqueue, &pcap->msr_work);
}

static struct irq_chip pcap_irq_chip = {
	.name = "pcap",
	.irq_mask = pcap_mask_irq,
	.irq_unmask = pcap_unmask_irq,
};

static void pcap_msr_work(struct work_struct *work)
{
	struct pcap_chip *pcap = container_of(work, struct pcap_chip, msr_work);

	ezx_pcap_write(pcap, PCAP_REG_MSR, pcap->msr);
}

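/*
 * Demultiplex the single PCAP interrupt line: read MSR/ISR over SPI, ack
 * the pending bits, and hand each unmasked source to its virtual irq
 * handler.  The loop repeats while the interrupt GPIO stays asserted.
 */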
static void pcap_isr_work(struct work_struct *work)
{
	struct pcap_chip *pcap = container_of(work, struct pcap_chip, isr_work);
	struct pcap_platform_data *pdata = pcap->spi->dev.platform_data;
	u32 msr, isr, int_sel, service;
	int irq;

	do {
		ezx_pcap_read(pcap, PCAP_REG_MSR, &msr);
		ezx_pcap_read(pcap, PCAP_REG_ISR, &isr);

		/* We can't service/ack irqs that are assigned to port 2 */
		if (!(pdata->config & PCAP_SECOND_PORT)) {
			ezx_pcap_read(pcap, PCAP_REG_INT_SEL, &int_sel);
			isr &= ~int_sel;
		}

		ezx_pcap_write(pcap, PCAP_REG_MSR, isr | msr);
		ezx_pcap_write(pcap, PCAP_REG_ISR, isr);

		local_irq_disable();
		service = isr & ~msr;
		for (irq = pcap->irq_base; service; service >>= 1, irq++) {
			if (service & 1) {
				struct irq_desc *desc = irq_to_desc(irq);

				if (WARN(!desc, "Invalid PCAP IRQ %d\n", irq))
					break;

				if (desc->status & IRQ_DISABLED)
					note_interrupt(irq, desc, IRQ_NONE);
				else
					desc->handle_irq(irq, desc);
			}
		}
		local_irq_enable();
		ezx_pcap_write(pcap, PCAP_REG_MSR, pcap->msr);
	} while (gpio_get_value(irq_to_gpio(pcap->spi->irq)));
}

static void pcap_irq_handler(unsigned int irq, struct irq_desc *desc)
{
	struct pcap_chip *pcap = get_irq_data(irq);

	desc->irq_data.chip->irq_ack(&desc->irq_data);
	queue_work(pcap->workqueue, &pcap->isr_work);
	return;
}

/* ADC */
void pcap_set_ts_bits(struct pcap_chip *pcap, u32 bits)
{
	u32 tmp;

	mutex_lock(&pcap->adc_mutex);
	ezx_pcap_read(pcap, PCAP_REG_ADC, &tmp);
	tmp &= ~(PCAP_ADC_TS_M_MASK | PCAP_ADC_TS_REF_LOWPWR);
	tmp |= bits & (PCAP_ADC_TS_M_MASK | PCAP_ADC_TS_REF_LOWPWR);
	ezx_pcap_write(pcap, PCAP_REG_ADC, tmp);
	mutex_unlock(&pcap->adc_mutex);
}
EXPORT_SYMBOL_GPL(pcap_set_ts_bits);

static void pcap_disable_adc(struct pcap_chip *pcap)
{
	u32 tmp;

	ezx_pcap_read(pcap, PCAP_REG_ADC, &tmp);
	tmp &= ~(PCAP_ADC_ADEN|PCAP_ADC_BATT_I_ADC|PCAP_ADC_BATT_I_POLARITY);
	ezx_pcap_write(pcap, PCAP_REG_ADC, tmp);
}

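/*
 * Start a conversion for the request at the head of the queue, or power
 * the ADC down if the queue is empty.
 */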
static void pcap_adc_trigger(struct pcap_chip *pcap)
{
	u32 tmp;
	u8 head;

	mutex_lock(&pcap->adc_mutex);
	head = pcap->adc_head;
	if (!pcap->adc_queue[head]) {
		/* queue is empty, save power */
		pcap_disable_adc(pcap);
		mutex_unlock(&pcap->adc_mutex);
		return;
	}
	/* start conversion on requested bank, save TS_M bits */
	ezx_pcap_read(pcap, PCAP_REG_ADC, &tmp);
	tmp &= (PCAP_ADC_TS_M_MASK | PCAP_ADC_TS_REF_LOWPWR);
	tmp |= pcap->adc_queue[head]->flags | PCAP_ADC_ADEN;

	if (pcap->adc_queue[head]->bank == PCAP_ADC_BANK_1)
		tmp |= PCAP_ADC_AD_SEL1;

	ezx_pcap_write(pcap, PCAP_REG_ADC, tmp);
	mutex_unlock(&pcap->adc_mutex);
	ezx_pcap_write(pcap, PCAP_REG_ADR, PCAP_ADR_ASC);
}

static irqreturn_t pcap_adc_irq(int irq, void *_pcap)
{
	struct pcap_chip *pcap = _pcap;
	struct pcap_adc_request *req;
	u16 res[2];
	u32 tmp;

	mutex_lock(&pcap->adc_mutex);
	req = pcap->adc_queue[pcap->adc_head];

	if (WARN(!req, "adc irq without pending request\n")) {
		mutex_unlock(&pcap->adc_mutex);
		return IRQ_HANDLED;
	}

	/* read requested channels results */
	ezx_pcap_read(pcap, PCAP_REG_ADC, &tmp);
	tmp &= ~(PCAP_ADC_ADA1_MASK | PCAP_ADC_ADA2_MASK);
	tmp |= (req->ch[0] << PCAP_ADC_ADA1_SHIFT);
	tmp |= (req->ch[1] << PCAP_ADC_ADA2_SHIFT);
	ezx_pcap_write(pcap, PCAP_REG_ADC, tmp);
	ezx_pcap_read(pcap, PCAP_REG_ADR, &tmp);
	res[0] = (tmp & PCAP_ADR_ADD1_MASK) >> PCAP_ADR_ADD1_SHIFT;
	res[1] = (tmp & PCAP_ADR_ADD2_MASK) >> PCAP_ADR_ADD2_SHIFT;

	pcap->adc_queue[pcap->adc_head] = NULL;
	pcap->adc_head = (pcap->adc_head + 1) & (PCAP_ADC_MAXQ - 1);
	mutex_unlock(&pcap->adc_mutex);

	/* pass the results and release memory */
	req->callback(req->data, res);
	kfree(req);

	/* trigger next conversion (if any) on queue */
	pcap_adc_trigger(pcap);

	return IRQ_HANDLED;
}

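/*
 * Queue an asynchronous two-channel conversion; the results are delivered
 * to @callback from the ADCDONE interrupt path, after which the request
 * is freed.
 */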
int pcap_adc_async(struct pcap_chip *pcap, u8 bank, u32 flags, u8 ch[],
		   void *callback, void *data)
{
	struct pcap_adc_request *req;

	/* This will be freed after we have a result */
	req = kmalloc(sizeof(struct pcap_adc_request), GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	req->bank = bank;
	req->flags = flags;
	req->ch[0] = ch[0];
	req->ch[1] = ch[1];
	req->callback = callback;
	req->data = data;

	mutex_lock(&pcap->adc_mutex);
	if (pcap->adc_queue[pcap->adc_tail]) {
		mutex_unlock(&pcap->adc_mutex);
		kfree(req);
		return -EBUSY;
	}
	pcap->adc_queue[pcap->adc_tail] = req;
	pcap->adc_tail = (pcap->adc_tail + 1) & (PCAP_ADC_MAXQ - 1);
	mutex_unlock(&pcap->adc_mutex);

	/* start conversion */
	pcap_adc_trigger(pcap);

	return 0;
}
EXPORT_SYMBOL_GPL(pcap_adc_async);

static void pcap_adc_sync_cb(void *param, u16 res[])
{
	struct pcap_adc_sync_request *req = param;

	req->res[0] = res[0];
	req->res[1] = res[1];
	complete(&req->completion);
}

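/*
 * Synchronous wrapper around pcap_adc_async(): sleeps on a completion
 * until the callback delivers the two conversion results.
 */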
int pcap_adc_sync(struct pcap_chip *pcap, u8 bank, u32 flags, u8 ch[],
		  u16 res[])
{
	struct pcap_adc_sync_request sync_data;
	int ret;

	init_completion(&sync_data.completion);
	ret = pcap_adc_async(pcap, bank, flags, ch, pcap_adc_sync_cb,
			     &sync_data);
	if (ret)
		return ret;
	wait_for_completion(&sync_data.completion);
	res[0] = sync_data.res[0];
	res[1] = sync_data.res[1];

	return 0;
}
EXPORT_SYMBOL_GPL(pcap_adc_sync);

/* subdevs */
static int pcap_remove_subdev(struct device *dev, void *unused)
{
	platform_device_unregister(to_platform_device(dev));
	return 0;
}

static int __devinit pcap_add_subdev(struct pcap_chip *pcap,
				     struct pcap_subdev *subdev)
{
	struct platform_device *pdev;
	int ret;

	pdev = platform_device_alloc(subdev->name, subdev->id);
	if (!pdev)
		return -ENOMEM;

	pdev->dev.parent = &pcap->spi->dev;
	pdev->dev.platform_data = subdev->platform_data;

	ret = platform_device_add(pdev);
	if (ret)
		platform_device_put(pdev);

	return ret;
}

static int __devexit ezx_pcap_remove(struct spi_device *spi)
{
	struct pcap_chip *pcap = dev_get_drvdata(&spi->dev);
	struct pcap_platform_data *pdata = spi->dev.platform_data;
	int i, adc_irq;

	/* remove all registered subdevs */
	device_for_each_child(&spi->dev, NULL, pcap_remove_subdev);

	/* cleanup ADC */
	adc_irq = pcap_to_irq(pcap, (pdata->config & PCAP_SECOND_PORT) ?
				PCAP_IRQ_ADCDONE2 : PCAP_IRQ_ADCDONE);
	free_irq(adc_irq, pcap);
	mutex_lock(&pcap->adc_mutex);
	for (i = 0; i < PCAP_ADC_MAXQ; i++)
		kfree(pcap->adc_queue[i]);
	mutex_unlock(&pcap->adc_mutex);

	/* cleanup irqchip */
	for (i = pcap->irq_base; i < (pcap->irq_base + PCAP_NIRQS); i++)
		set_irq_chip_and_handler(i, NULL, NULL);

	destroy_workqueue(pcap->workqueue);

	kfree(pcap);

	return 0;
}

static int __devinit ezx_pcap_probe(struct spi_device *spi)
{
	struct pcap_platform_data *pdata = spi->dev.platform_data;
	struct pcap_chip *pcap;
	int i, adc_irq;
	int ret = -ENODEV;

	/* platform data is required */
	if (!pdata)
		goto ret;

	pcap = kzalloc(sizeof(*pcap), GFP_KERNEL);
	if (!pcap) {
		ret = -ENOMEM;
		goto ret;
	}

	mutex_init(&pcap->io_mutex);
	mutex_init(&pcap->adc_mutex);
	INIT_WORK(&pcap->isr_work, pcap_isr_work);
	INIT_WORK(&pcap->msr_work, pcap_msr_work);
	dev_set_drvdata(&spi->dev, pcap);

	/* setup spi */
	spi->bits_per_word = 32;
	spi->mode = SPI_MODE_0 | (pdata->config & PCAP_CS_AH ? SPI_CS_HIGH : 0);
	ret = spi_setup(spi);
	if (ret)
		goto free_pcap;

	pcap->spi = spi;

	/* setup irq */
	pcap->irq_base = pdata->irq_base;
	pcap->workqueue = create_singlethread_workqueue("pcapd");
	if (!pcap->workqueue) {
		ret = -ENOMEM;
		dev_err(&spi->dev, "can't create pcap thread\n");
		goto free_pcap;
	}

	/* redirect interrupts to AP, except adcdone2 */
	if (!(pdata->config & PCAP_SECOND_PORT))
		ezx_pcap_write(pcap, PCAP_REG_INT_SEL,
				(1 << PCAP_IRQ_ADCDONE2));

	/* setup irq chip */
	for (i = pcap->irq_base; i < (pcap->irq_base + PCAP_NIRQS); i++) {
		set_irq_chip_and_handler(i, &pcap_irq_chip, handle_simple_irq);
		set_irq_chip_data(i, pcap);
#ifdef CONFIG_ARM
		set_irq_flags(i, IRQF_VALID);
#else
		set_irq_noprobe(i);
#endif
	}

	/* mask/ack all PCAP interrupts */
	ezx_pcap_write(pcap, PCAP_REG_MSR, PCAP_MASK_ALL_INTERRUPT);
	ezx_pcap_write(pcap, PCAP_REG_ISR, PCAP_CLEAR_INTERRUPT_REGISTER);
	pcap->msr = PCAP_MASK_ALL_INTERRUPT;

	set_irq_type(spi->irq, IRQ_TYPE_EDGE_RISING);
	set_irq_data(spi->irq, pcap);
	set_irq_chained_handler(spi->irq, pcap_irq_handler);
	set_irq_wake(spi->irq, 1);

	/* ADC */
	adc_irq = pcap_to_irq(pcap, (pdata->config & PCAP_SECOND_PORT) ?
				PCAP_IRQ_ADCDONE2 : PCAP_IRQ_ADCDONE);

	ret = request_irq(adc_irq, pcap_adc_irq, 0, "ADC", pcap);
	if (ret)
		goto free_irqchip;

	/* setup subdevs */
	for (i = 0; i < pdata->num_subdevs; i++) {
		ret = pcap_add_subdev(pcap, &pdata->subdevs[i]);
		if (ret)
			goto remove_subdevs;
	}

	/* board specific quirks */
	if (pdata->init)
		pdata->init(pcap);

	return 0;

remove_subdevs:
	device_for_each_child(&spi->dev, NULL, pcap_remove_subdev);
/* free_adc: */
	free_irq(adc_irq, pcap);
free_irqchip:
	for (i = pcap->irq_base; i < (pcap->irq_base + PCAP_NIRQS); i++)
		set_irq_chip_and_handler(i, NULL, NULL);
/* destroy_workqueue: */
	destroy_workqueue(pcap->workqueue);
free_pcap:
	kfree(pcap);
ret:
	return ret;
}

static struct spi_driver ezxpcap_driver = {
	.probe = ezx_pcap_probe,
	.remove = __devexit_p(ezx_pcap_remove),
	.driver = {
		.name = "ezx-pcap",
		.owner = THIS_MODULE,
	},
};

static int __init ezx_pcap_init(void)
{
	return spi_register_driver(&ezxpcap_driver);
}

static void __exit ezx_pcap_exit(void)
{
	spi_unregister_driver(&ezxpcap_driver);
}

subsys_initcall(ezx_pcap_init);
module_exit(ezx_pcap_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Daniel Ribeiro / Harald Welte");
MODULE_DESCRIPTION("Motorola PCAP2 ASIC Driver");
MODULE_ALIAS("spi:ezx-pcap");