i2c-at91.c
  1. /*
  2. * i2c Support for Atmel's AT91 Two-Wire Interface (TWI)
  3. *
  4. * Copyright (C) 2011 Weinmann Medical GmbH
  5. * Author: Nikolaus Voss <n.voss@weinmann.de>
  6. *
  7. * Evolved from original work by:
  8. * Copyright (C) 2004 Rick Bronson
  9. * Converted to 2.6 by Andrew Victor <andrew@sanpeople.com>
  10. *
  11. * Borrowed heavily from original work by:
  12. * Copyright (C) 2000 Philip Edelbrock <phil@stimpy.netroedge.com>
  13. *
  14. * This program is free software; you can redistribute it and/or modify
  15. * it under the terms of the GNU General Public License as published by
  16. * the Free Software Foundation; either version 2 of the License, or
  17. * (at your option) any later version.
  18. */
  19. #include <linux/clk.h>
  20. #include <linux/completion.h>
  21. #include <linux/dma-mapping.h>
  22. #include <linux/dmaengine.h>
  23. #include <linux/err.h>
  24. #include <linux/i2c.h>
  25. #include <linux/interrupt.h>
  26. #include <linux/io.h>
  27. #include <linux/module.h>
  28. #include <linux/of.h>
  29. #include <linux/of_device.h>
  30. #include <linux/of_i2c.h>
  31. #include <linux/platform_device.h>
  32. #include <linux/slab.h>
  33. #include <linux/platform_data/dma-atmel.h>
  34. #define TWI_CLK_HZ 100000 /* max 400 Kbits/s */
  35. #define AT91_I2C_TIMEOUT msecs_to_jiffies(100) /* transfer timeout */
  36. #define AT91_I2C_DMA_THRESHOLD 8 /* enable DMA if transfer size is bigger than this threshold */
  37. /* AT91 TWI register definitions */
  38. #define AT91_TWI_CR 0x0000 /* Control Register */
  39. #define AT91_TWI_START 0x0001 /* Send a Start Condition */
  40. #define AT91_TWI_STOP 0x0002 /* Send a Stop Condition */
  41. #define AT91_TWI_MSEN 0x0004 /* Master Transfer Enable */
  42. #define AT91_TWI_SVDIS 0x0020 /* Slave Transfer Disable */
  43. #define AT91_TWI_QUICK 0x0040 /* SMBus quick command */
  44. #define AT91_TWI_SWRST 0x0080 /* Software Reset */
  45. #define AT91_TWI_MMR 0x0004 /* Master Mode Register */
  46. #define AT91_TWI_IADRSZ_1 0x0100 /* Internal Device Address Size */
  47. #define AT91_TWI_MREAD 0x1000 /* Master Read Direction */
  48. #define AT91_TWI_IADR 0x000c /* Internal Address Register */
  49. #define AT91_TWI_CWGR 0x0010 /* Clock Waveform Generator Reg */
  50. #define AT91_TWI_SR 0x0020 /* Status Register */
  51. #define AT91_TWI_TXCOMP 0x0001 /* Transmission Complete */
  52. #define AT91_TWI_RXRDY 0x0002 /* Receive Holding Register Ready */
  53. #define AT91_TWI_TXRDY 0x0004 /* Transmit Holding Register Ready */
  54. #define AT91_TWI_OVRE 0x0040 /* Overrun Error */
  55. #define AT91_TWI_UNRE 0x0080 /* Underrun Error */
  56. #define AT91_TWI_NACK 0x0100 /* Not Acknowledged */
  57. #define AT91_TWI_IER 0x0024 /* Interrupt Enable Register */
  58. #define AT91_TWI_IDR 0x0028 /* Interrupt Disable Register */
  59. #define AT91_TWI_IMR 0x002c /* Interrupt Mask Register */
  60. #define AT91_TWI_RHR 0x0030 /* Receive Holding Register */
  61. #define AT91_TWI_THR 0x0034 /* Transmit Holding Register */
  62. struct at91_twi_pdata {
  63. unsigned clk_max_div;
  64. unsigned clk_offset;
  65. bool has_unre_flag;
  66. bool has_dma_support;
  67. struct at_dma_slave dma_slave;
  68. };
  69. struct at91_twi_dma {
  70. struct dma_chan *chan_rx;
  71. struct dma_chan *chan_tx;
  72. struct scatterlist sg;
  73. struct dma_async_tx_descriptor *data_desc;
  74. enum dma_data_direction direction;
  75. bool buf_mapped;
  76. bool xfer_in_progress;
  77. };
  78. struct at91_twi_dev {
  79. struct device *dev;
  80. void __iomem *base;
  81. struct completion cmd_complete;
  82. struct clk *clk;
  83. u8 *buf;
  84. size_t buf_len;
  85. struct i2c_msg *msg;
  86. int irq;
  87. unsigned imr;
  88. unsigned transfer_status;
  89. struct i2c_adapter adapter;
  90. unsigned twi_cwgr_reg;
  91. struct at91_twi_pdata *pdata;
  92. bool use_dma;
  93. struct at91_twi_dma dma;
  94. };
  95. static unsigned at91_twi_read(struct at91_twi_dev *dev, unsigned reg)
  96. {
  97. return readl_relaxed(dev->base + reg);
  98. }
  99. static void at91_twi_write(struct at91_twi_dev *dev, unsigned reg, unsigned val)
  100. {
  101. writel_relaxed(val, dev->base + reg);
  102. }
  103. static void at91_disable_twi_interrupts(struct at91_twi_dev *dev)
  104. {
  105. at91_twi_write(dev, AT91_TWI_IDR,
  106. AT91_TWI_TXCOMP | AT91_TWI_RXRDY | AT91_TWI_TXRDY);
  107. }
  108. static void at91_twi_irq_save(struct at91_twi_dev *dev)
  109. {
  110. dev->imr = at91_twi_read(dev, AT91_TWI_IMR) & 0x7;
  111. at91_disable_twi_interrupts(dev);
  112. }
  113. static void at91_twi_irq_restore(struct at91_twi_dev *dev)
  114. {
  115. at91_twi_write(dev, AT91_TWI_IER, dev->imr);
  116. }
  117. static void at91_init_twi_bus(struct at91_twi_dev *dev)
  118. {
  119. at91_disable_twi_interrupts(dev);
  120. at91_twi_write(dev, AT91_TWI_CR, AT91_TWI_SWRST);
  121. at91_twi_write(dev, AT91_TWI_CR, AT91_TWI_MSEN);
  122. at91_twi_write(dev, AT91_TWI_CR, AT91_TWI_SVDIS);
  123. at91_twi_write(dev, AT91_TWI_CWGR, dev->twi_cwgr_reg);
  124. }
  125. /*
  126. * Calculate symmetric clock as stated in datasheet:
  127. * twi_clk = F_MAIN / (2 * (cdiv * (1 << ckdiv) + offset))
  128. */
  129. static void at91_calc_twi_clock(struct at91_twi_dev *dev, int twi_clk)
  130. {
  131. int ckdiv, cdiv, div;
  132. struct at91_twi_pdata *pdata = dev->pdata;
  133. int offset = pdata->clk_offset;
  134. int max_ckdiv = pdata->clk_max_div;
  135. div = max(0, (int)DIV_ROUND_UP(clk_get_rate(dev->clk),
  136. 2 * twi_clk) - offset);
  137. ckdiv = fls(div >> 8);
  138. cdiv = div >> ckdiv;
  139. if (ckdiv > max_ckdiv) {
  140. dev_warn(dev->dev, "%d exceeds ckdiv max value which is %d.\n",
  141. ckdiv, max_ckdiv);
  142. ckdiv = max_ckdiv;
  143. cdiv = 255;
  144. }
  145. dev->twi_cwgr_reg = (ckdiv << 16) | (cdiv << 8) | cdiv;
  146. dev_dbg(dev->dev, "cdiv %d ckdiv %d\n", cdiv, ckdiv);
  147. }
  148. static void at91_twi_dma_cleanup(struct at91_twi_dev *dev)
  149. {
  150. struct at91_twi_dma *dma = &dev->dma;
  151. at91_twi_irq_save(dev);
  152. if (dma->xfer_in_progress) {
  153. if (dma->direction == DMA_FROM_DEVICE)
  154. dmaengine_terminate_all(dma->chan_rx);
  155. else
  156. dmaengine_terminate_all(dma->chan_tx);
  157. dma->xfer_in_progress = false;
  158. }
  159. if (dma->buf_mapped) {
  160. dma_unmap_single(dev->dev, sg_dma_address(&dma->sg),
  161. dev->buf_len, dma->direction);
  162. dma->buf_mapped = false;
  163. }
  164. at91_twi_irq_restore(dev);
  165. }
  166. static void at91_twi_write_next_byte(struct at91_twi_dev *dev)
  167. {
  168. if (dev->buf_len <= 0)
  169. return;
  170. at91_twi_write(dev, AT91_TWI_THR, *dev->buf);
  171. /* send stop when last byte has been written */
  172. if (--dev->buf_len == 0)
  173. at91_twi_write(dev, AT91_TWI_CR, AT91_TWI_STOP);
  174. dev_dbg(dev->dev, "wrote 0x%x, to go %d\n", *dev->buf, dev->buf_len);
  175. ++dev->buf;
  176. }
  177. static void at91_twi_write_data_dma_callback(void *data)
  178. {
  179. struct at91_twi_dev *dev = (struct at91_twi_dev *)data;
  180. dma_unmap_single(dev->dev, sg_dma_address(&dev->dma.sg),
  181. dev->buf_len, DMA_MEM_TO_DEV);
  182. at91_twi_write(dev, AT91_TWI_CR, AT91_TWI_STOP);
  183. }
  184. static void at91_twi_write_data_dma(struct at91_twi_dev *dev)
  185. {
  186. dma_addr_t dma_addr;
  187. struct dma_async_tx_descriptor *txdesc;
  188. struct at91_twi_dma *dma = &dev->dma;
  189. struct dma_chan *chan_tx = dma->chan_tx;
  190. if (dev->buf_len <= 0)
  191. return;
  192. dma->direction = DMA_TO_DEVICE;
  193. at91_twi_irq_save(dev);
  194. dma_addr = dma_map_single(dev->dev, dev->buf, dev->buf_len,
  195. DMA_TO_DEVICE);
  196. if (dma_mapping_error(dev->dev, dma_addr)) {
  197. dev_err(dev->dev, "dma map failed\n");
  198. return;
  199. }
  200. dma->buf_mapped = true;
  201. at91_twi_irq_restore(dev);
  202. sg_dma_len(&dma->sg) = dev->buf_len;
  203. sg_dma_address(&dma->sg) = dma_addr;
  204. txdesc = dmaengine_prep_slave_sg(chan_tx, &dma->sg, 1, DMA_MEM_TO_DEV,
  205. DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
  206. if (!txdesc) {
  207. dev_err(dev->dev, "dma prep slave sg failed\n");
  208. goto error;
  209. }
  210. txdesc->callback = at91_twi_write_data_dma_callback;
  211. txdesc->callback_param = dev;
  212. dma->xfer_in_progress = true;
  213. dmaengine_submit(txdesc);
  214. dma_async_issue_pending(chan_tx);
  215. return;
  216. error:
  217. at91_twi_dma_cleanup(dev);
  218. }
  219. static void at91_twi_read_next_byte(struct at91_twi_dev *dev)
  220. {
  221. if (dev->buf_len <= 0)
  222. return;
  223. *dev->buf = at91_twi_read(dev, AT91_TWI_RHR) & 0xff;
  224. --dev->buf_len;
  225. /* handle I2C_SMBUS_BLOCK_DATA */
  226. if (unlikely(dev->msg->flags & I2C_M_RECV_LEN)) {
  227. dev->msg->flags &= ~I2C_M_RECV_LEN;
  228. dev->buf_len += *dev->buf;
  229. dev->msg->len = dev->buf_len + 1;
  230. dev_dbg(dev->dev, "received block length %d\n", dev->buf_len);
  231. }
  232. /* send stop if second but last byte has been read */
  233. if (dev->buf_len == 1)
  234. at91_twi_write(dev, AT91_TWI_CR, AT91_TWI_STOP);
  235. dev_dbg(dev->dev, "read 0x%x, to go %d\n", *dev->buf, dev->buf_len);
  236. ++dev->buf;
  237. }
  238. static void at91_twi_read_data_dma_callback(void *data)
  239. {
  240. struct at91_twi_dev *dev = (struct at91_twi_dev *)data;
  241. dma_unmap_single(dev->dev, sg_dma_address(&dev->dma.sg),
  242. dev->buf_len, DMA_DEV_TO_MEM);
  243. /* The last two bytes have to be read without using dma */
  244. dev->buf += dev->buf_len - 2;
  245. dev->buf_len = 2;
  246. at91_twi_write(dev, AT91_TWI_IER, AT91_TWI_RXRDY);
  247. }
  248. static void at91_twi_read_data_dma(struct at91_twi_dev *dev)
  249. {
  250. dma_addr_t dma_addr;
  251. struct dma_async_tx_descriptor *rxdesc;
  252. struct at91_twi_dma *dma = &dev->dma;
  253. struct dma_chan *chan_rx = dma->chan_rx;
  254. dma->direction = DMA_FROM_DEVICE;
  255. /* Keep in mind that we won't use dma to read the last two bytes */
  256. at91_twi_irq_save(dev);
  257. dma_addr = dma_map_single(dev->dev, dev->buf, dev->buf_len - 2,
  258. DMA_FROM_DEVICE);
  259. if (dma_mapping_error(dev->dev, dma_addr)) {
  260. dev_err(dev->dev, "dma map failed\n");
  261. return;
  262. }
  263. dma->buf_mapped = true;
  264. at91_twi_irq_restore(dev);
  265. dma->sg.dma_address = dma_addr;
  266. sg_dma_len(&dma->sg) = dev->buf_len - 2;
  267. rxdesc = dmaengine_prep_slave_sg(chan_rx, &dma->sg, 1, DMA_DEV_TO_MEM,
  268. DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
  269. if (!rxdesc) {
  270. dev_err(dev->dev, "dma prep slave sg failed\n");
  271. goto error;
  272. }
  273. rxdesc->callback = at91_twi_read_data_dma_callback;
  274. rxdesc->callback_param = dev;
  275. dma->xfer_in_progress = true;
  276. dmaengine_submit(rxdesc);
  277. dma_async_issue_pending(dma->chan_rx);
  278. return;
  279. error:
  280. at91_twi_dma_cleanup(dev);
  281. }
  282. static irqreturn_t atmel_twi_interrupt(int irq, void *dev_id)
  283. {
  284. struct at91_twi_dev *dev = dev_id;
  285. const unsigned status = at91_twi_read(dev, AT91_TWI_SR);
  286. const unsigned irqstatus = status & at91_twi_read(dev, AT91_TWI_IMR);
  287. if (!irqstatus)
  288. return IRQ_NONE;
  289. else if (irqstatus & AT91_TWI_RXRDY)
  290. at91_twi_read_next_byte(dev);
  291. else if (irqstatus & AT91_TWI_TXRDY)
  292. at91_twi_write_next_byte(dev);
  293. /* catch error flags */
  294. dev->transfer_status |= status;
  295. if (irqstatus & AT91_TWI_TXCOMP) {
  296. at91_disable_twi_interrupts(dev);
  297. complete(&dev->cmd_complete);
  298. }
  299. return IRQ_HANDLED;
  300. }
  301. static int at91_do_twi_transfer(struct at91_twi_dev *dev)
  302. {
  303. int ret;
  304. bool has_unre_flag = dev->pdata->has_unre_flag;
  305. dev_dbg(dev->dev, "transfer: %s %d bytes.\n",
  306. (dev->msg->flags & I2C_M_RD) ? "read" : "write", dev->buf_len);
  307. INIT_COMPLETION(dev->cmd_complete);
  308. dev->transfer_status = 0;
  309. if (!dev->buf_len) {
  310. at91_twi_write(dev, AT91_TWI_CR, AT91_TWI_QUICK);
  311. at91_twi_write(dev, AT91_TWI_IER, AT91_TWI_TXCOMP);
  312. } else if (dev->msg->flags & I2C_M_RD) {
  313. unsigned start_flags = AT91_TWI_START;
  314. if (at91_twi_read(dev, AT91_TWI_SR) & AT91_TWI_RXRDY) {
  315. dev_err(dev->dev, "RXRDY still set!");
  316. at91_twi_read(dev, AT91_TWI_RHR);
  317. }
  318. /* if only one byte is to be read, immediately stop transfer */
  319. if (dev->buf_len <= 1 && !(dev->msg->flags & I2C_M_RECV_LEN))
  320. start_flags |= AT91_TWI_STOP;
  321. at91_twi_write(dev, AT91_TWI_CR, start_flags);
  322. /*
  323. * When using dma, the last byte has to be read manually in
  324. * order to not send the stop command too late and then
  325. * to receive extra data. In practice, there are some issues
  326. * if you use the dma to read n-1 bytes because of latency.
  327. * Reading n-2 bytes with dma and the two last ones manually
  328. * seems to be the best solution.
  329. */
  330. if (dev->use_dma && (dev->buf_len > AT91_I2C_DMA_THRESHOLD)) {
  331. at91_twi_read_data_dma(dev);
  332. /*
  333. * It is important to enable TXCOMP irq here because
  334. * doing it only when transferring the last two bytes
  335. * will mask NACK errors since TXCOMP is set when a
  336. * NACK occurs.
  337. */
  338. at91_twi_write(dev, AT91_TWI_IER,
  339. AT91_TWI_TXCOMP);
  340. } else
  341. at91_twi_write(dev, AT91_TWI_IER,
  342. AT91_TWI_TXCOMP | AT91_TWI_RXRDY);
  343. } else {
  344. if (dev->use_dma && (dev->buf_len > AT91_I2C_DMA_THRESHOLD)) {
  345. at91_twi_write_data_dma(dev);
  346. at91_twi_write(dev, AT91_TWI_IER, AT91_TWI_TXCOMP);
  347. } else {
  348. at91_twi_write_next_byte(dev);
  349. at91_twi_write(dev, AT91_TWI_IER,
  350. AT91_TWI_TXCOMP | AT91_TWI_TXRDY);
  351. }
  352. }
  353. ret = wait_for_completion_interruptible_timeout(&dev->cmd_complete,
  354. dev->adapter.timeout);
  355. if (ret == 0) {
  356. dev_err(dev->dev, "controller timed out\n");
  357. at91_init_twi_bus(dev);
  358. ret = -ETIMEDOUT;
  359. goto error;
  360. }
  361. if (dev->transfer_status & AT91_TWI_NACK) {
  362. dev_dbg(dev->dev, "received nack\n");
  363. ret = -EREMOTEIO;
  364. goto error;
  365. }
  366. if (dev->transfer_status & AT91_TWI_OVRE) {
  367. dev_err(dev->dev, "overrun while reading\n");
  368. ret = -EIO;
  369. goto error;
  370. }
  371. if (has_unre_flag && dev->transfer_status & AT91_TWI_UNRE) {
  372. dev_err(dev->dev, "underrun while writing\n");
  373. ret = -EIO;
  374. goto error;
  375. }
  376. dev_dbg(dev->dev, "transfer complete\n");
  377. return 0;
  378. error:
  379. at91_twi_dma_cleanup(dev);
  380. return ret;
  381. }
  382. static int at91_twi_xfer(struct i2c_adapter *adap, struct i2c_msg *msg, int num)
  383. {
  384. struct at91_twi_dev *dev = i2c_get_adapdata(adap);
  385. int ret;
  386. unsigned int_addr_flag = 0;
  387. struct i2c_msg *m_start = msg;
  388. dev_dbg(&adap->dev, "at91_xfer: processing %d messages:\n", num);
  389. /*
  390. * The hardware can handle at most two messages concatenated by a
  391. * repeated start via it's internal address feature.
  392. */
  393. if (num > 2) {
  394. dev_err(dev->dev,
  395. "cannot handle more than two concatenated messages.\n");
  396. return 0;
  397. } else if (num == 2) {
  398. int internal_address = 0;
  399. int i;
  400. if (msg->flags & I2C_M_RD) {
  401. dev_err(dev->dev, "first transfer must be write.\n");
  402. return -EINVAL;
  403. }
  404. if (msg->len > 3) {
  405. dev_err(dev->dev, "first message size must be <= 3.\n");
  406. return -EINVAL;
  407. }
  408. /* 1st msg is put into the internal address, start with 2nd */
  409. m_start = &msg[1];
  410. for (i = 0; i < msg->len; ++i) {
  411. const unsigned addr = msg->buf[msg->len - 1 - i];
  412. internal_address |= addr << (8 * i);
  413. int_addr_flag += AT91_TWI_IADRSZ_1;
  414. }
  415. at91_twi_write(dev, AT91_TWI_IADR, internal_address);
  416. }
  417. at91_twi_write(dev, AT91_TWI_MMR, (m_start->addr << 16) | int_addr_flag
  418. | ((m_start->flags & I2C_M_RD) ? AT91_TWI_MREAD : 0));
  419. dev->buf_len = m_start->len;
  420. dev->buf = m_start->buf;
  421. dev->msg = m_start;
  422. ret = at91_do_twi_transfer(dev);
  423. return (ret < 0) ? ret : num;
  424. }
  425. static u32 at91_twi_func(struct i2c_adapter *adapter)
  426. {
  427. return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL
  428. | I2C_FUNC_SMBUS_READ_BLOCK_DATA;
  429. }
  430. static struct i2c_algorithm at91_twi_algorithm = {
  431. .master_xfer = at91_twi_xfer,
  432. .functionality = at91_twi_func,
  433. };
  434. static struct at91_twi_pdata at91rm9200_config = {
  435. .clk_max_div = 5,
  436. .clk_offset = 3,
  437. .has_unre_flag = true,
  438. .has_dma_support = false,
  439. };
  440. static struct at91_twi_pdata at91sam9261_config = {
  441. .clk_max_div = 5,
  442. .clk_offset = 4,
  443. .has_unre_flag = false,
  444. .has_dma_support = false,
  445. };
  446. static struct at91_twi_pdata at91sam9260_config = {
  447. .clk_max_div = 7,
  448. .clk_offset = 4,
  449. .has_unre_flag = false,
  450. .has_dma_support = false,
  451. };
  452. static struct at91_twi_pdata at91sam9g20_config = {
  453. .clk_max_div = 7,
  454. .clk_offset = 4,
  455. .has_unre_flag = false,
  456. .has_dma_support = false,
  457. };
  458. static struct at91_twi_pdata at91sam9g10_config = {
  459. .clk_max_div = 7,
  460. .clk_offset = 4,
  461. .has_unre_flag = false,
  462. .has_dma_support = false,
  463. };
  464. static const struct platform_device_id at91_twi_devtypes[] = {
  465. {
  466. .name = "i2c-at91rm9200",
  467. .driver_data = (unsigned long) &at91rm9200_config,
  468. }, {
  469. .name = "i2c-at91sam9261",
  470. .driver_data = (unsigned long) &at91sam9261_config,
  471. }, {
  472. .name = "i2c-at91sam9260",
  473. .driver_data = (unsigned long) &at91sam9260_config,
  474. }, {
  475. .name = "i2c-at91sam9g20",
  476. .driver_data = (unsigned long) &at91sam9g20_config,
  477. }, {
  478. .name = "i2c-at91sam9g10",
  479. .driver_data = (unsigned long) &at91sam9g10_config,
  480. }, {
  481. /* sentinel */
  482. }
  483. };
  484. #if defined(CONFIG_OF)
  485. static struct at91_twi_pdata at91sam9x5_config = {
  486. .clk_max_div = 7,
  487. .clk_offset = 4,
  488. .has_unre_flag = false,
  489. .has_dma_support = true,
  490. };
  491. static const struct of_device_id atmel_twi_dt_ids[] = {
  492. {
  493. .compatible = "atmel,at91rm9200-i2c",
  494. .data = &at91rm9200_config,
  495. } , {
  496. .compatible = "atmel,at91sam9260-i2c",
  497. .data = &at91sam9260_config,
  498. } , {
  499. .compatible = "atmel,at91sam9g20-i2c",
  500. .data = &at91sam9g20_config,
  501. } , {
  502. .compatible = "atmel,at91sam9g10-i2c",
  503. .data = &at91sam9g10_config,
  504. }, {
  505. .compatible = "atmel,at91sam9x5-i2c",
  506. .data = &at91sam9x5_config,
  507. }, {
  508. /* sentinel */
  509. }
  510. };
  511. MODULE_DEVICE_TABLE(of, atmel_twi_dt_ids);
  512. #else
  513. #define atmel_twi_dt_ids NULL
  514. #endif
  515. static bool filter(struct dma_chan *chan, void *slave)
  516. {
  517. struct at_dma_slave *sl = slave;
  518. if (sl->dma_dev == chan->device->dev) {
  519. chan->private = sl;
  520. return true;
  521. } else {
  522. return false;
  523. }
  524. }
  525. static int at91_twi_configure_dma(struct at91_twi_dev *dev, u32 phy_addr)
  526. {
  527. int ret = 0;
  528. struct at_dma_slave *sdata;
  529. struct dma_slave_config slave_config;
  530. struct at91_twi_dma *dma = &dev->dma;
  531. sdata = &dev->pdata->dma_slave;
  532. memset(&slave_config, 0, sizeof(slave_config));
  533. slave_config.src_addr = (dma_addr_t)phy_addr + AT91_TWI_RHR;
  534. slave_config.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
  535. slave_config.src_maxburst = 1;
  536. slave_config.dst_addr = (dma_addr_t)phy_addr + AT91_TWI_THR;
  537. slave_config.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
  538. slave_config.dst_maxburst = 1;
  539. slave_config.device_fc = false;
  540. if (sdata && sdata->dma_dev) {
  541. dma_cap_mask_t mask;
  542. dma_cap_zero(mask);
  543. dma_cap_set(DMA_SLAVE, mask);
  544. dma->chan_tx = dma_request_channel(mask, filter, sdata);
  545. if (!dma->chan_tx) {
  546. dev_err(dev->dev, "no DMA channel available for tx\n");
  547. ret = -EBUSY;
  548. goto error;
  549. }
  550. dma->chan_rx = dma_request_channel(mask, filter, sdata);
  551. if (!dma->chan_rx) {
  552. dev_err(dev->dev, "no DMA channel available for rx\n");
  553. ret = -EBUSY;
  554. goto error;
  555. }
  556. } else {
  557. ret = -EINVAL;
  558. goto error;
  559. }
  560. slave_config.direction = DMA_MEM_TO_DEV;
  561. if (dmaengine_slave_config(dma->chan_tx, &slave_config)) {
  562. dev_err(dev->dev, "failed to configure tx channel\n");
  563. ret = -EINVAL;
  564. goto error;
  565. }
  566. slave_config.direction = DMA_DEV_TO_MEM;
  567. if (dmaengine_slave_config(dma->chan_rx, &slave_config)) {
  568. dev_err(dev->dev, "failed to configure rx channel\n");
  569. ret = -EINVAL;
  570. goto error;
  571. }
  572. sg_init_table(&dma->sg, 1);
  573. dma->buf_mapped = false;
  574. dma->xfer_in_progress = false;
  575. dev_info(dev->dev, "using %s (tx) and %s (rx) for DMA transfers\n",
  576. dma_chan_name(dma->chan_tx), dma_chan_name(dma->chan_rx));
  577. return ret;
  578. error:
  579. dev_info(dev->dev, "can't use DMA\n");
  580. if (dma->chan_rx)
  581. dma_release_channel(dma->chan_rx);
  582. if (dma->chan_tx)
  583. dma_release_channel(dma->chan_tx);
  584. return ret;
  585. }
  586. static struct at91_twi_pdata *at91_twi_get_driver_data(
  587. struct platform_device *pdev)
  588. {
  589. if (pdev->dev.of_node) {
  590. const struct of_device_id *match;
  591. match = of_match_node(atmel_twi_dt_ids, pdev->dev.of_node);
  592. if (!match)
  593. return NULL;
  594. return (struct at91_twi_pdata *)match->data;
  595. }
  596. return (struct at91_twi_pdata *) platform_get_device_id(pdev)->driver_data;
  597. }
  598. static int at91_twi_probe(struct platform_device *pdev)
  599. {
  600. struct at91_twi_dev *dev;
  601. struct resource *mem;
  602. int rc;
  603. u32 phy_addr;
  604. dev = devm_kzalloc(&pdev->dev, sizeof(*dev), GFP_KERNEL);
  605. if (!dev)
  606. return -ENOMEM;
  607. init_completion(&dev->cmd_complete);
  608. dev->dev = &pdev->dev;
  609. mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
  610. if (!mem)
  611. return -ENODEV;
  612. phy_addr = mem->start;
  613. dev->pdata = at91_twi_get_driver_data(pdev);
  614. if (!dev->pdata)
  615. return -ENODEV;
  616. dev->base = devm_ioremap_resource(&pdev->dev, mem);
  617. if (IS_ERR(dev->base))
  618. return PTR_ERR(dev->base);
  619. dev->irq = platform_get_irq(pdev, 0);
  620. if (dev->irq < 0)
  621. return dev->irq;
  622. rc = devm_request_irq(&pdev->dev, dev->irq, atmel_twi_interrupt, 0,
  623. dev_name(dev->dev), dev);
  624. if (rc) {
  625. dev_err(dev->dev, "Cannot get irq %d: %d\n", dev->irq, rc);
  626. return rc;
  627. }
  628. platform_set_drvdata(pdev, dev);
  629. dev->clk = devm_clk_get(dev->dev, NULL);
  630. if (IS_ERR(dev->clk)) {
  631. dev_err(dev->dev, "no clock defined\n");
  632. return -ENODEV;
  633. }
  634. clk_prepare_enable(dev->clk);
  635. if (dev->pdata->has_dma_support) {
  636. if (at91_twi_configure_dma(dev, phy_addr) == 0)
  637. dev->use_dma = true;
  638. }
  639. at91_calc_twi_clock(dev, TWI_CLK_HZ);
  640. at91_init_twi_bus(dev);
  641. snprintf(dev->adapter.name, sizeof(dev->adapter.name), "AT91");
  642. i2c_set_adapdata(&dev->adapter, dev);
  643. dev->adapter.owner = THIS_MODULE;
  644. dev->adapter.class = I2C_CLASS_HWMON;
  645. dev->adapter.algo = &at91_twi_algorithm;
  646. dev->adapter.dev.parent = dev->dev;
  647. dev->adapter.nr = pdev->id;
  648. dev->adapter.timeout = AT91_I2C_TIMEOUT;
  649. dev->adapter.dev.of_node = pdev->dev.of_node;
  650. rc = i2c_add_numbered_adapter(&dev->adapter);
  651. if (rc) {
  652. dev_err(dev->dev, "Adapter %s registration failed\n",
  653. dev->adapter.name);
  654. clk_disable_unprepare(dev->clk);
  655. return rc;
  656. }
  657. of_i2c_register_devices(&dev->adapter);
  658. dev_info(dev->dev, "AT91 i2c bus driver.\n");
  659. return 0;
  660. }
  661. static int at91_twi_remove(struct platform_device *pdev)
  662. {
  663. struct at91_twi_dev *dev = platform_get_drvdata(pdev);
  664. int rc;
  665. rc = i2c_del_adapter(&dev->adapter);
  666. clk_disable_unprepare(dev->clk);
  667. return rc;
  668. }
  669. #ifdef CONFIG_PM
  670. static int at91_twi_runtime_suspend(struct device *dev)
  671. {
  672. struct at91_twi_dev *twi_dev = dev_get_drvdata(dev);
  673. clk_disable(twi_dev->clk);
  674. return 0;
  675. }
  676. static int at91_twi_runtime_resume(struct device *dev)
  677. {
  678. struct at91_twi_dev *twi_dev = dev_get_drvdata(dev);
  679. return clk_enable(twi_dev->clk);
  680. }
  681. static const struct dev_pm_ops at91_twi_pm = {
  682. .runtime_suspend = at91_twi_runtime_suspend,
  683. .runtime_resume = at91_twi_runtime_resume,
  684. };
  685. #define at91_twi_pm_ops (&at91_twi_pm)
  686. #else
  687. #define at91_twi_pm_ops NULL
  688. #endif
  689. static struct platform_driver at91_twi_driver = {
  690. .probe = at91_twi_probe,
  691. .remove = at91_twi_remove,
  692. .id_table = at91_twi_devtypes,
  693. .driver = {
  694. .name = "at91_i2c",
  695. .owner = THIS_MODULE,
  696. .of_match_table = atmel_twi_dt_ids,
  697. .pm = at91_twi_pm_ops,
  698. },
  699. };
  700. static int __init at91_twi_init(void)
  701. {
  702. return platform_driver_register(&at91_twi_driver);
  703. }
  704. static void __exit at91_twi_exit(void)
  705. {
  706. platform_driver_unregister(&at91_twi_driver);
  707. }
  708. subsys_initcall(at91_twi_init);
  709. module_exit(at91_twi_exit);
  710. MODULE_AUTHOR("Nikolaus Voss <n.voss@weinmann.de>");
  711. MODULE_DESCRIPTION("I2C (TWI) driver for Atmel AT91");
  712. MODULE_LICENSE("GPL");
  713. MODULE_ALIAS("platform:at91_i2c");