/* i2c-at91.c — extracted source follows. */
  1. /*
  2. * i2c Support for Atmel's AT91 Two-Wire Interface (TWI)
  3. *
  4. * Copyright (C) 2011 Weinmann Medical GmbH
  5. * Author: Nikolaus Voss <n.voss@weinmann.de>
  6. *
  7. * Evolved from original work by:
  8. * Copyright (C) 2004 Rick Bronson
  9. * Converted to 2.6 by Andrew Victor <andrew@sanpeople.com>
  10. *
  11. * Borrowed heavily from original work by:
  12. * Copyright (C) 2000 Philip Edelbrock <phil@stimpy.netroedge.com>
  13. *
  14. * This program is free software; you can redistribute it and/or modify
  15. * it under the terms of the GNU General Public License as published by
  16. * the Free Software Foundation; either version 2 of the License, or
  17. * (at your option) any later version.
  18. */
  19. #include <linux/clk.h>
  20. #include <linux/completion.h>
  21. #include <linux/dma-mapping.h>
  22. #include <linux/dmaengine.h>
  23. #include <linux/err.h>
  24. #include <linux/i2c.h>
  25. #include <linux/interrupt.h>
  26. #include <linux/io.h>
  27. #include <linux/module.h>
  28. #include <linux/of.h>
  29. #include <linux/of_device.h>
  30. #include <linux/platform_device.h>
  31. #include <linux/slab.h>
  32. #include <linux/platform_data/dma-atmel.h>
#define TWI_CLK_HZ		100000	/* max 400 Kbits/s */
#define AT91_I2C_TIMEOUT	msecs_to_jiffies(100)	/* transfer timeout */
#define AT91_I2C_DMA_THRESHOLD	8	/* enable DMA if transfer size is bigger than this threshold */

/*
 * AT91 TWI register definitions.
 *
 * NOTE: register offsets and per-register bit masks are interleaved below;
 * e.g. AT91_TWI_MMR (0x0004) is a register offset while AT91_TWI_MSEN
 * (0x0004) is a bit in the Control Register — the equal values are a
 * coincidence, not a relationship.
 */
#define	AT91_TWI_CR		0x0000	/* Control Register */
#define	AT91_TWI_START		0x0001	/* Send a Start Condition */
#define	AT91_TWI_STOP		0x0002	/* Send a Stop Condition */
#define	AT91_TWI_MSEN		0x0004	/* Master Transfer Enable */
#define	AT91_TWI_SVDIS		0x0020	/* Slave Transfer Disable */
#define	AT91_TWI_QUICK		0x0040	/* SMBus quick command */
#define	AT91_TWI_SWRST		0x0080	/* Software Reset */

#define	AT91_TWI_MMR		0x0004	/* Master Mode Register */
#define	AT91_TWI_IADRSZ_1	0x0100	/* Internal Device Address Size */
#define	AT91_TWI_MREAD		0x1000	/* Master Read Direction */

#define	AT91_TWI_IADR		0x000c	/* Internal Address Register */

#define	AT91_TWI_CWGR		0x0010	/* Clock Waveform Generator Reg */

/* Status Register and its bits (shared with IER/IDR/IMR below) */
#define	AT91_TWI_SR		0x0020	/* Status Register */
#define	AT91_TWI_TXCOMP		0x0001	/* Transmission Complete */
#define	AT91_TWI_RXRDY		0x0002	/* Receive Holding Register Ready */
#define	AT91_TWI_TXRDY		0x0004	/* Transmit Holding Register Ready */
#define	AT91_TWI_OVRE		0x0040	/* Overrun Error */
#define	AT91_TWI_UNRE		0x0080	/* Underrun Error */
#define	AT91_TWI_NACK		0x0100	/* Not Acknowledged */

#define	AT91_TWI_IER		0x0024	/* Interrupt Enable Register */
#define	AT91_TWI_IDR		0x0028	/* Interrupt Disable Register */
#define	AT91_TWI_IMR		0x002c	/* Interrupt Mask Register */
#define	AT91_TWI_RHR		0x0030	/* Receive Holding Register */
#define	AT91_TWI_THR		0x0034	/* Transmit Holding Register */
/* Per-SoC configuration data, selected via id table or OF match. */
struct at91_twi_pdata {
	unsigned clk_max_div;		/* maximum allowed CWGR CKDIV value */
	unsigned clk_offset;		/* SoC-specific offset in the clock formula */
	bool has_unre_flag;		/* SR has a valid underrun (UNRE) bit */
	bool has_dma_support;		/* DMA transfers may be configured */
	struct at_dma_slave dma_slave;	/* legacy (non-DT) DMA channel selection */
};

/* DMA state for one controller instance. */
struct at91_twi_dma {
	struct dma_chan *chan_rx;	/* RX slave channel */
	struct dma_chan *chan_tx;	/* TX slave channel */
	struct scatterlist sg;		/* single-entry sg list for the buffer */
	struct dma_async_tx_descriptor *data_desc;
	enum dma_data_direction direction;	/* direction of the mapping in flight */
	bool buf_mapped;		/* dev->buf is currently DMA-mapped */
	bool xfer_in_progress;		/* a descriptor has been submitted */
};

/* Driver state for one TWI controller. */
struct at91_twi_dev {
	struct device *dev;
	void __iomem *base;		/* mapped register window */
	struct completion cmd_complete;	/* signalled from the irq handler on TXCOMP */
	struct clk *clk;
	u8 *buf;			/* cursor into the current message buffer */
	size_t buf_len;			/* bytes remaining in the current message */
	struct i2c_msg *msg;		/* message currently being transferred */
	int irq;
	unsigned imr;			/* irq mask saved by at91_twi_irq_save() */
	unsigned transfer_status;	/* accumulated SR bits for the transfer */
	struct i2c_adapter adapter;
	unsigned twi_cwgr_reg;		/* precomputed CWGR value */
	struct at91_twi_pdata *pdata;
	bool use_dma;
	struct at91_twi_dma dma;
};
  94. static unsigned at91_twi_read(struct at91_twi_dev *dev, unsigned reg)
  95. {
  96. return readl_relaxed(dev->base + reg);
  97. }
  98. static void at91_twi_write(struct at91_twi_dev *dev, unsigned reg, unsigned val)
  99. {
  100. writel_relaxed(val, dev->base + reg);
  101. }
/* Mask the three data-path interrupt sources (TXCOMP, RXRDY, TXRDY). */
static void at91_disable_twi_interrupts(struct at91_twi_dev *dev)
{
	at91_twi_write(dev, AT91_TWI_IDR,
		       AT91_TWI_TXCOMP | AT91_TWI_RXRDY | AT91_TWI_TXRDY);
}
/*
 * Save the currently enabled data-path interrupts and disable them.
 * 0x7 covers exactly TXCOMP (0x1) | RXRDY (0x2) | TXRDY (0x4), the bits
 * at91_twi_irq_restore() re-enables. Pair every call with a restore.
 */
static void at91_twi_irq_save(struct at91_twi_dev *dev)
{
	dev->imr = at91_twi_read(dev, AT91_TWI_IMR) & 0x7;
	at91_disable_twi_interrupts(dev);
}
/* Re-enable the interrupts previously saved by at91_twi_irq_save(). */
static void at91_twi_irq_restore(struct at91_twi_dev *dev)
{
	at91_twi_write(dev, AT91_TWI_IER, dev->imr);
}
/*
 * Reset the controller into a known state: interrupts masked, software
 * reset, master mode enabled, slave mode disabled, clock waveform
 * programmed from the precomputed dev->twi_cwgr_reg. Also used to recover
 * after a timeout. The write order follows the original driver; SWRST
 * must precede the mode and clock setup.
 */
static void at91_init_twi_bus(struct at91_twi_dev *dev)
{
	at91_disable_twi_interrupts(dev);
	at91_twi_write(dev, AT91_TWI_CR, AT91_TWI_SWRST);
	at91_twi_write(dev, AT91_TWI_CR, AT91_TWI_MSEN);
	at91_twi_write(dev, AT91_TWI_CR, AT91_TWI_SVDIS);
	at91_twi_write(dev, AT91_TWI_CWGR, dev->twi_cwgr_reg);
}
/*
 * Calculate symmetric clock as stated in datasheet:
 * twi_clk = F_MAIN / (2 * (cdiv * (1 << ckdiv) + offset))
 *
 * The result is only stored in dev->twi_cwgr_reg; it is written to the
 * hardware by at91_init_twi_bus().
 */
static void at91_calc_twi_clock(struct at91_twi_dev *dev, int twi_clk)
{
	int ckdiv, cdiv, div;
	struct at91_twi_pdata *pdata = dev->pdata;
	int offset = pdata->clk_offset;		/* SoC-specific offset term */
	int max_ckdiv = pdata->clk_max_div;	/* SoC-specific CKDIV limit */

	/* Total divider needed to reach twi_clk from the input clock rate. */
	div = max(0, (int)DIV_ROUND_UP(clk_get_rate(dev->clk),
				       2 * twi_clk) - offset);
	/* Split into power-of-two prescaler (ckdiv) and 8-bit cdiv. */
	ckdiv = fls(div >> 8);
	cdiv = div >> ckdiv;

	/* Clamp to hardware limits; the bus then runs slower than asked. */
	if (ckdiv > max_ckdiv) {
		dev_warn(dev->dev, "%d exceeds ckdiv max value which is %d.\n",
			 ckdiv, max_ckdiv);
		ckdiv = max_ckdiv;
		cdiv = 255;
	}

	/* CWGR: ckdiv at bit 16, cdiv duplicated into both low 8-bit fields
	 * (symmetric high/low waveform). */
	dev->twi_cwgr_reg = (ckdiv << 16) | (cdiv << 8) | cdiv;
	dev_dbg(dev->dev, "cdiv %d ckdiv %d\n", cdiv, ckdiv);
}
/*
 * Abort any in-flight DMA transfer and unmap the buffer. Safe to call
 * when no DMA is active (both flags false). Runs with the data-path
 * interrupts masked so the irq handler cannot race with the teardown.
 */
static void at91_twi_dma_cleanup(struct at91_twi_dev *dev)
{
	struct at91_twi_dma *dma = &dev->dma;

	at91_twi_irq_save(dev);

	if (dma->xfer_in_progress) {
		/* Terminate on the channel matching the mapping direction. */
		if (dma->direction == DMA_FROM_DEVICE)
			dmaengine_terminate_all(dma->chan_rx);
		else
			dmaengine_terminate_all(dma->chan_tx);
		dma->xfer_in_progress = false;
	}
	if (dma->buf_mapped) {
		/*
		 * NOTE(review): the unmap always uses dev->buf_len, but the
		 * read path maps only buf_len - 2 bytes — confirm against
		 * at91_twi_read_data_dma().
		 */
		dma_unmap_single(dev->dev, sg_dma_address(&dma->sg),
				 dev->buf_len, dma->direction);
		dma->buf_mapped = false;
	}

	at91_twi_irq_restore(dev);
}
/*
 * PIO write path: push one byte from dev->buf into the Transmit Holding
 * Register and advance the cursor. Called once per TXRDY interrupt.
 */
static void at91_twi_write_next_byte(struct at91_twi_dev *dev)
{
	if (dev->buf_len <= 0)
		return;

	at91_twi_write(dev, AT91_TWI_THR, *dev->buf);

	/* send stop when last byte has been written */
	if (--dev->buf_len == 0)
		at91_twi_write(dev, AT91_TWI_CR, AT91_TWI_STOP);

	dev_dbg(dev->dev, "wrote 0x%x, to go %d\n", *dev->buf, dev->buf_len);

	++dev->buf;
}
  176. static void at91_twi_write_data_dma_callback(void *data)
  177. {
  178. struct at91_twi_dev *dev = (struct at91_twi_dev *)data;
  179. dma_unmap_single(dev->dev, sg_dma_address(&dev->dma.sg),
  180. dev->buf_len, DMA_MEM_TO_DEV);
  181. at91_twi_write(dev, AT91_TWI_CR, AT91_TWI_STOP);
  182. }
/*
 * Start a DMA write of the whole dev->buf to the THR. Completion is
 * signalled via at91_twi_write_data_dma_callback(); errors fall back to
 * at91_twi_dma_cleanup(). Interrupts are masked across the mapping so
 * the irq handler cannot observe a half-set-up DMA state.
 */
static void at91_twi_write_data_dma(struct at91_twi_dev *dev)
{
	dma_addr_t dma_addr;
	struct dma_async_tx_descriptor *txdesc;
	struct at91_twi_dma *dma = &dev->dma;
	struct dma_chan *chan_tx = dma->chan_tx;

	if (dev->buf_len <= 0)
		return;

	dma->direction = DMA_TO_DEVICE;

	at91_twi_irq_save(dev);
	dma_addr = dma_map_single(dev->dev, dev->buf, dev->buf_len,
				  DMA_TO_DEVICE);
	if (dma_mapping_error(dev->dev, dma_addr)) {
		/*
		 * NOTE(review): this early return leaves the interrupts
		 * saved by at91_twi_irq_save() disabled; the transfer will
		 * then end via the caller's timeout path — confirm intended.
		 */
		dev_err(dev->dev, "dma map failed\n");
		return;
	}
	dma->buf_mapped = true;
	at91_twi_irq_restore(dev);

	/* Single-entry scatterlist covering the whole buffer. */
	sg_dma_len(&dma->sg) = dev->buf_len;
	sg_dma_address(&dma->sg) = dma_addr;

	txdesc = dmaengine_prep_slave_sg(chan_tx, &dma->sg, 1, DMA_MEM_TO_DEV,
					 DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!txdesc) {
		dev_err(dev->dev, "dma prep slave sg failed\n");
		goto error;
	}

	txdesc->callback = at91_twi_write_data_dma_callback;
	txdesc->callback_param = dev;

	dma->xfer_in_progress = true;
	dmaengine_submit(txdesc);
	dma_async_issue_pending(chan_tx);

	return;

error:
	at91_twi_dma_cleanup(dev);
}
/*
 * PIO read path: pull one byte from the Receive Holding Register into
 * dev->buf and advance the cursor. Called once per RXRDY interrupt.
 * For SMBus block reads (I2C_M_RECV_LEN) the first byte received is the
 * block length, which extends the remaining count.
 */
static void at91_twi_read_next_byte(struct at91_twi_dev *dev)
{
	if (dev->buf_len <= 0)
		return;

	*dev->buf = at91_twi_read(dev, AT91_TWI_RHR) & 0xff;
	--dev->buf_len;

	/* handle I2C_SMBUS_BLOCK_DATA */
	if (unlikely(dev->msg->flags & I2C_M_RECV_LEN)) {
		/* Clear the flag so the length byte is consumed only once. */
		dev->msg->flags &= ~I2C_M_RECV_LEN;
		dev->buf_len += *dev->buf;
		dev->msg->len = dev->buf_len + 1;
		dev_dbg(dev->dev, "received block length %d\n", dev->buf_len);
	}

	/* send stop if second but last byte has been read */
	if (dev->buf_len == 1)
		at91_twi_write(dev, AT91_TWI_CR, AT91_TWI_STOP);

	dev_dbg(dev->dev, "read 0x%x, to go %d\n", *dev->buf, dev->buf_len);

	++dev->buf;
}
  237. static void at91_twi_read_data_dma_callback(void *data)
  238. {
  239. struct at91_twi_dev *dev = (struct at91_twi_dev *)data;
  240. dma_unmap_single(dev->dev, sg_dma_address(&dev->dma.sg),
  241. dev->buf_len, DMA_DEV_TO_MEM);
  242. /* The last two bytes have to be read without using dma */
  243. dev->buf += dev->buf_len - 2;
  244. dev->buf_len = 2;
  245. at91_twi_write(dev, AT91_TWI_IER, AT91_TWI_RXRDY);
  246. }
/*
 * Start a DMA read of dev->buf_len - 2 bytes from the RHR; the final two
 * bytes are read by PIO after the callback re-enables RXRDY (see comment
 * in at91_do_twi_transfer() for why n-2 and not n-1). Errors fall back
 * to at91_twi_dma_cleanup().
 */
static void at91_twi_read_data_dma(struct at91_twi_dev *dev)
{
	dma_addr_t dma_addr;
	struct dma_async_tx_descriptor *rxdesc;
	struct at91_twi_dma *dma = &dev->dma;
	struct dma_chan *chan_rx = dma->chan_rx;

	dma->direction = DMA_FROM_DEVICE;

	/* Keep in mind that we won't use dma to read the last two bytes */
	at91_twi_irq_save(dev);
	dma_addr = dma_map_single(dev->dev, dev->buf, dev->buf_len - 2,
				  DMA_FROM_DEVICE);
	if (dma_mapping_error(dev->dev, dma_addr)) {
		/*
		 * NOTE(review): returns with data-path interrupts left
		 * disabled; the transfer then ends via the caller's timeout
		 * path — confirm intended.
		 */
		dev_err(dev->dev, "dma map failed\n");
		return;
	}
	dma->buf_mapped = true;
	at91_twi_irq_restore(dev);

	/* Inconsistent with the tx path's sg_dma_address() accessor,
	 * but equivalent. */
	dma->sg.dma_address = dma_addr;
	sg_dma_len(&dma->sg) = dev->buf_len - 2;

	rxdesc = dmaengine_prep_slave_sg(chan_rx, &dma->sg, 1, DMA_DEV_TO_MEM,
					 DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!rxdesc) {
		dev_err(dev->dev, "dma prep slave sg failed\n");
		goto error;
	}

	rxdesc->callback = at91_twi_read_data_dma_callback;
	rxdesc->callback_param = dev;

	dma->xfer_in_progress = true;
	dmaengine_submit(rxdesc);
	dma_async_issue_pending(dma->chan_rx);

	return;

error:
	at91_twi_dma_cleanup(dev);
}
/*
 * Interrupt handler. Services at most one data byte per invocation
 * (RXRDY takes precedence over TXRDY), accumulates all status bits into
 * dev->transfer_status for later error inspection, and completes the
 * transfer on TXCOMP.
 */
static irqreturn_t atmel_twi_interrupt(int irq, void *dev_id)
{
	struct at91_twi_dev *dev = dev_id;
	const unsigned status = at91_twi_read(dev, AT91_TWI_SR);
	/* Only react to sources that are actually enabled. */
	const unsigned irqstatus = status & at91_twi_read(dev, AT91_TWI_IMR);

	if (!irqstatus)
		return IRQ_NONE;
	else if (irqstatus & AT91_TWI_RXRDY)
		at91_twi_read_next_byte(dev);
	else if (irqstatus & AT91_TWI_TXRDY)
		at91_twi_write_next_byte(dev);

	/* catch error flags */
	dev->transfer_status |= status;

	if (irqstatus & AT91_TWI_TXCOMP) {
		at91_disable_twi_interrupts(dev);
		complete(&dev->cmd_complete);
	}

	return IRQ_HANDLED;
}
  300. static int at91_do_twi_transfer(struct at91_twi_dev *dev)
  301. {
  302. int ret;
  303. bool has_unre_flag = dev->pdata->has_unre_flag;
  304. dev_dbg(dev->dev, "transfer: %s %d bytes.\n",
  305. (dev->msg->flags & I2C_M_RD) ? "read" : "write", dev->buf_len);
  306. INIT_COMPLETION(dev->cmd_complete);
  307. dev->transfer_status = 0;
  308. if (!dev->buf_len) {
  309. at91_twi_write(dev, AT91_TWI_CR, AT91_TWI_QUICK);
  310. at91_twi_write(dev, AT91_TWI_IER, AT91_TWI_TXCOMP);
  311. } else if (dev->msg->flags & I2C_M_RD) {
  312. unsigned start_flags = AT91_TWI_START;
  313. if (at91_twi_read(dev, AT91_TWI_SR) & AT91_TWI_RXRDY) {
  314. dev_err(dev->dev, "RXRDY still set!");
  315. at91_twi_read(dev, AT91_TWI_RHR);
  316. }
  317. /* if only one byte is to be read, immediately stop transfer */
  318. if (dev->buf_len <= 1 && !(dev->msg->flags & I2C_M_RECV_LEN))
  319. start_flags |= AT91_TWI_STOP;
  320. at91_twi_write(dev, AT91_TWI_CR, start_flags);
  321. /*
  322. * When using dma, the last byte has to be read manually in
  323. * order to not send the stop command too late and then
  324. * to receive extra data. In practice, there are some issues
  325. * if you use the dma to read n-1 bytes because of latency.
  326. * Reading n-2 bytes with dma and the two last ones manually
  327. * seems to be the best solution.
  328. */
  329. if (dev->use_dma && (dev->buf_len > AT91_I2C_DMA_THRESHOLD)) {
  330. at91_twi_read_data_dma(dev);
  331. /*
  332. * It is important to enable TXCOMP irq here because
  333. * doing it only when transferring the last two bytes
  334. * will mask NACK errors since TXCOMP is set when a
  335. * NACK occurs.
  336. */
  337. at91_twi_write(dev, AT91_TWI_IER,
  338. AT91_TWI_TXCOMP);
  339. } else
  340. at91_twi_write(dev, AT91_TWI_IER,
  341. AT91_TWI_TXCOMP | AT91_TWI_RXRDY);
  342. } else {
  343. if (dev->use_dma && (dev->buf_len > AT91_I2C_DMA_THRESHOLD)) {
  344. at91_twi_write_data_dma(dev);
  345. at91_twi_write(dev, AT91_TWI_IER, AT91_TWI_TXCOMP);
  346. } else {
  347. at91_twi_write_next_byte(dev);
  348. at91_twi_write(dev, AT91_TWI_IER,
  349. AT91_TWI_TXCOMP | AT91_TWI_TXRDY);
  350. }
  351. }
  352. ret = wait_for_completion_interruptible_timeout(&dev->cmd_complete,
  353. dev->adapter.timeout);
  354. if (ret == 0) {
  355. dev_err(dev->dev, "controller timed out\n");
  356. at91_init_twi_bus(dev);
  357. ret = -ETIMEDOUT;
  358. goto error;
  359. }
  360. if (dev->transfer_status & AT91_TWI_NACK) {
  361. dev_dbg(dev->dev, "received nack\n");
  362. ret = -EREMOTEIO;
  363. goto error;
  364. }
  365. if (dev->transfer_status & AT91_TWI_OVRE) {
  366. dev_err(dev->dev, "overrun while reading\n");
  367. ret = -EIO;
  368. goto error;
  369. }
  370. if (has_unre_flag && dev->transfer_status & AT91_TWI_UNRE) {
  371. dev_err(dev->dev, "underrun while writing\n");
  372. ret = -EIO;
  373. goto error;
  374. }
  375. dev_dbg(dev->dev, "transfer complete\n");
  376. return 0;
  377. error:
  378. at91_twi_dma_cleanup(dev);
  379. return ret;
  380. }
  381. static int at91_twi_xfer(struct i2c_adapter *adap, struct i2c_msg *msg, int num)
  382. {
  383. struct at91_twi_dev *dev = i2c_get_adapdata(adap);
  384. int ret;
  385. unsigned int_addr_flag = 0;
  386. struct i2c_msg *m_start = msg;
  387. dev_dbg(&adap->dev, "at91_xfer: processing %d messages:\n", num);
  388. /*
  389. * The hardware can handle at most two messages concatenated by a
  390. * repeated start via it's internal address feature.
  391. */
  392. if (num > 2) {
  393. dev_err(dev->dev,
  394. "cannot handle more than two concatenated messages.\n");
  395. return 0;
  396. } else if (num == 2) {
  397. int internal_address = 0;
  398. int i;
  399. if (msg->flags & I2C_M_RD) {
  400. dev_err(dev->dev, "first transfer must be write.\n");
  401. return -EINVAL;
  402. }
  403. if (msg->len > 3) {
  404. dev_err(dev->dev, "first message size must be <= 3.\n");
  405. return -EINVAL;
  406. }
  407. /* 1st msg is put into the internal address, start with 2nd */
  408. m_start = &msg[1];
  409. for (i = 0; i < msg->len; ++i) {
  410. const unsigned addr = msg->buf[msg->len - 1 - i];
  411. internal_address |= addr << (8 * i);
  412. int_addr_flag += AT91_TWI_IADRSZ_1;
  413. }
  414. at91_twi_write(dev, AT91_TWI_IADR, internal_address);
  415. }
  416. at91_twi_write(dev, AT91_TWI_MMR, (m_start->addr << 16) | int_addr_flag
  417. | ((m_start->flags & I2C_M_RD) ? AT91_TWI_MREAD : 0));
  418. dev->buf_len = m_start->len;
  419. dev->buf = m_start->buf;
  420. dev->msg = m_start;
  421. ret = at91_do_twi_transfer(dev);
  422. return (ret < 0) ? ret : num;
  423. }
  424. static u32 at91_twi_func(struct i2c_adapter *adapter)
  425. {
  426. return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL
  427. | I2C_FUNC_SMBUS_READ_BLOCK_DATA;
  428. }
/* Algorithm hooks registered with the i2c core. */
static struct i2c_algorithm at91_twi_algorithm = {
	.master_xfer	= at91_twi_xfer,
	.functionality	= at91_twi_func,
};
/*
 * Per-SoC configuration: clock divider limits/offsets for the CWGR
 * calculation, whether the underrun flag is valid, and whether DMA may
 * be used. Matched either by platform device id (below) or by OF
 * compatible (see atmel_twi_dt_ids).
 */
static struct at91_twi_pdata at91rm9200_config = {
	.clk_max_div = 5,
	.clk_offset = 3,
	.has_unre_flag = true,
	.has_dma_support = false,
};

static struct at91_twi_pdata at91sam9261_config = {
	.clk_max_div = 5,
	.clk_offset = 4,
	.has_unre_flag = false,
	.has_dma_support = false,
};

static struct at91_twi_pdata at91sam9260_config = {
	.clk_max_div = 7,
	.clk_offset = 4,
	.has_unre_flag = false,
	.has_dma_support = false,
};

static struct at91_twi_pdata at91sam9g20_config = {
	.clk_max_div = 7,
	.clk_offset = 4,
	.has_unre_flag = false,
	.has_dma_support = false,
};

static struct at91_twi_pdata at91sam9g10_config = {
	.clk_max_div = 7,
	.clk_offset = 4,
	.has_unre_flag = false,
	.has_dma_support = false,
};

/* Non-DT (board file) matching table. */
static const struct platform_device_id at91_twi_devtypes[] = {
	{
		.name = "i2c-at91rm9200",
		.driver_data = (unsigned long) &at91rm9200_config,
	}, {
		.name = "i2c-at91sam9261",
		.driver_data = (unsigned long) &at91sam9261_config,
	}, {
		.name = "i2c-at91sam9260",
		.driver_data = (unsigned long) &at91sam9260_config,
	}, {
		.name = "i2c-at91sam9g20",
		.driver_data = (unsigned long) &at91sam9g20_config,
	}, {
		.name = "i2c-at91sam9g10",
		.driver_data = (unsigned long) &at91sam9g10_config,
	}, {
		/* sentinel */
	}
};
#if defined(CONFIG_OF)
/* Only the sam9x5 family enables DMA in this driver. */
static struct at91_twi_pdata at91sam9x5_config = {
	.clk_max_div = 7,
	.clk_offset = 4,
	.has_unre_flag = false,
	.has_dma_support = true,
};

/* Device-tree matching table; data points at the per-SoC config. */
static const struct of_device_id atmel_twi_dt_ids[] = {
	{
		.compatible = "atmel,at91rm9200-i2c",
		.data = &at91rm9200_config,
	} , {
		.compatible = "atmel,at91sam9260-i2c",
		.data = &at91sam9260_config,
	} , {
		.compatible = "atmel,at91sam9g20-i2c",
		.data = &at91sam9g20_config,
	} , {
		.compatible = "atmel,at91sam9g10-i2c",
		.data = &at91sam9g10_config,
	}, {
		.compatible = "atmel,at91sam9x5-i2c",
		.data = &at91sam9x5_config,
	}, {
		/* sentinel */
	}
};
MODULE_DEVICE_TABLE(of, atmel_twi_dt_ids);
#endif
  512. static bool filter(struct dma_chan *chan, void *pdata)
  513. {
  514. struct at91_twi_pdata *sl_pdata = pdata;
  515. struct at_dma_slave *sl;
  516. if (!sl_pdata)
  517. return false;
  518. sl = &sl_pdata->dma_slave;
  519. if (sl && (sl->dma_dev == chan->device->dev)) {
  520. chan->private = sl;
  521. return true;
  522. } else {
  523. return false;
  524. }
  525. }
/*
 * Request and configure the TX and RX slave DMA channels for the
 * controller whose registers live at @phy_addr (physical base, used to
 * derive the THR/RHR FIFO addresses). Returns 0 on success; on any
 * failure it releases whatever was acquired and returns a negative errno
 * so the caller falls back to PIO.
 */
static int at91_twi_configure_dma(struct at91_twi_dev *dev, u32 phy_addr)
{
	int ret = 0;
	struct at91_twi_pdata *pdata = dev->pdata;
	struct dma_slave_config slave_config;
	struct at91_twi_dma *dma = &dev->dma;
	dma_cap_mask_t mask;

	/* One byte per beat in both directions, from/to the data registers. */
	memset(&slave_config, 0, sizeof(slave_config));
	slave_config.src_addr = (dma_addr_t)phy_addr + AT91_TWI_RHR;
	slave_config.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
	slave_config.src_maxburst = 1;
	slave_config.dst_addr = (dma_addr_t)phy_addr + AT91_TWI_THR;
	slave_config.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
	slave_config.dst_maxburst = 1;
	slave_config.device_fc = false;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	/* DT "dmas" binding when present, otherwise the legacy filter. */
	dma->chan_tx = dma_request_slave_channel_compat(mask, filter, pdata,
							dev->dev, "tx");
	if (!dma->chan_tx) {
		dev_err(dev->dev, "can't get a DMA channel for tx\n");
		ret = -EBUSY;
		goto error;
	}

	dma->chan_rx = dma_request_slave_channel_compat(mask, filter, pdata,
							dev->dev, "rx");
	if (!dma->chan_rx) {
		dev_err(dev->dev, "can't get a DMA channel for rx\n");
		ret = -EBUSY;
		goto error;
	}

	slave_config.direction = DMA_MEM_TO_DEV;
	if (dmaengine_slave_config(dma->chan_tx, &slave_config)) {
		dev_err(dev->dev, "failed to configure tx channel\n");
		ret = -EINVAL;
		goto error;
	}

	slave_config.direction = DMA_DEV_TO_MEM;
	if (dmaengine_slave_config(dma->chan_rx, &slave_config)) {
		dev_err(dev->dev, "failed to configure rx channel\n");
		ret = -EINVAL;
		goto error;
	}

	sg_init_table(&dma->sg, 1);
	dma->buf_mapped = false;
	dma->xfer_in_progress = false;

	dev_info(dev->dev, "using %s (tx) and %s (rx) for DMA transfers\n",
		 dma_chan_name(dma->chan_tx), dma_chan_name(dma->chan_rx));
	return ret;

error:
	dev_info(dev->dev, "can't use DMA\n");
	if (dma->chan_rx)
		dma_release_channel(dma->chan_rx);
	if (dma->chan_tx)
		dma_release_channel(dma->chan_tx);
	return ret;
}
  583. static struct at91_twi_pdata *at91_twi_get_driver_data(
  584. struct platform_device *pdev)
  585. {
  586. if (pdev->dev.of_node) {
  587. const struct of_device_id *match;
  588. match = of_match_node(atmel_twi_dt_ids, pdev->dev.of_node);
  589. if (!match)
  590. return NULL;
  591. return (struct at91_twi_pdata *)match->data;
  592. }
  593. return (struct at91_twi_pdata *) platform_get_device_id(pdev)->driver_data;
  594. }
  595. static int at91_twi_probe(struct platform_device *pdev)
  596. {
  597. struct at91_twi_dev *dev;
  598. struct resource *mem;
  599. int rc;
  600. u32 phy_addr;
  601. dev = devm_kzalloc(&pdev->dev, sizeof(*dev), GFP_KERNEL);
  602. if (!dev)
  603. return -ENOMEM;
  604. init_completion(&dev->cmd_complete);
  605. dev->dev = &pdev->dev;
  606. mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
  607. if (!mem)
  608. return -ENODEV;
  609. phy_addr = mem->start;
  610. dev->pdata = at91_twi_get_driver_data(pdev);
  611. if (!dev->pdata)
  612. return -ENODEV;
  613. dev->base = devm_ioremap_resource(&pdev->dev, mem);
  614. if (IS_ERR(dev->base))
  615. return PTR_ERR(dev->base);
  616. dev->irq = platform_get_irq(pdev, 0);
  617. if (dev->irq < 0)
  618. return dev->irq;
  619. rc = devm_request_irq(&pdev->dev, dev->irq, atmel_twi_interrupt, 0,
  620. dev_name(dev->dev), dev);
  621. if (rc) {
  622. dev_err(dev->dev, "Cannot get irq %d: %d\n", dev->irq, rc);
  623. return rc;
  624. }
  625. platform_set_drvdata(pdev, dev);
  626. dev->clk = devm_clk_get(dev->dev, NULL);
  627. if (IS_ERR(dev->clk)) {
  628. dev_err(dev->dev, "no clock defined\n");
  629. return -ENODEV;
  630. }
  631. clk_prepare_enable(dev->clk);
  632. if (dev->pdata->has_dma_support) {
  633. if (at91_twi_configure_dma(dev, phy_addr) == 0)
  634. dev->use_dma = true;
  635. }
  636. at91_calc_twi_clock(dev, TWI_CLK_HZ);
  637. at91_init_twi_bus(dev);
  638. snprintf(dev->adapter.name, sizeof(dev->adapter.name), "AT91");
  639. i2c_set_adapdata(&dev->adapter, dev);
  640. dev->adapter.owner = THIS_MODULE;
  641. dev->adapter.class = I2C_CLASS_HWMON;
  642. dev->adapter.algo = &at91_twi_algorithm;
  643. dev->adapter.dev.parent = dev->dev;
  644. dev->adapter.nr = pdev->id;
  645. dev->adapter.timeout = AT91_I2C_TIMEOUT;
  646. dev->adapter.dev.of_node = pdev->dev.of_node;
  647. rc = i2c_add_numbered_adapter(&dev->adapter);
  648. if (rc) {
  649. dev_err(dev->dev, "Adapter %s registration failed\n",
  650. dev->adapter.name);
  651. clk_disable_unprepare(dev->clk);
  652. return rc;
  653. }
  654. dev_info(dev->dev, "AT91 i2c bus driver.\n");
  655. return 0;
  656. }
/*
 * Platform remove: unregister the adapter and gate the clock.
 * NOTE(review): DMA channels acquired in at91_twi_configure_dma() are
 * not released here — confirm whether that is handled elsewhere.
 */
static int at91_twi_remove(struct platform_device *pdev)
{
	struct at91_twi_dev *dev = platform_get_drvdata(pdev);

	i2c_del_adapter(&dev->adapter);
	clk_disable_unprepare(dev->clk);

	return 0;
}
#ifdef CONFIG_PM
/* Runtime PM: only gate/ungate the (already prepared) peripheral clock. */
static int at91_twi_runtime_suspend(struct device *dev)
{
	struct at91_twi_dev *twi_dev = dev_get_drvdata(dev);

	clk_disable(twi_dev->clk);

	return 0;
}

static int at91_twi_runtime_resume(struct device *dev)
{
	struct at91_twi_dev *twi_dev = dev_get_drvdata(dev);

	return clk_enable(twi_dev->clk);
}

static const struct dev_pm_ops at91_twi_pm = {
	.runtime_suspend	= at91_twi_runtime_suspend,
	.runtime_resume		= at91_twi_runtime_resume,
};

#define at91_twi_pm_ops (&at91_twi_pm)
#else
#define at91_twi_pm_ops NULL
#endif
/* Platform driver glue; OF table is compiled out without CONFIG_OF. */
static struct platform_driver at91_twi_driver = {
	.probe		= at91_twi_probe,
	.remove		= at91_twi_remove,
	.id_table	= at91_twi_devtypes,
	.driver		= {
		.name	= "at91_i2c",
		.owner	= THIS_MODULE,
		.of_match_table = of_match_ptr(atmel_twi_dt_ids),
		.pm	= at91_twi_pm_ops,
	},
};
static int __init at91_twi_init(void)
{
	return platform_driver_register(&at91_twi_driver);
}

static void __exit at91_twi_exit(void)
{
	platform_driver_unregister(&at91_twi_driver);
}

/*
 * Registered via subsys_initcall rather than module_init — presumably so
 * the bus is up before drivers of on-bus devices probe; confirm before
 * changing to module_platform_driver().
 */
subsys_initcall(at91_twi_init);
module_exit(at91_twi_exit);

MODULE_AUTHOR("Nikolaus Voss <n.voss@weinmann.de>");
MODULE_DESCRIPTION("I2C (TWI) driver for Atmel AT91");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:at91_i2c");