at91_mci.c

/*
 * linux/drivers/mmc/host/at91_mci.c - ATMEL AT91 MCI Driver
 *
 * Copyright (C) 2005 Cougar Creek Computing Devices Ltd, All Rights Reserved
 *
 * Copyright (C) 2006 Malcolm Noyes
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
/*
   This is the AT91 MCI driver that has been tested with both MMC cards
   and SD-cards.  Boards that support write protect are now supported.
   The CCAT91SBC001 board does not support SD cards.

   The three entry points are at91_mci_request, at91_mci_set_ios
   and at91_mci_get_ro.

   SET IOS
     This configures the device to put it into the correct mode and clock speed
     required.

   MCI REQUEST
     MCI request processes the commands sent in the mmc_request structure. This
     can consist of a processing command and a stop command in the case of
     multiple block transfers.

     There are three main types of request: commands, reads and writes.

     Commands are straightforward.  The command is submitted to the controller
     and the request function returns.  When the controller generates an
     interrupt to indicate the command is finished, the response to the command
     is read and mmc_request_done() is called to end the request.

     Reads and writes work in a similar manner to normal commands but involve
     the PDC (DMA) controller to manage the transfers.

     A read is done from the controller directly to the scatterlist passed in
     from the request.  Due to a bug in the AT91RM9200 controller, when a read
     is completed, all the words are byte swapped in the scatterlist buffers.
     AT91SAM926x parts are not affected by this bug.

     The sequence of read interrupts is: ENDRX, RXBUFF, CMDRDY

     A write is slightly different in that the bytes to write are read from the
     scatterlist into a DMA memory buffer (this is in case the source buffer is
     read-only).  The entire write is then done from this single DMA memory
     buffer.

     The sequence of write interrupts is: ENDTX, TXBUFE, NOTBUSY, CMDRDY

   GET RO
     Gets the status of the write protect pin, if available.
*/
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/platform_device.h>
#include <linux/interrupt.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/dma-mapping.h>
#include <linux/clk.h>
#include <linux/atmel_pdc.h>

#include <linux/mmc/host.h>

#include <asm/io.h>
#include <asm/irq.h>
#include <asm/gpio.h>

#include <mach/board.h>
#include <mach/cpu.h>
#include <mach/at91_mci.h>

#define DRIVER_NAME "at91_mci"

#define FL_SENT_COMMAND	(1 << 0)
#define FL_SENT_STOP	(1 << 1)

#define AT91_MCI_ERRORS	(AT91_MCI_RINDE | AT91_MCI_RDIRE | AT91_MCI_RCRCE \
		| AT91_MCI_RENDE | AT91_MCI_RTOE | AT91_MCI_DCRCE \
		| AT91_MCI_DTOE | AT91_MCI_OVRE | AT91_MCI_UNRE)

#define at91_mci_read(host, reg)	__raw_readl((host)->baseaddr + (reg))
#define at91_mci_write(host, reg, val)	__raw_writel((val), (host)->baseaddr + (reg))
/*
 * Low level type for this driver
 */
struct at91mci_host
{
	struct mmc_host *mmc;
	struct mmc_command *cmd;
	struct mmc_request *request;

	void __iomem *baseaddr;
	int irq;

	struct at91_mmc_data *board;
	int present;

	struct clk *mci_clk;

	/*
	 * Flag indicating when the command has been sent. This is used to
	 * work out whether or not to send the stop
	 */
	unsigned int flags;
	/* flag for current bus settings */
	u32 bus_mode;

	/* DMA buffer used for transmitting */
	unsigned int *buffer;
	dma_addr_t physical_address;
	unsigned int total_length;

	/* Latest in the scatterlist that has been enabled for transfer, but not freed */
	int in_use_index;

	/* Latest in the scatterlist that has been enabled for transfer */
	int transfer_index;

	/* Timer for timeouts */
	struct timer_list timer;
};
/*
 * Reset the controller and restore most of the state
 */
static void at91_reset_host(struct at91mci_host *host)
{
	unsigned long flags;
	u32 mr;
	u32 sdcr;
	u32 dtor;
	u32 imr;

	local_irq_save(flags);
	imr = at91_mci_read(host, AT91_MCI_IMR);

	at91_mci_write(host, AT91_MCI_IDR, 0xffffffff);

	/* save current state */
	mr = at91_mci_read(host, AT91_MCI_MR) & 0x7fff;
	sdcr = at91_mci_read(host, AT91_MCI_SDCR);
	dtor = at91_mci_read(host, AT91_MCI_DTOR);

	/* reset the controller */
	at91_mci_write(host, AT91_MCI_CR, AT91_MCI_MCIDIS | AT91_MCI_SWRST);

	/* restore state */
	at91_mci_write(host, AT91_MCI_CR, AT91_MCI_MCIEN);
	at91_mci_write(host, AT91_MCI_MR, mr);
	at91_mci_write(host, AT91_MCI_SDCR, sdcr);
	at91_mci_write(host, AT91_MCI_DTOR, dtor);
	at91_mci_write(host, AT91_MCI_IER, imr);

	/* make sure sdio interrupts will fire */
	at91_mci_read(host, AT91_MCI_SR);

	local_irq_restore(flags);
}
static void at91_timeout_timer(unsigned long data)
{
	struct at91mci_host *host;

	host = (struct at91mci_host *)data;

	if (host->request) {
		dev_err(host->mmc->parent, "Timeout waiting end of packet\n");

		if (host->cmd && host->cmd->data) {
			host->cmd->data->error = -ETIMEDOUT;
		} else {
			if (host->cmd)
				host->cmd->error = -ETIMEDOUT;
			else
				host->request->cmd->error = -ETIMEDOUT;
		}

		at91_reset_host(host);
		mmc_request_done(host->mmc, host->request);
	}
}
/*
 * Copy from sg to a dma block - used for transfers
 */
static inline void at91_mci_sg_to_dma(struct at91mci_host *host, struct mmc_data *data)
{
	unsigned int len, i, size;
	unsigned *dmabuf = host->buffer;

	size = data->blksz * data->blocks;
	len = data->sg_len;

	/* AT91SAM926[0/3] Data Write Operation and number of bytes erratum */
	if (cpu_is_at91sam9260() || cpu_is_at91sam9263())
		if (host->total_length == 12)
			memset(dmabuf, 0, 12);

	/*
	 * Just loop through all entries. Size might not
	 * be the entire list though so make sure that
	 * we do not transfer too much.
	 */
	for (i = 0; i < len; i++) {
		struct scatterlist *sg;
		int amount;
		unsigned int *sgbuffer;

		sg = &data->sg[i];

		sgbuffer = kmap_atomic(sg_page(sg), KM_BIO_SRC_IRQ) + sg->offset;
		amount = min(size, sg->length);
		size -= amount;

		if (cpu_is_at91rm9200()) {	/* AT91RM9200 errata */
			int index;

			for (index = 0; index < (amount / 4); index++)
				*dmabuf++ = swab32(sgbuffer[index]);
		} else {
			memcpy(dmabuf, sgbuffer, amount);
			/* advance by the number of bytes copied, not words */
			dmabuf = (unsigned *)((char *)dmabuf + amount);
		}

		kunmap_atomic(sgbuffer, KM_BIO_SRC_IRQ);

		if (size == 0)
			break;
	}

	/*
	 * Check that we didn't get a request to transfer
	 * more data than can fit into the SG list.
	 */
	BUG_ON(size != 0);
}
/*
 * Prepare a dma read
 */
static void at91_mci_pre_dma_read(struct at91mci_host *host)
{
	int i;
	struct scatterlist *sg;
	struct mmc_command *cmd;
	struct mmc_data *data;

	pr_debug("pre dma read\n");

	cmd = host->cmd;
	if (!cmd) {
		pr_debug("no command\n");
		return;
	}

	data = cmd->data;
	if (!data) {
		pr_debug("no data\n");
		return;
	}
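	/*
	 * The PDC provides two receive buffer slots: the current one
	 * (RPR/RCR) and the "next" one (RNPR/RNCR).  The loop below tries
	 * to queue one scatterlist entry into each slot that is free.
	 */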
	for (i = 0; i < 2; i++) {
		/* nothing left to transfer */
		if (host->transfer_index >= data->sg_len) {
			pr_debug("Nothing left to transfer (index = %d)\n", host->transfer_index);
			break;
		}

		/* Check to see if this needs filling */
		if (i == 0) {
			if (at91_mci_read(host, ATMEL_PDC_RCR) != 0) {
				pr_debug("Transfer active in current\n");
				continue;
			}
		}
		else {
			if (at91_mci_read(host, ATMEL_PDC_RNCR) != 0) {
				pr_debug("Transfer active in next\n");
				continue;
			}
		}

		/* Setup the next transfer */
		pr_debug("Using transfer index %d\n", host->transfer_index);

		sg = &data->sg[host->transfer_index++];
		pr_debug("sg = %p\n", sg);

		sg->dma_address = dma_map_page(NULL, sg_page(sg), sg->offset, sg->length, DMA_FROM_DEVICE);

		pr_debug("dma address = %08X, length = %d\n", sg->dma_address, sg->length);

		if (i == 0) {
			at91_mci_write(host, ATMEL_PDC_RPR, sg->dma_address);
			at91_mci_write(host, ATMEL_PDC_RCR, (data->blksz & 0x3) ? sg->length : sg->length / 4);
		}
		else {
			at91_mci_write(host, ATMEL_PDC_RNPR, sg->dma_address);
			at91_mci_write(host, ATMEL_PDC_RNCR, (data->blksz & 0x3) ? sg->length : sg->length / 4);
		}
	}

	pr_debug("pre dma read done\n");
}
/*
 * Handle after a dma read
 */
static void at91_mci_post_dma_read(struct at91mci_host *host)
{
	struct mmc_command *cmd;
	struct mmc_data *data;

	pr_debug("post dma read\n");

	cmd = host->cmd;
	if (!cmd) {
		pr_debug("no command\n");
		return;
	}

	data = cmd->data;
	if (!data) {
		pr_debug("no data\n");
		return;
	}

	while (host->in_use_index < host->transfer_index) {
		struct scatterlist *sg;

		pr_debug("finishing index %d\n", host->in_use_index);

		sg = &data->sg[host->in_use_index++];

		pr_debug("Unmapping page %08X\n", sg->dma_address);

		dma_unmap_page(NULL, sg->dma_address, sg->length, DMA_FROM_DEVICE);

		if (cpu_is_at91rm9200()) {	/* AT91RM9200 errata */
			unsigned int *buffer;
			int index;

			/* Swap the contents of the buffer */
			buffer = kmap_atomic(sg_page(sg), KM_BIO_SRC_IRQ) + sg->offset;
			pr_debug("buffer = %p, length = %d\n", buffer, sg->length);

			for (index = 0; index < (sg->length / 4); index++)
				buffer[index] = swab32(buffer[index]);

			kunmap_atomic(buffer, KM_BIO_SRC_IRQ);
		}

		flush_dcache_page(sg_page(sg));

		data->bytes_xfered += sg->length;
	}

	/* Is there another transfer to trigger? */
	if (host->transfer_index < data->sg_len)
		at91_mci_pre_dma_read(host);
	else {
		at91_mci_write(host, AT91_MCI_IDR, AT91_MCI_ENDRX);
		at91_mci_write(host, AT91_MCI_IER, AT91_MCI_RXBUFF);
	}

	pr_debug("post dma read done\n");
}
/*
 * Handle transmitted data
 */
static void at91_mci_handle_transmitted(struct at91mci_host *host)
{
	struct mmc_command *cmd;
	struct mmc_data *data;

	pr_debug("Handling the transmit\n");

	/* Disable the transfer */
	at91_mci_write(host, ATMEL_PDC_PTCR, ATMEL_PDC_RXTDIS | ATMEL_PDC_TXTDIS);

	/* Now wait for cmd ready */
	at91_mci_write(host, AT91_MCI_IDR, AT91_MCI_TXBUFE);

	cmd = host->cmd;
	if (!cmd)
		return;

	data = cmd->data;
	if (!data)
		return;

	if (cmd->data->blocks > 1) {
		pr_debug("multiple write : wait for BLKE...\n");
		at91_mci_write(host, AT91_MCI_IER, AT91_MCI_BLKE);
	} else
		at91_mci_write(host, AT91_MCI_IER, AT91_MCI_NOTBUSY);
}
/*
 * Update the count of bytes transferred during a write operation
 */
static void at91_mci_update_bytes_xfered(struct at91mci_host *host)
{
	struct mmc_data *data;

	/* always deal with the effective request (and not the current cmd) */
	if (host->request->cmd && host->request->cmd->error != 0)
		return;

	if (host->request->data) {
		data = host->request->data;
		if (data->flags & MMC_DATA_WRITE) {
			/* card is in IDLE mode now */
			pr_debug("-> bytes_xfered %d, total_length = %d\n",
				data->bytes_xfered, host->total_length);
			data->bytes_xfered = data->blksz * data->blocks;
		}
	}
}
/*
 * Handle CMDRDY (the command has been sent)
 */
static int at91_mci_handle_cmdrdy(struct at91mci_host *host)
{
	if (!host->cmd)
		return 1;
	else if (!host->cmd->data) {
		if (host->flags & FL_SENT_STOP) {
			/* after a multi-block write, we must wait for NOTBUSY */
			at91_mci_write(host, AT91_MCI_IER, AT91_MCI_NOTBUSY);
		} else
			return 1;
	} else if (host->cmd->data->flags & MMC_DATA_WRITE) {
		/* after sending a multi-block write command, start the DMA transfer */
		at91_mci_write(host, AT91_MCI_IER, AT91_MCI_TXBUFE | AT91_MCI_BLKE);
		at91_mci_write(host, ATMEL_PDC_PTCR, ATMEL_PDC_TXTEN);
	}

	/* command not completed, have to wait */
	return 0;
}
/*
 * Enable the controller
 */
static void at91_mci_enable(struct at91mci_host *host)
{
	unsigned int mr;

	at91_mci_write(host, AT91_MCI_CR, AT91_MCI_MCIEN);
	at91_mci_write(host, AT91_MCI_IDR, 0xffffffff);
	at91_mci_write(host, AT91_MCI_DTOR, AT91_MCI_DTOMUL_1M | AT91_MCI_DTOCYC);
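	/*
	 * 0x34a presumably packs the initial clock divider and power-save
	 * divider fields of the mode register (value kept from the original
	 * driver); at91_mci_set_ios() reprograms CLKDIV later.
	 */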
	mr = AT91_MCI_PDCMODE | 0x34a;

	if (cpu_is_at91sam9260() || cpu_is_at91sam9263())
		mr |= AT91_MCI_RDPROOF | AT91_MCI_WRPROOF;

	at91_mci_write(host, AT91_MCI_MR, mr);

	/* use Slot A or B (only one at a time) */
	at91_mci_write(host, AT91_MCI_SDCR, host->board->slot_b);
}

/*
 * Disable the controller
 */
static void at91_mci_disable(struct at91mci_host *host)
{
	at91_mci_write(host, AT91_MCI_CR, AT91_MCI_MCIDIS | AT91_MCI_SWRST);
}
/*
 * Send a command
 */
static void at91_mci_send_command(struct at91mci_host *host, struct mmc_command *cmd)
{
	unsigned int cmdr, mr;
	unsigned int block_length;
	struct mmc_data *data = cmd->data;
	unsigned int blocks;
	unsigned int ier = 0;

	host->cmd = cmd;

	/* Needed for leaving busy state before CMD1 */
	if ((at91_mci_read(host, AT91_MCI_SR) & AT91_MCI_RTOE) && (cmd->opcode == 1)) {
		pr_debug("Clearing timeout\n");
		at91_mci_write(host, AT91_MCI_ARGR, 0);
		at91_mci_write(host, AT91_MCI_CMDR, AT91_MCI_OPDCMD);
		while (!(at91_mci_read(host, AT91_MCI_SR) & AT91_MCI_CMDRDY)) {
			/* spin */
			pr_debug("Clearing: SR = %08X\n", at91_mci_read(host, AT91_MCI_SR));
		}
	}

	cmdr = cmd->opcode;

	if (mmc_resp_type(cmd) == MMC_RSP_NONE)
		cmdr |= AT91_MCI_RSPTYP_NONE;
	else {
		/* if a response is expected then allow maximum response latency */
		cmdr |= AT91_MCI_MAXLAT;
		/* set 136 bit response for R2, 48 bit response otherwise */
		if (mmc_resp_type(cmd) == MMC_RSP_R2)
			cmdr |= AT91_MCI_RSPTYP_136;
		else
			cmdr |= AT91_MCI_RSPTYP_48;
	}

	if (data) {
		if (cpu_is_at91rm9200() || cpu_is_at91sam9261()) {
			if (data->blksz & 0x3) {
				pr_debug("Unsupported block size\n");
				cmd->error = -EINVAL;
				mmc_request_done(host->mmc, host->request);
				return;
			}
			if (data->flags & MMC_DATA_STREAM) {
				pr_debug("Stream commands not supported\n");
				cmd->error = -EINVAL;
				mmc_request_done(host->mmc, host->request);
				return;
			}
		}

		block_length = data->blksz;
		blocks = data->blocks;

		/* always set data start - also set direction flag for read */
		if (data->flags & MMC_DATA_READ)
			cmdr |= (AT91_MCI_TRDIR | AT91_MCI_TRCMD_START);
		else if (data->flags & MMC_DATA_WRITE)
			cmdr |= AT91_MCI_TRCMD_START;

		if (data->flags & MMC_DATA_STREAM)
			cmdr |= AT91_MCI_TRTYP_STREAM;
		if (data->blocks > 1)
			cmdr |= AT91_MCI_TRTYP_MULTIPLE;
	}
	else {
		block_length = 0;
		blocks = 0;
	}

	if (host->flags & FL_SENT_STOP)
		cmdr |= AT91_MCI_TRCMD_STOP;

	if (host->bus_mode == MMC_BUSMODE_OPENDRAIN)
		cmdr |= AT91_MCI_OPDCMD;
	/*
	 * Set the arguments and send the command
	 */
	pr_debug("Sending command %d as %08X, arg = %08X, blocks = %d, length = %d (MR = %08X)\n",
		cmd->opcode, cmdr, cmd->arg, blocks, block_length, at91_mci_read(host, AT91_MCI_MR));

	if (!data) {
		at91_mci_write(host, ATMEL_PDC_PTCR, ATMEL_PDC_TXTDIS | ATMEL_PDC_RXTDIS);
		at91_mci_write(host, ATMEL_PDC_RPR, 0);
		at91_mci_write(host, ATMEL_PDC_RCR, 0);
		at91_mci_write(host, ATMEL_PDC_RNPR, 0);
		at91_mci_write(host, ATMEL_PDC_RNCR, 0);
		at91_mci_write(host, ATMEL_PDC_TPR, 0);
		at91_mci_write(host, ATMEL_PDC_TCR, 0);
		at91_mci_write(host, ATMEL_PDC_TNPR, 0);
		at91_mci_write(host, ATMEL_PDC_TNCR, 0);
		ier = AT91_MCI_CMDRDY;
	} else {
		/* zero block length and PDC mode */
		mr = at91_mci_read(host, AT91_MCI_MR) & 0x5fff;
		mr |= (data->blksz & 0x3) ? AT91_MCI_PDCFBYTE : 0;
		mr |= (block_length << 16);
		mr |= AT91_MCI_PDCMODE;
		at91_mci_write(host, AT91_MCI_MR, mr);

		if (!(cpu_is_at91rm9200() || cpu_is_at91sam9261()))
			at91_mci_write(host, AT91_MCI_BLKR,
				AT91_MCI_BLKR_BCNT(blocks) |
				AT91_MCI_BLKR_BLKLEN(block_length));

		/*
		 * Disable the PDC controller
		 */
		at91_mci_write(host, ATMEL_PDC_PTCR, ATMEL_PDC_RXTDIS | ATMEL_PDC_TXTDIS);

		if (cmdr & AT91_MCI_TRCMD_START) {
			data->bytes_xfered = 0;
			host->transfer_index = 0;
			host->in_use_index = 0;
			if (cmdr & AT91_MCI_TRDIR) {
				/*
				 * Handle a read
				 */
				host->buffer = NULL;
				host->total_length = 0;

				at91_mci_pre_dma_read(host);
				ier = AT91_MCI_ENDRX /* | AT91_MCI_RXBUFF */;
			}
			else {
				/*
				 * Handle a write
				 */
				host->total_length = block_length * blocks;
				/*
				 * AT91SAM926[0/3] Data Write Operation and
				 * number of bytes erratum
				 */
				if (cpu_is_at91sam9260() || cpu_is_at91sam9263())
					if (host->total_length < 12)
						host->total_length = 12;

				host->buffer = kmalloc(host->total_length, GFP_KERNEL);
				if (!host->buffer) {
					pr_debug("Can't alloc tx buffer\n");
					cmd->error = -ENOMEM;
					mmc_request_done(host->mmc, host->request);
					return;
				}

				at91_mci_sg_to_dma(host, data);

				host->physical_address = dma_map_single(NULL,
						host->buffer, host->total_length,
						DMA_TO_DEVICE);

				pr_debug("Transmitting %d bytes\n", host->total_length);

				at91_mci_write(host, ATMEL_PDC_TPR, host->physical_address);
				at91_mci_write(host, ATMEL_PDC_TCR, (data->blksz & 0x3) ?
						host->total_length : host->total_length / 4);

				ier = AT91_MCI_CMDRDY;
			}
		}
	}

	/*
	 * Send the command and then enable the PDC - not the other way round as
	 * the data sheet says
	 */
	at91_mci_write(host, AT91_MCI_ARGR, cmd->arg);
	at91_mci_write(host, AT91_MCI_CMDR, cmdr);

	if (cmdr & AT91_MCI_TRCMD_START) {
		if (cmdr & AT91_MCI_TRDIR)
			at91_mci_write(host, ATMEL_PDC_PTCR, ATMEL_PDC_RXTEN);
	}

	/* Enable selected interrupts */
	at91_mci_write(host, AT91_MCI_IER, AT91_MCI_ERRORS | ier);
}
/*
 * Process the next step in the request
 */
static void at91_mci_process_next(struct at91mci_host *host)
{
	if (!(host->flags & FL_SENT_COMMAND)) {
		host->flags |= FL_SENT_COMMAND;
		at91_mci_send_command(host, host->request->cmd);
	}
	else if ((!(host->flags & FL_SENT_STOP)) && host->request->stop) {
		host->flags |= FL_SENT_STOP;
		at91_mci_send_command(host, host->request->stop);
	} else {
		del_timer(&host->timer);
		/* the at91rm9200 mci controller hangs after some transfers,
		 * and the workaround is to reset it after each transfer.
		 */
		if (cpu_is_at91rm9200())
			at91_reset_host(host);
		mmc_request_done(host->mmc, host->request);
	}
}
/*
 * Handle a command that has been completed
 */
static void at91_mci_completed_command(struct at91mci_host *host, unsigned int status)
{
	struct mmc_command *cmd = host->cmd;
	struct mmc_data *data = cmd->data;

	at91_mci_write(host, AT91_MCI_IDR, 0xffffffff & ~(AT91_MCI_SDIOIRQA | AT91_MCI_SDIOIRQB));

	cmd->resp[0] = at91_mci_read(host, AT91_MCI_RSPR(0));
	cmd->resp[1] = at91_mci_read(host, AT91_MCI_RSPR(1));
	cmd->resp[2] = at91_mci_read(host, AT91_MCI_RSPR(2));
	cmd->resp[3] = at91_mci_read(host, AT91_MCI_RSPR(3));

	if (host->buffer) {
		dma_unmap_single(NULL,
				host->physical_address, host->total_length,
				DMA_TO_DEVICE);
		kfree(host->buffer);
		host->buffer = NULL;
	}

	pr_debug("Status = %08X/%08x [%08X %08X %08X %08X]\n",
		status, at91_mci_read(host, AT91_MCI_SR),
		cmd->resp[0], cmd->resp[1], cmd->resp[2], cmd->resp[3]);

	if (status & AT91_MCI_ERRORS) {
		if ((status & AT91_MCI_RCRCE) && !(mmc_resp_type(cmd) & MMC_RSP_CRC)) {
			cmd->error = 0;
		}
		else {
			if (status & (AT91_MCI_DTOE | AT91_MCI_DCRCE)) {
				if (data) {
					if (status & AT91_MCI_DTOE)
						data->error = -ETIMEDOUT;
					else if (status & AT91_MCI_DCRCE)
						data->error = -EILSEQ;
				}
			} else {
				if (status & AT91_MCI_RTOE)
					cmd->error = -ETIMEDOUT;
				else if (status & AT91_MCI_RCRCE)
					cmd->error = -EILSEQ;
				else
					cmd->error = -EIO;
			}

			pr_debug("Error detected and set to %d/%d (cmd = %d, retries = %d)\n",
				cmd->error, data ? data->error : 0,
				cmd->opcode, cmd->retries);
		}
	}
	else
		cmd->error = 0;

	at91_mci_process_next(host);
}
/*
 * Handle an MMC request
 */
static void at91_mci_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
	struct at91mci_host *host = mmc_priv(mmc);
	host->request = mrq;
	host->flags = 0;
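	/* arm a one-second watchdog; at91_timeout_timer() resets the host if it fires */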
	mod_timer(&host->timer, jiffies + HZ);

	at91_mci_process_next(host);
}
/*
 * Set the IOS
 */
static void at91_mci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	int clkdiv;
	struct at91mci_host *host = mmc_priv(mmc);
	unsigned long at91_master_clock = clk_get_rate(host->mci_clk);

	host->bus_mode = ios->bus_mode;

	if (ios->clock == 0) {
		/* Disable the MCI controller */
		at91_mci_write(host, AT91_MCI_CR, AT91_MCI_MCIDIS);
		clkdiv = 0;
	}
	else {
		/* Enable the MCI controller */
		at91_mci_write(host, AT91_MCI_CR, AT91_MCI_MCIEN);

		if ((at91_master_clock % (ios->clock * 2)) == 0)
			clkdiv = ((at91_master_clock / ios->clock) / 2) - 1;
		else
			clkdiv = (at91_master_clock / ios->clock) / 2;

		pr_debug("clkdiv = %d. mcck = %ld\n", clkdiv,
			at91_master_clock / (2 * (clkdiv + 1)));
	}
	if (ios->bus_width == MMC_BUS_WIDTH_4 && host->board->wire4) {
		pr_debug("MMC: Setting controller bus width to 4\n");
		at91_mci_write(host, AT91_MCI_SDCR, at91_mci_read(host, AT91_MCI_SDCR) | AT91_MCI_SDCBUS);
	}
	else {
		pr_debug("MMC: Setting controller bus width to 1\n");
		at91_mci_write(host, AT91_MCI_SDCR, at91_mci_read(host, AT91_MCI_SDCR) & ~AT91_MCI_SDCBUS);
	}

	/* Set the clock divider */
	at91_mci_write(host, AT91_MCI_MR, (at91_mci_read(host, AT91_MCI_MR) & ~AT91_MCI_CLKDIV) | clkdiv);

	/* maybe switch power to the card */
	if (host->board->vcc_pin) {
		switch (ios->power_mode) {
		case MMC_POWER_OFF:
			gpio_set_value(host->board->vcc_pin, 0);
			break;
		case MMC_POWER_UP:
			gpio_set_value(host->board->vcc_pin, 1);
			break;
		case MMC_POWER_ON:
			break;
		default:
			WARN_ON(1);
		}
	}
}
/*
 * Handle an interrupt
 */
static irqreturn_t at91_mci_irq(int irq, void *devid)
{
	struct at91mci_host *host = devid;
	int completed = 0;
	unsigned int int_status, int_mask;

	int_status = at91_mci_read(host, AT91_MCI_SR);
	int_mask = at91_mci_read(host, AT91_MCI_IMR);

	pr_debug("MCI irq: status = %08X, %08X, %08X\n", int_status, int_mask,
		int_status & int_mask);

	int_status = int_status & int_mask;

	if (int_status & AT91_MCI_ERRORS) {
		completed = 1;

		if (int_status & AT91_MCI_UNRE)
			pr_debug("MMC: Underrun error\n");
		if (int_status & AT91_MCI_OVRE)
			pr_debug("MMC: Overrun error\n");
		if (int_status & AT91_MCI_DTOE)
			pr_debug("MMC: Data timeout\n");
		if (int_status & AT91_MCI_DCRCE)
			pr_debug("MMC: CRC error in data\n");
		if (int_status & AT91_MCI_RTOE)
			pr_debug("MMC: Response timeout\n");
		if (int_status & AT91_MCI_RENDE)
			pr_debug("MMC: Response end bit error\n");
		if (int_status & AT91_MCI_RCRCE)
			pr_debug("MMC: Response CRC error\n");
		if (int_status & AT91_MCI_RDIRE)
			pr_debug("MMC: Response direction error\n");
		if (int_status & AT91_MCI_RINDE)
			pr_debug("MMC: Response index error\n");
	} else {
		/* Only continue processing if no errors */

		if (int_status & AT91_MCI_TXBUFE) {
			pr_debug("TX buffer empty\n");
			at91_mci_handle_transmitted(host);
		}

		if (int_status & AT91_MCI_ENDRX) {
			pr_debug("ENDRX\n");
			at91_mci_post_dma_read(host);
		}

		if (int_status & AT91_MCI_RXBUFF) {
			pr_debug("RX buffer full\n");
			at91_mci_write(host, ATMEL_PDC_PTCR, ATMEL_PDC_RXTDIS | ATMEL_PDC_TXTDIS);
			at91_mci_write(host, AT91_MCI_IDR, AT91_MCI_RXBUFF | AT91_MCI_ENDRX);
			completed = 1;
		}

		if (int_status & AT91_MCI_ENDTX)
			pr_debug("Transmit has ended\n");

		if (int_status & AT91_MCI_NOTBUSY) {
			pr_debug("Card is ready\n");
			at91_mci_update_bytes_xfered(host);
			completed = 1;
		}

		if (int_status & AT91_MCI_DTIP)
			pr_debug("Data transfer in progress\n");

		if (int_status & AT91_MCI_BLKE) {
			pr_debug("Block transfer has ended\n");
			if (host->request->data && host->request->data->blocks > 1) {
				/* multi block write : complete multi write
				 * command and send stop */
				completed = 1;
			} else {
				at91_mci_write(host, AT91_MCI_IER, AT91_MCI_NOTBUSY);
			}
		}

		if (int_status & AT91_MCI_SDIOIRQA)
			mmc_signal_sdio_irq(host->mmc);

		if (int_status & AT91_MCI_SDIOIRQB)
			mmc_signal_sdio_irq(host->mmc);

		if (int_status & AT91_MCI_TXRDY)
			pr_debug("Ready to transmit\n");

		if (int_status & AT91_MCI_RXRDY)
			pr_debug("Ready to receive\n");

		if (int_status & AT91_MCI_CMDRDY) {
			pr_debug("Command ready\n");
			completed = at91_mci_handle_cmdrdy(host);
		}
	}

	if (completed) {
		pr_debug("Completed command\n");
		at91_mci_write(host, AT91_MCI_IDR, 0xffffffff & ~(AT91_MCI_SDIOIRQA | AT91_MCI_SDIOIRQB));
		at91_mci_completed_command(host, int_status);
	} else
		at91_mci_write(host, AT91_MCI_IDR, int_status & ~(AT91_MCI_SDIOIRQA | AT91_MCI_SDIOIRQB));

	return IRQ_HANDLED;
}
static irqreturn_t at91_mmc_det_irq(int irq, void *_host)
{
	struct at91mci_host *host = _host;
	int present = !gpio_get_value(irq_to_gpio(irq));

	/*
	 * we expect this irq on both insert and remove,
	 * and use a short delay to debounce.
	 */
	if (present != host->present) {
		host->present = present;
		pr_debug("%s: card %s\n", mmc_hostname(host->mmc),
			present ? "insert" : "remove");
		if (!present) {
			pr_debug("****** Resetting SD-card bus width ******\n");
			at91_mci_write(host, AT91_MCI_SDCR, at91_mci_read(host, AT91_MCI_SDCR) & ~AT91_MCI_SDCBUS);
		}
		mmc_detect_change(host->mmc, msecs_to_jiffies(100));
	}
	return IRQ_HANDLED;
}
static int at91_mci_get_ro(struct mmc_host *mmc)
{
	struct at91mci_host *host = mmc_priv(mmc);

	if (host->board->wp_pin)
		return !!gpio_get_value(host->board->wp_pin);
	/*
	 * Board doesn't support read only detection; let the mmc core
	 * decide what to do.
	 */
	return -ENOSYS;
}

static void at91_mci_enable_sdio_irq(struct mmc_host *mmc, int enable)
{
	struct at91mci_host *host = mmc_priv(mmc);

	pr_debug("%s: sdio_irq %c : %s\n", mmc_hostname(host->mmc),
		host->board->slot_b ? 'B' : 'A', enable ? "enable" : "disable");
	at91_mci_write(host, enable ? AT91_MCI_IER : AT91_MCI_IDR,
		host->board->slot_b ? AT91_MCI_SDIOIRQB : AT91_MCI_SDIOIRQA);
}

static const struct mmc_host_ops at91_mci_ops = {
	.request	= at91_mci_request,
	.set_ios	= at91_mci_set_ios,
	.get_ro		= at91_mci_get_ro,
	.enable_sdio_irq = at91_mci_enable_sdio_irq,
};
/*
 * Probe for the device
 */
static int __init at91_mci_probe(struct platform_device *pdev)
{
	struct mmc_host *mmc;
	struct at91mci_host *host;
	struct resource *res;
	int ret;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res)
		return -ENXIO;

	if (!request_mem_region(res->start, res->end - res->start + 1, DRIVER_NAME))
		return -EBUSY;

	mmc = mmc_alloc_host(sizeof(struct at91mci_host), &pdev->dev);
	if (!mmc) {
		ret = -ENOMEM;
		dev_dbg(&pdev->dev, "couldn't allocate mmc host\n");
		goto fail6;
	}

	mmc->ops = &at91_mci_ops;
	mmc->f_min = 375000;
	mmc->f_max = 25000000;
	mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;
	mmc->caps = MMC_CAP_SDIO_IRQ;

	mmc->max_blk_size = 4095;
	mmc->max_blk_count = mmc->max_req_size;

	host = mmc_priv(mmc);
	host->mmc = mmc;
	host->buffer = NULL;
	host->bus_mode = 0;
	host->board = pdev->dev.platform_data;
	if (host->board->wire4) {
		if (cpu_is_at91sam9260() || cpu_is_at91sam9263())
			mmc->caps |= MMC_CAP_4_BIT_DATA;
		else
			dev_warn(&pdev->dev, "4 wire bus mode not supported"
				" - using 1 wire\n");
	}

	/*
	 * Reserve GPIOs ... board init code makes sure these pins are set
	 * up as GPIOs with the right direction (input, except for vcc)
	 */
	if (host->board->det_pin) {
		ret = gpio_request(host->board->det_pin, "mmc_detect");
		if (ret < 0) {
			dev_dbg(&pdev->dev, "couldn't claim card detect pin\n");
			goto fail5;
		}
	}
	if (host->board->wp_pin) {
		ret = gpio_request(host->board->wp_pin, "mmc_wp");
		if (ret < 0) {
			dev_dbg(&pdev->dev, "couldn't claim wp sense pin\n");
			goto fail4;
		}
	}
	if (host->board->vcc_pin) {
		ret = gpio_request(host->board->vcc_pin, "mmc_vcc");
		if (ret < 0) {
			dev_dbg(&pdev->dev, "couldn't claim vcc switch pin\n");
			goto fail3;
		}
	}
	/*
	 * Get Clock
	 */
	host->mci_clk = clk_get(&pdev->dev, "mci_clk");
	if (IS_ERR(host->mci_clk)) {
		ret = -ENODEV;
		dev_dbg(&pdev->dev, "no mci_clk?\n");
		goto fail2;
	}

	/*
	 * Map I/O region
	 */
	host->baseaddr = ioremap(res->start, res->end - res->start + 1);
	if (!host->baseaddr) {
		ret = -ENOMEM;
		goto fail1;
	}

	/*
	 * Reset hardware
	 */
	clk_enable(host->mci_clk);	/* Enable the peripheral clock */
	at91_mci_disable(host);
	at91_mci_enable(host);

	/*
	 * Allocate the MCI interrupt
	 */
	host->irq = platform_get_irq(pdev, 0);
	ret = request_irq(host->irq, at91_mci_irq, IRQF_SHARED,
			mmc_hostname(mmc), host);
	if (ret) {
		dev_dbg(&pdev->dev, "request MCI interrupt failed\n");
		goto fail0;
	}

	setup_timer(&host->timer, at91_timeout_timer, (unsigned long)host);

	platform_set_drvdata(pdev, mmc);

	/*
	 * Add host to MMC layer
	 */
	if (host->board->det_pin) {
		host->present = !gpio_get_value(host->board->det_pin);
	}
	else
		host->present = -1;

	mmc_add_host(mmc);

	/*
	 * monitor card insertion/removal if we can
	 */
	if (host->board->det_pin) {
		ret = request_irq(gpio_to_irq(host->board->det_pin),
				at91_mmc_det_irq, 0, mmc_hostname(mmc), host);
		if (ret)
			dev_warn(&pdev->dev, "request MMC detect irq failed\n");
		else
			device_init_wakeup(&pdev->dev, 1);
	}

	pr_debug("Added MCI driver\n");

	return 0;

fail0:
	clk_disable(host->mci_clk);
	iounmap(host->baseaddr);
fail1:
	clk_put(host->mci_clk);
fail2:
	if (host->board->vcc_pin)
		gpio_free(host->board->vcc_pin);
fail3:
	if (host->board->wp_pin)
		gpio_free(host->board->wp_pin);
fail4:
	if (host->board->det_pin)
		gpio_free(host->board->det_pin);
fail5:
	mmc_free_host(mmc);
fail6:
	release_mem_region(res->start, res->end - res->start + 1);
	dev_err(&pdev->dev, "probe failed, err %d\n", ret);
	return ret;
}
/*
 * Remove a device
 */
static int __exit at91_mci_remove(struct platform_device *pdev)
{
	struct mmc_host *mmc = platform_get_drvdata(pdev);
	struct at91mci_host *host;
	struct resource *res;

	if (!mmc)
		return -1;

	host = mmc_priv(mmc);

	if (host->board->det_pin) {
		if (device_can_wakeup(&pdev->dev))
			free_irq(gpio_to_irq(host->board->det_pin), host);
		device_init_wakeup(&pdev->dev, 0);
		gpio_free(host->board->det_pin);
	}

	at91_mci_disable(host);
	del_timer_sync(&host->timer);
	mmc_remove_host(mmc);
	free_irq(host->irq, host);

	clk_disable(host->mci_clk);	/* Disable the peripheral clock */
	clk_put(host->mci_clk);

	if (host->board->vcc_pin)
		gpio_free(host->board->vcc_pin);
	if (host->board->wp_pin)
		gpio_free(host->board->wp_pin);

	iounmap(host->baseaddr);
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	release_mem_region(res->start, res->end - res->start + 1);

	mmc_free_host(mmc);
	platform_set_drvdata(pdev, NULL);
	pr_debug("MCI Removed\n");

	return 0;
}
#ifdef CONFIG_PM
static int at91_mci_suspend(struct platform_device *pdev, pm_message_t state)
{
	struct mmc_host *mmc = platform_get_drvdata(pdev);
	struct at91mci_host *host = mmc_priv(mmc);
	int ret = 0;

	if (host->board->det_pin && device_may_wakeup(&pdev->dev))
		enable_irq_wake(host->board->det_pin);

	if (mmc)
		ret = mmc_suspend_host(mmc, state);

	return ret;
}

static int at91_mci_resume(struct platform_device *pdev)
{
	struct mmc_host *mmc = platform_get_drvdata(pdev);
	struct at91mci_host *host = mmc_priv(mmc);
	int ret = 0;

	if (host->board->det_pin && device_may_wakeup(&pdev->dev))
		disable_irq_wake(host->board->det_pin);

	if (mmc)
		ret = mmc_resume_host(mmc);

	return ret;
}
#else
#define at91_mci_suspend	NULL
#define at91_mci_resume		NULL
#endif
static struct platform_driver at91_mci_driver = {
	.remove		= __exit_p(at91_mci_remove),
	.suspend	= at91_mci_suspend,
	.resume		= at91_mci_resume,
	.driver		= {
		.name	= DRIVER_NAME,
		.owner	= THIS_MODULE,
	},
};

static int __init at91_mci_init(void)
{
	return platform_driver_probe(&at91_mci_driver, at91_mci_probe);
}

static void __exit at91_mci_exit(void)
{
	platform_driver_unregister(&at91_mci_driver);
}

module_init(at91_mci_init);
module_exit(at91_mci_exit);

MODULE_DESCRIPTION("AT91 Multimedia Card Interface driver");
MODULE_AUTHOR("Nick Randell");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:at91_mci");