/*
 *  linux/drivers/mmc/host/at91_mci.c - ATMEL AT91 MCI Driver
 *
 *  Copyright (C) 2005 Cougar Creek Computing Devices Ltd, All Rights Reserved
 *
 *  Copyright (C) 2006 Malcolm Noyes
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

/*
   This is the AT91 MCI driver that has been tested with both MMC cards
   and SD-cards.  Boards that support write protect are now supported.
   The CCAT91SBC001 board does not support SD cards.

   The three entry points are at91_mci_request, at91_mci_set_ios
   and at91_mci_get_ro.

   SET IOS
     This configures the device to put it into the correct mode and clock speed
     required.

   MCI REQUEST
     MCI request processes the commands sent in the mmc_request structure. This
     can consist of a processing command and a stop command in the case of
     multiple block transfers.

     There are three main types of request: commands, reads and writes.

     Commands are straightforward. The command is submitted to the controller
     and the request function returns. When the controller generates an
     interrupt to indicate the command is finished, the responses to the
     command are read and mmc_request_done is called to end the request.

     Reads and writes work in a similar manner to normal commands but involve
     the PDC (DMA) controller to manage the transfers.

     A read is done from the controller directly to the scatterlist passed in
     from the request.  Due to a bug in the AT91RM9200 controller, when a read
     is completed, all the words are byte swapped in the scatterlist buffers.
     AT91SAM926x are not affected by this bug.

     The sequence of read interrupts is: ENDRX, RXBUFF, CMDRDY

     A write is slightly different in that the bytes to write are read from the
     scatterlist into a DMA memory buffer (this is in case the source buffer is
     read-only).  The entire write is then performed from this single DMA
     memory buffer.

     The sequence of write interrupts is: ENDTX, TXBUFE, NOTBUSY, CMDRDY

   GET RO
     Gets the status of the write protect pin, if available.
*/
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/platform_device.h>
#include <linux/interrupt.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/dma-mapping.h>
#include <linux/clk.h>
#include <linux/atmel_pdc.h>

#include <linux/mmc/host.h>

#include <asm/io.h>
#include <asm/irq.h>
#include <asm/gpio.h>

#include <asm/mach/mmc.h>
#include <mach/board.h>
#include <mach/cpu.h>
#include <mach/at91_mci.h>

#define DRIVER_NAME "at91_mci"

#define FL_SENT_COMMAND	(1 << 0)
#define FL_SENT_STOP	(1 << 1)

#define AT91_MCI_ERRORS	(AT91_MCI_RINDE | AT91_MCI_RDIRE | AT91_MCI_RCRCE	\
		| AT91_MCI_RENDE | AT91_MCI_RTOE | AT91_MCI_DCRCE		\
		| AT91_MCI_DTOE | AT91_MCI_OVRE | AT91_MCI_UNRE)

#define at91_mci_read(host, reg)	__raw_readl((host)->baseaddr + (reg))
#define at91_mci_write(host, reg, val)	__raw_writel((val), (host)->baseaddr + (reg))
/*
 * Low level type for this driver
 */
struct at91mci_host
{
	struct mmc_host *mmc;
	struct mmc_command *cmd;
	struct mmc_request *request;

	void __iomem *baseaddr;
	int irq;

	struct at91_mmc_data *board;
	int present;

	struct clk *mci_clk;

	/*
	 * Flag indicating when the command has been sent. This is used to
	 * work out whether or not to send the stop
	 */
	unsigned int flags;
	/* flag for current bus settings */
	u32 bus_mode;

	/* DMA buffer used for transmitting */
	unsigned int *buffer;
	dma_addr_t physical_address;
	unsigned int total_length;

	/* Latest in the scatterlist that has been enabled for transfer, but not freed */
	int in_use_index;

	/* Latest in the scatterlist that has been enabled for transfer */
	int transfer_index;

	/* Timer for timeouts */
	struct timer_list timer;
};
/*
 * Reset the controller and restore most of the state
 */
static void at91_reset_host(struct at91mci_host *host)
{
	unsigned long flags;
	u32 mr;
	u32 sdcr;
	u32 dtor;
	u32 imr;

	local_irq_save(flags);
	imr = at91_mci_read(host, AT91_MCI_IMR);

	at91_mci_write(host, AT91_MCI_IDR, 0xffffffff);

	/* save current state */
	mr = at91_mci_read(host, AT91_MCI_MR) & 0x7fff;
	sdcr = at91_mci_read(host, AT91_MCI_SDCR);
	dtor = at91_mci_read(host, AT91_MCI_DTOR);

	/* reset the controller */
	at91_mci_write(host, AT91_MCI_CR, AT91_MCI_MCIDIS | AT91_MCI_SWRST);

	/* restore state */
	at91_mci_write(host, AT91_MCI_CR, AT91_MCI_MCIEN);
	at91_mci_write(host, AT91_MCI_MR, mr);
	at91_mci_write(host, AT91_MCI_SDCR, sdcr);
	at91_mci_write(host, AT91_MCI_DTOR, dtor);
	at91_mci_write(host, AT91_MCI_IER, imr);

	/* make sure sdio interrupts will fire */
	at91_mci_read(host, AT91_MCI_SR);

	local_irq_restore(flags);
}
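
/*
 * Timeout handler: armed for one second (jiffies + HZ) in at91_mci_request().
 * If the request has not completed by then, flag a timeout error, reset the
 * controller and finish the request.
 */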
static void at91_timeout_timer(unsigned long data)
{
	struct at91mci_host *host;

	host = (struct at91mci_host *)data;

	if (host->request) {
		dev_err(host->mmc->parent, "Timeout waiting end of packet\n");

		if (host->cmd && host->cmd->data) {
			host->cmd->data->error = -ETIMEDOUT;
		} else {
			if (host->cmd)
				host->cmd->error = -ETIMEDOUT;
			else
				host->request->cmd->error = -ETIMEDOUT;
		}

		at91_reset_host(host);
		mmc_request_done(host->mmc, host->request);
	}
}
/*
 * Copy from sg to a dma block - used for transfers
 */
static inline void at91_mci_sg_to_dma(struct at91mci_host *host, struct mmc_data *data)
{
	unsigned int len, i, size;
	unsigned *dmabuf = host->buffer;

	size = data->blksz * data->blocks;
	len = data->sg_len;

	/* AT91SAM926[0/3] Data Write Operation and number of bytes erratum */
	if (cpu_is_at91sam9260() || cpu_is_at91sam9263())
		if (host->total_length == 12)
			memset(dmabuf, 0, 12);

	/*
	 * Just loop through all entries. Size might not
	 * be the entire list though so make sure that
	 * we do not transfer too much.
	 */
	for (i = 0; i < len; i++) {
		struct scatterlist *sg;
		int amount;
		unsigned int *sgbuffer;

		sg = &data->sg[i];

		sgbuffer = kmap_atomic(sg_page(sg), KM_BIO_SRC_IRQ) + sg->offset;
		amount = min(size, sg->length);
		size -= amount;

		if (cpu_is_at91rm9200()) {	/* AT91RM9200 errata */
			int index;

			for (index = 0; index < (amount / 4); index++)
				*dmabuf++ = swab32(sgbuffer[index]);
		} else {
			char *tmpv = (char *)dmabuf;

			memcpy(tmpv, sgbuffer, amount);
			/* advance by bytes, not by sizeof(unsigned int) elements */
			tmpv += amount;
			dmabuf = (unsigned *)tmpv;
		}

		kunmap_atomic(sgbuffer, KM_BIO_SRC_IRQ);

		if (size == 0)
			break;
	}

	/*
	 * Check that we didn't get a request to transfer
	 * more data than can fit into the SG list.
	 */
	BUG_ON(size != 0);
}
/*
 * Prepare a dma read
 */
static void at91_mci_pre_dma_read(struct at91mci_host *host)
{
	int i;
	struct scatterlist *sg;
	struct mmc_command *cmd;
	struct mmc_data *data;

	pr_debug("pre dma read\n");

	cmd = host->cmd;
	if (!cmd) {
		pr_debug("no command\n");
		return;
	}

	data = cmd->data;
	if (!data) {
		pr_debug("no data\n");
		return;
	}
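
	/*
	 * The PDC provides a "current" descriptor (RPR/RCR) and a "next"
	 * descriptor (RNPR/RNCR), so up to two scatterlist entries can be
	 * queued at once - hence the two iterations below.
	 */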
	for (i = 0; i < 2; i++) {
		/* nothing left to transfer */
		if (host->transfer_index >= data->sg_len) {
			pr_debug("Nothing left to transfer (index = %d)\n", host->transfer_index);
			break;
		}

		/* Check to see if this needs filling */
		if (i == 0) {
			if (at91_mci_read(host, ATMEL_PDC_RCR) != 0) {
				pr_debug("Transfer active in current\n");
				continue;
			}
		}
		else {
			if (at91_mci_read(host, ATMEL_PDC_RNCR) != 0) {
				pr_debug("Transfer active in next\n");
				continue;
			}
		}

		/* Setup the next transfer */
		pr_debug("Using transfer index %d\n", host->transfer_index);

		sg = &data->sg[host->transfer_index++];
		pr_debug("sg = %p\n", sg);

		sg->dma_address = dma_map_page(NULL, sg_page(sg), sg->offset, sg->length, DMA_FROM_DEVICE);

		pr_debug("dma address = %08X, length = %d\n", sg->dma_address, sg->length);

		if (i == 0) {
			at91_mci_write(host, ATMEL_PDC_RPR, sg->dma_address);
			at91_mci_write(host, ATMEL_PDC_RCR, (data->blksz & 0x3) ? sg->length : sg->length / 4);
		}
		else {
			at91_mci_write(host, ATMEL_PDC_RNPR, sg->dma_address);
			at91_mci_write(host, ATMEL_PDC_RNCR, (data->blksz & 0x3) ? sg->length : sg->length / 4);
		}
	}

	pr_debug("pre dma read done\n");
}
/*
 * Handle after a dma read
 */
static void at91_mci_post_dma_read(struct at91mci_host *host)
{
	struct mmc_command *cmd;
	struct mmc_data *data;

	pr_debug("post dma read\n");

	cmd = host->cmd;
	if (!cmd) {
		pr_debug("no command\n");
		return;
	}

	data = cmd->data;
	if (!data) {
		pr_debug("no data\n");
		return;
	}

	while (host->in_use_index < host->transfer_index) {
		struct scatterlist *sg;

		pr_debug("finishing index %d\n", host->in_use_index);

		sg = &data->sg[host->in_use_index++];

		pr_debug("Unmapping page %08X\n", sg->dma_address);

		dma_unmap_page(NULL, sg->dma_address, sg->length, DMA_FROM_DEVICE);

		if (cpu_is_at91rm9200()) {	/* AT91RM9200 errata */
			unsigned int *buffer;
			int index;

			/* Swap the contents of the buffer */
			buffer = kmap_atomic(sg_page(sg), KM_BIO_SRC_IRQ) + sg->offset;
			pr_debug("buffer = %p, length = %d\n", buffer, sg->length);

			for (index = 0; index < (sg->length / 4); index++)
				buffer[index] = swab32(buffer[index]);

			kunmap_atomic(buffer, KM_BIO_SRC_IRQ);
		}

		flush_dcache_page(sg_page(sg));

		data->bytes_xfered += sg->length;
	}

	/* Is there another transfer to trigger? */
	if (host->transfer_index < data->sg_len)
		at91_mci_pre_dma_read(host);
	else {
		at91_mci_write(host, AT91_MCI_IDR, AT91_MCI_ENDRX);
		at91_mci_write(host, AT91_MCI_IER, AT91_MCI_RXBUFF);
	}

	pr_debug("post dma read done\n");
}
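
/*
 * TXBUFE handling: the PDC has emptied the transmit buffer, but the write is
 * only treated as complete later, on BLKE (multi-block) or NOTBUSY (single
 * block), as set up below.
 */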
/*
 * Handle transmitted data
 */
static void at91_mci_handle_transmitted(struct at91mci_host *host)
{
	struct mmc_command *cmd;
	struct mmc_data *data;

	pr_debug("Handling the transmit\n");

	/* Disable the transfer */
	at91_mci_write(host, ATMEL_PDC_PTCR, ATMEL_PDC_RXTDIS | ATMEL_PDC_TXTDIS);

	/* Now wait for cmd ready */
	at91_mci_write(host, AT91_MCI_IDR, AT91_MCI_TXBUFE);

	cmd = host->cmd;
	if (!cmd) return;

	data = cmd->data;
	if (!data) return;

	if (cmd->data->blocks > 1) {
		pr_debug("multiple write : wait for BLKE...\n");
		at91_mci_write(host, AT91_MCI_IER, AT91_MCI_BLKE);
	} else
		at91_mci_write(host, AT91_MCI_IER, AT91_MCI_NOTBUSY);
}
/*
 * Update bytes transferred count during a write operation
 */
static void at91_mci_update_bytes_xfered(struct at91mci_host *host)
{
	struct mmc_data *data;

	/* always deal with the effective request (and not the current cmd) */
	if (host->request->cmd && host->request->cmd->error != 0)
		return;

	if (host->request->data) {
		data = host->request->data;
		if (data->flags & MMC_DATA_WRITE) {
			/* card is in IDLE mode now */
			pr_debug("-> bytes_xfered %d, total_length = %d\n",
				data->bytes_xfered, host->total_length);
			data->bytes_xfered = data->blksz * data->blocks;
		}
	}
}
/*
 * Handle the CMDRDY interrupt: the command has been sent
 */
static int at91_mci_handle_cmdrdy(struct at91mci_host *host)
{
	if (!host->cmd)
		return 1;
	else if (!host->cmd->data) {
		if (host->flags & FL_SENT_STOP) {
			/* After a multi-block write, we must wait for NOTBUSY */
			at91_mci_write(host, AT91_MCI_IER, AT91_MCI_NOTBUSY);
		} else
			return 1;
	} else if (host->cmd->data->flags & MMC_DATA_WRITE) {
		/* After sending a multi-block-write command, start the DMA transfer */
		at91_mci_write(host, AT91_MCI_IER, AT91_MCI_TXBUFE | AT91_MCI_BLKE);
		at91_mci_write(host, ATMEL_PDC_PTCR, ATMEL_PDC_TXTEN);
	}

	/* command not completed, have to wait */
	return 0;
}
/*
 * Enable the controller
 */
static void at91_mci_enable(struct at91mci_host *host)
{
	unsigned int mr;

	at91_mci_write(host, AT91_MCI_CR, AT91_MCI_MCIEN);
	at91_mci_write(host, AT91_MCI_IDR, 0xffffffff);
	at91_mci_write(host, AT91_MCI_DTOR, AT91_MCI_DTOMUL_1M | AT91_MCI_DTOCYC);
	mr = AT91_MCI_PDCMODE | 0x34a;

	if (cpu_is_at91sam9260() || cpu_is_at91sam9263())
		mr |= AT91_MCI_RDPROOF | AT91_MCI_WRPROOF;

	at91_mci_write(host, AT91_MCI_MR, mr);

	/* use Slot A or B (only one at a time) */
	at91_mci_write(host, AT91_MCI_SDCR, host->board->slot_b);
}

/*
 * Disable the controller
 */
static void at91_mci_disable(struct at91mci_host *host)
{
	at91_mci_write(host, AT91_MCI_CR, AT91_MCI_MCIDIS | AT91_MCI_SWRST);
}
/*
 * Send a command
 */
static void at91_mci_send_command(struct at91mci_host *host, struct mmc_command *cmd)
{
	unsigned int cmdr, mr;
	unsigned int block_length;
	struct mmc_data *data = cmd->data;

	unsigned int blocks;
	unsigned int ier = 0;

	host->cmd = cmd;

	/* Needed for leaving busy state before CMD1 */
	if ((at91_mci_read(host, AT91_MCI_SR) & AT91_MCI_RTOE) && (cmd->opcode == 1)) {
		pr_debug("Clearing timeout\n");
		at91_mci_write(host, AT91_MCI_ARGR, 0);
		at91_mci_write(host, AT91_MCI_CMDR, AT91_MCI_OPDCMD);
		while (!(at91_mci_read(host, AT91_MCI_SR) & AT91_MCI_CMDRDY)) {
			/* spin */
			pr_debug("Clearing: SR = %08X\n", at91_mci_read(host, AT91_MCI_SR));
		}
	}

	cmdr = cmd->opcode;

	if (mmc_resp_type(cmd) == MMC_RSP_NONE)
		cmdr |= AT91_MCI_RSPTYP_NONE;
	else {
		/* if a response is expected then allow maximum response latency */
		cmdr |= AT91_MCI_MAXLAT;
		/* set 136 bit response for R2, 48 bit response otherwise */
		if (mmc_resp_type(cmd) == MMC_RSP_R2)
			cmdr |= AT91_MCI_RSPTYP_136;
		else
			cmdr |= AT91_MCI_RSPTYP_48;
	}

	if (data) {
		if (cpu_is_at91rm9200() || cpu_is_at91sam9261()) {
			if (data->blksz & 0x3) {
				pr_debug("Unsupported block size\n");
				cmd->error = -EINVAL;
				mmc_request_done(host->mmc, host->request);
				return;
			}
			if (data->flags & MMC_DATA_STREAM) {
				pr_debug("Stream commands not supported\n");
				cmd->error = -EINVAL;
				mmc_request_done(host->mmc, host->request);
				return;
			}
		}

		block_length = data->blksz;
		blocks = data->blocks;

		/* always set data start - also set direction flag for read */
		if (data->flags & MMC_DATA_READ)
			cmdr |= (AT91_MCI_TRDIR | AT91_MCI_TRCMD_START);
		else if (data->flags & MMC_DATA_WRITE)
			cmdr |= AT91_MCI_TRCMD_START;

		if (data->flags & MMC_DATA_STREAM)
			cmdr |= AT91_MCI_TRTYP_STREAM;
		if (data->blocks > 1)
			cmdr |= AT91_MCI_TRTYP_MULTIPLE;
	}
	else {
		block_length = 0;
		blocks = 0;
	}

	if (host->flags & FL_SENT_STOP)
		cmdr |= AT91_MCI_TRCMD_STOP;

	if (host->bus_mode == MMC_BUSMODE_OPENDRAIN)
		cmdr |= AT91_MCI_OPDCMD;

	/*
	 * Set the arguments and send the command
	 */
	pr_debug("Sending command %d as %08X, arg = %08X, blocks = %d, length = %d (MR = %08X)\n",
		cmd->opcode, cmdr, cmd->arg, blocks, block_length, at91_mci_read(host, AT91_MCI_MR));

	if (!data) {
		at91_mci_write(host, ATMEL_PDC_PTCR, ATMEL_PDC_TXTDIS | ATMEL_PDC_RXTDIS);
		at91_mci_write(host, ATMEL_PDC_RPR, 0);
		at91_mci_write(host, ATMEL_PDC_RCR, 0);
		at91_mci_write(host, ATMEL_PDC_RNPR, 0);
		at91_mci_write(host, ATMEL_PDC_RNCR, 0);
		at91_mci_write(host, ATMEL_PDC_TPR, 0);
		at91_mci_write(host, ATMEL_PDC_TCR, 0);
		at91_mci_write(host, ATMEL_PDC_TNPR, 0);
		at91_mci_write(host, ATMEL_PDC_TNCR, 0);
		ier = AT91_MCI_CMDRDY;
	} else {
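		/*
		 * With a block size that is not a multiple of 4 the
		 * AT91_MCI_PDCFBYTE flag is set below so the PDC counts in
		 * bytes; otherwise the PDC counters are programmed in
		 * 32-bit words.
		 */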
		/* zero block length and PDC mode */
		mr = at91_mci_read(host, AT91_MCI_MR) & 0x5fff;
		mr |= (data->blksz & 0x3) ? AT91_MCI_PDCFBYTE : 0;
		mr |= (block_length << 16);
		mr |= AT91_MCI_PDCMODE;
		at91_mci_write(host, AT91_MCI_MR, mr);

		if (!(cpu_is_at91rm9200() || cpu_is_at91sam9261()))
			at91_mci_write(host, AT91_MCI_BLKR,
				AT91_MCI_BLKR_BCNT(blocks) |
				AT91_MCI_BLKR_BLKLEN(block_length));

		/*
		 * Disable the PDC controller
		 */
		at91_mci_write(host, ATMEL_PDC_PTCR, ATMEL_PDC_RXTDIS | ATMEL_PDC_TXTDIS);

		if (cmdr & AT91_MCI_TRCMD_START) {
			data->bytes_xfered = 0;
			host->transfer_index = 0;
			host->in_use_index = 0;
			if (cmdr & AT91_MCI_TRDIR) {
				/*
				 * Handle a read
				 */
				host->buffer = NULL;
				host->total_length = 0;

				at91_mci_pre_dma_read(host);
				ier = AT91_MCI_ENDRX /* | AT91_MCI_RXBUFF */;
			}
			else {
				/*
				 * Handle a write
				 */
				host->total_length = block_length * blocks;
				/*
				 * AT91SAM926[0/3] Data Write Operation and
				 * number of bytes erratum
				 */
				if (cpu_is_at91sam9260() || cpu_is_at91sam9263())
					if (host->total_length < 12)
						host->total_length = 12;

				host->buffer = kmalloc(host->total_length, GFP_KERNEL);
				if (!host->buffer) {
					pr_debug("Can't alloc tx buffer\n");
					cmd->error = -ENOMEM;
					mmc_request_done(host->mmc, host->request);
					return;
				}

				at91_mci_sg_to_dma(host, data);

				host->physical_address = dma_map_single(NULL,
						host->buffer, host->total_length,
						DMA_TO_DEVICE);

				pr_debug("Transmitting %d bytes\n", host->total_length);

				at91_mci_write(host, ATMEL_PDC_TPR, host->physical_address);
				at91_mci_write(host, ATMEL_PDC_TCR, (data->blksz & 0x3) ?
						host->total_length : host->total_length / 4);

				ier = AT91_MCI_CMDRDY;
			}
		}
	}
	/*
	 * Send the command and then enable the PDC - not the other way round as
	 * the data sheet says
	 */
	at91_mci_write(host, AT91_MCI_ARGR, cmd->arg);
	at91_mci_write(host, AT91_MCI_CMDR, cmdr);

	if (cmdr & AT91_MCI_TRCMD_START) {
		if (cmdr & AT91_MCI_TRDIR)
			at91_mci_write(host, ATMEL_PDC_PTCR, ATMEL_PDC_RXTEN);
	}

	/* Enable selected interrupts */
	at91_mci_write(host, AT91_MCI_IER, AT91_MCI_ERRORS | ier);
}
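
/*
 * A request is driven as a small state machine: first the main command
 * (FL_SENT_COMMAND), then the optional stop command (FL_SENT_STOP), and
 * finally the request is completed and the timeout timer cancelled.
 */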
/*
 * Process the next step in the request
 */
static void at91_mci_process_next(struct at91mci_host *host)
{
	if (!(host->flags & FL_SENT_COMMAND)) {
		host->flags |= FL_SENT_COMMAND;
		at91_mci_send_command(host, host->request->cmd);
	}
	else if ((!(host->flags & FL_SENT_STOP)) && host->request->stop) {
		host->flags |= FL_SENT_STOP;
		at91_mci_send_command(host, host->request->stop);
	} else {
		del_timer(&host->timer);
		/* the at91rm9200 mci controller hangs after some transfers,
		 * and the workaround is to reset it after each transfer.
		 */
		if (cpu_is_at91rm9200())
			at91_reset_host(host);
		mmc_request_done(host->mmc, host->request);
	}
}
/*
 * Handle a command that has been completed
 */
static void at91_mci_completed_command(struct at91mci_host *host, unsigned int status)
{
	struct mmc_command *cmd = host->cmd;
	struct mmc_data *data = cmd->data;

	at91_mci_write(host, AT91_MCI_IDR, 0xffffffff & ~(AT91_MCI_SDIOIRQA | AT91_MCI_SDIOIRQB));

	cmd->resp[0] = at91_mci_read(host, AT91_MCI_RSPR(0));
	cmd->resp[1] = at91_mci_read(host, AT91_MCI_RSPR(1));
	cmd->resp[2] = at91_mci_read(host, AT91_MCI_RSPR(2));
	cmd->resp[3] = at91_mci_read(host, AT91_MCI_RSPR(3));

	if (host->buffer) {
		dma_unmap_single(NULL,
				host->physical_address, host->total_length,
				DMA_TO_DEVICE);
		kfree(host->buffer);
		host->buffer = NULL;
	}

	pr_debug("Status = %08X/%08x [%08X %08X %08X %08X]\n",
		status, at91_mci_read(host, AT91_MCI_SR),
		cmd->resp[0], cmd->resp[1], cmd->resp[2], cmd->resp[3]);

	if (status & AT91_MCI_ERRORS) {
		if ((status & AT91_MCI_RCRCE) && !(mmc_resp_type(cmd) & MMC_RSP_CRC)) {
			cmd->error = 0;
		}
		else {
			if (status & (AT91_MCI_DTOE | AT91_MCI_DCRCE)) {
				if (data) {
					if (status & AT91_MCI_DTOE)
						data->error = -ETIMEDOUT;
					else if (status & AT91_MCI_DCRCE)
						data->error = -EILSEQ;
				}
			} else {
				if (status & AT91_MCI_RTOE)
					cmd->error = -ETIMEDOUT;
				else if (status & AT91_MCI_RCRCE)
					cmd->error = -EILSEQ;
				else
					cmd->error = -EIO;
			}

			pr_debug("Error detected and set to %d/%d (cmd = %d, retries = %d)\n",
				cmd->error, data ? data->error : 0,
				cmd->opcode, cmd->retries);
		}
	}
	else
		cmd->error = 0;

	at91_mci_process_next(host);
}
/*
 * Handle an MMC request
 */
static void at91_mci_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
	struct at91mci_host *host = mmc_priv(mmc);
	host->request = mrq;
	host->flags = 0;

	mod_timer(&host->timer, jiffies + HZ);

	at91_mci_process_next(host);
}
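
/*
 * The MCI clock is derived from the master clock as
 * MCI clock = master clock / (2 * (CLKDIV + 1)),
 * which is what the divider calculation below implements.
 */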
/*
 * Set the IOS
 */
static void at91_mci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	int clkdiv;
	struct at91mci_host *host = mmc_priv(mmc);
	unsigned long at91_master_clock = clk_get_rate(host->mci_clk);

	host->bus_mode = ios->bus_mode;

	if (ios->clock == 0) {
		/* Disable the MCI controller */
		at91_mci_write(host, AT91_MCI_CR, AT91_MCI_MCIDIS);
		clkdiv = 0;
	}
	else {
		/* Enable the MCI controller */
		at91_mci_write(host, AT91_MCI_CR, AT91_MCI_MCIEN);

		if ((at91_master_clock % (ios->clock * 2)) == 0)
			clkdiv = ((at91_master_clock / ios->clock) / 2) - 1;
		else
			clkdiv = (at91_master_clock / ios->clock) / 2;

		pr_debug("clkdiv = %d. mcck = %ld\n", clkdiv,
			at91_master_clock / (2 * (clkdiv + 1)));
	}
	if (ios->bus_width == MMC_BUS_WIDTH_4 && host->board->wire4) {
		pr_debug("MMC: Setting controller bus width to 4\n");
		at91_mci_write(host, AT91_MCI_SDCR, at91_mci_read(host, AT91_MCI_SDCR) | AT91_MCI_SDCBUS);
	}
	else {
		pr_debug("MMC: Setting controller bus width to 1\n");
		at91_mci_write(host, AT91_MCI_SDCR, at91_mci_read(host, AT91_MCI_SDCR) & ~AT91_MCI_SDCBUS);
	}

	/* Set the clock divider */
	at91_mci_write(host, AT91_MCI_MR, (at91_mci_read(host, AT91_MCI_MR) & ~AT91_MCI_CLKDIV) | clkdiv);

	/* maybe switch power to the card */
	if (host->board->vcc_pin) {
		switch (ios->power_mode) {
		case MMC_POWER_OFF:
			gpio_set_value(host->board->vcc_pin, 0);
			break;
		case MMC_POWER_UP:
			gpio_set_value(host->board->vcc_pin, 1);
			break;
		case MMC_POWER_ON:
			break;
		default:
			WARN_ON(1);
		}
	}
}
/*
 * Handle an interrupt
 */
static irqreturn_t at91_mci_irq(int irq, void *devid)
{
	struct at91mci_host *host = devid;
	int completed = 0;
	unsigned int int_status, int_mask;

	int_status = at91_mci_read(host, AT91_MCI_SR);
	int_mask = at91_mci_read(host, AT91_MCI_IMR);

	pr_debug("MCI irq: status = %08X, %08X, %08X\n", int_status, int_mask,
		int_status & int_mask);

	int_status = int_status & int_mask;

	if (int_status & AT91_MCI_ERRORS) {
		completed = 1;

		if (int_status & AT91_MCI_UNRE)
			pr_debug("MMC: Underrun error\n");
		if (int_status & AT91_MCI_OVRE)
			pr_debug("MMC: Overrun error\n");
		if (int_status & AT91_MCI_DTOE)
			pr_debug("MMC: Data timeout\n");
		if (int_status & AT91_MCI_DCRCE)
			pr_debug("MMC: CRC error in data\n");
		if (int_status & AT91_MCI_RTOE)
			pr_debug("MMC: Response timeout\n");
		if (int_status & AT91_MCI_RENDE)
			pr_debug("MMC: Response end bit error\n");
		if (int_status & AT91_MCI_RCRCE)
			pr_debug("MMC: Response CRC error\n");
		if (int_status & AT91_MCI_RDIRE)
			pr_debug("MMC: Response direction error\n");
		if (int_status & AT91_MCI_RINDE)
			pr_debug("MMC: Response index error\n");
	} else {
		/* Only continue processing if no errors */

		if (int_status & AT91_MCI_TXBUFE) {
			pr_debug("TX buffer empty\n");
			at91_mci_handle_transmitted(host);
		}

		if (int_status & AT91_MCI_ENDRX) {
			pr_debug("ENDRX\n");
			at91_mci_post_dma_read(host);
		}

		if (int_status & AT91_MCI_RXBUFF) {
			pr_debug("RX buffer full\n");
			at91_mci_write(host, ATMEL_PDC_PTCR, ATMEL_PDC_RXTDIS | ATMEL_PDC_TXTDIS);
			at91_mci_write(host, AT91_MCI_IDR, AT91_MCI_RXBUFF | AT91_MCI_ENDRX);
			completed = 1;
		}

		if (int_status & AT91_MCI_ENDTX)
			pr_debug("Transmit has ended\n");

		if (int_status & AT91_MCI_NOTBUSY) {
			pr_debug("Card is ready\n");
			at91_mci_update_bytes_xfered(host);
			completed = 1;
		}

		if (int_status & AT91_MCI_DTIP)
			pr_debug("Data transfer in progress\n");

		if (int_status & AT91_MCI_BLKE) {
			pr_debug("Block transfer has ended\n");
			if (host->request->data && host->request->data->blocks > 1) {
				/* multi block write : complete multi write
				 * command and send stop */
				completed = 1;
			} else {
				at91_mci_write(host, AT91_MCI_IER, AT91_MCI_NOTBUSY);
			}
		}

		if (int_status & AT91_MCI_SDIOIRQA)
			mmc_signal_sdio_irq(host->mmc);

		if (int_status & AT91_MCI_SDIOIRQB)
			mmc_signal_sdio_irq(host->mmc);

		if (int_status & AT91_MCI_TXRDY)
			pr_debug("Ready to transmit\n");

		if (int_status & AT91_MCI_RXRDY)
			pr_debug("Ready to receive\n");

		if (int_status & AT91_MCI_CMDRDY) {
			pr_debug("Command ready\n");
			completed = at91_mci_handle_cmdrdy(host);
		}
	}

	if (completed) {
		pr_debug("Completed command\n");
		at91_mci_write(host, AT91_MCI_IDR, 0xffffffff & ~(AT91_MCI_SDIOIRQA | AT91_MCI_SDIOIRQB));
		at91_mci_completed_command(host, int_status);
	} else
		at91_mci_write(host, AT91_MCI_IDR, int_status & ~(AT91_MCI_SDIOIRQA | AT91_MCI_SDIOIRQB));

	return IRQ_HANDLED;
}
static irqreturn_t at91_mmc_det_irq(int irq, void *_host)
{
	struct at91mci_host *host = _host;
	int present = !gpio_get_value(irq_to_gpio(irq));

	/*
	 * we expect this irq on both insert and remove,
	 * and use a short delay to debounce.
	 */
	if (present != host->present) {
		host->present = present;
		pr_debug("%s: card %s\n", mmc_hostname(host->mmc),
			present ? "insert" : "remove");
		if (!present) {
			pr_debug("****** Resetting SD-card bus width ******\n");
			at91_mci_write(host, AT91_MCI_SDCR, at91_mci_read(host, AT91_MCI_SDCR) & ~AT91_MCI_SDCBUS);
		}
		mmc_detect_change(host->mmc, msecs_to_jiffies(100));
	}
	return IRQ_HANDLED;
}
static int at91_mci_get_ro(struct mmc_host *mmc)
{
	struct at91mci_host *host = mmc_priv(mmc);

	if (host->board->wp_pin)
		return !!gpio_get_value(host->board->wp_pin);
	/*
	 * Board doesn't support read only detection; let the mmc core
	 * decide what to do.
	 */
	return -ENOSYS;
}

static void at91_mci_enable_sdio_irq(struct mmc_host *mmc, int enable)
{
	struct at91mci_host *host = mmc_priv(mmc);

	pr_debug("%s: sdio_irq %c : %s\n", mmc_hostname(host->mmc),
		host->board->slot_b ? 'B' : 'A', enable ? "enable" : "disable");
	at91_mci_write(host, enable ? AT91_MCI_IER : AT91_MCI_IDR,
		host->board->slot_b ? AT91_MCI_SDIOIRQB : AT91_MCI_SDIOIRQA);
}
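
/*
 * Entry points called by the MMC core: the request/set_ios/get_ro operations
 * described at the top of this file, plus SDIO interrupt enabling.
 */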
static const struct mmc_host_ops at91_mci_ops = {
	.request	= at91_mci_request,
	.set_ios	= at91_mci_set_ios,
	.get_ro		= at91_mci_get_ro,
	.enable_sdio_irq = at91_mci_enable_sdio_irq,
};
/*
 * Probe for the device
 */
static int __init at91_mci_probe(struct platform_device *pdev)
{
	struct mmc_host *mmc;
	struct at91mci_host *host;
	struct resource *res;
	int ret;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res)
		return -ENXIO;

	if (!request_mem_region(res->start, res->end - res->start + 1, DRIVER_NAME))
		return -EBUSY;

	mmc = mmc_alloc_host(sizeof(struct at91mci_host), &pdev->dev);
	if (!mmc) {
		ret = -ENOMEM;
		dev_dbg(&pdev->dev, "couldn't allocate mmc host\n");
		goto fail6;
	}

	mmc->ops = &at91_mci_ops;
	mmc->f_min = 375000;
	mmc->f_max = 25000000;
	mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;
	mmc->caps = MMC_CAP_SDIO_IRQ;

	mmc->max_blk_size = 4095;
	mmc->max_blk_count = mmc->max_req_size;

	host = mmc_priv(mmc);
	host->mmc = mmc;
	host->buffer = NULL;
	host->bus_mode = 0;
	host->board = pdev->dev.platform_data;
	if (host->board->wire4) {
		if (cpu_is_at91sam9260() || cpu_is_at91sam9263())
			mmc->caps |= MMC_CAP_4_BIT_DATA;
		else
			dev_warn(&pdev->dev, "4 wire bus mode not supported"
				" - using 1 wire\n");
	}

	/*
	 * Reserve GPIOs ... board init code makes sure these pins are set
	 * up as GPIOs with the right direction (input, except for vcc)
	 */
	if (host->board->det_pin) {
		ret = gpio_request(host->board->det_pin, "mmc_detect");
		if (ret < 0) {
			dev_dbg(&pdev->dev, "couldn't claim card detect pin\n");
			goto fail5;
		}
	}
	if (host->board->wp_pin) {
		ret = gpio_request(host->board->wp_pin, "mmc_wp");
		if (ret < 0) {
			dev_dbg(&pdev->dev, "couldn't claim wp sense pin\n");
			goto fail4;
		}
	}
	if (host->board->vcc_pin) {
		ret = gpio_request(host->board->vcc_pin, "mmc_vcc");
		if (ret < 0) {
			dev_dbg(&pdev->dev, "couldn't claim vcc switch pin\n");
			goto fail3;
		}
	}

	/*
	 * Get Clock
	 */
	host->mci_clk = clk_get(&pdev->dev, "mci_clk");
	if (IS_ERR(host->mci_clk)) {
		ret = -ENODEV;
		dev_dbg(&pdev->dev, "no mci_clk?\n");
		goto fail2;
	}

	/*
	 * Map I/O region
	 */
	host->baseaddr = ioremap(res->start, res->end - res->start + 1);
	if (!host->baseaddr) {
		ret = -ENOMEM;
		goto fail1;
	}

	/*
	 * Reset hardware
	 */
	clk_enable(host->mci_clk);		/* Enable the peripheral clock */
	at91_mci_disable(host);
	at91_mci_enable(host);

	/*
	 * Allocate the MCI interrupt
	 */
	host->irq = platform_get_irq(pdev, 0);
	ret = request_irq(host->irq, at91_mci_irq, IRQF_SHARED,
			mmc_hostname(mmc), host);
	if (ret) {
		dev_dbg(&pdev->dev, "request MCI interrupt failed\n");
		goto fail0;
	}

	setup_timer(&host->timer, at91_timeout_timer, (unsigned long)host);

	platform_set_drvdata(pdev, mmc);

	/*
	 * Add host to MMC layer
	 */
	if (host->board->det_pin) {
		host->present = !gpio_get_value(host->board->det_pin);
	}
	else
		host->present = -1;

	mmc_add_host(mmc);

	/*
	 * monitor card insertion/removal if we can
	 */
	if (host->board->det_pin) {
		ret = request_irq(gpio_to_irq(host->board->det_pin),
				at91_mmc_det_irq, 0, mmc_hostname(mmc), host);
		if (ret)
			dev_warn(&pdev->dev, "request MMC detect irq failed\n");
		else
			device_init_wakeup(&pdev->dev, 1);
	}

	pr_debug("Added MCI driver\n");

	return 0;

fail0:
	clk_disable(host->mci_clk);
	iounmap(host->baseaddr);
fail1:
	clk_put(host->mci_clk);
fail2:
	if (host->board->vcc_pin)
		gpio_free(host->board->vcc_pin);
fail3:
	if (host->board->wp_pin)
		gpio_free(host->board->wp_pin);
fail4:
	if (host->board->det_pin)
		gpio_free(host->board->det_pin);
fail5:
	mmc_free_host(mmc);
fail6:
	release_mem_region(res->start, res->end - res->start + 1);
	dev_err(&pdev->dev, "probe failed, err %d\n", ret);
	return ret;
}
/*
 * Remove a device
 */
static int __exit at91_mci_remove(struct platform_device *pdev)
{
	struct mmc_host *mmc = platform_get_drvdata(pdev);
	struct at91mci_host *host;
	struct resource *res;

	if (!mmc)
		return -1;

	host = mmc_priv(mmc);

	if (host->board->det_pin) {
		if (device_can_wakeup(&pdev->dev))
			free_irq(gpio_to_irq(host->board->det_pin), host);
		device_init_wakeup(&pdev->dev, 0);
		gpio_free(host->board->det_pin);
	}

	at91_mci_disable(host);
	del_timer_sync(&host->timer);
	mmc_remove_host(mmc);
	free_irq(host->irq, host);

	clk_disable(host->mci_clk);		/* Disable the peripheral clock */
	clk_put(host->mci_clk);

	if (host->board->vcc_pin)
		gpio_free(host->board->vcc_pin);
	if (host->board->wp_pin)
		gpio_free(host->board->wp_pin);

	iounmap(host->baseaddr);
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	release_mem_region(res->start, res->end - res->start + 1);

	mmc_free_host(mmc);
	platform_set_drvdata(pdev, NULL);
	pr_debug("MCI Removed\n");

	return 0;
}
#ifdef CONFIG_PM
static int at91_mci_suspend(struct platform_device *pdev, pm_message_t state)
{
	struct mmc_host *mmc = platform_get_drvdata(pdev);
	struct at91mci_host *host = mmc_priv(mmc);
	int ret = 0;

	if (host->board->det_pin && device_may_wakeup(&pdev->dev))
		enable_irq_wake(host->board->det_pin);

	if (mmc)
		ret = mmc_suspend_host(mmc, state);

	return ret;
}

static int at91_mci_resume(struct platform_device *pdev)
{
	struct mmc_host *mmc = platform_get_drvdata(pdev);
	struct at91mci_host *host = mmc_priv(mmc);
	int ret = 0;

	if (host->board->det_pin && device_may_wakeup(&pdev->dev))
		disable_irq_wake(host->board->det_pin);

	if (mmc)
		ret = mmc_resume_host(mmc);

	return ret;
}
#else
#define at91_mci_suspend	NULL
#define at91_mci_resume		NULL
#endif
static struct platform_driver at91_mci_driver = {
	.remove		= __exit_p(at91_mci_remove),
	.suspend	= at91_mci_suspend,
	.resume		= at91_mci_resume,
	.driver		= {
		.name	= DRIVER_NAME,
		.owner	= THIS_MODULE,
	},
};

static int __init at91_mci_init(void)
{
	return platform_driver_probe(&at91_mci_driver, at91_mci_probe);
}

static void __exit at91_mci_exit(void)
{
	platform_driver_unregister(&at91_mci_driver);
}

module_init(at91_mci_init);
module_exit(at91_mci_exit);

MODULE_DESCRIPTION("AT91 Multimedia Card Interface driver");
MODULE_AUTHOR("Nick Randell");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:at91_mci");