/*
 * NAND Flash Controller Device Driver
 * Copyright © 2009-2010, Intel Corporation and its suppliers.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 */
  19. #include <linux/interrupt.h>
  20. #include <linux/delay.h>
  21. #include <linux/wait.h>
  22. #include <linux/mutex.h>
  23. #include <linux/pci.h>
  24. #include <linux/mtd/mtd.h>
  25. #include <linux/module.h>
  26. #include "denali.h"
  27. MODULE_LICENSE("GPL");
/*
 * Module parameter that lets the user override the ONFI timing mode the
 * hardware would otherwise pick.  -1 (the default) means "derive the
 * timings automatically".
 */
#define NAND_DEFAULT_TIMINGS	-1

static int onfi_timing_mode = NAND_DEFAULT_TIMINGS;
module_param(onfi_timing_mode, int, S_IRUGO);
MODULE_PARM_DESC(onfi_timing_mode, "Overrides default ONFI setting. -1 indicates"
			" use default timings");

#define DENALI_NAND_NAME "denali-nand"

/*
 * All interrupts this driver uses, OR'd into a single constant for
 * convenience when enabling or acknowledging interrupt status bits.
 */
#define DENALI_IRQ_ALL	(INTR_STATUS0__DMA_CMD_COMP | \
			INTR_STATUS0__ECC_TRANSACTION_DONE | \
			INTR_STATUS0__ECC_ERR | \
			INTR_STATUS0__PROGRAM_FAIL | \
			INTR_STATUS0__LOAD_COMP | \
			INTR_STATUS0__PROGRAM_COMP | \
			INTR_STATUS0__TIME_OUT | \
			INTR_STATUS0__ERASE_FAIL | \
			INTR_STATUS0__RST_COMP | \
			INTR_STATUS0__ERASE_COMP)

/* Sentinel: the driver's cached flash-bank selection is not valid. */
#define CHIP_SELECT_INVALID	-1

/* Non-zero enables the 8-bit ECC-correction fallback configuration. */
#define SUPPORT_8BITECC		1

/* Integer division that rounds any fractional result up. */
#define CEIL_DIV(X, Y) (((X)%(Y)) ? ((X)/(Y)+1) : ((X)/(Y)))

/* Convert an mtd_info pointer back to the embedding denali_nand_info. */
#define mtd_to_denali(m) container_of(m, struct denali_nand_info, mtd)

/*
 * Controller transfer-type codes (spare only, main only, or both) and the
 * read/write direction flag used when building command words.
 */
#define SPARE_ACCESS		0x41
#define MAIN_ACCESS		0x42
#define MAIN_SPARE_ACCESS	0x43
#define DENALI_READ		0
#define DENALI_WRITE		0x100

/* Types of device accesses: command, address, and status cycles. */
#define COMMAND_CYCLE	0
#define ADDR_CYCLE	1
#define STATUS_CYCLE	2

/*
 * Shift a bank number up to bit 24, where the controller expects the
 * bank-select field in a command word.
 */
#define BANK(x) ((x) << 24)
/* List of platforms this NAND controller has been integrated into. */
static const struct pci_device_id denali_pci_ids[] = {
	{ PCI_VDEVICE(INTEL, 0x0701), INTEL_CE4100 },
	{ PCI_VDEVICE(INTEL, 0x0809), INTEL_MRST },
	{ /* end: all zeroes */ }
};

/*
 * Per-bank lookup tables, indexed by flash bank number (0-3), that give
 * easy access to each bank's status register and its reset/complete/
 * timeout bits without switch statements at every call site.
 */
static const uint32_t intr_status_addresses[4] = {INTR_STATUS0,
						  INTR_STATUS1,
						  INTR_STATUS2,
						  INTR_STATUS3};

static const uint32_t device_reset_banks[4] = {DEVICE_RESET__BANK0,
					       DEVICE_RESET__BANK1,
					       DEVICE_RESET__BANK2,
					       DEVICE_RESET__BANK3};

static const uint32_t operation_timeout[4] = {INTR_STATUS0__TIME_OUT,
					      INTR_STATUS1__TIME_OUT,
					      INTR_STATUS2__TIME_OUT,
					      INTR_STATUS3__TIME_OUT};

static const uint32_t reset_complete[4] = {INTR_STATUS0__RST_COMP,
					   INTR_STATUS1__RST_COMP,
					   INTR_STATUS2__RST_COMP,
					   INTR_STATUS3__RST_COMP};
/* specifies the debug level of the driver */
static int nand_debug_level = 0;

/* forward declarations */
static void clear_interrupts(struct denali_nand_info *denali);
static uint32_t wait_for_irq(struct denali_nand_info *denali, uint32_t irq_mask);
static void denali_irq_enable(struct denali_nand_info *denali, uint32_t int_mask);
static uint32_t read_interrupt_status(struct denali_nand_info *denali);

/* Set to 1 to trace every register write and status read for debugging. */
#define DEBUG_DENALI 0
  107. /* This is a wrapper for writing to the denali registers.
  108. * this allows us to create debug information so we can
  109. * observe how the driver is programming the device.
  110. * it uses standard linux convention for (val, addr) */
  111. static void denali_write32(uint32_t value, void *addr)
  112. {
  113. iowrite32(value, addr);
  114. #if DEBUG_DENALI
  115. printk(KERN_ERR "wrote: 0x%x -> 0x%x\n", value, (uint32_t)((uint32_t)addr & 0x1fff));
  116. #endif
  117. }
/*
 * Certain operations for the denali NAND controller use an indexed mode to
 * read/write data.  The operation is performed by writing the command's
 * address value to the device memory window, followed by the data word at
 * offset 0x10.  This function abstracts this common operation.
 * NOTE: the two writes must stay in this order - address first, then data.
 */
static void index_addr(struct denali_nand_info *denali, uint32_t address, uint32_t data)
{
	denali_write32(address, denali->flash_mem);
	denali_write32(data, denali->flash_mem + 0x10);
}
/*
 * Perform an indexed read of the device: write the command/address word,
 * then read the result back from the data window (offset 0x10) into *pdata.
 */
static void index_addr_read_data(struct denali_nand_info *denali,
				 uint32_t address, uint32_t *pdata)
{
	denali_write32(address, denali->flash_mem);
	*pdata = ioread32(denali->flash_mem + 0x10);
}
  135. /* We need to buffer some data for some of the NAND core routines.
  136. * The operations manage buffering that data. */
  137. static void reset_buf(struct denali_nand_info *denali)
  138. {
  139. denali->buf.head = denali->buf.tail = 0;
  140. }
  141. static void write_byte_to_buf(struct denali_nand_info *denali, uint8_t byte)
  142. {
  143. BUG_ON(denali->buf.tail >= sizeof(denali->buf.buf));
  144. denali->buf.buf[denali->buf.tail++] = byte;
  145. }
  146. /* reads the status of the device */
  147. static void read_status(struct denali_nand_info *denali)
  148. {
  149. uint32_t cmd = 0x0;
  150. /* initialize the data buffer to store status */
  151. reset_buf(denali);
  152. /* initiate a device status read */
  153. cmd = MODE_11 | BANK(denali->flash_bank);
  154. index_addr(denali, cmd | COMMAND_CYCLE, 0x70);
  155. denali_write32(cmd | STATUS_CYCLE, denali->flash_mem);
  156. /* update buffer with status value */
  157. write_byte_to_buf(denali, ioread32(denali->flash_mem + 0x10));
  158. #if DEBUG_DENALI
  159. printk("device reporting status value of 0x%2x\n", denali->buf.buf[0]);
  160. #endif
  161. }
  162. /* resets a specific device connected to the core */
  163. static void reset_bank(struct denali_nand_info *denali)
  164. {
  165. uint32_t irq_status = 0;
  166. uint32_t irq_mask = reset_complete[denali->flash_bank] |
  167. operation_timeout[denali->flash_bank];
  168. int bank = 0;
  169. clear_interrupts(denali);
  170. bank = device_reset_banks[denali->flash_bank];
  171. denali_write32(bank, denali->flash_reg + DEVICE_RESET);
  172. irq_status = wait_for_irq(denali, irq_mask);
  173. if (irq_status & operation_timeout[denali->flash_bank])
  174. {
  175. printk(KERN_ERR "reset bank failed.\n");
  176. }
  177. }
/*
 * Reset the flash controller: resets every bank in sequence, spinning until
 * each reports reset-complete or timeout, then clears all the resulting
 * status bits.  Always returns PASS; a timed-out bank only logs a warning.
 */
static uint16_t NAND_Flash_Reset(struct denali_nand_info *denali)
{
	uint32_t i;

	nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
		__FILE__, __LINE__, __func__);

	/* Clear any stale reset-complete/timeout status before starting. */
	for (i = 0 ; i < LLD_MAX_FLASH_BANKS; i++)
		denali_write32(reset_complete[i] | operation_timeout[i],
		denali->flash_reg + intr_status_addresses[i]);

	for (i = 0 ; i < LLD_MAX_FLASH_BANKS; i++) {
		denali_write32(device_reset_banks[i], denali->flash_reg + DEVICE_RESET);
		/* NOTE(review): unbounded busy-wait - relies on the
		 * controller eventually raising RST_COMP or TIME_OUT. */
		while (!(ioread32(denali->flash_reg + intr_status_addresses[i]) &
			(reset_complete[i] | operation_timeout[i])))
			;
		if (ioread32(denali->flash_reg + intr_status_addresses[i]) &
			operation_timeout[i])
			nand_dbg_print(NAND_DBG_WARN,
			"NAND Reset operation timed out on bank %d\n", i);
	}

	/* Acknowledge the status bits raised by the resets above. */
	for (i = 0; i < LLD_MAX_FLASH_BANKS; i++)
		denali_write32(reset_complete[i] | operation_timeout[i],
			denali->flash_reg + intr_status_addresses[i]);

	return PASS;
}
  202. /* this routine calculates the ONFI timing values for a given mode and programs
  203. * the clocking register accordingly. The mode is determined by the get_onfi_nand_para
  204. routine.
  205. */
  206. static void NAND_ONFi_Timing_Mode(struct denali_nand_info *denali, uint16_t mode)
  207. {
  208. uint16_t Trea[6] = {40, 30, 25, 20, 20, 16};
  209. uint16_t Trp[6] = {50, 25, 17, 15, 12, 10};
  210. uint16_t Treh[6] = {30, 15, 15, 10, 10, 7};
  211. uint16_t Trc[6] = {100, 50, 35, 30, 25, 20};
  212. uint16_t Trhoh[6] = {0, 15, 15, 15, 15, 15};
  213. uint16_t Trloh[6] = {0, 0, 0, 0, 5, 5};
  214. uint16_t Tcea[6] = {100, 45, 30, 25, 25, 25};
  215. uint16_t Tadl[6] = {200, 100, 100, 100, 70, 70};
  216. uint16_t Trhw[6] = {200, 100, 100, 100, 100, 100};
  217. uint16_t Trhz[6] = {200, 100, 100, 100, 100, 100};
  218. uint16_t Twhr[6] = {120, 80, 80, 60, 60, 60};
  219. uint16_t Tcs[6] = {70, 35, 25, 25, 20, 15};
  220. uint16_t TclsRising = 1;
  221. uint16_t data_invalid_rhoh, data_invalid_rloh, data_invalid;
  222. uint16_t dv_window = 0;
  223. uint16_t en_lo, en_hi;
  224. uint16_t acc_clks;
  225. uint16_t addr_2_data, re_2_we, re_2_re, we_2_re, cs_cnt;
  226. nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
  227. __FILE__, __LINE__, __func__);
  228. en_lo = CEIL_DIV(Trp[mode], CLK_X);
  229. en_hi = CEIL_DIV(Treh[mode], CLK_X);
  230. #if ONFI_BLOOM_TIME
  231. if ((en_hi * CLK_X) < (Treh[mode] + 2))
  232. en_hi++;
  233. #endif
  234. if ((en_lo + en_hi) * CLK_X < Trc[mode])
  235. en_lo += CEIL_DIV((Trc[mode] - (en_lo + en_hi) * CLK_X), CLK_X);
  236. if ((en_lo + en_hi) < CLK_MULTI)
  237. en_lo += CLK_MULTI - en_lo - en_hi;
  238. while (dv_window < 8) {
  239. data_invalid_rhoh = en_lo * CLK_X + Trhoh[mode];
  240. data_invalid_rloh = (en_lo + en_hi) * CLK_X + Trloh[mode];
  241. data_invalid =
  242. data_invalid_rhoh <
  243. data_invalid_rloh ? data_invalid_rhoh : data_invalid_rloh;
  244. dv_window = data_invalid - Trea[mode];
  245. if (dv_window < 8)
  246. en_lo++;
  247. }
  248. acc_clks = CEIL_DIV(Trea[mode], CLK_X);
  249. while (((acc_clks * CLK_X) - Trea[mode]) < 3)
  250. acc_clks++;
  251. if ((data_invalid - acc_clks * CLK_X) < 2)
  252. nand_dbg_print(NAND_DBG_WARN, "%s, Line %d: Warning!\n",
  253. __FILE__, __LINE__);
  254. addr_2_data = CEIL_DIV(Tadl[mode], CLK_X);
  255. re_2_we = CEIL_DIV(Trhw[mode], CLK_X);
  256. re_2_re = CEIL_DIV(Trhz[mode], CLK_X);
  257. we_2_re = CEIL_DIV(Twhr[mode], CLK_X);
  258. cs_cnt = CEIL_DIV((Tcs[mode] - Trp[mode]), CLK_X);
  259. if (!TclsRising)
  260. cs_cnt = CEIL_DIV(Tcs[mode], CLK_X);
  261. if (cs_cnt == 0)
  262. cs_cnt = 1;
  263. if (Tcea[mode]) {
  264. while (((cs_cnt * CLK_X) + Trea[mode]) < Tcea[mode])
  265. cs_cnt++;
  266. }
  267. #if MODE5_WORKAROUND
  268. if (mode == 5)
  269. acc_clks = 5;
  270. #endif
  271. /* Sighting 3462430: Temporary hack for MT29F128G08CJABAWP:B */
  272. if ((ioread32(denali->flash_reg + MANUFACTURER_ID) == 0) &&
  273. (ioread32(denali->flash_reg + DEVICE_ID) == 0x88))
  274. acc_clks = 6;
  275. denali_write32(acc_clks, denali->flash_reg + ACC_CLKS);
  276. denali_write32(re_2_we, denali->flash_reg + RE_2_WE);
  277. denali_write32(re_2_re, denali->flash_reg + RE_2_RE);
  278. denali_write32(we_2_re, denali->flash_reg + WE_2_RE);
  279. denali_write32(addr_2_data, denali->flash_reg + ADDR_2_DATA);
  280. denali_write32(en_lo, denali->flash_reg + RDWR_EN_LO_CNT);
  281. denali_write32(en_hi, denali->flash_reg + RDWR_EN_HI_CNT);
  282. denali_write32(cs_cnt, denali->flash_reg + CS_SETUP_CNT);
  283. }
/* configures the initial ECC settings for the controller */
static void set_ecc_config(struct denali_nand_info *denali)
{
#if SUPPORT_8BITECC
	/* Small pages / small spare areas cannot hold the overhead of the
	 * stronger correction level, so force 8-bit ECC there. */
	if ((ioread32(denali->flash_reg + DEVICE_MAIN_AREA_SIZE) < 4096) ||
		(ioread32(denali->flash_reg + DEVICE_SPARE_AREA_SIZE) <= 128))
		denali_write32(8, denali->flash_reg + ECC_CORRECTION);
#endif

	if ((ioread32(denali->flash_reg + ECC_CORRECTION) & ECC_CORRECTION__VALUE)
		== 1) {
		/* 1-bit correction: fixed 4 ECC bytes per sector, scaled by
		 * the number of devices connected in parallel. */
		denali->dev_info.wECCBytesPerSector = 4;
		denali->dev_info.wECCBytesPerSector *= denali->dev_info.wDevicesConnected;
		/* Spare bytes remaining after the per-sector ECC bytes and
		 * the skipped spare bytes are subtracted from the page's
		 * total spare size. */
		denali->dev_info.wNumPageSpareFlag =
			denali->dev_info.wPageSpareSize -
			denali->dev_info.wPageDataSize /
			(ECC_SECTOR_SIZE * denali->dev_info.wDevicesConnected) *
			denali->dev_info.wECCBytesPerSector
			- denali->dev_info.wSpareSkipBytes;
	} else {
		/* N-bit correction: 13 bits of parity per correction level
		 * (presumably BCH over ECC_SECTOR_SIZE-byte sectors - the
		 * *13/8 matches that; confirm against controller docs),
		 * rounded up to the next even byte count. */
		denali->dev_info.wECCBytesPerSector =
			(ioread32(denali->flash_reg + ECC_CORRECTION) &
			ECC_CORRECTION__VALUE) * 13 / 8;
		if ((denali->dev_info.wECCBytesPerSector) % 2 == 0)
			denali->dev_info.wECCBytesPerSector += 2;
		else
			denali->dev_info.wECCBytesPerSector += 1;
		denali->dev_info.wECCBytesPerSector *= denali->dev_info.wDevicesConnected;

		denali->dev_info.wNumPageSpareFlag = denali->dev_info.wPageSpareSize -
			denali->dev_info.wPageDataSize /
			(ECC_SECTOR_SIZE * denali->dev_info.wDevicesConnected) *
			denali->dev_info.wECCBytesPerSector
			- denali->dev_info.wSpareSkipBytes;
	}
}
/*
 * Queries the NAND device to see what ONFI modes it supports, programs the
 * fastest advertised timing mode, and derives the total block count.
 * Returns FAIL when the part advertises no ONFI timing modes at all.
 */
static uint16_t get_onfi_nand_para(struct denali_nand_info *denali)
{
	int i;
	uint16_t blks_lun_l, blks_lun_h, n_of_luns;
	uint32_t blockperlun, id;

	/* Reset banks 0-3 one after another.  Each nested level only runs
	 * when the previous bank's reset completed rather than timed out;
	 * every wait is an unbounded busy-loop on the status register. */
	denali_write32(DEVICE_RESET__BANK0, denali->flash_reg + DEVICE_RESET);
	while (!((ioread32(denali->flash_reg + INTR_STATUS0) &
		INTR_STATUS0__RST_COMP) |
		(ioread32(denali->flash_reg + INTR_STATUS0) &
		INTR_STATUS0__TIME_OUT)))
		;
	if (ioread32(denali->flash_reg + INTR_STATUS0) & INTR_STATUS0__RST_COMP) {
		denali_write32(DEVICE_RESET__BANK1, denali->flash_reg + DEVICE_RESET);
		while (!((ioread32(denali->flash_reg + INTR_STATUS1) &
			INTR_STATUS1__RST_COMP) |
			(ioread32(denali->flash_reg + INTR_STATUS1) &
			INTR_STATUS1__TIME_OUT)))
			;
		if (ioread32(denali->flash_reg + INTR_STATUS1) &
			INTR_STATUS1__RST_COMP) {
			denali_write32(DEVICE_RESET__BANK2,
				denali->flash_reg + DEVICE_RESET);
			while (!((ioread32(denali->flash_reg + INTR_STATUS2) &
				INTR_STATUS2__RST_COMP) |
				(ioread32(denali->flash_reg + INTR_STATUS2) &
				INTR_STATUS2__TIME_OUT)))
				;
			if (ioread32(denali->flash_reg + INTR_STATUS2) &
				INTR_STATUS2__RST_COMP) {
				denali_write32(DEVICE_RESET__BANK3,
					denali->flash_reg + DEVICE_RESET);
				while (!((ioread32(denali->flash_reg + INTR_STATUS3) &
					INTR_STATUS3__RST_COMP) |
					(ioread32(denali->flash_reg + INTR_STATUS3) &
					INTR_STATUS3__TIME_OUT)))
					;
			} else {
				printk(KERN_ERR "Getting a time out for bank 2!\n");
			}
		} else {
			printk(KERN_ERR "Getting a time out for bank 1!\n");
		}
	}

	/* Acknowledge any timeout bits the resets above may have raised. */
	denali_write32(INTR_STATUS0__TIME_OUT, denali->flash_reg + INTR_STATUS0);
	denali_write32(INTR_STATUS1__TIME_OUT, denali->flash_reg + INTR_STATUS1);
	denali_write32(INTR_STATUS2__TIME_OUT, denali->flash_reg + INTR_STATUS2);
	denali_write32(INTR_STATUS3__TIME_OUT, denali->flash_reg + INTR_STATUS3);

	/* Cache the ONFI feature/command/timing words the controller has
	 * latched from the device. */
	denali->dev_info.wONFIDevFeatures =
		ioread32(denali->flash_reg + ONFI_DEVICE_FEATURES);
	denali->dev_info.wONFIOptCommands =
		ioread32(denali->flash_reg + ONFI_OPTIONAL_COMMANDS);
	denali->dev_info.wONFITimingMode =
		ioread32(denali->flash_reg + ONFI_TIMING_MODE);
	denali->dev_info.wONFIPgmCacheTimingMode =
		ioread32(denali->flash_reg + ONFI_PGM_CACHE_TIMING_MODE);

	/* Total blocks = LUN count * blocks per LUN (split over two regs). */
	n_of_luns = ioread32(denali->flash_reg + ONFI_DEVICE_NO_OF_LUNS) &
		ONFI_DEVICE_NO_OF_LUNS__NO_OF_LUNS;
	blks_lun_l = ioread32(denali->flash_reg + ONFI_DEVICE_NO_OF_BLOCKS_PER_LUN_L);
	blks_lun_h = ioread32(denali->flash_reg + ONFI_DEVICE_NO_OF_BLOCKS_PER_LUN_U);
	blockperlun = (blks_lun_h << 16) | blks_lun_l;
	denali->dev_info.wTotalBlocks = n_of_luns * blockperlun;

	if (!(ioread32(denali->flash_reg + ONFI_TIMING_MODE) &
		ONFI_TIMING_MODE__VALUE))
		return FAIL;

	/* Pick the highest advertised timing-mode bit (5 down to 1); if none
	 * is set, i reaches 0 and mode 0 is programmed. */
	for (i = 5; i > 0; i--) {
		if (ioread32(denali->flash_reg + ONFI_TIMING_MODE) & (0x01 << i))
			break;
	}
	NAND_ONFi_Timing_Mode(denali, i);

	/* Issue Read ID (0x90, address 0) and keep the third ID byte; its
	 * 0x0C bits flag the device as MLC. */
	index_addr(denali, MODE_11 | 0, 0x90);
	index_addr(denali, MODE_11 | 1, 0);
	for (i = 0; i < 3; i++)
		index_addr_read_data(denali, MODE_11 | 2, &id);
	nand_dbg_print(NAND_DBG_DEBUG, "3rd ID: 0x%x\n", id);
	denali->dev_info.MLCDevice = id & 0x0C;

	/* By now, all the ONFI devices we know support the page cache */
	/* rw feature. So here we enable the pipeline_rw_ahead feature */
	/* iowrite32(1, denali->flash_reg + CACHE_WRITE_ENABLE); */
	/* iowrite32(1, denali->flash_reg + CACHE_READ_ENABLE); */

	return PASS;
}
/* Derives device parameters for Samsung NAND parts from their ID bytes. */
static void get_samsung_nand_para(struct denali_nand_info *denali)
{
	uint8_t no_of_planes;
	uint32_t blk_size;
	uint64_t plane_size, capacity;
	uint32_t id_bytes[5];
	int i;

	/* Issue Read ID (0x90, address 0) and pull back five ID bytes. */
	index_addr(denali, (uint32_t)(MODE_11 | 0), 0x90);
	index_addr(denali, (uint32_t)(MODE_11 | 1), 0);
	for (i = 0; i < 5; i++)
		index_addr_read_data(denali, (uint32_t)(MODE_11 | 2), &id_bytes[i]);
	nand_dbg_print(NAND_DBG_DEBUG,
		"ID bytes: 0x%x, 0x%x, 0x%x, 0x%x, 0x%x\n",
		id_bytes[0], id_bytes[1], id_bytes[2],
		id_bytes[3], id_bytes[4]);

	if ((id_bytes[1] & 0xff) == 0xd3) { /* Samsung K9WAG08U1A */
		/* Set timing register values according to datasheet */
		denali_write32(5, denali->flash_reg + ACC_CLKS);
		denali_write32(20, denali->flash_reg + RE_2_WE);
		denali_write32(12, denali->flash_reg + WE_2_RE);
		denali_write32(14, denali->flash_reg + ADDR_2_DATA);
		denali_write32(3, denali->flash_reg + RDWR_EN_LO_CNT);
		denali_write32(2, denali->flash_reg + RDWR_EN_HI_CNT);
		denali_write32(2, denali->flash_reg + CS_SETUP_CNT);
	}

	/* Decode plane count and plane size from ID byte 4, block size from
	 * DEVICE_PARAM_1, then convert total capacity to a block count.
	 * do_div() is used because 64-bit division needs library support. */
	no_of_planes = 1 << ((id_bytes[4] & 0x0c) >> 2);
	plane_size = (uint64_t)64 << ((id_bytes[4] & 0x70) >> 4);
	blk_size = 64 << ((ioread32(denali->flash_reg + DEVICE_PARAM_1) & 0x30) >> 4);
	capacity = (uint64_t)128 * plane_size * no_of_planes;
	do_div(capacity, blk_size);
	denali->dev_info.wTotalBlocks = capacity;
}
/* Derives device parameters for Toshiba NAND parts. */
static void get_toshiba_nand_para(struct denali_nand_info *denali)
{
	void __iomem *scratch_reg;
	uint32_t tmp;

	/* Workaround to fix a controller bug which reports a wrong */
	/* spare area size for some kind of Toshiba NAND device */
	if ((ioread32(denali->flash_reg + DEVICE_MAIN_AREA_SIZE) == 4096) &&
		(ioread32(denali->flash_reg + DEVICE_SPARE_AREA_SIZE) == 64)) {
		denali_write32(216, denali->flash_reg + DEVICE_SPARE_AREA_SIZE);
		/* Logical spare size = per-device spare * devices connected. */
		tmp = ioread32(denali->flash_reg + DEVICES_CONNECTED) *
			ioread32(denali->flash_reg + DEVICE_SPARE_AREA_SIZE);
		denali_write32(tmp, denali->flash_reg + LOGICAL_PAGE_SPARE_SIZE);
#if SUPPORT_15BITECC
		denali_write32(15, denali->flash_reg + ECC_CORRECTION);
#elif SUPPORT_8BITECC
		denali_write32(8, denali->flash_reg + ECC_CORRECTION);
#endif
	}

	/* Toshiba NAND cannot report its block count, so the user must store
	 * log2(total blocks) in a scratch register before this driver loads;
	 * if the register is unmappable or the value implausible (< 512
	 * blocks), fall back to the compiled-in default. */
	scratch_reg = ioremap_nocache(SCRATCH_REG_ADDR, SCRATCH_REG_SIZE);
	if (!scratch_reg) {
		printk(KERN_ERR "Spectra: ioremap failed in %s, Line %d",
			__FILE__, __LINE__);
		denali->dev_info.wTotalBlocks = GLOB_HWCTL_DEFAULT_BLKS;
	} else {
		nand_dbg_print(NAND_DBG_WARN,
			"Spectra: ioremap reg address: 0x%p\n", scratch_reg);
		denali->dev_info.wTotalBlocks = 1 << ioread8(scratch_reg);
		if (denali->dev_info.wTotalBlocks < 512)
			denali->dev_info.wTotalBlocks = GLOB_HWCTL_DEFAULT_BLKS;
		iounmap(scratch_reg);
	}
}
/* Derives device parameters for Hynix NAND parts from the device ID. */
static void get_hynix_nand_para(struct denali_nand_info *denali)
{
	void __iomem *scratch_reg;
	uint32_t main_size, spare_size;

	switch (denali->dev_info.wDeviceID) {
	case 0xD5: /* Hynix H27UAG8T2A, H27UBG8U5A or H27UCG8VFA */
	case 0xD7: /* Hynix H27UDG8VEM, H27UCG8UDM or H27UCG8V5A */
		/* These parts need their geometry programmed by hand:
		 * 128 pages/block, 4096-byte main area, 224-byte spare,
		 * logical sizes scaled by the devices-connected count. */
		denali_write32(128, denali->flash_reg + PAGES_PER_BLOCK);
		denali_write32(4096, denali->flash_reg + DEVICE_MAIN_AREA_SIZE);
		denali_write32(224, denali->flash_reg + DEVICE_SPARE_AREA_SIZE);
		main_size = 4096 * ioread32(denali->flash_reg + DEVICES_CONNECTED);
		spare_size = 224 * ioread32(denali->flash_reg + DEVICES_CONNECTED);
		denali_write32(main_size, denali->flash_reg + LOGICAL_PAGE_DATA_SIZE);
		denali_write32(spare_size, denali->flash_reg + LOGICAL_PAGE_SPARE_SIZE);
		denali_write32(0, denali->flash_reg + DEVICE_WIDTH);
#if SUPPORT_15BITECC
		denali_write32(15, denali->flash_reg + ECC_CORRECTION);
#elif SUPPORT_8BITECC
		denali_write32(8, denali->flash_reg + ECC_CORRECTION);
#endif
		denali->dev_info.MLCDevice = 1;
		break;
	default:
		nand_dbg_print(NAND_DBG_WARN,
			"Spectra: Unknown Hynix NAND (Device ID: 0x%x)."
			"Will use default parameter values instead.\n",
			denali->dev_info.wDeviceID);
	}

	/* Block count comes from a user-populated scratch register holding
	 * log2(total blocks) - same convention as get_toshiba_nand_para();
	 * fall back to the default when unmappable or implausible. */
	scratch_reg = ioremap_nocache(SCRATCH_REG_ADDR, SCRATCH_REG_SIZE);
	if (!scratch_reg) {
		printk(KERN_ERR "Spectra: ioremap failed in %s, Line %d",
			__FILE__, __LINE__);
		denali->dev_info.wTotalBlocks = GLOB_HWCTL_DEFAULT_BLKS;
	} else {
		nand_dbg_print(NAND_DBG_WARN,
			"Spectra: ioremap reg address: 0x%p\n", scratch_reg);
		denali->dev_info.wTotalBlocks = 1 << ioread8(scratch_reg);
		if (denali->dev_info.wTotalBlocks < 512)
			denali->dev_info.wTotalBlocks = GLOB_HWCTL_DEFAULT_BLKS;
		iounmap(scratch_reg);
	}
}
/*
 * Determines how many NAND chips are connected to the controller by issuing
 * a Read ID to each bank and comparing maker codes against bank 0.  Note
 * for Intel CE4100 devices we don't support more than one device.
 */
static void find_valid_banks(struct denali_nand_info *denali)
{
	uint32_t id[LLD_MAX_FLASH_BANKS];
	int i;

	denali->total_used_banks = 1;
	for (i = 0; i < LLD_MAX_FLASH_BANKS; i++) {
		/* Read ID (0x90, address 0) on bank i; keep the first byte. */
		index_addr(denali, (uint32_t)(MODE_11 | (i << 24) | 0), 0x90);
		index_addr(denali, (uint32_t)(MODE_11 | (i << 24) | 1), 0);
		index_addr_read_data(denali, (uint32_t)(MODE_11 | (i << 24) | 2), &id[i]);

		nand_dbg_print(NAND_DBG_DEBUG,
			"Return 1st ID for bank[%d]: %x\n", i, id[i]);

		if (i == 0) {
			if (!(id[i] & 0x0ff))
				break; /* bank 0 returned no maker code - nothing detected */
		} else {
			/* A further bank counts as populated only when its
			 * maker code matches bank 0's. */
			if ((id[i] & 0x0ff) == (id[0] & 0x0ff))
				denali->total_used_banks++;
			else
				break;
		}
	}

	if (denali->platform == INTEL_CE4100)
	{
		/* Platform limitations of the CE4100 device limit
		 * users to a single chip solution for NAND.
		 * Multichip support is not enabled.
		 */
		if (denali->total_used_banks != 1)
		{
			printk(KERN_ERR "Sorry, Intel CE4100 only supports "
					"a single NAND device.\n");
			BUG();
		}
	}
	nand_dbg_print(NAND_DBG_DEBUG,
		"denali->total_used_banks: %d\n", denali->total_used_banks);
}
  551. static void detect_partition_feature(struct denali_nand_info *denali)
  552. {
  553. if (ioread32(denali->flash_reg + FEATURES) & FEATURES__PARTITION) {
  554. if ((ioread32(denali->flash_reg + PERM_SRC_ID_1) &
  555. PERM_SRC_ID_1__SRCID) == SPECTRA_PARTITION_ID) {
  556. denali->dev_info.wSpectraStartBlock =
  557. ((ioread32(denali->flash_reg + MIN_MAX_BANK_1) &
  558. MIN_MAX_BANK_1__MIN_VALUE) *
  559. denali->dev_info.wTotalBlocks)
  560. +
  561. (ioread32(denali->flash_reg + MIN_BLK_ADDR_1) &
  562. MIN_BLK_ADDR_1__VALUE);
  563. denali->dev_info.wSpectraEndBlock =
  564. (((ioread32(denali->flash_reg + MIN_MAX_BANK_1) &
  565. MIN_MAX_BANK_1__MAX_VALUE) >> 2) *
  566. denali->dev_info.wTotalBlocks)
  567. +
  568. (ioread32(denali->flash_reg + MAX_BLK_ADDR_1) &
  569. MAX_BLK_ADDR_1__VALUE);
  570. denali->dev_info.wTotalBlocks *= denali->total_used_banks;
  571. if (denali->dev_info.wSpectraEndBlock >=
  572. denali->dev_info.wTotalBlocks) {
  573. denali->dev_info.wSpectraEndBlock =
  574. denali->dev_info.wTotalBlocks - 1;
  575. }
  576. denali->dev_info.wDataBlockNum =
  577. denali->dev_info.wSpectraEndBlock -
  578. denali->dev_info.wSpectraStartBlock + 1;
  579. } else {
  580. denali->dev_info.wTotalBlocks *= denali->total_used_banks;
  581. denali->dev_info.wSpectraStartBlock = SPECTRA_START_BLOCK;
  582. denali->dev_info.wSpectraEndBlock =
  583. denali->dev_info.wTotalBlocks - 1;
  584. denali->dev_info.wDataBlockNum =
  585. denali->dev_info.wSpectraEndBlock -
  586. denali->dev_info.wSpectraStartBlock + 1;
  587. }
  588. } else {
  589. denali->dev_info.wTotalBlocks *= denali->total_used_banks;
  590. denali->dev_info.wSpectraStartBlock = SPECTRA_START_BLOCK;
  591. denali->dev_info.wSpectraEndBlock = denali->dev_info.wTotalBlocks - 1;
  592. denali->dev_info.wDataBlockNum =
  593. denali->dev_info.wSpectraEndBlock -
  594. denali->dev_info.wSpectraStartBlock + 1;
  595. }
  596. }
  597. static void dump_device_info(struct denali_nand_info *denali)
  598. {
  599. nand_dbg_print(NAND_DBG_DEBUG, "denali->dev_info:\n");
  600. nand_dbg_print(NAND_DBG_DEBUG, "DeviceMaker: 0x%x\n",
  601. denali->dev_info.wDeviceMaker);
  602. nand_dbg_print(NAND_DBG_DEBUG, "DeviceID: 0x%x\n",
  603. denali->dev_info.wDeviceID);
  604. nand_dbg_print(NAND_DBG_DEBUG, "DeviceType: 0x%x\n",
  605. denali->dev_info.wDeviceType);
  606. nand_dbg_print(NAND_DBG_DEBUG, "SpectraStartBlock: %d\n",
  607. denali->dev_info.wSpectraStartBlock);
  608. nand_dbg_print(NAND_DBG_DEBUG, "SpectraEndBlock: %d\n",
  609. denali->dev_info.wSpectraEndBlock);
  610. nand_dbg_print(NAND_DBG_DEBUG, "TotalBlocks: %d\n",
  611. denali->dev_info.wTotalBlocks);
  612. nand_dbg_print(NAND_DBG_DEBUG, "PagesPerBlock: %d\n",
  613. denali->dev_info.wPagesPerBlock);
  614. nand_dbg_print(NAND_DBG_DEBUG, "PageSize: %d\n",
  615. denali->dev_info.wPageSize);
  616. nand_dbg_print(NAND_DBG_DEBUG, "PageDataSize: %d\n",
  617. denali->dev_info.wPageDataSize);
  618. nand_dbg_print(NAND_DBG_DEBUG, "PageSpareSize: %d\n",
  619. denali->dev_info.wPageSpareSize);
  620. nand_dbg_print(NAND_DBG_DEBUG, "NumPageSpareFlag: %d\n",
  621. denali->dev_info.wNumPageSpareFlag);
  622. nand_dbg_print(NAND_DBG_DEBUG, "ECCBytesPerSector: %d\n",
  623. denali->dev_info.wECCBytesPerSector);
  624. nand_dbg_print(NAND_DBG_DEBUG, "BlockSize: %d\n",
  625. denali->dev_info.wBlockSize);
  626. nand_dbg_print(NAND_DBG_DEBUG, "BlockDataSize: %d\n",
  627. denali->dev_info.wBlockDataSize);
  628. nand_dbg_print(NAND_DBG_DEBUG, "DataBlockNum: %d\n",
  629. denali->dev_info.wDataBlockNum);
  630. nand_dbg_print(NAND_DBG_DEBUG, "PlaneNum: %d\n",
  631. denali->dev_info.bPlaneNum);
  632. nand_dbg_print(NAND_DBG_DEBUG, "DeviceMainAreaSize: %d\n",
  633. denali->dev_info.wDeviceMainAreaSize);
  634. nand_dbg_print(NAND_DBG_DEBUG, "DeviceSpareAreaSize: %d\n",
  635. denali->dev_info.wDeviceSpareAreaSize);
  636. nand_dbg_print(NAND_DBG_DEBUG, "DevicesConnected: %d\n",
  637. denali->dev_info.wDevicesConnected);
  638. nand_dbg_print(NAND_DBG_DEBUG, "DeviceWidth: %d\n",
  639. denali->dev_info.wDeviceWidth);
  640. nand_dbg_print(NAND_DBG_DEBUG, "HWRevision: 0x%x\n",
  641. denali->dev_info.wHWRevision);
  642. nand_dbg_print(NAND_DBG_DEBUG, "HWFeatures: 0x%x\n",
  643. denali->dev_info.wHWFeatures);
  644. nand_dbg_print(NAND_DBG_DEBUG, "ONFIDevFeatures: 0x%x\n",
  645. denali->dev_info.wONFIDevFeatures);
  646. nand_dbg_print(NAND_DBG_DEBUG, "ONFIOptCommands: 0x%x\n",
  647. denali->dev_info.wONFIOptCommands);
  648. nand_dbg_print(NAND_DBG_DEBUG, "ONFITimingMode: 0x%x\n",
  649. denali->dev_info.wONFITimingMode);
  650. nand_dbg_print(NAND_DBG_DEBUG, "ONFIPgmCacheTimingMode: 0x%x\n",
  651. denali->dev_info.wONFIPgmCacheTimingMode);
  652. nand_dbg_print(NAND_DBG_DEBUG, "MLCDevice: %s\n",
  653. denali->dev_info.MLCDevice ? "Yes" : "No");
  654. nand_dbg_print(NAND_DBG_DEBUG, "SpareSkipBytes: %d\n",
  655. denali->dev_info.wSpareSkipBytes);
  656. nand_dbg_print(NAND_DBG_DEBUG, "BitsInPageNumber: %d\n",
  657. denali->dev_info.nBitsInPageNumber);
  658. nand_dbg_print(NAND_DBG_DEBUG, "BitsInPageDataSize: %d\n",
  659. denali->dev_info.nBitsInPageDataSize);
  660. nand_dbg_print(NAND_DBG_DEBUG, "BitsInBlockDataSize: %d\n",
  661. denali->dev_info.nBitsInBlockDataSize);
  662. }
/* Populate denali->dev_info from the controller's configuration
 * registers: device IDs, geometry, derived sizes, ECC setup, bank count
 * and partition window.  Returns PASS, or FAIL when ONFI parameter
 * discovery fails or the reported plane count is unsupported.
 */
static uint16_t NAND_Read_Device_ID(struct denali_nand_info *denali)
{
	uint16_t status = PASS;
	uint8_t no_of_planes;

	nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
			__FILE__, __LINE__, __func__);

	/* identity bytes as latched by the controller */
	denali->dev_info.wDeviceMaker = ioread32(denali->flash_reg + MANUFACTURER_ID);
	denali->dev_info.wDeviceID = ioread32(denali->flash_reg + DEVICE_ID);
	denali->dev_info.bDeviceParam0 = ioread32(denali->flash_reg + DEVICE_PARAM_0);
	denali->dev_info.bDeviceParam1 = ioread32(denali->flash_reg + DEVICE_PARAM_1);
	denali->dev_info.bDeviceParam2 = ioread32(denali->flash_reg + DEVICE_PARAM_2);

	/* bits 2-3 of DEVICE_PARAM_0 flag an MLC part — presumably per the
	 * device's feature byte; confirm against the controller manual */
	denali->dev_info.MLCDevice = ioread32(denali->flash_reg + DEVICE_PARAM_0) & 0x0c;

	/* pick the vendor-specific parameter-discovery path */
	if (ioread32(denali->flash_reg + ONFI_DEVICE_NO_OF_LUNS) &
		ONFI_DEVICE_NO_OF_LUNS__ONFI_DEVICE) { /* ONFI 1.0 NAND */
		if (FAIL == get_onfi_nand_para(denali))
			return FAIL;
	} else if (denali->dev_info.wDeviceMaker == 0xEC) { /* Samsung NAND */
		get_samsung_nand_para(denali);
	} else if (denali->dev_info.wDeviceMaker == 0x98) { /* Toshiba NAND */
		get_toshiba_nand_para(denali);
	} else if (denali->dev_info.wDeviceMaker == 0xAD) { /* Hynix NAND */
		get_hynix_nand_para(denali);
	} else {
		/* unknown maker: fall back to the default block count */
		denali->dev_info.wTotalBlocks = GLOB_HWCTL_DEFAULT_BLKS;
	}

	nand_dbg_print(NAND_DBG_DEBUG, "Dump timing register values:"
			"acc_clks: %d, re_2_we: %d, we_2_re: %d,"
			"addr_2_data: %d, rdwr_en_lo_cnt: %d, "
			"rdwr_en_hi_cnt: %d, cs_setup_cnt: %d\n",
			ioread32(denali->flash_reg + ACC_CLKS),
			ioread32(denali->flash_reg + RE_2_WE),
			ioread32(denali->flash_reg + WE_2_RE),
			ioread32(denali->flash_reg + ADDR_2_DATA),
			ioread32(denali->flash_reg + RDWR_EN_LO_CNT),
			ioread32(denali->flash_reg + RDWR_EN_HI_CNT),
			ioread32(denali->flash_reg + CS_SETUP_CNT));

	/* controller-reported geometry */
	denali->dev_info.wHWRevision = ioread32(denali->flash_reg + REVISION);
	denali->dev_info.wHWFeatures = ioread32(denali->flash_reg + FEATURES);

	denali->dev_info.wDeviceMainAreaSize =
		ioread32(denali->flash_reg + DEVICE_MAIN_AREA_SIZE);
	denali->dev_info.wDeviceSpareAreaSize =
		ioread32(denali->flash_reg + DEVICE_SPARE_AREA_SIZE);

	denali->dev_info.wPageDataSize =
		ioread32(denali->flash_reg + LOGICAL_PAGE_DATA_SIZE);

	/* Note: When using the Micon 4K NAND device, the controller will report
	 * Page Spare Size as 216 bytes. But Micron's Spec say it's 218 bytes.
	 * And if force set it to 218 bytes, the controller can not work
	 * correctly. So just let it be. But keep in mind that this bug may
	 * cause
	 * other problems in future. - Yunpeng 2008-10-10
	 */
	denali->dev_info.wPageSpareSize =
		ioread32(denali->flash_reg + LOGICAL_PAGE_SPARE_SIZE);

	denali->dev_info.wPagesPerBlock = ioread32(denali->flash_reg + PAGES_PER_BLOCK);

	/* derived sizes */
	denali->dev_info.wPageSize =
		denali->dev_info.wPageDataSize + denali->dev_info.wPageSpareSize;
	denali->dev_info.wBlockSize =
		denali->dev_info.wPageSize * denali->dev_info.wPagesPerBlock;
	denali->dev_info.wBlockDataSize =
		denali->dev_info.wPagesPerBlock * denali->dev_info.wPageDataSize;

	denali->dev_info.wDeviceWidth = ioread32(denali->flash_reg + DEVICE_WIDTH);
	denali->dev_info.wDeviceType =
		((ioread32(denali->flash_reg + DEVICE_WIDTH) > 0) ? 16 : 8);

	denali->dev_info.wDevicesConnected = ioread32(denali->flash_reg + DEVICES_CONNECTED);

	/* skip bytes scale with the number of devices in parallel */
	denali->dev_info.wSpareSkipBytes =
		ioread32(denali->flash_reg + SPARE_AREA_SKIP_BYTES) *
		denali->dev_info.wDevicesConnected;

	denali->dev_info.nBitsInPageNumber =
		ilog2(denali->dev_info.wPagesPerBlock);
	denali->dev_info.nBitsInPageDataSize =
		ilog2(denali->dev_info.wPageDataSize);
	denali->dev_info.nBitsInBlockDataSize =
		ilog2(denali->dev_info.wBlockDataSize);

	set_ecc_config(denali);

	/* accepted encodings 0/1/3/7 map to 1/2/4/8 planes; anything
	 * else is rejected */
	no_of_planes = ioread32(denali->flash_reg + NUMBER_OF_PLANES) &
		NUMBER_OF_PLANES__VALUE;
	switch (no_of_planes) {
	case 0:
	case 1:
	case 3:
	case 7:
		denali->dev_info.bPlaneNum = no_of_planes + 1;
		break;
	default:
		status = FAIL;
		break;
	}

	find_valid_banks(denali);

	detect_partition_feature(denali);

	dump_device_info(denali);

	/* If the user specified to override the default timings
	 * with a specific ONFI mode, we apply those changes here.
	 */
	if (onfi_timing_mode != NAND_DEFAULT_TIMINGS)
	{
		NAND_ONFi_Timing_Mode(denali, onfi_timing_mode);
	}

	return status;
}
  762. static void NAND_LLD_Enable_Disable_Interrupts(struct denali_nand_info *denali,
  763. uint16_t INT_ENABLE)
  764. {
  765. nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
  766. __FILE__, __LINE__, __func__);
  767. if (INT_ENABLE)
  768. denali_write32(1, denali->flash_reg + GLOBAL_INT_ENABLE);
  769. else
  770. denali_write32(0, denali->flash_reg + GLOBAL_INT_ENABLE);
  771. }
  772. /* validation function to verify that the controlling software is making
  773. a valid request
  774. */
  775. static inline bool is_flash_bank_valid(int flash_bank)
  776. {
  777. return (flash_bank >= 0 && flash_bank < 4);
  778. }
  779. static void denali_irq_init(struct denali_nand_info *denali)
  780. {
  781. uint32_t int_mask = 0;
  782. /* Disable global interrupts */
  783. NAND_LLD_Enable_Disable_Interrupts(denali, false);
  784. int_mask = DENALI_IRQ_ALL;
  785. /* Clear all status bits */
  786. denali_write32(0xFFFF, denali->flash_reg + INTR_STATUS0);
  787. denali_write32(0xFFFF, denali->flash_reg + INTR_STATUS1);
  788. denali_write32(0xFFFF, denali->flash_reg + INTR_STATUS2);
  789. denali_write32(0xFFFF, denali->flash_reg + INTR_STATUS3);
  790. denali_irq_enable(denali, int_mask);
  791. }
static void denali_irq_cleanup(int irqnum, struct denali_nand_info *denali)
{
	/* Mask the controller's global interrupt gate before releasing the
	 * IRQ line, so nothing can fire once the handler is gone. */
	NAND_LLD_Enable_Disable_Interrupts(denali, false);
	free_irq(irqnum, denali);
}
/* Apply the same interrupt-enable mask to all four banks' INTR_EN
 * registers. */
static void denali_irq_enable(struct denali_nand_info *denali, uint32_t int_mask)
{
	denali_write32(int_mask, denali->flash_reg + INTR_EN0);
	denali_write32(int_mask, denali->flash_reg + INTR_EN1);
	denali_write32(int_mask, denali->flash_reg + INTR_EN2);
	denali_write32(int_mask, denali->flash_reg + INTR_EN3);
}
  804. /* This function only returns when an interrupt that this driver cares about
  805. * occurs. This is to reduce the overhead of servicing interrupts
  806. */
  807. static inline uint32_t denali_irq_detected(struct denali_nand_info *denali)
  808. {
  809. return (read_interrupt_status(denali) & DENALI_IRQ_ALL);
  810. }
  811. /* Interrupts are cleared by writing a 1 to the appropriate status bit */
  812. static inline void clear_interrupt(struct denali_nand_info *denali, uint32_t irq_mask)
  813. {
  814. uint32_t intr_status_reg = 0;
  815. intr_status_reg = intr_status_addresses[denali->flash_bank];
  816. denali_write32(irq_mask, denali->flash_reg + intr_status_reg);
  817. }
/* Drop all software-latched interrupt state under the IRQ lock.  The
 * hardware status is sampled first (and, in debug builds, recorded in
 * the IRQ event ring). */
static void clear_interrupts(struct denali_nand_info *denali)
{
	uint32_t status = 0x0;
	spin_lock_irq(&denali->irq_lock);

	status = read_interrupt_status(denali);

#if DEBUG_DENALI
	/* log the snapshot into the 32-entry IRQ debug ring */
	denali->irq_debug_array[denali->idx++] = 0x30000000 | status;
	denali->idx %= 32;
#endif

	denali->irq_status = 0x0;

	spin_unlock_irq(&denali->irq_lock);
}
  830. static uint32_t read_interrupt_status(struct denali_nand_info *denali)
  831. {
  832. uint32_t intr_status_reg = 0;
  833. intr_status_reg = intr_status_addresses[denali->flash_bank];
  834. return ioread32(denali->flash_reg + intr_status_reg);
  835. }
  836. #if DEBUG_DENALI
  837. static void print_irq_log(struct denali_nand_info *denali)
  838. {
  839. int i = 0;
  840. printk("ISR debug log index = %X\n", denali->idx);
  841. for (i = 0; i < 32; i++)
  842. {
  843. printk("%08X: %08X\n", i, denali->irq_debug_array[i]);
  844. }
  845. }
  846. #endif
  847. /* This is the interrupt service routine. It handles all interrupts
  848. * sent to this device. Note that on CE4100, this is a shared
  849. * interrupt.
  850. */
  851. static irqreturn_t denali_isr(int irq, void *dev_id)
  852. {
  853. struct denali_nand_info *denali = dev_id;
  854. uint32_t irq_status = 0x0;
  855. irqreturn_t result = IRQ_NONE;
  856. spin_lock(&denali->irq_lock);
  857. /* check to see if a valid NAND chip has
  858. * been selected.
  859. */
  860. if (is_flash_bank_valid(denali->flash_bank))
  861. {
  862. /* check to see if controller generated
  863. * the interrupt, since this is a shared interrupt */
  864. if ((irq_status = denali_irq_detected(denali)) != 0)
  865. {
  866. #if DEBUG_DENALI
  867. denali->irq_debug_array[denali->idx++] = 0x10000000 | irq_status;
  868. denali->idx %= 32;
  869. printk("IRQ status = 0x%04x\n", irq_status);
  870. #endif
  871. /* handle interrupt */
  872. /* first acknowledge it */
  873. clear_interrupt(denali, irq_status);
  874. /* store the status in the device context for someone
  875. to read */
  876. denali->irq_status |= irq_status;
  877. /* notify anyone who cares that it happened */
  878. complete(&denali->complete);
  879. /* tell the OS that we've handled this */
  880. result = IRQ_HANDLED;
  881. }
  882. }
  883. spin_unlock(&denali->irq_lock);
  884. return result;
  885. }
  886. #define BANK(x) ((x) << 24)
/* Block until at least one interrupt in irq_mask has been latched by the
 * ISR, looping over wakeups caused by unrelated interrupts.  Returns the
 * latched status word, or 0 when one second elapses without a matching
 * interrupt. */
static uint32_t wait_for_irq(struct denali_nand_info *denali, uint32_t irq_mask)
{
	unsigned long comp_res = 0;
	uint32_t intr_status = 0;
	bool retry = false;
	unsigned long timeout = msecs_to_jiffies(1000);

	do
	{
#if DEBUG_DENALI
		printk("waiting for 0x%x\n", irq_mask);
#endif
		comp_res = wait_for_completion_timeout(&denali->complete, timeout);
		spin_lock_irq(&denali->irq_lock);
		intr_status = denali->irq_status;

#if DEBUG_DENALI
		denali->irq_debug_array[denali->idx++] = 0x20000000 | (irq_mask << 16) | intr_status;
		denali->idx %= 32;
#endif

		if (intr_status & irq_mask)
		{
			/* consume only the requested bits; anything else
			 * stays latched for other waiters */
			denali->irq_status &= ~irq_mask;
			spin_unlock_irq(&denali->irq_lock);
#if DEBUG_DENALI
			if (retry) printk("status on retry = 0x%x\n", intr_status);
#endif
			/* our interrupt was detected */
			break;
		}
		else
		{
			/* these are not the interrupts you are looking for -
			   need to wait again */
			spin_unlock_irq(&denali->irq_lock);
#if DEBUG_DENALI
			print_irq_log(denali);
			printk("received irq nobody cared: irq_status = 0x%x,"
				" irq_mask = 0x%x, timeout = %ld\n", intr_status, irq_mask, comp_res);
#endif
			retry = true;
		}
	} while (comp_res != 0);

	if (comp_res == 0)
	{
		/* timeout */
		printk(KERN_ERR "timeout occurred, status = 0x%x, mask = 0x%x\n",
				intr_status, irq_mask);
		intr_status = 0;
	}
	return intr_status;
}
  937. /* This helper function setups the registers for ECC and whether or not
  938. the spare area will be transfered. */
  939. static void setup_ecc_for_xfer(struct denali_nand_info *denali, bool ecc_en,
  940. bool transfer_spare)
  941. {
  942. int ecc_en_flag = 0, transfer_spare_flag = 0;
  943. /* set ECC, transfer spare bits if needed */
  944. ecc_en_flag = ecc_en ? ECC_ENABLE__FLAG : 0;
  945. transfer_spare_flag = transfer_spare ? TRANSFER_SPARE_REG__FLAG : 0;
  946. /* Enable spare area/ECC per user's request. */
  947. denali_write32(ecc_en_flag, denali->flash_reg + ECC_ENABLE);
  948. denali_write32(transfer_spare_flag, denali->flash_reg + TRANSFER_SPARE_REG);
  949. }
  950. /* sends a pipeline command operation to the controller. See the Denali NAND
  951. controller's user guide for more information (section 4.2.3.6).
  952. */
/* Issue a pipeline read/write command for the page in denali->page,
 * programming ECC and spare-area transfer per the flags.  For pipelined
 * reads this also waits for the controller to accept the command.
 * Returns PASS, or FAIL when a read command is not accepted in time. */
static int denali_send_pipeline_cmd(struct denali_nand_info *denali, bool ecc_en,
					bool transfer_spare, int access_type,
					int op)
{
	int status = PASS;
	uint32_t addr = 0x0, cmd = 0x0, page_count = 1, irq_status = 0,
		irq_mask = 0;

	/* only reads have a completion interrupt to wait on */
	if (op == DENALI_READ) irq_mask = INTR_STATUS0__LOAD_COMP;
	else if (op == DENALI_WRITE) irq_mask = 0;
	else BUG();

	setup_ecc_for_xfer(denali, ecc_en, transfer_spare);

#if DEBUG_DENALI
	spin_lock_irq(&denali->irq_lock);
	denali->irq_debug_array[denali->idx++] = 0x40000000 | ioread32(denali->flash_reg + ECC_ENABLE) | (access_type << 4);
	denali->idx %= 32;
	spin_unlock_irq(&denali->irq_lock);
#endif

	/* clear interrupts */
	clear_interrupts(denali);

	/* bank select in bits 31:24, page number in the low bits */
	addr = BANK(denali->flash_bank) | denali->page;

	if (op == DENALI_WRITE && access_type != SPARE_ACCESS)
	{
		/* main-area write needs no MODE_10 setup */
		cmd = MODE_01 | addr;
		denali_write32(cmd, denali->flash_mem);
	}
	else if (op == DENALI_WRITE && access_type == SPARE_ACCESS)
	{
		/* select the spare area via MODE_10, then switch to
		 * MODE_01 data access for the write */
		cmd = MODE_10 | addr;
		index_addr(denali, (uint32_t)cmd, access_type);

		cmd = MODE_01 | addr;
		denali_write32(cmd, denali->flash_mem);
	}
	else if (op == DENALI_READ)
	{
		/* setup page read request for access type */
		cmd = MODE_10 | addr;
		index_addr(denali, (uint32_t)cmd, access_type);

		/* page 33 of the NAND controller spec indicates we should not
		   use the pipeline commands in Spare area only mode. So we
		   don't.
		 */
		if (access_type == SPARE_ACCESS)
		{
			cmd = MODE_01 | addr;
			denali_write32(cmd, denali->flash_mem);
		}
		else
		{
			/* pipeline command word (see the controller user
			 * guide, section 4.2.3.6) */
			index_addr(denali, (uint32_t)cmd, 0x2000 | op | page_count);

			/* wait for command to be accepted
			 * can always use status0 bit as the mask is identical for each
			 * bank. */
			irq_status = wait_for_irq(denali, irq_mask);

			if (irq_status == 0)
			{
				printk(KERN_ERR "cmd, page, addr on timeout "
					"(0x%x, 0x%x, 0x%x)\n", cmd, denali->page, addr);
				status = FAIL;
			}
			else
			{
				cmd = MODE_01 | addr;
				denali_write32(cmd, denali->flash_mem);
			}
		}
	}
	return status;
}
  1022. /* helper function that simply writes a buffer to the flash */
  1023. static int write_data_to_flash_mem(struct denali_nand_info *denali, const uint8_t *buf,
  1024. int len)
  1025. {
  1026. uint32_t i = 0, *buf32;
  1027. /* verify that the len is a multiple of 4. see comment in
  1028. * read_data_from_flash_mem() */
  1029. BUG_ON((len % 4) != 0);
  1030. /* write the data to the flash memory */
  1031. buf32 = (uint32_t *)buf;
  1032. for (i = 0; i < len / 4; i++)
  1033. {
  1034. denali_write32(*buf32++, denali->flash_mem + 0x10);
  1035. }
  1036. return i*4; /* intent is to return the number of bytes read */
  1037. }
  1038. /* helper function that simply reads a buffer from the flash */
  1039. static int read_data_from_flash_mem(struct denali_nand_info *denali, uint8_t *buf,
  1040. int len)
  1041. {
  1042. uint32_t i = 0, *buf32;
  1043. /* we assume that len will be a multiple of 4, if not
  1044. * it would be nice to know about it ASAP rather than
  1045. * have random failures...
  1046. *
  1047. * This assumption is based on the fact that this
  1048. * function is designed to be used to read flash pages,
  1049. * which are typically multiples of 4...
  1050. */
  1051. BUG_ON((len % 4) != 0);
  1052. /* transfer the data from the flash */
  1053. buf32 = (uint32_t *)buf;
  1054. for (i = 0; i < len / 4; i++)
  1055. {
  1056. *buf32++ = ioread32(denali->flash_mem + 0x10);
  1057. }
  1058. return i*4; /* intent is to return the number of bytes read */
  1059. }
  1060. /* writes OOB data to the device */
/* Program the OOB (spare) area of the given page from buf.
 * Returns 0 on success, -EIO on command or completion failure. */
static int write_oob_data(struct mtd_info *mtd, uint8_t *buf, int page)
{
	struct denali_nand_info *denali = mtd_to_denali(mtd);
	uint32_t irq_status = 0;
	/* done when the program completes or the controller reports
	 * a program failure */
	uint32_t irq_mask = INTR_STATUS0__PROGRAM_COMP |
						INTR_STATUS0__PROGRAM_FAIL;
	int status = 0;

	denali->page = page;

	if (denali_send_pipeline_cmd(denali, false, false, SPARE_ACCESS,
							DENALI_WRITE) == PASS)
	{
		/* push the whole OOB region through the data port */
		write_data_to_flash_mem(denali, buf, mtd->oobsize);

#if DEBUG_DENALI
		spin_lock_irq(&denali->irq_lock);
		denali->irq_debug_array[denali->idx++] = 0x80000000 | mtd->oobsize;
		denali->idx %= 32;
		spin_unlock_irq(&denali->irq_lock);
#endif

		/* wait for operation to complete */
		irq_status = wait_for_irq(denali, irq_mask);

		if (irq_status == 0)
		{
			printk(KERN_ERR "OOB write failed\n");
			status = -EIO;
		}
	}
	else
	{
		printk(KERN_ERR "unable to send pipeline command\n");
		status = -EIO;
	}
	return status;
}
  1094. /* reads OOB data from the device */
/* Read the OOB (spare) area of the given page into buf.  On timeout the
 * error is only logged - the caller gets whatever was transferred. */
static void read_oob_data(struct mtd_info *mtd, uint8_t *buf, int page)
{
	struct denali_nand_info *denali = mtd_to_denali(mtd);
	uint32_t irq_mask = INTR_STATUS0__LOAD_COMP, irq_status = 0, addr = 0x0, cmd = 0x0;

	denali->page = page;

#if DEBUG_DENALI
	printk("read_oob %d\n", page);
#endif
	if (denali_send_pipeline_cmd(denali, false, true, SPARE_ACCESS,
							DENALI_READ) == PASS)
	{
		/* pull the OOB bytes out of the controller's data port */
		read_data_from_flash_mem(denali, buf, mtd->oobsize);

		/* wait for command to be accepted
		 * can always use status0 bit as the mask is identical for each
		 * bank. */
		irq_status = wait_for_irq(denali, irq_mask);

		if (irq_status == 0)
		{
			printk(KERN_ERR "page on OOB timeout %d\n", denali->page);
		}

		/* We set the device back to MAIN_ACCESS here as I observed
		 * instability with the controller if you do a block erase
		 * and the last transaction was a SPARE_ACCESS. Block erase
		 * is reliable (according to the MTD test infrastructure)
		 * if you are in MAIN_ACCESS.
		 */
		addr = BANK(denali->flash_bank) | denali->page;
		cmd = MODE_10 | addr;
		index_addr(denali, (uint32_t)cmd, MAIN_ACCESS);

#if DEBUG_DENALI
		spin_lock_irq(&denali->irq_lock);
		denali->irq_debug_array[denali->idx++] = 0x60000000 | mtd->oobsize;
		denali->idx %= 32;
		spin_unlock_irq(&denali->irq_lock);
#endif
	}
}
  1132. /* this function examines buffers to see if they contain data that
  1133. * indicate that the buffer is part of an erased region of flash.
  1134. */
  1135. bool is_erased(uint8_t *buf, int len)
  1136. {
  1137. int i = 0;
  1138. for (i = 0; i < len; i++)
  1139. {
  1140. if (buf[i] != 0xFF)
  1141. {
  1142. return false;
  1143. }
  1144. }
  1145. return true;
  1146. }
  1147. #define ECC_SECTOR_SIZE 512
  1148. #define ECC_SECTOR(x) (((x) & ECC_ERROR_ADDRESS__SECTOR_NR) >> 12)
  1149. #define ECC_BYTE(x) (((x) & ECC_ERROR_ADDRESS__OFFSET))
  1150. #define ECC_CORRECTION_VALUE(x) ((x) & ERR_CORRECTION_INFO__BYTEMASK)
  1151. #define ECC_ERROR_CORRECTABLE(x) (!((x) & ERR_CORRECTION_INFO))
  1152. #define ECC_ERR_DEVICE(x) ((x) & ERR_CORRECTION_INFO__DEVICE_NR >> 8)
  1153. #define ECC_LAST_ERR(x) ((x) & ERR_CORRECTION_INFO__LAST_ERR_INFO)
/* Walk the controller's ECC error log for the page just read, XOR-ing
 * single-byte corrections into buf and updating mtd ecc_stats.  Returns
 * true when an uncorrectable error was seen, telling the caller to check
 * whether the page is simply erased.  oobbuf is accepted but not used
 * in this body. */
static bool handle_ecc(struct denali_nand_info *denali, uint8_t *buf,
			uint8_t *oobbuf, uint32_t irq_status)
{
	bool check_erased_page = false;

	if (irq_status & INTR_STATUS0__ECC_ERR)
	{
		/* read the ECC errors. we'll ignore them for now */
		uint32_t err_address = 0, err_correction_info = 0;
		uint32_t err_byte = 0, err_sector = 0, err_device = 0;
		uint32_t err_correction_value = 0;

		do
		{
			/* where in the page the error sits */
			err_address = ioread32(denali->flash_reg +
						ECC_ERROR_ADDRESS);
			err_sector = ECC_SECTOR(err_address);
			err_byte = ECC_BYTE(err_address);

			/* correction value, device number and flags */
			err_correction_info = ioread32(denali->flash_reg +
						ERR_CORRECTION_INFO);
			err_correction_value =
				ECC_CORRECTION_VALUE(err_correction_info);
			err_device = ECC_ERR_DEVICE(err_correction_info);

			if (ECC_ERROR_CORRECTABLE(err_correction_info))
			{
				/* offset in our buffer is computed as:
				   sector number * sector size + offset in
				   sector
				 */
				int offset = err_sector * ECC_SECTOR_SIZE +
								err_byte;
				if (offset < denali->mtd.writesize)
				{
					/* correct the ECC error */
					buf[offset] ^= err_correction_value;
					denali->mtd.ecc_stats.corrected++;
				}
				else
				{
					/* bummer, couldn't correct the error */
					printk(KERN_ERR "ECC offset invalid\n");
					denali->mtd.ecc_stats.failed++;
				}
			}
			else
			{
				/* if the error is not correctable, need to
				 * look at the page to see if it is an erased page.
				 * if so, then it's not a real ECC error */
				check_erased_page = true;
			}

#if DEBUG_DENALI
			printk("Detected ECC error in page %d: err_addr = 0x%08x,"
				" info to fix is 0x%08x\n", denali->page, err_address,
				err_correction_info);
#endif
		} while (!ECC_LAST_ERR(err_correction_info));
	}
	return check_erased_page;
}
  1212. /* programs the controller to either enable/disable DMA transfers */
  1213. static void denali_enable_dma(struct denali_nand_info *denali, bool en)
  1214. {
  1215. uint32_t reg_val = 0x0;
  1216. if (en) reg_val = DMA_ENABLE__FLAG;
  1217. denali_write32(reg_val, denali->flash_reg + DMA_ENABLE);
  1218. ioread32(denali->flash_reg + DMA_ENABLE);
  1219. }
  1220. /* setups the HW to perform the data DMA */
/* Program the controller's four-step DMA descriptor for a single-page
 * transfer to/from denali->buf.dma_buf; op selects DENALI_READ or
 * DENALI_WRITE.  The index_addr command words come from the controller's
 * MODE_10 DMA protocol. */
static void denali_setup_dma(struct denali_nand_info *denali, int op)
{
	uint32_t mode = 0x0;
	const int page_count = 1;
	dma_addr_t addr = denali->buf.dma_buf;

	mode = MODE_10 | BANK(denali->flash_bank);

	/* DMA is a four step process */

	/* 1. setup transfer type and # of pages */
	index_addr(denali, mode | denali->page, 0x2000 | op | page_count);

	/* 2. set memory high address bits 23:8 */
	index_addr(denali, mode | ((uint16_t)(addr >> 16) << 8), 0x2200);

	/* 3. set memory low address bits 23:8 */
	index_addr(denali, mode | ((uint16_t)addr << 8), 0x2300);

	/* 4. interrupt when complete, burst len = 64 bytes*/
	index_addr(denali, mode | 0x14000, 0x2400);
}
  1237. /* writes a page. user specifies type, and this function handles the
  1238. configuration details. */
/* Program one page via DMA.  raw_xfer selects between an ECC-protected
 * main-area write (false) and a raw write that disables ECC and also
 * sends chip->oob_poi as the spare area (true). */
static void write_page(struct mtd_info *mtd, struct nand_chip *chip,
			const uint8_t *buf, bool raw_xfer)
{
	struct denali_nand_info *denali = mtd_to_denali(mtd);

	struct pci_dev *pci_dev = denali->dev;

	dma_addr_t addr = denali->buf.dma_buf;
	size_t size = denali->mtd.writesize + denali->mtd.oobsize;

	uint32_t irq_status = 0;
	uint32_t irq_mask = INTR_STATUS0__DMA_CMD_COMP |
						INTR_STATUS0__PROGRAM_FAIL;

	/* if it is a raw xfer, we want to disable ecc, and send
	 * the spare area.
	 * !raw_xfer - enable ecc
	 * raw_xfer - transfer spare
	 */
	setup_ecc_for_xfer(denali, !raw_xfer, raw_xfer);

	/* copy buffer into DMA buffer */
	memcpy(denali->buf.buf, buf, mtd->writesize);

	if (raw_xfer)
	{
		/* transfer the data to the spare area */
		memcpy(denali->buf.buf + mtd->writesize,
			chip->oob_poi,
			mtd->oobsize);
	}

	/* hand the DMA buffer to the device and fire the transfer */
	pci_dma_sync_single_for_device(pci_dev, addr, size, PCI_DMA_TODEVICE);

	clear_interrupts(denali);
	denali_enable_dma(denali, true);

	denali_setup_dma(denali, DENALI_WRITE);

	/* wait for operation to complete */
	irq_status = wait_for_irq(denali, irq_mask);

	if (irq_status == 0)
	{
		/* NOTE(review): irq_status is known to be 0 on this path,
		 * so the PROGRAM_FAIL test below always selects PASS; it
		 * looks like it should consult the live status register
		 * instead - confirm before changing. */
		printk(KERN_ERR "timeout on write_page (type = %d)\n", raw_xfer);
		denali->status =
			(irq_status & INTR_STATUS0__PROGRAM_FAIL) ? NAND_STATUS_FAIL :
			PASS;
	}

	denali_enable_dma(denali, false);
	pci_dma_sync_single_for_cpu(pci_dev, addr, size, PCI_DMA_TODEVICE);
}
  1280. /* NAND core entry points */
  1281. /* this is the callback that the NAND core calls to write a page. Since
  1282. writing a page with ECC or without is similar, all the work is done
  1283. by write_page above. */
/* NAND-core write_page callback: ECC-protected page program. */
static void denali_write_page(struct mtd_info *mtd, struct nand_chip *chip,
				const uint8_t *buf)
{
	/* for regular page writes, we let HW handle all the ECC
	 * data written to the device. */
	write_page(mtd, chip, buf, false);
}
  1291. /* This is the callback that the NAND core calls to write a page without ECC.
  1292. raw access is similiar to ECC page writes, so all the work is done in the
  1293. write_page() function above.
  1294. */
/* NAND-core write_page_raw callback: program the page with ECC
 * disabled, sending the buffer (and OOB) verbatim. */
static void denali_write_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
					const uint8_t *buf)
{
	/* for raw page writes, we want to disable ECC and simply write
	   whatever data is in the buffer. */
	write_page(mtd, chip, buf, true);
}
/* NAND-core write_oob callback: program the OOB bytes staged in
 * chip->oob_poi.  Returns 0 on success or -EIO from write_oob_data(). */
static int denali_write_oob(struct mtd_info *mtd, struct nand_chip *chip,
				int page)
{
	return write_oob_data(mtd, chip->oob_poi, page);
}
/* NAND-core read_oob callback: fill chip->oob_poi from the device.
 * sndcmd is ignored - the controller sequences the read itself. */
static int denali_read_oob(struct mtd_info *mtd, struct nand_chip *chip,
				int page, int sndcmd)
{
	read_oob_data(mtd, chip->oob_poi, page);

	return 0; /* notify NAND core to send command to
		   * NAND device. */
}
  1314. static int denali_read_page(struct mtd_info *mtd, struct nand_chip *chip,
  1315. uint8_t *buf, int page)
  1316. {
  1317. struct denali_nand_info *denali = mtd_to_denali(mtd);
  1318. struct pci_dev *pci_dev = denali->dev;
  1319. dma_addr_t addr = denali->buf.dma_buf;
  1320. size_t size = denali->mtd.writesize + denali->mtd.oobsize;
  1321. uint32_t irq_status = 0;
  1322. uint32_t irq_mask = INTR_STATUS0__ECC_TRANSACTION_DONE |
  1323. INTR_STATUS0__ECC_ERR;
  1324. bool check_erased_page = false;
  1325. setup_ecc_for_xfer(denali, true, false);
  1326. denali_enable_dma(denali, true);
  1327. pci_dma_sync_single_for_device(pci_dev, addr, size, PCI_DMA_FROMDEVICE);
  1328. clear_interrupts(denali);
  1329. denali_setup_dma(denali, DENALI_READ);
  1330. /* wait for operation to complete */
  1331. irq_status = wait_for_irq(denali, irq_mask);
  1332. pci_dma_sync_single_for_cpu(pci_dev, addr, size, PCI_DMA_FROMDEVICE);
  1333. memcpy(buf, denali->buf.buf, mtd->writesize);
  1334. check_erased_page = handle_ecc(denali, buf, chip->oob_poi, irq_status);
  1335. denali_enable_dma(denali, false);
  1336. if (check_erased_page)
  1337. {
  1338. read_oob_data(&denali->mtd, chip->oob_poi, denali->page);
  1339. /* check ECC failures that may have occurred on erased pages */
  1340. if (check_erased_page)
  1341. {
  1342. if (!is_erased(buf, denali->mtd.writesize))
  1343. {
  1344. denali->mtd.ecc_stats.failed++;
  1345. }
  1346. if (!is_erased(buf, denali->mtd.oobsize))
  1347. {
  1348. denali->mtd.ecc_stats.failed++;
  1349. }
  1350. }
  1351. }
  1352. return 0;
  1353. }
/*
 * NAND core callback: read one page WITHOUT ECC via DMA.
 *
 * Performs a raw (ECC disabled, spare area transferred) DMA read of
 * (writesize + oobsize) bytes into the bounce buffer, waits for the
 * DMA-command-complete interrupt, then splits the bounce buffer into the
 * caller's data buffer and chip->oob_poi.
 *
 * The sync_for_device / sync_for_cpu calls bracket the DMA so the CPU
 * and device views of the streaming mapping stay coherent; do not
 * reorder them relative to the transfer.
 */
static int denali_read_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
				uint8_t *buf, int page)
{
	struct denali_nand_info *denali = mtd_to_denali(mtd);
	struct pci_dev *pci_dev = denali->dev;

	dma_addr_t addr = denali->buf.dma_buf;
	size_t size = denali->mtd.writesize + denali->mtd.oobsize;

	uint32_t irq_status = 0;
	uint32_t irq_mask = INTR_STATUS0__DMA_CMD_COMP;

	/* ECC off, spare area on: raw transfer of the full page + OOB */
	setup_ecc_for_xfer(denali, false, true);

	denali_enable_dma(denali, true);
	pci_dma_sync_single_for_device(pci_dev, addr, size, PCI_DMA_FROMDEVICE);

	clear_interrupts(denali);
	denali_setup_dma(denali, DENALI_READ);

	/* wait for operation to complete */
	irq_status = wait_for_irq(denali, irq_mask);

	pci_dma_sync_single_for_cpu(pci_dev, addr, size, PCI_DMA_FROMDEVICE);

	denali_enable_dma(denali, false);

	/* main area first, then the spare bytes that follow it */
	memcpy(buf, denali->buf.buf, mtd->writesize);
	memcpy(chip->oob_poi, denali->buf.buf + mtd->writesize, mtd->oobsize);

	return 0;
}
  1376. static uint8_t denali_read_byte(struct mtd_info *mtd)
  1377. {
  1378. struct denali_nand_info *denali = mtd_to_denali(mtd);
  1379. uint8_t result = 0xff;
  1380. if (denali->buf.head < denali->buf.tail)
  1381. {
  1382. result = denali->buf.buf[denali->buf.head++];
  1383. }
  1384. #if DEBUG_DENALI
  1385. printk("read byte -> 0x%02x\n", result);
  1386. #endif
  1387. return result;
  1388. }
/*
 * NAND core callback: select the active flash bank (chip select).
 * The bank number is consumed by the ISR and command paths, so the
 * update is done under irq_lock to keep it consistent with them.
 */
static void denali_select_chip(struct mtd_info *mtd, int chip)
{
	struct denali_nand_info *denali = mtd_to_denali(mtd);
#if DEBUG_DENALI
	printk("denali select chip %d\n", chip);
#endif
	spin_lock_irq(&denali->irq_lock);
	denali->flash_bank = chip;
	spin_unlock_irq(&denali->irq_lock);
}
  1399. static int denali_waitfunc(struct mtd_info *mtd, struct nand_chip *chip)
  1400. {
  1401. struct denali_nand_info *denali = mtd_to_denali(mtd);
  1402. int status = denali->status;
  1403. denali->status = 0;
  1404. #if DEBUG_DENALI
  1405. printk("waitfunc %d\n", status);
  1406. #endif
  1407. return status;
  1408. }
/*
 * NAND core callback: erase the block containing @page.
 * Issues a MODE_10 indexed-address erase command for the current bank,
 * waits for the erase-complete/erase-fail interrupt, and latches the
 * outcome in denali->status for denali_waitfunc() to report.
 *
 * NOTE(review): success is latched as PASS rather than a NAND_STATUS_*
 * value while failure uses NAND_STATUS_FAIL -- the asymmetry looks
 * intentional (waitfunc consumers only test the FAIL bit) but is worth
 * confirming.
 */
static void denali_erase(struct mtd_info *mtd, int page)
{
	struct denali_nand_info *denali = mtd_to_denali(mtd);

	uint32_t cmd = 0x0, irq_status = 0;

#if DEBUG_DENALI
	printk("erase page: %d\n", page);
#endif
	/* clear interrupts */
	clear_interrupts(denali);

	/* setup page read request for access type */
	cmd = MODE_10 | BANK(denali->flash_bank) | page;
	index_addr(denali, (uint32_t)cmd, 0x1);

	/* wait for erase to complete or failure to occur */
	irq_status = wait_for_irq(denali, INTR_STATUS0__ERASE_COMP |
					INTR_STATUS0__ERASE_FAIL);

	denali->status = (irq_status & INTR_STATUS0__ERASE_FAIL) ? NAND_STATUS_FAIL :
								 PASS;
}
/*
 * NAND core callback: dispatch low-level NAND commands.
 *
 * Only the commands the higher layers actually issue are handled:
 *  - PAGEPROG:       no-op (the write is driven by the page-write paths)
 *  - STATUS:         latch controller status for denali_read_byte()
 *  - READID:         fill the software buffer with the cached device ID
 *                    bytes so the core can read them via read_byte
 *  - READ0 / SEQIN:  remember the target page for the subsequent
 *                    page read/write callback
 *  - RESET:          reset the currently selected bank
 *  - READOOB:        TODO, currently unimplemented
 * Anything else is logged and ignored.
 */
static void denali_cmdfunc(struct mtd_info *mtd, unsigned int cmd, int col,
				int page)
{
	struct denali_nand_info *denali = mtd_to_denali(mtd);
#if DEBUG_DENALI
	printk("cmdfunc: 0x%x %d %d\n", cmd, col, page);
#endif
	switch (cmd)
	{
		case NAND_CMD_PAGEPROG:
			break;
		case NAND_CMD_STATUS:
			read_status(denali);
			break;
		case NAND_CMD_READID:
			reset_buf(denali);
			if (denali->flash_bank < denali->total_used_banks)
			{
				/* write manufacturer information into nand
				   buffer for NAND subsystem to fetch.
				 */
				write_byte_to_buf(denali, denali->dev_info.wDeviceMaker);
				write_byte_to_buf(denali, denali->dev_info.wDeviceID);
				write_byte_to_buf(denali, denali->dev_info.bDeviceParam0);
				write_byte_to_buf(denali, denali->dev_info.bDeviceParam1);
				write_byte_to_buf(denali, denali->dev_info.bDeviceParam2);
			}
			else
			{
				/* bank not populated: present an all-0xFF ID */
				int i;
				for (i = 0; i < 5; i++)
					write_byte_to_buf(denali, 0xff);
			}
			break;
		case NAND_CMD_READ0:
		case NAND_CMD_SEQIN:
			denali->page = page;
			break;
		case NAND_CMD_RESET:
			reset_bank(denali);
			break;
		case NAND_CMD_READOOB:
			/* TODO: Read OOB data */
			break;
		default:
			printk(KERN_ERR ": unsupported command received 0x%x\n", cmd);
			break;
	}
}
/* stubs for ECC functions not used by the NAND core */

/*
 * ECC is done entirely in hardware; the NAND core must never invoke the
 * software calculate hook. Reaching this is a driver wiring bug, so BUG().
 */
static int denali_ecc_calculate(struct mtd_info *mtd, const uint8_t *data,
				uint8_t *ecc_code)
{
	printk(KERN_ERR "denali_ecc_calculate called unexpectedly\n");
	BUG();
	return -EIO;
}
/*
 * ECC correction is done in hardware; this software hook must never be
 * reached. See denali_ecc_calculate.
 */
static int denali_ecc_correct(struct mtd_info *mtd, uint8_t *data,
				uint8_t *read_ecc, uint8_t *calc_ecc)
{
	printk(KERN_ERR "denali_ecc_correct called unexpectedly\n");
	BUG();
	return -EIO;
}
/*
 * Hardware-ECC control hook; unused because the controller manages ECC
 * itself. Must never be reached.
 */
static void denali_ecc_hwctl(struct mtd_info *mtd, int mode)
{
	printk(KERN_ERR "denali_ecc_hwctl called unexpectedly\n");
	BUG();
}
/* end NAND core entry points */
/* Initialization code to bring the device up to a known good state */
/*
 * Bring the controller to a known-good state: reset the attached flash,
 * enable ready/busy pins and chip-enable-don't-care mode, clear the
 * spare-area skip/marker settings, and turn hardware ECC on.
 */
static void denali_hw_init(struct denali_nand_info *denali)
{
	denali_irq_init(denali);
	NAND_Flash_Reset(denali);
	denali_write32(0x0F, denali->flash_reg + RB_PIN_ENABLED);
	denali_write32(CHIP_EN_DONT_CARE__FLAG, denali->flash_reg + CHIP_ENABLE_DONT_CARE);

	denali_write32(0x0, denali->flash_reg + SPARE_AREA_SKIP_BYTES);
	denali_write32(0xffff, denali->flash_reg + SPARE_AREA_MARKER);

	/* Should set value for these registers when init */
	denali_write32(0, denali->flash_reg + TWO_ROW_ADDR_CYCLES);
	denali_write32(1, denali->flash_reg + ECC_ENABLE);
}
  1510. /* ECC layout for SLC devices. Denali spec indicates SLC fixed at 4 bytes */
  1511. #define ECC_BYTES_SLC 4 * (2048 / ECC_SECTOR_SIZE)
  1512. static struct nand_ecclayout nand_oob_slc = {
  1513. .eccbytes = 4,
  1514. .eccpos = { 0, 1, 2, 3 }, /* not used */
  1515. .oobfree = {{
  1516. .offset = ECC_BYTES_SLC,
  1517. .length = 64 - ECC_BYTES_SLC
  1518. }}
  1519. };
  1520. #define ECC_BYTES_MLC 14 * (2048 / ECC_SECTOR_SIZE)
  1521. static struct nand_ecclayout nand_oob_mlc_14bit = {
  1522. .eccbytes = 14,
  1523. .eccpos = { 0, 1, 2, 3, 5, 6, 7, 8, 9, 10, 11, 12, 13 }, /* not used */
  1524. .oobfree = {{
  1525. .offset = ECC_BYTES_MLC,
  1526. .length = 64 - ECC_BYTES_MLC
  1527. }}
  1528. };
/* signature patterns identifying the main and mirror bad-block tables */
static uint8_t bbt_pattern[] = {'B', 'b', 't', '0' };
static uint8_t mirror_pattern[] = {'1', 't', 'b', 'B' };

/*
 * On-flash bad-block-table descriptors: the tables live in the last
 * blocks of each chip, are versioned, use 2 bits per block, and may
 * occupy up to 4 blocks each. Pattern at OOB offset 8, version at 12.
 */
static struct nand_bbt_descr bbt_main_descr = {
	.options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
		| NAND_BBT_2BIT | NAND_BBT_VERSION | NAND_BBT_PERCHIP,
	.offs =	8,
	.len = 4,
	.veroffs = 12,
	.maxblocks = 4,
	.pattern = bbt_pattern,
};

static struct nand_bbt_descr bbt_mirror_descr = {
	.options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE
		| NAND_BBT_2BIT | NAND_BBT_VERSION | NAND_BBT_PERCHIP,
	.offs =	8,
	.len = 4,
	.veroffs = 12,
	.maxblocks = 4,
	.pattern = mirror_pattern,
};
/* initialize driver data structures */
/*
 * Set up the software-side state: the completion used by the ISR to wake
 * waiters, the spinlock guarding irq_status/flash_bank, and sane initial
 * values ("no bank selected", "no pending interrupts").
 */
void denali_drv_init(struct denali_nand_info *denali)
{
	denali->idx = 0;

	/* setup interrupt handler */
	/* the completion object will be used to notify
	 * the callee that the interrupt is done */
	init_completion(&denali->complete);

	/* the spinlock will be used to synchronize the ISR
	 * with any element that might be access shared
	 * data (interrupt status) */
	spin_lock_init(&denali->irq_lock);

	/* indicate that MTD has not selected a valid bank yet */
	denali->flash_bank = CHIP_SELECT_INVALID;

	/* initialize our irq_status variable to indicate no interrupts */
	denali->irq_status = 0;
}
  1566. /* driver entry point */
  1567. static int denali_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
  1568. {
  1569. int ret = -ENODEV;
  1570. resource_size_t csr_base, mem_base;
  1571. unsigned long csr_len, mem_len;
  1572. struct denali_nand_info *denali;
  1573. nand_dbg_print(NAND_DBG_TRACE, "%s, Line %d, Function: %s\n",
  1574. __FILE__, __LINE__, __func__);
  1575. denali = kzalloc(sizeof(*denali), GFP_KERNEL);
  1576. if (!denali)
  1577. return -ENOMEM;
  1578. ret = pci_enable_device(dev);
  1579. if (ret) {
  1580. printk(KERN_ERR "Spectra: pci_enable_device failed.\n");
  1581. goto failed_enable;
  1582. }
  1583. if (id->driver_data == INTEL_CE4100) {
  1584. /* Due to a silicon limitation, we can only support
  1585. * ONFI timing mode 1 and below.
  1586. */
  1587. if (onfi_timing_mode < -1 || onfi_timing_mode > 1)
  1588. {
  1589. printk("Intel CE4100 only supports ONFI timing mode 1 "
  1590. "or below\n");
  1591. ret = -EINVAL;
  1592. goto failed_enable;
  1593. }
  1594. denali->platform = INTEL_CE4100;
  1595. mem_base = pci_resource_start(dev, 0);
  1596. mem_len = pci_resource_len(dev, 1);
  1597. csr_base = pci_resource_start(dev, 1);
  1598. csr_len = pci_resource_len(dev, 1);
  1599. } else {
  1600. denali->platform = INTEL_MRST;
  1601. csr_base = pci_resource_start(dev, 0);
  1602. csr_len = pci_resource_start(dev, 0);
  1603. mem_base = pci_resource_start(dev, 1);
  1604. mem_len = pci_resource_len(dev, 1);
  1605. if (!mem_len) {
  1606. mem_base = csr_base + csr_len;
  1607. mem_len = csr_len;
  1608. nand_dbg_print(NAND_DBG_WARN,
  1609. "Spectra: No second BAR for PCI device; assuming %08Lx\n",
  1610. (uint64_t)csr_base);
  1611. }
  1612. }
  1613. /* Is 32-bit DMA supported? */
  1614. ret = pci_set_dma_mask(dev, DMA_BIT_MASK(32));
  1615. if (ret)
  1616. {
  1617. printk(KERN_ERR "Spectra: no usable DMA configuration\n");
  1618. goto failed_enable;
  1619. }
  1620. denali->buf.dma_buf = pci_map_single(dev, denali->buf.buf, DENALI_BUF_SIZE,
  1621. PCI_DMA_BIDIRECTIONAL);
  1622. if (pci_dma_mapping_error(dev, denali->buf.dma_buf))
  1623. {
  1624. printk(KERN_ERR "Spectra: failed to map DMA buffer\n");
  1625. goto failed_enable;
  1626. }
  1627. pci_set_master(dev);
  1628. denali->dev = dev;
  1629. ret = pci_request_regions(dev, DENALI_NAND_NAME);
  1630. if (ret) {
  1631. printk(KERN_ERR "Spectra: Unable to request memory regions\n");
  1632. goto failed_req_csr;
  1633. }
  1634. denali->flash_reg = ioremap_nocache(csr_base, csr_len);
  1635. if (!denali->flash_reg) {
  1636. printk(KERN_ERR "Spectra: Unable to remap memory region\n");
  1637. ret = -ENOMEM;
  1638. goto failed_remap_csr;
  1639. }
  1640. nand_dbg_print(NAND_DBG_DEBUG, "Spectra: CSR 0x%08Lx -> 0x%p (0x%lx)\n",
  1641. (uint64_t)csr_base, denali->flash_reg, csr_len);
  1642. denali->flash_mem = ioremap_nocache(mem_base, mem_len);
  1643. if (!denali->flash_mem) {
  1644. printk(KERN_ERR "Spectra: ioremap_nocache failed!");
  1645. iounmap(denali->flash_reg);
  1646. ret = -ENOMEM;
  1647. goto failed_remap_csr;
  1648. }
  1649. nand_dbg_print(NAND_DBG_WARN,
  1650. "Spectra: Remapped flash base address: "
  1651. "0x%p, len: %ld\n",
  1652. denali->flash_mem, csr_len);
  1653. denali_hw_init(denali);
  1654. denali_drv_init(denali);
  1655. nand_dbg_print(NAND_DBG_DEBUG, "Spectra: IRQ %d\n", dev->irq);
  1656. if (request_irq(dev->irq, denali_isr, IRQF_SHARED,
  1657. DENALI_NAND_NAME, denali)) {
  1658. printk(KERN_ERR "Spectra: Unable to allocate IRQ\n");
  1659. ret = -ENODEV;
  1660. goto failed_request_irq;
  1661. }
  1662. /* now that our ISR is registered, we can enable interrupts */
  1663. NAND_LLD_Enable_Disable_Interrupts(denali, true);
  1664. pci_set_drvdata(dev, denali);
  1665. NAND_Read_Device_ID(denali);
  1666. /* MTD supported page sizes vary by kernel. We validate our
  1667. kernel supports the device here.
  1668. */
  1669. if (denali->dev_info.wPageSize > NAND_MAX_PAGESIZE + NAND_MAX_OOBSIZE)
  1670. {
  1671. ret = -ENODEV;
  1672. printk(KERN_ERR "Spectra: device size not supported by this "
  1673. "version of MTD.");
  1674. goto failed_nand;
  1675. }
  1676. nand_dbg_print(NAND_DBG_DEBUG, "Dump timing register values:"
  1677. "acc_clks: %d, re_2_we: %d, we_2_re: %d,"
  1678. "addr_2_data: %d, rdwr_en_lo_cnt: %d, "
  1679. "rdwr_en_hi_cnt: %d, cs_setup_cnt: %d\n",
  1680. ioread32(denali->flash_reg + ACC_CLKS),
  1681. ioread32(denali->flash_reg + RE_2_WE),
  1682. ioread32(denali->flash_reg + WE_2_RE),
  1683. ioread32(denali->flash_reg + ADDR_2_DATA),
  1684. ioread32(denali->flash_reg + RDWR_EN_LO_CNT),
  1685. ioread32(denali->flash_reg + RDWR_EN_HI_CNT),
  1686. ioread32(denali->flash_reg + CS_SETUP_CNT));
  1687. denali->mtd.name = "Denali NAND";
  1688. denali->mtd.owner = THIS_MODULE;
  1689. denali->mtd.priv = &denali->nand;
  1690. /* register the driver with the NAND core subsystem */
  1691. denali->nand.select_chip = denali_select_chip;
  1692. denali->nand.cmdfunc = denali_cmdfunc;
  1693. denali->nand.read_byte = denali_read_byte;
  1694. denali->nand.waitfunc = denali_waitfunc;
  1695. /* scan for NAND devices attached to the controller
  1696. * this is the first stage in a two step process to register
  1697. * with the nand subsystem */
  1698. if (nand_scan_ident(&denali->mtd, LLD_MAX_FLASH_BANKS, NULL))
  1699. {
  1700. ret = -ENXIO;
  1701. goto failed_nand;
  1702. }
  1703. /* second stage of the NAND scan
  1704. * this stage requires information regarding ECC and
  1705. * bad block management. */
  1706. /* Bad block management */
  1707. denali->nand.bbt_td = &bbt_main_descr;
  1708. denali->nand.bbt_md = &bbt_mirror_descr;
  1709. /* skip the scan for now until we have OOB read and write support */
  1710. denali->nand.options |= NAND_USE_FLASH_BBT | NAND_SKIP_BBTSCAN;
  1711. denali->nand.ecc.mode = NAND_ECC_HW_SYNDROME;
  1712. if (denali->dev_info.MLCDevice)
  1713. {
  1714. denali->nand.ecc.layout = &nand_oob_mlc_14bit;
  1715. denali->nand.ecc.bytes = ECC_BYTES_MLC;
  1716. }
  1717. else /* SLC */
  1718. {
  1719. denali->nand.ecc.layout = &nand_oob_slc;
  1720. denali->nand.ecc.bytes = ECC_BYTES_SLC;
  1721. }
  1722. /* These functions are required by the NAND core framework, otherwise,
  1723. the NAND core will assert. However, we don't need them, so we'll stub
  1724. them out. */
  1725. denali->nand.ecc.calculate = denali_ecc_calculate;
  1726. denali->nand.ecc.correct = denali_ecc_correct;
  1727. denali->nand.ecc.hwctl = denali_ecc_hwctl;
  1728. /* override the default read operations */
  1729. denali->nand.ecc.size = denali->mtd.writesize;
  1730. denali->nand.ecc.read_page = denali_read_page;
  1731. denali->nand.ecc.read_page_raw = denali_read_page_raw;
  1732. denali->nand.ecc.write_page = denali_write_page;
  1733. denali->nand.ecc.write_page_raw = denali_write_page_raw;
  1734. denali->nand.ecc.read_oob = denali_read_oob;
  1735. denali->nand.ecc.write_oob = denali_write_oob;
  1736. denali->nand.erase_cmd = denali_erase;
  1737. if (nand_scan_tail(&denali->mtd))
  1738. {
  1739. ret = -ENXIO;
  1740. goto failed_nand;
  1741. }
  1742. ret = add_mtd_device(&denali->mtd);
  1743. if (ret) {
  1744. printk(KERN_ERR "Spectra: Failed to register MTD device: %d\n", ret);
  1745. goto failed_nand;
  1746. }
  1747. return 0;
  1748. failed_nand:
  1749. denali_irq_cleanup(dev->irq, denali);
  1750. failed_request_irq:
  1751. iounmap(denali->flash_reg);
  1752. iounmap(denali->flash_mem);
  1753. failed_remap_csr:
  1754. pci_release_regions(dev);
  1755. failed_req_csr:
  1756. pci_unmap_single(dev, denali->buf.dma_buf, DENALI_BUF_SIZE,
  1757. PCI_DMA_BIDIRECTIONAL);
  1758. failed_enable:
  1759. kfree(denali);
  1760. return ret;
  1761. }
  1762. /* driver exit point */
  1763. static void denali_pci_remove(struct pci_dev *dev)
  1764. {
  1765. struct denali_nand_info *denali = pci_get_drvdata(dev);
  1766. nand_dbg_print(NAND_DBG_WARN, "%s, Line %d, Function: %s\n",
  1767. __FILE__, __LINE__, __func__);
  1768. nand_release(&denali->mtd);
  1769. del_mtd_device(&denali->mtd);
  1770. denali_irq_cleanup(dev->irq, denali);
  1771. iounmap(denali->flash_reg);
  1772. iounmap(denali->flash_mem);
  1773. pci_release_regions(dev);
  1774. pci_disable_device(dev);
  1775. pci_unmap_single(dev, denali->buf.dma_buf, DENALI_BUF_SIZE,
  1776. PCI_DMA_BIDIRECTIONAL);
  1777. pci_set_drvdata(dev, NULL);
  1778. kfree(denali);
  1779. }
MODULE_DEVICE_TABLE(pci, denali_pci_ids);

/* PCI glue: matches denali_pci_ids and routes to probe/remove above */
static struct pci_driver denali_pci_driver = {
	.name = DENALI_NAND_NAME,
	.id_table = denali_pci_ids,
	.probe = denali_pci_probe,
	.remove = denali_pci_remove,
};
  1787. static int __devinit denali_init(void)
  1788. {
  1789. printk(KERN_INFO "Spectra MTD driver built on %s @ %s\n", __DATE__, __TIME__);
  1790. return pci_register_driver(&denali_pci_driver);
  1791. }
  1792. /* Free memory */
  1793. static void __devexit denali_exit(void)
  1794. {
  1795. pci_unregister_driver(&denali_pci_driver);
  1796. }
  1797. module_init(denali_init);
  1798. module_exit(denali_exit);