/* gpmc.c */
  1. /*
  2. * GPMC support functions
  3. *
  4. * Copyright (C) 2005-2006 Nokia Corporation
  5. *
  6. * Author: Juha Yrjola
  7. *
  8. * Copyright (C) 2009 Texas Instruments
  9. * Added OMAP4 support - Santosh Shilimkar <santosh.shilimkar@ti.com>
  10. *
  11. * This program is free software; you can redistribute it and/or modify
  12. * it under the terms of the GNU General Public License version 2 as
  13. * published by the Free Software Foundation.
  14. */
  15. #undef DEBUG
  16. #include <linux/irq.h>
  17. #include <linux/kernel.h>
  18. #include <linux/init.h>
  19. #include <linux/err.h>
  20. #include <linux/clk.h>
  21. #include <linux/ioport.h>
  22. #include <linux/spinlock.h>
  23. #include <linux/io.h>
  24. #include <linux/module.h>
  25. #include <linux/interrupt.h>
  26. #include <linux/platform_device.h>
  27. #include <linux/of.h>
  28. #include <linux/of_mtd.h>
  29. #include <linux/of_device.h>
  30. #include <linux/mtd/nand.h>
  31. #include <linux/platform_data/mtd-nand-omap2.h>
  32. #include <asm/mach-types.h>
  33. #include "soc.h"
  34. #include "common.h"
  35. #include "omap_device.h"
  36. #include "gpmc.h"
  37. #include "gpmc-nand.h"
  38. #include "gpmc-onenand.h"
#define	DEVICE_NAME		"omap-gpmc"

/* GPMC register offsets (from the ioremapped gpmc_base) */
#define GPMC_REVISION		0x00
#define GPMC_SYSCONFIG		0x10
#define GPMC_SYSSTATUS		0x14
#define GPMC_IRQSTATUS		0x18
#define GPMC_IRQENABLE		0x1c
#define GPMC_TIMEOUT_CONTROL	0x40
#define GPMC_ERR_ADDRESS	0x44
#define GPMC_ERR_TYPE		0x48
#define GPMC_CONFIG		0x50
#define GPMC_STATUS		0x54
#define GPMC_PREFETCH_CONFIG1	0x1e0
#define GPMC_PREFETCH_CONFIG2	0x1e4
#define GPMC_PREFETCH_CONTROL	0x1ec
#define GPMC_PREFETCH_STATUS	0x1f0
#define GPMC_ECC_CONFIG		0x1f4
#define GPMC_ECC_CONTROL	0x1f8
#define GPMC_ECC_SIZE_CONFIG	0x1fc
#define GPMC_ECC1_RESULT	0x200
#define GPMC_ECC_BCH_RESULT_0	0x240	/* not available on OMAP2 */
#define GPMC_ECC_BCH_RESULT_1	0x244	/* not available on OMAP2 */
#define GPMC_ECC_BCH_RESULT_2	0x248	/* not available on OMAP2 */
#define GPMC_ECC_BCH_RESULT_3	0x24c	/* not available on OMAP2 */

/* GPMC ECC control settings (values for GPMC_ECC_CONTROL) */
#define GPMC_ECC_CTRL_ECCCLEAR		0x100
#define GPMC_ECC_CTRL_ECCDISABLE	0x000
#define GPMC_ECC_CTRL_ECCREG1		0x001
#define GPMC_ECC_CTRL_ECCREG2		0x002
#define GPMC_ECC_CTRL_ECCREG3		0x003
#define GPMC_ECC_CTRL_ECCREG4		0x004
#define GPMC_ECC_CTRL_ECCREG5		0x005
#define GPMC_ECC_CTRL_ECCREG6		0x006
#define GPMC_ECC_CTRL_ECCREG7		0x007
#define GPMC_ECC_CTRL_ECCREG8		0x008
#define GPMC_ECC_CTRL_ECCREG9		0x009

/* Extra-delay / cycle2cycle bits inside the per-CS CONFIGx registers */
#define	GPMC_CONFIG2_CSEXTRADELAY		BIT(7)
#define	GPMC_CONFIG3_ADVEXTRADELAY		BIT(7)
#define	GPMC_CONFIG4_OEEXTRADELAY		BIT(7)
#define	GPMC_CONFIG4_WEEXTRADELAY		BIT(23)
#define	GPMC_CONFIG6_CYCLE2CYCLEDIFFCSEN	BIT(6)
#define	GPMC_CONFIG6_CYCLE2CYCLESAMECSEN	BIT(7)

/* Layout of the per-chip-select register banks */
#define GPMC_CS0_OFFSET		0x60	/* first CS bank */
#define GPMC_CS_SIZE		0x30	/* stride between CS banks */
#define	GPMC_BCH_SIZE		0x10	/* stride between BCH result sets */

/* GPMC external memory map and mapping granularity */
#define GPMC_MEM_START		0x00000000
#define GPMC_MEM_END		0x3FFFFFFF
#define BOOT_ROM_SPACE		0x100000	/* 1MB */

#define GPMC_CHUNK_SHIFT	24		/* 16 MB */
#define GPMC_SECTION_SHIFT	28		/* 128 MB */

#define CS_NUM_SHIFT		24
#define ENABLE_PREFETCH		(0x1 << 7)
#define DMA_MPU_MODE		2

#define	GPMC_REVISION_MAJOR(l)		((l >> 4) & 0xf)
#define	GPMC_REVISION_MINOR(l)		(l & 0xf)

/* gpmc_capability flag bits */
#define	GPMC_HAS_WR_ACCESS		0x1
#define	GPMC_HAS_WR_DATA_MUX_BUS	0x2

/* XXX: Only NAND irq has been considered,currently these are the only ones used
 */
#define	GPMC_NR_IRQ		2
/* Mapping of one client-visible virtual irq to its GPMC event bit */
struct gpmc_client_irq {
	unsigned		irq;		/* virtual irq handed out to clients */
	u32			bitmask;	/* matching GPMC_IRQENABLE/IRQSTATUS bit */
};
/* Structure to save gpmc cs context */
struct gpmc_cs_config {
	u32 config1;
	u32 config2;
	u32 config3;
	u32 config4;
	u32 config5;
	u32 config6;
	u32 config7;	/* holds the mapped base/mask and the CSVALID bit */
	int is_valid;	/* nonzero if this CS was enabled when the context was saved */
};
/*
 * Structure to save/restore gpmc context
 * to support core off on OMAP3
 */
struct omap3_gpmc_regs {
	u32 sysconfig;
	u32 irqenable;
	u32 timeout_ctrl;
	u32 config;
	u32 prefetch_config1;
	u32 prefetch_config2;
	u32 prefetch_control;
	struct gpmc_cs_config cs_context[GPMC_CS_NUM];	/* one entry per chip select */
};
/* Client irq bookkeeping, populated by gpmc_setup_irq() */
static struct gpmc_client_irq gpmc_client_irq[GPMC_NR_IRQ];
static struct irq_chip gpmc_irq_chip;
static unsigned gpmc_irq_start;	/* first virtual irq allocated for clients */

/* GPMC external address space bookkeeping */
static struct resource	gpmc_mem_root;
static struct resource	gpmc_cs_mem[GPMC_CS_NUM];
/* Protects gpmc_cs_map and the gpmc_cs_mem resources */
static DEFINE_SPINLOCK(gpmc_mem_lock);
/* Define chip-selects as reserved by default until probe completes */
static unsigned int gpmc_cs_map = ((1 << GPMC_CS_NUM) - 1);
static struct device *gpmc_dev;
static int gpmc_irq;			/* hardware GPMC interrupt line */
static resource_size_t phys_base, mem_size;
static unsigned gpmc_capability;	/* GPMC_HAS_* feature flags */
static void __iomem *gpmc_base;		/* ioremapped GPMC register base */

static struct clk *gpmc_l3_clk;

static irqreturn_t gpmc_handle_irq(int irq, void *dev);
/* Write @val to the global GPMC register at byte offset @idx */
static void gpmc_write_reg(int idx, u32 val)
{
	__raw_writel(val, gpmc_base + idx);
}
/* Read the global GPMC register at byte offset @idx */
static u32 gpmc_read_reg(int idx)
{
	return __raw_readl(gpmc_base + idx);
}
  151. void gpmc_cs_write_reg(int cs, int idx, u32 val)
  152. {
  153. void __iomem *reg_addr;
  154. reg_addr = gpmc_base + GPMC_CS0_OFFSET + (cs * GPMC_CS_SIZE) + idx;
  155. __raw_writel(val, reg_addr);
  156. }
  157. static u32 gpmc_cs_read_reg(int cs, int idx)
  158. {
  159. void __iomem *reg_addr;
  160. reg_addr = gpmc_base + GPMC_CS0_OFFSET + (cs * GPMC_CS_SIZE) + idx;
  161. return __raw_readl(reg_addr);
  162. }
  163. /* TODO: Add support for gpmc_fck to clock framework and use it */
  164. static unsigned long gpmc_get_fclk_period(void)
  165. {
  166. unsigned long rate = clk_get_rate(gpmc_l3_clk);
  167. if (rate == 0) {
  168. printk(KERN_WARNING "gpmc_l3_clk not enabled\n");
  169. return 0;
  170. }
  171. rate /= 1000;
  172. rate = 1000000000 / rate; /* In picoseconds */
  173. return rate;
  174. }
  175. static unsigned int gpmc_ns_to_ticks(unsigned int time_ns)
  176. {
  177. unsigned long tick_ps;
  178. /* Calculate in picosecs to yield more exact results */
  179. tick_ps = gpmc_get_fclk_period();
  180. return (time_ns * 1000 + tick_ps - 1) / tick_ps;
  181. }
  182. static unsigned int gpmc_ps_to_ticks(unsigned int time_ps)
  183. {
  184. unsigned long tick_ps;
  185. /* Calculate in picosecs to yield more exact results */
  186. tick_ps = gpmc_get_fclk_period();
  187. return (time_ps + tick_ps - 1) / tick_ps;
  188. }
/* Convert GPMC fclk ticks to nanoseconds (truncating) */
unsigned int gpmc_ticks_to_ns(unsigned int ticks)
{
	return ticks * gpmc_get_fclk_period() / 1000;
}
/* Convert GPMC fclk ticks to picoseconds */
static unsigned int gpmc_ticks_to_ps(unsigned int ticks)
{
	return ticks * gpmc_get_fclk_period();
}
/*
 * Round @time_ps up to a whole number of fclk ticks and return the
 * result converted back to picoseconds (despite the name, the return
 * value is in ps, not ticks).
 */
static unsigned int gpmc_round_ps_to_ticks(unsigned int time_ps)
{
	unsigned long ticks = gpmc_ps_to_ticks(time_ps);

	return ticks * gpmc_get_fclk_period();
}
  202. static inline void gpmc_cs_modify_reg(int cs, int reg, u32 mask, bool value)
  203. {
  204. u32 l;
  205. l = gpmc_cs_read_reg(cs, reg);
  206. if (value)
  207. l |= mask;
  208. else
  209. l &= ~mask;
  210. gpmc_cs_write_reg(cs, reg, l);
  211. }
  212. static void gpmc_cs_bool_timings(int cs, const struct gpmc_bool_timings *p)
  213. {
  214. gpmc_cs_modify_reg(cs, GPMC_CS_CONFIG1,
  215. GPMC_CONFIG1_TIME_PARA_GRAN,
  216. p->time_para_granularity);
  217. gpmc_cs_modify_reg(cs, GPMC_CS_CONFIG2,
  218. GPMC_CONFIG2_CSEXTRADELAY, p->cs_extra_delay);
  219. gpmc_cs_modify_reg(cs, GPMC_CS_CONFIG3,
  220. GPMC_CONFIG3_ADVEXTRADELAY, p->adv_extra_delay);
  221. gpmc_cs_modify_reg(cs, GPMC_CS_CONFIG4,
  222. GPMC_CONFIG4_OEEXTRADELAY, p->oe_extra_delay);
  223. gpmc_cs_modify_reg(cs, GPMC_CS_CONFIG4,
  224. GPMC_CONFIG4_OEEXTRADELAY, p->we_extra_delay);
  225. gpmc_cs_modify_reg(cs, GPMC_CS_CONFIG6,
  226. GPMC_CONFIG6_CYCLE2CYCLESAMECSEN,
  227. p->cycle2cyclesamecsen);
  228. gpmc_cs_modify_reg(cs, GPMC_CS_CONFIG6,
  229. GPMC_CONFIG6_CYCLE2CYCLEDIFFCSEN,
  230. p->cycle2cyclediffcsen);
  231. }
/*
 * Program one timing field of a chip-select CONFIGx register.
 * @cs: chip select, @reg: GPMC_CS_CONFIGx, @st_bit..@end_bit: inclusive
 * bit range of the field, @time: desired time in ns (0 -> 0 ticks).
 * Returns 0 on success, -1 if the value in fclk ticks overflows the field.
 * The DEBUG build adds a @name parameter purely for the printouts.
 */
#ifdef DEBUG
static int set_gpmc_timing_reg(int cs, int reg, int st_bit, int end_bit,
			       int time, const char *name)
#else
static int set_gpmc_timing_reg(int cs, int reg, int st_bit, int end_bit,
			       int time)
#endif
{
	u32 l;
	int ticks, mask, nr_bits;

	if (time == 0)
		ticks = 0;
	else
		ticks = gpmc_ns_to_ticks(time);
	nr_bits = end_bit - st_bit + 1;
	/* reject values that do not fit in the field */
	if (ticks >= 1 << nr_bits) {
#ifdef DEBUG
		printk(KERN_INFO "GPMC CS%d: %-10s* %3d ns, %3d ticks >= %d\n",
		       cs, name, time, ticks, 1 << nr_bits);
#endif
		return -1;
	}

	mask = (1 << nr_bits) - 1;
	l = gpmc_cs_read_reg(cs, reg);
#ifdef DEBUG
	printk(KERN_INFO
	       "GPMC CS%d: %-10s: %3d ticks, %3lu ns (was %3i ticks) %3d ns\n",
	       cs, name, ticks, gpmc_get_fclk_period() * ticks / 1000,
	       (l >> st_bit) & mask, time);
#endif
	/* read-modify-write just this field */
	l &= ~(mask << st_bit);
	l |= ticks << st_bit;
	gpmc_cs_write_reg(cs, reg, l);

	return 0;
}
/*
 * Helper for gpmc_cs_set_timings(): program one field from struct
 * gpmc_timings and return -1 from the *enclosing* function on overflow.
 * The DEBUG variant also passes the field name for diagnostics.
 */
#ifdef DEBUG
#define GPMC_SET_ONE(reg, st, end, field) \
	if (set_gpmc_timing_reg(cs, (reg), (st), (end),		\
			t->field, #field) < 0)			\
		return -1
#else
#define GPMC_SET_ONE(reg, st, end, field) \
	if (set_gpmc_timing_reg(cs, (reg), (st), (end), t->field) < 0) \
		return -1
#endif
  277. int gpmc_calc_divider(unsigned int sync_clk)
  278. {
  279. int div;
  280. u32 l;
  281. l = sync_clk + (gpmc_get_fclk_period() - 1);
  282. div = l / gpmc_get_fclk_period();
  283. if (div > 4)
  284. return -1;
  285. if (div <= 0)
  286. div = 1;
  287. return div;
  288. }
/*
 * Program the complete set of timing fields for chip select @cs from
 * @t.  Returns 0 on success or a negative value if the sync clock
 * divider is invalid; each GPMC_SET_ONE() below expands to an early
 * "return -1" if its field value overflows.
 */
int gpmc_cs_set_timings(int cs, const struct gpmc_timings *t)
{
	int div;
	u32 l;

	div = gpmc_calc_divider(t->sync_clk);
	if (div < 0)
		return div;

	GPMC_SET_ONE(GPMC_CS_CONFIG2,  0,  3, cs_on);
	GPMC_SET_ONE(GPMC_CS_CONFIG2,  8, 12, cs_rd_off);
	GPMC_SET_ONE(GPMC_CS_CONFIG2, 16, 20, cs_wr_off);

	GPMC_SET_ONE(GPMC_CS_CONFIG3,  0,  3, adv_on);
	GPMC_SET_ONE(GPMC_CS_CONFIG3,  8, 12, adv_rd_off);
	GPMC_SET_ONE(GPMC_CS_CONFIG3, 16, 20, adv_wr_off);

	GPMC_SET_ONE(GPMC_CS_CONFIG4,  0,  3, oe_on);
	GPMC_SET_ONE(GPMC_CS_CONFIG4,  8, 12, oe_off);
	GPMC_SET_ONE(GPMC_CS_CONFIG4, 16, 19, we_on);
	GPMC_SET_ONE(GPMC_CS_CONFIG4, 24, 28, we_off);

	GPMC_SET_ONE(GPMC_CS_CONFIG5,  0,  4, rd_cycle);
	GPMC_SET_ONE(GPMC_CS_CONFIG5,  8, 12, wr_cycle);
	GPMC_SET_ONE(GPMC_CS_CONFIG5, 16, 20, access);

	GPMC_SET_ONE(GPMC_CS_CONFIG5, 24, 27, page_burst_access);

	GPMC_SET_ONE(GPMC_CS_CONFIG6, 0, 3, bus_turnaround);
	GPMC_SET_ONE(GPMC_CS_CONFIG6, 8, 11, cycle2cycle_delay);

	GPMC_SET_ONE(GPMC_CS_CONFIG1, 18, 19, wait_monitoring);
	GPMC_SET_ONE(GPMC_CS_CONFIG1, 25, 26, clk_activation);

	/* wr_* fields only exist on GPMC revisions that advertise them */
	if (gpmc_capability & GPMC_HAS_WR_DATA_MUX_BUS)
		GPMC_SET_ONE(GPMC_CS_CONFIG6, 16, 19, wr_data_mux_bus);
	if (gpmc_capability & GPMC_HAS_WR_ACCESS)
		GPMC_SET_ONE(GPMC_CS_CONFIG6, 24, 28, wr_access);

	/* caller is expected to have initialized CONFIG1 to cover
	 * at least sync vs async
	 */
	l = gpmc_cs_read_reg(cs, GPMC_CS_CONFIG1);
	if (l & (GPMC_CONFIG1_READTYPE_SYNC | GPMC_CONFIG1_WRITETYPE_SYNC)) {
#ifdef DEBUG
		printk(KERN_INFO "GPMC CS%d CLK period is %lu ns (div %d)\n",
				cs, (div * gpmc_get_fclk_period()) / 1000, div);
#endif
		/* GPMCFCLKDIVIDER lives in CONFIG1 bits 1:0 */
		l &= ~0x03;
		l |= (div - 1);
		gpmc_cs_write_reg(cs, GPMC_CS_CONFIG1, l);
	}

	gpmc_cs_bool_timings(cs, &t->bool_timings);

	return 0;
}
  334. static void gpmc_cs_enable_mem(int cs, u32 base, u32 size)
  335. {
  336. u32 l;
  337. u32 mask;
  338. mask = (1 << GPMC_SECTION_SHIFT) - size;
  339. l = gpmc_cs_read_reg(cs, GPMC_CS_CONFIG7);
  340. l &= ~0x3f;
  341. l = (base >> GPMC_CHUNK_SHIFT) & 0x3f;
  342. l &= ~(0x0f << 8);
  343. l |= ((mask >> GPMC_CHUNK_SHIFT) & 0x0f) << 8;
  344. l |= GPMC_CONFIG7_CSVALID;
  345. gpmc_cs_write_reg(cs, GPMC_CS_CONFIG7, l);
  346. }
/* Disable the address decode for chip select @cs (clear CSVALID) */
static void gpmc_cs_disable_mem(int cs)
{
	u32 l;

	l = gpmc_cs_read_reg(cs, GPMC_CS_CONFIG7);
	l &= ~GPMC_CONFIG7_CSVALID;
	gpmc_cs_write_reg(cs, GPMC_CS_CONFIG7, l);
}
/*
 * Decode CONFIG7 of chip select @cs into the mapped physical @base
 * address and window @size (inverse of gpmc_cs_enable_mem()).
 */
static void gpmc_cs_get_memconf(int cs, u32 *base, u32 *size)
{
	u32 l;
	u32 mask;

	l = gpmc_cs_read_reg(cs, GPMC_CS_CONFIG7);
	*base = (l & 0x3f) << GPMC_CHUNK_SHIFT;
	mask = (l >> 8) & 0x0f;
	*size = (1 << GPMC_SECTION_SHIFT) - (mask << GPMC_CHUNK_SHIFT);
}
/* Return nonzero if chip select @cs has a valid address mapping */
static int gpmc_cs_mem_enabled(int cs)
{
	u32 l;

	l = gpmc_cs_read_reg(cs, GPMC_CS_CONFIG7);
	return l & GPMC_CONFIG7_CSVALID;
}
  369. static void gpmc_cs_set_reserved(int cs, int reserved)
  370. {
  371. gpmc_cs_map &= ~(1 << cs);
  372. gpmc_cs_map |= (reserved ? 1 : 0) << cs;
  373. }
/* Return true if chip select @cs is currently reserved */
static bool gpmc_cs_reserved(int cs)
{
	return gpmc_cs_map & (1 << cs);
}
  378. static unsigned long gpmc_mem_align(unsigned long size)
  379. {
  380. int order;
  381. size = (size - 1) >> (GPMC_CHUNK_SHIFT - 1);
  382. order = GPMC_CHUNK_SHIFT - 1;
  383. do {
  384. size >>= 1;
  385. order++;
  386. } while (size);
  387. size = 1 << order;
  388. return size;
  389. }
/*
 * Register [@base, @base + align(@size)) as chip select @cs's resource
 * under gpmc_mem_root.  Returns the request_resource() result.
 */
static int gpmc_cs_insert_mem(int cs, unsigned long base, unsigned long size)
{
	struct resource	*res = &gpmc_cs_mem[cs];
	int r;

	size = gpmc_mem_align(size);
	spin_lock(&gpmc_mem_lock);
	res->start = base;
	res->end = base + size - 1;
	r = request_resource(&gpmc_mem_root, res);
	spin_unlock(&gpmc_mem_lock);

	return r;
}
/*
 * Release chip select @cs's memory resource and reset its range.
 * Returns the release_resource() result.
 */
static int gpmc_cs_delete_mem(int cs)
{
	struct resource	*res = &gpmc_cs_mem[cs];
	int r;

	spin_lock(&gpmc_mem_lock);
	r = release_resource(&gpmc_cs_mem[cs]);
	res->start = 0;
	res->end = 0;
	spin_unlock(&gpmc_mem_lock);

	return r;
}
  413. int gpmc_cs_request(int cs, unsigned long size, unsigned long *base)
  414. {
  415. struct resource *res = &gpmc_cs_mem[cs];
  416. int r = -1;
  417. if (cs > GPMC_CS_NUM)
  418. return -ENODEV;
  419. size = gpmc_mem_align(size);
  420. if (size > (1 << GPMC_SECTION_SHIFT))
  421. return -ENOMEM;
  422. spin_lock(&gpmc_mem_lock);
  423. if (gpmc_cs_reserved(cs)) {
  424. r = -EBUSY;
  425. goto out;
  426. }
  427. if (gpmc_cs_mem_enabled(cs))
  428. r = adjust_resource(res, res->start & ~(size - 1), size);
  429. if (r < 0)
  430. r = allocate_resource(&gpmc_mem_root, res, size, 0, ~0,
  431. size, NULL, NULL);
  432. if (r < 0)
  433. goto out;
  434. gpmc_cs_enable_mem(cs, res->start, resource_size(res));
  435. *base = res->start;
  436. gpmc_cs_set_reserved(cs, 1);
  437. out:
  438. spin_unlock(&gpmc_mem_lock);
  439. return r;
  440. }
  441. EXPORT_SYMBOL(gpmc_cs_request);
  442. void gpmc_cs_free(int cs)
  443. {
  444. spin_lock(&gpmc_mem_lock);
  445. if (cs >= GPMC_CS_NUM || cs < 0 || !gpmc_cs_reserved(cs)) {
  446. printk(KERN_ERR "Trying to free non-reserved GPMC CS%d\n", cs);
  447. BUG();
  448. spin_unlock(&gpmc_mem_lock);
  449. return;
  450. }
  451. gpmc_cs_disable_mem(cs);
  452. release_resource(&gpmc_cs_mem[cs]);
  453. gpmc_cs_set_reserved(cs, 0);
  454. spin_unlock(&gpmc_mem_lock);
  455. }
  456. EXPORT_SYMBOL(gpmc_cs_free);
/**
 * gpmc_cs_configure - write request to configure gpmc
 * @cs: chip select number
 * @cmd: command type (GPMC_ENABLE_IRQ, GPMC_SET_IRQ_STATUS,
 *	 GPMC_CONFIG_WP, GPMC_CONFIG_RDY_BSY, GPMC_CONFIG_DEV_SIZE,
 *	 GPMC_CONFIG_DEV_TYPE)
 * @wval: value to write
 * @return status of the operation: 0 on success, -EINVAL for an
 *	 unknown @cmd
 */
int gpmc_cs_configure(int cs, int cmd, int wval)
{
	int err = 0;
	u32 regval = 0;

	switch (cmd) {
	case GPMC_ENABLE_IRQ:
		gpmc_write_reg(GPMC_IRQENABLE, wval);
		break;

	case GPMC_SET_IRQ_STATUS:
		gpmc_write_reg(GPMC_IRQSTATUS, wval);
		break;

	case GPMC_CONFIG_WP:
		/* WRITEPROTECT bit is active-low: clearing it asserts WP */
		regval = gpmc_read_reg(GPMC_CONFIG);
		if (wval)
			regval &= ~GPMC_CONFIG_WRITEPROTECT; /* WP is ON */
		else
			regval |= GPMC_CONFIG_WRITEPROTECT;  /* WP is OFF */
		gpmc_write_reg(GPMC_CONFIG, regval);
		break;

	case GPMC_CONFIG_RDY_BSY:
		/* enable/disable WAIT-pin (ready/busy) monitoring for @cs */
		regval = gpmc_cs_read_reg(cs, GPMC_CS_CONFIG1);
		if (wval)
			regval |= WR_RD_PIN_MONITORING;
		else
			regval &= ~WR_RD_PIN_MONITORING;
		gpmc_cs_write_reg(cs, GPMC_CS_CONFIG1, regval);
		break;

	case GPMC_CONFIG_DEV_SIZE:
		regval  = gpmc_cs_read_reg(cs, GPMC_CS_CONFIG1);

		/* clear 2 target bits */
		regval &= ~GPMC_CONFIG1_DEVICESIZE(3);

		/* set the proper value */
		regval |= GPMC_CONFIG1_DEVICESIZE(wval);

		gpmc_cs_write_reg(cs, GPMC_CS_CONFIG1, regval);
		break;

	case GPMC_CONFIG_DEV_TYPE:
		regval  = gpmc_cs_read_reg(cs, GPMC_CS_CONFIG1);
		/* clear 4 target bits */
		regval &= ~(GPMC_CONFIG1_DEVICETYPE(3) |
			    GPMC_CONFIG1_MUXTYPE(3));
		/* set the proper value */
		regval |= GPMC_CONFIG1_DEVICETYPE(wval);
		/* NOR devices are always address/data multiplexed here */
		if (wval == GPMC_DEVICETYPE_NOR)
			regval |= GPMC_CONFIG1_MUXADDDATA;
		gpmc_cs_write_reg(cs, GPMC_CS_CONFIG1, regval);
		break;

	default:
		printk(KERN_ERR "gpmc_configure_cs: Not supported\n");
		err = -EINVAL;
	}

	return err;
}
EXPORT_SYMBOL(gpmc_cs_configure);
/*
 * Fill @reg with the ioremapped addresses of every GPMC register the
 * NAND driver needs: the per-CS NAND command/address/data registers
 * for chip select @cs, plus the shared prefetch and ECC registers.
 */
void gpmc_update_nand_reg(struct gpmc_nand_regs *reg, int cs)
{
	int i;

	reg->gpmc_status = gpmc_base + GPMC_STATUS;
	/* per-chip-select NAND access registers */
	reg->gpmc_nand_command = gpmc_base + GPMC_CS0_OFFSET +
				GPMC_CS_NAND_COMMAND + GPMC_CS_SIZE * cs;
	reg->gpmc_nand_address = gpmc_base + GPMC_CS0_OFFSET +
				GPMC_CS_NAND_ADDRESS + GPMC_CS_SIZE * cs;
	reg->gpmc_nand_data = gpmc_base + GPMC_CS0_OFFSET +
				GPMC_CS_NAND_DATA + GPMC_CS_SIZE * cs;
	/* shared prefetch engine and ECC registers */
	reg->gpmc_prefetch_config1 = gpmc_base + GPMC_PREFETCH_CONFIG1;
	reg->gpmc_prefetch_config2 = gpmc_base + GPMC_PREFETCH_CONFIG2;
	reg->gpmc_prefetch_control = gpmc_base + GPMC_PREFETCH_CONTROL;
	reg->gpmc_prefetch_status = gpmc_base + GPMC_PREFETCH_STATUS;
	reg->gpmc_ecc_config = gpmc_base + GPMC_ECC_CONFIG;
	reg->gpmc_ecc_control = gpmc_base + GPMC_ECC_CONTROL;
	reg->gpmc_ecc_size_config = gpmc_base + GPMC_ECC_SIZE_CONFIG;
	reg->gpmc_ecc1_result = gpmc_base + GPMC_ECC1_RESULT;
	/* BCH result registers come in GPMC_BCH_SIZE-spaced banks */
	for (i = 0; i < GPMC_BCH_NUM_REMAINDER; i++) {
		reg->gpmc_bch_result0[i] = gpmc_base + GPMC_ECC_BCH_RESULT_0 +
					   GPMC_BCH_SIZE * i;
		reg->gpmc_bch_result1[i] = gpmc_base + GPMC_ECC_BCH_RESULT_1 +
					   GPMC_BCH_SIZE * i;
		reg->gpmc_bch_result2[i] = gpmc_base + GPMC_ECC_BCH_RESULT_2 +
					   GPMC_BCH_SIZE * i;
		reg->gpmc_bch_result3[i] = gpmc_base + GPMC_ECC_BCH_RESULT_3 +
					   GPMC_BCH_SIZE * i;
	}
}
  546. int gpmc_get_client_irq(unsigned irq_config)
  547. {
  548. int i;
  549. if (hweight32(irq_config) > 1)
  550. return 0;
  551. for (i = 0; i < GPMC_NR_IRQ; i++)
  552. if (gpmc_client_irq[i].bitmask & irq_config)
  553. return gpmc_client_irq[i].irq;
  554. return 0;
  555. }
  556. static int gpmc_irq_endis(unsigned irq, bool endis)
  557. {
  558. int i;
  559. u32 regval;
  560. for (i = 0; i < GPMC_NR_IRQ; i++)
  561. if (irq == gpmc_client_irq[i].irq) {
  562. regval = gpmc_read_reg(GPMC_IRQENABLE);
  563. if (endis)
  564. regval |= gpmc_client_irq[i].bitmask;
  565. else
  566. regval &= ~gpmc_client_irq[i].bitmask;
  567. gpmc_write_reg(GPMC_IRQENABLE, regval);
  568. break;
  569. }
  570. return 0;
  571. }
/* irq_chip .irq_disable: mask this client's event in GPMC_IRQENABLE */
static void gpmc_irq_disable(struct irq_data *p)
{
	gpmc_irq_endis(p->irq, false);
}
/* irq_chip .irq_enable: unmask this client's event in GPMC_IRQENABLE */
static void gpmc_irq_enable(struct irq_data *p)
{
	gpmc_irq_endis(p->irq, true);
}
/* No-op irq_chip callbacks: masking is done via GPMC_IRQENABLE in
 * gpmc_irq_enable()/gpmc_irq_disable(), so ack/mask/unmask do nothing. */
static void gpmc_irq_noop(struct irq_data *data) { }

static unsigned int gpmc_irq_noop_ret(struct irq_data *data) { return 0; }
  582. static int gpmc_setup_irq(void)
  583. {
  584. int i;
  585. u32 regval;
  586. if (!gpmc_irq)
  587. return -EINVAL;
  588. gpmc_irq_start = irq_alloc_descs(-1, 0, GPMC_NR_IRQ, 0);
  589. if (gpmc_irq_start < 0) {
  590. pr_err("irq_alloc_descs failed\n");
  591. return gpmc_irq_start;
  592. }
  593. gpmc_irq_chip.name = "gpmc";
  594. gpmc_irq_chip.irq_startup = gpmc_irq_noop_ret;
  595. gpmc_irq_chip.irq_enable = gpmc_irq_enable;
  596. gpmc_irq_chip.irq_disable = gpmc_irq_disable;
  597. gpmc_irq_chip.irq_shutdown = gpmc_irq_noop;
  598. gpmc_irq_chip.irq_ack = gpmc_irq_noop;
  599. gpmc_irq_chip.irq_mask = gpmc_irq_noop;
  600. gpmc_irq_chip.irq_unmask = gpmc_irq_noop;
  601. gpmc_client_irq[0].bitmask = GPMC_IRQ_FIFOEVENTENABLE;
  602. gpmc_client_irq[1].bitmask = GPMC_IRQ_COUNT_EVENT;
  603. for (i = 0; i < GPMC_NR_IRQ; i++) {
  604. gpmc_client_irq[i].irq = gpmc_irq_start + i;
  605. irq_set_chip_and_handler(gpmc_client_irq[i].irq,
  606. &gpmc_irq_chip, handle_simple_irq);
  607. set_irq_flags(gpmc_client_irq[i].irq,
  608. IRQF_VALID | IRQF_NOAUTOEN);
  609. }
  610. /* Disable interrupts */
  611. gpmc_write_reg(GPMC_IRQENABLE, 0);
  612. /* clear interrupts */
  613. regval = gpmc_read_reg(GPMC_IRQSTATUS);
  614. gpmc_write_reg(GPMC_IRQSTATUS, regval);
  615. return request_irq(gpmc_irq, gpmc_handle_irq, 0, "gpmc", NULL);
  616. }
/*
 * Undo gpmc_setup_irq(): release the GPMC interrupt line, detach the
 * client irq handlers/chips and free the virtual irq descriptors.
 * Always returns 0.
 */
static int gpmc_free_irq(void)
{
	int i;

	if (gpmc_irq)
		free_irq(gpmc_irq, NULL);

	for (i = 0; i < GPMC_NR_IRQ; i++) {
		irq_set_handler(gpmc_client_irq[i].irq, NULL);
		irq_set_chip(gpmc_client_irq[i].irq, &no_irq_chip);
		irq_modify_status(gpmc_client_irq[i].irq, 0, 0);
	}

	irq_free_descs(gpmc_irq_start, GPMC_NR_IRQ);

	return 0;
}
  630. static void gpmc_mem_exit(void)
  631. {
  632. int cs;
  633. for (cs = 0; cs < GPMC_CS_NUM; cs++) {
  634. if (!gpmc_cs_mem_enabled(cs))
  635. continue;
  636. gpmc_cs_delete_mem(cs);
  637. }
  638. }
/*
 * Initialize the GPMC address-space resource tree and claim every
 * region already mapped by the bootloader.  On failure, releases the
 * regions inserted so far and returns the error.
 */
static int gpmc_mem_init(void)
{
	int cs, rc;
	unsigned long boot_rom_space = 0;

	/* never allocate the first page, to facilitate bug detection;
	 * even if we didn't boot from ROM.
	 */
	boot_rom_space = BOOT_ROM_SPACE;
	gpmc_mem_root.start = GPMC_MEM_START + boot_rom_space;
	gpmc_mem_root.end = GPMC_MEM_END;

	/* Reserve all regions that has been set up by bootloader */
	for (cs = 0; cs < GPMC_CS_NUM; cs++) {
		u32 base, size;

		if (!gpmc_cs_mem_enabled(cs))
			continue;
		gpmc_cs_get_memconf(cs, &base, &size);
		rc = gpmc_cs_insert_mem(cs, base, size);
		if (rc < 0) {
			/* unwind: delete what was inserted before failing */
			while (--cs >= 0)
				if (gpmc_cs_mem_enabled(cs))
					gpmc_cs_delete_mem(cs);
			return rc;
		}
	}

	return 0;
}
/*
 * Round @time_ps up to a whole number of sync-clock cycles (the sync
 * clock is fclk / div) and return the result in picoseconds.
 * NOTE(review): gpmc_calc_divider() can return -1 for an unmakeable
 * @sync_clk, which would make the division below misbehave — callers
 * appear to pass already-validated sync_clk values; confirm.
 */
static u32 gpmc_round_ps_to_sync_clk(u32 time_ps, u32 sync_clk)
{
	u32 temp;
	int div;

	div = gpmc_calc_divider(sync_clk);
	temp = gpmc_ps_to_ticks(time_ps);
	temp = (temp + div - 1) / div;
	return gpmc_ticks_to_ps(temp * div);
}
/* XXX: can the cycles be avoided ? */
/*
 * Derive the synchronous-read GPMC timings (adv_rd_off, oe_on, access,
 * oe_off, cs_rd_off, rd_cycle) in @gpmc_t from the device timings in
 * @dev_t.  All values are in picoseconds; each result is rounded up to
 * a whole fclk tick.  @dev_t->mux selects address/data-muxed handling.
 * Always returns 0.
 */
static int gpmc_calc_sync_read_timings(struct gpmc_timings *gpmc_t,
				       struct gpmc_device_timings *dev_t)
{
	bool mux = dev_t->mux;
	u32 temp;

	/* adv_rd_off */
	temp = dev_t->t_avdp_r;
	/* XXX: mux check required ? */
	if (mux) {
		/* XXX: t_avdp not to be required for sync, only added for tusb
		 * this indirectly necessitates requirement of t_avdp_r and
		 * t_avdp_w instead of having a single t_avdp
		 */
		temp = max_t(u32, temp, gpmc_t->clk_activation + dev_t->t_avdh);
		temp = max_t(u32, gpmc_t->adv_on + gpmc_ticks_to_ps(1), temp);
	}
	gpmc_t->adv_rd_off = gpmc_round_ps_to_ticks(temp);

	/* oe_on */
	temp = dev_t->t_oeasu; /* XXX: remove this ? */
	if (mux) {
		temp = max_t(u32, temp, gpmc_t->clk_activation + dev_t->t_ach);
		temp = max_t(u32, temp, gpmc_t->adv_rd_off +
				gpmc_ticks_to_ps(dev_t->cyc_aavdh_oe));
	}
	gpmc_t->oe_on = gpmc_round_ps_to_ticks(temp);

	/* access */
	/* XXX: any scope for improvement ?, by combining oe_on
	 * and clk_activation, need to check whether
	 * access = clk_activation + round to sync clk ?
	 */
	temp = max_t(u32, dev_t->t_iaa, dev_t->cyc_iaa * gpmc_t->sync_clk);
	temp += gpmc_t->clk_activation;
	if (dev_t->cyc_oe)
		temp = max_t(u32, temp, gpmc_t->oe_on +
				gpmc_ticks_to_ps(dev_t->cyc_oe));
	gpmc_t->access = gpmc_round_ps_to_ticks(temp);

	/* OE is deasserted one tick after data is sampled */
	gpmc_t->oe_off = gpmc_t->access + gpmc_ticks_to_ps(1);
	gpmc_t->cs_rd_off = gpmc_t->oe_off;

	/* rd_cycle */
	temp = max_t(u32, dev_t->t_cez_r, dev_t->t_oez);
	temp = gpmc_round_ps_to_sync_clk(temp, gpmc_t->sync_clk) +
							gpmc_t->access;
	/* XXX: barter t_ce_rdyz with t_cez_r ? */
	if (dev_t->t_ce_rdyz)
		temp = max_t(u32, temp, gpmc_t->cs_rd_off + dev_t->t_ce_rdyz);
	gpmc_t->rd_cycle = gpmc_round_ps_to_ticks(temp);

	return 0;
}
/*
 * Derive the synchronous-write GPMC timings (adv_wr_off,
 * wr_data_mux_bus, we_on, wr_access, we_off, cs_wr_off, wr_cycle) in
 * @gpmc_t from the device timings in @dev_t.  All values are in
 * picoseconds, rounded up to whole fclk ticks.  Always returns 0.
 */
static int gpmc_calc_sync_write_timings(struct gpmc_timings *gpmc_t,
					struct gpmc_device_timings *dev_t)
{
	bool mux = dev_t->mux;
	u32 temp;

	/* adv_wr_off */
	temp = dev_t->t_avdp_w;
	if (mux) {
		temp = max_t(u32, temp,
			gpmc_t->clk_activation + dev_t->t_avdh);
		temp = max_t(u32, gpmc_t->adv_on + gpmc_ticks_to_ps(1), temp);
	}
	gpmc_t->adv_wr_off = gpmc_round_ps_to_ticks(temp);

	/* wr_data_mux_bus */
	temp = max_t(u32, dev_t->t_weasu,
			gpmc_t->clk_activation + dev_t->t_rdyo);
	/* XXX: shouldn't mux be kept as a whole for wr_data_mux_bus ?,
	 * and in that case remember to handle we_on properly
	 */
	if (mux) {
		temp = max_t(u32, temp,
			gpmc_t->adv_wr_off + dev_t->t_aavdh);
		temp = max_t(u32, temp, gpmc_t->adv_wr_off +
				gpmc_ticks_to_ps(dev_t->cyc_aavdh_we));
	}
	gpmc_t->wr_data_mux_bus = gpmc_round_ps_to_ticks(temp);

	/* we_on */
	if (gpmc_capability & GPMC_HAS_WR_DATA_MUX_BUS)
		gpmc_t->we_on = gpmc_round_ps_to_ticks(dev_t->t_weasu);
	else
		gpmc_t->we_on = gpmc_t->wr_data_mux_bus;

	/* wr_access */
	/* XXX: gpmc_capability check reqd ? , even if not, will not harm */
	gpmc_t->wr_access = gpmc_t->access;

	/* we_off */
	temp = gpmc_t->we_on + dev_t->t_wpl;
	temp = max_t(u32, temp,
			gpmc_t->wr_access + gpmc_ticks_to_ps(1));
	temp = max_t(u32, temp,
		gpmc_t->we_on + gpmc_ticks_to_ps(dev_t->cyc_wpl));
	gpmc_t->we_off = gpmc_round_ps_to_ticks(temp);

	gpmc_t->cs_wr_off = gpmc_round_ps_to_ticks(gpmc_t->we_off +
							dev_t->t_wph);

	/* wr_cycle */
	temp = gpmc_round_ps_to_sync_clk(dev_t->t_cez_w, gpmc_t->sync_clk);
	temp += gpmc_t->wr_access;
	/* XXX: barter t_ce_rdyz with t_cez_w ? */
	if (dev_t->t_ce_rdyz)
		temp = max_t(u32, temp,
				 gpmc_t->cs_wr_off + dev_t->t_ce_rdyz);
	gpmc_t->wr_cycle = gpmc_round_ps_to_ticks(temp);

	return 0;
}
/*
 * gpmc_calc_async_read_timings - derive asynchronous-read timings (in ps)
 * @gpmc_t: GPMC timings being filled in; cs_on and adv_on must already
 *	    be valid
 * @dev_t: raw device timing requirements
 *
 * Computes ADV/OE assertion and deassertion, data access time and the
 * total read cycle for a non-clocked read.  Always returns 0.
 */
static int gpmc_calc_async_read_timings(struct gpmc_timings *gpmc_t,
			struct gpmc_device_timings *dev_t)
{
	bool mux = dev_t->mux;
	u32 temp;

	/* adv_rd_off: ADV deassertion point for a read */
	temp = dev_t->t_avdp_r;
	if (mux)
		/* keep ADV asserted for at least one GPMC tick */
		temp = max_t(u32, gpmc_t->adv_on + gpmc_ticks_to_ps(1), temp);
	gpmc_t->adv_rd_off = gpmc_round_ps_to_ticks(temp);

	/* oe_on: OE setup, plus address hold after ADV in muxed mode */
	temp = dev_t->t_oeasu;
	if (mux)
		temp = max_t(u32, temp,
			gpmc_t->adv_rd_off + dev_t->t_aavdh);
	gpmc_t->oe_on = gpmc_round_ps_to_ticks(temp);

	/* access: worst of OE-, CE- and address-relative access times */
	temp = max_t(u32, dev_t->t_iaa, /* XXX: remove t_iaa in async ? */
		gpmc_t->oe_on + dev_t->t_oe);
	temp = max_t(u32, temp,
		gpmc_t->cs_on + dev_t->t_ce);
	temp = max_t(u32, temp,
		gpmc_t->adv_on + dev_t->t_aa);
	gpmc_t->access = gpmc_round_ps_to_ticks(temp);

	/* OE and CS deassert one tick after data is sampled */
	gpmc_t->oe_off = gpmc_t->access + gpmc_ticks_to_ps(1);
	gpmc_t->cs_rd_off = gpmc_t->oe_off;

	/* rd_cycle: total cycle including bus-turnaround (t_cez_r/t_oez) */
	temp = max_t(u32, dev_t->t_rd_cycle,
		gpmc_t->cs_rd_off + dev_t->t_cez_r);
	temp = max_t(u32, temp, gpmc_t->oe_off + dev_t->t_oez);
	gpmc_t->rd_cycle = gpmc_round_ps_to_ticks(temp);

	return 0;
}
  809. static int gpmc_calc_async_write_timings(struct gpmc_timings *gpmc_t,
  810. struct gpmc_device_timings *dev_t)
  811. {
  812. bool mux = dev_t->mux;
  813. u32 temp;
  814. /* adv_wr_off */
  815. temp = dev_t->t_avdp_w;
  816. if (mux)
  817. temp = max_t(u32, gpmc_t->adv_on + gpmc_ticks_to_ps(1), temp);
  818. gpmc_t->adv_wr_off = gpmc_round_ps_to_ticks(temp);
  819. /* wr_data_mux_bus */
  820. temp = dev_t->t_weasu;
  821. if (mux) {
  822. temp = max_t(u32, temp, gpmc_t->adv_wr_off + dev_t->t_aavdh);
  823. temp = max_t(u32, temp, gpmc_t->adv_wr_off +
  824. gpmc_ticks_to_ps(dev_t->cyc_aavdh_we));
  825. }
  826. gpmc_t->wr_data_mux_bus = gpmc_round_ps_to_ticks(temp);
  827. /* we_on */
  828. if (gpmc_capability & GPMC_HAS_WR_DATA_MUX_BUS)
  829. gpmc_t->we_on = gpmc_round_ps_to_ticks(dev_t->t_weasu);
  830. else
  831. gpmc_t->we_on = gpmc_t->wr_data_mux_bus;
  832. /* we_off */
  833. temp = gpmc_t->we_on + dev_t->t_wpl;
  834. gpmc_t->we_off = gpmc_round_ps_to_ticks(temp);
  835. gpmc_t->cs_wr_off = gpmc_round_ps_to_ticks(gpmc_t->we_off +
  836. dev_t->t_wph);
  837. /* wr_cycle */
  838. temp = max_t(u32, dev_t->t_wr_cycle,
  839. gpmc_t->cs_wr_off + dev_t->t_cez_w);
  840. gpmc_t->wr_cycle = gpmc_round_ps_to_ticks(temp);
  841. return 0;
  842. }
  843. static int gpmc_calc_sync_common_timings(struct gpmc_timings *gpmc_t,
  844. struct gpmc_device_timings *dev_t)
  845. {
  846. u32 temp;
  847. gpmc_t->sync_clk = gpmc_calc_divider(dev_t->clk) *
  848. gpmc_get_fclk_period();
  849. gpmc_t->page_burst_access = gpmc_round_ps_to_sync_clk(
  850. dev_t->t_bacc,
  851. gpmc_t->sync_clk);
  852. temp = max_t(u32, dev_t->t_ces, dev_t->t_avds);
  853. gpmc_t->clk_activation = gpmc_round_ps_to_ticks(temp);
  854. if (gpmc_calc_divider(gpmc_t->sync_clk) != 1)
  855. return 0;
  856. if (dev_t->ce_xdelay)
  857. gpmc_t->bool_timings.cs_extra_delay = true;
  858. if (dev_t->avd_xdelay)
  859. gpmc_t->bool_timings.adv_extra_delay = true;
  860. if (dev_t->oe_xdelay)
  861. gpmc_t->bool_timings.oe_extra_delay = true;
  862. if (dev_t->we_xdelay)
  863. gpmc_t->bool_timings.we_extra_delay = true;
  864. return 0;
  865. }
  866. static int gpmc_calc_common_timings(struct gpmc_timings *gpmc_t,
  867. struct gpmc_device_timings *dev_t)
  868. {
  869. u32 temp;
  870. /* cs_on */
  871. gpmc_t->cs_on = gpmc_round_ps_to_ticks(dev_t->t_ceasu);
  872. /* adv_on */
  873. temp = dev_t->t_avdasu;
  874. if (dev_t->t_ce_avd)
  875. temp = max_t(u32, temp,
  876. gpmc_t->cs_on + dev_t->t_ce_avd);
  877. gpmc_t->adv_on = gpmc_round_ps_to_ticks(temp);
  878. if (dev_t->sync_write || dev_t->sync_read)
  879. gpmc_calc_sync_common_timings(gpmc_t, dev_t);
  880. return 0;
  881. }
/* TODO: remove this function once all peripherals are confirmed to
 * work with generic timing. Simultaneously gpmc_cs_set_timings()
 * has to be modified to handle timings in ps instead of ns
 */
/* Scale every timing field from picoseconds down to nanoseconds
 * (integer division; sub-ns remainders are truncated). */
static void gpmc_convert_ps_to_ns(struct gpmc_timings *t)
{
	t->cs_on /= 1000;
	t->cs_rd_off /= 1000;
	t->cs_wr_off /= 1000;
	t->adv_on /= 1000;
	t->adv_rd_off /= 1000;
	t->adv_wr_off /= 1000;
	t->we_on /= 1000;
	t->we_off /= 1000;
	t->oe_on /= 1000;
	t->oe_off /= 1000;
	t->page_burst_access /= 1000;
	t->access /= 1000;
	t->rd_cycle /= 1000;
	t->wr_cycle /= 1000;
	t->bus_turnaround /= 1000;
	t->cycle2cycle_delay /= 1000;
	t->wait_monitoring /= 1000;
	t->clk_activation /= 1000;
	t->wr_access /= 1000;
	t->wr_data_mux_bus /= 1000;
}
  909. int gpmc_calc_timings(struct gpmc_timings *gpmc_t,
  910. struct gpmc_device_timings *dev_t)
  911. {
  912. memset(gpmc_t, 0, sizeof(*gpmc_t));
  913. gpmc_calc_common_timings(gpmc_t, dev_t);
  914. if (dev_t->sync_read)
  915. gpmc_calc_sync_read_timings(gpmc_t, dev_t);
  916. else
  917. gpmc_calc_async_read_timings(gpmc_t, dev_t);
  918. if (dev_t->sync_write)
  919. gpmc_calc_sync_write_timings(gpmc_t, dev_t);
  920. else
  921. gpmc_calc_async_write_timings(gpmc_t, dev_t);
  922. /* TODO: remove, see function definition */
  923. gpmc_convert_ps_to_ns(gpmc_t);
  924. return 0;
  925. }
  926. #ifdef CONFIG_OF
  927. static struct of_device_id gpmc_dt_ids[] = {
  928. { .compatible = "ti,omap2420-gpmc" },
  929. { .compatible = "ti,omap2430-gpmc" },
  930. { .compatible = "ti,omap3430-gpmc" }, /* omap3430 & omap3630 */
  931. { .compatible = "ti,omap4430-gpmc" }, /* omap4430 & omap4460 & omap543x */
  932. { .compatible = "ti,am3352-gpmc" }, /* am335x devices */
  933. { }
  934. };
  935. MODULE_DEVICE_TABLE(of, gpmc_dt_ids);
/*
 * gpmc_read_timings_dt - read GPMC timing properties from a DT node
 * @np: device node carrying the "gpmc,*" timing properties
 * @gpmc_t: timings structure to fill; zeroed first, so any property
 *	    absent from the node is left at 0
 */
static void __maybe_unused gpmc_read_timings_dt(struct device_node *np,
						struct gpmc_timings *gpmc_t)
{
	u32 val;

	memset(gpmc_t, 0, sizeof(*gpmc_t));

	/* minimum clock period for synchronous mode */
	if (!of_property_read_u32(np, "gpmc,sync-clk", &val))
		gpmc_t->sync_clk = val;

	/* chip select timings */
	if (!of_property_read_u32(np, "gpmc,cs-on", &val))
		gpmc_t->cs_on = val;

	if (!of_property_read_u32(np, "gpmc,cs-rd-off", &val))
		gpmc_t->cs_rd_off = val;

	if (!of_property_read_u32(np, "gpmc,cs-wr-off", &val))
		gpmc_t->cs_wr_off = val;

	/* ADV signal timings */
	if (!of_property_read_u32(np, "gpmc,adv-on", &val))
		gpmc_t->adv_on = val;

	if (!of_property_read_u32(np, "gpmc,adv-rd-off", &val))
		gpmc_t->adv_rd_off = val;

	if (!of_property_read_u32(np, "gpmc,adv-wr-off", &val))
		gpmc_t->adv_wr_off = val;

	/* WE signal timings */
	if (!of_property_read_u32(np, "gpmc,we-on", &val))
		gpmc_t->we_on = val;

	if (!of_property_read_u32(np, "gpmc,we-off", &val))
		gpmc_t->we_off = val;

	/* OE signal timings */
	if (!of_property_read_u32(np, "gpmc,oe-on", &val))
		gpmc_t->oe_on = val;

	if (!of_property_read_u32(np, "gpmc,oe-off", &val))
		gpmc_t->oe_off = val;

	/* access and cycle timings */
	if (!of_property_read_u32(np, "gpmc,page-burst-access", &val))
		gpmc_t->page_burst_access = val;

	if (!of_property_read_u32(np, "gpmc,access", &val))
		gpmc_t->access = val;

	if (!of_property_read_u32(np, "gpmc,rd-cycle", &val))
		gpmc_t->rd_cycle = val;

	if (!of_property_read_u32(np, "gpmc,wr-cycle", &val))
		gpmc_t->wr_cycle = val;

	/* only for OMAP3430 */
	if (!of_property_read_u32(np, "gpmc,wr-access", &val))
		gpmc_t->wr_access = val;

	if (!of_property_read_u32(np, "gpmc,wr-data-mux-bus", &val))
		gpmc_t->wr_data_mux_bus = val;
}
  983. #ifdef CONFIG_MTD_NAND
/* Strings accepted in the "ti,nand-ecc-opt" DT property, indexed by the
 * matching OMAP_ECC_* value (the array index becomes ecc_opt). */
static const char * const nand_ecc_opts[] = {
	[OMAP_ECC_HAMMING_CODE_DEFAULT]		= "sw",
	[OMAP_ECC_HAMMING_CODE_HW]		= "hw",
	[OMAP_ECC_HAMMING_CODE_HW_ROMCODE]	= "hw-romcode",
	[OMAP_ECC_BCH4_CODE_HW]			= "bch4",
	[OMAP_ECC_BCH8_CODE_HW]			= "bch8",
};
/*
 * gpmc_probe_nand_child - set up a NAND device from a GPMC child node
 * @pdev: the GPMC platform device
 * @child: the "nand" child node
 *
 * Reads the chip select ('reg'), ECC option, bus width and timings from
 * @child and hands them to gpmc_nand_init().  Returns 0 on success,
 * -ENODEV when 'reg' is missing, -ENOMEM on allocation failure.
 */
static int gpmc_probe_nand_child(struct platform_device *pdev,
				 struct device_node *child)
{
	u32 val;
	const char *s;
	struct gpmc_timings gpmc_t;
	struct omap_nand_platform_data *gpmc_nand_data;

	if (of_property_read_u32(child, "reg", &val) < 0) {
		dev_err(&pdev->dev, "%s has no 'reg' property\n",
			child->full_name);
		return -ENODEV;
	}

	/* devm allocation: freed automatically on probe failure/unbind */
	gpmc_nand_data = devm_kzalloc(&pdev->dev, sizeof(*gpmc_nand_data),
				      GFP_KERNEL);
	if (!gpmc_nand_data)
		return -ENOMEM;

	gpmc_nand_data->cs = val;
	gpmc_nand_data->of_node = child;

	/* map the ECC option string onto its OMAP_ECC_* index; an
	 * unrecognized string silently leaves ecc_opt at 0 ("sw") */
	if (!of_property_read_string(child, "ti,nand-ecc-opt", &s))
		for (val = 0; val < ARRAY_SIZE(nand_ecc_opts); val++)
			if (!strcasecmp(s, nand_ecc_opts[val])) {
				gpmc_nand_data->ecc_opt = val;
				break;
			}

	/* NOTE(review): of_get_nand_bus_width() returns a negative errno
	 * on failure; anything other than 16 falls back to 8-bit here */
	val = of_get_nand_bus_width(child);
	if (val == 16)
		gpmc_nand_data->devsize = NAND_BUSWIDTH_16;

	gpmc_read_timings_dt(child, &gpmc_t);
	gpmc_nand_init(gpmc_nand_data, &gpmc_t);

	return 0;
}
  1022. #else
/* CONFIG_MTD_NAND disabled: NAND child nodes are silently ignored */
static int gpmc_probe_nand_child(struct platform_device *pdev,
				 struct device_node *child)
{
	return 0;
}
  1028. #endif
  1029. #ifdef CONFIG_MTD_ONENAND
/*
 * gpmc_probe_onenand_child - set up a OneNAND device from a GPMC child node
 * @pdev: the GPMC platform device
 * @child: the "onenand" child node
 *
 * Reads the chip select ('reg') and optional DMA channel from @child
 * and hands them to gpmc_onenand_init().  Returns 0 on success,
 * -ENODEV when 'reg' is missing, -ENOMEM on allocation failure.
 */
static int gpmc_probe_onenand_child(struct platform_device *pdev,
				    struct device_node *child)
{
	u32 val;
	struct omap_onenand_platform_data *gpmc_onenand_data;

	if (of_property_read_u32(child, "reg", &val) < 0) {
		dev_err(&pdev->dev, "%s has no 'reg' property\n",
			child->full_name);
		return -ENODEV;
	}

	/* devm allocation: freed automatically on probe failure/unbind */
	gpmc_onenand_data = devm_kzalloc(&pdev->dev, sizeof(*gpmc_onenand_data),
					 GFP_KERNEL);
	if (!gpmc_onenand_data)
		return -ENOMEM;

	gpmc_onenand_data->cs = val;
	gpmc_onenand_data->of_node = child;

	/* -1 means "no DMA" unless the node specifies a channel */
	gpmc_onenand_data->dma_channel = -1;

	if (!of_property_read_u32(child, "dma-channel", &val))
		gpmc_onenand_data->dma_channel = val;

	gpmc_onenand_init(gpmc_onenand_data);

	return 0;
}
  1052. #else
/* CONFIG_MTD_ONENAND disabled: OneNAND child nodes are silently ignored */
static int gpmc_probe_onenand_child(struct platform_device *pdev,
				    struct device_node *child)
{
	return 0;
}
  1058. #endif
/*
 * gpmc_probe_dt - probe GPMC child devices described in the device tree
 * @pdev: the GPMC platform device
 *
 * Walks "nand" and "onenand" nodes and initialises each.  Returns 0 on
 * success (or when not probed via DT), or the first child's error code.
 *
 * NOTE(review): for_each_node_by_name() walks the entire device tree,
 * not just children of the GPMC node - confirm "nand"/"onenand" node
 * names cannot appear under another controller.
 */
static int gpmc_probe_dt(struct platform_device *pdev)
{
	int ret;
	struct device_node *child;
	const struct of_device_id *of_id =
		of_match_device(gpmc_dt_ids, &pdev->dev);

	/* not matched via DT: nothing to do */
	if (!of_id)
		return 0;

	for_each_node_by_name(child, "nand") {
		ret = gpmc_probe_nand_child(pdev, child);
		if (ret < 0) {
			/* drop the reference held by the iterator */
			of_node_put(child);
			return ret;
		}
	}

	for_each_node_by_name(child, "onenand") {
		ret = gpmc_probe_onenand_child(pdev, child);
		if (ret < 0) {
			of_node_put(child);
			return ret;
		}
	}
	return 0;
}
  1083. #else
/* CONFIG_OF disabled: no device-tree children to probe */
static int gpmc_probe_dt(struct platform_device *pdev)
{
	return 0;
}
  1088. #endif
/*
 * gpmc_probe - map GPMC registers, enable its clock and initialise it
 * @pdev: the GPMC platform device
 *
 * Maps the register space, records the optional IRQ, enables the "fck"
 * functional clock, detects hardware capabilities from the revision
 * register, reserves the GPMC memory region and probes DT children.
 * Returns 0 on success or a negative errno.
 */
static int gpmc_probe(struct platform_device *pdev)
{
	int rc;
	u32 l;
	struct resource *res;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (res == NULL)
		return -ENOENT;

	phys_base = res->start;
	mem_size = resource_size(res);

	gpmc_base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(gpmc_base))
		return PTR_ERR(gpmc_base);

	/* the IRQ is optional: warn but continue without it */
	res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
	if (res == NULL)
		dev_warn(&pdev->dev, "Failed to get resource: irq\n");
	else
		gpmc_irq = res->start;

	gpmc_l3_clk = clk_get(&pdev->dev, "fck");
	if (IS_ERR(gpmc_l3_clk)) {
		dev_err(&pdev->dev, "error: clk_get\n");
		gpmc_irq = 0;
		return PTR_ERR(gpmc_l3_clk);
	}

	clk_prepare_enable(gpmc_l3_clk);

	gpmc_dev = &pdev->dev;

	l = gpmc_read_reg(GPMC_REVISION);
	/* WR_ACCESS/WR_DATA_MUX_BUS fields exist from revision 5 onwards */
	if (GPMC_REVISION_MAJOR(l) > 0x4)
		gpmc_capability = GPMC_HAS_WR_ACCESS | GPMC_HAS_WR_DATA_MUX_BUS;
	dev_info(gpmc_dev, "GPMC revision %d.%d\n", GPMC_REVISION_MAJOR(l),
		 GPMC_REVISION_MINOR(l));

	rc = gpmc_mem_init();
	if (rc < 0) {
		clk_disable_unprepare(gpmc_l3_clk);
		clk_put(gpmc_l3_clk);
		dev_err(gpmc_dev, "failed to reserve memory\n");
		return rc;
	}

	/* IRQ chip setup is best-effort; the GPMC works without it */
	if (gpmc_setup_irq() < 0)
		dev_warn(gpmc_dev, "gpmc_setup_irq failed\n");

	/* Now the GPMC is initialised, unreserve the chip-selects */
	gpmc_cs_map = 0;

	rc = gpmc_probe_dt(pdev);
	if (rc < 0) {
		/* NOTE(review): gpmc_mem_exit()/gpmc_free_irq() are not
		 * undone on this path - verify whether that is intended */
		clk_disable_unprepare(gpmc_l3_clk);
		clk_put(gpmc_l3_clk);
		dev_err(gpmc_dev, "failed to probe DT parameters\n");
		return rc;
	}

	return 0;
}
  1140. static int gpmc_remove(struct platform_device *pdev)
  1141. {
  1142. gpmc_free_irq();
  1143. gpmc_mem_exit();
  1144. gpmc_dev = NULL;
  1145. return 0;
  1146. }
/* Platform driver glue; matched by name on legacy boots and by
 * compatible string (gpmc_dt_ids) on device-tree boots */
static struct platform_driver gpmc_driver = {
	.probe		= gpmc_probe,
	.remove		= gpmc_remove,
	.driver		= {
		.name	= DEVICE_NAME,
		.owner	= THIS_MODULE,
		.of_match_table = of_match_ptr(gpmc_dt_ids),
	},
};
/* Register the GPMC platform driver at postcore time so dependent
 * devices (NAND, OneNAND, ...) can probe later in boot */
static __init int gpmc_init(void)
{
	return platform_driver_register(&gpmc_driver);
}

static __exit void gpmc_exit(void)
{
	platform_driver_unregister(&gpmc_driver);
}

omap_postcore_initcall(gpmc_init);
module_exit(gpmc_exit);
/*
 * omap_gpmc_init - create the GPMC platform device on legacy boots
 *
 * Builds the device from hwmod data when there is no device tree.
 * Returns 0 on success, -ENODEV on DT boots or missing hwmod, or the
 * omap_device_build() error code.
 */
static int __init omap_gpmc_init(void)
{
	struct omap_hwmod *oh;
	struct platform_device *pdev;
	char *oh_name = "gpmc";

	/*
	 * if the board boots up with a populated DT, do not
	 * manually add the device from this initcall
	 */
	if (of_have_populated_dt())
		return -ENODEV;

	oh = omap_hwmod_lookup(oh_name);
	if (!oh) {
		pr_err("Could not look up %s\n", oh_name);
		return -ENODEV;
	}

	pdev = omap_device_build(DEVICE_NAME, -1, oh, NULL, 0);
	WARN(IS_ERR(pdev), "could not build omap_device for %s\n", oh_name);

	return IS_ERR(pdev) ? PTR_ERR(pdev) : 0;
}
omap_postcore_initcall(omap_gpmc_init);
  1187. static irqreturn_t gpmc_handle_irq(int irq, void *dev)
  1188. {
  1189. int i;
  1190. u32 regval;
  1191. regval = gpmc_read_reg(GPMC_IRQSTATUS);
  1192. if (!regval)
  1193. return IRQ_NONE;
  1194. for (i = 0; i < GPMC_NR_IRQ; i++)
  1195. if (regval & gpmc_client_irq[i].bitmask)
  1196. generic_handle_irq(gpmc_client_irq[i].irq);
  1197. gpmc_write_reg(GPMC_IRQSTATUS, regval);
  1198. return IRQ_HANDLED;
  1199. }
  1200. #ifdef CONFIG_ARCH_OMAP3
/* Register snapshot preserved across an OMAP3 off-mode transition */
static struct omap3_gpmc_regs gpmc_context;

/*
 * omap3_gpmc_save_context - save GPMC state before power is cut
 *
 * Copies the global configuration registers and, for every enabled
 * chip select, its CONFIG1-CONFIG7 registers into gpmc_context so
 * omap3_gpmc_restore_context() can reprogram them on wakeup.
 */
void omap3_gpmc_save_context(void)
{
	int i;

	gpmc_context.sysconfig = gpmc_read_reg(GPMC_SYSCONFIG);
	gpmc_context.irqenable = gpmc_read_reg(GPMC_IRQENABLE);
	gpmc_context.timeout_ctrl = gpmc_read_reg(GPMC_TIMEOUT_CONTROL);
	gpmc_context.config = gpmc_read_reg(GPMC_CONFIG);
	gpmc_context.prefetch_config1 = gpmc_read_reg(GPMC_PREFETCH_CONFIG1);
	gpmc_context.prefetch_config2 = gpmc_read_reg(GPMC_PREFETCH_CONFIG2);
	gpmc_context.prefetch_control = gpmc_read_reg(GPMC_PREFETCH_CONTROL);

	for (i = 0; i < GPMC_CS_NUM; i++) {
		/* only save chip selects whose memory window is enabled */
		gpmc_context.cs_context[i].is_valid = gpmc_cs_mem_enabled(i);
		if (gpmc_context.cs_context[i].is_valid) {
			gpmc_context.cs_context[i].config1 =
				gpmc_cs_read_reg(i, GPMC_CS_CONFIG1);
			gpmc_context.cs_context[i].config2 =
				gpmc_cs_read_reg(i, GPMC_CS_CONFIG2);
			gpmc_context.cs_context[i].config3 =
				gpmc_cs_read_reg(i, GPMC_CS_CONFIG3);
			gpmc_context.cs_context[i].config4 =
				gpmc_cs_read_reg(i, GPMC_CS_CONFIG4);
			gpmc_context.cs_context[i].config5 =
				gpmc_cs_read_reg(i, GPMC_CS_CONFIG5);
			gpmc_context.cs_context[i].config6 =
				gpmc_cs_read_reg(i, GPMC_CS_CONFIG6);
			gpmc_context.cs_context[i].config7 =
				gpmc_cs_read_reg(i, GPMC_CS_CONFIG7);
		}
	}
}
/*
 * omap3_gpmc_restore_context - restore GPMC state after off-mode
 *
 * Writes back the registers captured by omap3_gpmc_save_context():
 * the global configuration first, then CONFIG1-CONFIG7 for each chip
 * select that was enabled at save time.
 */
void omap3_gpmc_restore_context(void)
{
	int i;

	gpmc_write_reg(GPMC_SYSCONFIG, gpmc_context.sysconfig);
	gpmc_write_reg(GPMC_IRQENABLE, gpmc_context.irqenable);
	gpmc_write_reg(GPMC_TIMEOUT_CONTROL, gpmc_context.timeout_ctrl);
	gpmc_write_reg(GPMC_CONFIG, gpmc_context.config);
	gpmc_write_reg(GPMC_PREFETCH_CONFIG1, gpmc_context.prefetch_config1);
	gpmc_write_reg(GPMC_PREFETCH_CONFIG2, gpmc_context.prefetch_config2);
	gpmc_write_reg(GPMC_PREFETCH_CONTROL, gpmc_context.prefetch_control);

	for (i = 0; i < GPMC_CS_NUM; i++) {
		if (gpmc_context.cs_context[i].is_valid) {
			gpmc_cs_write_reg(i, GPMC_CS_CONFIG1,
				gpmc_context.cs_context[i].config1);
			gpmc_cs_write_reg(i, GPMC_CS_CONFIG2,
				gpmc_context.cs_context[i].config2);
			gpmc_cs_write_reg(i, GPMC_CS_CONFIG3,
				gpmc_context.cs_context[i].config3);
			gpmc_cs_write_reg(i, GPMC_CS_CONFIG4,
				gpmc_context.cs_context[i].config4);
			gpmc_cs_write_reg(i, GPMC_CS_CONFIG5,
				gpmc_context.cs_context[i].config5);
			gpmc_cs_write_reg(i, GPMC_CS_CONFIG6,
				gpmc_context.cs_context[i].config6);
			gpmc_cs_write_reg(i, GPMC_CS_CONFIG7,
				gpmc_context.cs_context[i].config7);
		}
	}
}
  1261. #endif /* CONFIG_ARCH_OMAP3 */