/*
 * linux/arch/arm/mm/proc-xsc3.S
 *
 * Original Author: Matthew Gilbert
 * Current Maintainer: Lennert Buytenhek <buytenh@wantstofly.org>
 *
 * Copyright 2004 (C) Intel Corp.
 * Copyright 2005 (C) MontaVista Software, Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * MMU functions for the Intel XScale3 Core (XSC3).  The XSC3 core is
 * an extension to Intel's original XScale core that adds the following
 * features:
 *
 * - ARMv6 Supersections
 * - Low Locality Reference pages (replaces mini-cache)
 * - 36-bit addressing
 * - L2 cache
 * - Cache coherency if chipset supports it
 *
 * Based on original XScale code by Nicolas Pitre.
 */

#include <linux/linkage.h>
#include <linux/init.h>
#include <asm/assembler.h>
#include <asm/hwcap.h>
#include <asm/pgtable.h>
#include <asm/pgtable-hwdef.h>
#include <asm/page.h>
#include <asm/ptrace.h>
#include "proc-macros.S"

/*
 * This is the maximum size of an area which will be flushed.  If the
 * area is larger than this, then we flush the whole cache.
 */
#define MAX_AREA_SIZE	32768

/*
 * The cache line size of the L1 I, L1 D and unified L2 cache.
 */
#define CACHELINESIZE	32

/*
 * The size of the L1 D cache.
 */
#define CACHESIZE	32768
/*
 * This macro is used to wait for a CP15 write and is needed when we
 * have to ensure that the last operation to the coprocessor has
 * completed before continuing.
 */
	.macro	cpwait_ret, lr, rd
	mrc	p15, 0, \rd, c2, c0, 0		@ arbitrary read of cp15
	sub	pc, \lr, \rd, LSR #32		@ wait for completion and
						@ flush instruction pipeline
	.endm
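/*
 * Editor's note (not from the original source): the MRC read cannot
 * complete until earlier CP15 operations have drained, and "LSR #32"
 * of any value is 0 in ARM mode, so the SUB computes pc = lr - 0 while
 * still carrying a data dependency on \rd.  Writing to pc then flushes
 * the instruction pipeline before the macro returns.
 */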
/*
 * This macro cleans and invalidates the entire L1 D cache.
 */
	.macro	clean_d_cache rd, rs
	mov	\rd, #0x1f00
	orr	\rd, \rd, #0x00e0
1:	mcr	p15, 0, \rd, c7, c14, 2		@ clean/invalidate L1 D line
	adds	\rd, \rd, #0x40000000
	bcc	1b
	subs	\rd, \rd, #0x20
	bpl	1b
	.endm
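/*
 * Editor's note on the loop shape: the L1 D cache is 32 KB (CACHESIZE),
 * 4-way, with 32-byte lines, i.e. 256 sets.  \rd starts at 0x1fe0
 * (set 255, way 0); adding 0x40000000 steps the way field in bits
 * [31:30] until it wraps (carry set), then 0x20 is subtracted to move
 * to the next lower set.  A rough C sketch of the same walk, assuming
 * that geometry (clean_inv_l1d_set_way() is a hypothetical helper for
 * the set/way MCR above):
 *
 *	for (int set = 255; set >= 0; set--)
 *		for (unsigned int way = 0; way < 4; way++)
 *			clean_inv_l1d_set_way((way << 30) | ((unsigned int)set << 5));
 */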
	.text

/*
 * cpu_xsc3_proc_init()
 *
 * Nothing too exciting at the moment
 */
ENTRY(cpu_xsc3_proc_init)
	mov	pc, lr

/*
 * cpu_xsc3_proc_fin()
 */
ENTRY(cpu_xsc3_proc_fin)
	mrc	p15, 0, r0, c1, c0, 0		@ ctrl register
	bic	r0, r0, #0x1800			@ ...IZ...........
	bic	r0, r0, #0x0006			@ .............CA.
	mcr	p15, 0, r0, c1, c0, 0		@ disable caches
	mov	pc, lr

/*
 * cpu_xsc3_reset(loc)
 *
 * Perform a soft reset of the system.  Put the CPU into the
 * same state as it would be if it had been reset, and branch
 * to what would be the reset vector.
 *
 * loc: location to jump to for soft reset
 */
	.align	5
ENTRY(cpu_xsc3_reset)
	mov	r1, #PSR_F_BIT|PSR_I_BIT|SVC_MODE
	msr	cpsr_c, r1			@ reset CPSR
	mrc	p15, 0, r1, c1, c0, 0		@ ctrl register
	bic	r1, r1, #0x3900			@ ..VIZ..S........
	bic	r1, r1, #0x0086			@ ........B....CA.
	mcr	p15, 0, r1, c1, c0, 0		@ ctrl register
	mcr	p15, 0, ip, c7, c7, 0		@ invalidate L1 caches and BTB
	bic	r1, r1, #0x0001			@ ...............M
	mcr	p15, 0, r1, c1, c0, 0		@ ctrl register
	@ CAUTION: MMU turned off from this point.  We count on the pipeline
	@ already containing those two last instructions to survive.
	mcr	p15, 0, ip, c8, c7, 0		@ invalidate I and D TLBs
	mov	pc, r0

/*
 * cpu_xsc3_do_idle()
 *
 * Cause the processor to idle
 *
 * For now we do nothing but go to idle mode for every case
 *
 * XScale supports clock switching, but using idle mode support
 * allows external hardware to react to system state changes.
 */
	.align	5
ENTRY(cpu_xsc3_do_idle)
	mov	r0, #1
	mcr	p14, 0, r0, c7, c0, 0		@ go to idle
	mov	pc, lr

/* ================================= CACHE ================================ */

/*
 * flush_icache_all()
 *
 * Unconditionally clean and invalidate the entire icache.
 */
ENTRY(xsc3_flush_icache_all)
	mov	r0, #0
	mcr	p15, 0, r0, c7, c5, 0		@ invalidate I cache
	mov	pc, lr
ENDPROC(xsc3_flush_icache_all)

/*
 * flush_user_cache_all()
 *
 * Invalidate all cache entries in a particular address
 * space.
 */
ENTRY(xsc3_flush_user_cache_all)
	/* FALLTHROUGH */

/*
 * flush_kern_cache_all()
 *
 * Clean and invalidate the entire cache.
 */
ENTRY(xsc3_flush_kern_cache_all)
	mov	r2, #VM_EXEC
	mov	ip, #0
__flush_whole_cache:
	clean_d_cache r0, r1
	tst	r2, #VM_EXEC
	mcrne	p15, 0, ip, c7, c5, 0		@ invalidate L1 I cache and BTB
	mcrne	p15, 0, ip, c7, c10, 4		@ data write barrier
	mcrne	p15, 0, ip, c7, c5, 4		@ prefetch flush
	mov	pc, lr
/*
 * flush_user_cache_range(start, end, vm_flags)
 *
 * Invalidate a range of cache entries in the specified
 * address space.
 *
 * - start - start address (may not be aligned)
 * - end - end address (exclusive, may not be aligned)
 * - vm_flags - flags of the vm_area_struct describing the address space
 */
	.align	5
ENTRY(xsc3_flush_user_cache_range)
	mov	ip, #0
	sub	r3, r1, r0			@ calculate total size
	cmp	r3, #MAX_AREA_SIZE
	bhs	__flush_whole_cache
1:	tst	r2, #VM_EXEC
	mcrne	p15, 0, r0, c7, c5, 1		@ invalidate L1 I line
	mcr	p15, 0, r0, c7, c14, 1		@ clean/invalidate L1 D line
	add	r0, r0, #CACHELINESIZE
	cmp	r0, r1
	blo	1b
	tst	r2, #VM_EXEC
	mcrne	p15, 0, ip, c7, c5, 6		@ invalidate BTB
	mcrne	p15, 0, ip, c7, c10, 4		@ data write barrier
	mcrne	p15, 0, ip, c7, c5, 4		@ prefetch flush
	mov	pc, lr
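/*
 * Editor's note on the MAX_AREA_SIZE check above: for ranges of 32 KB
 * or more (the size of the L1 D cache) it is generally cheaper to clean
 * and invalidate the whole cache via __flush_whole_cache than to walk
 * the range one 32-byte line at a time.
 */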
/*
 * coherent_kern_range(start, end)
 *
 * Ensure coherency between the I cache and the D cache in the
 * region described by start, end.  If you have non-snooping
 * Harvard caches, you need to implement this function.
 *
 * - start - virtual start address
 * - end - virtual end address
 *
 * Note: single I-cache line invalidation isn't used here since
 * it also trashes the mini I-cache used by JTAG debuggers.
 */
ENTRY(xsc3_coherent_kern_range)
	/* FALLTHROUGH */
ENTRY(xsc3_coherent_user_range)
	bic	r0, r0, #CACHELINESIZE - 1
1:	mcr	p15, 0, r0, c7, c10, 1		@ clean L1 D line
	add	r0, r0, #CACHELINESIZE
	cmp	r0, r1
	blo	1b
	mov	r0, #0
	mcr	p15, 0, r0, c7, c5, 0		@ invalidate L1 I cache and BTB
	mcr	p15, 0, r0, c7, c10, 4		@ data write barrier
	mcr	p15, 0, r0, c7, c5, 4		@ prefetch flush
	mov	pc, lr

/*
 * flush_kern_dcache_area(void *addr, size_t size)
 *
 * Ensure no D cache aliasing occurs, either with itself or
 * the I cache.
 *
 * - addr - kernel address
 * - size - region size
 */
ENTRY(xsc3_flush_kern_dcache_area)
	add	r1, r0, r1
1:	mcr	p15, 0, r0, c7, c14, 1		@ clean/invalidate L1 D line
	add	r0, r0, #CACHELINESIZE
	cmp	r0, r1
	blo	1b
	mov	r0, #0
	mcr	p15, 0, r0, c7, c5, 0		@ invalidate L1 I cache and BTB
	mcr	p15, 0, r0, c7, c10, 4		@ data write barrier
	mcr	p15, 0, r0, c7, c5, 4		@ prefetch flush
	mov	pc, lr

/*
 * dma_inv_range(start, end)
 *
 * Invalidate (discard) the specified virtual address range.
 * May not write back any entries.  If 'start' or 'end'
 * are not cache line aligned, those lines must be written
 * back.
 *
 * - start - virtual start address
 * - end - virtual end address
 */
xsc3_dma_inv_range:
	tst	r0, #CACHELINESIZE - 1
	bic	r0, r0, #CACHELINESIZE - 1
	mcrne	p15, 0, r0, c7, c10, 1		@ clean L1 D line
	tst	r1, #CACHELINESIZE - 1
	mcrne	p15, 0, r1, c7, c10, 1		@ clean L1 D line
1:	mcr	p15, 0, r0, c7, c6, 1		@ invalidate L1 D line
	add	r0, r0, #CACHELINESIZE
	cmp	r0, r1
	blo	1b
	mcr	p15, 0, r0, c7, c10, 4		@ data write barrier
	mov	pc, lr
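/*
 * Editor's note on the unaligned ends: a cache line that straddles the
 * start or end of the buffer may also hold unrelated data, so it is
 * cleaned (written back) before the invalidate loop discards it.  A
 * rough C sketch of the same logic; clean_l1d_line() and
 * invalidate_l1d_line() are hypothetical per-line helpers:
 *
 *	if (start & (CACHELINESIZE - 1))
 *		clean_l1d_line(start & ~(CACHELINESIZE - 1));
 *	if (end & (CACHELINESIZE - 1))
 *		clean_l1d_line(end & ~(CACHELINESIZE - 1));
 *	for (addr = start & ~(CACHELINESIZE - 1); addr < end;
 *	     addr += CACHELINESIZE)
 *		invalidate_l1d_line(addr);
 */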
/*
 * dma_clean_range(start, end)
 *
 * Clean the specified virtual address range.
 *
 * - start - virtual start address
 * - end - virtual end address
 */
xsc3_dma_clean_range:
	bic	r0, r0, #CACHELINESIZE - 1
1:	mcr	p15, 0, r0, c7, c10, 1		@ clean L1 D line
	add	r0, r0, #CACHELINESIZE
	cmp	r0, r1
	blo	1b
	mcr	p15, 0, r0, c7, c10, 4		@ data write barrier
	mov	pc, lr

/*
 * dma_flush_range(start, end)
 *
 * Clean and invalidate the specified virtual address range.
 *
 * - start - virtual start address
 * - end - virtual end address
 */
ENTRY(xsc3_dma_flush_range)
	bic	r0, r0, #CACHELINESIZE - 1
1:	mcr	p15, 0, r0, c7, c14, 1		@ clean/invalidate L1 D line
	add	r0, r0, #CACHELINESIZE
	cmp	r0, r1
	blo	1b
	mcr	p15, 0, r0, c7, c10, 4		@ data write barrier
	mov	pc, lr

/*
 * dma_map_area(start, size, dir)
 * - start - kernel virtual start address
 * - size - size of region
 * - dir - DMA direction
 */
ENTRY(xsc3_dma_map_area)
	add	r1, r1, r0
	cmp	r2, #DMA_TO_DEVICE
	beq	xsc3_dma_clean_range
	bcs	xsc3_dma_inv_range
	b	xsc3_dma_flush_range
ENDPROC(xsc3_dma_map_area)
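/*
 * Editor's note: the compare/branch sequence above dispatches on the
 * DMA direction.  With the usual enum dma_data_direction ordering
 * (DMA_BIDIRECTIONAL = 0, DMA_TO_DEVICE = 1, DMA_FROM_DEVICE = 2) it
 * is roughly equivalent to:
 *
 *	if (dir == DMA_TO_DEVICE)
 *		xsc3_dma_clean_range(start, end);	// writeback only
 *	else if (dir > DMA_TO_DEVICE)			// DMA_FROM_DEVICE
 *		xsc3_dma_inv_range(start, end);		// discard
 *	else						// DMA_BIDIRECTIONAL
 *		xsc3_dma_flush_range(start, end);	// clean + invalidate
 */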
/*
 * dma_unmap_area(start, size, dir)
 * - start - kernel virtual start address
 * - size - size of region
 * - dir - DMA direction
 */
ENTRY(xsc3_dma_unmap_area)
	mov	pc, lr
ENDPROC(xsc3_dma_unmap_area)

ENTRY(xsc3_cache_fns)
	.long	xsc3_flush_icache_all
	.long	xsc3_flush_kern_cache_all
	.long	xsc3_flush_user_cache_all
	.long	xsc3_flush_user_cache_range
	.long	xsc3_coherent_kern_range
	.long	xsc3_coherent_user_range
	.long	xsc3_flush_kern_dcache_area
	.long	xsc3_dma_map_area
	.long	xsc3_dma_unmap_area
	.long	xsc3_dma_flush_range

ENTRY(cpu_xsc3_dcache_clean_area)
1:	mcr	p15, 0, r0, c7, c10, 1		@ clean L1 D line
	add	r0, r0, #CACHELINESIZE
	subs	r1, r1, #CACHELINESIZE
	bhi	1b
	mov	pc, lr

/* =============================== PageTable ============================== */

/*
 * cpu_xsc3_switch_mm(pgd)
 *
 * Set the translation base pointer to be as described by pgd.
 *
 * pgd: new page tables
 */
	.align	5
ENTRY(cpu_xsc3_switch_mm)
	clean_d_cache r1, r2
	mcr	p15, 0, ip, c7, c5, 0		@ invalidate L1 I cache and BTB
	mcr	p15, 0, ip, c7, c10, 4		@ data write barrier
	mcr	p15, 0, ip, c7, c5, 4		@ prefetch flush
	orr	r0, r0, #0x18			@ cache the page table in L2
	mcr	p15, 0, r0, c2, c0, 0		@ load page table pointer
	mcr	p15, 0, ip, c8, c7, 0		@ invalidate I and D TLBs
	cpwait_ret lr, ip
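/*
 * Editor's note: the "orr r0, r0, #0x18" above sets attribute bits in
 * the translation table base register so that hardware page table
 * walks are cached in the unified L2 (presumably the ARMv6-style outer
 * cacheability RGN field, bits [4:3] = 0b11 for outer write-back).
 * The same value is applied to the boot page tables in __xsc3_setup.
 */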
/*
 * cpu_xsc3_set_pte_ext(ptep, pte, ext)
 *
 * Set a PTE and flush it out
 */
cpu_xsc3_mt_table:
	.long	0x00						@ L_PTE_MT_UNCACHED
	.long	PTE_EXT_TEX(1)					@ L_PTE_MT_BUFFERABLE
	.long	PTE_EXT_TEX(5) | PTE_CACHEABLE			@ L_PTE_MT_WRITETHROUGH
	.long	PTE_CACHEABLE | PTE_BUFFERABLE			@ L_PTE_MT_WRITEBACK
	.long	PTE_EXT_TEX(1) | PTE_BUFFERABLE			@ L_PTE_MT_DEV_SHARED
	.long	0x00						@ unused
	.long	0x00						@ L_PTE_MT_MINICACHE (not present)
	.long	PTE_EXT_TEX(5) | PTE_CACHEABLE | PTE_BUFFERABLE	@ L_PTE_MT_WRITEALLOC (not present?)
	.long	0x00						@ unused
	.long	PTE_EXT_TEX(1)					@ L_PTE_MT_DEV_WC
	.long	0x00						@ unused
	.long	PTE_CACHEABLE | PTE_BUFFERABLE			@ L_PTE_MT_DEV_CACHED
	.long	PTE_EXT_TEX(2)					@ L_PTE_MT_DEV_NONSHARED
	.long	0x00						@ unused
	.long	0x00						@ unused
	.long	0x00						@ unused

	.align	5
ENTRY(cpu_xsc3_set_pte_ext)
	xscale_set_pte_ext_prologue

	tst	r1, #L_PTE_SHARED		@ shared?
	and	r1, r1, #L_PTE_MT_MASK
	adr	ip, cpu_xsc3_mt_table
	ldr	ip, [ip, r1]
	orrne	r2, r2, #PTE_EXT_COHERENT	@ interlock: mask in coherent bit
	bic	r2, r2, #0x0c			@ clear old C,B bits
	orr	r2, r2, ip

	xscale_set_pte_ext_epilogue
	mov	pc, lr
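/*
 * Editor's note: the "ldr ip, [ip, r1]" above uses the Linux memory
 * type field of the PTE directly as a byte offset: the L_PTE_MT_*
 * values are 4-bit indices already shifted left by 2, so with the
 * 16-entry table of words the lookup is roughly the C expression
 *
 *	hw_bits = cpu_xsc3_mt_table[(pte & L_PTE_MT_MASK) >> 2];
 *
 * (sketch only; the exact field layout comes from asm/pgtable.h of
 * this kernel version).
 */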
	.ltorg
	.align

.globl	cpu_xsc3_suspend_size
.equ	cpu_xsc3_suspend_size, 4 * 8
#ifdef CONFIG_PM_SLEEP
ENTRY(cpu_xsc3_do_suspend)
	stmfd	sp!, {r4 - r10, lr}
	mrc	p14, 0, r4, c6, c0, 0		@ clock configuration, for turbo mode
	mrc	p15, 0, r5, c15, c1, 0		@ CP access reg
	mrc	p15, 0, r6, c13, c0, 0		@ PID
	mrc	p15, 0, r7, c3, c0, 0		@ domain ID
	mrc	p15, 0, r8, c2, c0, 0		@ translation table base addr
	mrc	p15, 0, r9, c1, c0, 1		@ auxiliary control reg
	mrc	p15, 0, r10, c1, c0, 0		@ control reg
	bic	r4, r4, #2			@ clear frequency change bit
	stmia	r0, {r1, r4 - r10}		@ store v:p offset + cp regs
	ldmia	sp!, {r4 - r10, pc}
ENDPROC(cpu_xsc3_do_suspend)
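/*
 * Editor's note: cpu_xsc3_suspend_size is 4 * 8 = 32 bytes, matching
 * the eight words stored by the stmia above.  Laid out as a C struct
 * (sketch only; field names invented here for illustration):
 *
 *	struct xsc3_suspend_regs {
 *		u32 vp_offset;	// r1: v:p offset passed in by the caller
 *		u32 cclkcfg;	// CP14 c6: clock configuration
 *		u32 cpar;	// CP15 c15,c1: coprocessor access
 *		u32 pid;	// CP15 c13: PID
 *		u32 dacr;	// CP15 c3: domain access control
 *		u32 ttb;	// CP15 c2: translation table base
 *		u32 auxcr;	// CP15 c1,1: auxiliary control
 *		u32 cr;		// CP15 c1,0: control register
 *	};
 */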
ENTRY(cpu_xsc3_do_resume)
	ldmia	r0, {r1, r4 - r10}		@ load v:p offset + cp regs
	mov	ip, #0
	mcr	p15, 0, ip, c7, c7, 0		@ invalidate I & D caches, BTB
	mcr	p15, 0, ip, c7, c10, 4		@ drain write (&fill) buffer
	mcr	p15, 0, ip, c7, c5, 4		@ flush prefetch buffer
	mcr	p15, 0, ip, c8, c7, 0		@ invalidate I & D TLBs
	mcr	p14, 0, r4, c6, c0, 0		@ clock configuration, turbo mode.
	mcr	p15, 0, r5, c15, c1, 0		@ CP access reg
	mcr	p15, 0, r6, c13, c0, 0		@ PID
	mcr	p15, 0, r7, c3, c0, 0		@ domain ID
	mcr	p15, 0, r8, c2, c0, 0		@ translation table base addr
	mcr	p15, 0, r9, c1, c0, 1		@ auxiliary control reg

	@ temporarily map resume_turn_on_mmu into the page table,
	@ otherwise prefetch abort occurs after MMU is turned on
	mov	r0, r10				@ control register
	mov	r2, r8, lsr #14			@ get TTB0 base
	mov	r2, r2, lsl #14
	ldr	r3, =0x542e			@ section flags
	b	cpu_resume_mmu
ENDPROC(cpu_xsc3_do_resume)
#else
#define cpu_xsc3_do_suspend	0
#define cpu_xsc3_do_resume	0
#endif
	__CPUINIT

	.type	__xsc3_setup, #function
__xsc3_setup:
	mov	r0, #PSR_F_BIT|PSR_I_BIT|SVC_MODE
	msr	cpsr_c, r0
	mcr	p15, 0, ip, c7, c7, 0		@ invalidate L1 caches and BTB
	mcr	p15, 0, ip, c7, c10, 4		@ data write barrier
	mcr	p15, 0, ip, c7, c5, 4		@ prefetch flush
	mcr	p15, 0, ip, c8, c7, 0		@ invalidate I and D TLBs
	orr	r4, r4, #0x18			@ cache the page table in L2
	mcr	p15, 0, r4, c2, c0, 0		@ load page table pointer

	mov	r0, #1 << 6			@ cp6 access for early sched_clock
	mcr	p15, 0, r0, c15, c1, 0		@ write CP access register

	mrc	p15, 0, r0, c1, c0, 1		@ get auxiliary control reg
	and	r0, r0, #2			@ preserve the P bit setting
	orr	r0, r0, #(1 << 10)		@ enable L2 for LLR cache
	mcr	p15, 0, r0, c1, c0, 1		@ set auxiliary control reg

	adr	r5, xsc3_crval
	ldmia	r5, {r5, r6}

#ifdef CONFIG_CACHE_XSC3L2
	mrc	p15, 1, r0, c0, c0, 1		@ get L2 present information
	ands	r0, r0, #0xf8
	orrne	r6, r6, #(1 << 26)		@ enable L2 if present
#endif

	mrc	p15, 0, r0, c1, c0, 0		@ get control register
	bic	r0, r0, r5			@ ..V. ..R. .... ..A.
	orr	r0, r0, r6			@ ..VI Z..S .... .C.M (mmu)
						@ ...I Z..S .... .... (uc)
	mov	pc, lr

	.size	__xsc3_setup, . - __xsc3_setup

	.type	xsc3_crval, #object
xsc3_crval:
	crval	clear=0x04002202, mmuset=0x00003905, ucset=0x00001900
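/*
 * Editor's note: the crval macro (from proc-macros.S) emits the two
 * masks loaded into r5/r6 at the end of __xsc3_setup: r5 holds the
 * control-register bits to clear and r6 the bits to set (the mmuset
 * value, or ucset for uncached configurations).  __xsc3_setup only
 * computes the desired control-register value in r0; the generic boot
 * code that called it is responsible for writing it to CP15 c1.
 */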
	__INITDATA

/*
 * Purpose : Function pointers used to access above functions - all calls
 *	     come through these
 */
	.type	xsc3_processor_functions, #object
ENTRY(xsc3_processor_functions)
	.word	v5t_early_abort
	.word	legacy_pabort
	.word	cpu_xsc3_proc_init
	.word	cpu_xsc3_proc_fin
	.word	cpu_xsc3_reset
	.word	cpu_xsc3_do_idle
	.word	cpu_xsc3_dcache_clean_area
	.word	cpu_xsc3_switch_mm
	.word	cpu_xsc3_set_pte_ext
	.word	cpu_xsc3_suspend_size
	.word	cpu_xsc3_do_suspend
	.word	cpu_xsc3_do_resume
	.size	xsc3_processor_functions, . - xsc3_processor_functions

	.section ".rodata"

	.type	cpu_arch_name, #object
cpu_arch_name:
	.asciz	"armv5te"
	.size	cpu_arch_name, . - cpu_arch_name

	.type	cpu_elf_name, #object
cpu_elf_name:
	.asciz	"v5"
	.size	cpu_elf_name, . - cpu_elf_name

	.type	cpu_xsc3_name, #object
cpu_xsc3_name:
	.asciz	"XScale-V3 based processor"
	.size	cpu_xsc3_name, . - cpu_xsc3_name

	.align

	.section ".proc.info.init", #alloc, #execinstr

	.type	__xsc3_proc_info,#object
__xsc3_proc_info:
	.long	0x69056000
	.long	0xffffe000
	.long	PMD_TYPE_SECT | \
		PMD_SECT_BUFFERABLE | \
		PMD_SECT_CACHEABLE | \
		PMD_SECT_AP_WRITE | \
		PMD_SECT_AP_READ
	.long	PMD_TYPE_SECT | \
		PMD_SECT_AP_WRITE | \
		PMD_SECT_AP_READ
	b	__xsc3_setup
	.long	cpu_arch_name
	.long	cpu_elf_name
	.long	HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP
	.long	cpu_xsc3_name
	.long	xsc3_processor_functions
	.long	v4wbi_tlb_fns
	.long	xsc3_mc_user_fns
	.long	xsc3_cache_fns
	.size	__xsc3_proc_info, . - __xsc3_proc_info

/* Note: PXA935 changed its implementor ID from Intel to Marvell */

	.type	__xsc3_pxa935_proc_info,#object
__xsc3_pxa935_proc_info:
	.long	0x56056000
	.long	0xffffe000
	.long	PMD_TYPE_SECT | \
		PMD_SECT_BUFFERABLE | \
		PMD_SECT_CACHEABLE | \
		PMD_SECT_AP_WRITE | \
		PMD_SECT_AP_READ
	.long	PMD_TYPE_SECT | \
		PMD_SECT_AP_WRITE | \
		PMD_SECT_AP_READ
	b	__xsc3_setup
	.long	cpu_arch_name
	.long	cpu_elf_name
	.long	HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP
	.long	cpu_xsc3_name
	.long	xsc3_processor_functions
	.long	v4wbi_tlb_fns
	.long	xsc3_mc_user_fns
	.long	xsc3_cache_fns
	.size	__xsc3_pxa935_proc_info, . - __xsc3_pxa935_proc_info
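/*
 * Editor's note: each __*_proc_info record above is matched by the
 * early boot code against the CPU main ID register using the
 * value/mask pair at the start of the record, conceptually
 *
 *	if ((read_cpuid_id() & 0xffffe000) == 0x69056000)
 *		// use __xsc3_proc_info
 *
 * which is why the PXA935 needs its own record: it reports Marvell
 * (0x56) rather than Intel (0x69) as the implementer.
 */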