/*
 *  S390 version
 *    Copyright IBM Corp. 1999, 2000
 *    Author(s): Hartmut Penner (hp@de.ibm.com)
 *               Ulrich Weigand (weigand@de.ibm.com)
 *               Martin Schwidefsky (schwidefsky@de.ibm.com)
 *
 *  Derived from "include/asm-i386/pgtable.h"
 */

#ifndef _ASM_S390_PGTABLE_H
#define _ASM_S390_PGTABLE_H

/*
 * The Linux memory management assumes a three-level page table setup. For
 * s390 31 bit we "fold" the mid level into the top-level page table, so
 * that we physically have the same two-level page table as the s390 mmu
 * expects in 31 bit mode. For s390 64 bit we use three of the five levels
 * the hardware provides (region first and region second tables are not
 * used).
 *
 * The "pgd_xxx()" functions are trivial for a folded two-level
 * setup: the pgd is never bad, and a pmd always exists (as it's folded
 * into the pgd entry)
 *
 * This file contains the functions and defines necessary to modify and use
 * the S390 page table tree.
 */
#ifndef __ASSEMBLY__
#include <linux/sched.h>
#include <linux/mm_types.h>
#include <linux/page-flags.h>
#include <asm/bug.h>
#include <asm/page.h>

extern pgd_t swapper_pg_dir[] __attribute__ ((aligned (4096)));
extern void paging_init(void);
extern void vmem_map_init(void);

/*
 * The S390 doesn't have any external MMU info: the kernel page
 * tables contain all the necessary information.
 */
#define update_mmu_cache(vma, address, ptep)     do { } while (0)
#define update_mmu_cache_pmd(vma, address, ptep) do { } while (0)

/*
 * ZERO_PAGE is a global shared page that is always zero; used
 * for zero-mapped memory areas etc..
 */
extern unsigned long empty_zero_page;
extern unsigned long zero_page_mask;

#define ZERO_PAGE(vaddr) \
	(virt_to_page((void *)(empty_zero_page + \
	 (((unsigned long)(vaddr)) & zero_page_mask))))
#define __HAVE_COLOR_ZERO_PAGE

/* TODO: s390 cannot support io_remap_pfn_range... */
#define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \
	remap_pfn_range(vma, vaddr, pfn, size, prot)

#endif /* !__ASSEMBLY__ */

/*
 * PMD_SHIFT determines the size of the area a second-level page
 * table can map
 * PGDIR_SHIFT determines what a third-level page table entry can map
 */
#ifndef CONFIG_64BIT
# define PMD_SHIFT	20
# define PUD_SHIFT	20
# define PGDIR_SHIFT	20
#else /* CONFIG_64BIT */
# define PMD_SHIFT	20
# define PUD_SHIFT	31
# define PGDIR_SHIFT	42
#endif /* CONFIG_64BIT */

#define PMD_SIZE	(1UL << PMD_SHIFT)
#define PMD_MASK	(~(PMD_SIZE-1))
#define PUD_SIZE	(1UL << PUD_SHIFT)
#define PUD_MASK	(~(PUD_SIZE-1))
#define PGDIR_SIZE	(1UL << PGDIR_SHIFT)
#define PGDIR_MASK	(~(PGDIR_SIZE-1))

/*
 * entries per page directory level: the S390 is two-level, so
 * we don't really have any PMD directory physically.
 * for S390 segment-table entries are combined to one PGD
 * that leads to 1024 pte per pgd
 */
#define PTRS_PER_PTE	256
#ifndef CONFIG_64BIT
#define PTRS_PER_PMD	1
#define PTRS_PER_PUD	1
#else /* CONFIG_64BIT */
#define PTRS_PER_PMD	2048
#define PTRS_PER_PUD	2048
#endif /* CONFIG_64BIT */
#define PTRS_PER_PGD	2048

#define FIRST_USER_ADDRESS  0
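
/*
 * Editorial sketch (not part of the original header): with 4KB pages
 * (PAGE_SHIFT = 12) the shifts above yield the classic s390 table sizes.
 * A page table maps 256 * 4KB = 1MB, one segment, so PMD_SIZE is 1MB on
 * both 31 and 64 bit; on 64 bit, 2048 segment entries give PUD_SIZE = 2GB
 * (a region-third table) and 2048 of those give PGDIR_SIZE = 4TB
 * (a region-second table). BUILD_BUG_ON is from <linux/bug.h>.
 */
#if 0	/* illustrative only, never compiled */
static inline void __check_s390_table_sizes(void)
{
	BUILD_BUG_ON(PMD_SIZE != (1UL << 20));		/* 1MB segment */
#ifdef CONFIG_64BIT
	BUILD_BUG_ON(PUD_SIZE != (1UL << 31));		/* 2GB region-third */
	BUILD_BUG_ON(PGDIR_SIZE != (1UL << 42));	/* 4TB region-second */
#endif
}
#endif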
#define pte_ERROR(e) \
	printk("%s:%d: bad pte %p.\n", __FILE__, __LINE__, (void *) pte_val(e))
#define pmd_ERROR(e) \
	printk("%s:%d: bad pmd %p.\n", __FILE__, __LINE__, (void *) pmd_val(e))
#define pud_ERROR(e) \
	printk("%s:%d: bad pud %p.\n", __FILE__, __LINE__, (void *) pud_val(e))
#define pgd_ERROR(e) \
	printk("%s:%d: bad pgd %p.\n", __FILE__, __LINE__, (void *) pgd_val(e))

#ifndef __ASSEMBLY__
/*
 * The vmalloc and module area will always be on the topmost area of the kernel
 * mapping. We reserve 96MB (31bit) / 128GB (64bit) for vmalloc and modules.
 * On 64 bit kernels we have a 2GB area at the top of the vmalloc area where
 * modules will reside. That makes sure that inter module branches always
 * happen without trampolines and in addition the placement within a 2GB frame
 * is branch prediction unit friendly.
 */
extern unsigned long VMALLOC_START;
extern unsigned long VMALLOC_END;
extern struct page *vmemmap;

#define VMEM_MAX_PHYS ((unsigned long) vmemmap)

#ifdef CONFIG_64BIT
extern unsigned long MODULES_VADDR;
extern unsigned long MODULES_END;
#define MODULES_VADDR	MODULES_VADDR
#define MODULES_END	MODULES_END
#define MODULES_LEN	(1UL << 31)
#endif

/*
 * A 31 bit pagetable entry of S390 has following format:
 *  |   PFRA          |    |  OS  |
 * 0                   0IP0
 * 00000000001111111111222222222233
 * 01234567890123456789012345678901
 *
 * I Page-Invalid Bit:    Page is not available for address-translation
 * P Page-Protection Bit: Store access not possible for page
 *
 * A 31 bit segmenttable entry of S390 has following format:
 *  |   P-table origin      |  |PTL
 * 0                         IC
 * 00000000001111111111222222222233
 * 01234567890123456789012345678901
 *
 * I Segment-Invalid Bit:    Segment is not available for address-translation
 * C Common-Segment Bit:     Segment is not private (PoP 3-30)
 * PTL Page-Table-Length:    Page-table length ((PTL+1)*16 entries -> up to 256)
 *
 * The 31 bit segmenttable origin of S390 has following format:
 *
 *  |S-table origin   |     | STL |
 * X                   **GPS
 * 00000000001111111111222222222233
 * 01234567890123456789012345678901
 *
 * X Space-Switch event:
 * G Segment-Invalid Bit:  *
 * P Private-Space Bit:    Segment is not private (PoP 3-30)
 * S Storage-Alteration:
 * STL Segment-Table-Length: Segment-table length ((STL+1)*16 entries -> up to 2048)
 *
 * A 64 bit pagetable entry of S390 has following format:
 * |                     PFRA                         |0IPC|  OS  |
 * 0000000000111111111122222222223333333333444444444455555555556666
 * 0123456789012345678901234567890123456789012345678901234567890123
 *
 * I Page-Invalid Bit:    Page is not available for address-translation
 * P Page-Protection Bit: Store access not possible for page
 * C Change-bit override: HW is not required to set change bit
 *
 * A 64 bit segmenttable entry of S390 has following format:
 * |        P-table origin                              |      TT
 * 0000000000111111111122222222223333333333444444444455555555556666
 * 0123456789012345678901234567890123456789012345678901234567890123
 *
 * I Segment-Invalid Bit:    Segment is not available for address-translation
 * C Common-Segment Bit:     Segment is not private (PoP 3-30)
 * P Page-Protection Bit: Store access not possible for page
 * TT Type 00
 *
 * A 64 bit region table entry of S390 has following format:
 * |        S-table origin                             |   TF  TTTL
 * 0000000000111111111122222222223333333333444444444455555555556666
 * 0123456789012345678901234567890123456789012345678901234567890123
 *
 * I Segment-Invalid Bit:    Segment is not available for address-translation
 * TT Type 01
 * TF
 * TL Table length
 *
 * The 64 bit regiontable origin of S390 has following format:
 * |      region table origin                          |       DTTL
 * 0000000000111111111122222222223333333333444444444455555555556666
 * 0123456789012345678901234567890123456789012345678901234567890123
 *
 * X Space-Switch event:
 * G Segment-Invalid Bit:
 * P Private-Space Bit:
 * S Storage-Alteration:
 * R Real space
 * TL Table-Length:
 *
 * A storage key has the following format:
 * | ACC |F|R|C|0|
 *  0   3 4 5 6 7
 * ACC: access key
 * F  : fetch protection bit
 * R  : referenced bit
 * C  : changed bit
 */
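
/*
 * Editorial sketch: decoding a storage key as laid out above. The
 * _PAGE_ACC_BITS, _PAGE_FP_BIT, _PAGE_REFERENCED and _PAGE_CHANGED masks
 * and page_get_storage_key() come from <asm/page.h> and are used further
 * down in this file; the helper itself is illustrative only.
 */
#if 0	/* illustrative only, never compiled */
static inline void __decode_storage_key(unsigned long paddr)
{
	unsigned char skey = page_get_storage_key(paddr);

	unsigned char acc = skey & _PAGE_ACC_BITS;	/* ACC: access key */
	int fetch_prot	  = skey & _PAGE_FP_BIT;	/* F: fetch protection */
	int referenced	  = skey & _PAGE_REFERENCED;	/* R: referenced */
	int changed	  = skey & _PAGE_CHANGED;	/* C: changed */
}
#endif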
/* Hardware bits in the page table entry */
#define _PAGE_CO	0x100		/* HW Change-bit override */
#define _PAGE_RO	0x200		/* HW read-only bit  */
#define _PAGE_INVALID	0x400		/* HW invalid bit    */

/* Software bits in the page table entry */
#define _PAGE_SWT	0x001		/* SW pte type bit t */
#define _PAGE_SWX	0x002		/* SW pte type bit x */
#define _PAGE_SWC	0x004		/* SW pte changed bit */
#define _PAGE_SWR	0x008		/* SW pte referenced bit */
#define _PAGE_SWW	0x010		/* SW pte write bit */
#define _PAGE_SPECIAL	0x020		/* SW associated with special page */
#define __HAVE_ARCH_PTE_SPECIAL

/* Set of bits not changed in pte_modify */
#define _PAGE_CHG_MASK		(PAGE_MASK | _PAGE_SPECIAL | _PAGE_CO | \
				 _PAGE_SWC | _PAGE_SWR)

/* Six different types of pages. */
#define _PAGE_TYPE_EMPTY	0x400
#define _PAGE_TYPE_NONE		0x401
#define _PAGE_TYPE_SWAP		0x403
#define _PAGE_TYPE_FILE		0x601	/* bit 0x002 is used for offset !! */
#define _PAGE_TYPE_RO		0x200
#define _PAGE_TYPE_RW		0x000

/*
 * Only four types for huge pages, using the invalid bit and protection bit
 * of a segment table entry.
 */
#define _HPAGE_TYPE_EMPTY	0x020	/* _SEGMENT_ENTRY_INV */
#define _HPAGE_TYPE_NONE	0x220
#define _HPAGE_TYPE_RO		0x200	/* _SEGMENT_ENTRY_RO  */
#define _HPAGE_TYPE_RW		0x000

/*
 * PTE type bits are rather complicated. handle_pte_fault uses pte_present,
 * pte_none and pte_file to find out the pte type WITHOUT holding the page
 * table lock. ptep_clear_flush on the other hand uses the ipte instruction
 * to invalidate a given pte. ipte sets the hw invalid bit and clears all tlbs
 * for the page. The page table entry is set to _PAGE_TYPE_EMPTY afterwards.
 * This change is done while holding the lock, but the intermediate step
 * of a previously valid pte with the hw invalid bit set can be observed by
 * handle_pte_fault. That makes it necessary that all valid pte types with
 * the hw invalid bit set must be distinguishable from the four pte types
 * empty, none, swap and file.
 *
 *			irxt  ipte  irxt
 * _PAGE_TYPE_EMPTY	1000   ->   1000
 * _PAGE_TYPE_NONE	1001   ->   1001
 * _PAGE_TYPE_SWAP	1011   ->   1011
 * _PAGE_TYPE_FILE	11?1   ->   11?1
 * _PAGE_TYPE_RO	0100   ->   1100
 * _PAGE_TYPE_RW	0000   ->   1000
 *
 * pte_none is true for the bit combinations 1000, 1010, 1100, 1110
 * pte_present is true for the bit combinations 0000, 0010, 0100, 0110, 1001
 * pte_file is true for the bit combinations 1101, 1111
 * swap pte is 1011 and 0001, 0011, 0101, 0111 are invalid.
 */

#ifndef CONFIG_64BIT

/* Bits in the segment table address-space-control-element */
#define _ASCE_SPACE_SWITCH	0x80000000UL	/* space switch event	    */
#define _ASCE_ORIGIN_MASK	0x7ffff000UL	/* segment table origin	    */
#define _ASCE_PRIVATE_SPACE	0x100	/* private space control	    */
#define _ASCE_ALT_EVENT		0x80	/* storage alteration event control */
#define _ASCE_TABLE_LENGTH	0x7f	/* 128 x 64 entries = 8k	    */

/* Bits in the segment table entry */
#define _SEGMENT_ENTRY_ORIGIN	0x7fffffc0UL	/* page table origin	    */
#define _SEGMENT_ENTRY_RO	0x200	/* page protection bit		    */
#define _SEGMENT_ENTRY_INV	0x20	/* invalid segment table entry	    */
#define _SEGMENT_ENTRY_COMMON	0x10	/* common segment bit		    */
#define _SEGMENT_ENTRY_PTL	0x0f	/* page table length		    */

#define _SEGMENT_ENTRY		(_SEGMENT_ENTRY_PTL)
#define _SEGMENT_ENTRY_EMPTY	(_SEGMENT_ENTRY_INV)

/* Page status table bits for virtualization */
#define RCP_ACC_BITS	0xf0000000UL
#define RCP_FP_BIT	0x08000000UL
#define RCP_PCL_BIT	0x00800000UL
#define RCP_HR_BIT	0x00400000UL
#define RCP_HC_BIT	0x00200000UL
#define RCP_GR_BIT	0x00040000UL
#define RCP_GC_BIT	0x00020000UL

/* User dirty / referenced bit for KVM's migration feature */
#define KVM_UR_BIT	0x00008000UL
#define KVM_UC_BIT	0x00004000UL

#else /* CONFIG_64BIT */

/* Bits in the segment/region table address-space-control-element */
#define _ASCE_ORIGIN		~0xfffUL /* segment table origin	    */
#define _ASCE_PRIVATE_SPACE	0x100	/* private space control	    */
#define _ASCE_ALT_EVENT		0x80	/* storage alteration event control */
#define _ASCE_SPACE_SWITCH	0x40	/* space switch event		    */
#define _ASCE_REAL_SPACE	0x20	/* real space control		    */
#define _ASCE_TYPE_MASK		0x0c	/* asce table type mask		    */
#define _ASCE_TYPE_REGION1	0x0c	/* region first table type	    */
#define _ASCE_TYPE_REGION2	0x08	/* region second table type	    */
#define _ASCE_TYPE_REGION3	0x04	/* region third table type	    */
#define _ASCE_TYPE_SEGMENT	0x00	/* segment table type		    */
#define _ASCE_TABLE_LENGTH	0x03	/* region table length		    */

/* Bits in the region table entry */
#define _REGION_ENTRY_ORIGIN	~0xfffUL /* region/segment table origin	    */
#define _REGION_ENTRY_RO	0x200	/* region protection bit	    */
#define _REGION_ENTRY_INV	0x20	/* invalid region table entry	    */
#define _REGION_ENTRY_TYPE_MASK	0x0c	/* region/segment table type mask   */
#define _REGION_ENTRY_TYPE_R1	0x0c	/* region first table type	    */
#define _REGION_ENTRY_TYPE_R2	0x08	/* region second table type	    */
#define _REGION_ENTRY_TYPE_R3	0x04	/* region third table type	    */
#define _REGION_ENTRY_LENGTH	0x03	/* region third length		    */

#define _REGION1_ENTRY		(_REGION_ENTRY_TYPE_R1 | _REGION_ENTRY_LENGTH)
#define _REGION1_ENTRY_EMPTY	(_REGION_ENTRY_TYPE_R1 | _REGION_ENTRY_INV)
#define _REGION2_ENTRY		(_REGION_ENTRY_TYPE_R2 | _REGION_ENTRY_LENGTH)
#define _REGION2_ENTRY_EMPTY	(_REGION_ENTRY_TYPE_R2 | _REGION_ENTRY_INV)
#define _REGION3_ENTRY		(_REGION_ENTRY_TYPE_R3 | _REGION_ENTRY_LENGTH)
#define _REGION3_ENTRY_EMPTY	(_REGION_ENTRY_TYPE_R3 | _REGION_ENTRY_INV)

#define _REGION3_ENTRY_LARGE	0x400	/* RTTE-format control, large page  */
#define _REGION3_ENTRY_RO	0x200	/* page protection bit		    */
#define _REGION3_ENTRY_CO	0x100	/* change-recording override	    */

/* Bits in the segment table entry */
#define _SEGMENT_ENTRY_ORIGIN_LARGE ~0xfffffUL /* large page address	    */
#define _SEGMENT_ENTRY_ORIGIN	~0x7ffUL /* segment table origin	    */
#define _SEGMENT_ENTRY_RO	0x200	/* page protection bit		    */
#define _SEGMENT_ENTRY_INV	0x20	/* invalid segment table entry	    */

#define _SEGMENT_ENTRY		(0)
#define _SEGMENT_ENTRY_EMPTY	(_SEGMENT_ENTRY_INV)

#define _SEGMENT_ENTRY_LARGE	0x400	/* STE-format control, large page   */
#define _SEGMENT_ENTRY_CO	0x100	/* change-recording override	    */
#define _SEGMENT_ENTRY_SPLIT_BIT 0	/* THP splitting bit number	    */
#define _SEGMENT_ENTRY_SPLIT	(1UL << _SEGMENT_ENTRY_SPLIT_BIT)

/* Set of bits not changed in pmd_modify */
#define _SEGMENT_CHG_MASK	(_SEGMENT_ENTRY_ORIGIN | _SEGMENT_ENTRY_LARGE \
				 | _SEGMENT_ENTRY_SPLIT | _SEGMENT_ENTRY_CO)

/* Page status table bits for virtualization */
#define RCP_ACC_BITS	0xf000000000000000UL
#define RCP_FP_BIT	0x0800000000000000UL
#define RCP_PCL_BIT	0x0080000000000000UL
#define RCP_HR_BIT	0x0040000000000000UL
#define RCP_HC_BIT	0x0020000000000000UL
#define RCP_GR_BIT	0x0004000000000000UL
#define RCP_GC_BIT	0x0002000000000000UL

/* User dirty / referenced bit for KVM's migration feature */
#define KVM_UR_BIT	0x0000800000000000UL
#define KVM_UC_BIT	0x0000400000000000UL

#endif /* CONFIG_64BIT */

/*
 * A user page table pointer has the space-switch-event bit, the
 * private-space-control bit and the storage-alteration-event-control
 * bit set. A kernel page table pointer doesn't need them.
 */
#define _ASCE_USER_BITS		(_ASCE_SPACE_SWITCH | _ASCE_PRIVATE_SPACE | \
				 _ASCE_ALT_EVENT)
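
/*
 * Editorial sketch: a user ASCE combines the table origin with the user
 * bits above plus table type and length, e.g. for the default 64 bit
 * three-level (region-third) layout, mirroring what the mm context setup
 * code does. Illustrative only.
 */
#if 0	/* illustrative only, never compiled */
unsigned long user_asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH |
			  _ASCE_USER_BITS | _ASCE_TYPE_REGION3;
#endif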
/*
 * Page protection definitions.
 */
#define PAGE_NONE	__pgprot(_PAGE_TYPE_NONE)
#define PAGE_RO		__pgprot(_PAGE_TYPE_RO)
#define PAGE_RW		__pgprot(_PAGE_TYPE_RO | _PAGE_SWW)
#define PAGE_RWC	__pgprot(_PAGE_TYPE_RW | _PAGE_SWW | _PAGE_SWC)

#define PAGE_KERNEL	PAGE_RWC
#define PAGE_SHARED	PAGE_KERNEL
#define PAGE_COPY	PAGE_RO

/*
 * On s390 the page table entry has an invalid bit and a read-only bit.
 * Read permission implies execute permission and write permission
 * implies read permission.
 */
	 /*xwr*/
#define __P000	PAGE_NONE
#define __P001	PAGE_RO
#define __P010	PAGE_RO
#define __P011	PAGE_RO
#define __P100	PAGE_RO
#define __P101	PAGE_RO
#define __P110	PAGE_RO
#define __P111	PAGE_RO

#define __S000	PAGE_NONE
#define __S001	PAGE_RO
#define __S010	PAGE_RW
#define __S011	PAGE_RW
#define __S100	PAGE_RO
#define __S101	PAGE_RO
#define __S110	PAGE_RW
#define __S111	PAGE_RW

/*
 * Segment entry (large page) protection definitions.
 */
#define SEGMENT_NONE	__pgprot(_HPAGE_TYPE_NONE)
#define SEGMENT_RO	__pgprot(_HPAGE_TYPE_RO)
#define SEGMENT_RW	__pgprot(_HPAGE_TYPE_RW)

static inline int mm_exclusive(struct mm_struct *mm)
{
	return likely(mm == current->active_mm &&
		      atomic_read(&mm->context.attach_count) <= 1);
}

static inline int mm_has_pgste(struct mm_struct *mm)
{
#ifdef CONFIG_PGSTE
	if (unlikely(mm->context.has_pgste))
		return 1;
#endif
	return 0;
}

/*
 * pgd/pmd/pte query functions
 */
#ifndef CONFIG_64BIT

static inline int pgd_present(pgd_t pgd) { return 1; }
static inline int pgd_none(pgd_t pgd)	 { return 0; }
static inline int pgd_bad(pgd_t pgd)	 { return 0; }

static inline int pud_present(pud_t pud) { return 1; }
static inline int pud_none(pud_t pud)	 { return 0; }
static inline int pud_large(pud_t pud)	 { return 0; }
static inline int pud_bad(pud_t pud)	 { return 0; }

#else /* CONFIG_64BIT */

static inline int pgd_present(pgd_t pgd)
{
	if ((pgd_val(pgd) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R2)
		return 1;
	return (pgd_val(pgd) & _REGION_ENTRY_ORIGIN) != 0UL;
}

static inline int pgd_none(pgd_t pgd)
{
	if ((pgd_val(pgd) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R2)
		return 0;
	return (pgd_val(pgd) & _REGION_ENTRY_INV) != 0UL;
}

static inline int pgd_bad(pgd_t pgd)
{
	/*
	 * With dynamic page table levels the pgd can be a region table
	 * entry or a segment table entry. Check for the bits that are
	 * invalid for either table entry.
	 */
	unsigned long mask =
		~_SEGMENT_ENTRY_ORIGIN & ~_REGION_ENTRY_INV &
		~_REGION_ENTRY_TYPE_MASK & ~_REGION_ENTRY_LENGTH;
	return (pgd_val(pgd) & mask) != 0;
}

static inline int pud_present(pud_t pud)
{
	if ((pud_val(pud) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R3)
		return 1;
	return (pud_val(pud) & _REGION_ENTRY_ORIGIN) != 0UL;
}

static inline int pud_none(pud_t pud)
{
	if ((pud_val(pud) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R3)
		return 0;
	return (pud_val(pud) & _REGION_ENTRY_INV) != 0UL;
}

static inline int pud_large(pud_t pud)
{
	if ((pud_val(pud) & _REGION_ENTRY_TYPE_MASK) != _REGION_ENTRY_TYPE_R3)
		return 0;
	return !!(pud_val(pud) & _REGION3_ENTRY_LARGE);
}

static inline int pud_bad(pud_t pud)
{
	/*
	 * With dynamic page table levels the pud can be a region table
	 * entry or a segment table entry. Check for the bits that are
	 * invalid for either table entry.
	 */
	unsigned long mask =
		~_SEGMENT_ENTRY_ORIGIN & ~_REGION_ENTRY_INV &
		~_REGION_ENTRY_TYPE_MASK & ~_REGION_ENTRY_LENGTH;
	return (pud_val(pud) & mask) != 0;
}

#endif /* CONFIG_64BIT */

static inline int pmd_present(pmd_t pmd)
{
	unsigned long mask = _SEGMENT_ENTRY_INV | _SEGMENT_ENTRY_RO;
	return (pmd_val(pmd) & mask) == _HPAGE_TYPE_NONE ||
	       !(pmd_val(pmd) & _SEGMENT_ENTRY_INV);
}

static inline int pmd_none(pmd_t pmd)
{
	return (pmd_val(pmd) & _SEGMENT_ENTRY_INV) &&
	       !(pmd_val(pmd) & _SEGMENT_ENTRY_RO);
}

static inline int pmd_large(pmd_t pmd)
{
#ifdef CONFIG_64BIT
	return !!(pmd_val(pmd) & _SEGMENT_ENTRY_LARGE);
#else
	return 0;
#endif
}

static inline int pmd_bad(pmd_t pmd)
{
	unsigned long mask = ~_SEGMENT_ENTRY_ORIGIN & ~_SEGMENT_ENTRY_INV;
	return (pmd_val(pmd) & mask) != _SEGMENT_ENTRY;
}

#define __HAVE_ARCH_PMDP_SPLITTING_FLUSH
extern void pmdp_splitting_flush(struct vm_area_struct *vma,
				 unsigned long addr, pmd_t *pmdp);

#define __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS
extern int pmdp_set_access_flags(struct vm_area_struct *vma,
				 unsigned long address, pmd_t *pmdp,
				 pmd_t entry, int dirty);

#define __HAVE_ARCH_PMDP_CLEAR_YOUNG_FLUSH
extern int pmdp_clear_flush_young(struct vm_area_struct *vma,
				  unsigned long address, pmd_t *pmdp);

#define __HAVE_ARCH_PMD_WRITE
static inline int pmd_write(pmd_t pmd)
{
	return (pmd_val(pmd) & _SEGMENT_ENTRY_RO) == 0;
}

static inline int pmd_young(pmd_t pmd)
{
	return 0;
}

static inline int pte_none(pte_t pte)
{
	return (pte_val(pte) & _PAGE_INVALID) && !(pte_val(pte) & _PAGE_SWT);
}

static inline int pte_present(pte_t pte)
{
	unsigned long mask = _PAGE_RO | _PAGE_INVALID | _PAGE_SWT | _PAGE_SWX;
	return (pte_val(pte) & mask) == _PAGE_TYPE_NONE ||
		(!(pte_val(pte) & _PAGE_INVALID) &&
		 !(pte_val(pte) & _PAGE_SWT));
}

static inline int pte_file(pte_t pte)
{
	unsigned long mask = _PAGE_RO | _PAGE_INVALID | _PAGE_SWT;
	return (pte_val(pte) & mask) == _PAGE_TYPE_FILE;
}

static inline int pte_special(pte_t pte)
{
	return (pte_val(pte) & _PAGE_SPECIAL);
}
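
/*
 * Editorial sketch: the lockless type test described in the "PTE type
 * bits" comment above reduces to the three predicates just defined.
 * Illustrative only.
 */
#if 0	/* illustrative only, never compiled */
static inline const char *__pte_type_name(pte_t pte)
{
	if (pte_present(pte))
		return "present";	/* includes _PAGE_TYPE_NONE */
	if (pte_file(pte))
		return "file";
	if (pte_none(pte))
		return "none/empty";
	return "swap";
}
#endif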
#define __HAVE_ARCH_PTE_SAME
static inline int pte_same(pte_t a, pte_t b)
{
	return pte_val(a) == pte_val(b);
}
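
/*
 * Editorial note on the pgste helpers below: an s390 page table has 256
 * entries (2KB); with CONFIG_PGSTE the second half of the 4KB page holds
 * one page status table entry (pgste) per pte, which is why
 * ptep[PTRS_PER_PTE] addresses the pgste belonging to *ptep.
 */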
static inline pgste_t pgste_get_lock(pte_t *ptep)
{
	unsigned long new = 0;
#ifdef CONFIG_PGSTE
	unsigned long old;

	preempt_disable();
	asm(
		"	lg	%0,%2\n"
		"0:	lgr	%1,%0\n"
		"	nihh	%0,0xff7f\n"	/* clear RCP_PCL_BIT in old */
		"	oihh	%1,0x0080\n"	/* set RCP_PCL_BIT in new */
		"	csg	%0,%1,%2\n"
		"	jl	0b\n"
		: "=&d" (old), "=&d" (new), "=Q" (ptep[PTRS_PER_PTE])
		: "Q" (ptep[PTRS_PER_PTE]) : "cc");
#endif
	return __pgste(new);
}

static inline void pgste_set_unlock(pte_t *ptep, pgste_t pgste)
{
#ifdef CONFIG_PGSTE
	asm(
		"	nihh	%1,0xff7f\n"	/* clear RCP_PCL_BIT */
		"	stg	%1,%0\n"
		: "=Q" (ptep[PTRS_PER_PTE])
		: "d" (pgste_val(pgste)), "Q" (ptep[PTRS_PER_PTE]) : "cc");
	preempt_enable();
#endif
}

static inline pgste_t pgste_update_all(pte_t *ptep, pgste_t pgste)
{
#ifdef CONFIG_PGSTE
	unsigned long address, bits;
	unsigned char skey;

	if (!pte_present(*ptep))
		return pgste;
	address = pte_val(*ptep) & PAGE_MASK;
	skey = page_get_storage_key(address);
	bits = skey & (_PAGE_CHANGED | _PAGE_REFERENCED);
	/* Clear page changed & referenced bit in the storage key */
	if (bits & _PAGE_CHANGED)
		page_set_storage_key(address, skey ^ bits, 0);
	else if (bits)
		page_reset_referenced(address);
	/* Transfer page changed & referenced bit to guest bits in pgste */
	pgste_val(pgste) |= bits << 48;		/* RCP_GR_BIT & RCP_GC_BIT */
	/* Get host changed & referenced bits from pgste */
	bits |= (pgste_val(pgste) & (RCP_HR_BIT | RCP_HC_BIT)) >> 52;
	/* Transfer page changed & referenced bit to kvm user bits */
	pgste_val(pgste) |= bits << 45;		/* KVM_UR_BIT & KVM_UC_BIT */
	/* Clear relevant host bits in pgste. */
	pgste_val(pgste) &= ~(RCP_HR_BIT | RCP_HC_BIT);
	pgste_val(pgste) &= ~(RCP_ACC_BITS | RCP_FP_BIT);
	/* Copy page access key and fetch protection bit to pgste */
	pgste_val(pgste) |=
		(unsigned long) (skey & (_PAGE_ACC_BITS | _PAGE_FP_BIT)) << 56;
	/* Transfer referenced bit to pte */
	pte_val(*ptep) |= (bits & _PAGE_REFERENCED) << 1;
#endif
	return pgste;
}

static inline pgste_t pgste_update_young(pte_t *ptep, pgste_t pgste)
{
#ifdef CONFIG_PGSTE
	int young;

	if (!pte_present(*ptep))
		return pgste;
	/* Get referenced bit from storage key */
	young = page_reset_referenced(pte_val(*ptep) & PAGE_MASK);
	if (young)
		pgste_val(pgste) |= RCP_GR_BIT;
	/* Get host referenced bit from pgste */
	if (pgste_val(pgste) & RCP_HR_BIT) {
		pgste_val(pgste) &= ~RCP_HR_BIT;
		young = 1;
	}
	/* Transfer referenced bit to kvm user bits and pte */
	if (young) {
		pgste_val(pgste) |= KVM_UR_BIT;
		pte_val(*ptep) |= _PAGE_SWR;
	}
#endif
	return pgste;
}

static inline void pgste_set_key(pte_t *ptep, pgste_t pgste, pte_t entry)
{
#ifdef CONFIG_PGSTE
	unsigned long address;
	unsigned long okey, nkey;

	if (!pte_present(entry))
		return;
	address = pte_val(entry) & PAGE_MASK;
	okey = nkey = page_get_storage_key(address);
	nkey &= ~(_PAGE_ACC_BITS | _PAGE_FP_BIT);
	/* Set page access key and fetch protection bit from pgste */
	nkey |= (pgste_val(pgste) & (RCP_ACC_BITS | RCP_FP_BIT)) >> 56;
	if (okey != nkey)
		page_set_storage_key(address, nkey, 0);
#endif
}

static inline void pgste_set_pte(pte_t *ptep, pte_t entry)
{
	if (!MACHINE_HAS_ESOP && (pte_val(entry) & _PAGE_SWW)) {
		/*
		 * Without enhanced suppression-on-protection force
		 * the dirty bit on for all writable ptes.
		 */
		pte_val(entry) |= _PAGE_SWC;
		pte_val(entry) &= ~_PAGE_RO;
	}
	*ptep = entry;
}

/**
 * struct gmap - guest address space
 * @mm: pointer to the parent mm_struct
 * @table: pointer to the page directory
 * @asce: address space control element for gmap page table
 * @crst_list: list of all crst tables used in the guest address space
 */
struct gmap {
	struct list_head list;
	struct mm_struct *mm;
	unsigned long *table;
	unsigned long asce;
	struct list_head crst_list;
};

/**
 * struct gmap_rmap - reverse mapping for segment table entries
 * @list: pointer to the next gmap_rmap structure in the list
 * @entry: pointer to a segment table entry
 */
struct gmap_rmap {
	struct list_head list;
	unsigned long *entry;
};

/**
 * struct gmap_pgtable - gmap information attached to a page table
 * @vmaddr: address of the 1MB segment in the process virtual memory
 * @mapper: list of segment table entries mapping a page table
 */
struct gmap_pgtable {
	unsigned long vmaddr;
	struct list_head mapper;
};

struct gmap *gmap_alloc(struct mm_struct *mm);
void gmap_free(struct gmap *gmap);
void gmap_enable(struct gmap *gmap);
void gmap_disable(struct gmap *gmap);
int gmap_map_segment(struct gmap *gmap, unsigned long from,
		     unsigned long to, unsigned long length);
int gmap_unmap_segment(struct gmap *gmap, unsigned long to, unsigned long len);
unsigned long __gmap_translate(unsigned long address, struct gmap *);
unsigned long gmap_translate(unsigned long address, struct gmap *);
unsigned long __gmap_fault(unsigned long address, struct gmap *);
unsigned long gmap_fault(unsigned long address, struct gmap *);
void gmap_discard(unsigned long from, unsigned long to, struct gmap *);
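
/*
 * Editorial sketch of the gmap life cycle suggested by the declarations
 * above (simplified, error handling omitted). Illustrative only.
 */
#if 0	/* illustrative only, never compiled */
static inline void __gmap_example(unsigned long from, unsigned long to,
				  unsigned long length)
{
	struct gmap *gmap = gmap_alloc(current->mm);	/* guest address space */

	gmap_map_segment(gmap, from, to, length);	/* back a guest range */
	gmap_enable(gmap);				/* switch to guest asce */
	/* ... run the guest, resolving faults via gmap_fault() ... */
	gmap_disable(gmap);
	gmap_free(gmap);
}
#endif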
/*
 * Certain architectures need to do special things when PTEs
 * within a page table are directly modified. Thus, the following
 * hook is made available.
 */
static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
			      pte_t *ptep, pte_t entry)
{
	pgste_t pgste;

	if (mm_has_pgste(mm)) {
		pgste = pgste_get_lock(ptep);
		pgste_set_key(ptep, pgste, entry);
		pgste_set_pte(ptep, entry);
		pgste_set_unlock(ptep, pgste);
	} else {
		if (!(pte_val(entry) & _PAGE_INVALID) && MACHINE_HAS_EDAT1)
			pte_val(entry) |= _PAGE_CO;
		*ptep = entry;
	}
}

/*
 * query functions pte_write/pte_dirty/pte_young only work if
 * pte_present() is true. Undefined behaviour if not..
 */
static inline int pte_write(pte_t pte)
{
	return (pte_val(pte) & _PAGE_SWW) != 0;
}

static inline int pte_dirty(pte_t pte)
{
	return (pte_val(pte) & _PAGE_SWC) != 0;
}

static inline int pte_young(pte_t pte)
{
#ifdef CONFIG_PGSTE
	if (pte_val(pte) & _PAGE_SWR)
		return 1;
#endif
	return 0;
}

/*
 * pgd/pmd/pte modification functions
 */
static inline void pgd_clear(pgd_t *pgd)
{
#ifdef CONFIG_64BIT
	if ((pgd_val(*pgd) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R2)
		pgd_val(*pgd) = _REGION2_ENTRY_EMPTY;
#endif
}

static inline void pud_clear(pud_t *pud)
{
#ifdef CONFIG_64BIT
	if ((pud_val(*pud) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R3)
		pud_val(*pud) = _REGION3_ENTRY_EMPTY;
#endif
}

static inline void pmd_clear(pmd_t *pmdp)
{
	pmd_val(*pmdp) = _SEGMENT_ENTRY_EMPTY;
}

static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	pte_val(*ptep) = _PAGE_TYPE_EMPTY;
}

/*
 * The following pte modification functions only work if
 * pte_present() is true. Undefined behaviour if not..
 */
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	pte_val(pte) &= _PAGE_CHG_MASK;
	pte_val(pte) |= pgprot_val(newprot);
	if ((pte_val(pte) & _PAGE_SWC) && (pte_val(pte) & _PAGE_SWW))
		pte_val(pte) &= ~_PAGE_RO;
	return pte;
}

static inline pte_t pte_wrprotect(pte_t pte)
{
	pte_val(pte) &= ~_PAGE_SWW;
	/* Do not clobber _PAGE_TYPE_NONE pages! */
	if (!(pte_val(pte) & _PAGE_INVALID))
		pte_val(pte) |= _PAGE_RO;
	return pte;
}

static inline pte_t pte_mkwrite(pte_t pte)
{
	pte_val(pte) |= _PAGE_SWW;
	if (pte_val(pte) & _PAGE_SWC)
		pte_val(pte) &= ~_PAGE_RO;
	return pte;
}

static inline pte_t pte_mkclean(pte_t pte)
{
	pte_val(pte) &= ~_PAGE_SWC;
	/* Do not clobber _PAGE_TYPE_NONE pages! */
	if (!(pte_val(pte) & _PAGE_INVALID))
		pte_val(pte) |= _PAGE_RO;
	return pte;
}

static inline pte_t pte_mkdirty(pte_t pte)
{
	pte_val(pte) |= _PAGE_SWC;
	if (pte_val(pte) & _PAGE_SWW)
		pte_val(pte) &= ~_PAGE_RO;
	return pte;
}

static inline pte_t pte_mkold(pte_t pte)
{
#ifdef CONFIG_PGSTE
	pte_val(pte) &= ~_PAGE_SWR;
#endif
	return pte;
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	return pte;
}

static inline pte_t pte_mkspecial(pte_t pte)
{
	pte_val(pte) |= _PAGE_SPECIAL;
	return pte;
}

#ifdef CONFIG_HUGETLB_PAGE
static inline pte_t pte_mkhuge(pte_t pte)
{
	pte_val(pte) |= (_SEGMENT_ENTRY_LARGE | _SEGMENT_ENTRY_CO);
	return pte;
}
#endif

/*
 * Get (and clear) the user dirty bit for a pte.
 */
static inline int ptep_test_and_clear_user_dirty(struct mm_struct *mm,
						 pte_t *ptep)
{
	pgste_t pgste;
	int dirty = 0;

	if (mm_has_pgste(mm)) {
		pgste = pgste_get_lock(ptep);
		pgste = pgste_update_all(ptep, pgste);
		dirty = !!(pgste_val(pgste) & KVM_UC_BIT);
		pgste_val(pgste) &= ~KVM_UC_BIT;
		pgste_set_unlock(ptep, pgste);
		return dirty;
	}
	return dirty;
}

/*
 * Get (and clear) the user referenced bit for a pte.
 */
static inline int ptep_test_and_clear_user_young(struct mm_struct *mm,
						 pte_t *ptep)
{
	pgste_t pgste;
	int young = 0;

	if (mm_has_pgste(mm)) {
		pgste = pgste_get_lock(ptep);
		pgste = pgste_update_young(ptep, pgste);
		young = !!(pgste_val(pgste) & KVM_UR_BIT);
		pgste_val(pgste) &= ~KVM_UR_BIT;
		pgste_set_unlock(ptep, pgste);
	}
	return young;
}

#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
static inline int ptep_test_and_clear_young(struct vm_area_struct *vma,
					    unsigned long addr, pte_t *ptep)
{
	pgste_t pgste;
	pte_t pte;

	if (mm_has_pgste(vma->vm_mm)) {
		pgste = pgste_get_lock(ptep);
		pgste = pgste_update_young(ptep, pgste);
		pte = *ptep;
		*ptep = pte_mkold(pte);
		pgste_set_unlock(ptep, pgste);
		return pte_young(pte);
	}
	return 0;
}

#define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
static inline int ptep_clear_flush_young(struct vm_area_struct *vma,
					 unsigned long address, pte_t *ptep)
{
	/* No need to flush the TLB;
	 * on s390 reference bits are in the storage key and never in the TLB.
	 * With virtualization we handle the reference bit, without it
	 * we can simply return. */
	return ptep_test_and_clear_young(vma, address, ptep);
}

static inline void __ptep_ipte(unsigned long address, pte_t *ptep)
{
	if (!(pte_val(*ptep) & _PAGE_INVALID)) {
#ifndef CONFIG_64BIT
		/* pto must point to the start of the segment table */
		pte_t *pto = (pte_t *) (((unsigned long) ptep) & 0x7ffffc00);
#else
		/* ipte in zarch mode can do the math */
		pte_t *pto = ptep;
#endif
		asm volatile(
			"	ipte	%2,%3"
			: "=m" (*ptep) : "m" (*ptep),
			  "a" (pto), "a" (address));
	}
}

/*
 * This is hard to understand. ptep_get_and_clear and ptep_clear_flush
 * both clear the TLB for the unmapped pte. The reason is that
 * ptep_get_and_clear is used in common code (e.g. change_pte_range)
 * to modify an active pte. The sequence is
 *   1) ptep_get_and_clear
 *   2) set_pte_at
 *   3) flush_tlb_range
 * On s390 the tlb needs to get flushed with the modification of the pte
 * if the pte is active. The only way how this can be implemented is to
 * have ptep_get_and_clear do the tlb flush. In exchange flush_tlb_range
 * is a nop.
 */
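
/*
 * Editorial sketch: the generic sequence referred to above, roughly as
 * common code (e.g. change_pte_range) performs it. Illustrative only.
 */
#if 0	/* illustrative only, never compiled */
pte = ptep_get_and_clear(mm, addr, ptep);	/* flushes the TLB on s390 */
pte = pte_modify(pte, newprot);
set_pte_at(mm, addr, ptep, pte);
/* the subsequent flush_tlb_range() is a nop on s390 */
#endif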
#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
				       unsigned long address, pte_t *ptep)
{
	pgste_t pgste;
	pte_t pte;

	mm->context.flush_mm = 1;
	if (mm_has_pgste(mm))
		pgste = pgste_get_lock(ptep);

	pte = *ptep;
	if (!mm_exclusive(mm))
		__ptep_ipte(address, ptep);
	pte_val(*ptep) = _PAGE_TYPE_EMPTY;

	if (mm_has_pgste(mm)) {
		pgste = pgste_update_all(&pte, pgste);
		pgste_set_unlock(ptep, pgste);
	}
	return pte;
}

#define __HAVE_ARCH_PTEP_MODIFY_PROT_TRANSACTION
static inline pte_t ptep_modify_prot_start(struct mm_struct *mm,
					   unsigned long address,
					   pte_t *ptep)
{
	pte_t pte;

	mm->context.flush_mm = 1;
	if (mm_has_pgste(mm))
		pgste_get_lock(ptep);

	pte = *ptep;
	if (!mm_exclusive(mm))
		__ptep_ipte(address, ptep);
	return pte;
}

static inline void ptep_modify_prot_commit(struct mm_struct *mm,
					   unsigned long address,
					   pte_t *ptep, pte_t pte)
{
	if (mm_has_pgste(mm)) {
		pgste_set_pte(ptep, pte);
		pgste_set_unlock(ptep, *(pgste_t *)(ptep + PTRS_PER_PTE));
	} else
		*ptep = pte;
}

#define __HAVE_ARCH_PTEP_CLEAR_FLUSH
static inline pte_t ptep_clear_flush(struct vm_area_struct *vma,
				     unsigned long address, pte_t *ptep)
{
	pgste_t pgste;
	pte_t pte;

	if (mm_has_pgste(vma->vm_mm))
		pgste = pgste_get_lock(ptep);

	pte = *ptep;
	__ptep_ipte(address, ptep);
	pte_val(*ptep) = _PAGE_TYPE_EMPTY;

	if (mm_has_pgste(vma->vm_mm)) {
		pgste = pgste_update_all(&pte, pgste);
		pgste_set_unlock(ptep, pgste);
	}
	return pte;
}

/*
 * The batched pte unmap code uses ptep_get_and_clear_full to clear the
 * ptes. Here an optimization is possible. tlb_gather_mmu flushes all
 * tlbs of an mm if it can guarantee that the ptes of the mm_struct
 * cannot be accessed while the batched unmap is running. In this case
 * full==1 and a simple pte_clear is enough. See tlb.h.
 */
#define __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL
static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm,
					    unsigned long address,
					    pte_t *ptep, int full)
{
	pgste_t pgste;
	pte_t pte;

	if (mm_has_pgste(mm))
		pgste = pgste_get_lock(ptep);

	pte = *ptep;
	if (!full)
		__ptep_ipte(address, ptep);
	pte_val(*ptep) = _PAGE_TYPE_EMPTY;

	if (mm_has_pgste(mm)) {
		pgste = pgste_update_all(&pte, pgste);
		pgste_set_unlock(ptep, pgste);
	}
	return pte;
}
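
/*
 * Editorial note: common code passes full == tlb->fullmm here (see
 * zap_pte_range), so the ipte is skipped exactly when tlb_gather_mmu
 * already guarantees a full flush, as the comment above explains.
 */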
#define __HAVE_ARCH_PTEP_SET_WRPROTECT
static inline pte_t ptep_set_wrprotect(struct mm_struct *mm,
				       unsigned long address, pte_t *ptep)
{
	pgste_t pgste;
	pte_t pte = *ptep;

	if (pte_write(pte)) {
		mm->context.flush_mm = 1;
		if (mm_has_pgste(mm))
			pgste = pgste_get_lock(ptep);

		if (!mm_exclusive(mm))
			__ptep_ipte(address, ptep);
		pte = pte_wrprotect(pte);

		if (mm_has_pgste(mm)) {
			pgste_set_pte(ptep, pte);
			pgste_set_unlock(ptep, pgste);
		} else
			*ptep = pte;
	}
	return pte;
}

#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
static inline int ptep_set_access_flags(struct vm_area_struct *vma,
					unsigned long address, pte_t *ptep,
					pte_t entry, int dirty)
{
	pgste_t pgste;

	if (pte_same(*ptep, entry))
		return 0;
	if (mm_has_pgste(vma->vm_mm))
		pgste = pgste_get_lock(ptep);

	__ptep_ipte(address, ptep);

	if (mm_has_pgste(vma->vm_mm)) {
		pgste_set_pte(ptep, entry);
		pgste_set_unlock(ptep, pgste);
	} else
		*ptep = entry;
	return 1;
}

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */
static inline pte_t mk_pte_phys(unsigned long physpage, pgprot_t pgprot)
{
	pte_t __pte;
	pte_val(__pte) = physpage + pgprot_val(pgprot);
	return __pte;
}

static inline pte_t mk_pte(struct page *page, pgprot_t pgprot)
{
	unsigned long physpage = page_to_phys(page);
	pte_t __pte = mk_pte_phys(physpage, pgprot);

	if ((pte_val(__pte) & _PAGE_SWW) && PageDirty(page)) {
		pte_val(__pte) |= _PAGE_SWC;
		pte_val(__pte) &= ~_PAGE_RO;
	}
	return __pte;
}

#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))
#define pud_index(address) (((address) >> PUD_SHIFT) & (PTRS_PER_PUD-1))
#define pmd_index(address) (((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1))
#define pte_index(address) (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE-1))

#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))
#define pgd_offset_k(address) pgd_offset(&init_mm, address)

#ifndef CONFIG_64BIT

#define pmd_deref(pmd) (pmd_val(pmd) & _SEGMENT_ENTRY_ORIGIN)
#define pud_deref(pmd) ({ BUG(); 0UL; })
#define pgd_deref(pmd) ({ BUG(); 0UL; })

#define pud_offset(pgd, address) ((pud_t *) pgd)
#define pmd_offset(pud, address) ((pmd_t *) pud + pmd_index(address))

#else /* CONFIG_64BIT */

#define pmd_deref(pmd) (pmd_val(pmd) & _SEGMENT_ENTRY_ORIGIN)
#define pud_deref(pud) (pud_val(pud) & _REGION_ENTRY_ORIGIN)
#define pgd_deref(pgd) (pgd_val(pgd) & _REGION_ENTRY_ORIGIN)

static inline pud_t *pud_offset(pgd_t *pgd, unsigned long address)
{
	pud_t *pud = (pud_t *) pgd;
	if ((pgd_val(*pgd) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R2)
		pud = (pud_t *) pgd_deref(*pgd);
	return pud + pud_index(address);
}

static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address)
{
	pmd_t *pmd = (pmd_t *) pud;
	if ((pud_val(*pud) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R3)
		pmd = (pmd_t *) pud_deref(*pud);
	return pmd + pmd_index(address);
}

#endif /* CONFIG_64BIT */

#define pfn_pte(pfn,pgprot) mk_pte_phys(__pa((pfn) << PAGE_SHIFT),(pgprot))
#define pte_pfn(x) (pte_val(x) >> PAGE_SHIFT)
#define pte_page(x) pfn_to_page(pte_pfn(x))

#define pmd_page(pmd) pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT)

/* Find an entry in the lowest level page table.. */
#define pte_offset(pmd, addr) ((pte_t *) pmd_deref(*(pmd)) + pte_index(addr))
#define pte_offset_kernel(pmd, address) pte_offset(pmd,address)
#define pte_offset_map(pmd, address) pte_offset_kernel(pmd, address)
#define pte_unmap(pte) do { } while (0)
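
/*
 * Editorial sketch: a complete software walk using the offset helpers
 * above (lockless variant, illustrative only).
 */
#if 0	/* illustrative only, never compiled */
static inline pte_t *__walk_example(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgd = pgd_offset(mm, addr);
	pud_t *pud;
	pmd_t *pmd;

	if (pgd_none(*pgd) || pgd_bad(*pgd))
		return NULL;
	pud = pud_offset(pgd, addr);
	if (pud_none(*pud) || pud_bad(*pud))
		return NULL;
	pmd = pmd_offset(pud, addr);
	if (pmd_none(*pmd) || pmd_bad(*pmd))
		return NULL;
	return pte_offset_map(pmd, addr);
}
#endif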
static inline void __pmd_idte(unsigned long address, pmd_t *pmdp)
{
	unsigned long sto = (unsigned long) pmdp -
			    pmd_index(address) * sizeof(pmd_t);

	if (!(pmd_val(*pmdp) & _SEGMENT_ENTRY_INV)) {
		asm volatile(
			"	.insn	rrf,0xb98e0000,%2,%3,0,0"	/* idte */
			: "=m" (*pmdp)
			: "m" (*pmdp), "a" (sto),
			  "a" ((address & HPAGE_MASK))
			: "cc"
		);
	}
}

#if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HUGETLB_PAGE)
static inline unsigned long massage_pgprot_pmd(pgprot_t pgprot)
{
	/*
	 * pgprot is PAGE_NONE, PAGE_RO, or PAGE_RW (see __Pxxx / __Sxxx)
	 * Convert to segment table entry format.
	 */
	if (pgprot_val(pgprot) == pgprot_val(PAGE_NONE))
		return pgprot_val(SEGMENT_NONE);
	if (pgprot_val(pgprot) == pgprot_val(PAGE_RO))
		return pgprot_val(SEGMENT_RO);
	return pgprot_val(SEGMENT_RW);
}

static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
{
	pmd_val(pmd) &= _SEGMENT_CHG_MASK;
	pmd_val(pmd) |= massage_pgprot_pmd(newprot);
	return pmd;
}

static inline pmd_t mk_pmd_phys(unsigned long physpage, pgprot_t pgprot)
{
	pmd_t __pmd;
	pmd_val(__pmd) = physpage + massage_pgprot_pmd(pgprot);
	return __pmd;
}

static inline pmd_t pmd_mkwrite(pmd_t pmd)
{
	/* Do not clobber _HPAGE_TYPE_NONE pages! */
	if (!(pmd_val(pmd) & _SEGMENT_ENTRY_INV))
		pmd_val(pmd) &= ~_SEGMENT_ENTRY_RO;
	return pmd;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLB_PAGE */

#ifdef CONFIG_TRANSPARENT_HUGEPAGE

#define __HAVE_ARCH_PGTABLE_DEPOSIT
extern void pgtable_trans_huge_deposit(struct mm_struct *mm, pgtable_t pgtable);

#define __HAVE_ARCH_PGTABLE_WITHDRAW
extern pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm);

static inline int pmd_trans_splitting(pmd_t pmd)
{
	return pmd_val(pmd) & _SEGMENT_ENTRY_SPLIT;
}

static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr,
			      pmd_t *pmdp, pmd_t entry)
{
	if (!(pmd_val(entry) & _SEGMENT_ENTRY_INV) && MACHINE_HAS_EDAT1)
		pmd_val(entry) |= _SEGMENT_ENTRY_CO;
	*pmdp = entry;
}

static inline pmd_t pmd_mkhuge(pmd_t pmd)
{
	pmd_val(pmd) |= _SEGMENT_ENTRY_LARGE;
	return pmd;
}

static inline pmd_t pmd_wrprotect(pmd_t pmd)
{
	pmd_val(pmd) |= _SEGMENT_ENTRY_RO;
	return pmd;
}

static inline pmd_t pmd_mkdirty(pmd_t pmd)
{
	/* No dirty bit in the segment table entry. */
	return pmd;
}

static inline pmd_t pmd_mkold(pmd_t pmd)
{
	/* No referenced bit in the segment table entry. */
	return pmd;
}

static inline pmd_t pmd_mkyoung(pmd_t pmd)
{
	/* No referenced bit in the segment table entry. */
	return pmd;
}

#define __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG
static inline int pmdp_test_and_clear_young(struct vm_area_struct *vma,
					    unsigned long address, pmd_t *pmdp)
{
	unsigned long pmd_addr = pmd_val(*pmdp) & HPAGE_MASK;
	long tmp, rc;
	int counter;

	rc = 0;
	if (MACHINE_HAS_RRBM) {
		counter = PTRS_PER_PTE >> 6;
		asm volatile(
			"0:	.insn	rre,0xb9ae0000,%0,%3\n"	/* rrbm */
			"	ogr	%1,%0\n"
			"	la	%3,0(%4,%3)\n"
			"	brct	%2,0b\n"
			: "=&d" (tmp), "+&d" (rc), "+d" (counter),
			  "+a" (pmd_addr)
			: "a" (64 * 4096UL) : "cc");
		rc = !!rc;
	} else {
		counter = PTRS_PER_PTE;
		asm volatile(
			"0:	rrbe	0,%2\n"
			"	la	%2,0(%3,%2)\n"
			"	brc	12,1f\n"
			"	lhi	%0,1\n"
			"1:	brct	%1,0b\n"
			: "+d" (rc), "+d" (counter), "+a" (pmd_addr)
			: "a" (4096UL) : "cc");
	}
	return rc;
}

#define __HAVE_ARCH_PMDP_GET_AND_CLEAR
static inline pmd_t pmdp_get_and_clear(struct mm_struct *mm,
				       unsigned long address, pmd_t *pmdp)
{
	pmd_t pmd = *pmdp;

	__pmd_idte(address, pmdp);
	pmd_clear(pmdp);
	return pmd;
}

#define __HAVE_ARCH_PMDP_CLEAR_FLUSH
static inline pmd_t pmdp_clear_flush(struct vm_area_struct *vma,
				     unsigned long address, pmd_t *pmdp)
{
	return pmdp_get_and_clear(vma->vm_mm, address, pmdp);
}

#define __HAVE_ARCH_PMDP_INVALIDATE
static inline void pmdp_invalidate(struct vm_area_struct *vma,
				   unsigned long address, pmd_t *pmdp)
{
	__pmd_idte(address, pmdp);
}

#define __HAVE_ARCH_PMDP_SET_WRPROTECT
static inline void pmdp_set_wrprotect(struct mm_struct *mm,
				      unsigned long address, pmd_t *pmdp)
{
	pmd_t pmd = *pmdp;

	if (pmd_write(pmd)) {
		__pmd_idte(address, pmdp);
		set_pmd_at(mm, address, pmdp, pmd_wrprotect(pmd));
	}
}

#define pfn_pmd(pfn, pgprot)	mk_pmd_phys(__pa((pfn) << PAGE_SHIFT), (pgprot))
#define mk_pmd(page, pgprot)	pfn_pmd(page_to_pfn(page), (pgprot))

static inline int pmd_trans_huge(pmd_t pmd)
{
	return pmd_val(pmd) & _SEGMENT_ENTRY_LARGE;
}

static inline int has_transparent_hugepage(void)
{
	return MACHINE_HAS_HPAGE ? 1 : 0;
}

static inline unsigned long pmd_pfn(pmd_t pmd)
{
	return pmd_val(pmd) >> PAGE_SHIFT;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

/*
 * 31 bit swap entry format:
 * A page-table entry has some bits we have to treat in a special way.
 * Bits 0, 20 and bit 23 have to be zero, otherwise a specification
 * exception will occur instead of a page translation exception. The
 * specification exception has the bad habit not to store necessary
 * information in the lowcore.
 * Bit 21 and bit 22 are the page invalid bit and the page protection
 * bit. We set both to indicate a swapped page.
 * Bit 30 and 31 are used to distinguish the different page types. For
 * a swapped page these bits need to be zero.
 * This leaves the bits 1-19 and bits 24-29 to store type and offset.
 * We use the 5 bits from 25-29 for the type and the 20 bits from 1-19
 * plus 24 for the offset.
 * 0|     offset        |0110|o|type |00|
 * 0 0000000001111111111 2222 2 22222 33
 * 0 1234567890123456789 0123 4 56789 01
 *
 * 64 bit swap entry format:
 * A page-table entry has some bits we have to treat in a special way.
 * Bits 52 and bit 55 have to be zero, otherwise a specification
 * exception will occur instead of a page translation exception. The
 * specification exception has the bad habit not to store necessary
 * information in the lowcore.
 * Bit 53 and bit 54 are the page invalid bit and the page protection
 * bit. We set both to indicate a swapped page.
 * Bit 62 and 63 are used to distinguish the different page types. For
 * a swapped page these bits need to be zero.
 * This leaves the bits 0-51 and bits 56-61 to store type and offset.
 * We use the 5 bits from 57-61 for the type and the 53 bits from 0-51
 * plus 56 for the offset.
 * |                     offset                        |0110|o|type |00|
 *  0000000000111111111122222222223333333333444444444455 5555 5 55566 66
 *  0123456789012345678901234567890123456789012345678901 2345 6 78901 23
 */
#ifndef CONFIG_64BIT
#define __SWP_OFFSET_MASK (~0UL >> 12)
#else
#define __SWP_OFFSET_MASK (~0UL >> 11)
#endif
static inline pte_t mk_swap_pte(unsigned long type, unsigned long offset)
{
	pte_t pte;
	offset &= __SWP_OFFSET_MASK;
	pte_val(pte) = _PAGE_TYPE_SWAP | ((type & 0x1f) << 2) |
		((offset & 1UL) << 7) | ((offset & ~1UL) << 11);
	return pte;
}

#define __swp_type(entry)	(((entry).val >> 2) & 0x1f)
#define __swp_offset(entry)	(((entry).val >> 11) | (((entry).val >> 7) & 1))
#define __swp_entry(type,offset) ((swp_entry_t) { pte_val(mk_swap_pte((type),(offset))) })

#define __pte_to_swp_entry(pte)	((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x)	((pte_t) { (x).val })
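
/*
 * Editorial sketch: round-tripping a swap entry through the encoding
 * described in the comment above. Illustrative only.
 */
#if 0	/* illustrative only, never compiled */
swp_entry_t entry = __swp_entry(type, offset);
pte_t pte = __swp_entry_to_pte(entry);
/* then __swp_type(__pte_to_swp_entry(pte)) == type and
 * __swp_offset(__pte_to_swp_entry(pte)) == offset */
#endif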
#ifndef CONFIG_64BIT
# define PTE_FILE_MAX_BITS	26
#else /* CONFIG_64BIT */
# define PTE_FILE_MAX_BITS	59
#endif /* CONFIG_64BIT */

#define pte_to_pgoff(__pte) \
	((((__pte).pte >> 12) << 7) + (((__pte).pte >> 1) & 0x7f))

#define pgoff_to_pte(__off) \
	((pte_t) { ((((__off) & 0x7f) << 1) + (((__off) >> 7) << 12)) \
		   | _PAGE_TYPE_FILE })

#endif /* !__ASSEMBLY__ */

#define kern_addr_valid(addr)   (1)

extern int vmem_add_mapping(unsigned long start, unsigned long size);
extern int vmem_remove_mapping(unsigned long start, unsigned long size);
extern int s390_enable_sie(void);

/*
 * No page table caches to initialise
 */
static inline void pgtable_cache_init(void) { }
static inline void check_pgt_cache(void) { }

#include <asm-generic/pgtable.h>

#endif /* _ASM_S390_PGTABLE_H */