/*
 *  S390 version
 *    Copyright IBM Corp. 1999, 2000
 *    Author(s): Hartmut Penner (hp@de.ibm.com)
 *               Ulrich Weigand (weigand@de.ibm.com)
 *               Martin Schwidefsky (schwidefsky@de.ibm.com)
 *
 *  Derived from "include/asm-i386/pgtable.h"
 */

#ifndef _ASM_S390_PGTABLE_H
#define _ASM_S390_PGTABLE_H

/*
 * The Linux memory management assumes a three-level page table setup. For
 * s390 31 bit we "fold" the mid level into the top-level page table, so
 * that we physically have the same two-level page table as the s390 mmu
 * expects in 31 bit mode. For s390 64 bit we use three of the five levels
 * the hardware provides (region first and region second tables are not
 * used).
 *
 * The "pgd_xxx()" functions are trivial for a folded two-level
 * setup: the pgd is never bad, and a pmd always exists (as it's folded
 * into the pgd entry)
 *
 * This file contains the functions and defines necessary to modify and use
 * the S390 page table tree.
 */
#ifndef __ASSEMBLY__
#include <linux/sched.h>
#include <linux/mm_types.h>
#include <asm/bug.h>
#include <asm/page.h>

extern pgd_t swapper_pg_dir[] __attribute__ ((aligned (4096)));
extern void paging_init(void);
extern void vmem_map_init(void);
extern void fault_init(void);

/*
 * The S390 doesn't have any external MMU info: the kernel page
 * tables contain all the necessary information.
 */
#define update_mmu_cache(vma, address, ptep)	do { } while (0)

/*
 * ZERO_PAGE is a global shared page that is always zero; used
 * for zero-mapped memory areas etc..
 */

extern unsigned long empty_zero_page;
extern unsigned long zero_page_mask;

#define ZERO_PAGE(vaddr) \
	(virt_to_page((void *)(empty_zero_page + \
	 (((unsigned long)(vaddr)) & zero_page_mask))))

#define is_zero_pfn is_zero_pfn
static inline int is_zero_pfn(unsigned long pfn)
{
	extern unsigned long zero_pfn;
	unsigned long offset_from_zero_pfn = pfn - zero_pfn;
	return offset_from_zero_pfn <= (zero_page_mask >> PAGE_SHIFT);
}

#define my_zero_pfn(addr)	page_to_pfn(ZERO_PAGE(addr))

#endif /* !__ASSEMBLY__ */

/*
 * PMD_SHIFT determines the size of the area a second-level page
 * table can map
 * PGDIR_SHIFT determines what a third-level page table entry can map
 */
#ifndef CONFIG_64BIT
# define PMD_SHIFT	20
# define PUD_SHIFT	20
# define PGDIR_SHIFT	20
#else /* CONFIG_64BIT */
# define PMD_SHIFT	20
# define PUD_SHIFT	31
# define PGDIR_SHIFT	42
#endif /* CONFIG_64BIT */

#define PMD_SIZE	(1UL << PMD_SHIFT)
#define PMD_MASK	(~(PMD_SIZE-1))
#define PUD_SIZE	(1UL << PUD_SHIFT)
#define PUD_MASK	(~(PUD_SIZE-1))
#define PGDIR_SIZE	(1UL << PGDIR_SHIFT)
#define PGDIR_MASK	(~(PGDIR_SIZE-1))
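/*
 * Illustration (editor's addition, not part of the original header): with
 * the 64 bit shifts above, each level of the tree covers
 *	PMD_SIZE   = 1UL << 20 = 1 MB	(one segment table entry)
 *	PUD_SIZE   = 1UL << 31 = 2 GB	(one region third table entry)
 *	PGDIR_SIZE = 1UL << 42 = 4 TB	(one region second table entry)
 * A hypothetical compile-time check of these sizes could look like the
 * sketch below; it is kept under #if 0 and is not built.
 */
#if 0	/* example only */
static inline void __pgtable_level_size_example(void)
{
#ifdef CONFIG_64BIT
	BUILD_BUG_ON(PMD_SIZE != (1UL << 20));		/* 1 MB */
	BUILD_BUG_ON(PUD_SIZE != (1UL << 31));		/* 2 GB */
	BUILD_BUG_ON(PGDIR_SIZE != (1UL << 42));	/* 4 TB */
#endif
}
#endif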
/*
 * entries per page directory level: the S390 is two-level, so
 * we don't really have any PMD directory physically.
 * for S390 segment-table entries are combined to one PGD
 * that leads to 1024 pte per pgd
 */
#define PTRS_PER_PTE	256
#ifndef CONFIG_64BIT
#define PTRS_PER_PMD	1
#define PTRS_PER_PUD	1
#else /* CONFIG_64BIT */
#define PTRS_PER_PMD	2048
#define PTRS_PER_PUD	2048
#endif /* CONFIG_64BIT */
#define PTRS_PER_PGD	2048

#define FIRST_USER_ADDRESS  0

#define pte_ERROR(e) \
	printk("%s:%d: bad pte %p.\n", __FILE__, __LINE__, (void *) pte_val(e))
#define pmd_ERROR(e) \
	printk("%s:%d: bad pmd %p.\n", __FILE__, __LINE__, (void *) pmd_val(e))
#define pud_ERROR(e) \
	printk("%s:%d: bad pud %p.\n", __FILE__, __LINE__, (void *) pud_val(e))
#define pgd_ERROR(e) \
	printk("%s:%d: bad pgd %p.\n", __FILE__, __LINE__, (void *) pgd_val(e))

#ifndef __ASSEMBLY__
/*
 * The vmalloc area will always be on the topmost area of the kernel
 * mapping. We reserve 96MB (31bit) / 128GB (64bit) for vmalloc,
 * which should be enough for any sane case.
 * By putting vmalloc at the top, we maximise the gap between physical
 * memory and vmalloc to catch misplaced memory accesses. As a side
 * effect, this also makes sure that 64 bit module code cannot be used
 * as system call address.
 */
extern unsigned long VMALLOC_START;
extern unsigned long VMALLOC_END;
extern struct page *vmemmap;

#define VMEM_MAX_PHYS ((unsigned long) vmemmap)
/*
 * A 31 bit pagetable entry of S390 has following format:
 *  |   PFRA          |    |  OS  |
 * 0                   0IP0
 * 00000000001111111111222222222233
 * 01234567890123456789012345678901
 *
 * I Page-Invalid Bit:    Page is not available for address-translation
 * P Page-Protection Bit: Store access not possible for page
 *
 * A 31 bit segment table entry of S390 has following format:
 *  |   P-table origin      |  |PTL
 * 0                         IC
 * 00000000001111111111222222222233
 * 01234567890123456789012345678901
 *
 * I Segment-Invalid Bit:    Segment is not available for address-translation
 * C Common-Segment Bit:     Segment is not private (PoP 3-30)
 * PTL Page-Table-Length:    Page-table length ((PTL+1)*16 entries -> up to 256)
 *
 * The 31 bit segment table origin of S390 has following format:
 *
 *  |S-table origin   |     | STL |
 * X                   **GPS
 * 00000000001111111111222222222233
 * 01234567890123456789012345678901
 *
 * X Space-Switch event:
 * G Segment-Invalid Bit:     *
 * P Private-Space Bit:       Segment is not private (PoP 3-30)
 * S Storage-Alteration:
 * STL Segment-Table-Length:  Segment-table length ((STL+1)*16 entries -> up to 2048)
 *
 * A 64 bit pagetable entry of S390 has following format:
 * |                     PFRA                         |0IPC|  OS  |
 * 0000000000111111111122222222223333333333444444444455555555556666
 * 0123456789012345678901234567890123456789012345678901234567890123
 *
 * I Page-Invalid Bit:    Page is not available for address-translation
 * P Page-Protection Bit: Store access not possible for page
 * C Change-bit override: HW is not required to set change bit
 *
 * A 64 bit segment table entry of S390 has following format:
 * |        P-table origin                              |      TT
 * 0000000000111111111122222222223333333333444444444455555555556666
 * 0123456789012345678901234567890123456789012345678901234567890123
 *
 * I Segment-Invalid Bit:    Segment is not available for address-translation
 * C Common-Segment Bit:     Segment is not private (PoP 3-30)
 * P Page-Protection Bit:    Store access not possible for page
 * TT Type 00
 *
 * A 64 bit region table entry of S390 has following format:
 * |        S-table origin                             |   TF  TTTL
 * 0000000000111111111122222222223333333333444444444455555555556666
 * 0123456789012345678901234567890123456789012345678901234567890123
 *
 * I Segment-Invalid Bit:    Segment is not available for address-translation
 * TT Type 01
 * TF
 * TL Table length
 *
 * The 64 bit region table origin of S390 has following format:
 * |      region table origin                          |       DTTL
 * 0000000000111111111122222222223333333333444444444455555555556666
 * 0123456789012345678901234567890123456789012345678901234567890123
 *
 * X Space-Switch event:
 * G Segment-Invalid Bit:
 * P Private-Space Bit:
 * S Storage-Alteration:
 * R Real space
 * TL Table-Length:
 *
 * A storage key has the following format:
 * | ACC |F|R|C|0|
 *  0   3 4 5 6 7
 * ACC: access key
 * F  : fetch protection bit
 * R  : referenced bit
 * C  : changed bit
 */
/* Hardware bits in the page table entry */
#define _PAGE_CO	0x100		/* HW Change-bit override */
#define _PAGE_RO	0x200		/* HW read-only bit  */
#define _PAGE_INVALID	0x400		/* HW invalid bit    */

/* Software bits in the page table entry */
#define _PAGE_SWT	0x001		/* SW pte type bit t */
#define _PAGE_SWX	0x002		/* SW pte type bit x */
#define _PAGE_SWC	0x004		/* SW pte changed bit (for KVM) */
#define _PAGE_SWR	0x008		/* SW pte referenced bit (for KVM) */
#define _PAGE_SPECIAL	0x010		/* SW associated with special page */
#define __HAVE_ARCH_PTE_SPECIAL

/* Set of bits not changed in pte_modify */
#define _PAGE_CHG_MASK	(PAGE_MASK | _PAGE_SPECIAL | _PAGE_SWC | _PAGE_SWR)

/* Six different types of pages. */
#define _PAGE_TYPE_EMPTY	0x400
#define _PAGE_TYPE_NONE		0x401
#define _PAGE_TYPE_SWAP		0x403
#define _PAGE_TYPE_FILE		0x601	/* bit 0x002 is used for offset !! */
#define _PAGE_TYPE_RO		0x200
#define _PAGE_TYPE_RW		0x000

/*
 * Only four types for huge pages, using the invalid bit and protection bit
 * of a segment table entry.
 */
#define _HPAGE_TYPE_EMPTY	0x020	/* _SEGMENT_ENTRY_INV */
#define _HPAGE_TYPE_NONE	0x220
#define _HPAGE_TYPE_RO		0x200	/* _SEGMENT_ENTRY_RO  */
#define _HPAGE_TYPE_RW		0x000
/*
 * PTE type bits are rather complicated. handle_pte_fault uses pte_present,
 * pte_none and pte_file to find out the pte type WITHOUT holding the page
 * table lock. ptep_clear_flush, on the other hand, uses the ipte instruction
 * to invalidate a given pte. ipte sets the hw invalid bit and clears all tlbs
 * for the page. The page table entry is set to _PAGE_TYPE_EMPTY afterwards.
 * This change is done while holding the lock, but the intermediate step
 * of a previously valid pte with the hw invalid bit set can be observed by
 * handle_pte_fault. That makes it necessary that all valid pte types with
 * the hw invalid bit set must be distinguishable from the four pte types
 * empty, none, swap and file.
 *
 *			irxt  ipte  irxt
 * _PAGE_TYPE_EMPTY	1000   ->   1000
 * _PAGE_TYPE_NONE	1001   ->   1001
 * _PAGE_TYPE_SWAP	1011   ->   1011
 * _PAGE_TYPE_FILE	11?1   ->   11?1
 * _PAGE_TYPE_RO	0100   ->   1100
 * _PAGE_TYPE_RW	0000   ->   1000
 *
 * pte_none is true for bits combinations 1000, 1010, 1100, 1110
 * pte_present is true for bits combinations 0000, 0010, 0100, 0110, 1001
 * pte_file is true for bits combinations 1101, 1111
 * swap pte is 1011 and 0001, 0011, 0101, 0111 are invalid.
 */
#ifndef CONFIG_64BIT

/* Bits in the segment table address-space-control-element */
#define _ASCE_SPACE_SWITCH	0x80000000UL	/* space switch event */
#define _ASCE_ORIGIN_MASK	0x7ffff000UL	/* segment table origin */
#define _ASCE_PRIVATE_SPACE	0x100	/* private space control */
#define _ASCE_ALT_EVENT		0x80	/* storage alteration event control */
#define _ASCE_TABLE_LENGTH	0x7f	/* 128 x 64 entries = 8k */

/* Bits in the segment table entry */
#define _SEGMENT_ENTRY_ORIGIN	0x7fffffc0UL	/* page table origin */
#define _SEGMENT_ENTRY_RO	0x200	/* page protection bit */
#define _SEGMENT_ENTRY_INV	0x20	/* invalid segment table entry */
#define _SEGMENT_ENTRY_COMMON	0x10	/* common segment bit */
#define _SEGMENT_ENTRY_PTL	0x0f	/* page table length */

#define _SEGMENT_ENTRY		(_SEGMENT_ENTRY_PTL)
#define _SEGMENT_ENTRY_EMPTY	(_SEGMENT_ENTRY_INV)

/* Page status table bits for virtualization */
#define RCP_ACC_BITS	0xf0000000UL
#define RCP_FP_BIT	0x08000000UL
#define RCP_PCL_BIT	0x00800000UL
#define RCP_HR_BIT	0x00400000UL
#define RCP_HC_BIT	0x00200000UL
#define RCP_GR_BIT	0x00040000UL
#define RCP_GC_BIT	0x00020000UL

/* User dirty / referenced bit for KVM's migration feature */
#define KVM_UR_BIT	0x00008000UL
#define KVM_UC_BIT	0x00004000UL

#else /* CONFIG_64BIT */

/* Bits in the segment/region table address-space-control-element */
#define _ASCE_ORIGIN		~0xfffUL	/* segment table origin */
#define _ASCE_PRIVATE_SPACE	0x100	/* private space control */
#define _ASCE_ALT_EVENT		0x80	/* storage alteration event control */
#define _ASCE_SPACE_SWITCH	0x40	/* space switch event */
#define _ASCE_REAL_SPACE	0x20	/* real space control */
#define _ASCE_TYPE_MASK		0x0c	/* asce table type mask */
#define _ASCE_TYPE_REGION1	0x0c	/* region first table type */
#define _ASCE_TYPE_REGION2	0x08	/* region second table type */
#define _ASCE_TYPE_REGION3	0x04	/* region third table type */
#define _ASCE_TYPE_SEGMENT	0x00	/* segment table type */
#define _ASCE_TABLE_LENGTH	0x03	/* region table length */

/* Bits in the region table entry */
#define _REGION_ENTRY_ORIGIN	~0xfffUL	/* region/segment table origin */
#define _REGION_ENTRY_INV	0x20	/* invalid region table entry */
#define _REGION_ENTRY_TYPE_MASK	0x0c	/* region/segment table type mask */
#define _REGION_ENTRY_TYPE_R1	0x0c	/* region first table type */
#define _REGION_ENTRY_TYPE_R2	0x08	/* region second table type */
#define _REGION_ENTRY_TYPE_R3	0x04	/* region third table type */
#define _REGION_ENTRY_LENGTH	0x03	/* region third length */

#define _REGION1_ENTRY		(_REGION_ENTRY_TYPE_R1 | _REGION_ENTRY_LENGTH)
#define _REGION1_ENTRY_EMPTY	(_REGION_ENTRY_TYPE_R1 | _REGION_ENTRY_INV)
#define _REGION2_ENTRY		(_REGION_ENTRY_TYPE_R2 | _REGION_ENTRY_LENGTH)
#define _REGION2_ENTRY_EMPTY	(_REGION_ENTRY_TYPE_R2 | _REGION_ENTRY_INV)
#define _REGION3_ENTRY		(_REGION_ENTRY_TYPE_R3 | _REGION_ENTRY_LENGTH)
#define _REGION3_ENTRY_EMPTY	(_REGION_ENTRY_TYPE_R3 | _REGION_ENTRY_INV)

/* Bits in the segment table entry */
#define _SEGMENT_ENTRY_ORIGIN	~0x7ffUL	/* segment table origin */
#define _SEGMENT_ENTRY_RO	0x200	/* page protection bit */
#define _SEGMENT_ENTRY_INV	0x20	/* invalid segment table entry */

#define _SEGMENT_ENTRY		(0)
#define _SEGMENT_ENTRY_EMPTY	(_SEGMENT_ENTRY_INV)

#define _SEGMENT_ENTRY_LARGE	0x400	/* STE-format control, large page */
#define _SEGMENT_ENTRY_CO	0x100	/* change-recording override */
#define _SEGMENT_ENTRY_SPLIT_BIT 0	/* THP splitting bit number */
#define _SEGMENT_ENTRY_SPLIT	(1UL << _SEGMENT_ENTRY_SPLIT_BIT)

/* Set of bits not changed in pmd_modify */
#define _SEGMENT_CHG_MASK	(_SEGMENT_ENTRY_ORIGIN | _SEGMENT_ENTRY_LARGE \
				 | _SEGMENT_ENTRY_SPLIT | _SEGMENT_ENTRY_CO)

/* Page status table bits for virtualization */
#define RCP_ACC_BITS	0xf000000000000000UL
#define RCP_FP_BIT	0x0800000000000000UL
#define RCP_PCL_BIT	0x0080000000000000UL
#define RCP_HR_BIT	0x0040000000000000UL
#define RCP_HC_BIT	0x0020000000000000UL
#define RCP_GR_BIT	0x0004000000000000UL
#define RCP_GC_BIT	0x0002000000000000UL

/* User dirty / referenced bit for KVM's migration feature */
#define KVM_UR_BIT	0x0000800000000000UL
#define KVM_UC_BIT	0x0000400000000000UL

#endif /* CONFIG_64BIT */

/*
 * A user page table pointer has the space-switch-event bit, the
 * private-space-control bit and the storage-alteration-event-control
 * bit set. A kernel page table pointer doesn't need them.
 */
#define _ASCE_USER_BITS		(_ASCE_SPACE_SWITCH | _ASCE_PRIVATE_SPACE | \
				 _ASCE_ALT_EVENT)
/*
 * Page protection definitions.
 */
#define PAGE_NONE	__pgprot(_PAGE_TYPE_NONE)
#define PAGE_RO		__pgprot(_PAGE_TYPE_RO)
#define PAGE_RW		__pgprot(_PAGE_TYPE_RW)

#define PAGE_KERNEL	PAGE_RW
#define PAGE_COPY	PAGE_RO

/*
 * On s390 the page table entry has an invalid bit and a read-only bit.
 * Read permission implies execute permission and write permission
 * implies read permission.
 */
         /*xwr*/
#define __P000	PAGE_NONE
#define __P001	PAGE_RO
#define __P010	PAGE_RO
#define __P011	PAGE_RO
#define __P100	PAGE_RO
#define __P101	PAGE_RO
#define __P110	PAGE_RO
#define __P111	PAGE_RO

#define __S000	PAGE_NONE
#define __S001	PAGE_RO
#define __S010	PAGE_RW
#define __S011	PAGE_RW
#define __S100	PAGE_RO
#define __S101	PAGE_RO
#define __S110	PAGE_RW
#define __S111	PAGE_RW

static inline int mm_exclusive(struct mm_struct *mm)
{
	return likely(mm == current->active_mm &&
		      atomic_read(&mm->context.attach_count) <= 1);
}

static inline int mm_has_pgste(struct mm_struct *mm)
{
#ifdef CONFIG_PGSTE
	if (unlikely(mm->context.has_pgste))
		return 1;
#endif
	return 0;
}
/*
 * pgd/pmd/pte query functions
 */
#ifndef CONFIG_64BIT

static inline int pgd_present(pgd_t pgd) { return 1; }
static inline int pgd_none(pgd_t pgd)    { return 0; }
static inline int pgd_bad(pgd_t pgd)     { return 0; }

static inline int pud_present(pud_t pud) { return 1; }
static inline int pud_none(pud_t pud)    { return 0; }
static inline int pud_bad(pud_t pud)     { return 0; }

#else /* CONFIG_64BIT */

static inline int pgd_present(pgd_t pgd)
{
	if ((pgd_val(pgd) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R2)
		return 1;
	return (pgd_val(pgd) & _REGION_ENTRY_ORIGIN) != 0UL;
}

static inline int pgd_none(pgd_t pgd)
{
	if ((pgd_val(pgd) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R2)
		return 0;
	return (pgd_val(pgd) & _REGION_ENTRY_INV) != 0UL;
}

static inline int pgd_bad(pgd_t pgd)
{
	/*
	 * With dynamic page table levels the pgd can be a region table
	 * entry or a segment table entry. Check for the bits that are
	 * invalid for either kind of entry.
	 */
	unsigned long mask =
		~_SEGMENT_ENTRY_ORIGIN & ~_REGION_ENTRY_INV &
		~_REGION_ENTRY_TYPE_MASK & ~_REGION_ENTRY_LENGTH;
	return (pgd_val(pgd) & mask) != 0;
}

static inline int pud_present(pud_t pud)
{
	if ((pud_val(pud) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R3)
		return 1;
	return (pud_val(pud) & _REGION_ENTRY_ORIGIN) != 0UL;
}

static inline int pud_none(pud_t pud)
{
	if ((pud_val(pud) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R3)
		return 0;
	return (pud_val(pud) & _REGION_ENTRY_INV) != 0UL;
}

static inline int pud_bad(pud_t pud)
{
	/*
	 * With dynamic page table levels the pud can be a region table
	 * entry or a segment table entry. Check for the bits that are
	 * invalid for either kind of entry.
	 */
	unsigned long mask =
		~_SEGMENT_ENTRY_ORIGIN & ~_REGION_ENTRY_INV &
		~_REGION_ENTRY_TYPE_MASK & ~_REGION_ENTRY_LENGTH;
	return (pud_val(pud) & mask) != 0;
}

#endif /* CONFIG_64BIT */
static inline int pmd_present(pmd_t pmd)
{
	return (pmd_val(pmd) & _SEGMENT_ENTRY_ORIGIN) != 0UL;
}

static inline int pmd_none(pmd_t pmd)
{
	return (pmd_val(pmd) & _SEGMENT_ENTRY_INV) != 0UL;
}

static inline int pmd_bad(pmd_t pmd)
{
	unsigned long mask = ~_SEGMENT_ENTRY_ORIGIN & ~_SEGMENT_ENTRY_INV;
	return (pmd_val(pmd) & mask) != _SEGMENT_ENTRY;
}

#define __HAVE_ARCH_PMDP_SPLITTING_FLUSH
extern void pmdp_splitting_flush(struct vm_area_struct *vma,
				 unsigned long addr, pmd_t *pmdp);

#define __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS
extern int pmdp_set_access_flags(struct vm_area_struct *vma,
				 unsigned long address, pmd_t *pmdp,
				 pmd_t entry, int dirty);

#define __HAVE_ARCH_PMDP_CLEAR_YOUNG_FLUSH
extern int pmdp_clear_flush_young(struct vm_area_struct *vma,
				  unsigned long address, pmd_t *pmdp);

#define __HAVE_ARCH_PMD_WRITE
static inline int pmd_write(pmd_t pmd)
{
	return (pmd_val(pmd) & _SEGMENT_ENTRY_RO) == 0;
}

static inline int pmd_young(pmd_t pmd)
{
	return 0;
}

static inline int pte_none(pte_t pte)
{
	return (pte_val(pte) & _PAGE_INVALID) && !(pte_val(pte) & _PAGE_SWT);
}

static inline int pte_present(pte_t pte)
{
	unsigned long mask = _PAGE_RO | _PAGE_INVALID | _PAGE_SWT | _PAGE_SWX;
	return (pte_val(pte) & mask) == _PAGE_TYPE_NONE ||
		(!(pte_val(pte) & _PAGE_INVALID) &&
		 !(pte_val(pte) & _PAGE_SWT));
}

static inline int pte_file(pte_t pte)
{
	unsigned long mask = _PAGE_RO | _PAGE_INVALID | _PAGE_SWT;
	return (pte_val(pte) & mask) == _PAGE_TYPE_FILE;
}

static inline int pte_special(pte_t pte)
{
	return (pte_val(pte) & _PAGE_SPECIAL);
}

#define __HAVE_ARCH_PTE_SAME
static inline int pte_same(pte_t a, pte_t b)
{
	return pte_val(a) == pte_val(b);
}
static inline pgste_t pgste_get_lock(pte_t *ptep)
{
	unsigned long new = 0;
#ifdef CONFIG_PGSTE
	unsigned long old;

	preempt_disable();
	asm(
		"	lg	%0,%2\n"
		"0:	lgr	%1,%0\n"
		"	nihh	%0,0xff7f\n"	/* clear RCP_PCL_BIT in old */
		"	oihh	%1,0x0080\n"	/* set RCP_PCL_BIT in new */
		"	csg	%0,%1,%2\n"
		"	jl	0b\n"
		: "=&d" (old), "=&d" (new), "=Q" (ptep[PTRS_PER_PTE])
		: "Q" (ptep[PTRS_PER_PTE]) : "cc");
#endif
	return __pgste(new);
}

static inline void pgste_set_unlock(pte_t *ptep, pgste_t pgste)
{
#ifdef CONFIG_PGSTE
	asm(
		"	nihh	%1,0xff7f\n"	/* clear RCP_PCL_BIT */
		"	stg	%1,%0\n"
		: "=Q" (ptep[PTRS_PER_PTE])
		: "d" (pgste_val(pgste)), "Q" (ptep[PTRS_PER_PTE]) : "cc");
	preempt_enable();
#endif
}
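/*
 * Usage sketch (editor's addition, illustrative only): pgste_get_lock and
 * pgste_set_unlock bracket any pte/pgste update for an mm that uses page
 * status table extensions; set_pte_at further below follows exactly this
 * pattern. The function name is hypothetical and the block is not built.
 */
#if 0	/* example only */
static inline void example_pgste_bracket(struct mm_struct *mm, pte_t *ptep,
					 pte_t entry)
{
	pgste_t pgste;

	if (mm_has_pgste(mm)) {
		pgste = pgste_get_lock(ptep);	/* set RCP_PCL_BIT, disable preemption */
		*ptep = entry;			/* update the pte under the lock */
		pgste_set_unlock(ptep, pgste);	/* clear RCP_PCL_BIT, enable preemption */
	}
}
#endif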
static inline pgste_t pgste_update_all(pte_t *ptep, pgste_t pgste)
{
#ifdef CONFIG_PGSTE
	unsigned long address, bits;
	unsigned char skey;

	if (!pte_present(*ptep))
		return pgste;
	address = pte_val(*ptep) & PAGE_MASK;
	skey = page_get_storage_key(address);
	bits = skey & (_PAGE_CHANGED | _PAGE_REFERENCED);
	/* Clear page changed & referenced bit in the storage key */
	if (bits & _PAGE_CHANGED)
		page_set_storage_key(address, skey ^ bits, 1);
	else if (bits)
		page_reset_referenced(address);
	/* Transfer page changed & referenced bit to guest bits in pgste */
	pgste_val(pgste) |= bits << 48;		/* RCP_GR_BIT & RCP_GC_BIT */
	/* Get host changed & referenced bits from pgste */
	bits |= (pgste_val(pgste) & (RCP_HR_BIT | RCP_HC_BIT)) >> 52;
	/* Clear host bits in pgste. */
	pgste_val(pgste) &= ~(RCP_HR_BIT | RCP_HC_BIT);
	pgste_val(pgste) &= ~(RCP_ACC_BITS | RCP_FP_BIT);
	/* Copy page access key and fetch protection bit to pgste */
	pgste_val(pgste) |=
		(unsigned long) (skey & (_PAGE_ACC_BITS | _PAGE_FP_BIT)) << 56;
	/* Transfer changed and referenced to kvm user bits */
	pgste_val(pgste) |= bits << 45;		/* KVM_UR_BIT & KVM_UC_BIT */
	/* Transfer changed & referenced to pte software bits */
	pte_val(*ptep) |= bits << 1;		/* _PAGE_SWR & _PAGE_SWC */
#endif
	return pgste;
}

static inline pgste_t pgste_update_young(pte_t *ptep, pgste_t pgste)
{
#ifdef CONFIG_PGSTE
	int young;

	if (!pte_present(*ptep))
		return pgste;
	young = page_reset_referenced(pte_val(*ptep) & PAGE_MASK);
	/* Transfer page referenced bit to pte software bit (host view) */
	if (young || (pgste_val(pgste) & RCP_HR_BIT))
		pte_val(*ptep) |= _PAGE_SWR;
	/* Clear host referenced bit in pgste. */
	pgste_val(pgste) &= ~RCP_HR_BIT;
	/* Transfer page referenced bit to guest bit in pgste */
	pgste_val(pgste) |= (unsigned long) young << 50; /* set RCP_GR_BIT */
#endif
	return pgste;
}

static inline void pgste_set_pte(pte_t *ptep, pgste_t pgste, pte_t entry)
{
#ifdef CONFIG_PGSTE
	unsigned long address;
	unsigned long okey, nkey;

	if (!pte_present(entry))
		return;
	address = pte_val(entry) & PAGE_MASK;
	okey = nkey = page_get_storage_key(address);
	nkey &= ~(_PAGE_ACC_BITS | _PAGE_FP_BIT);
	/* Set page access key and fetch protection bit from pgste */
	nkey |= (pgste_val(pgste) & (RCP_ACC_BITS | RCP_FP_BIT)) >> 56;
	if (okey != nkey)
		page_set_storage_key(address, nkey, 1);
#endif
}
/**
 * struct gmap - guest address space
 * @list: list head for the parent mm's list of guest address spaces
 * @mm: pointer to the parent mm_struct
 * @table: pointer to the page directory
 * @asce: address space control element for gmap page table
 * @crst_list: list of all crst tables used in the guest address space
 */
struct gmap {
	struct list_head list;
	struct mm_struct *mm;
	unsigned long *table;
	unsigned long asce;
	struct list_head crst_list;
};

/**
 * struct gmap_rmap - reverse mapping for segment table entries
 * @list: list head to chain the gmap_rmap structures
 * @entry: pointer to a segment table entry
 */
struct gmap_rmap {
	struct list_head list;
	unsigned long *entry;
};

/**
 * struct gmap_pgtable - gmap information attached to a page table
 * @vmaddr: address of the 1MB segment in the process virtual memory
 * @mapper: list of segment table entries mapping a page table
 */
struct gmap_pgtable {
	unsigned long vmaddr;
	struct list_head mapper;
};

struct gmap *gmap_alloc(struct mm_struct *mm);
void gmap_free(struct gmap *gmap);
void gmap_enable(struct gmap *gmap);
void gmap_disable(struct gmap *gmap);
int gmap_map_segment(struct gmap *gmap, unsigned long from,
		     unsigned long to, unsigned long length);
int gmap_unmap_segment(struct gmap *gmap, unsigned long to, unsigned long len);
unsigned long __gmap_fault(unsigned long address, struct gmap *);
unsigned long gmap_fault(unsigned long address, struct gmap *);
void gmap_discard(unsigned long from, unsigned long to, struct gmap *);
/*
 * Certain architectures need to do special things when PTEs
 * within a page table are directly modified. Thus, the following
 * hook is made available.
 */
static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
			      pte_t *ptep, pte_t entry)
{
	pgste_t pgste;

	if (mm_has_pgste(mm)) {
		pgste = pgste_get_lock(ptep);
		pgste_set_pte(ptep, pgste, entry);
		*ptep = entry;
		pgste_set_unlock(ptep, pgste);
	} else
		*ptep = entry;
}
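/*
 * Example (editor's addition, illustrative only): a typical caller builds
 * a pte with mk_pte (defined further below) and installs it through
 * set_pte_at. The function name and locals are hypothetical; locking is
 * elided and the block is not built.
 */
#if 0	/* example only */
static inline void example_install_pte(struct vm_area_struct *vma,
				       unsigned long addr, pte_t *ptep,
				       struct page *page)
{
	pte_t entry = mk_pte(page, vma->vm_page_prot);

	set_pte_at(vma->vm_mm, addr, ptep, entry);
}
#endif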
/*
 * query functions pte_write/pte_dirty/pte_young only work if
 * pte_present() is true. Undefined behaviour if not..
 */
static inline int pte_write(pte_t pte)
{
	return (pte_val(pte) & _PAGE_RO) == 0;
}

static inline int pte_dirty(pte_t pte)
{
#ifdef CONFIG_PGSTE
	if (pte_val(pte) & _PAGE_SWC)
		return 1;
#endif
	return 0;
}

static inline int pte_young(pte_t pte)
{
#ifdef CONFIG_PGSTE
	if (pte_val(pte) & _PAGE_SWR)
		return 1;
#endif
	return 0;
}
/*
 * pgd/pmd/pte modification functions
 */
static inline void pgd_clear(pgd_t *pgd)
{
#ifdef CONFIG_64BIT
	if ((pgd_val(*pgd) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R2)
		pgd_val(*pgd) = _REGION2_ENTRY_EMPTY;
#endif
}

static inline void pud_clear(pud_t *pud)
{
#ifdef CONFIG_64BIT
	if ((pud_val(*pud) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R3)
		pud_val(*pud) = _REGION3_ENTRY_EMPTY;
#endif
}

static inline void pmd_clear(pmd_t *pmdp)
{
	pmd_val(*pmdp) = _SEGMENT_ENTRY_EMPTY;
}

static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	pte_val(*ptep) = _PAGE_TYPE_EMPTY;
}
/*
 * The following pte modification functions only work if
 * pte_present() is true. Undefined behaviour if not..
 */
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	pte_val(pte) &= _PAGE_CHG_MASK;
	pte_val(pte) |= pgprot_val(newprot);
	return pte;
}

static inline pte_t pte_wrprotect(pte_t pte)
{
	/* Do not clobber _PAGE_TYPE_NONE pages! */
	if (!(pte_val(pte) & _PAGE_INVALID))
		pte_val(pte) |= _PAGE_RO;
	return pte;
}

static inline pte_t pte_mkwrite(pte_t pte)
{
	pte_val(pte) &= ~_PAGE_RO;
	return pte;
}

static inline pte_t pte_mkclean(pte_t pte)
{
#ifdef CONFIG_PGSTE
	pte_val(pte) &= ~_PAGE_SWC;
#endif
	return pte;
}

static inline pte_t pte_mkdirty(pte_t pte)
{
	return pte;
}

static inline pte_t pte_mkold(pte_t pte)
{
#ifdef CONFIG_PGSTE
	pte_val(pte) &= ~_PAGE_SWR;
#endif
	return pte;
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	return pte;
}

static inline pte_t pte_mkspecial(pte_t pte)
{
	pte_val(pte) |= _PAGE_SPECIAL;
	return pte;
}
#ifdef CONFIG_HUGETLB_PAGE
static inline pte_t pte_mkhuge(pte_t pte)
{
	/*
	 * PROT_NONE needs to be remapped from the pte type to the ste type.
	 * The HW invalid bit is also different for pte and ste. The pte
	 * invalid bit happens to be the same as the ste _SEGMENT_ENTRY_LARGE
	 * bit, so we don't have to clear it.
	 */
	if (pte_val(pte) & _PAGE_INVALID) {
		if (pte_val(pte) & _PAGE_SWT)
			pte_val(pte) |= _HPAGE_TYPE_NONE;
		pte_val(pte) |= _SEGMENT_ENTRY_INV;
	}
	/*
	 * Clear SW pte bits SWT and SWX, there are no SW bits in a segment
	 * table entry.
	 */
	pte_val(pte) &= ~(_PAGE_SWT | _PAGE_SWX);
	/*
	 * Also set the change-override bit because we don't need dirty bit
	 * tracking for hugetlbfs pages.
	 */
	pte_val(pte) |= (_SEGMENT_ENTRY_LARGE | _SEGMENT_ENTRY_CO);
	return pte;
}
#endif
/*
 * Get (and clear) the user dirty bit for a pte.
 */
static inline int ptep_test_and_clear_user_dirty(struct mm_struct *mm,
						 pte_t *ptep)
{
	pgste_t pgste;
	int dirty = 0;

	if (mm_has_pgste(mm)) {
		pgste = pgste_get_lock(ptep);
		pgste = pgste_update_all(ptep, pgste);
		dirty = !!(pgste_val(pgste) & KVM_UC_BIT);
		pgste_val(pgste) &= ~KVM_UC_BIT;
		pgste_set_unlock(ptep, pgste);
	}
	return dirty;
}

/*
 * Get (and clear) the user referenced bit for a pte.
 */
static inline int ptep_test_and_clear_user_young(struct mm_struct *mm,
						 pte_t *ptep)
{
	pgste_t pgste;
	int young = 0;

	if (mm_has_pgste(mm)) {
		pgste = pgste_get_lock(ptep);
		pgste = pgste_update_young(ptep, pgste);
		young = !!(pgste_val(pgste) & KVM_UR_BIT);
		pgste_val(pgste) &= ~KVM_UR_BIT;
		pgste_set_unlock(ptep, pgste);
	}
	return young;
}
#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
static inline int ptep_test_and_clear_young(struct vm_area_struct *vma,
					    unsigned long addr, pte_t *ptep)
{
	pgste_t pgste;
	pte_t pte;

	if (mm_has_pgste(vma->vm_mm)) {
		pgste = pgste_get_lock(ptep);
		pgste = pgste_update_young(ptep, pgste);
		pte = *ptep;
		*ptep = pte_mkold(pte);
		pgste_set_unlock(ptep, pgste);
		return pte_young(pte);
	}
	return 0;
}

#define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
static inline int ptep_clear_flush_young(struct vm_area_struct *vma,
					 unsigned long address, pte_t *ptep)
{
	/* No need to flush the TLB:
	 * on s390 the reference bits are in the storage key and never in
	 * the TLB. With virtualization we handle the reference bit; without
	 * it we can simply return. */
	return ptep_test_and_clear_young(vma, address, ptep);
}
static inline void __ptep_ipte(unsigned long address, pte_t *ptep)
{
	if (!(pte_val(*ptep) & _PAGE_INVALID)) {
#ifndef CONFIG_64BIT
		/* pto must point to the start of the segment table */
		pte_t *pto = (pte_t *) (((unsigned long) ptep) & 0x7ffffc00);
#else
		/* ipte in zarch mode can do the math */
		pte_t *pto = ptep;
#endif
		asm volatile(
			"	ipte	%2,%3"
			: "=m" (*ptep) : "m" (*ptep),
			  "a" (pto), "a" (address));
	}
}
/*
 * This is hard to understand. ptep_get_and_clear and ptep_clear_flush
 * both clear the TLB for the unmapped pte. The reason is that
 * ptep_get_and_clear is used in common code (e.g. change_pte_range)
 * to modify an active pte. The sequence is
 *   1) ptep_get_and_clear
 *   2) set_pte_at
 *   3) flush_tlb_range
 * On s390 the tlb needs to get flushed with the modification of the pte
 * if the pte is active. The only way this can be implemented is to
 * have ptep_get_and_clear do the tlb flush. In exchange flush_tlb_range
 * is a nop.
 */
#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
				       unsigned long address, pte_t *ptep)
{
	pgste_t pgste;
	pte_t pte;

	mm->context.flush_mm = 1;
	if (mm_has_pgste(mm))
		pgste = pgste_get_lock(ptep);

	pte = *ptep;
	if (!mm_exclusive(mm))
		__ptep_ipte(address, ptep);
	pte_val(*ptep) = _PAGE_TYPE_EMPTY;

	if (mm_has_pgste(mm)) {
		pgste = pgste_update_all(&pte, pgste);
		pgste_set_unlock(ptep, pgste);
	}
	return pte;
}
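/*
 * Sketch (editor's addition) of the common-code sequence described in the
 * comment above, to show why ptep_get_and_clear must flush the TLB itself
 * on s390. The function name is hypothetical and heavily simplified from
 * change_pte_range; the block is not built.
 */
#if 0	/* example only */
static inline void example_change_pte(struct mm_struct *mm,
				      unsigned long addr, pte_t *ptep,
				      pgprot_t newprot)
{
	pte_t pte;

	pte = ptep_get_and_clear(mm, addr, ptep);	/* 1) clears pte and TLB */
	pte = pte_modify(pte, newprot);
	set_pte_at(mm, addr, ptep, pte);		/* 2) install new pte */
	/* 3) flush_tlb_range follows in common code; it is a nop on s390 */
}
#endif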
#define __HAVE_ARCH_PTEP_MODIFY_PROT_TRANSACTION
static inline pte_t ptep_modify_prot_start(struct mm_struct *mm,
					   unsigned long address,
					   pte_t *ptep)
{
	pte_t pte;

	mm->context.flush_mm = 1;
	if (mm_has_pgste(mm))
		pgste_get_lock(ptep);

	pte = *ptep;
	if (!mm_exclusive(mm))
		__ptep_ipte(address, ptep);
	return pte;
}

static inline void ptep_modify_prot_commit(struct mm_struct *mm,
					   unsigned long address,
					   pte_t *ptep, pte_t pte)
{
	*ptep = pte;
	if (mm_has_pgste(mm))
		pgste_set_unlock(ptep, *(pgste_t *)(ptep + PTRS_PER_PTE));
}

#define __HAVE_ARCH_PTEP_CLEAR_FLUSH
static inline pte_t ptep_clear_flush(struct vm_area_struct *vma,
				     unsigned long address, pte_t *ptep)
{
	pgste_t pgste;
	pte_t pte;

	if (mm_has_pgste(vma->vm_mm))
		pgste = pgste_get_lock(ptep);

	pte = *ptep;
	__ptep_ipte(address, ptep);
	pte_val(*ptep) = _PAGE_TYPE_EMPTY;

	if (mm_has_pgste(vma->vm_mm)) {
		pgste = pgste_update_all(&pte, pgste);
		pgste_set_unlock(ptep, pgste);
	}
	return pte;
}
/*
 * The batched pte unmap code uses ptep_get_and_clear_full to clear the
 * ptes. Here an optimization is possible. tlb_gather_mmu flushes all
 * tlbs of an mm if it can guarantee that the ptes of the mm_struct
 * cannot be accessed while the batched unmap is running. In this case
 * full==1 and a simple pte_clear is enough. See tlb.h.
 */
#define __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL
static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm,
					    unsigned long address,
					    pte_t *ptep, int full)
{
	pgste_t pgste;
	pte_t pte;

	if (mm_has_pgste(mm))
		pgste = pgste_get_lock(ptep);

	pte = *ptep;
	if (!full)
		__ptep_ipte(address, ptep);
	pte_val(*ptep) = _PAGE_TYPE_EMPTY;

	if (mm_has_pgste(mm)) {
		pgste = pgste_update_all(&pte, pgste);
		pgste_set_unlock(ptep, pgste);
	}
	return pte;
}
#define __HAVE_ARCH_PTEP_SET_WRPROTECT
static inline pte_t ptep_set_wrprotect(struct mm_struct *mm,
				       unsigned long address, pte_t *ptep)
{
	pgste_t pgste;
	pte_t pte = *ptep;

	if (pte_write(pte)) {
		mm->context.flush_mm = 1;
		if (mm_has_pgste(mm))
			pgste = pgste_get_lock(ptep);

		if (!mm_exclusive(mm))
			__ptep_ipte(address, ptep);
		*ptep = pte_wrprotect(pte);

		if (mm_has_pgste(mm))
			pgste_set_unlock(ptep, pgste);
	}
	return pte;
}

#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
static inline int ptep_set_access_flags(struct vm_area_struct *vma,
					unsigned long address, pte_t *ptep,
					pte_t entry, int dirty)
{
	pgste_t pgste;

	if (pte_same(*ptep, entry))
		return 0;
	if (mm_has_pgste(vma->vm_mm))
		pgste = pgste_get_lock(ptep);

	__ptep_ipte(address, ptep);
	*ptep = entry;

	if (mm_has_pgste(vma->vm_mm))
		pgste_set_unlock(ptep, pgste);
	return 1;
}
/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */
static inline pte_t mk_pte_phys(unsigned long physpage, pgprot_t pgprot)
{
	pte_t __pte;
	pte_val(__pte) = physpage + pgprot_val(pgprot);
	return __pte;
}

static inline pte_t mk_pte(struct page *page, pgprot_t pgprot)
{
	unsigned long physpage = page_to_phys(page);

	return mk_pte_phys(physpage, pgprot);
}

#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))
#define pud_index(address) (((address) >> PUD_SHIFT) & (PTRS_PER_PUD-1))
#define pmd_index(address) (((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1))
#define pte_index(address) (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE-1))

#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))
#define pgd_offset_k(address) pgd_offset(&init_mm, address)
#ifndef CONFIG_64BIT

#define pmd_deref(pmd) (pmd_val(pmd) & _SEGMENT_ENTRY_ORIGIN)
#define pud_deref(pmd) ({ BUG(); 0UL; })
#define pgd_deref(pmd) ({ BUG(); 0UL; })

#define pud_offset(pgd, address) ((pud_t *) pgd)
#define pmd_offset(pud, address) ((pmd_t *) pud + pmd_index(address))

#else /* CONFIG_64BIT */

#define pmd_deref(pmd) (pmd_val(pmd) & _SEGMENT_ENTRY_ORIGIN)
#define pud_deref(pud) (pud_val(pud) & _REGION_ENTRY_ORIGIN)
#define pgd_deref(pgd) (pgd_val(pgd) & _REGION_ENTRY_ORIGIN)

static inline pud_t *pud_offset(pgd_t *pgd, unsigned long address)
{
	pud_t *pud = (pud_t *) pgd;

	if ((pgd_val(*pgd) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R2)
		pud = (pud_t *) pgd_deref(*pgd);
	return pud + pud_index(address);
}

static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address)
{
	pmd_t *pmd = (pmd_t *) pud;

	if ((pud_val(*pud) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R3)
		pmd = (pmd_t *) pud_deref(*pud);
	return pmd + pmd_index(address);
}

#endif /* CONFIG_64BIT */

#define pfn_pte(pfn,pgprot) mk_pte_phys(__pa((pfn) << PAGE_SHIFT),(pgprot))
#define pte_pfn(x) (pte_val(x) >> PAGE_SHIFT)
#define pte_page(x) pfn_to_page(pte_pfn(x))

#define pmd_page(pmd) pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT)

/* Find an entry in the lowest level page table.. */
#define pte_offset(pmd, addr) ((pte_t *) pmd_deref(*(pmd)) + pte_index(addr))
#define pte_offset_kernel(pmd, address) pte_offset(pmd,address)
#define pte_offset_map(pmd, address) pte_offset_kernel(pmd, address)
#define pte_unmap(pte) do { } while (0)
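/*
 * Example (editor's addition, illustrative only): walking from an mm and a
 * virtual address down to the pte with the offset helpers above. Presence
 * checks are shown, all locking is elided, the function name is
 * hypothetical, and the block is not built.
 */
#if 0	/* example only */
static inline pte_t *example_pte_lookup(struct mm_struct *mm,
					unsigned long addr)
{
	pgd_t *pgd = pgd_offset(mm, addr);
	pud_t *pud;
	pmd_t *pmd;

	if (pgd_none(*pgd))
		return NULL;
	pud = pud_offset(pgd, addr);	/* may just cast on folded levels */
	if (pud_none(*pud))
		return NULL;
	pmd = pmd_offset(pud, addr);
	if (pmd_none(*pmd))
		return NULL;
	return pte_offset_map(pmd, addr);
}
#endif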
static inline void __pmd_idte(unsigned long address, pmd_t *pmdp)
{
	unsigned long sto = (unsigned long) pmdp -
			    pmd_index(address) * sizeof(pmd_t);

	if (!(pmd_val(*pmdp) & _SEGMENT_ENTRY_INV)) {
		asm volatile(
			"	.insn	rrf,0xb98e0000,%2,%3,0,0"
			: "=m" (*pmdp)
			: "m" (*pmdp), "a" (sto),
			  "a" ((address & HPAGE_MASK))
			: "cc"
		);
	}
}
#ifdef CONFIG_TRANSPARENT_HUGEPAGE

#define __HAVE_ARCH_PGTABLE_DEPOSIT
extern void pgtable_trans_huge_deposit(struct mm_struct *mm, pgtable_t pgtable);

#define __HAVE_ARCH_PGTABLE_WITHDRAW
extern pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm);

static inline int pmd_trans_splitting(pmd_t pmd)
{
	return pmd_val(pmd) & _SEGMENT_ENTRY_SPLIT;
}

static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr,
			      pmd_t *pmdp, pmd_t entry)
{
	*pmdp = entry;
}

static inline unsigned long massage_pgprot_pmd(pgprot_t pgprot)
{
	unsigned long pgprot_pmd = 0;

	if (pgprot_val(pgprot) & _PAGE_INVALID) {
		if (pgprot_val(pgprot) & _PAGE_SWT)
			pgprot_pmd |= _HPAGE_TYPE_NONE;
		pgprot_pmd |= _SEGMENT_ENTRY_INV;
	}
	if (pgprot_val(pgprot) & _PAGE_RO)
		pgprot_pmd |= _SEGMENT_ENTRY_RO;
	return pgprot_pmd;
}

static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
{
	pmd_val(pmd) &= _SEGMENT_CHG_MASK;
	pmd_val(pmd) |= massage_pgprot_pmd(newprot);
	return pmd;
}

static inline pmd_t pmd_mkhuge(pmd_t pmd)
{
	pmd_val(pmd) |= _SEGMENT_ENTRY_LARGE;
	return pmd;
}

static inline pmd_t pmd_mkwrite(pmd_t pmd)
{
	pmd_val(pmd) &= ~_SEGMENT_ENTRY_RO;
	return pmd;
}

static inline pmd_t pmd_wrprotect(pmd_t pmd)
{
	pmd_val(pmd) |= _SEGMENT_ENTRY_RO;
	return pmd;
}

static inline pmd_t pmd_mkdirty(pmd_t pmd)
{
	/* No dirty bit in the segment table entry. */
	return pmd;
}

static inline pmd_t pmd_mkold(pmd_t pmd)
{
	/* No referenced bit in the segment table entry. */
	return pmd;
}

static inline pmd_t pmd_mkyoung(pmd_t pmd)
{
	/* No referenced bit in the segment table entry. */
	return pmd;
}

#define __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG
static inline int pmdp_test_and_clear_young(struct vm_area_struct *vma,
					    unsigned long address, pmd_t *pmdp)
{
	unsigned long pmd_addr = pmd_val(*pmdp) & HPAGE_MASK;
	long tmp, rc;
	int counter;

	rc = 0;
	if (MACHINE_HAS_RRBM) {
		counter = PTRS_PER_PTE >> 6;
		asm volatile(
			"0:	.insn	rre,0xb9ae0000,%0,%3\n"	/* rrbm */
			"	ogr	%1,%0\n"
			"	la	%3,0(%4,%3)\n"
			"	brct	%2,0b\n"
			: "=&d" (tmp), "+&d" (rc), "+d" (counter),
			  "+a" (pmd_addr)
			: "a" (64 * 4096UL) : "cc");
		rc = !!rc;
	} else {
		counter = PTRS_PER_PTE;
		asm volatile(
			"0:	rrbe	0,%2\n"
			"	la	%2,0(%3,%2)\n"
			"	brc	12,1f\n"
			"	lhi	%0,1\n"
			"1:	brct	%1,0b\n"
			: "+d" (rc), "+d" (counter), "+a" (pmd_addr)
			: "a" (4096UL) : "cc");
	}
	return rc;
}

#define __HAVE_ARCH_PMDP_GET_AND_CLEAR
static inline pmd_t pmdp_get_and_clear(struct mm_struct *mm,
				       unsigned long address, pmd_t *pmdp)
{
	pmd_t pmd = *pmdp;

	__pmd_idte(address, pmdp);
	pmd_clear(pmdp);
	return pmd;
}

#define __HAVE_ARCH_PMDP_CLEAR_FLUSH
static inline pmd_t pmdp_clear_flush(struct vm_area_struct *vma,
				     unsigned long address, pmd_t *pmdp)
{
	return pmdp_get_and_clear(vma->vm_mm, address, pmdp);
}

#define __HAVE_ARCH_PMDP_INVALIDATE
static inline void pmdp_invalidate(struct vm_area_struct *vma,
				   unsigned long address, pmd_t *pmdp)
{
	__pmd_idte(address, pmdp);
}

static inline pmd_t mk_pmd_phys(unsigned long physpage, pgprot_t pgprot)
{
	pmd_t __pmd;
	pmd_val(__pmd) = physpage + massage_pgprot_pmd(pgprot);
	return __pmd;
}

#define pfn_pmd(pfn, pgprot) mk_pmd_phys(__pa((pfn) << PAGE_SHIFT), (pgprot))
#define mk_pmd(page, pgprot) pfn_pmd(page_to_pfn(page), (pgprot))
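/*
 * Example (editor's addition, illustrative only): building a read-only huge
 * pmd for a THP head page with the helpers above. The function name is
 * hypothetical and the block is not built.
 */
#if 0	/* example only */
static inline pmd_t example_mk_huge_pmd(struct page *page, pgprot_t prot)
{
	pmd_t pmd = mk_pmd(page, prot);

	pmd = pmd_mkhuge(pmd);		/* set _SEGMENT_ENTRY_LARGE */
	pmd = pmd_wrprotect(pmd);	/* set _SEGMENT_ENTRY_RO */
	return pmd;
}
#endif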
static inline int pmd_trans_huge(pmd_t pmd)
{
	return pmd_val(pmd) & _SEGMENT_ENTRY_LARGE;
}

static inline int has_transparent_hugepage(void)
{
	return MACHINE_HAS_HPAGE ? 1 : 0;
}

static inline unsigned long pmd_pfn(pmd_t pmd)
{
	if (pmd_trans_huge(pmd))
		return pmd_val(pmd) >> HPAGE_SHIFT;
	else
		return pmd_val(pmd) >> PAGE_SHIFT;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
/*
 * 31 bit swap entry format:
 * A page-table entry has some bits we have to treat in a special way.
 * Bits 0, 20 and bit 23 have to be zero, otherwise a specification
 * exception will occur instead of a page translation exception. The
 * specification exception has the bad habit not to store necessary
 * information in the lowcore.
 * Bit 21 and bit 22 are the page invalid bit and the page protection
 * bit. We set both to indicate a swapped page.
 * Bit 30 and 31 are used to distinguish the different page types. For
 * a swapped page these bits need to be zero.
 * This leaves the bits 1-19 and bits 24-29 to store type and offset.
 * We use the 5 bits from 25-29 for the type and the 20 bits from 1-19
 * plus 24 for the offset.
 * 0|     offset        |0110|o|type |00|
 * 0 0000000001111111111 2222 2 22222 33
 * 0 1234567890123456789 0123 4 56789 01
 *
 * 64 bit swap entry format:
 * A page-table entry has some bits we have to treat in a special way.
 * Bits 52 and bit 55 have to be zero, otherwise a specification
 * exception will occur instead of a page translation exception. The
 * specification exception has the bad habit not to store necessary
 * information in the lowcore.
 * Bit 53 and bit 54 are the page invalid bit and the page protection
 * bit. We set both to indicate a swapped page.
 * Bit 62 and 63 are used to distinguish the different page types. For
 * a swapped page these bits need to be zero.
 * This leaves the bits 0-51 and bits 56-61 to store type and offset.
 * We use the 5 bits from 57-61 for the type and the 53 bits from 0-51
 * plus 56 for the offset.
 * |                      offset                        |0110|o|type |00|
 * 0000000000111111111122222222223333333333444444444455 5555 5 55566 66
 * 0123456789012345678901234567890123456789012345678901 2345 6 78901 23
 */
#ifndef CONFIG_64BIT
#define __SWP_OFFSET_MASK (~0UL >> 12)
#else
#define __SWP_OFFSET_MASK (~0UL >> 11)
#endif
static inline pte_t mk_swap_pte(unsigned long type, unsigned long offset)
{
	pte_t pte;
	offset &= __SWP_OFFSET_MASK;
	pte_val(pte) = _PAGE_TYPE_SWAP | ((type & 0x1f) << 2) |
		((offset & 1UL) << 7) | ((offset & ~1UL) << 11);
	return pte;
}

#define __swp_type(entry)	(((entry).val >> 2) & 0x1f)
#define __swp_offset(entry)	(((entry).val >> 11) | (((entry).val >> 7) & 1))
#define __swp_entry(type,offset) ((swp_entry_t) { pte_val(mk_swap_pte((type),(offset))) })

#define __pte_to_swp_entry(pte)	((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x)	((pte_t) { (x).val })
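/*
 * Worked example (editor's addition): encoding swap type 3 and offset
 * 0x1234 with mk_swap_pte and decoding them again. The offset's low bit is
 * stored at pte bit position 7 (counting from the least significant bit)
 * and the remaining bits from position 11 upwards, so the round trip below
 * is lossless. The function name is hypothetical; the block is not built.
 */
#if 0	/* example only */
static inline void example_swap_roundtrip(void)
{
	swp_entry_t entry = __swp_entry(3, 0x1234);

	BUG_ON(__swp_type(entry) != 3);
	BUG_ON(__swp_offset(entry) != 0x1234);
}
#endif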
#ifndef CONFIG_64BIT
# define PTE_FILE_MAX_BITS	26
#else /* CONFIG_64BIT */
# define PTE_FILE_MAX_BITS	59
#endif /* CONFIG_64BIT */

#define pte_to_pgoff(__pte) \
	((((__pte).pte >> 12) << 7) + (((__pte).pte >> 1) & 0x7f))

#define pgoff_to_pte(__off) \
	((pte_t) { ((((__off) & 0x7f) << 1) + (((__off) >> 7) << 12)) \
		   | _PAGE_TYPE_FILE })

#endif /* !__ASSEMBLY__ */

#define kern_addr_valid(addr)   (1)

extern int vmem_add_mapping(unsigned long start, unsigned long size);
extern int vmem_remove_mapping(unsigned long start, unsigned long size);
extern int s390_enable_sie(void);

/*
 * No page table caches to initialise
 */
#define pgtable_cache_init()	do { } while (0)

#include <asm-generic/pgtable.h>

#endif /* _ASM_S390_PGTABLE_H */