/*
 *  include/asm-s390/pgtable.h
 *
 *  S390 version
 *    Copyright (C) 1999,2000 IBM Deutschland Entwicklung GmbH, IBM Corporation
 *    Author(s): Hartmut Penner (hp@de.ibm.com)
 *               Ulrich Weigand (weigand@de.ibm.com)
 *               Martin Schwidefsky (schwidefsky@de.ibm.com)
 *
 *  Derived from "include/asm-i386/pgtable.h"
 */

#ifndef _ASM_S390_PGTABLE_H
#define _ASM_S390_PGTABLE_H

/*
 * The Linux memory management assumes a three-level page table setup. For
 * s390 31 bit we "fold" the mid level into the top-level page table, so
 * that we physically have the same two-level page table as the s390 mmu
 * expects in 31 bit mode. For s390 64 bit we use three of the five levels
 * the hardware provides (region first and region second tables are not
 * used).
 *
 * The "pgd_xxx()" functions are trivial for a folded two-level
 * setup: the pgd is never bad, and a pmd always exists (as it's folded
 * into the pgd entry).
 *
 * This file contains the functions and defines necessary to modify and use
 * the S390 page table tree.
 */
#ifndef __ASSEMBLY__
#include <linux/sched.h>
#include <linux/mm_types.h>
#include <asm/bug.h>
#include <asm/page.h>

extern pgd_t swapper_pg_dir[] __attribute__ ((aligned (4096)));
extern void paging_init(void);
extern void vmem_map_init(void);
extern void fault_init(void);

/*
 * The S390 doesn't have any external MMU info: the kernel page
 * tables contain all the necessary information.
 */
#define update_mmu_cache(vma, address, ptep)	do { } while (0)

/*
 * ZERO_PAGE is a global shared page that is always zero; used
 * for zero-mapped memory areas etc..
 */

extern unsigned long empty_zero_page;
extern unsigned long zero_page_mask;

#define ZERO_PAGE(vaddr) \
	(virt_to_page((void *)(empty_zero_page + \
	 (((unsigned long)(vaddr)) & zero_page_mask))))

#define is_zero_pfn is_zero_pfn
static inline int is_zero_pfn(unsigned long pfn)
{
	extern unsigned long zero_pfn;
	unsigned long offset_from_zero_pfn = pfn - zero_pfn;
	return offset_from_zero_pfn <= (zero_page_mask >> PAGE_SHIFT);
}

#define my_zero_pfn(addr)	page_to_pfn(ZERO_PAGE(addr))
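
/*
 * Illustrative sketch (not part of this header): a read fault on an
 * anonymous mapping can be satisfied with the shared zero page. "vaddr"
 * is an assumed caller-provided fault address; zero_page_mask selects
 * among several zero pages to avoid cache aliasing.
 *
 *	struct page *zpage = ZERO_PAGE(vaddr);
 *	unsigned long zpfn = my_zero_pfn(vaddr);
 *	// is_zero_pfn(zpfn) holds for every vaddr
 */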
#endif /* !__ASSEMBLY__ */

/*
 * PMD_SHIFT determines the size of the area a second-level page
 * table can map
 * PGDIR_SHIFT determines what a third-level page table entry can map
 */
#ifndef __s390x__
# define PMD_SHIFT	20
# define PUD_SHIFT	20
# define PGDIR_SHIFT	20
#else /* __s390x__ */
# define PMD_SHIFT	20
# define PUD_SHIFT	31
# define PGDIR_SHIFT	42
#endif /* __s390x__ */

#define PMD_SIZE	(1UL << PMD_SHIFT)
#define PMD_MASK	(~(PMD_SIZE-1))
#define PUD_SIZE	(1UL << PUD_SHIFT)
#define PUD_MASK	(~(PUD_SIZE-1))
#define PGDIR_SIZE	(1UL << PGDIR_SHIFT)
#define PGDIR_MASK	(~(PGDIR_SIZE-1))
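
/*
 * Worked example of the unit sizes these shifts imply: on 64 bit,
 * PMD_SIZE = 1UL << 20 = 1 MB (one segment), PUD_SIZE = 1UL << 31 =
 * 2 GB (one region-third entry) and PGDIR_SIZE = 1UL << 42 = 4 TB;
 * on 31 bit all three levels fold to the 1 MB segment size.
 */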
/*
 * entries per page directory level: the S390 is two-level, so
 * we don't really have any PMD directory physically.
 * For S390, segment-table entries are combined into one PGD,
 * which leads to 1024 ptes per pgd.
 */
#define PTRS_PER_PTE	256
#ifndef __s390x__
#define PTRS_PER_PMD	1
#define PTRS_PER_PUD	1
#else /* __s390x__ */
#define PTRS_PER_PMD	2048
#define PTRS_PER_PUD	2048
#endif /* __s390x__ */
#define PTRS_PER_PGD	2048

#define FIRST_USER_ADDRESS	0

#define pte_ERROR(e) \
	printk("%s:%d: bad pte %p.\n", __FILE__, __LINE__, (void *) pte_val(e))
#define pmd_ERROR(e) \
	printk("%s:%d: bad pmd %p.\n", __FILE__, __LINE__, (void *) pmd_val(e))
#define pud_ERROR(e) \
	printk("%s:%d: bad pud %p.\n", __FILE__, __LINE__, (void *) pud_val(e))
#define pgd_ERROR(e) \
	printk("%s:%d: bad pgd %p.\n", __FILE__, __LINE__, (void *) pgd_val(e))
#ifndef __ASSEMBLY__

/*
 * The vmalloc area will always be on the topmost area of the kernel
 * mapping. We reserve 96MB (31bit) / 128GB (64bit) for vmalloc,
 * which should be enough for any sane case.
 * By putting vmalloc at the top, we maximise the gap between physical
 * memory and vmalloc to catch misplaced memory accesses. As a side
 * effect, this also makes sure that 64 bit module code cannot be used
 * as a system call address.
 */
extern unsigned long VMALLOC_START;
extern unsigned long VMALLOC_END;
extern struct page *vmemmap;

#define VMEM_MAX_PHYS	((unsigned long) vmemmap)
/*
 * A 31 bit pagetable entry of S390 has the following format:
 *  |   PFRA          |    |  OS  |
 * 0                   0IP0
 * 00000000001111111111222222222233
 * 01234567890123456789012345678901
 *
 * I Page-Invalid Bit:    Page is not available for address-translation
 * P Page-Protection Bit: Store access not possible for page
 *
 * A 31 bit segmenttable entry of S390 has the following format:
 *  |   P-table origin      |  |PTL
 * 0                         IC
 * 00000000001111111111222222222233
 * 01234567890123456789012345678901
 *
 * I Segment-Invalid Bit:    Segment is not available for address-translation
 * C Common-Segment Bit:     Segment is not private (PoP 3-30)
 * PTL Page-Table-Length:    Page-table length (PTL+1*16 entries -> up to 256)
 *
 * The 31 bit segmenttable origin of S390 has the following format:
 *
 *  |S-table origin   |     | STL |
 * X                   **GPS
 * 00000000001111111111222222222233
 * 01234567890123456789012345678901
 *
 * X Space-Switch event:
 * G Segment-Invalid Bit:     *
 * P Private-Space Bit:       Segment is not private (PoP 3-30)
 * S Storage-Alteration:
 * STL Segment-Table-Length:  Segment-table length (STL+1*16 entries -> up to 2048)
 *
 * A 64 bit pagetable entry of S390 has the following format:
 * |                     PFRA                         |0IPC|  OS  |
 * 0000000000111111111122222222223333333333444444444455555555556666
 * 0123456789012345678901234567890123456789012345678901234567890123
 *
 * I Page-Invalid Bit:    Page is not available for address-translation
 * P Page-Protection Bit: Store access not possible for page
 * C Change-bit override: HW is not required to set change bit
 *
 * A 64 bit segmenttable entry of S390 has the following format:
 * |        P-table origin                              |      TT
 * 0000000000111111111122222222223333333333444444444455555555556666
 * 0123456789012345678901234567890123456789012345678901234567890123
 *
 * I Segment-Invalid Bit:    Segment is not available for address-translation
 * C Common-Segment Bit:     Segment is not private (PoP 3-30)
 * P Page-Protection Bit:    Store access not possible for page
 * TT Type 00
 *
 * A 64 bit region table entry of S390 has the following format:
 * |        S-table origin                             |   TF  TTTL
 * 0000000000111111111122222222223333333333444444444455555555556666
 * 0123456789012345678901234567890123456789012345678901234567890123
 *
 * I Segment-Invalid Bit:    Segment is not available for address-translation
 * TT Type 01
 * TF
 * TL Table length
 *
 * The 64 bit regiontable origin of S390 has the following format:
 * |      region table origin                          |       DTTL
 * 0000000000111111111122222222223333333333444444444455555555556666
 * 0123456789012345678901234567890123456789012345678901234567890123
 *
 * X Space-Switch event:
 * G Segment-Invalid Bit:
 * P Private-Space Bit:
 * S Storage-Alteration:
 * R Real space
 * TL Table-Length:
 *
 * A storage key has the following format:
 * | ACC |F|R|C|0|
 *  0   3 4 5 6 7
 * ACC: access key
 * F  : fetch protection bit
 * R  : referenced bit
 * C  : changed bit
 */
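
/*
 * Illustrative sketch (not part of this header): with the 64 bit pte
 * layout above, the page frame real address and the hardware bits can
 * be picked apart with plain masks. _PAGE_INVALID and _PAGE_RO are
 * defined below; "pte" is an assumed caller-provided pte_t.
 *
 *	unsigned long pfra = pte_val(pte) & PAGE_MASK;
 *	int invalid = (pte_val(pte) & _PAGE_INVALID) != 0;
 *	int write_protected = (pte_val(pte) & _PAGE_RO) != 0;
 */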
/* Hardware bits in the page table entry */
#define _PAGE_CO	0x100		/* HW Change-bit override */
#define _PAGE_RO	0x200		/* HW read-only bit  */
#define _PAGE_INVALID	0x400		/* HW invalid bit    */

/* Software bits in the page table entry */
#define _PAGE_SWT	0x001		/* SW pte type bit t */
#define _PAGE_SWX	0x002		/* SW pte type bit x */
#define _PAGE_SWC	0x004		/* SW pte changed bit (for KVM) */
#define _PAGE_SWR	0x008		/* SW pte referenced bit (for KVM) */
#define _PAGE_SPECIAL	0x010		/* SW associated with special page */
#define __HAVE_ARCH_PTE_SPECIAL

/* Set of bits not changed in pte_modify */
#define _PAGE_CHG_MASK	(PAGE_MASK | _PAGE_SPECIAL | _PAGE_SWC | _PAGE_SWR)

/* Six different types of pages. */
#define _PAGE_TYPE_EMPTY	0x400
#define _PAGE_TYPE_NONE		0x401
#define _PAGE_TYPE_SWAP		0x403
#define _PAGE_TYPE_FILE		0x601	/* bit 0x002 is used for offset !! */
#define _PAGE_TYPE_RO		0x200
#define _PAGE_TYPE_RW		0x000

/*
 * Only four types for huge pages, using the invalid bit and protection bit
 * of a segment table entry.
 */
#define _HPAGE_TYPE_EMPTY	0x020	/* _SEGMENT_ENTRY_INV  */
#define _HPAGE_TYPE_NONE	0x220
#define _HPAGE_TYPE_RO		0x200	/* _SEGMENT_ENTRY_RO   */
#define _HPAGE_TYPE_RW		0x000
/*
 * PTE type bits are rather complicated. handle_pte_fault uses pte_present,
 * pte_none and pte_file to find out the pte type WITHOUT holding the page
 * table lock. ptep_clear_flush on the other hand uses the ipte instruction
 * to invalidate a given pte. ipte sets the hw invalid bit and clears all tlbs
 * for the page. The page table entry is set to _PAGE_TYPE_EMPTY afterwards.
 * This change is done while holding the lock, but the intermediate step
 * of a previously valid pte with the hw invalid bit set can be observed by
 * handle_pte_fault. That makes it necessary that all valid pte types with
 * the hw invalid bit set must be distinguishable from the four pte types
 * empty, none, swap and file.
 *
 *			irxt  ipte  irxt
 * _PAGE_TYPE_EMPTY	1000   ->   1000
 * _PAGE_TYPE_NONE	1001   ->   1001
 * _PAGE_TYPE_SWAP	1011   ->   1011
 * _PAGE_TYPE_FILE	11?1   ->   11?1
 * _PAGE_TYPE_RO	0100   ->   1100
 * _PAGE_TYPE_RW	0000   ->   1000
 *
 * pte_none is true for the bit combinations 1000, 1010, 1100, 1110
 * pte_present is true for the bit combinations 0000, 0010, 0100, 0110, 1001
 * pte_file is true for the bit combinations 1101, 1111
 * swap pte is 1011; 0001, 0011, 0101 and 0111 are invalid.
 */
#ifndef __s390x__

/* Bits in the segment table address-space-control-element */
#define _ASCE_SPACE_SWITCH	0x80000000UL	/* space switch event	    */
#define _ASCE_ORIGIN_MASK	0x7ffff000UL	/* segment table origin	    */
#define _ASCE_PRIVATE_SPACE	0x100	/* private space control	    */
#define _ASCE_ALT_EVENT		0x80	/* storage alteration event control */
#define _ASCE_TABLE_LENGTH	0x7f	/* 128 x 64 entries = 8k	    */

/* Bits in the segment table entry */
#define _SEGMENT_ENTRY_ORIGIN	0x7fffffc0UL	/* page table origin	    */
#define _SEGMENT_ENTRY_RO	0x200	/* page protection bit		    */
#define _SEGMENT_ENTRY_INV	0x20	/* invalid segment table entry	    */
#define _SEGMENT_ENTRY_COMMON	0x10	/* common segment bit		    */
#define _SEGMENT_ENTRY_PTL	0x0f	/* page table length		    */

#define _SEGMENT_ENTRY		(_SEGMENT_ENTRY_PTL)
#define _SEGMENT_ENTRY_EMPTY	(_SEGMENT_ENTRY_INV)

/* Page status table bits for virtualization */
#define RCP_ACC_BITS	0xf0000000UL
#define RCP_FP_BIT	0x08000000UL
#define RCP_PCL_BIT	0x00800000UL
#define RCP_HR_BIT	0x00400000UL
#define RCP_HC_BIT	0x00200000UL
#define RCP_GR_BIT	0x00040000UL
#define RCP_GC_BIT	0x00020000UL

/* User dirty / referenced bit for KVM's migration feature */
#define KVM_UR_BIT	0x00008000UL
#define KVM_UC_BIT	0x00004000UL

#else /* __s390x__ */

/* Bits in the segment/region table address-space-control-element */
#define _ASCE_ORIGIN		~0xfffUL /* segment table origin	    */
#define _ASCE_PRIVATE_SPACE	0x100	/* private space control	    */
#define _ASCE_ALT_EVENT		0x80	/* storage alteration event control */
#define _ASCE_SPACE_SWITCH	0x40	/* space switch event		    */
#define _ASCE_REAL_SPACE	0x20	/* real space control		    */
#define _ASCE_TYPE_MASK		0x0c	/* asce table type mask		    */
#define _ASCE_TYPE_REGION1	0x0c	/* region first table type	    */
#define _ASCE_TYPE_REGION2	0x08	/* region second table type	    */
#define _ASCE_TYPE_REGION3	0x04	/* region third table type	    */
#define _ASCE_TYPE_SEGMENT	0x00	/* segment table type		    */
#define _ASCE_TABLE_LENGTH	0x03	/* region table length		    */

/* Bits in the region table entry */
#define _REGION_ENTRY_ORIGIN	~0xfffUL /* region/segment table origin	    */
#define _REGION_ENTRY_INV	0x20	/* invalid region table entry	    */
#define _REGION_ENTRY_TYPE_MASK	0x0c	/* region/segment table type mask   */
#define _REGION_ENTRY_TYPE_R1	0x0c	/* region first table type	    */
#define _REGION_ENTRY_TYPE_R2	0x08	/* region second table type	    */
#define _REGION_ENTRY_TYPE_R3	0x04	/* region third table type	    */
#define _REGION_ENTRY_LENGTH	0x03	/* region third length		    */

#define _REGION1_ENTRY		(_REGION_ENTRY_TYPE_R1 | _REGION_ENTRY_LENGTH)
#define _REGION1_ENTRY_EMPTY	(_REGION_ENTRY_TYPE_R1 | _REGION_ENTRY_INV)
#define _REGION2_ENTRY		(_REGION_ENTRY_TYPE_R2 | _REGION_ENTRY_LENGTH)
#define _REGION2_ENTRY_EMPTY	(_REGION_ENTRY_TYPE_R2 | _REGION_ENTRY_INV)
#define _REGION3_ENTRY		(_REGION_ENTRY_TYPE_R3 | _REGION_ENTRY_LENGTH)
#define _REGION3_ENTRY_EMPTY	(_REGION_ENTRY_TYPE_R3 | _REGION_ENTRY_INV)

/* Bits in the segment table entry */
#define _SEGMENT_ENTRY_ORIGIN	~0x7ffUL /* segment table origin	    */
#define _SEGMENT_ENTRY_RO	0x200	/* page protection bit		    */
#define _SEGMENT_ENTRY_INV	0x20	/* invalid segment table entry	    */

#define _SEGMENT_ENTRY		(0)
#define _SEGMENT_ENTRY_EMPTY	(_SEGMENT_ENTRY_INV)

#define _SEGMENT_ENTRY_LARGE	0x400	/* STE-format control, large page   */
#define _SEGMENT_ENTRY_CO	0x100	/* change-recording override	    */

/* Page status table bits for virtualization */
#define RCP_ACC_BITS	0xf000000000000000UL
#define RCP_FP_BIT	0x0800000000000000UL
#define RCP_PCL_BIT	0x0080000000000000UL
#define RCP_HR_BIT	0x0040000000000000UL
#define RCP_HC_BIT	0x0020000000000000UL
#define RCP_GR_BIT	0x0004000000000000UL
#define RCP_GC_BIT	0x0002000000000000UL

/* User dirty / referenced bit for KVM's migration feature */
#define KVM_UR_BIT	0x0000800000000000UL
#define KVM_UC_BIT	0x0000400000000000UL

#endif /* __s390x__ */
/*
 * A user page table pointer has the space-switch-event bit, the
 * private-space-control bit and the storage-alteration-event-control
 * bit set. A kernel page table pointer doesn't need them.
 */
#define _ASCE_USER_BITS		(_ASCE_SPACE_SWITCH | _ASCE_PRIVATE_SPACE | \
				 _ASCE_ALT_EVENT)

/*
 * Page protection definitions.
 */
#define PAGE_NONE	__pgprot(_PAGE_TYPE_NONE)
#define PAGE_RO		__pgprot(_PAGE_TYPE_RO)
#define PAGE_RW		__pgprot(_PAGE_TYPE_RW)

#define PAGE_KERNEL	PAGE_RW
#define PAGE_COPY	PAGE_RO
/*
 * On s390 the page table entry has an invalid bit and a read-only bit.
 * Read permission implies execute permission and write permission
 * implies read permission.
 */
         /*xwr*/
#define __P000	PAGE_NONE
#define __P001	PAGE_RO
#define __P010	PAGE_RO
#define __P011	PAGE_RO
#define __P100	PAGE_RO
#define __P101	PAGE_RO
#define __P110	PAGE_RO
#define __P111	PAGE_RO

#define __S000	PAGE_NONE
#define __S001	PAGE_RO
#define __S010	PAGE_RW
#define __S011	PAGE_RW
#define __S100	PAGE_RO
#define __S101	PAGE_RO
#define __S110	PAGE_RW
#define __S111	PAGE_RW
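
/*
 * Illustrative note (assumed generic mm behaviour): common code builds
 * protection_map[] from __P000..__S111 and indexes it with the
 * VM_READ/VM_WRITE/VM_EXEC/VM_SHARED part of vm_flags. A private
 * PROT_WRITE mapping thus resolves to __P010 = PAGE_RO here, and write
 * faults are handled via copy-on-write rather than a writable pte.
 */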
static inline int mm_exclusive(struct mm_struct *mm)
{
	return likely(mm == current->active_mm &&
		      atomic_read(&mm->context.attach_count) <= 1);
}

static inline int mm_has_pgste(struct mm_struct *mm)
{
#ifdef CONFIG_PGSTE
	if (unlikely(mm->context.has_pgste))
		return 1;
#endif
	return 0;
}
/*
 * pgd/pmd/pte query functions
 */
#ifndef __s390x__

static inline int pgd_present(pgd_t pgd) { return 1; }
static inline int pgd_none(pgd_t pgd)    { return 0; }
static inline int pgd_bad(pgd_t pgd)     { return 0; }

static inline int pud_present(pud_t pud) { return 1; }
static inline int pud_none(pud_t pud)	 { return 0; }
static inline int pud_bad(pud_t pud)	 { return 0; }

#else /* __s390x__ */

static inline int pgd_present(pgd_t pgd)
{
	if ((pgd_val(pgd) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R2)
		return 1;
	return (pgd_val(pgd) & _REGION_ENTRY_ORIGIN) != 0UL;
}

static inline int pgd_none(pgd_t pgd)
{
	if ((pgd_val(pgd) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R2)
		return 0;
	return (pgd_val(pgd) & _REGION_ENTRY_INV) != 0UL;
}

static inline int pgd_bad(pgd_t pgd)
{
	/*
	 * With dynamic page table levels the pgd can be a region table
	 * entry or a segment table entry. Check for the bits that are
	 * invalid for either table entry.
	 */
	unsigned long mask =
		~_SEGMENT_ENTRY_ORIGIN & ~_REGION_ENTRY_INV &
		~_REGION_ENTRY_TYPE_MASK & ~_REGION_ENTRY_LENGTH;
	return (pgd_val(pgd) & mask) != 0;
}

static inline int pud_present(pud_t pud)
{
	if ((pud_val(pud) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R3)
		return 1;
	return (pud_val(pud) & _REGION_ENTRY_ORIGIN) != 0UL;
}

static inline int pud_none(pud_t pud)
{
	if ((pud_val(pud) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R3)
		return 0;
	return (pud_val(pud) & _REGION_ENTRY_INV) != 0UL;
}

static inline int pud_bad(pud_t pud)
{
	/*
	 * With dynamic page table levels the pud can be a region table
	 * entry or a segment table entry. Check for the bits that are
	 * invalid for either table entry.
	 */
	unsigned long mask =
		~_SEGMENT_ENTRY_ORIGIN & ~_REGION_ENTRY_INV &
		~_REGION_ENTRY_TYPE_MASK & ~_REGION_ENTRY_LENGTH;
	return (pud_val(pud) & mask) != 0;
}

#endif /* __s390x__ */
static inline int pmd_present(pmd_t pmd)
{
	return (pmd_val(pmd) & _SEGMENT_ENTRY_ORIGIN) != 0UL;
}

static inline int pmd_none(pmd_t pmd)
{
	return (pmd_val(pmd) & _SEGMENT_ENTRY_INV) != 0UL;
}

static inline int pmd_bad(pmd_t pmd)
{
	unsigned long mask = ~_SEGMENT_ENTRY_ORIGIN & ~_SEGMENT_ENTRY_INV;
	return (pmd_val(pmd) & mask) != _SEGMENT_ENTRY;
}

static inline int pte_none(pte_t pte)
{
	return (pte_val(pte) & _PAGE_INVALID) && !(pte_val(pte) & _PAGE_SWT);
}

static inline int pte_present(pte_t pte)
{
	unsigned long mask = _PAGE_RO | _PAGE_INVALID | _PAGE_SWT | _PAGE_SWX;
	return (pte_val(pte) & mask) == _PAGE_TYPE_NONE ||
		(!(pte_val(pte) & _PAGE_INVALID) &&
		 !(pte_val(pte) & _PAGE_SWT));
}

static inline int pte_file(pte_t pte)
{
	unsigned long mask = _PAGE_RO | _PAGE_INVALID | _PAGE_SWT;
	return (pte_val(pte) & mask) == _PAGE_TYPE_FILE;
}

static inline int pte_special(pte_t pte)
{
	return (pte_val(pte) & _PAGE_SPECIAL);
}

#define __HAVE_ARCH_PTE_SAME
static inline int pte_same(pte_t a, pte_t b)
{
	return pte_val(a) == pte_val(b);
}
static inline pgste_t pgste_get_lock(pte_t *ptep)
{
	unsigned long new = 0;
#ifdef CONFIG_PGSTE
	unsigned long old;

	preempt_disable();
	asm(
		"	lg	%0,%2\n"
		"0:	lgr	%1,%0\n"
		"	nihh	%0,0xff7f\n"	/* clear RCP_PCL_BIT in old */
		"	oihh	%1,0x0080\n"	/* set RCP_PCL_BIT in new */
		"	csg	%0,%1,%2\n"
		"	jl	0b\n"
		: "=&d" (old), "=&d" (new), "=Q" (ptep[PTRS_PER_PTE])
		: "Q" (ptep[PTRS_PER_PTE]) : "cc");
#endif
	return __pgste(new);
}

static inline void pgste_set_unlock(pte_t *ptep, pgste_t pgste)
{
#ifdef CONFIG_PGSTE
	asm(
		"	nihh	%1,0xff7f\n"	/* clear RCP_PCL_BIT */
		"	stg	%1,%0\n"
		: "=Q" (ptep[PTRS_PER_PTE])
		: "d" (pgste_val(pgste)), "Q" (ptep[PTRS_PER_PTE]) : "cc");
	preempt_enable();
#endif
}
static inline pgste_t pgste_update_all(pte_t *ptep, pgste_t pgste)
{
#ifdef CONFIG_PGSTE
	unsigned long address, bits;
	unsigned char skey;

	if (!pte_present(*ptep))
		return pgste;
	address = pte_val(*ptep) & PAGE_MASK;
	skey = page_get_storage_key(address);
	bits = skey & (_PAGE_CHANGED | _PAGE_REFERENCED);
	/* Clear page changed & referenced bit in the storage key */
	if (bits & _PAGE_CHANGED)
		page_set_storage_key(address, skey ^ bits, 1);
	else if (bits)
		page_reset_referenced(address);
	/* Transfer page changed & referenced bit to guest bits in pgste */
	pgste_val(pgste) |= bits << 48;		/* RCP_GR_BIT & RCP_GC_BIT */
	/* Get host changed & referenced bits from pgste */
	bits |= (pgste_val(pgste) & (RCP_HR_BIT | RCP_HC_BIT)) >> 52;
	/* Clear host bits in pgste. */
	pgste_val(pgste) &= ~(RCP_HR_BIT | RCP_HC_BIT);
	pgste_val(pgste) &= ~(RCP_ACC_BITS | RCP_FP_BIT);
	/* Copy page access key and fetch protection bit to pgste */
	pgste_val(pgste) |=
		(unsigned long) (skey & (_PAGE_ACC_BITS | _PAGE_FP_BIT)) << 56;
	/* Transfer changed and referenced to kvm user bits */
	pgste_val(pgste) |= bits << 45;		/* KVM_UR_BIT & KVM_UC_BIT */
	/* Transfer changed & referenced to pte software bits */
	pte_val(*ptep) |= bits << 1;		/* _PAGE_SWR & _PAGE_SWC */
#endif
	return pgste;
}
static inline pgste_t pgste_update_young(pte_t *ptep, pgste_t pgste)
{
#ifdef CONFIG_PGSTE
	int young;

	if (!pte_present(*ptep))
		return pgste;
	young = page_reset_referenced(pte_val(*ptep) & PAGE_MASK);
	/* Transfer page referenced bit to pte software bit (host view) */
	if (young || (pgste_val(pgste) & RCP_HR_BIT))
		pte_val(*ptep) |= _PAGE_SWR;
	/* Clear host referenced bit in pgste. */
	pgste_val(pgste) &= ~RCP_HR_BIT;
	/* Transfer page referenced bit to guest bit in pgste */
	pgste_val(pgste) |= (unsigned long) young << 50; /* set RCP_GR_BIT */
#endif
	return pgste;
}

static inline void pgste_set_pte(pte_t *ptep, pgste_t pgste, pte_t entry)
{
#ifdef CONFIG_PGSTE
	unsigned long address;
	unsigned long okey, nkey;

	if (!pte_present(entry))
		return;
	address = pte_val(entry) & PAGE_MASK;
	okey = nkey = page_get_storage_key(address);
	nkey &= ~(_PAGE_ACC_BITS | _PAGE_FP_BIT);
	/* Set page access key and fetch protection bit from pgste */
	nkey |= (pgste_val(pgste) & (RCP_ACC_BITS | RCP_FP_BIT)) >> 56;
	if (okey != nkey)
		page_set_storage_key(address, nkey, 1);
#endif
}
/**
 * struct gmap - guest address space
 * @mm: pointer to the parent mm_struct
 * @table: pointer to the page directory
 * @asce: address space control element for gmap page table
 * @crst_list: list of all crst tables used in the guest address space
 */
struct gmap {
	struct list_head list;
	struct mm_struct *mm;
	unsigned long *table;
	unsigned long asce;
	struct list_head crst_list;
};

/**
 * struct gmap_rmap - reverse mapping for segment table entries
 * @next: pointer to the next gmap_rmap structure in the list
 * @entry: pointer to a segment table entry
 */
struct gmap_rmap {
	struct list_head list;
	unsigned long *entry;
};

/**
 * struct gmap_pgtable - gmap information attached to a page table
 * @vmaddr: address of the 1MB segment in the process virtual memory
 * @mapper: list of segment table entries mapping a page table
 */
struct gmap_pgtable {
	unsigned long vmaddr;
	struct list_head mapper;
};

struct gmap *gmap_alloc(struct mm_struct *mm);
void gmap_free(struct gmap *gmap);
void gmap_enable(struct gmap *gmap);
void gmap_disable(struct gmap *gmap);
int gmap_map_segment(struct gmap *gmap, unsigned long from,
		     unsigned long to, unsigned long length);
int gmap_unmap_segment(struct gmap *gmap, unsigned long to, unsigned long len);
unsigned long __gmap_fault(unsigned long address, struct gmap *);
unsigned long gmap_fault(unsigned long address, struct gmap *);
void gmap_discard(unsigned long from, unsigned long to, struct gmap *);
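
/*
 * Illustrative sketch (assumed KVM-style usage, not a definitive
 * sequence): "start", "guest_addr" and "size" are assumed
 * caller-provided values.
 *
 *	struct gmap *gmap = gmap_alloc(current->mm);
 *	if (gmap) {
 *		if (!gmap_map_segment(gmap, start, guest_addr, size)) {
 *			gmap_enable(gmap);
 *			// ... run the guest, resolving faults with gmap_fault() ...
 *			gmap_disable(gmap);
 *		}
 *		gmap_free(gmap);
 *	}
 */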
/*
 * Certain architectures need to do special things when PTEs
 * within a page table are directly modified. Thus, the following
 * hook is made available.
 */
static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
			      pte_t *ptep, pte_t entry)
{
	pgste_t pgste;

	if (mm_has_pgste(mm)) {
		pgste = pgste_get_lock(ptep);
		pgste_set_pte(ptep, pgste, entry);
		*ptep = entry;
		pgste_set_unlock(ptep, pgste);
	} else
		*ptep = entry;
}
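
/*
 * Illustrative sketch (assumed context): establishing a mapping for a
 * page combines mk_pte (defined below) with set_pte_at. "vma", "page",
 * "addr" and "ptep" are assumed caller-provided; the page table lock
 * must be held.
 *
 *	pte_t entry = mk_pte(page, vma->vm_page_prot);
 *	set_pte_at(vma->vm_mm, addr, ptep, entry);
 */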
/*
 * query functions pte_write/pte_dirty/pte_young only work if
 * pte_present() is true. Undefined behaviour if not..
 */
static inline int pte_write(pte_t pte)
{
	return (pte_val(pte) & _PAGE_RO) == 0;
}

static inline int pte_dirty(pte_t pte)
{
#ifdef CONFIG_PGSTE
	if (pte_val(pte) & _PAGE_SWC)
		return 1;
#endif
	return 0;
}

static inline int pte_young(pte_t pte)
{
#ifdef CONFIG_PGSTE
	if (pte_val(pte) & _PAGE_SWR)
		return 1;
#endif
	return 0;
}
/*
 * pgd/pmd/pte modification functions
 */
static inline void pgd_clear(pgd_t *pgd)
{
#ifdef __s390x__
	if ((pgd_val(*pgd) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R2)
		pgd_val(*pgd) = _REGION2_ENTRY_EMPTY;
#endif
}

static inline void pud_clear(pud_t *pud)
{
#ifdef __s390x__
	if ((pud_val(*pud) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R3)
		pud_val(*pud) = _REGION3_ENTRY_EMPTY;
#endif
}

static inline void pmd_clear(pmd_t *pmdp)
{
	pmd_val(*pmdp) = _SEGMENT_ENTRY_EMPTY;
}

static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	pte_val(*ptep) = _PAGE_TYPE_EMPTY;
}

/*
 * The following pte modification functions only work if
 * pte_present() is true. Undefined behaviour if not..
 */
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	pte_val(pte) &= _PAGE_CHG_MASK;
	pte_val(pte) |= pgprot_val(newprot);
	return pte;
}

static inline pte_t pte_wrprotect(pte_t pte)
{
	/* Do not clobber _PAGE_TYPE_NONE pages!  */
	if (!(pte_val(pte) & _PAGE_INVALID))
		pte_val(pte) |= _PAGE_RO;
	return pte;
}

static inline pte_t pte_mkwrite(pte_t pte)
{
	pte_val(pte) &= ~_PAGE_RO;
	return pte;
}

static inline pte_t pte_mkclean(pte_t pte)
{
#ifdef CONFIG_PGSTE
	pte_val(pte) &= ~_PAGE_SWC;
#endif
	return pte;
}

static inline pte_t pte_mkdirty(pte_t pte)
{
	return pte;
}

static inline pte_t pte_mkold(pte_t pte)
{
#ifdef CONFIG_PGSTE
	pte_val(pte) &= ~_PAGE_SWR;
#endif
	return pte;
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	return pte;
}

static inline pte_t pte_mkspecial(pte_t pte)
{
	pte_val(pte) |= _PAGE_SPECIAL;
	return pte;
}
#ifdef CONFIG_HUGETLB_PAGE
static inline pte_t pte_mkhuge(pte_t pte)
{
	/*
	 * PROT_NONE needs to be remapped from the pte type to the ste type.
	 * The HW invalid bit is also different for pte and ste. The pte
	 * invalid bit happens to be the same as the ste _SEGMENT_ENTRY_LARGE
	 * bit, so we don't have to clear it.
	 */
	if (pte_val(pte) & _PAGE_INVALID) {
		if (pte_val(pte) & _PAGE_SWT)
			pte_val(pte) |= _HPAGE_TYPE_NONE;
		pte_val(pte) |= _SEGMENT_ENTRY_INV;
	}
	/*
	 * Clear SW pte bits SWT and SWX, there are no SW bits in a segment
	 * table entry.
	 */
	pte_val(pte) &= ~(_PAGE_SWT | _PAGE_SWX);
	/*
	 * Also set the change-override bit because we don't need dirty bit
	 * tracking for hugetlbfs pages.
	 */
	pte_val(pte) |= (_SEGMENT_ENTRY_LARGE | _SEGMENT_ENTRY_CO);
	return pte;
}
#endif
/*
 * Get (and clear) the user dirty bit for a pte.
 */
static inline int ptep_test_and_clear_user_dirty(struct mm_struct *mm,
						 pte_t *ptep)
{
	pgste_t pgste;
	int dirty = 0;

	if (mm_has_pgste(mm)) {
		pgste = pgste_get_lock(ptep);
		pgste = pgste_update_all(ptep, pgste);
		dirty = !!(pgste_val(pgste) & KVM_UC_BIT);
		pgste_val(pgste) &= ~KVM_UC_BIT;
		pgste_set_unlock(ptep, pgste);
	}
	return dirty;
}
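
/*
 * Illustrative sketch (assumed context, hypothetical helper): a KVM
 * migration dirty-log pass could harvest the user dirty bit per pte;
 * "mm", "ptep", "gfn" and mark_gfn_dirty() are assumed/hypothetical.
 *
 *	if (ptep_test_and_clear_user_dirty(mm, ptep))
 *		mark_gfn_dirty(gfn);
 */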
/*
 * Get (and clear) the user referenced bit for a pte.
 */
static inline int ptep_test_and_clear_user_young(struct mm_struct *mm,
						 pte_t *ptep)
{
	pgste_t pgste;
	int young = 0;

	if (mm_has_pgste(mm)) {
		pgste = pgste_get_lock(ptep);
		pgste = pgste_update_young(ptep, pgste);
		young = !!(pgste_val(pgste) & KVM_UR_BIT);
		pgste_val(pgste) &= ~KVM_UR_BIT;
		pgste_set_unlock(ptep, pgste);
	}
	return young;
}

#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
static inline int ptep_test_and_clear_young(struct vm_area_struct *vma,
					    unsigned long addr, pte_t *ptep)
{
	pgste_t pgste;
	pte_t pte;

	if (mm_has_pgste(vma->vm_mm)) {
		pgste = pgste_get_lock(ptep);
		pgste = pgste_update_young(ptep, pgste);
		pte = *ptep;
		*ptep = pte_mkold(pte);
		pgste_set_unlock(ptep, pgste);
		return pte_young(pte);
	}
	return 0;
}

#define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
static inline int ptep_clear_flush_young(struct vm_area_struct *vma,
					 unsigned long address, pte_t *ptep)
{
	/* No need to flush the TLB:
	 * on s390 reference bits are in the storage key and never in the TLB.
	 * With virtualization we handle the reference bit, without it we
	 * can simply return. */
	return ptep_test_and_clear_young(vma, address, ptep);
}
static inline void __ptep_ipte(unsigned long address, pte_t *ptep)
{
	if (!(pte_val(*ptep) & _PAGE_INVALID)) {
#ifndef __s390x__
		/* pto must point to the start of the segment table */
		pte_t *pto = (pte_t *) (((unsigned long) ptep) & 0x7ffffc00);
#else
		/* ipte in zarch mode can do the math */
		pte_t *pto = ptep;
#endif
		asm volatile(
			"	ipte	%2,%3"
			: "=m" (*ptep) : "m" (*ptep),
			  "a" (pto), "a" (address));
	}
}
/*
 * This is hard to understand. ptep_get_and_clear and ptep_clear_flush
 * both clear the TLB for the unmapped pte. The reason is that
 * ptep_get_and_clear is used in common code (e.g. change_pte_range)
 * to modify an active pte. The sequence is
 *   1) ptep_get_and_clear
 *   2) set_pte_at
 *   3) flush_tlb_range
 * On s390 the tlb needs to get flushed with the modification of the pte
 * if the pte is active. The only way this can be implemented is to
 * have ptep_get_and_clear do the tlb flush. In exchange flush_tlb_range
 * is a nop.
 */
#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
				       unsigned long address, pte_t *ptep)
{
	pgste_t pgste;
	pte_t pte;

	mm->context.flush_mm = 1;
	if (mm_has_pgste(mm))
		pgste = pgste_get_lock(ptep);

	pte = *ptep;
	if (!mm_exclusive(mm))
		__ptep_ipte(address, ptep);
	pte_val(*ptep) = _PAGE_TYPE_EMPTY;

	if (mm_has_pgste(mm)) {
		pgste = pgste_update_all(&pte, pgste);
		pgste_set_unlock(ptep, pgste);
	}
	return pte;
}
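
/*
 * Illustrative sketch of the sequence described above, as common code
 * (e.g. change_pte_range) uses it; "vma", "addr", "start", "end" and
 * "newprot" are assumed caller-provided:
 *
 *	pte_t old = ptep_get_and_clear(vma->vm_mm, addr, ptep); // flushes TLB
 *	set_pte_at(vma->vm_mm, addr, ptep, pte_modify(old, newprot));
 *	flush_tlb_range(vma, start, end);			// nop on s390
 */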
#define __HAVE_ARCH_PTEP_MODIFY_PROT_TRANSACTION
static inline pte_t ptep_modify_prot_start(struct mm_struct *mm,
					   unsigned long address,
					   pte_t *ptep)
{
	pte_t pte;

	mm->context.flush_mm = 1;
	if (mm_has_pgste(mm))
		pgste_get_lock(ptep);

	pte = *ptep;
	if (!mm_exclusive(mm))
		__ptep_ipte(address, ptep);
	return pte;
}

static inline void ptep_modify_prot_commit(struct mm_struct *mm,
					   unsigned long address,
					   pte_t *ptep, pte_t pte)
{
	*ptep = pte;
	if (mm_has_pgste(mm))
		pgste_set_unlock(ptep, *(pgste_t *)(ptep + PTRS_PER_PTE));
}

#define __HAVE_ARCH_PTEP_CLEAR_FLUSH
static inline pte_t ptep_clear_flush(struct vm_area_struct *vma,
				     unsigned long address, pte_t *ptep)
{
	pgste_t pgste;
	pte_t pte;

	if (mm_has_pgste(vma->vm_mm))
		pgste = pgste_get_lock(ptep);

	pte = *ptep;
	__ptep_ipte(address, ptep);
	pte_val(*ptep) = _PAGE_TYPE_EMPTY;

	if (mm_has_pgste(vma->vm_mm)) {
		pgste = pgste_update_all(&pte, pgste);
		pgste_set_unlock(ptep, pgste);
	}
	return pte;
}
/*
 * The batched pte unmap code uses ptep_get_and_clear_full to clear the
 * ptes. Here an optimization is possible. tlb_gather_mmu flushes all
 * tlbs of an mm if it can guarantee that the ptes of the mm_struct
 * cannot be accessed while the batched unmap is running. In this case
 * full==1 and a simple pte_clear is enough. See tlb.h.
 */
#define __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL
static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm,
					    unsigned long address,
					    pte_t *ptep, int full)
{
	pgste_t pgste;
	pte_t pte;

	if (mm_has_pgste(mm))
		pgste = pgste_get_lock(ptep);

	pte = *ptep;
	if (!full)
		__ptep_ipte(address, ptep);
	pte_val(*ptep) = _PAGE_TYPE_EMPTY;

	if (mm_has_pgste(mm)) {
		pgste = pgste_update_all(&pte, pgste);
		pgste_set_unlock(ptep, pgste);
	}
	return pte;
}
#define __HAVE_ARCH_PTEP_SET_WRPROTECT
static inline pte_t ptep_set_wrprotect(struct mm_struct *mm,
				       unsigned long address, pte_t *ptep)
{
	pgste_t pgste;
	pte_t pte = *ptep;

	if (pte_write(pte)) {
		mm->context.flush_mm = 1;
		if (mm_has_pgste(mm))
			pgste = pgste_get_lock(ptep);

		if (!mm_exclusive(mm))
			__ptep_ipte(address, ptep);
		*ptep = pte_wrprotect(pte);

		if (mm_has_pgste(mm))
			pgste_set_unlock(ptep, pgste);
	}
	return pte;
}

#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
static inline int ptep_set_access_flags(struct vm_area_struct *vma,
					unsigned long address, pte_t *ptep,
					pte_t entry, int dirty)
{
	pgste_t pgste;

	if (pte_same(*ptep, entry))
		return 0;
	if (mm_has_pgste(vma->vm_mm))
		pgste = pgste_get_lock(ptep);

	__ptep_ipte(address, ptep);
	*ptep = entry;

	if (mm_has_pgste(vma->vm_mm))
		pgste_set_unlock(ptep, pgste);
	return 1;
}
/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */
static inline pte_t mk_pte_phys(unsigned long physpage, pgprot_t pgprot)
{
	pte_t __pte;
	pte_val(__pte) = physpage + pgprot_val(pgprot);
	return __pte;
}

static inline pte_t mk_pte(struct page *page, pgprot_t pgprot)
{
	unsigned long physpage = page_to_phys(page);
	return mk_pte_phys(physpage, pgprot);
}
#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))
#define pud_index(address) (((address) >> PUD_SHIFT) & (PTRS_PER_PUD-1))
#define pmd_index(address) (((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1))
#define pte_index(address) (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE-1))

#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))
#define pgd_offset_k(address) pgd_offset(&init_mm, address)

#ifndef __s390x__

#define pmd_deref(pmd) (pmd_val(pmd) & _SEGMENT_ENTRY_ORIGIN)
#define pud_deref(pmd) ({ BUG(); 0UL; })
#define pgd_deref(pmd) ({ BUG(); 0UL; })

#define pud_offset(pgd, address) ((pud_t *) pgd)
#define pmd_offset(pud, address) ((pmd_t *) pud + pmd_index(address))

#else /* __s390x__ */

#define pmd_deref(pmd) (pmd_val(pmd) & _SEGMENT_ENTRY_ORIGIN)
#define pud_deref(pud) (pud_val(pud) & _REGION_ENTRY_ORIGIN)
#define pgd_deref(pgd) (pgd_val(pgd) & _REGION_ENTRY_ORIGIN)

static inline pud_t *pud_offset(pgd_t *pgd, unsigned long address)
{
	pud_t *pud = (pud_t *) pgd;
	if ((pgd_val(*pgd) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R2)
		pud = (pud_t *) pgd_deref(*pgd);
	return pud + pud_index(address);
}

static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address)
{
	pmd_t *pmd = (pmd_t *) pud;
	if ((pud_val(*pud) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R3)
		pmd = (pmd_t *) pud_deref(*pud);
	return pmd + pmd_index(address);
}

#endif /* __s390x__ */

#define pfn_pte(pfn,pgprot) mk_pte_phys(__pa((pfn) << PAGE_SHIFT),(pgprot))
#define pte_pfn(x) (pte_val(x) >> PAGE_SHIFT)
#define pte_page(x) pfn_to_page(pte_pfn(x))

#define pmd_page(pmd) pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT)

/* Find an entry in the lowest level page table.. */
#define pte_offset(pmd, addr) ((pte_t *) pmd_deref(*(pmd)) + pte_index(addr))
#define pte_offset_kernel(pmd, address) pte_offset(pmd,address)
#define pte_offset_map(pmd, address) pte_offset_kernel(pmd, address)
#define pte_unmap(pte) do { } while (0)
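
/*
 * Illustrative sketch (assumed context): resolving a user address to
 * its pte with the walk helpers above; "mm" and "addr" are assumed
 * caller-provided and every level is assumed present.
 *
 *	pgd_t *pgd = pgd_offset(mm, addr);
 *	pud_t *pud = pud_offset(pgd, addr);
 *	pmd_t *pmd = pmd_offset(pud, addr);
 *	pte_t *pte = pte_offset_map(pmd, addr);
 *	// ... use *pte ...
 *	pte_unmap(pte);
 */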
/*
 * 31 bit swap entry format:
 * A page-table entry has some bits we have to treat in a special way.
 * Bits 0, 20 and 23 have to be zero, otherwise a specification
 * exception will occur instead of a page translation exception. The
 * specification exception has the bad habit not to store necessary
 * information in the lowcore.
 * Bit 21 and bit 22 are the page invalid bit and the page protection
 * bit. We set both to indicate a swapped page.
 * Bit 30 and 31 are used to distinguish the different page types. For
 * a swapped page these bits need to be zero.
 * This leaves the bits 1-19 and bits 24-29 to store type and offset.
 * We use the 5 bits from 25-29 for the type and the 20 bits from 1-19
 * plus 24 for the offset.
 * 0|     offset        |0110|o|type |00|
 * 0 0000000001111111111 2222 2 22222 33
 * 0 1234567890123456789 0123 4 56789 01
 *
 * 64 bit swap entry format:
 * A page-table entry has some bits we have to treat in a special way.
 * Bits 52 and 55 have to be zero, otherwise a specification
 * exception will occur instead of a page translation exception. The
 * specification exception has the bad habit not to store necessary
 * information in the lowcore.
 * Bit 53 and bit 54 are the page invalid bit and the page protection
 * bit. We set both to indicate a swapped page.
 * Bit 62 and 63 are used to distinguish the different page types. For
 * a swapped page these bits need to be zero.
 * This leaves the bits 0-51 and bits 56-61 to store type and offset.
 * We use the 5 bits from 57-61 for the type and the 53 bits from 0-51
 * plus 56 for the offset.
 * |                     offset                       |0110|o|type |00|
 * 0000000000111111111122222222223333333333444444444455 5555 5 55566 66
 * 0123456789012345678901234567890123456789012345678901 2345 6 78901 23
 */
#ifndef __s390x__
#define __SWP_OFFSET_MASK (~0UL >> 12)
#else
#define __SWP_OFFSET_MASK (~0UL >> 11)
#endif

static inline pte_t mk_swap_pte(unsigned long type, unsigned long offset)
{
	pte_t pte;
	offset &= __SWP_OFFSET_MASK;
	pte_val(pte) = _PAGE_TYPE_SWAP | ((type & 0x1f) << 2) |
		((offset & 1UL) << 7) | ((offset & ~1UL) << 11);
	return pte;
}

#define __swp_type(entry)	(((entry).val >> 2) & 0x1f)
#define __swp_offset(entry)	(((entry).val >> 11) | (((entry).val >> 7) & 1))
#define __swp_entry(type,offset) ((swp_entry_t) { pte_val(mk_swap_pte((type),(offset))) })

#define __pte_to_swp_entry(pte)	((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x)	((pte_t) { (x).val })
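
/*
 * Illustrative sketch (assumed context): round-tripping a swap entry
 * through the encoding above; "type" and "offset" are assumed values
 * within the 5-bit type and __SWP_OFFSET_MASK limits.
 *
 *	swp_entry_t entry = __swp_entry(type, offset);
 *	pte_t pte = __swp_entry_to_pte(entry);
 *	// __swp_type(__pte_to_swp_entry(pte)) == type
 *	// __swp_offset(__pte_to_swp_entry(pte)) == offset
 */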
#ifndef __s390x__
# define PTE_FILE_MAX_BITS	26
#else /* __s390x__ */
# define PTE_FILE_MAX_BITS	59
#endif /* __s390x__ */

#define pte_to_pgoff(__pte) \
	((((__pte).pte >> 12) << 7) + (((__pte).pte >> 1) & 0x7f))

#define pgoff_to_pte(__off) \
	((pte_t) { ((((__off) & 0x7f) << 1) + (((__off) >> 7) << 12)) \
		   | _PAGE_TYPE_FILE })
#endif /* !__ASSEMBLY__ */

#define kern_addr_valid(addr)   (1)

extern int vmem_add_mapping(unsigned long start, unsigned long size);
extern int vmem_remove_mapping(unsigned long start, unsigned long size);
extern int s390_enable_sie(void);

/*
 * No page table caches to initialise
 */
#define pgtable_cache_init()	do { } while (0)

#include <asm-generic/pgtable.h>

#endif /* _ASM_S390_PGTABLE_H */