  1. /* MN10300 Virtual kernel memory mappings for high memory
  2. *
  3. * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
  4. * Written by David Howells (dhowells@redhat.com)
  5. * - Derived from include/asm-i386/highmem.h
  6. *
  7. * This program is free software; you can redistribute it and/or
  8. * modify it under the terms of the GNU General Public Licence
  9. * as published by the Free Software Foundation; either version
  10. * 2 of the Licence, or (at your option) any later version.
  11. */
  12. #ifndef _ASM_HIGHMEM_H
  13. #define _ASM_HIGHMEM_H
  14. #ifdef __KERNEL__
  15. #include <linux/init.h>
  16. #include <linux/interrupt.h>
  17. #include <asm/kmap_types.h>
  18. #include <asm/pgtable.h>
  19. /* undef for production */
  20. #undef HIGHMEM_DEBUG
  21. /* declarations for highmem.c */
  22. extern unsigned long highstart_pfn, highend_pfn;
  23. extern pte_t *kmap_pte;
  24. extern pgprot_t kmap_prot;
  25. extern pte_t *pkmap_page_table;
  26. extern void __init kmap_init(void);
  27. /*
  28. * Right now we initialize only a single pte table. It can be extended
  29. * easily, subsequent pte tables have to be allocated in one physical
  30. * chunk of RAM.
  31. */
  32. #define PKMAP_BASE 0xfe000000UL
  33. #define LAST_PKMAP 1024
  34. #define LAST_PKMAP_MASK (LAST_PKMAP - 1)
  35. #define PKMAP_NR(virt) ((virt - PKMAP_BASE) >> PAGE_SHIFT)
  36. #define PKMAP_ADDR(nr) (PKMAP_BASE + ((nr) << PAGE_SHIFT))
  37. extern unsigned long kmap_high(struct page *page);
  38. extern void kunmap_high(struct page *page);
  39. static inline unsigned long kmap(struct page *page)
  40. {
  41. if (in_interrupt())
  42. BUG();
  43. if (page < highmem_start_page)
  44. return page_address(page);
  45. return kmap_high(page);
  46. }
  47. static inline void kunmap(struct page *page)
  48. {
  49. if (in_interrupt())
  50. BUG();
  51. if (page < highmem_start_page)
  52. return;
  53. kunmap_high(page);
  54. }
  55. /*
  56. * The use of kmap_atomic/kunmap_atomic is discouraged - kmap/kunmap
  57. * gives a more generic (and caching) interface. But kmap_atomic can
  58. * be used in IRQ contexts, so in some (very limited) cases we need
  59. * it.
  60. */
  61. static inline unsigned long kmap_atomic(struct page *page, enum km_type type)
  62. {
  63. enum fixed_addresses idx;
  64. unsigned long vaddr;
  65. if (page < highmem_start_page)
  66. return page_address(page);
  67. idx = type + KM_TYPE_NR * smp_processor_id();
  68. vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
  69. #if HIGHMEM_DEBUG
  70. if (!pte_none(*(kmap_pte - idx)))
  71. BUG();
  72. #endif
  73. set_pte(kmap_pte - idx, mk_pte(page, kmap_prot));
  74. __flush_tlb_one(vaddr);
  75. return vaddr;
  76. }
  77. static inline void kunmap_atomic(unsigned long vaddr, enum km_type type)
  78. {
  79. #if HIGHMEM_DEBUG
  80. enum fixed_addresses idx = type + KM_TYPE_NR * smp_processor_id();
  81. if (vaddr < FIXADDR_START) /* FIXME */
  82. return;
  83. if (vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx))
  84. BUG();
  85. /*
  86. * force other mappings to Oops if they'll try to access
  87. * this pte without first remap it
  88. */
  89. pte_clear(kmap_pte - idx);
  90. __flush_tlb_one(vaddr);
  91. #endif
  92. }
  93. #endif /* __KERNEL__ */
  94. #endif /* _ASM_HIGHMEM_H */