highmem.h 2.8 KB

  1. /* MN10300 Virtual kernel memory mappings for high memory
  2. *
  3. * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
  4. * Written by David Howells (dhowells@redhat.com)
  5. * - Derived from include/asm-i386/highmem.h
  6. *
  7. * This program is free software; you can redistribute it and/or
  8. * modify it under the terms of the GNU General Public Licence
  9. * as published by the Free Software Foundation; either version
  10. * 2 of the Licence, or (at your option) any later version.
  11. */
  12. #ifndef _ASM_HIGHMEM_H
  13. #define _ASM_HIGHMEM_H
  14. #ifdef __KERNEL__
  15. #include <linux/init.h>
  16. #include <linux/interrupt.h>
  17. #include <linux/highmem.h>
  18. #include <asm/kmap_types.h>
  19. #include <asm/pgtable.h>
  20. /* undef for production */
  21. #undef HIGHMEM_DEBUG
  22. /* declarations for highmem.c */
  23. extern unsigned long highstart_pfn, highend_pfn;
  24. extern pte_t *kmap_pte;
  25. extern pgprot_t kmap_prot;
  26. extern pte_t *pkmap_page_table;
  27. extern void __init kmap_init(void);
  28. /*
  29. * Right now we initialize only a single pte table. It can be extended
  30. * easily, subsequent pte tables have to be allocated in one physical
  31. * chunk of RAM.
  32. */
  33. #define PKMAP_BASE 0xfe000000UL
  34. #define LAST_PKMAP 1024
  35. #define LAST_PKMAP_MASK (LAST_PKMAP - 1)
  36. #define PKMAP_NR(virt) ((virt - PKMAP_BASE) >> PAGE_SHIFT)
  37. #define PKMAP_ADDR(nr) (PKMAP_BASE + ((nr) << PAGE_SHIFT))
  38. extern unsigned long kmap_high(struct page *page);
  39. extern void kunmap_high(struct page *page);
  40. static inline unsigned long kmap(struct page *page)
  41. {
  42. if (in_interrupt())
  43. BUG();
  44. if (page < highmem_start_page)
  45. return page_address(page);
  46. return kmap_high(page);
  47. }
  48. static inline void kunmap(struct page *page)
  49. {
  50. if (in_interrupt())
  51. BUG();
  52. if (page < highmem_start_page)
  53. return;
  54. kunmap_high(page);
  55. }
  56. /*
  57. * The use of kmap_atomic/kunmap_atomic is discouraged - kmap/kunmap
  58. * gives a more generic (and caching) interface. But kmap_atomic can
  59. * be used in IRQ contexts, so in some (very limited) cases we need
  60. * it.
  61. */
  62. static inline unsigned long kmap_atomic(struct page *page, enum km_type type)
  63. {
  64. enum fixed_addresses idx;
  65. unsigned long vaddr;
  66. if (page < highmem_start_page)
  67. return page_address(page);
  68. debug_kmap_atomic(type);
  69. idx = type + KM_TYPE_NR * smp_processor_id();
  70. vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
  71. #if HIGHMEM_DEBUG
  72. if (!pte_none(*(kmap_pte - idx)))
  73. BUG();
  74. #endif
  75. set_pte(kmap_pte - idx, mk_pte(page, kmap_prot));
  76. __flush_tlb_one(vaddr);
  77. return vaddr;
  78. }
  79. static inline void kunmap_atomic(unsigned long vaddr, enum km_type type)
  80. {
  81. #if HIGHMEM_DEBUG
  82. enum fixed_addresses idx = type + KM_TYPE_NR * smp_processor_id();
  83. if (vaddr < FIXADDR_START) /* FIXME */
  84. return;
  85. if (vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx))
  86. BUG();
  87. /*
  88. * force other mappings to Oops if they'll try to access
  89. * this pte without first remap it
  90. */
  91. pte_clear(kmap_pte - idx);
  92. __flush_tlb_one(vaddr);
  93. #endif
  94. }
  95. #endif /* __KERNEL__ */
  96. #endif /* _ASM_HIGHMEM_H */