/* mem_detect.c */
  1. /*
  2. * Copyright IBM Corp. 2008, 2009
  3. *
  4. * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>
  5. */
  6. #include <linux/kernel.h>
  7. #include <linux/module.h>
  8. #include <asm/ipl.h>
  9. #include <asm/sclp.h>
  10. #include <asm/setup.h>
  11. #define ADDR2G (1ULL << 31)
/*
 * Probe physical memory with the tprot instruction in steps of the
 * storage increment size (rzm) and record each contiguous run of
 * same-typed storage as one entry in chunk[].  Stops at memsize (when
 * known) or after MEMORY_CHUNKS entries have been filled.
 */
static void find_memory_chunks(struct mem_chunk chunk[])
{
	unsigned long long memsize, rnmax, rzm;
	unsigned long addr = 0, size;
	int i = 0, type;

	rzm = sclp_get_rzm();
	rnmax = sclp_get_rnmax();
	/*
	 * memsize is intentionally computed before the rzm fallback below:
	 * if SCLP reported rzm == 0, memsize stays 0 and the outer loop
	 * relies on tprot results / the chunk limit instead of a size bound.
	 */
	memsize = rzm * rnmax;
	if (!rzm)
		rzm = 1ULL << 17;	/* fallback increment: 128KB */
	if (sizeof(long) == 4) {
		/* 31-bit kernel: clamp increment and limit to 2GB */
		rzm = min(ADDR2G, rzm);
		memsize = memsize ? min(ADDR2G, memsize) : ADDR2G;
	}
	do {
		size = 0;
		/* access type at the start of the prospective chunk */
		type = tprot(addr);
		do {
			/* extend while the next increment has the same type */
			size += rzm;
			if (memsize && addr + size >= memsize)
				break;
		} while (type == tprot(addr + size));
		/* only usable storage is recorded; other types are skipped */
		if (type == CHUNK_READ_WRITE || type == CHUNK_READ_ONLY) {
			chunk[i].addr = addr;
			chunk[i].size = size;
			chunk[i].type = type;
			i++;
		}
		addr += size;
	} while (addr < memsize && i < MEMORY_CHUNKS);
}
/*
 * Fill chunk[] (MEMORY_CHUNKS entries, zeroed first) with the detected
 * physical memory layout.  Safe to call with DAT either enabled (normal
 * kernel context) or disabled (early ipl code).
 */
void detect_memory_layout(struct mem_chunk chunk[])
{
	unsigned long flags, flags_dat, cr0;

	memset(chunk, 0, MEMORY_CHUNKS * sizeof(struct mem_chunk));
	/*
	 * Disable IRQs, DAT and low address protection so tprot does the
	 * right thing and we don't get scheduled away with low address
	 * protection disabled.
	 */
	local_irq_save(flags);
	/* stnsm with 0xfb clears the DAT bit; old system mask is returned */
	flags_dat = __arch_local_irq_stnsm(0xfb);
	/*
	 * In case DAT was enabled, make sure chunk doesn't reside in vmalloc
	 * space. We have disabled DAT and any access to vmalloc area will
	 * cause an exception.
	 * If DAT was disabled we are called from early ipl code.
	 */
	if (test_bit(5, &flags_dat)) {
		if (WARN_ON_ONCE(is_vmalloc_or_module_addr(chunk)))
			goto out;
	}
	/* save CR0, clear low address protection (bit 28), probe, restore */
	__ctl_store(cr0, 0, 0);
	__ctl_clear_bit(0, 28);
	find_memory_chunks(chunk);
	__ctl_load(cr0, 0, 0);
out:
	/* restore DAT state first, then the IRQ state */
	__arch_local_irq_ssm(flags_dat);
	local_irq_restore(flags);
}
EXPORT_SYMBOL(detect_memory_layout);
  73. /*
  74. * Move memory chunks array from index "from" to index "to"
  75. */
  76. static void mem_chunk_move(struct mem_chunk chunk[], int to, int from)
  77. {
  78. int cnt = MEMORY_CHUNKS - to;
  79. memmove(&chunk[to], &chunk[from], cnt * sizeof(struct mem_chunk));
  80. }
  81. /*
  82. * Initialize memory chunk
  83. */
  84. static void mem_chunk_init(struct mem_chunk *chunk, unsigned long addr,
  85. unsigned long size, int type)
  86. {
  87. chunk->type = type;
  88. chunk->addr = addr;
  89. chunk->size = size;
  90. }
/*
 * Create memory hole with given address, size, and type.
 *
 * Every chunk overlapped by [addr, addr + size) is retyped or split so
 * the overlapping part gets the hole's type.  Splitting inserts new
 * entries via mem_chunk_move(); entries shifted past the end of the
 * MEMORY_CHUNKS-sized array are lost.
 */
void create_mem_hole(struct mem_chunk chunk[], unsigned long addr,
		     unsigned long size, int type)
{
	unsigned long lh_start, lh_end, lh_size, ch_start, ch_end, ch_size;
	int i, ch_type;

	for (i = 0; i < MEMORY_CHUNKS; i++) {
		if (chunk[i].size == 0)
			continue;
		/* Define chunk properties (end addresses are inclusive) */
		ch_start = chunk[i].addr;
		ch_size = chunk[i].size;
		ch_end = ch_start + ch_size - 1;
		ch_type = chunk[i].type;
		/* Is memory chunk hit by memory hole? */
		if (addr + size <= ch_start)
			continue; /* No: memory hole in front of chunk */
		if (addr > ch_end)
			continue; /* No: memory hole after chunk */
		/* Yes: Define local hole properties (overlap of hole and chunk) */
		lh_start = max(addr, chunk[i].addr);
		lh_end = min(addr + size - 1, ch_end);
		lh_size = lh_end - lh_start + 1;
		if (lh_start == ch_start && lh_end == ch_end) {
			/* Hole covers complete memory chunk */
			mem_chunk_init(&chunk[i], lh_start, lh_size, type);
		} else if (lh_end == ch_end) {
			/* Hole starts in memory chunk and covers chunk end */
			mem_chunk_move(chunk, i + 1, i);
			mem_chunk_init(&chunk[i], ch_start, ch_size - lh_size,
				       ch_type);
			mem_chunk_init(&chunk[i + 1], lh_start, lh_size, type);
			/* skip the inserted hole entry; hole may continue */
			i += 1;
		} else if (lh_start == ch_start) {
			/* Hole ends in memory chunk */
			mem_chunk_move(chunk, i + 1, i);
			mem_chunk_init(&chunk[i], lh_start, lh_size, type);
			mem_chunk_init(&chunk[i + 1], lh_end + 1,
				       ch_size - lh_size, ch_type);
			/* hole is fully consumed; no later chunk can overlap */
			break;
		} else {
			/* Hole splits memory chunk */
			mem_chunk_move(chunk, i + 2, i);
			mem_chunk_init(&chunk[i], ch_start,
				       lh_start - ch_start, ch_type);
			mem_chunk_init(&chunk[i + 1], lh_start, lh_size, type);
			mem_chunk_init(&chunk[i + 2], lh_end + 1,
				       ch_end - lh_end, ch_type);
			break;
		}
	}
}