/*
 * cbe_regs.c
 *
 * Accessor routines for the various MMIO register blocks of the CBE
 *
 * (c) 2006 Benjamin Herrenschmidt <benh@kernel.crashing.org>, IBM Corp.
 */
#include <linux/percpu.h>
#include <linux/types.h>
#include <linux/module.h>

#include <asm/io.h>
#include <asm/pgtable.h>
#include <asm/prom.h>
#include <asm/ptrace.h>

#include "cbe_regs.h"
/*
 * Current implementation uses "cpu" nodes. We build our own mapping
 * array of cpu numbers to cpu nodes locally for now to allow interrupt
 * time code to have a fast path rather than call of_get_cpu_node(). If
 * we implement cpu hotplug, we'll have to install an appropriate notifier
 * in order to release references to the cpu going away
 */
/*
 * One entry per CBE chip found in the device tree: the "cpu" node it
 * was built from plus ioremapped pointers to that chip's MMIO register
 * blocks (filled in by cbe_regs_init()).
 */
static struct cbe_regs_map
{
	struct device_node *cpu_node;			/* owning "cpu" device tree node */
	struct cbe_pmd_regs __iomem *pmd_regs;		/* from the "pervasive" property */
	struct cbe_iic_regs __iomem *iic_regs;		/* from the "iic" property */
	struct cbe_mic_tm_regs __iomem *mic_tm_regs;	/* from the "mic-tm" property */
	struct cbe_pmd_shadow_regs pmd_shadow_regs;	/* in-memory, not ioremapped */
} cbe_regs_maps[MAX_CBE];
static int cbe_regs_map_count;	/* number of valid entries in cbe_regs_maps */
/*
 * Per-logical-cpu fast lookup table: maps a cpu number to its device
 * tree node and its chip's register map, so interrupt-time code can
 * avoid calling of_get_cpu_node() (see comment above).
 */
static struct cbe_thread_map
{
	struct device_node *cpu_node;	/* set in cbe_regs_init() */
	struct cbe_regs_map *regs;	/* NULL until matched to a map */
} cbe_thread_map[NR_CPUS];
  37. static struct cbe_regs_map *cbe_find_map(struct device_node *np)
  38. {
  39. int i;
  40. struct device_node *tmp_np;
  41. if (strcasecmp(np->type, "spe") == 0) {
  42. if (np->data == NULL) {
  43. /* walk up path until cpu node was found */
  44. tmp_np = np->parent;
  45. while (tmp_np != NULL && strcasecmp(tmp_np->type, "cpu") != 0)
  46. tmp_np = tmp_np->parent;
  47. np->data = cbe_find_map(tmp_np);
  48. }
  49. return np->data;
  50. }
  51. for (i = 0; i < cbe_regs_map_count; i++)
  52. if (cbe_regs_maps[i].cpu_node == np)
  53. return &cbe_regs_maps[i];
  54. return NULL;
  55. }
  56. struct cbe_pmd_regs __iomem *cbe_get_pmd_regs(struct device_node *np)
  57. {
  58. struct cbe_regs_map *map = cbe_find_map(np);
  59. if (map == NULL)
  60. return NULL;
  61. return map->pmd_regs;
  62. }
  63. EXPORT_SYMBOL_GPL(cbe_get_pmd_regs);
  64. struct cbe_pmd_regs __iomem *cbe_get_cpu_pmd_regs(int cpu)
  65. {
  66. struct cbe_regs_map *map = cbe_thread_map[cpu].regs;
  67. if (map == NULL)
  68. return NULL;
  69. return map->pmd_regs;
  70. }
  71. EXPORT_SYMBOL_GPL(cbe_get_cpu_pmd_regs);
  72. struct cbe_pmd_shadow_regs *cbe_get_pmd_shadow_regs(struct device_node *np)
  73. {
  74. struct cbe_regs_map *map = cbe_find_map(np);
  75. if (map == NULL)
  76. return NULL;
  77. return &map->pmd_shadow_regs;
  78. }
  79. struct cbe_pmd_shadow_regs *cbe_get_cpu_pmd_shadow_regs(int cpu)
  80. {
  81. struct cbe_regs_map *map = cbe_thread_map[cpu].regs;
  82. if (map == NULL)
  83. return NULL;
  84. return &map->pmd_shadow_regs;
  85. }
  86. struct cbe_iic_regs __iomem *cbe_get_iic_regs(struct device_node *np)
  87. {
  88. struct cbe_regs_map *map = cbe_find_map(np);
  89. if (map == NULL)
  90. return NULL;
  91. return map->iic_regs;
  92. }
  93. struct cbe_iic_regs __iomem *cbe_get_cpu_iic_regs(int cpu)
  94. {
  95. struct cbe_regs_map *map = cbe_thread_map[cpu].regs;
  96. if (map == NULL)
  97. return NULL;
  98. return map->iic_regs;
  99. }
  100. struct cbe_mic_tm_regs __iomem *cbe_get_mic_tm_regs(struct device_node *np)
  101. {
  102. struct cbe_regs_map *map = cbe_find_map(np);
  103. if (map == NULL)
  104. return NULL;
  105. return map->mic_tm_regs;
  106. }
  107. struct cbe_mic_tm_regs __iomem *cbe_get_cpu_mic_tm_regs(int cpu)
  108. {
  109. struct cbe_regs_map *map = cbe_thread_map[cpu].regs;
  110. if (map == NULL)
  111. return NULL;
  112. return map->mic_tm_regs;
  113. }
  114. EXPORT_SYMBOL_GPL(cbe_get_cpu_mic_tm_regs);
  115. /* FIXME
  116. * This is little more than a stub at the moment. It should be
  117. * fleshed out so that it works for both SMT and non-SMT, no
  118. * matter if the passed cpu is odd or even.
  119. * For SMT enabled, returns 0 for even-numbered cpu; otherwise 1.
  120. * For SMT disabled, returns 0 for all cpus.
  121. */
  122. u32 cbe_get_hw_thread_id(int cpu)
  123. {
  124. return (cpu & 1);
  125. }
  126. EXPORT_SYMBOL_GPL(cbe_get_hw_thread_id);
  127. void __init cbe_regs_init(void)
  128. {
  129. int i;
  130. struct device_node *cpu;
  131. /* Build local fast map of CPUs */
  132. for_each_possible_cpu(i)
  133. cbe_thread_map[i].cpu_node = of_get_cpu_node(i, NULL);
  134. /* Find maps for each device tree CPU */
  135. for_each_node_by_type(cpu, "cpu") {
  136. struct cbe_regs_map *map = &cbe_regs_maps[cbe_regs_map_count++];
  137. /* That hack must die die die ! */
  138. const struct address_prop {
  139. unsigned long address;
  140. unsigned int len;
  141. } __attribute__((packed)) *prop;
  142. if (cbe_regs_map_count > MAX_CBE) {
  143. printk(KERN_ERR "cbe_regs: More BE chips than supported"
  144. "!\n");
  145. cbe_regs_map_count--;
  146. return;
  147. }
  148. map->cpu_node = cpu;
  149. for_each_possible_cpu(i)
  150. if (cbe_thread_map[i].cpu_node == cpu)
  151. cbe_thread_map[i].regs = map;
  152. prop = get_property(cpu, "pervasive", NULL);
  153. if (prop != NULL)
  154. map->pmd_regs = ioremap(prop->address, prop->len);
  155. prop = get_property(cpu, "iic", NULL);
  156. if (prop != NULL)
  157. map->iic_regs = ioremap(prop->address, prop->len);
  158. prop = (struct address_prop *)get_property(cpu, "mic-tm",
  159. NULL);
  160. if (prop != NULL)
  161. map->mic_tm_regs = ioremap(prop->address, prop->len);
  162. }
  163. }