/*
 * cbe_regs.c
 *
 * Accessor routines for the various MMIO register blocks of the CBE
 *
 * (c) 2006 Benjamin Herrenschmidt <benh@kernel.crashing.org>, IBM Corp.
 */

#include <linux/percpu.h>
#include <linux/types.h>
#include <linux/module.h>

#include <asm/io.h>
#include <asm/pgtable.h>
#include <asm/prom.h>
#include <asm/ptrace.h>

#include "cbe_regs.h"
/*
 * The current implementation uses "cpu" nodes. We build our own mapping
 * array of cpu numbers to cpu nodes locally for now to allow interrupt
 * time code to have a fast path rather than calling of_get_cpu_node().
 * If we implement cpu hotplug, we'll have to install an appropriate
 * notifier in order to release references to a cpu going away.
 */
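
/*
 * Two lookup tables back the accessors below: cbe_regs_maps holds one
 * entry per CBE chip (its device tree node plus the ioremap'd register
 * blocks), while cbe_thread_map holds one entry per logical cpu and
 * points back at the owning chip's map. Both are filled in once, at
 * boot, by cbe_regs_init().
 */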
static struct cbe_regs_map
{
	struct device_node *cpu_node;
	struct cbe_pmd_regs __iomem *pmd_regs;
	struct cbe_iic_regs __iomem *iic_regs;
	struct cbe_mic_tm_regs __iomem *mic_tm_regs;
	struct cbe_pmd_shadow_regs pmd_shadow_regs;
} cbe_regs_maps[MAX_CBE];
static int cbe_regs_map_count;

static struct cbe_thread_map
{
	struct device_node *cpu_node;
	struct cbe_regs_map *regs;
	unsigned int thread_id;
	unsigned int cbe_id;
} cbe_thread_map[NR_CPUS];

static cpumask_t cbe_local_mask[MAX_CBE] = { [0 ... MAX_CBE-1] = CPU_MASK_NONE };
static cpumask_t cbe_first_online_cpu = CPU_MASK_NONE;
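
/*
 * cbe_find_map() resolves a device tree node to its chip map. For "spe"
 * nodes the result is cached in np->data on first use, after walking up
 * the tree to the enclosing "cpu" node; for anything else a linear scan
 * of cbe_regs_maps is enough, as MAX_CBE is small.
 */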
static struct cbe_regs_map *cbe_find_map(struct device_node *np)
{
	int i;
	struct device_node *tmp_np;

	if (strcasecmp(np->type, "spe") == 0) {
		if (np->data == NULL) {
			/* walk up the path until a cpu node is found */
			tmp_np = np->parent;
			while (tmp_np != NULL &&
			       strcasecmp(tmp_np->type, "cpu") != 0)
				tmp_np = tmp_np->parent;
			np->data = cbe_find_map(tmp_np);
		}
		return np->data;
	}

	for (i = 0; i < cbe_regs_map_count; i++)
		if (cbe_regs_maps[i].cpu_node == np)
			return &cbe_regs_maps[i];
	return NULL;
}
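
/*
 * Each register block below gets a pair of accessors: one keyed by a
 * device tree node and one keyed by a logical cpu number. The per-cpu
 * variants go straight through cbe_thread_map, which is the interrupt
 * time fast path the local mapping array above exists for.
 */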
struct cbe_pmd_regs __iomem *cbe_get_pmd_regs(struct device_node *np)
{
	struct cbe_regs_map *map = cbe_find_map(np);
	if (map == NULL)
		return NULL;
	return map->pmd_regs;
}
EXPORT_SYMBOL_GPL(cbe_get_pmd_regs);

struct cbe_pmd_regs __iomem *cbe_get_cpu_pmd_regs(int cpu)
{
	struct cbe_regs_map *map = cbe_thread_map[cpu].regs;
	if (map == NULL)
		return NULL;
	return map->pmd_regs;
}
EXPORT_SYMBOL_GPL(cbe_get_cpu_pmd_regs);
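
/*
 * Illustrative caller (not part of this file): other Cell platform code
 * fetches the block for a cpu and uses the usual powerpc MMIO accessors
 * on its fields. The register field name below is assumed for the sake
 * of the example.
 *
 *	struct cbe_pmd_regs __iomem *pmd_regs;
 *	u64 val;
 *
 *	pmd_regs = cbe_get_cpu_pmd_regs(smp_processor_id());
 *	if (pmd_regs)
 *		val = in_be64(&pmd_regs->pm_status);
 */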
struct cbe_pmd_shadow_regs *cbe_get_pmd_shadow_regs(struct device_node *np)
{
	struct cbe_regs_map *map = cbe_find_map(np);
	if (map == NULL)
		return NULL;
	return &map->pmd_shadow_regs;
}

struct cbe_pmd_shadow_regs *cbe_get_cpu_pmd_shadow_regs(int cpu)
{
	struct cbe_regs_map *map = cbe_thread_map[cpu].regs;
	if (map == NULL)
		return NULL;
	return &map->pmd_shadow_regs;
}
struct cbe_iic_regs __iomem *cbe_get_iic_regs(struct device_node *np)
{
	struct cbe_regs_map *map = cbe_find_map(np);
	if (map == NULL)
		return NULL;
	return map->iic_regs;
}

struct cbe_iic_regs __iomem *cbe_get_cpu_iic_regs(int cpu)
{
	struct cbe_regs_map *map = cbe_thread_map[cpu].regs;
	if (map == NULL)
		return NULL;
	return map->iic_regs;
}
struct cbe_mic_tm_regs __iomem *cbe_get_mic_tm_regs(struct device_node *np)
{
	struct cbe_regs_map *map = cbe_find_map(np);
	if (map == NULL)
		return NULL;
	return map->mic_tm_regs;
}

struct cbe_mic_tm_regs __iomem *cbe_get_cpu_mic_tm_regs(int cpu)
{
	struct cbe_regs_map *map = cbe_thread_map[cpu].regs;
	if (map == NULL)
		return NULL;
	return map->mic_tm_regs;
}
EXPORT_SYMBOL_GPL(cbe_get_cpu_mic_tm_regs);
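
/*
 * The helpers below translate between the kernel's view (logical cpu
 * numbers) and the hardware's view (the hw thread id within a chip, and
 * which chip, or "node", a cpu belongs to). They only read the tables
 * set up at init time.
 */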
u32 cbe_get_hw_thread_id(int cpu)
{
	return cbe_thread_map[cpu].thread_id;
}
EXPORT_SYMBOL_GPL(cbe_get_hw_thread_id);

u32 cbe_cpu_to_node(int cpu)
{
	return cbe_thread_map[cpu].cbe_id;
}
EXPORT_SYMBOL_GPL(cbe_cpu_to_node);
u32 cbe_node_to_cpu(int node)
{
	/* find_first_bit() takes its size argument in bits, not bytes */
	return find_first_bit((unsigned long *) &cbe_local_mask[node],
			      sizeof(cpumask_t) * 8);
}
EXPORT_SYMBOL_GPL(cbe_node_to_cpu);
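
/*
 * cbe_regs_init() runs in two passes: it first records each possible
 * cpu's device tree node and hw thread id, then walks the "cpu" nodes,
 * allocating one cbe_regs_map per chip, linking every thread of that
 * chip to it, and mapping the register blocks advertised by the
 * "pervasive", "iic" and "mic-tm" properties. Each property is read as
 * a packed pair of a 64-bit address and a 32-bit length, matching the
 * address_prop struct declared in the loop below.
 */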
void __init cbe_regs_init(void)
{
	int i;
	unsigned int thread_id;
	struct device_node *cpu;

	/* Build local fast map of CPUs */
	for_each_possible_cpu(i) {
		cbe_thread_map[i].cpu_node = of_get_cpu_node(i, &thread_id);
		cbe_thread_map[i].thread_id = thread_id;
	}

	/* Find maps for each device tree CPU */
	for_each_node_by_type(cpu, "cpu") {
		struct cbe_regs_map *map;
		unsigned int cbe_id;

		/* That hack must die die die ! */
		const struct address_prop {
			unsigned long address;
			unsigned int len;
		} __attribute__((packed)) *prop;

		cbe_id = cbe_regs_map_count++;
		map = &cbe_regs_maps[cbe_id];

		if (cbe_regs_map_count > MAX_CBE) {
			printk(KERN_ERR "cbe_regs: More BE chips than supported!\n");
			cbe_regs_map_count--;
			return;
		}
		map->cpu_node = cpu;

		for_each_possible_cpu(i) {
			struct cbe_thread_map *thread = &cbe_thread_map[i];

			if (thread->cpu_node == cpu) {
				thread->regs = map;
				thread->cbe_id = cbe_id;
				cpu_set(i, cbe_local_mask[cbe_id]);
				if (thread->thread_id == 0)
					cpu_set(i, cbe_first_online_cpu);
			}
		}

		prop = of_get_property(cpu, "pervasive", NULL);
		if (prop != NULL)
			map->pmd_regs = ioremap(prop->address, prop->len);

		prop = of_get_property(cpu, "iic", NULL);
		if (prop != NULL)
			map->iic_regs = ioremap(prop->address, prop->len);

		prop = of_get_property(cpu, "mic-tm", NULL);
		if (prop != NULL)
			map->mic_tm_regs = ioremap(prop->address, prop->len);
	}
}