
/*
 * cbe_regs.c
 *
 * Accessor routines for the various MMIO register blocks of the CBE
 *
 * (c) 2006 Benjamin Herrenschmidt <benh@kernel.crashing.org>, IBM Corp.
 */

#include <linux/percpu.h>
#include <linux/types.h>
#include <linux/module.h>

#include <asm/io.h>
#include <asm/pgtable.h>
#include <asm/prom.h>
#include <asm/ptrace.h>

#include "cbe_regs.h"

/*
 * Current implementation uses "cpu" nodes. We build our own mapping
 * array of cpu numbers to cpu nodes locally for now to allow interrupt
 * time code to have a fast path rather than call of_get_cpu_node(). If
 * we implement cpu hotplug, we'll have to install an appropriate notifier
 * in order to release references to the cpu going away.
 */
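
/*
 * Rough shape of the device tree this code expects (a sketch inferred
 * from the lookups below, not an authoritative layout):
 *
 *	cpu@N		"cpu" node carrying the "pervasive", "iic" and
 *			"mic-tm" address properties for its BE chip
 *	  .../spe@M	"spe" nodes hang somewhere below their cpu node
 */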
static struct cbe_regs_map
{
	struct device_node *cpu_node;
	struct cbe_pmd_regs __iomem *pmd_regs;
	struct cbe_iic_regs __iomem *iic_regs;
	struct cbe_mic_tm_regs __iomem *mic_tm_regs;
} cbe_regs_maps[MAX_CBE];
static int cbe_regs_map_count;

static struct cbe_thread_map
{
	struct device_node *cpu_node;
	struct cbe_regs_map *regs;
} cbe_thread_map[NR_CPUS];
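
/*
 * Map a device tree node to its cbe_regs_map. "spe" nodes cache the
 * result in np->data after walking up to their owning "cpu" node;
 * "cpu" nodes are matched directly against the map array built by
 * cbe_regs_init() below.
 */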
static struct cbe_regs_map *cbe_find_map(struct device_node *np)
{
	int i;
	struct device_node *tmp_np;

	if (strcasecmp(np->type, "spe") == 0) {
		if (np->data == NULL) {
			/* walk up the path until a cpu node is found */
			tmp_np = np->parent;
			while (tmp_np != NULL &&
			       strcasecmp(tmp_np->type, "cpu") != 0)
				tmp_np = tmp_np->parent;
			/* leave np->data NULL rather than recursing on a
			 * NULL node when no cpu ancestor exists */
			if (tmp_np != NULL)
				np->data = cbe_find_map(tmp_np);
		}
		return np->data;
	}

	for (i = 0; i < cbe_regs_map_count; i++)
		if (cbe_regs_maps[i].cpu_node == np)
			return &cbe_regs_maps[i];
	return NULL;
}
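
/*
 * The accessor routines below all return NULL when no register block
 * has been mapped for the given node or cpu, so callers must check the
 * result before dereferencing it. A minimal (hypothetical) caller:
 *
 *	struct cbe_pmd_regs __iomem *pmd_regs =
 *		cbe_get_cpu_pmd_regs(smp_processor_id());
 *	if (pmd_regs == NULL)
 *		return;
 *	... access individual registers with in_be64()/out_be64() ...
 */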

struct cbe_pmd_regs __iomem *cbe_get_pmd_regs(struct device_node *np)
{
	struct cbe_regs_map *map = cbe_find_map(np);
	if (map == NULL)
		return NULL;
	return map->pmd_regs;
}
EXPORT_SYMBOL_GPL(cbe_get_pmd_regs);

struct cbe_pmd_regs __iomem *cbe_get_cpu_pmd_regs(int cpu)
{
	struct cbe_regs_map *map = cbe_thread_map[cpu].regs;
	if (map == NULL)
		return NULL;
	return map->pmd_regs;
}
EXPORT_SYMBOL_GPL(cbe_get_cpu_pmd_regs);

struct cbe_iic_regs __iomem *cbe_get_iic_regs(struct device_node *np)
{
	struct cbe_regs_map *map = cbe_find_map(np);
	if (map == NULL)
		return NULL;
	return map->iic_regs;
}

struct cbe_iic_regs __iomem *cbe_get_cpu_iic_regs(int cpu)
{
	struct cbe_regs_map *map = cbe_thread_map[cpu].regs;
	if (map == NULL)
		return NULL;
	return map->iic_regs;
}
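
/*
 * Note that the two IIC accessors above carry no EXPORT_SYMBOL_GPL,
 * presumably because they are only needed by built-in platform code
 * such as the interrupt-time fast path mentioned at the top of this
 * file.
 */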

struct cbe_mic_tm_regs __iomem *cbe_get_mic_tm_regs(struct device_node *np)
{
	struct cbe_regs_map *map = cbe_find_map(np);
	if (map == NULL)
		return NULL;
	return map->mic_tm_regs;
}

struct cbe_mic_tm_regs __iomem *cbe_get_cpu_mic_tm_regs(int cpu)
{
	struct cbe_regs_map *map = cbe_thread_map[cpu].regs;
	if (map == NULL)
		return NULL;
	return map->mic_tm_regs;
}
EXPORT_SYMBOL_GPL(cbe_get_cpu_mic_tm_regs);
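
/*
 * cbe_regs_init() fills in both maps from the device tree and ioremaps
 * the register blocks of each BE chip. As __init code it is assumed to
 * run once during early platform setup, before any of the accessors
 * above are called.
 */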
void __init cbe_regs_init(void)
{
	int i;
	struct device_node *cpu;

	/* Build local fast map of CPUs */
	for_each_possible_cpu(i)
		cbe_thread_map[i].cpu_node = of_get_cpu_node(i, NULL);

	/* Find maps for each device tree CPU */
	for_each_node_by_type(cpu, "cpu") {
		struct cbe_regs_map *map = &cbe_regs_maps[cbe_regs_map_count++];

		/*
		 * That hack must die die die! The "pervasive", "iic" and
		 * "mic-tm" properties carry a raw { address, length } pair
		 * rather than a standard "reg" entry, so we overlay them
		 * with a packed struct and ioremap the result directly.
		 */
		const struct address_prop {
			unsigned long address;
			unsigned int len;
		} __attribute__((packed)) *prop;

		if (cbe_regs_map_count > MAX_CBE) {
			printk(KERN_ERR "cbe_regs: More BE chips than supported!\n");
			cbe_regs_map_count--;
			return;
		}
		map->cpu_node = cpu;

		for_each_possible_cpu(i)
			if (cbe_thread_map[i].cpu_node == cpu)
				cbe_thread_map[i].regs = map;

		prop = get_property(cpu, "pervasive", NULL);
		if (prop != NULL)
			map->pmd_regs = ioremap(prop->address, prop->len);

		prop = get_property(cpu, "iic", NULL);
		if (prop != NULL)
			map->iic_regs = ioremap(prop->address, prop->len);

		prop = get_property(cpu, "mic-tm", NULL);
		if (prop != NULL)
			map->mic_tm_regs = ioremap(prop->address, prop->len);
	}
}