/*
 * cbe_regs.c
 *
 * Accessor routines for the various MMIO register blocks of the CBE
 *
 * (c) 2006 Benjamin Herrenschmidt <benh@kernel.crashing.org>, IBM Corp.
 */

#include <linux/config.h>
#include <linux/percpu.h>
#include <linux/types.h>

#include <asm/io.h>
#include <asm/pgtable.h>
#include <asm/prom.h>
#include <asm/ptrace.h>

#include "cbe_regs.h"

#define MAX_CBE 2

/*
 * Current implementation uses "cpu" nodes. We build our own mapping
 * array of cpu numbers to cpu nodes locally for now to allow interrupt
 * time code to have a fast path rather than a call to of_get_cpu_node().
 * If we implement cpu hotplug, we'll have to install an appropriate
 * notifier in order to release references to the cpu going away.
 */
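
/*
 * Lookup paths, as implemented below: a device_node is matched by a
 * linear scan of cbe_regs_maps[] (at most MAX_CBE entries), while a
 * cpu number indexes cbe_thread_map[] directly, which is the
 * interrupt-time fast path mentioned above.
 */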

static struct cbe_regs_map
{
	struct device_node *cpu_node;
	struct cbe_pmd_regs __iomem *pmd_regs;
	struct cbe_iic_regs __iomem *iic_regs;
} cbe_regs_maps[MAX_CBE];
static int cbe_regs_map_count;

static struct cbe_thread_map
{
	struct device_node *cpu_node;
	struct cbe_regs_map *regs;
} cbe_thread_map[NR_CPUS];

static struct cbe_regs_map *cbe_find_map(struct device_node *np)
{
	int i;
	for (i = 0; i < cbe_regs_map_count; i++)
		if (cbe_regs_maps[i].cpu_node == np)
			return &cbe_regs_maps[i];
	return NULL;
}

struct cbe_pmd_regs __iomem *cbe_get_pmd_regs(struct device_node *np)
{
	struct cbe_regs_map *map = cbe_find_map(np);
	if (map == NULL)
		return NULL;
	return map->pmd_regs;
}

struct cbe_pmd_regs __iomem *cbe_get_cpu_pmd_regs(int cpu)
{
	struct cbe_regs_map *map = cbe_thread_map[cpu].regs;
	if (map == NULL)
		return NULL;
	return map->pmd_regs;
}

struct cbe_iic_regs __iomem *cbe_get_iic_regs(struct device_node *np)
{
	struct cbe_regs_map *map = cbe_find_map(np);
	if (map == NULL)
		return NULL;
	return map->iic_regs;
}

struct cbe_iic_regs __iomem *cbe_get_cpu_iic_regs(int cpu)
{
	struct cbe_regs_map *map = cbe_thread_map[cpu].regs;
	if (map == NULL)
		return NULL;
	return map->iic_regs;
}
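
/*
 * Minimal usage sketch (hypothetical caller, not part of this file):
 * interrupt-time code can reach its chip's PMD block with two array
 * lookups and no device-tree walk, e.g.
 *
 *	struct cbe_pmd_regs __iomem *pmd;
 *	u64 val;
 *
 *	pmd = cbe_get_cpu_pmd_regs(smp_processor_id());
 *	if (pmd != NULL)
 *		val = in_be64(&pmd->some_64bit_reg);
 *
 * "some_64bit_reg" is a placeholder, not a real field; the actual
 * layout of struct cbe_pmd_regs comes from cbe_regs.h.
 */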

void __init cbe_regs_init(void)
{
	int i;
	struct device_node *cpu;

	/* Build local fast map of CPUs */
	for_each_possible_cpu(i)
		cbe_thread_map[i].cpu_node = of_get_cpu_node(i, NULL);

	/* Find maps for each device tree CPU */
	for_each_node_by_type(cpu, "cpu") {
		struct cbe_regs_map *map = &cbe_regs_maps[cbe_regs_map_count++];

		/* That hack must die die die ! */
		const struct address_prop {
			unsigned long address;
			unsigned int len;
		} __attribute__((packed)) *prop;

		if (cbe_regs_map_count > MAX_CBE) {
			printk(KERN_ERR "cbe_regs: More BE chips than supported!\n");
			cbe_regs_map_count--;
			return;
		}
		map->cpu_node = cpu;
		for_each_possible_cpu(i)
			if (cbe_thread_map[i].cpu_node == cpu)
				cbe_thread_map[i].regs = map;

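		/*
		 * The "pervasive" and "iic" properties appear to hold a raw
		 * { 64-bit address, 32-bit length } pair rather than a
		 * standard translated "reg" entry, hence the packed
		 * address_prop overlay above (the "hack" in the comment).
		 */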
		prop = get_property(cpu, "pervasive", NULL);
		if (prop != NULL)
			map->pmd_regs = ioremap(prop->address, prop->len);

		prop = get_property(cpu, "iic", NULL);
		if (prop != NULL)
			map->iic_regs = ioremap(prop->address, prop->len);
	}
}
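
/*
 * cbe_regs_init() must run once during early platform bring-up, before
 * interrupt-time users call the accessors above; on Cell this is
 * presumably done from the platform setup code. Nothing in this file
 * enforces that ordering.
 */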