/*
 * cbe_regs.c
 *
 * Accessor routines for the various MMIO register blocks of the CBE
 *
 * (c) 2006 Benjamin Herrenschmidt <benh@kernel.crashing.org>, IBM Corp.
 */

#include <linux/percpu.h>
#include <linux/types.h>

#include <asm/io.h>
#include <asm/pgtable.h>
#include <asm/prom.h>
#include <asm/ptrace.h>

#include "cbe_regs.h"

#define MAX_CBE 2

/*
 * Current implementation uses "cpu" nodes. We build our own mapping
 * array of cpu numbers to cpu nodes locally for now to allow interrupt
 * time code to have a fast path rather than calling of_get_cpu_node().
 * If we implement cpu hotplug, we'll have to install an appropriate
 * notifier in order to release references to the cpu going away.
 */
static struct cbe_regs_map
{
	struct device_node *cpu_node;
	struct cbe_pmd_regs __iomem *pmd_regs;
	struct cbe_iic_regs __iomem *iic_regs;
} cbe_regs_maps[MAX_CBE];
static int cbe_regs_map_count;

static struct cbe_thread_map
{
	struct device_node *cpu_node;
	struct cbe_regs_map *regs;
} cbe_thread_map[NR_CPUS];

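/*
 * Illustrative layout of the two tables above, assuming a blade with two
 * CBE chips and two hardware threads per chip (the numbers are only an
 * example, not something this file guarantees):
 *
 *	cbe_thread_map[0].regs --\
 *	cbe_thread_map[1].regs ----> cbe_regs_maps[0]	(first BE chip)
 *	cbe_thread_map[2].regs --\
 *	cbe_thread_map[3].regs ----> cbe_regs_maps[1]	(second BE chip)
 *
 * cbe_regs_init() below fills both tables from the device tree.
 */
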
static struct cbe_regs_map *cbe_find_map(struct device_node *np)
{
	int i;

	for (i = 0; i < cbe_regs_map_count; i++)
		if (cbe_regs_maps[i].cpu_node == np)
			return &cbe_regs_maps[i];
	return NULL;
}

struct cbe_pmd_regs __iomem *cbe_get_pmd_regs(struct device_node *np)
{
	struct cbe_regs_map *map = cbe_find_map(np);
	if (map == NULL)
		return NULL;
	return map->pmd_regs;
}

struct cbe_pmd_regs __iomem *cbe_get_cpu_pmd_regs(int cpu)
{
	struct cbe_regs_map *map = cbe_thread_map[cpu].regs;
	if (map == NULL)
		return NULL;
	return map->pmd_regs;
}

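/*
 * Illustrative use only (not part of this file): a caller on the fast
 * path would typically grab the block for its own cpu and access it with
 * the usual MMIO accessors. "reg_field" below is a placeholder, not a
 * real member of struct cbe_pmd_regs (the layout lives in cbe_regs.h):
 *
 *	struct cbe_pmd_regs __iomem *pmd;
 *	u64 val;
 *
 *	pmd = cbe_get_cpu_pmd_regs(smp_processor_id());
 *	if (pmd != NULL)
 *		val = in_be64(&pmd->reg_field);
 */
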
struct cbe_iic_regs __iomem *cbe_get_iic_regs(struct device_node *np)
{
	struct cbe_regs_map *map = cbe_find_map(np);
	if (map == NULL)
		return NULL;
	return map->iic_regs;
}

struct cbe_iic_regs __iomem *cbe_get_cpu_iic_regs(int cpu)
{
	struct cbe_regs_map *map = cbe_thread_map[cpu].regs;
	if (map == NULL)
		return NULL;
	return map->iic_regs;
}

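/*
 * Illustrative use only: interrupt time code that already knows its cpu
 * number can reach the IIC block without walking the device tree, which
 * is the fast path the comment at the top of this file refers to:
 *
 *	struct cbe_iic_regs __iomem *iic;
 *
 *	iic = cbe_get_cpu_iic_regs(cpu);
 *	if (iic == NULL)
 *		return;		(no map was built for this cpu)
 *
 * The device_node based variants above exist for code that starts from
 * a device tree node rather than a cpu number.
 */
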
void __init cbe_regs_init(void)
{
	int i;
	struct device_node *cpu;

	/* Build local fast map of CPUs */
	for_each_possible_cpu(i)
		cbe_thread_map[i].cpu_node = of_get_cpu_node(i, NULL);

	/* Find maps for each device tree CPU */
	for_each_node_by_type(cpu, "cpu") {
		struct cbe_regs_map *map = &cbe_regs_maps[cbe_regs_map_count++];

		/* That hack must die die die ! */
		const struct address_prop {
			unsigned long address;
			unsigned int len;
		} __attribute__((packed)) *prop;

		if (cbe_regs_map_count > MAX_CBE) {
			printk(KERN_ERR "cbe_regs: More BE chips than supported!\n");
			cbe_regs_map_count--;
			return;
		}
		map->cpu_node = cpu;

		for_each_possible_cpu(i)
			if (cbe_thread_map[i].cpu_node == cpu)
				cbe_thread_map[i].regs = map;

		prop = get_property(cpu, "pervasive", NULL);
		if (prop != NULL)
			map->pmd_regs = ioremap(prop->address, prop->len);

		prop = get_property(cpu, "iic", NULL);
		if (prop != NULL)
			map->iic_regs = ioremap(prop->address, prop->len);
	}
}
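
/*
 * Ordering note (an assumption based on how the accessors are written):
 * cbe_regs_init() must run once, from early platform setup, before any
 * of the cbe_get_*() helpers above are used; none of them can fault in
 * a mapping on demand, they only return what this function ioremap'ed.
 */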