/*
 * Coherency fabric (Aurora) support for Armada 370 and XP platforms.
 *
 * Copyright (C) 2012 Marvell
 *
 * Yehuda Yitschak <yehuday@marvell.com>
 * Gregory Clement <gregory.clement@free-electrons.com>
 * Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
 *
 * This file is licensed under the terms of the GNU General Public
 * License version 2. This program is licensed "as is" without any
 * warranty of any kind, whether express or implied.
 *
 * The Armada 370 and Armada XP SOCs have a coherency fabric which is
 * responsible for ensuring hardware coherency between all CPUs and between
 * CPUs and I/O masters. This file initializes the coherency fabric and
 * supplies basic routines for configuring and controlling hardware coherency
 */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/smp.h>
#include <linux/dma-mapping.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/platform_device.h>
#include <asm/smp_plat.h>
#include <asm/cacheflush.h>
#include "armada-370-xp.h"
/*
 * Physical base of the coherency fabric. Deliberately non-static: secondary
 * CPUs read it before they join the coherency fabric (see the sync_cache_w()
 * call in coherency_init()), presumably from coherency_ll.S / early boot
 * code — confirm against the low-level entry path.
 */
unsigned long coherency_phys_base;

/* Virtual mapping of the fabric registers (reg bank 0 in the DT node). */
static void __iomem *coherency_base;

/* Virtual mapping of the per-CPU fabric registers (reg bank 1). */
static void __iomem *coherency_cpu_base;

/* Coherency fabric registers */
#define COHERENCY_FABRIC_CFG_OFFSET 0x4

#define IO_SYNC_BARRIER_CTL_OFFSET 0x0
  35. static struct of_device_id of_coherency_table[] = {
  36. {.compatible = "marvell,coherency-fabric"},
  37. { /* end of list */ },
  38. };
  39. /* Function defined in coherency_ll.S */
  40. int ll_set_cpu_coherent(void __iomem *base_addr, unsigned int hw_cpu_id);
  41. int set_cpu_coherent(unsigned int hw_cpu_id, int smp_group_id)
  42. {
  43. if (!coherency_base) {
  44. pr_warn("Can't make CPU %d cache coherent.\n", hw_cpu_id);
  45. pr_warn("Coherency fabric is not initialized\n");
  46. return 1;
  47. }
  48. return ll_set_cpu_coherent(coherency_base, hw_cpu_id);
  49. }
/*
 * Trigger an I/O sync barrier in the coherency fabric: write the trigger
 * bit, then spin until the hardware clears it, at which point outstanding
 * I/O traffic is ordered with respect to the CPU. The write-then-poll
 * order is part of the hardware protocol — do not reorder.
 */
static inline void mvebu_hwcc_sync_io_barrier(void)
{
	writel(0x1, coherency_cpu_base + IO_SYNC_BARRIER_CTL_OFFSET);
	/* busy-wait: bit 0 self-clears when the barrier has completed */
	while (readl(coherency_cpu_base + IO_SYNC_BARRIER_CTL_OFFSET) & 0x1);
}
  55. static dma_addr_t mvebu_hwcc_dma_map_page(struct device *dev, struct page *page,
  56. unsigned long offset, size_t size,
  57. enum dma_data_direction dir,
  58. struct dma_attrs *attrs)
  59. {
  60. if (dir != DMA_TO_DEVICE)
  61. mvebu_hwcc_sync_io_barrier();
  62. return pfn_to_dma(dev, page_to_pfn(page)) + offset;
  63. }
  64. static void mvebu_hwcc_dma_unmap_page(struct device *dev, dma_addr_t dma_handle,
  65. size_t size, enum dma_data_direction dir,
  66. struct dma_attrs *attrs)
  67. {
  68. if (dir != DMA_TO_DEVICE)
  69. mvebu_hwcc_sync_io_barrier();
  70. }
  71. static void mvebu_hwcc_dma_sync(struct device *dev, dma_addr_t dma_handle,
  72. size_t size, enum dma_data_direction dir)
  73. {
  74. if (dir != DMA_TO_DEVICE)
  75. mvebu_hwcc_sync_io_barrier();
  76. }
/*
 * DMA ops for hardware-coherent platform devices: reuse the generic ARM
 * implementations wherever cache maintenance is unnecessary, and override
 * only the single-buffer paths with barrier-only variants above.
 * Not const: set_dma_ops() takes a non-const struct dma_map_ops *.
 */
static struct dma_map_ops mvebu_hwcc_dma_ops = {
	.alloc = arm_dma_alloc,
	.free = arm_dma_free,
	.mmap = arm_dma_mmap,
	.map_page = mvebu_hwcc_dma_map_page,
	.unmap_page = mvebu_hwcc_dma_unmap_page,
	.get_sgtable = arm_dma_get_sgtable,
	/* sg paths still use the generic ARM helpers */
	.map_sg = arm_dma_map_sg,
	.unmap_sg = arm_dma_unmap_sg,
	.sync_single_for_cpu = mvebu_hwcc_dma_sync,
	.sync_single_for_device = mvebu_hwcc_dma_sync,
	.sync_sg_for_cpu = arm_dma_sync_sg_for_cpu,
	.sync_sg_for_device = arm_dma_sync_sg_for_device,
	.set_dma_mask = arm_dma_set_mask,
};
  92. static int mvebu_hwcc_platform_notifier(struct notifier_block *nb,
  93. unsigned long event, void *__dev)
  94. {
  95. struct device *dev = __dev;
  96. if (event != BUS_NOTIFY_ADD_DEVICE)
  97. return NOTIFY_DONE;
  98. set_dma_ops(dev, &mvebu_hwcc_dma_ops);
  99. return NOTIFY_OK;
  100. }
/* Notifier block registered on the platform bus in coherency_late_init(). */
static struct notifier_block mvebu_hwcc_platform_nb = {
	.notifier_call = mvebu_hwcc_platform_notifier,
};
  104. int __init coherency_init(void)
  105. {
  106. struct device_node *np;
  107. np = of_find_matching_node(NULL, of_coherency_table);
  108. if (np) {
  109. struct resource res;
  110. pr_info("Initializing Coherency fabric\n");
  111. of_address_to_resource(np, 0, &res);
  112. coherency_phys_base = res.start;
  113. /*
  114. * Ensure secondary CPUs will see the updated value,
  115. * which they read before they join the coherency
  116. * fabric, and therefore before they are coherent with
  117. * the boot CPU cache.
  118. */
  119. sync_cache_w(&coherency_phys_base);
  120. coherency_base = of_iomap(np, 0);
  121. coherency_cpu_base = of_iomap(np, 1);
  122. set_cpu_coherent(cpu_logical_map(smp_processor_id()), 0);
  123. }
  124. return 0;
  125. }
  126. static int __init coherency_late_init(void)
  127. {
  128. if (of_find_matching_node(NULL, of_coherency_table))
  129. bus_register_notifier(&platform_bus_type,
  130. &mvebu_hwcc_platform_nb);
  131. return 0;
  132. }
  133. postcore_initcall(coherency_late_init);