arm-cci.c

/*
 * CCI cache coherent interconnect driver
 *
 * Copyright (C) 2013 ARM Ltd.
 * Author: Lorenzo Pieralisi <lorenzo.pieralisi@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed "as is" WITHOUT ANY WARRANTY of any
 * kind, whether express or implied; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <linux/arm-cci.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/slab.h>

#include <asm/cacheflush.h>
#include <asm/smp_plat.h>

#define CCI_PORT_CTRL		0x0
#define CCI_CTRL_STATUS		0xc

#define CCI_ENABLE_SNOOP_REQ	0x1
#define CCI_ENABLE_DVM_REQ	0x2
#define CCI_ENABLE_REQ		(CCI_ENABLE_SNOOP_REQ | CCI_ENABLE_DVM_REQ)
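
/*
 * Register usage as implied by the code below (illustrative summary, not
 * taken from the CCI-400 TRM): each slave interface has a control register
 * at offset CCI_PORT_CTRL of its own mmio window, whose snoop and DVM
 * request bits enable/disable coherency traffic for that port, while the
 * global status register at CCI_CTRL_STATUS exposes a "change pending"
 * flag in bit 0 that cci_port_control() polls until the new setting has
 * taken effect.
 */
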
struct cci_nb_ports {
	unsigned int nb_ace;
	unsigned int nb_ace_lite;
};

enum cci_ace_port_type {
	ACE_INVALID_PORT = 0x0,
	ACE_PORT,
	ACE_LITE_PORT,
};

struct cci_ace_port {
	void __iomem *base;
	enum cci_ace_port_type type;
	struct device_node *dn;
};

static struct cci_ace_port *ports;
static unsigned int nb_cci_ports;

static void __iomem *cci_ctrl_base;

struct cpu_port {
	u64 mpidr;
	u32 port;
};

/*
 * Use the port MSB as valid flag, shift can be made dynamic
 * by computing number of bits required for port indexes.
 * Code disabling CCI cpu ports runs with D-cache invalidated
 * and SCTLR bit clear so data accesses must be kept to a minimum
 * to improve performance; for now shift is left static to
 * avoid one more data access while disabling the CCI port.
 */
#define PORT_VALID_SHIFT	31
#define PORT_VALID		(0x1 << PORT_VALID_SHIFT)
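
/*
 * Illustrative example (not part of the original source): with the static
 * shift above, init_cpu_port(&cpu_port[cpu], 2, mpidr) stores 0x80000002
 * in cpu_port[cpu].port, i.e. port index 2 with the valid bit set, so
 * cpu_port_is_valid() can tell initialized entries apart from the
 * zero-filled defaults in the cpu_port[] array.
 */
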
static inline void init_cpu_port(struct cpu_port *port, u32 index, u64 mpidr)
{
	port->port = PORT_VALID | index;
	port->mpidr = mpidr;
}

static inline bool cpu_port_is_valid(struct cpu_port *port)
{
	return !!(port->port & PORT_VALID);
}

static inline bool cpu_port_match(struct cpu_port *port, u64 mpidr)
{
	return port->mpidr == (mpidr & MPIDR_HWID_BITMASK);
}

static struct cpu_port cpu_port[NR_CPUS];

/**
 * __cci_ace_get_port - Function to retrieve the port index connected to
 *			a cpu or device.
 *
 * @dn: device node of the device to look-up
 * @type: port type
 *
 * Return value:
 *	- CCI port index if success
 *	- -ENODEV if failure
 */
static int __cci_ace_get_port(struct device_node *dn, int type)
{
	int i;
	bool ace_match;
	struct device_node *cci_portn;

	cci_portn = of_parse_phandle(dn, "cci-control-port", 0);
	for (i = 0; i < nb_cci_ports; i++) {
		ace_match = ports[i].type == type;
		if (ace_match && cci_portn == ports[i].dn)
			return i;
	}
	return -ENODEV;
}

int cci_ace_get_port(struct device_node *dn)
{
	return __cci_ace_get_port(dn, ACE_LITE_PORT);
}
EXPORT_SYMBOL_GPL(cci_ace_get_port);
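
/*
 * Illustrative device tree fragment (a sketch, not taken from this file):
 * a CPU or bus master node points at the CCI slave interface that serves
 * it through the "cci-control-port" phandle parsed above, e.g.
 *
 *	cpu@0 {
 *		...
 *		cci-control-port = <&cci_control1>;
 *	};
 *
 * where &cci_control1 labels an "arm,cci-400-ctrl-if" child of the CCI
 * node (see the CCI node sketch above cci_probe() further down).
 */
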
static void __init cci_ace_init_ports(void)
{
	int port, ac, cpu;
	u64 hwid;
	const u32 *cell;
	struct device_node *cpun, *cpus;

	cpus = of_find_node_by_path("/cpus");
	if (WARN(!cpus, "Missing cpus node, bailing out\n"))
		return;

	if (WARN_ON(of_property_read_u32(cpus, "#address-cells", &ac)))
		ac = of_n_addr_cells(cpus);

	/*
	 * Port index look-up speeds up the function disabling ports by CPU,
	 * since the logical to port index mapping is done once and does
	 * not change after system boot.
	 * The stashed index array is initialized for all possible CPUs
	 * at probe time.
	 */
	for_each_child_of_node(cpus, cpun) {
		if (of_node_cmp(cpun->type, "cpu"))
			continue;
		cell = of_get_property(cpun, "reg", NULL);
		if (WARN(!cell, "%s: missing reg property\n", cpun->full_name))
			continue;

		hwid = of_read_number(cell, ac);
		cpu = get_logical_index(hwid & MPIDR_HWID_BITMASK);

		if (cpu < 0 || !cpu_possible(cpu))
			continue;
		port = __cci_ace_get_port(cpun, ACE_PORT);
		if (port < 0)
			continue;

		init_cpu_port(&cpu_port[cpu], port, cpu_logical_map(cpu));
	}

	for_each_possible_cpu(cpu) {
		WARN(!cpu_port_is_valid(&cpu_port[cpu]),
			"CPU %u does not have an associated CCI port\n",
			cpu);
	}
}

/*
 * Functions to enable/disable a CCI interconnect slave port
 *
 * They are called by low-level power management code to disable slave
 * interfaces snoops and DVM broadcast.
 * Since they may execute with cache data allocation disabled and
 * after the caches have been cleaned and invalidated the functions provide
 * no explicit locking since they may run with D-cache disabled, so normal
 * cacheable kernel locks based on ldrex/strex may not work.
 * Locking has to be provided by BSP implementations to ensure proper
 * operations.
 */

/**
 * cci_port_control() - function to control a CCI port
 *
 * @port: index of the port to setup
 * @enable: if true enables the port, if false disables it
 */
static void notrace cci_port_control(unsigned int port, bool enable)
{
	void __iomem *base = ports[port].base;

	writel_relaxed(enable ? CCI_ENABLE_REQ : 0, base + CCI_PORT_CTRL);
	/*
	 * This function is called from power down procedures
	 * and must not execute any instruction that might
	 * cause the processor to be put in a quiescent state
	 * (eg wfi). Hence, cpu_relax() can not be added to this
	 * read loop to optimize power, since it might hide possibly
	 * disruptive operations.
	 */
	while (readl_relaxed(cci_ctrl_base + CCI_CTRL_STATUS) & 0x1)
		;
}

/**
 * cci_disable_port_by_cpu() - function to disable a CCI port by CPU
 *			       reference
 *
 * @mpidr: mpidr of the CPU whose CCI port should be disabled
 *
 * Disabling a CCI port for a CPU implies disabling the CCI port
 * controlling that CPU cluster. Code disabling CPU CCI ports
 * must make sure that the CPU running the code is the last active CPU
 * in the cluster ie all other CPUs are quiescent in a low power state.
 *
 * Return:
 *	0 on success
 *	-ENODEV on port look-up failure
 */
int notrace cci_disable_port_by_cpu(u64 mpidr)
{
	int cpu;
	bool is_valid;

	for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
		is_valid = cpu_port_is_valid(&cpu_port[cpu]);
		if (is_valid && cpu_port_match(&cpu_port[cpu], mpidr)) {
			cci_port_control(cpu_port[cpu].port, false);
			return 0;
		}
	}
	return -ENODEV;
}
EXPORT_SYMBOL_GPL(cci_disable_port_by_cpu);
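
/*
 * Illustrative call site (a sketch of the intended use, not part of this
 * file): a cluster power-down sequence, running on the last CPU alive in
 * the cluster with caches already cleaned and invalidated, would typically
 * do something along the lines of
 *
 *	flush_cache_all();
 *	cci_disable_port_by_cpu(read_cpuid_mpidr());
 *
 * before requesting cluster shutdown, so that no snoop or DVM traffic is
 * routed to a cluster that is about to lose power.
 */
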
/**
 * __cci_control_port_by_device() - function to control a CCI port by device
 *				    reference
 *
 * @dn: device node pointer of the device whose CCI port should be
 *	controlled
 * @enable: if true enables the port, if false disables it
 *
 * Return:
 *	0 on success
 *	-ENODEV on port look-up failure
 */
int notrace __cci_control_port_by_device(struct device_node *dn, bool enable)
{
	int port;

	if (!dn)
		return -ENODEV;

	port = __cci_ace_get_port(dn, ACE_LITE_PORT);
	if (WARN_ONCE(port < 0, "node %s ACE lite port look-up failure\n",
				dn->full_name))
		return -ENODEV;
	cci_port_control(port, enable);
	return 0;
}
EXPORT_SYMBOL_GPL(__cci_control_port_by_device);
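
/*
 * Note (assumption based on the companion <linux/arm-cci.h> header, not on
 * this file): callers are expected to use the enable/disable wrappers built
 * on top of this function, e.g. cci_enable_port_by_device(dn) expanding to
 * __cci_control_port_by_device(dn, true), rather than calling it directly.
 */
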
/**
 * __cci_control_port_by_index() - function to control a CCI port by port index
 *
 * @port: port index previously retrieved with cci_ace_get_port()
 * @enable: if true enables the port, if false disables it
 *
 * Return:
 *	0 on success
 *	-ENODEV on port index out of range
 *	-EPERM if operation carried out on an ACE PORT
 */
int notrace __cci_control_port_by_index(u32 port, bool enable)
{
	if (port >= nb_cci_ports || ports[port].type == ACE_INVALID_PORT)
		return -ENODEV;
	/*
	 * CCI control for ports connected to CPUS is extremely fragile
	 * and must be made to go through a specific and controlled
	 * interface (ie cci_disable_port_by_cpu()); control by general
	 * purpose indexing is therefore disabled for ACE ports.
	 */
	if (ports[port].type == ACE_PORT)
		return -EPERM;

	cci_port_control(port, enable);
	return 0;
}
EXPORT_SYMBOL_GPL(__cci_control_port_by_index);
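
/*
 * Illustrative usage (a sketch, not part of the original file; "dev" is a
 * made-up device pointer): a driver managing an ACE-lite master could cache
 * the index once and toggle the port around low-power transitions, e.g.
 *
 *	int port = cci_ace_get_port(dev->of_node);
 *
 *	if (port >= 0)
 *		__cci_control_port_by_index(port, false);
 *
 * ACE ports connected to CPU clusters deliberately cannot be driven this
 * way and must go through cci_disable_port_by_cpu() instead.
 */
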
static const struct cci_nb_ports cci400_ports = {
	.nb_ace = 2,
	.nb_ace_lite = 3
};

static const struct of_device_id arm_cci_matches[] = {
	{.compatible = "arm,cci-400", .data = &cci400_ports },
	{},
};

static const struct of_device_id arm_cci_ctrl_if_matches[] = {
	{.compatible = "arm,cci-400-ctrl-if", },
	{},
};
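
/*
 * Illustrative device tree layout parsed by cci_probe() below (a sketch
 * modelled on the arm,cci-400 binding, with made-up addresses):
 *
 *	cci@2c090000 {
 *		compatible = "arm,cci-400";
 *		reg = <0x2c090000 0x1000>;
 *
 *		cci_control0: slave-if@2c091000 {
 *			compatible = "arm,cci-400-ctrl-if";
 *			interface-type = "ace-lite";
 *			reg = <0x2c091000 0x1000>;
 *		};
 *
 *		cci_control1: slave-if@2c094000 {
 *			compatible = "arm,cci-400-ctrl-if";
 *			interface-type = "ace";
 *			reg = <0x2c094000 0x1000>;
 *		};
 *	};
 *
 * Each child matching arm_cci_ctrl_if_matches becomes one entry in ports[],
 * typed according to its "interface-type" property.
 */
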
static int __init cci_probe(void)
{
	struct cci_nb_ports const *cci_config;
	int ret, i, nb_ace = 0, nb_ace_lite = 0;
	struct device_node *np, *cp;
	const char *match_str;
	bool is_ace;

	np = of_find_matching_node(NULL, arm_cci_matches);
	if (!np)
		return -ENODEV;

	cci_config = of_match_node(arm_cci_matches, np)->data;
	if (!cci_config)
		return -ENODEV;

	nb_cci_ports = cci_config->nb_ace + cci_config->nb_ace_lite;

	ports = kcalloc(sizeof(*ports), nb_cci_ports, GFP_KERNEL);
	if (!ports)
		return -ENOMEM;

	cci_ctrl_base = of_iomap(np, 0);

	if (!cci_ctrl_base) {
		WARN(1, "unable to ioremap CCI ctrl\n");
		ret = -ENXIO;
		goto memalloc_err;
	}

	for_each_child_of_node(np, cp) {
		if (!of_match_node(arm_cci_ctrl_if_matches, cp))
			continue;

		i = nb_ace + nb_ace_lite;

		if (i >= nb_cci_ports)
			break;

		if (of_property_read_string(cp, "interface-type",
					&match_str)) {
			WARN(1, "node %s missing interface-type property\n",
				  cp->full_name);
			continue;
		}
		is_ace = strcmp(match_str, "ace") == 0;
		if (!is_ace && strcmp(match_str, "ace-lite")) {
			WARN(1, "node %s containing invalid interface-type property, skipping it\n",
					cp->full_name);
			continue;
		}

		ports[i].base = of_iomap(cp, 0);

		if (!ports[i].base) {
			WARN(1, "unable to ioremap CCI port %d\n", i);
			continue;
		}

		if (is_ace) {
			if (WARN_ON(nb_ace >= cci_config->nb_ace))
				continue;
			ports[i].type = ACE_PORT;
			++nb_ace;
		} else {
			if (WARN_ON(nb_ace_lite >= cci_config->nb_ace_lite))
				continue;
			ports[i].type = ACE_LITE_PORT;
			++nb_ace_lite;
		}

		ports[i].dn = cp;
	}

	/* initialize a stashed array of ACE ports to speed-up look-up */
	cci_ace_init_ports();

	/*
	 * Multi-cluster systems may need this data when non-coherent, during
	 * cluster power-up/power-down. Make sure it reaches main memory.
	 */
	sync_cache_w(&cci_ctrl_base);
	sync_cache_w(&ports);
	sync_cache_w(&cpu_port);
	__sync_cache_range_w(ports, sizeof(*ports) * nb_cci_ports);
	pr_info("ARM CCI driver probed\n");
	return 0;

memalloc_err:

	kfree(ports);
	return ret;
}
static int cci_init_status = -EAGAIN;
static DEFINE_MUTEX(cci_probing);

static int __init cci_init(void)
{
	if (cci_init_status != -EAGAIN)
		return cci_init_status;

	mutex_lock(&cci_probing);
	if (cci_init_status == -EAGAIN)
		cci_init_status = cci_probe();
	mutex_unlock(&cci_probing);
	return cci_init_status;
}

/*
 * To sort out early init calls ordering a helper function is provided to
 * check if the CCI driver has been initialized. The function checks whether
 * the driver has been initialized; if not, it calls the init function that
 * probes the driver and updates the return value.
 */
bool __init cci_probed(void)
{
	return cci_init() == 0;
}
EXPORT_SYMBOL_GPL(cci_probed);
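
/*
 * Illustrative caller (a sketch, not part of this file; the function name
 * is made up): platform cluster power management setup code would typically
 * gate its own initialization on the interconnect being available, e.g.
 *
 *	static int __init my_pm_init(void)
 *	{
 *		if (!cci_probed())
 *			return -ENODEV;
 *		...
 *	}
 */
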
early_initcall(cci_init);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("ARM CCI support");