/* sysfs.c */

#include <linux/config.h>
#include <linux/sysdev.h>
#include <linux/cpu.h>
#include <linux/smp.h>
#include <linux/percpu.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/module.h>
#include <linux/nodemask.h>
#include <linux/cpumask.h>
#include <linux/notifier.h>

#include <asm/current.h>
#include <asm/processor.h>
#include <asm/cputable.h>
#include <asm/hvcall.h>
#include <asm/prom.h>
#include <asm/systemcfg.h>
#include <asm/paca.h>
#include <asm/lppaca.h>
#include <asm/machdep.h>

static DEFINE_PER_CPU(struct cpu, cpu_devices);
/* SMT stuff */

#ifdef CONFIG_PPC_MULTIPLATFORM
/* default to snooze disabled */
DEFINE_PER_CPU(unsigned long, smt_snooze_delay);
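/* show/store handlers for the per-cpu smt_snooze_delay sysfs attribute */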
static ssize_t store_smt_snooze_delay(struct sys_device *dev, const char *buf,
				      size_t count)
{
	struct cpu *cpu = container_of(dev, struct cpu, sysdev);
	ssize_t ret;
	unsigned long snooze;

	ret = sscanf(buf, "%lu", &snooze);
	if (ret != 1)
		return -EINVAL;

	per_cpu(smt_snooze_delay, cpu->sysdev.id) = snooze;

	return count;
}

static ssize_t show_smt_snooze_delay(struct sys_device *dev, char *buf)
{
	struct cpu *cpu = container_of(dev, struct cpu, sysdev);

	return sprintf(buf, "%lu\n", per_cpu(smt_snooze_delay, cpu->sysdev.id));
}

static SYSDEV_ATTR(smt_snooze_delay, 0644, show_smt_snooze_delay,
		   store_smt_snooze_delay);
/* Only parse OF options if the matching cmdline option was not specified */
static int smt_snooze_cmdline;

static int __init smt_setup(void)
{
	struct device_node *options;
	unsigned int *val;
	unsigned int cpu;

	if (!cpu_has_feature(CPU_FTR_SMT))
		return 1;

	options = find_path_device("/options");
	if (!options)
		return 1;

	val = (unsigned int *)get_property(options, "ibm,smt-snooze-delay",
					   NULL);
	if (!smt_snooze_cmdline && val) {
		for_each_cpu(cpu)
			per_cpu(smt_snooze_delay, cpu) = *val;
	}

	return 1;
}
__initcall(smt_setup);
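
/* Parse the smt-snooze-delay= kernel command line option */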
static int __init setup_smt_snooze_delay(char *str)
{
	unsigned int cpu;
	int snooze;

	if (!cpu_has_feature(CPU_FTR_SMT))
		return 1;

	smt_snooze_cmdline = 1;

	if (get_option(&str, &snooze)) {
		for_each_cpu(cpu)
			per_cpu(smt_snooze_delay, cpu) = snooze;
	}

	return 1;
}
__setup("smt-snooze-delay=", setup_smt_snooze_delay);
/*
 * Enabling PMCs will slow partition context switch times so we only do
 * it the first time we write to the PMCs.
 */
static DEFINE_PER_CPU(char, pmcs_enabled);

void ppc64_enable_pmcs(void)
{
	unsigned long hid0;
#ifdef CONFIG_PPC_PSERIES
	unsigned long set, reset;
#endif /* CONFIG_PPC_PSERIES */

	/* Only need to enable them once */
	if (__get_cpu_var(pmcs_enabled))
		return;

	__get_cpu_var(pmcs_enabled) = 1;

	switch (systemcfg->platform) {
	case PLATFORM_PSERIES:
	case PLATFORM_POWERMAC:
		hid0 = mfspr(HID0);
		hid0 |= 1UL << (63 - 20);

		/* POWER4 requires the following sequence */
		asm volatile(
			"sync\n"
			"mtspr %1, %0\n"
			"mfspr %0, %1\n"
			"mfspr %0, %1\n"
			"mfspr %0, %1\n"
			"mfspr %0, %1\n"
			"mfspr %0, %1\n"
			"mfspr %0, %1\n"
			"isync" : "=&r" (hid0) : "i" (HID0), "0" (hid0):
			"memory");
		break;

#ifdef CONFIG_PPC_PSERIES
	case PLATFORM_PSERIES_LPAR:
		set = 1UL << 63;
		reset = 0;
		plpar_hcall_norets(H_PERFMON, set, reset);
		break;
#endif /* CONFIG_PPC_PSERIES */

	default:
		break;
	}

#ifdef CONFIG_PPC_PSERIES
	/* instruct hypervisor to maintain PMCs */
	if (cur_cpu_spec->firmware_features & FW_FEATURE_SPLPAR)
		get_paca()->lppaca.pmcregs_in_use = 1;
#endif /* CONFIG_PPC_PSERIES */
}

#else

/* PMC stuff */
void ppc64_enable_pmcs(void)
{
	/* XXX Implement for iseries */
}
#endif /* CONFIG_PPC_MULTIPLATFORM */

EXPORT_SYMBOL(ppc64_enable_pmcs);

/* XXX convert to rusty's on_one_cpu */
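/*
 * Temporarily bind the current task to the given CPU, call func(arg)
 * there, then restore the original affinity mask.
 */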
static unsigned long run_on_cpu(unsigned long cpu,
			unsigned long (*func)(unsigned long),
			unsigned long arg)
{
	cpumask_t old_affinity = current->cpus_allowed;
	unsigned long ret;

	/* should return -EINVAL to userspace */
	if (set_cpus_allowed(current, cpumask_of_cpu(cpu)))
		return 0;

	ret = func(arg);

	set_cpus_allowed(current, old_affinity);

	return ret;
}
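
/*
 * Generate sysfs show/store handlers that read or write the given SPR
 * on the CPU owning the sysdev, using run_on_cpu() above.
 */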
#define SYSFS_PMCSETUP(NAME, ADDRESS) \
static unsigned long read_##NAME(unsigned long junk) \
{ \
	return mfspr(ADDRESS); \
} \
static unsigned long write_##NAME(unsigned long val) \
{ \
	ppc64_enable_pmcs(); \
	mtspr(ADDRESS, val); \
	return 0; \
} \
static ssize_t show_##NAME(struct sys_device *dev, char *buf) \
{ \
	struct cpu *cpu = container_of(dev, struct cpu, sysdev); \
	unsigned long val = run_on_cpu(cpu->sysdev.id, read_##NAME, 0); \
	return sprintf(buf, "%lx\n", val); \
} \
static ssize_t __attribute_used__ \
	store_##NAME(struct sys_device *dev, const char *buf, size_t count) \
{ \
	struct cpu *cpu = container_of(dev, struct cpu, sysdev); \
	unsigned long val; \
	int ret = sscanf(buf, "%lx", &val); \
	if (ret != 1) \
		return -EINVAL; \
	run_on_cpu(cpu->sysdev.id, write_##NAME, val); \
	return count; \
}
SYSFS_PMCSETUP(mmcr0, SPRN_MMCR0);
SYSFS_PMCSETUP(mmcr1, SPRN_MMCR1);
SYSFS_PMCSETUP(mmcra, SPRN_MMCRA);
SYSFS_PMCSETUP(pmc1, SPRN_PMC1);
SYSFS_PMCSETUP(pmc2, SPRN_PMC2);
SYSFS_PMCSETUP(pmc3, SPRN_PMC3);
SYSFS_PMCSETUP(pmc4, SPRN_PMC4);
SYSFS_PMCSETUP(pmc5, SPRN_PMC5);
SYSFS_PMCSETUP(pmc6, SPRN_PMC6);
SYSFS_PMCSETUP(pmc7, SPRN_PMC7);
SYSFS_PMCSETUP(pmc8, SPRN_PMC8);
SYSFS_PMCSETUP(purr, SPRN_PURR);

static SYSDEV_ATTR(mmcr0, 0600, show_mmcr0, store_mmcr0);
static SYSDEV_ATTR(mmcr1, 0600, show_mmcr1, store_mmcr1);
static SYSDEV_ATTR(mmcra, 0600, show_mmcra, store_mmcra);
static SYSDEV_ATTR(pmc1, 0600, show_pmc1, store_pmc1);
static SYSDEV_ATTR(pmc2, 0600, show_pmc2, store_pmc2);
static SYSDEV_ATTR(pmc3, 0600, show_pmc3, store_pmc3);
static SYSDEV_ATTR(pmc4, 0600, show_pmc4, store_pmc4);
static SYSDEV_ATTR(pmc5, 0600, show_pmc5, store_pmc5);
static SYSDEV_ATTR(pmc6, 0600, show_pmc6, store_pmc6);
static SYSDEV_ATTR(pmc7, 0600, show_pmc7, store_pmc7);
static SYSDEV_ATTR(pmc8, 0600, show_pmc8, store_pmc8);
static SYSDEV_ATTR(purr, 0600, show_purr, NULL);
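
/* Create the per-cpu sysfs attributes when a CPU comes online */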
static void register_cpu_online(unsigned int cpu)
{
	struct cpu *c = &per_cpu(cpu_devices, cpu);
	struct sys_device *s = &c->sysdev;

#ifndef CONFIG_PPC_ISERIES
	if (cpu_has_feature(CPU_FTR_SMT))
		sysdev_create_file(s, &attr_smt_snooze_delay);
#endif

	/* PMC stuff */
	sysdev_create_file(s, &attr_mmcr0);
	sysdev_create_file(s, &attr_mmcr1);

	if (cpu_has_feature(CPU_FTR_MMCRA))
		sysdev_create_file(s, &attr_mmcra);

	sysdev_create_file(s, &attr_pmc1);
	sysdev_create_file(s, &attr_pmc2);
	sysdev_create_file(s, &attr_pmc3);
	sysdev_create_file(s, &attr_pmc4);
	sysdev_create_file(s, &attr_pmc5);
	sysdev_create_file(s, &attr_pmc6);

	if (cpu_has_feature(CPU_FTR_PMC8)) {
		sysdev_create_file(s, &attr_pmc7);
		sysdev_create_file(s, &attr_pmc8);
	}

	if (cpu_has_feature(CPU_FTR_SMT))
		sysdev_create_file(s, &attr_purr);
}
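
/* Remove the per-cpu sysfs attributes when a CPU is hot-unplugged */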
#ifdef CONFIG_HOTPLUG_CPU
static void unregister_cpu_online(unsigned int cpu)
{
	struct cpu *c = &per_cpu(cpu_devices, cpu);
	struct sys_device *s = &c->sysdev;

	BUG_ON(c->no_control);

#ifndef CONFIG_PPC_ISERIES
	if (cpu_has_feature(CPU_FTR_SMT))
		sysdev_remove_file(s, &attr_smt_snooze_delay);
#endif

	/* PMC stuff */
	sysdev_remove_file(s, &attr_mmcr0);
	sysdev_remove_file(s, &attr_mmcr1);

	if (cpu_has_feature(CPU_FTR_MMCRA))
		sysdev_remove_file(s, &attr_mmcra);

	sysdev_remove_file(s, &attr_pmc1);
	sysdev_remove_file(s, &attr_pmc2);
	sysdev_remove_file(s, &attr_pmc3);
	sysdev_remove_file(s, &attr_pmc4);
	sysdev_remove_file(s, &attr_pmc5);
	sysdev_remove_file(s, &attr_pmc6);

	if (cpu_has_feature(CPU_FTR_PMC8)) {
		sysdev_remove_file(s, &attr_pmc7);
		sysdev_remove_file(s, &attr_pmc8);
	}

	if (cpu_has_feature(CPU_FTR_SMT))
		sysdev_remove_file(s, &attr_purr);
}
#endif /* CONFIG_HOTPLUG_CPU */
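
/* CPU hotplug notifier: add or remove the sysfs attributes as CPUs come and go */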
static int __devinit sysfs_cpu_notify(struct notifier_block *self,
				      unsigned long action, void *hcpu)
{
	unsigned int cpu = (unsigned int)(long)hcpu;

	switch (action) {
	case CPU_ONLINE:
		register_cpu_online(cpu);
		break;
#ifdef CONFIG_HOTPLUG_CPU
	case CPU_DEAD:
		unregister_cpu_online(cpu);
		break;
#endif
	}
	return NOTIFY_OK;
}

static struct notifier_block __devinitdata sysfs_cpu_nb = {
	.notifier_call	= sysfs_cpu_notify,
};
/* NUMA stuff */

#ifdef CONFIG_NUMA
static struct node node_devices[MAX_NUMNODES];

static void register_nodes(void)
{
	int i;

	for (i = 0; i < MAX_NUMNODES; i++) {
		if (node_online(i)) {
			int p_node = parent_node(i);
			struct node *parent = NULL;

			if (p_node != i)
				parent = &node_devices[p_node];

			register_node(&node_devices[i], i, parent);
		}
	}
}
#else
static void register_nodes(void)
{
	return;
}
#endif
/* Only valid if CPU is present. */
static ssize_t show_physical_id(struct sys_device *dev, char *buf)
{
	struct cpu *cpu = container_of(dev, struct cpu, sysdev);

	return sprintf(buf, "%d\n", get_hard_smp_processor_id(cpu->sysdev.id));
}
static SYSDEV_ATTR(physical_id, 0444, show_physical_id, NULL);
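
/*
 * Boot-time setup: register NUMA nodes, the CPU hotplug notifier and the
 * per-cpu sysdevs, creating the online-only attributes for CPUs already up.
 */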
static int __init topology_init(void)
{
	int cpu;
	struct node *parent = NULL;

	register_nodes();

	register_cpu_notifier(&sysfs_cpu_nb);

	for_each_cpu(cpu) {
		struct cpu *c = &per_cpu(cpu_devices, cpu);

#ifdef CONFIG_NUMA
		/* The node to which a cpu belongs can't be known
		 * until the cpu is made present.
		 */
		parent = NULL;
		if (cpu_present(cpu))
			parent = &node_devices[cpu_to_node(cpu)];
#endif
		/*
		 * For now, we just see if the system supports making
		 * the RTAS calls for CPU hotplug. But, there may be a
		 * more comprehensive way to do this for an individual
		 * CPU. For instance, the boot cpu might never be valid
		 * for hotplugging.
		 */
		if (!ppc_md.cpu_die)
			c->no_control = 1;

		if (cpu_online(cpu) || (c->no_control == 0)) {
			register_cpu(c, cpu, parent);
			sysdev_create_file(&c->sysdev, &attr_physical_id);
		}

		if (cpu_online(cpu))
			register_cpu_online(cpu);
	}

	return 0;
}
__initcall(topology_init);