cpu.c
/*
 * CPU subsystem support
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/cpu.h>
#include <linux/topology.h>
#include <linux/device.h>
#include <linux/node.h>
#include <linux/gfp.h>
#include <linux/slab.h>
#include <linux/percpu.h>

#include "base.h"

struct bus_type cpu_subsys = {
        .name = "cpu",
        .dev_name = "cpu",
};
EXPORT_SYMBOL_GPL(cpu_subsys);
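/* Per-CPU cache of the struct device registered for each CPU; see get_cpu_device(). */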
static DEFINE_PER_CPU(struct device *, cpu_sys_devices);

#ifdef CONFIG_HOTPLUG_CPU
static ssize_t show_online(struct device *dev,
                           struct device_attribute *attr,
                           char *buf)
{
        struct cpu *cpu = container_of(dev, struct cpu, dev);

        return sprintf(buf, "%u\n", !!cpu_online(cpu->dev.id));
}

static ssize_t __ref store_online(struct device *dev,
                                  struct device_attribute *attr,
                                  const char *buf, size_t count)
{
        struct cpu *cpu = container_of(dev, struct cpu, dev);
        ssize_t ret;

        cpu_hotplug_driver_lock();
        switch (buf[0]) {
        case '0':
                ret = cpu_down(cpu->dev.id);
                if (!ret)
                        kobject_uevent(&dev->kobj, KOBJ_OFFLINE);
                break;
        case '1':
                ret = cpu_up(cpu->dev.id);
                if (!ret)
                        kobject_uevent(&dev->kobj, KOBJ_ONLINE);
                break;
        default:
                ret = -EINVAL;
        }
        cpu_hotplug_driver_unlock();

        if (ret >= 0)
                ret = count;
        return ret;
}
static DEVICE_ATTR(online, 0644, show_online, store_online);
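/*
 * Usage sketch from userspace (assuming sysfs is mounted at /sys):
 *
 *   echo 0 > /sys/devices/system/cpu/cpu2/online    # offline CPU 2 via cpu_down()
 *   echo 1 > /sys/devices/system/cpu/cpu2/online    # bring it back via cpu_up()
 *
 * Any other leading character is rejected with -EINVAL.
 */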
static void __cpuinit register_cpu_control(struct cpu *cpu)
{
        device_create_file(&cpu->dev, &dev_attr_online);
}

void unregister_cpu(struct cpu *cpu)
{
        int logical_cpu = cpu->dev.id;

        unregister_cpu_under_node(logical_cpu, cpu_to_node(logical_cpu));

        device_remove_file(&cpu->dev, &dev_attr_online);

        device_unregister(&cpu->dev);
        per_cpu(cpu_sys_devices, logical_cpu) = NULL;
        return;
}

#ifdef CONFIG_ARCH_CPU_PROBE_RELEASE
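/*
 * Write-only "probe" and "release" files at the cpu subsystem root.  A write
 * is handed straight to the architecture hooks arch_cpu_probe() and
 * arch_cpu_release(), which parse the buffer themselves.
 */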
static ssize_t cpu_probe_store(struct device *dev,
                               struct device_attribute *attr,
                               const char *buf,
                               size_t count)
{
        return arch_cpu_probe(buf, count);
}

static ssize_t cpu_release_store(struct device *dev,
                                 struct device_attribute *attr,
                                 const char *buf,
                                 size_t count)
{
        return arch_cpu_release(buf, count);
}

static DEVICE_ATTR(probe, S_IWUSR, NULL, cpu_probe_store);
static DEVICE_ATTR(release, S_IWUSR, NULL, cpu_release_store);
#endif /* CONFIG_ARCH_CPU_PROBE_RELEASE */
#else /* ... !CONFIG_HOTPLUG_CPU */
static inline void register_cpu_control(struct cpu *cpu)
{
}
#endif /* CONFIG_HOTPLUG_CPU */

#ifdef CONFIG_KEXEC
#include <linux/kexec.h>
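/*
 * crash_notes reports the physical address of this CPU's crash notes buffer,
 * so that kexec/kdump tooling can locate the registers saved at crash time,
 * e.g. (assuming sysfs at /sys):
 *
 *   cat /sys/devices/system/cpu/cpu0/crash_notes
 */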
static ssize_t show_crash_notes(struct device *dev, struct device_attribute *attr,
                                char *buf)
{
        struct cpu *cpu = container_of(dev, struct cpu, dev);
        ssize_t rc;
        unsigned long long addr;
        int cpunum;

        cpunum = cpu->dev.id;

        /*
         * Might be reading another cpu's data, depending on which cpu the
         * reading thread has been scheduled on. But cpu data (memory) is
         * allocated once during boot up and does not change thereafter.
         * Hence this operation should be safe. No locking required.
         */
        addr = per_cpu_ptr_to_phys(per_cpu_ptr(crash_notes, cpunum));
        rc = sprintf(buf, "%Lx\n", addr);
        return rc;
}
static DEVICE_ATTR(crash_notes, 0400, show_crash_notes, NULL);
#endif
/*
 * Print cpu online, possible, present, and system maps
 */

struct cpu_attr {
        struct device_attribute attr;
        const struct cpumask *const * const map;
};

static ssize_t show_cpus_attr(struct device *dev,
                              struct device_attribute *attr,
                              char *buf)
{
        struct cpu_attr *ca = container_of(attr, struct cpu_attr, attr);
        int n = cpulist_scnprintf(buf, PAGE_SIZE-2, *(ca->map));

        buf[n++] = '\n';
        buf[n] = '\0';
        return n;
}

#define _CPU_ATTR(name, map)                                            \
        { __ATTR(name, 0444, show_cpus_attr, NULL), map }

/* Keep in sync with cpu_subsys_attrs */
static struct cpu_attr cpu_attrs[] = {
        _CPU_ATTR(online, &cpu_online_mask),
        _CPU_ATTR(possible, &cpu_possible_mask),
        _CPU_ATTR(present, &cpu_present_mask),
};
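/*
 * These appear as /sys/devices/system/cpu/{online,possible,present} and print
 * the corresponding mask in cpulist format, e.g. "0-3" on a four-CPU system.
 */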
/*
 * Print values for NR_CPUS and offlined cpus
 */
static ssize_t print_cpus_kernel_max(struct device *dev,
                                     struct device_attribute *attr, char *buf)
{
        int n = snprintf(buf, PAGE_SIZE-2, "%d\n", NR_CPUS - 1);
        return n;
}
static DEVICE_ATTR(kernel_max, 0444, print_cpus_kernel_max, NULL);

/* arch-optional setting to enable display of offline cpus >= nr_cpu_ids */
unsigned int total_cpus;

static ssize_t print_cpus_offline(struct device *dev,
                                  struct device_attribute *attr, char *buf)
{
        int n = 0, len = PAGE_SIZE-2;
        cpumask_var_t offline;

        /* display offline cpus < nr_cpu_ids */
        if (!alloc_cpumask_var(&offline, GFP_KERNEL))
                return -ENOMEM;
        cpumask_andnot(offline, cpu_possible_mask, cpu_online_mask);
        n = cpulist_scnprintf(buf, len, offline);
        free_cpumask_var(offline);

        /* display offline cpus >= nr_cpu_ids */
        if (total_cpus && nr_cpu_ids < total_cpus) {
                if (n && n < len)
                        buf[n++] = ',';

                if (nr_cpu_ids == total_cpus-1)
                        n += snprintf(&buf[n], len - n, "%d", nr_cpu_ids);
                else
                        n += snprintf(&buf[n], len - n, "%d-%d",
                                      nr_cpu_ids, total_cpus-1);
        }

        n += snprintf(&buf[n], len - n, "\n");
        return n;
}
static DEVICE_ATTR(offline, 0444, print_cpus_offline, NULL);
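/*
 * Example contents (actual values depend on the kernel config and machine):
 *
 *   cat /sys/devices/system/cpu/kernel_max    # NR_CPUS - 1, e.g. 255
 *   cat /sys/devices/system/cpu/offline       # e.g. "4-7" if CPUs 4-7 are down
 */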
/*
 * register_cpu - Set up a sysfs device for a CPU.
 * @cpu - cpu->hotpluggable field set to 1 will generate a control file in
 *        sysfs for this CPU.
 * @num - CPU number to use when creating the device.
 *
 * Initialize and register the CPU device.
 */
int __cpuinit register_cpu(struct cpu *cpu, int num)
{
        int error;

        cpu->node_id = cpu_to_node(num);
        cpu->dev.id = num;
        cpu->dev.bus = &cpu_subsys;
#ifdef CONFIG_ARCH_HAS_CPU_AUTOPROBE
        cpu->dev.bus->uevent = arch_cpu_uevent;
#endif
        error = device_register(&cpu->dev);
        if (!error && cpu->hotpluggable)
                register_cpu_control(cpu);
        if (!error)
                per_cpu(cpu_sys_devices, num) = &cpu->dev;
        if (!error)
                register_cpu_under_node(num, cpu_to_node(num));

#ifdef CONFIG_KEXEC
        if (!error)
                error = device_create_file(&cpu->dev, &dev_attr_crash_notes);
#endif
        return error;
}
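/*
 * get_cpu_device - Return the struct device registered for a possible CPU, or
 * NULL if the number is out of range or no device has been registered yet.
 * Callers commonly use the returned device as the parent for per-CPU sysfs
 * attributes.
 */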
struct device *get_cpu_device(unsigned cpu)
{
        if (cpu < nr_cpu_ids && cpu_possible(cpu))
                return per_cpu(cpu_sys_devices, cpu);
        else
                return NULL;
}
EXPORT_SYMBOL_GPL(get_cpu_device);

#ifdef CONFIG_ARCH_HAS_CPU_AUTOPROBE
static DEVICE_ATTR(modalias, 0444, arch_print_cpu_modalias, NULL);
#endif
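/*
 * Attributes created at the subsystem root, /sys/devices/system/cpu/, rather
 * than under an individual cpuN device.
 */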
static struct attribute *cpu_root_attrs[] = {
#ifdef CONFIG_ARCH_CPU_PROBE_RELEASE
        &dev_attr_probe.attr,
        &dev_attr_release.attr,
#endif
        &cpu_attrs[0].attr.attr,
        &cpu_attrs[1].attr.attr,
        &cpu_attrs[2].attr.attr,
        &dev_attr_kernel_max.attr,
        &dev_attr_offline.attr,
#ifdef CONFIG_ARCH_HAS_CPU_AUTOPROBE
        &dev_attr_modalias.attr,
#endif
        NULL
};

static struct attribute_group cpu_root_attr_group = {
        .attrs = cpu_root_attrs,
};

static const struct attribute_group *cpu_root_attr_groups[] = {
        &cpu_root_attr_group,
        NULL,
};

bool cpu_is_hotpluggable(unsigned cpu)
{
        struct device *dev = get_cpu_device(cpu);
        return dev && container_of(dev, struct cpu, dev)->hotpluggable;
}
EXPORT_SYMBOL_GPL(cpu_is_hotpluggable);

#ifdef CONFIG_GENERIC_CPU_DEVICES
static DEFINE_PER_CPU(struct cpu, cpu_devices);
#endif
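/*
 * Architectures that do not register their own struct cpu devices get a
 * generic registration for every possible CPU when CONFIG_GENERIC_CPU_DEVICES
 * is selected.
 */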
static void __init cpu_dev_register_generic(void)
{
#ifdef CONFIG_GENERIC_CPU_DEVICES
        int i;

        for_each_possible_cpu(i) {
                if (register_cpu(&per_cpu(cpu_devices, i), i))
                        panic("Failed to register CPU device");
        }
#endif
}

void __init cpu_dev_init(void)
{
        if (subsys_system_register(&cpu_subsys, cpu_root_attr_groups))
                panic("Failed to register CPU subsystem");

        cpu_dev_register_generic();

#if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT)
        sched_create_sysfs_power_savings_entries(cpu_subsys.dev_root);
#endif
}