/* CPU control.
 * (C) 2001, 2002, 2003, 2004 Rusty Russell
 *
 * This code is licensed under the GPL.
 */
#include <linux/proc_fs.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/notifier.h>
#include <linux/sched.h>
#include <linux/unistd.h>
#include <linux/cpu.h>
#include <linux/export.h>
#include <linux/kthread.h>
#include <linux/stop_machine.h>
#include <linux/mutex.h>
#include <linux/gfp.h>
#include <linux/suspend.h>

#ifdef CONFIG_SMP
/* Serializes the updates to cpu_online_mask, cpu_present_mask */
static DEFINE_MUTEX(cpu_add_remove_lock);
/*
 * The following two APIs must be used when attempting
 * to serialize the updates to cpu_online_mask, cpu_present_mask.
 */
void cpu_maps_update_begin(void)
{
	mutex_lock(&cpu_add_remove_lock);
}

void cpu_maps_update_done(void)
{
	mutex_unlock(&cpu_add_remove_lock);
}
static RAW_NOTIFIER_HEAD(cpu_chain);

/* If set, cpu_up and cpu_down will return -EBUSY and do nothing.
 * Should always be manipulated under cpu_add_remove_lock
 */
static int cpu_hotplug_disabled;

#ifdef CONFIG_HOTPLUG_CPU

static struct {
	struct task_struct *active_writer;
	struct mutex lock; /* Synchronizes accesses to refcount, */
	/*
	 * Also blocks the new readers during
	 * an ongoing cpu hotplug operation.
	 */
	int refcount;
} cpu_hotplug = {
	.active_writer = NULL,
	.lock = __MUTEX_INITIALIZER(cpu_hotplug.lock),
	.refcount = 0,
};
void get_online_cpus(void)
{
	might_sleep();
	if (cpu_hotplug.active_writer == current)
		return;
	mutex_lock(&cpu_hotplug.lock);
	cpu_hotplug.refcount++;
	mutex_unlock(&cpu_hotplug.lock);
}
EXPORT_SYMBOL_GPL(get_online_cpus);

void put_online_cpus(void)
{
	if (cpu_hotplug.active_writer == current)
		return;
	mutex_lock(&cpu_hotplug.lock);
	if (!--cpu_hotplug.refcount && unlikely(cpu_hotplug.active_writer))
		wake_up_process(cpu_hotplug.active_writer);
	mutex_unlock(&cpu_hotplug.lock);
}
EXPORT_SYMBOL_GPL(put_online_cpus);
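
/*
 * Example (illustrative sketch, not part of this file): the typical
 * reader-side pairing of get_online_cpus()/put_online_cpus(). The
 * helper name walk_online_cpus() is made up; any code that needs a
 * stable view of cpu_online_mask follows this pattern.
 *
 *	static void walk_online_cpus(void)
 *	{
 *		int cpu;
 *
 *		get_online_cpus();	// hold off hotplug writers
 *		for_each_online_cpu(cpu)
 *			pr_info("CPU%d is online\n", cpu);
 *		put_online_cpus();	// let hotplug proceed again
 *	}
 */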
/*
 * This ensures that the hotplug operation can begin only when the
 * refcount goes to zero.
 *
 * Note that during a cpu-hotplug operation, the new readers, if any,
 * will be blocked by the cpu_hotplug.lock
 *
 * Since cpu_hotplug_begin() is always called after invoking
 * cpu_maps_update_begin(), we can be sure that only one writer is active.
 *
 * Note that theoretically, there is a possibility of a livelock:
 * - Refcount goes to zero, last reader wakes up the sleeping
 *   writer.
 * - Last reader unlocks the cpu_hotplug.lock.
 * - A new reader arrives at this moment, bumps up the refcount.
 * - The writer acquires the cpu_hotplug.lock, finds the refcount
 *   non-zero and goes to sleep again.
 *
 * However, this is very difficult to achieve in practice since
 * get_online_cpus() is not an API that is called all that often.
 */
static void cpu_hotplug_begin(void)
{
	cpu_hotplug.active_writer = current;

	for (;;) {
		mutex_lock(&cpu_hotplug.lock);
		if (likely(!cpu_hotplug.refcount))
			break;
		__set_current_state(TASK_UNINTERRUPTIBLE);
		mutex_unlock(&cpu_hotplug.lock);
		schedule();
	}
}

static void cpu_hotplug_done(void)
{
	cpu_hotplug.active_writer = NULL;
	mutex_unlock(&cpu_hotplug.lock);
}

#else /* #if CONFIG_HOTPLUG_CPU */
static void cpu_hotplug_begin(void) {}
static void cpu_hotplug_done(void) {}
#endif	/* #else #if CONFIG_HOTPLUG_CPU */
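
/*
 * Writer-side ordering, as used by _cpu_down()/_cpu_up() below
 * (sketch of the call order, not additional code):
 *
 *	cpu_maps_update_begin();	// only one writer at a time
 *	cpu_hotplug_begin();		// wait for refcount == 0, then keep
 *					// holding cpu_hotplug.lock so new
 *					// readers block
 *	... modify cpu_online_mask ...
 *	cpu_hotplug_done();		// release the readers
 *	cpu_maps_update_done();
 */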
/* Need to know about CPUs going up/down? */
int __ref register_cpu_notifier(struct notifier_block *nb)
{
	int ret;
	cpu_maps_update_begin();
	ret = raw_notifier_chain_register(&cpu_chain, nb);
	cpu_maps_update_done();
	return ret;
}

static int __cpu_notify(unsigned long val, void *v, int nr_to_call,
			int *nr_calls)
{
	int ret;

	ret = __raw_notifier_call_chain(&cpu_chain, val, v, nr_to_call,
					nr_calls);

	return notifier_to_errno(ret);
}

static int cpu_notify(unsigned long val, void *v)
{
	return __cpu_notify(val, v, -1, NULL);
}
#ifdef CONFIG_HOTPLUG_CPU

static void cpu_notify_nofail(unsigned long val, void *v)
{
	BUG_ON(cpu_notify(val, v));
}
EXPORT_SYMBOL(register_cpu_notifier);

void __ref unregister_cpu_notifier(struct notifier_block *nb)
{
	cpu_maps_update_begin();
	raw_notifier_chain_unregister(&cpu_chain, nb);
	cpu_maps_update_done();
}
EXPORT_SYMBOL(unregister_cpu_notifier);
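
/*
 * Example (hypothetical driver code, not part of this file): reacting to
 * hotplug events through the chain above. The names foo_cpu_callback and
 * foo_cpu_notifier are made up for this sketch.
 *
 *	static int foo_cpu_callback(struct notifier_block *nb,
 *				    unsigned long action, void *hcpu)
 *	{
 *		unsigned int cpu = (unsigned long)hcpu;
 *
 *		switch (action & ~CPU_TASKS_FROZEN) {
 *		case CPU_ONLINE:
 *			// set up per-cpu state for 'cpu'
 *			break;
 *		case CPU_DEAD:
 *			// tear down per-cpu state for 'cpu'
 *			break;
 *		}
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block foo_cpu_notifier = {
 *		.notifier_call = foo_cpu_callback,
 *	};
 *
 *	// at init time: register_cpu_notifier(&foo_cpu_notifier);
 */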
static inline void check_for_tasks(int cpu)
{
	struct task_struct *p;

	write_lock_irq(&tasklist_lock);
	for_each_process(p) {
		if (task_cpu(p) == cpu && p->state == TASK_RUNNING &&
		    (p->utime || p->stime))
			printk(KERN_WARNING "Task %s (pid = %d) is on cpu %d "
				"(state = %ld, flags = %x)\n",
				p->comm, task_pid_nr(p), cpu,
				p->state, p->flags);
	}
	write_unlock_irq(&tasklist_lock);
}
struct take_cpu_down_param {
	unsigned long mod;
	void *hcpu;
};

/* Take this CPU down. */
static int __ref take_cpu_down(void *_param)
{
	struct take_cpu_down_param *param = _param;
	int err;

	/* Ensure this CPU doesn't handle any more interrupts. */
	err = __cpu_disable();
	if (err < 0)
		return err;

	cpu_notify(CPU_DYING | param->mod, param->hcpu);
	return 0;
}
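
/*
 * Note: take_cpu_down() is run via __stop_machine() on the dying CPU
 * itself, with interrupts disabled and the other CPUs held in the
 * stop-machine state, so CPU_DYING notifiers execute in a context
 * where they must not sleep or take locks that stopped CPUs might
 * hold. (Descriptive note; the scheduler's migration_call(), referred
 * to in _cpu_down() below, is the canonical CPU_DYING handler.)
 */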
/* Requires cpu_add_remove_lock to be held */
static int __ref _cpu_down(unsigned int cpu, int tasks_frozen)
{
	int err, nr_calls = 0;
	void *hcpu = (void *)(long)cpu;
	unsigned long mod = tasks_frozen ? CPU_TASKS_FROZEN : 0;
	struct take_cpu_down_param tcd_param = {
		.mod = mod,
		.hcpu = hcpu,
	};

	if (num_online_cpus() == 1)
		return -EBUSY;

	if (!cpu_online(cpu))
		return -EINVAL;

	cpu_hotplug_begin();

	err = __cpu_notify(CPU_DOWN_PREPARE | mod, hcpu, -1, &nr_calls);
	if (err) {
		nr_calls--;
		__cpu_notify(CPU_DOWN_FAILED | mod, hcpu, nr_calls, NULL);
		printk("%s: attempt to take down CPU %u failed\n",
				__func__, cpu);
		goto out_release;
	}

	err = __stop_machine(take_cpu_down, &tcd_param, cpumask_of(cpu));
	if (err) {
		/* CPU didn't die: tell everyone.  Can't complain. */
		cpu_notify_nofail(CPU_DOWN_FAILED | mod, hcpu);
		goto out_release;
	}
	BUG_ON(cpu_online(cpu));

	/*
	 * The migration_call() CPU_DYING callback will have removed all
	 * runnable tasks from the cpu, there's only the idle task left now
	 * that the migration thread is done doing the stop_machine thing.
	 *
	 * Wait for the stop thread to go away.
	 */
	while (!idle_cpu(cpu))
		cpu_relax();

	/* This actually kills the CPU. */
	__cpu_die(cpu);

	/* CPU is completely dead: tell everyone.  Too late to complain. */
	cpu_notify_nofail(CPU_DEAD | mod, hcpu);

	check_for_tasks(cpu);

out_release:
	cpu_hotplug_done();
	if (!err)
		cpu_notify_nofail(CPU_POST_DEAD | mod, hcpu);
	return err;
}
int __ref cpu_down(unsigned int cpu)
{
	int err;

	cpu_maps_update_begin();

	if (cpu_hotplug_disabled) {
		err = -EBUSY;
		goto out;
	}

	err = _cpu_down(cpu, 0);

out:
	cpu_maps_update_done();
	return err;
}
EXPORT_SYMBOL(cpu_down);
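
/*
 * Example (illustrative): offlining a CPU from kernel code. This is the
 * same entry point that writing 0 to /sys/devices/system/cpu/cpuN/online
 * ends up invoking.
 *
 *	int err = cpu_down(3);		// take CPU 3 offline
 *	if (err)
 *		pr_err("cpu_down(3) failed: %d\n", err);  // e.g. -EBUSY
 */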
#endif /*CONFIG_HOTPLUG_CPU*/
/* Requires cpu_add_remove_lock to be held */
static int __cpuinit _cpu_up(unsigned int cpu, int tasks_frozen)
{
	int ret, nr_calls = 0;
	void *hcpu = (void *)(long)cpu;
	unsigned long mod = tasks_frozen ? CPU_TASKS_FROZEN : 0;

	if (cpu_online(cpu) || !cpu_present(cpu))
		return -EINVAL;

	cpu_hotplug_begin();
	ret = __cpu_notify(CPU_UP_PREPARE | mod, hcpu, -1, &nr_calls);
	if (ret) {
		nr_calls--;
		printk(KERN_WARNING "%s: attempt to bring up CPU %u failed\n",
				__func__, cpu);
		goto out_notify;
	}

	/* Arch-specific enabling code. */
	ret = __cpu_up(cpu);
	if (ret != 0)
		goto out_notify;
	BUG_ON(!cpu_online(cpu));

	/* Now call notifier in preparation. */
	cpu_notify(CPU_ONLINE | mod, hcpu);

out_notify:
	if (ret != 0)
		__cpu_notify(CPU_UP_CANCELED | mod, hcpu, nr_calls, NULL);
	cpu_hotplug_done();

	return ret;
}
int __cpuinit cpu_up(unsigned int cpu)
{
	int err = 0;

#ifdef	CONFIG_MEMORY_HOTPLUG
	int nid;
	pg_data_t	*pgdat;
#endif

	if (!cpu_possible(cpu)) {
		printk(KERN_ERR "can't online cpu %d because it is not "
			"configured as may-hotadd at boot time\n", cpu);
#if defined(CONFIG_IA64)
		printk(KERN_ERR "please check additional_cpus= boot "
				"parameter\n");
#endif
		return -EINVAL;
	}

#ifdef	CONFIG_MEMORY_HOTPLUG
	nid = cpu_to_node(cpu);
	if (!node_online(nid)) {
		err = mem_online_node(nid);
		if (err)
			return err;
	}

	pgdat = NODE_DATA(nid);
	if (!pgdat) {
		printk(KERN_ERR
			"Can't online cpu %d due to NULL pgdat\n", cpu);
		return -ENOMEM;
	}

	if (pgdat->node_zonelists->_zonerefs->zone == NULL) {
		mutex_lock(&zonelists_mutex);
		build_all_zonelists(NULL);
		mutex_unlock(&zonelists_mutex);
	}
#endif

	cpu_maps_update_begin();

	if (cpu_hotplug_disabled) {
		err = -EBUSY;
		goto out;
	}

	err = _cpu_up(cpu, 0);

out:
	cpu_maps_update_done();
	return err;
}
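
/*
 * Example (illustrative): the mirror image of the cpu_down() sketch
 * above; this is what writing 1 to /sys/devices/system/cpu/cpuN/online
 * ultimately invokes.
 *
 *	int err = cpu_up(3);		// bring CPU 3 back online
 *	if (err)
 *		pr_err("cpu_up(3) failed: %d\n", err);
 */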
#ifdef CONFIG_PM_SLEEP_SMP
static cpumask_var_t frozen_cpus;

void __weak arch_disable_nonboot_cpus_begin(void)
{
}

void __weak arch_disable_nonboot_cpus_end(void)
{
}
int disable_nonboot_cpus(void)
{
	int cpu, first_cpu, error = 0;

	cpu_maps_update_begin();
	first_cpu = cpumask_first(cpu_online_mask);
	/*
	 * We take down all of the non-boot CPUs in one shot to avoid races
	 * with the userspace trying to use the CPU hotplug at the same time
	 */
	cpumask_clear(frozen_cpus);

	arch_disable_nonboot_cpus_begin();

	printk("Disabling non-boot CPUs ...\n");
	for_each_online_cpu(cpu) {
		if (cpu == first_cpu)
			continue;
		error = _cpu_down(cpu, 1);
		if (!error)
			cpumask_set_cpu(cpu, frozen_cpus);
		else {
			printk(KERN_ERR "Error taking CPU%d down: %d\n",
				cpu, error);
			break;
		}
	}

	arch_disable_nonboot_cpus_end();

	if (!error) {
		BUG_ON(num_online_cpus() > 1);
		/* Make sure the CPUs won't be enabled by someone else */
		cpu_hotplug_disabled = 1;
	} else {
		printk(KERN_ERR "Non-boot CPUs are not disabled\n");
	}
	cpu_maps_update_done();
	return error;
}
void __weak arch_enable_nonboot_cpus_begin(void)
{
}

void __weak arch_enable_nonboot_cpus_end(void)
{
}

void __ref enable_nonboot_cpus(void)
{
	int cpu, error;

	/* Allow everyone to use the CPU hotplug again */
	cpu_maps_update_begin();
	cpu_hotplug_disabled = 0;
	if (cpumask_empty(frozen_cpus))
		goto out;

	printk(KERN_INFO "Enabling non-boot CPUs ...\n");

	arch_enable_nonboot_cpus_begin();

	for_each_cpu(cpu, frozen_cpus) {
		error = _cpu_up(cpu, 1);
		if (!error) {
			printk(KERN_INFO "CPU%d is up\n", cpu);
			continue;
		}
		printk(KERN_WARNING "Error taking CPU%d up: %d\n", cpu, error);
	}

	arch_enable_nonboot_cpus_end();

	cpumask_clear(frozen_cpus);
out:
	cpu_maps_update_done();
}
static int alloc_frozen_cpus(void)
{
	if (!alloc_cpumask_var(&frozen_cpus, GFP_KERNEL|__GFP_ZERO))
		return -ENOMEM;
	return 0;
}
core_initcall(alloc_frozen_cpus);
/*
 * Prevent regular CPU hotplug from racing with the freezer, by disabling CPU
 * hotplug when tasks are about to be frozen. Also, don't allow the freezer
 * to continue until any currently running CPU hotplug operation gets
 * completed.
 *
 * To modify the 'cpu_hotplug_disabled' flag, we need to acquire the
 * 'cpu_add_remove_lock'. And this same lock is also taken by the regular
 * CPU hotplug path and released only after it is complete. Thus, we
 * (and hence the freezer) will block here until any currently running CPU
 * hotplug operation gets completed.
 */
void cpu_hotplug_disable_before_freeze(void)
{
	cpu_maps_update_begin();
	cpu_hotplug_disabled = 1;
	cpu_maps_update_done();
}

/*
 * When tasks have been thawed, re-enable regular CPU hotplug (which had been
 * disabled while beginning to freeze tasks).
 */
void cpu_hotplug_enable_after_thaw(void)
{
	cpu_maps_update_begin();
	cpu_hotplug_disabled = 0;
	cpu_maps_update_done();
}
/*
 * When callbacks for CPU hotplug notifications are being executed, we must
 * ensure that the state of the system with respect to the tasks being frozen
 * or not, as reported by the notification, remains unchanged *throughout the
 * duration* of the execution of the callbacks.
 * Hence we need to prevent the freezer from racing with regular CPU hotplug.
 *
 * This synchronization is implemented by mutually excluding regular CPU
 * hotplug and Suspend/Hibernate call paths by hooking onto the Suspend/
 * Hibernate notifications.
 */
static int
cpu_hotplug_pm_callback(struct notifier_block *nb,
			unsigned long action, void *ptr)
{
	switch (action) {

	case PM_SUSPEND_PREPARE:
	case PM_HIBERNATION_PREPARE:
		cpu_hotplug_disable_before_freeze();
		break;

	case PM_POST_SUSPEND:
	case PM_POST_HIBERNATION:
		cpu_hotplug_enable_after_thaw();
		break;

	default:
		return NOTIFY_DONE;
	}

	return NOTIFY_OK;
}

int cpu_hotplug_pm_sync_init(void)
{
	pm_notifier(cpu_hotplug_pm_callback, 0);
	return 0;
}
core_initcall(cpu_hotplug_pm_sync_init);

#endif /* CONFIG_PM_SLEEP_SMP */
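
/*
 * Putting the PM pieces above together, a suspend cycle looks roughly
 * like this (sketch of the call order as driven by the PM core; the
 * exact sequence belongs to kernel/power/, not this file):
 *
 *	PM_SUSPEND_PREPARE notification
 *	  -> cpu_hotplug_disable_before_freeze()  // cpu_hotplug_disabled = 1
 *	freeze_processes()
 *	disable_nonboot_cpus()                    // _cpu_down(cpu, 1) per CPU
 *	... system sleeps ...
 *	enable_nonboot_cpus()                     // _cpu_up(cpu, 1) per CPU
 *	thaw_processes()
 *	PM_POST_SUSPEND notification
 *	  -> cpu_hotplug_enable_after_thaw()      // cpu_hotplug_disabled = 0
 */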
/**
 * notify_cpu_starting(cpu) - call the CPU_STARTING notifiers
 * @cpu: cpu that just started
 *
 * This function calls the cpu_chain notifiers with CPU_STARTING.
 * It must be called by the arch code on the new cpu, before the new cpu
 * enables interrupts and before the "boot" cpu returns from __cpu_up().
 */
void __cpuinit notify_cpu_starting(unsigned int cpu)
{
	unsigned long val = CPU_STARTING;

#ifdef CONFIG_PM_SLEEP_SMP
	if (frozen_cpus != NULL && cpumask_test_cpu(cpu, frozen_cpus))
		val = CPU_STARTING_FROZEN;
#endif /* CONFIG_PM_SLEEP_SMP */
	cpu_notify(val, (void *)(long)cpu);
}
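
/*
 * Example (hypothetical arch code): where notify_cpu_starting() fits in
 * a secondary-CPU startup path. The function name start_secondary() is
 * only illustrative; each architecture has its own variant, but the
 * ordering contract from the kernel-doc above is the same everywhere.
 *
 *	void __cpuinit start_secondary(void)
 *	{
 *		... low-level init of this CPU ...
 *		notify_cpu_starting(smp_processor_id());
 *		set_cpu_online(smp_processor_id(), true);
 *		local_irq_enable();	// only after the notifiers ran
 *		cpu_idle();
 *	}
 */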
#endif /* CONFIG_SMP */
/*
 * cpu_bit_bitmap[] is a special, "compressed" data structure that
 * represents all NR_CPUS bits binary values of 1<<nr.
 *
 * It is used by cpumask_of() to get a constant address to a CPU
 * mask value that has a single bit set only.
 */

/* cpu_bit_bitmap[0] is empty - so we can back into it */
#define MASK_DECLARE_1(x)	[x+1][0] = (1UL << (x))
#define MASK_DECLARE_2(x)	MASK_DECLARE_1(x), MASK_DECLARE_1(x+1)
#define MASK_DECLARE_4(x)	MASK_DECLARE_2(x), MASK_DECLARE_2(x+2)
#define MASK_DECLARE_8(x)	MASK_DECLARE_4(x), MASK_DECLARE_4(x+4)

const unsigned long cpu_bit_bitmap[BITS_PER_LONG+1][BITS_TO_LONGS(NR_CPUS)] = {

	MASK_DECLARE_8(0),	MASK_DECLARE_8(8),
	MASK_DECLARE_8(16),	MASK_DECLARE_8(24),
#if BITS_PER_LONG > 32
	MASK_DECLARE_8(32),	MASK_DECLARE_8(40),
	MASK_DECLARE_8(48),	MASK_DECLARE_8(56),
#endif
};
EXPORT_SYMBOL_GPL(cpu_bit_bitmap);
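
/*
 * For reference, cpumask_of(cpu) resolves to roughly the following
 * (see get_cpu_mask() in <linux/cpumask.h>; reproduced here only to
 * explain the layout). Row 1 + cpu % BITS_PER_LONG holds a first word
 * with bit (cpu % BITS_PER_LONG) set; stepping the pointer back by
 * cpu / BITS_PER_LONG longs makes that word land at the right word
 * offset for 'cpu', with the all-zero tail of the preceding row (and
 * ultimately the empty row 0) supplying the zero words in front of it.
 *
 *	static inline const struct cpumask *get_cpu_mask(unsigned int cpu)
 *	{
 *		const unsigned long *p = cpu_bit_bitmap[1 + cpu % BITS_PER_LONG];
 *		p -= cpu / BITS_PER_LONG;
 *		return to_cpumask(p);
 *	}
 */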
const DECLARE_BITMAP(cpu_all_bits, NR_CPUS) = CPU_BITS_ALL;
EXPORT_SYMBOL(cpu_all_bits);

#ifdef CONFIG_INIT_ALL_POSSIBLE
static DECLARE_BITMAP(cpu_possible_bits, CONFIG_NR_CPUS) __read_mostly
	= CPU_BITS_ALL;
#else
static DECLARE_BITMAP(cpu_possible_bits, CONFIG_NR_CPUS) __read_mostly;
#endif
const struct cpumask *const cpu_possible_mask = to_cpumask(cpu_possible_bits);
EXPORT_SYMBOL(cpu_possible_mask);

static DECLARE_BITMAP(cpu_online_bits, CONFIG_NR_CPUS) __read_mostly;
const struct cpumask *const cpu_online_mask = to_cpumask(cpu_online_bits);
EXPORT_SYMBOL(cpu_online_mask);

static DECLARE_BITMAP(cpu_present_bits, CONFIG_NR_CPUS) __read_mostly;
const struct cpumask *const cpu_present_mask = to_cpumask(cpu_present_bits);
EXPORT_SYMBOL(cpu_present_mask);

static DECLARE_BITMAP(cpu_active_bits, CONFIG_NR_CPUS) __read_mostly;
const struct cpumask *const cpu_active_mask = to_cpumask(cpu_active_bits);
EXPORT_SYMBOL(cpu_active_mask);
void set_cpu_possible(unsigned int cpu, bool possible)
{
	if (possible)
		cpumask_set_cpu(cpu, to_cpumask(cpu_possible_bits));
	else
		cpumask_clear_cpu(cpu, to_cpumask(cpu_possible_bits));
}

void set_cpu_present(unsigned int cpu, bool present)
{
	if (present)
		cpumask_set_cpu(cpu, to_cpumask(cpu_present_bits));
	else
		cpumask_clear_cpu(cpu, to_cpumask(cpu_present_bits));
}

void set_cpu_online(unsigned int cpu, bool online)
{
	if (online)
		cpumask_set_cpu(cpu, to_cpumask(cpu_online_bits));
	else
		cpumask_clear_cpu(cpu, to_cpumask(cpu_online_bits));
}

void set_cpu_active(unsigned int cpu, bool active)
{
	if (active)
		cpumask_set_cpu(cpu, to_cpumask(cpu_active_bits));
	else
		cpumask_clear_cpu(cpu, to_cpumask(cpu_active_bits));
}

void init_cpu_present(const struct cpumask *src)
{
	cpumask_copy(to_cpumask(cpu_present_bits), src);
}

void init_cpu_possible(const struct cpumask *src)
{
	cpumask_copy(to_cpumask(cpu_possible_bits), src);
}

void init_cpu_online(const struct cpumask *src)
{
	cpumask_copy(to_cpumask(cpu_online_bits), src);
}
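
/*
 * Example (hypothetical arch setup code): how the init_/set_ helpers
 * above are typically used while parsing CPU topology at early boot,
 * before any of the hotplug machinery runs. The function name
 * my_smp_init_cpus() is made up for this sketch.
 *
 *	void __init my_smp_init_cpus(unsigned int ncpus)
 *	{
 *		unsigned int cpu;
 *
 *		for (cpu = 0; cpu < ncpus && cpu < nr_cpu_ids; cpu++) {
 *			set_cpu_possible(cpu, true);
 *			set_cpu_present(cpu, true);	// found in firmware tables
 *		}
 *	}
 */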