appldata_base.c 16 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547548549550551552553554555556557558559560561562563564565566567568569570571572573574575576577578579580581582583584585586587588589590591592593594595596597598599600601602603604605606607608609610611612613614615616617618619620621622623624625626627628629630631632633634635636637638639640641642643644645646647648649650651652653654655656657658659660661662663664665666667668669670671672673674675
  1. /*
  2. * arch/s390/appldata/appldata_base.c
  3. *
  4. * Base infrastructure for Linux-z/VM Monitor Stream, Stage 1.
  5. * Exports appldata_register_ops() and appldata_unregister_ops() for the
  6. * data gathering modules.
  7. *
  8. * Copyright IBM Corp. 2003, 2009
  9. *
  10. * Author: Gerald Schaefer <gerald.schaefer@de.ibm.com>
  11. */
  12. #define KMSG_COMPONENT "appldata"
  13. #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
  14. #include <linux/module.h>
  15. #include <linux/init.h>
  16. #include <linux/slab.h>
  17. #include <linux/errno.h>
  18. #include <linux/interrupt.h>
  19. #include <linux/proc_fs.h>
  20. #include <linux/mm.h>
  21. #include <linux/swap.h>
  22. #include <linux/pagemap.h>
  23. #include <linux/sysctl.h>
  24. #include <linux/notifier.h>
  25. #include <linux/cpu.h>
  26. #include <linux/workqueue.h>
  27. #include <linux/suspend.h>
  28. #include <linux/platform_device.h>
  29. #include <asm/appldata.h>
  30. #include <asm/timer.h>
  31. #include <asm/uaccess.h>
  32. #include <asm/io.h>
  33. #include <asm/smp.h>
  34. #include "appldata.h"
#define APPLDATA_CPU_INTERVAL	10000	/* default (CPU) time for
					   sampling interval in
					   milliseconds */

#define TOD_MICRO		0x01000	/* nr. of TOD clock units
					   for 1 microsecond */
/* dummy device used only as an anchor for the suspend/resume callbacks */
static struct platform_device *appldata_pdev;

/*
 * /proc entries (sysctl)
 */
static const char appldata_proc_name[APPLDATA_PROC_NAME_LENGTH] = "appldata";
static int appldata_timer_handler(ctl_table *ctl, int write,
				  void __user *buffer, size_t *lenp,
				  loff_t *ppos);
static int appldata_interval_handler(ctl_table *ctl, int write,
				     void __user *buffer,
				     size_t *lenp, loff_t *ppos);

static struct ctl_table_header *appldata_sysctl_header;

/* /proc/sys/appldata/{timer,interval}: world-readable, root-writable */
static struct ctl_table appldata_table[] = {
	{
		.procname	= "timer",
		.mode		= S_IRUGO | S_IWUSR,
		.proc_handler	= &appldata_timer_handler,
	},
	{
		.procname	= "interval",
		.mode		= S_IRUGO | S_IWUSR,
		.proc_handler	= &appldata_interval_handler,
	},
	{ },	/* sentinel */
};

/* parent directory /proc/sys/appldata */
static struct ctl_table appldata_dir_table[] = {
	{
		.procname	= appldata_proc_name,
		.maxlen		= 0,
		.mode		= S_IRUGO | S_IXUGO,
		.child		= appldata_table,
	},
	{ },	/* sentinel */
};
/*
 * Timer
 */
static DEFINE_PER_CPU(struct vtimer_list, appldata_timer);

/* number of per-cpu timers that still have to expire before work is queued */
static atomic_t appldata_expire_count = ATOMIC_INIT(0);

static DEFINE_SPINLOCK(appldata_timer_lock);
static int appldata_interval = APPLDATA_CPU_INTERVAL; /* sampling interval, ms */
static int appldata_timer_active;	/* timers currently armed? */
static int appldata_timer_suspended = 0; /* stopped by freeze, re-arm on restore */

/*
 * Work queue
 */
static struct workqueue_struct *appldata_wq;
static void appldata_work_fn(struct work_struct *work);
static DECLARE_WORK(appldata_work, appldata_work_fn);

/*
 * Ops list
 */
static DEFINE_MUTEX(appldata_ops_mutex);	/* protects appldata_ops_list */
static LIST_HEAD(appldata_ops_list);
/*************************** timer, work, DIAG *******************************/
/*
 * appldata_timer_function()
 *
 * schedule work and reschedule timer
 */
static void appldata_timer_function(unsigned long data)
{
	/*
	 * One virtual timer fires per online cpu; only the last cpu of a
	 * round (counter hits zero) queues the work, then re-arms the
	 * counter for the next round.  'data' is the work_struct pointer
	 * stored in the per-cpu timer.
	 */
	if (atomic_dec_and_test(&appldata_expire_count)) {
		atomic_set(&appldata_expire_count, num_online_cpus());
		queue_work(appldata_wq, (struct work_struct *) data);
	}
}
  106. /*
  107. * appldata_work_fn()
  108. *
  109. * call data gathering function for each (active) module
  110. */
  111. static void appldata_work_fn(struct work_struct *work)
  112. {
  113. struct list_head *lh;
  114. struct appldata_ops *ops;
  115. int i;
  116. i = 0;
  117. get_online_cpus();
  118. mutex_lock(&appldata_ops_mutex);
  119. list_for_each(lh, &appldata_ops_list) {
  120. ops = list_entry(lh, struct appldata_ops, list);
  121. if (ops->active == 1) {
  122. ops->callback(ops->data);
  123. }
  124. }
  125. mutex_unlock(&appldata_ops_mutex);
  126. put_online_cpus();
  127. }
  128. /*
  129. * appldata_diag()
  130. *
  131. * prepare parameter list, issue DIAG 0xDC
  132. */
  133. int appldata_diag(char record_nr, u16 function, unsigned long buffer,
  134. u16 length, char *mod_lvl)
  135. {
  136. struct appldata_product_id id = {
  137. .prod_nr = {0xD3, 0xC9, 0xD5, 0xE4,
  138. 0xE7, 0xD2, 0xD9}, /* "LINUXKR" */
  139. .prod_fn = 0xD5D3, /* "NL" */
  140. .version_nr = 0xF2F6, /* "26" */
  141. .release_nr = 0xF0F1, /* "01" */
  142. };
  143. id.record_nr = record_nr;
  144. id.mod_lvl = (mod_lvl[0]) << 8 | mod_lvl[1];
  145. return appldata_asm(&id, function, (void *) buffer, length);
  146. }
/************************ timer, work, DIAG <END> ****************************/


/****************************** /proc stuff **********************************/
/*
 * appldata_mod_vtimer_wrap()
 *
 * wrapper function for mod_virt_timer(), because smp_call_function_single()
 * accepts only one parameter.
 */
static void __appldata_mod_vtimer_wrap(void *p) {
	/* 'p' points at the anonymous {timer, expires} pair built by
	 * __appldata_vtimer_setup(); the layout must match exactly */
	struct {
		struct vtimer_list *timer;
		u64 expires;
	} *args = p;
	mod_virt_timer_periodic(args->timer, args->expires);
}
  162. #define APPLDATA_ADD_TIMER 0
  163. #define APPLDATA_DEL_TIMER 1
  164. #define APPLDATA_MOD_TIMER 2
  165. /*
  166. * __appldata_vtimer_setup()
  167. *
  168. * Add, delete or modify virtual timers on all online cpus.
  169. * The caller needs to get the appldata_timer_lock spinlock.
  170. */
  171. static void
  172. __appldata_vtimer_setup(int cmd)
  173. {
  174. u64 per_cpu_interval;
  175. int i;
  176. switch (cmd) {
  177. case APPLDATA_ADD_TIMER:
  178. if (appldata_timer_active)
  179. break;
  180. per_cpu_interval = (u64) (appldata_interval*1000 /
  181. num_online_cpus()) * TOD_MICRO;
  182. for_each_online_cpu(i) {
  183. per_cpu(appldata_timer, i).expires = per_cpu_interval;
  184. smp_call_function_single(i, add_virt_timer_periodic,
  185. &per_cpu(appldata_timer, i),
  186. 1);
  187. }
  188. appldata_timer_active = 1;
  189. break;
  190. case APPLDATA_DEL_TIMER:
  191. for_each_online_cpu(i)
  192. del_virt_timer(&per_cpu(appldata_timer, i));
  193. if (!appldata_timer_active)
  194. break;
  195. appldata_timer_active = 0;
  196. atomic_set(&appldata_expire_count, num_online_cpus());
  197. break;
  198. case APPLDATA_MOD_TIMER:
  199. per_cpu_interval = (u64) (appldata_interval*1000 /
  200. num_online_cpus()) * TOD_MICRO;
  201. if (!appldata_timer_active)
  202. break;
  203. for_each_online_cpu(i) {
  204. struct {
  205. struct vtimer_list *timer;
  206. u64 expires;
  207. } args;
  208. args.timer = &per_cpu(appldata_timer, i);
  209. args.expires = per_cpu_interval;
  210. smp_call_function_single(i, __appldata_mod_vtimer_wrap,
  211. &args, 1);
  212. }
  213. }
  214. }
/*
 * appldata_timer_handler()
 *
 * Start/Stop timer, show status of timer (0 = not active, 1 = active)
 */
static int
appldata_timer_handler(ctl_table *ctl, int write,
		       void __user *buffer, size_t *lenp, loff_t *ppos)
{
	int len;
	char buf[2];

	if (!*lenp || *ppos) {
		/* nothing (left) to read or write */
		*lenp = 0;
		return 0;
	}
	if (!write) {
		/* read: report "1\n" or "0\n" */
		len = sprintf(buf, appldata_timer_active ? "1\n" : "0\n");
		if (len > *lenp)
			len = *lenp;
		if (copy_to_user(buffer, buf, len))
			return -EFAULT;
		goto out;
	}
	/* write: first character selects start ('1') or stop ('0');
	 * anything else is silently accepted as a no-op */
	len = *lenp;
	if (copy_from_user(buf, buffer, len > sizeof(buf) ? sizeof(buf) : len))
		return -EFAULT;
	get_online_cpus();	/* keep cpu set stable while (re)arming */
	spin_lock(&appldata_timer_lock);
	if (buf[0] == '1')
		__appldata_vtimer_setup(APPLDATA_ADD_TIMER);
	else if (buf[0] == '0')
		__appldata_vtimer_setup(APPLDATA_DEL_TIMER);
	spin_unlock(&appldata_timer_lock);
	put_online_cpus();
out:
	/* report the whole request as consumed */
	*lenp = len;
	*ppos += len;
	return 0;
}
  254. /*
  255. * appldata_interval_handler()
  256. *
  257. * Set (CPU) timer interval for collection of data (in milliseconds), show
  258. * current timer interval.
  259. */
  260. static int
  261. appldata_interval_handler(ctl_table *ctl, int write,
  262. void __user *buffer, size_t *lenp, loff_t *ppos)
  263. {
  264. int len, interval;
  265. char buf[16];
  266. if (!*lenp || *ppos) {
  267. *lenp = 0;
  268. return 0;
  269. }
  270. if (!write) {
  271. len = sprintf(buf, "%i\n", appldata_interval);
  272. if (len > *lenp)
  273. len = *lenp;
  274. if (copy_to_user(buffer, buf, len))
  275. return -EFAULT;
  276. goto out;
  277. }
  278. len = *lenp;
  279. if (copy_from_user(buf, buffer, len > sizeof(buf) ? sizeof(buf) : len)) {
  280. return -EFAULT;
  281. }
  282. interval = 0;
  283. sscanf(buf, "%i", &interval);
  284. if (interval <= 0)
  285. return -EINVAL;
  286. get_online_cpus();
  287. spin_lock(&appldata_timer_lock);
  288. appldata_interval = interval;
  289. __appldata_vtimer_setup(APPLDATA_MOD_TIMER);
  290. spin_unlock(&appldata_timer_lock);
  291. put_online_cpus();
  292. out:
  293. *lenp = len;
  294. *ppos += len;
  295. return 0;
  296. }
/*
 * appldata_generic_handler()
 *
 * Generic start/stop monitoring and DIAG, show status of
 * monitoring (0 = not in process, 1 = in process)
 */
static int
appldata_generic_handler(ctl_table *ctl, int write,
			 void __user *buffer, size_t *lenp, loff_t *ppos)
{
	struct appldata_ops *ops = NULL, *tmp_ops;
	int rc, len, found;
	char buf[2];
	struct list_head *lh;

	/* verify the ops this ctl entry belongs to is still registered */
	found = 0;
	mutex_lock(&appldata_ops_mutex);
	list_for_each(lh, &appldata_ops_list) {
		tmp_ops = list_entry(lh, struct appldata_ops, list);
		if (&tmp_ops->ctl_table[2] == ctl) {
			found = 1;
		}
	}
	if (!found) {
		mutex_unlock(&appldata_ops_mutex);
		return -ENODEV;
	}
	ops = ctl->data;
	/* first reference: pin the owning module while this handler runs,
	 * dropped on every return path below */
	if (!try_module_get(ops->owner)) { // protect this function
		mutex_unlock(&appldata_ops_mutex);
		return -ENODEV;
	}
	mutex_unlock(&appldata_ops_mutex);

	if (!*lenp || *ppos) {
		/* nothing (left) to read or write */
		*lenp = 0;
		module_put(ops->owner);
		return 0;
	}
	if (!write) {
		/* read: report whether collection is running */
		len = sprintf(buf, ops->active ? "1\n" : "0\n");
		if (len > *lenp)
			len = *lenp;
		if (copy_to_user(buffer, buf, len)) {
			module_put(ops->owner);
			return -EFAULT;
		}
		goto out;
	}
	len = *lenp;
	if (copy_from_user(buf, buffer,
			   len > sizeof(buf) ? sizeof(buf) : len)) {
		module_put(ops->owner);
		return -EFAULT;
	}

	mutex_lock(&appldata_ops_mutex);
	if ((buf[0] == '1') && (ops->active == 0)) {
		/* second reference: held for as long as the collection is
		 * active, dropped again on stop or on start failure */
		// protect work queue callback
		if (!try_module_get(ops->owner)) {
			mutex_unlock(&appldata_ops_mutex);
			module_put(ops->owner);
			return -ENODEV;
		}
		ops->callback(ops->data);	// init record
		rc = appldata_diag(ops->record_nr,
				   APPLDATA_START_INTERVAL_REC,
				   (unsigned long) ops->data, ops->size,
				   ops->mod_lvl);
		if (rc != 0) {
			pr_err("Starting the data collection for %s "
			       "failed with rc=%d\n", ops->name, rc);
			/* start failed: drop the collection reference */
			module_put(ops->owner);
		} else
			ops->active = 1;
	} else if ((buf[0] == '0') && (ops->active == 1)) {
		ops->active = 0;
		rc = appldata_diag(ops->record_nr, APPLDATA_STOP_REC,
				   (unsigned long) ops->data, ops->size,
				   ops->mod_lvl);
		if (rc != 0)
			pr_err("Stopping the data collection for %s "
			       "failed with rc=%d\n", ops->name, rc);
		/* drop the "collection active" reference */
		module_put(ops->owner);
	}
	mutex_unlock(&appldata_ops_mutex);
out:
	*lenp = len;
	*ppos += len;
	/* drop the handler's own reference taken at entry */
	module_put(ops->owner);
	return 0;
}
/*************************** /proc stuff <END> *******************************/


/************************* module-ops management *****************************/
/*
 * appldata_register_ops()
 *
 * update ops list, register /proc/sys entries
 */
int appldata_register_ops(struct appldata_ops *ops)
{
	if (ops->size > APPLDATA_MAX_REC_SIZE)
		return -EINVAL;

	/*
	 * Four entries: [0] = "appldata" directory, [1] = its sentinel,
	 * [2] = the module's own file, [3] = its sentinel.  kzalloc()
	 * leaves the sentinel entries zero-filled.
	 */
	ops->ctl_table = kzalloc(4 * sizeof(struct ctl_table), GFP_KERNEL);
	if (!ops->ctl_table)
		return -ENOMEM;

	mutex_lock(&appldata_ops_mutex);
	list_add(&ops->list, &appldata_ops_list);
	mutex_unlock(&appldata_ops_mutex);

	ops->ctl_table[0].procname = appldata_proc_name;
	ops->ctl_table[0].maxlen = 0;
	ops->ctl_table[0].mode = S_IRUGO | S_IXUGO;
	ops->ctl_table[0].child = &ops->ctl_table[2];

	ops->ctl_table[2].procname = ops->name;
	ops->ctl_table[2].mode = S_IRUGO | S_IWUSR;
	ops->ctl_table[2].proc_handler = appldata_generic_handler;
	/* looked up again by appldata_generic_handler() via ctl->data */
	ops->ctl_table[2].data = ops;

	ops->sysctl_header = register_sysctl_table(ops->ctl_table);
	if (!ops->sysctl_header)
		goto out;
	return 0;
out:
	/* registration failed: take the ops off the list again */
	mutex_lock(&appldata_ops_mutex);
	list_del(&ops->list);
	mutex_unlock(&appldata_ops_mutex);
	kfree(ops->ctl_table);
	return -ENOMEM;
}
/*
 * appldata_unregister_ops()
 *
 * update ops list, unregister /proc entries, stop DIAG if necessary
 */
void appldata_unregister_ops(struct appldata_ops *ops)
{
	/* remove from the list first so the work queue and the generic
	 * handler can no longer find this ops */
	mutex_lock(&appldata_ops_mutex);
	list_del(&ops->list);
	mutex_unlock(&appldata_ops_mutex);
	unregister_sysctl_table(ops->sysctl_header);
	kfree(ops->ctl_table);
}

/********************** module-ops management <END> **************************/
  436. /**************************** suspend / resume *******************************/
  437. static int appldata_freeze(struct device *dev)
  438. {
  439. struct appldata_ops *ops;
  440. int rc;
  441. struct list_head *lh;
  442. get_online_cpus();
  443. spin_lock(&appldata_timer_lock);
  444. if (appldata_timer_active) {
  445. __appldata_vtimer_setup(APPLDATA_DEL_TIMER);
  446. appldata_timer_suspended = 1;
  447. }
  448. spin_unlock(&appldata_timer_lock);
  449. put_online_cpus();
  450. mutex_lock(&appldata_ops_mutex);
  451. list_for_each(lh, &appldata_ops_list) {
  452. ops = list_entry(lh, struct appldata_ops, list);
  453. if (ops->active == 1) {
  454. rc = appldata_diag(ops->record_nr, APPLDATA_STOP_REC,
  455. (unsigned long) ops->data, ops->size,
  456. ops->mod_lvl);
  457. if (rc != 0)
  458. pr_err("Stopping the data collection for %s "
  459. "failed with rc=%d\n", ops->name, rc);
  460. }
  461. }
  462. mutex_unlock(&appldata_ops_mutex);
  463. return 0;
  464. }
  465. static int appldata_restore(struct device *dev)
  466. {
  467. struct appldata_ops *ops;
  468. int rc;
  469. struct list_head *lh;
  470. get_online_cpus();
  471. spin_lock(&appldata_timer_lock);
  472. if (appldata_timer_suspended) {
  473. __appldata_vtimer_setup(APPLDATA_ADD_TIMER);
  474. appldata_timer_suspended = 0;
  475. }
  476. spin_unlock(&appldata_timer_lock);
  477. put_online_cpus();
  478. mutex_lock(&appldata_ops_mutex);
  479. list_for_each(lh, &appldata_ops_list) {
  480. ops = list_entry(lh, struct appldata_ops, list);
  481. if (ops->active == 1) {
  482. ops->callback(ops->data); // init record
  483. rc = appldata_diag(ops->record_nr,
  484. APPLDATA_START_INTERVAL_REC,
  485. (unsigned long) ops->data, ops->size,
  486. ops->mod_lvl);
  487. if (rc != 0) {
  488. pr_err("Starting the data collection for %s "
  489. "failed with rc=%d\n", ops->name, rc);
  490. }
  491. }
  492. }
  493. mutex_unlock(&appldata_ops_mutex);
  494. return 0;
  495. }
/* thaw is identical to restore: restart timers and record collection */
static int appldata_thaw(struct device *dev)
{
	return appldata_restore(dev);
}
  500. static struct dev_pm_ops appldata_pm_ops = {
  501. .freeze = appldata_freeze,
  502. .thaw = appldata_thaw,
  503. .restore = appldata_restore,
  504. };
/* dummy platform driver: exists only to receive the PM callbacks above */
static struct platform_driver appldata_pdrv = {
	.driver = {
		.name	= "appldata",
		.owner	= THIS_MODULE,
		.pm	= &appldata_pm_ops,
	},
};
/************************* suspend / resume <END> ****************************/


/******************************* init / exit *********************************/

/* set up the virtual timer for a cpu that came online and redistribute
 * the sampling interval over the new number of online cpus */
static void __cpuinit appldata_online_cpu(int cpu)
{
	init_virt_timer(&per_cpu(appldata_timer, cpu));
	per_cpu(appldata_timer, cpu).function = appldata_timer_function;
	/* the timer callback gets the work_struct as its 'data' argument */
	per_cpu(appldata_timer, cpu).data = (unsigned long)
		&appldata_work;
	/* one more timer has to expire per sampling round now */
	atomic_inc(&appldata_expire_count);
	spin_lock(&appldata_timer_lock);
	__appldata_vtimer_setup(APPLDATA_MOD_TIMER);
	spin_unlock(&appldata_timer_lock);
}
/* tear down the timer of a cpu that went offline and rebalance the
 * sampling interval over the remaining online cpus */
static void __cpuinit appldata_offline_cpu(int cpu)
{
	del_virt_timer(&per_cpu(appldata_timer, cpu));
	/* if this cpu was the last one missing in the current round,
	 * fire the work now and reset the count for the remaining cpus */
	if (atomic_dec_and_test(&appldata_expire_count)) {
		atomic_set(&appldata_expire_count, num_online_cpus());
		queue_work(appldata_wq, &appldata_work);
	}
	spin_lock(&appldata_timer_lock);
	__appldata_vtimer_setup(APPLDATA_MOD_TIMER);
	spin_unlock(&appldata_timer_lock);
}
  536. static int __cpuinit appldata_cpu_notify(struct notifier_block *self,
  537. unsigned long action,
  538. void *hcpu)
  539. {
  540. switch (action) {
  541. case CPU_ONLINE:
  542. case CPU_ONLINE_FROZEN:
  543. appldata_online_cpu((long) hcpu);
  544. break;
  545. case CPU_DEAD:
  546. case CPU_DEAD_FROZEN:
  547. appldata_offline_cpu((long) hcpu);
  548. break;
  549. default:
  550. break;
  551. }
  552. return NOTIFY_OK;
  553. }
/* notifier block used to track cpu hotplug events */
static struct notifier_block __cpuinitdata appldata_nb = {
	.notifier_call = appldata_cpu_notify,
};
/*
 * appldata_init()
 *
 * init timer, register /proc entries
 */
static int __init appldata_init(void)
{
	int i, rc;

	rc = platform_driver_register(&appldata_pdrv);
	if (rc)
		return rc;

	/* dummy device, needed only as suspend/resume anchor */
	appldata_pdev = platform_device_register_simple("appldata", -1, NULL,
							0);
	if (IS_ERR(appldata_pdev)) {
		rc = PTR_ERR(appldata_pdev);
		goto out_driver;
	}
	appldata_wq = create_singlethread_workqueue("appldata");
	if (!appldata_wq) {
		rc = -ENOMEM;
		goto out_device;
	}

	/* initialize the per-cpu timers for all currently online cpus */
	get_online_cpus();
	for_each_online_cpu(i)
		appldata_online_cpu(i);
	put_online_cpus();

	/* Register cpu hotplug notifier */
	/* NOTE(review): a cpu coming online between put_online_cpus()
	 * above and this registration would be missed - verify against
	 * the hotplug locking rules of this kernel version */
	register_hotcpu_notifier(&appldata_nb);

	appldata_sysctl_header = register_sysctl_table(appldata_dir_table);
	return 0;

out_device:
	platform_device_unregister(appldata_pdev);
out_driver:
	platform_driver_unregister(&appldata_pdrv);
	return rc;
}
__initcall(appldata_init);
/**************************** init / exit <END> ******************************/

EXPORT_SYMBOL_GPL(appldata_register_ops);
EXPORT_SYMBOL_GPL(appldata_unregister_ops);
EXPORT_SYMBOL_GPL(appldata_diag);

/* re-export core kernel symbols needed by the data gathering modules */
#ifdef CONFIG_SWAP
EXPORT_SYMBOL_GPL(si_swapinfo);
#endif
EXPORT_SYMBOL_GPL(nr_threads);
EXPORT_SYMBOL_GPL(nr_running);
EXPORT_SYMBOL_GPL(nr_iowait);