appldata_base.c 16 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547548549550551552553554555556557558559560561562563564565566567568569570571572573574575576577578579580581582583584585586587588589590591592593594595596597598599600601602603604605606607608609610611612613614615616617618619620621622623624625626627628629630631632633634635636637638639640641642643644645646647648649650651652653654655656657658659660661662663664665666667668669670671672673674675676
  1. /*
  2. * arch/s390/appldata/appldata_base.c
  3. *
  4. * Base infrastructure for Linux-z/VM Monitor Stream, Stage 1.
  5. * Exports appldata_register_ops() and appldata_unregister_ops() for the
  6. * data gathering modules.
  7. *
  8. * Copyright IBM Corp. 2003, 2009
  9. *
  10. * Author: Gerald Schaefer <gerald.schaefer@de.ibm.com>
  11. */
  12. #define KMSG_COMPONENT "appldata"
  13. #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
  14. #include <linux/module.h>
  15. #include <linux/init.h>
  16. #include <linux/slab.h>
  17. #include <linux/errno.h>
  18. #include <linux/interrupt.h>
  19. #include <linux/proc_fs.h>
  20. #include <linux/mm.h>
  21. #include <linux/swap.h>
  22. #include <linux/pagemap.h>
  23. #include <linux/sysctl.h>
  24. #include <linux/notifier.h>
  25. #include <linux/cpu.h>
  26. #include <linux/workqueue.h>
  27. #include <linux/suspend.h>
  28. #include <linux/platform_device.h>
  29. #include <asm/appldata.h>
  30. #include <asm/timer.h>
  31. #include <asm/uaccess.h>
  32. #include <asm/io.h>
  33. #include <asm/smp.h>
  34. #include "appldata.h"
  35. #define APPLDATA_CPU_INTERVAL 10000 /* default (CPU) time for
  36. sampling interval in
  37. milliseconds */
  38. #define TOD_MICRO 0x01000 /* nr. of TOD clock units
  39. for 1 microsecond */
  40. static struct platform_device *appldata_pdev;
  41. /*
  42. * /proc entries (sysctl)
  43. */
  44. static const char appldata_proc_name[APPLDATA_PROC_NAME_LENGTH] = "appldata";
  45. static int appldata_timer_handler(ctl_table *ctl, int write, struct file *filp,
  46. void __user *buffer, size_t *lenp, loff_t *ppos);
  47. static int appldata_interval_handler(ctl_table *ctl, int write,
  48. struct file *filp,
  49. void __user *buffer,
  50. size_t *lenp, loff_t *ppos);
  51. static struct ctl_table_header *appldata_sysctl_header;
/*
 * Files below /proc/sys/appldata: "timer" starts/stops the sampling
 * timers, "interval" sets the sampling interval (milliseconds).
 */
static struct ctl_table appldata_table[] = {
	{
		.procname	= "timer",
		.mode		= S_IRUGO | S_IWUSR,
		.proc_handler	= &appldata_timer_handler,
	},
	{
		.procname	= "interval",
		.mode		= S_IRUGO | S_IWUSR,
		.proc_handler	= &appldata_interval_handler,
	},
	{ },	/* terminator */
};
/* Directory entry /proc/sys/appldata, parent of appldata_table above. */
static struct ctl_table appldata_dir_table[] = {
	{
		.procname	= appldata_proc_name,
		.maxlen		= 0,
		.mode		= S_IRUGO | S_IXUGO,
		.child		= appldata_table,
	},
	{ },	/* terminator */
};
  74. /*
  75. * Timer
  76. */
  77. static DEFINE_PER_CPU(struct vtimer_list, appldata_timer);
  78. static atomic_t appldata_expire_count = ATOMIC_INIT(0);
  79. static DEFINE_SPINLOCK(appldata_timer_lock);
  80. static int appldata_interval = APPLDATA_CPU_INTERVAL;
  81. static int appldata_timer_active;
  82. static int appldata_timer_suspended = 0;
  83. /*
  84. * Work queue
  85. */
  86. static struct workqueue_struct *appldata_wq;
  87. static void appldata_work_fn(struct work_struct *work);
  88. static DECLARE_WORK(appldata_work, appldata_work_fn);
  89. /*
  90. * Ops list
  91. */
  92. static DEFINE_MUTEX(appldata_ops_mutex);
  93. static LIST_HEAD(appldata_ops_list);
  94. /*************************** timer, work, DIAG *******************************/
  95. /*
  96. * appldata_timer_function()
  97. *
  98. * schedule work and reschedule timer
  99. */
  100. static void appldata_timer_function(unsigned long data)
  101. {
  102. if (atomic_dec_and_test(&appldata_expire_count)) {
  103. atomic_set(&appldata_expire_count, num_online_cpus());
  104. queue_work(appldata_wq, (struct work_struct *) data);
  105. }
  106. }
  107. /*
  108. * appldata_work_fn()
  109. *
  110. * call data gathering function for each (active) module
  111. */
  112. static void appldata_work_fn(struct work_struct *work)
  113. {
  114. struct list_head *lh;
  115. struct appldata_ops *ops;
  116. int i;
  117. i = 0;
  118. get_online_cpus();
  119. mutex_lock(&appldata_ops_mutex);
  120. list_for_each(lh, &appldata_ops_list) {
  121. ops = list_entry(lh, struct appldata_ops, list);
  122. if (ops->active == 1) {
  123. ops->callback(ops->data);
  124. }
  125. }
  126. mutex_unlock(&appldata_ops_mutex);
  127. put_online_cpus();
  128. }
  129. /*
  130. * appldata_diag()
  131. *
  132. * prepare parameter list, issue DIAG 0xDC
  133. */
  134. int appldata_diag(char record_nr, u16 function, unsigned long buffer,
  135. u16 length, char *mod_lvl)
  136. {
  137. struct appldata_product_id id = {
  138. .prod_nr = {0xD3, 0xC9, 0xD5, 0xE4,
  139. 0xE7, 0xD2, 0xD9}, /* "LINUXKR" */
  140. .prod_fn = 0xD5D3, /* "NL" */
  141. .version_nr = 0xF2F6, /* "26" */
  142. .release_nr = 0xF0F1, /* "01" */
  143. };
  144. id.record_nr = record_nr;
  145. id.mod_lvl = (mod_lvl[0]) << 8 | mod_lvl[1];
  146. return appldata_asm(&id, function, (void *) buffer, length);
  147. }
  148. /************************ timer, work, DIAG <END> ****************************/
  149. /****************************** /proc stuff **********************************/
  150. /*
  151. * appldata_mod_vtimer_wrap()
  152. *
  153. * wrapper function for mod_virt_timer(), because smp_call_function_single()
  154. * accepts only one parameter.
  155. */
  156. static void __appldata_mod_vtimer_wrap(void *p) {
  157. struct {
  158. struct vtimer_list *timer;
  159. u64 expires;
  160. } *args = p;
  161. mod_virt_timer_periodic(args->timer, args->expires);
  162. }
  163. #define APPLDATA_ADD_TIMER 0
  164. #define APPLDATA_DEL_TIMER 1
  165. #define APPLDATA_MOD_TIMER 2
  166. /*
  167. * __appldata_vtimer_setup()
  168. *
  169. * Add, delete or modify virtual timers on all online cpus.
  170. * The caller needs to get the appldata_timer_lock spinlock.
  171. */
static void
__appldata_vtimer_setup(int cmd)
{
	u64 per_cpu_interval;
	int i;

	switch (cmd) {
	case APPLDATA_ADD_TIMER:
		if (appldata_timer_active)
			break;
		/* total interval is split evenly across the online cpus;
		 * TOD_MICRO converts microseconds to TOD clock units */
		per_cpu_interval = (u64) (appldata_interval*1000 /
					  num_online_cpus()) * TOD_MICRO;
		for_each_online_cpu(i) {
			per_cpu(appldata_timer, i).expires = per_cpu_interval;
			/* arm each timer on its own cpu (vtimers are per-cpu) */
			smp_call_function_single(i, add_virt_timer_periodic,
						 &per_cpu(appldata_timer, i),
						 1);
		}
		appldata_timer_active = 1;
		break;
	case APPLDATA_DEL_TIMER:
		for_each_online_cpu(i)
			del_virt_timer(&per_cpu(appldata_timer, i));
		if (!appldata_timer_active)
			break;
		appldata_timer_active = 0;
		/* reset so a later restart waits for all cpus again */
		atomic_set(&appldata_expire_count, num_online_cpus());
		break;
	case APPLDATA_MOD_TIMER:
		per_cpu_interval = (u64) (appldata_interval*1000 /
					  num_online_cpus()) * TOD_MICRO;
		if (!appldata_timer_active)
			break;
		for_each_online_cpu(i) {
			/* argument bundle for __appldata_mod_vtimer_wrap();
			 * safe on the stack because the cross-call waits
			 * (last argument 1) */
			struct {
				struct vtimer_list *timer;
				u64 expires;
			} args;
			args.timer = &per_cpu(appldata_timer, i);
			args.expires = per_cpu_interval;
			smp_call_function_single(i, __appldata_mod_vtimer_wrap,
						 &args, 1);
		}
	}
}
  216. /*
  217. * appldata_timer_handler()
  218. *
  219. * Start/Stop timer, show status of timer (0 = not active, 1 = active)
  220. */
  221. static int
  222. appldata_timer_handler(ctl_table *ctl, int write, struct file *filp,
  223. void __user *buffer, size_t *lenp, loff_t *ppos)
  224. {
  225. int len;
  226. char buf[2];
  227. if (!*lenp || *ppos) {
  228. *lenp = 0;
  229. return 0;
  230. }
  231. if (!write) {
  232. len = sprintf(buf, appldata_timer_active ? "1\n" : "0\n");
  233. if (len > *lenp)
  234. len = *lenp;
  235. if (copy_to_user(buffer, buf, len))
  236. return -EFAULT;
  237. goto out;
  238. }
  239. len = *lenp;
  240. if (copy_from_user(buf, buffer, len > sizeof(buf) ? sizeof(buf) : len))
  241. return -EFAULT;
  242. get_online_cpus();
  243. spin_lock(&appldata_timer_lock);
  244. if (buf[0] == '1')
  245. __appldata_vtimer_setup(APPLDATA_ADD_TIMER);
  246. else if (buf[0] == '0')
  247. __appldata_vtimer_setup(APPLDATA_DEL_TIMER);
  248. spin_unlock(&appldata_timer_lock);
  249. put_online_cpus();
  250. out:
  251. *lenp = len;
  252. *ppos += len;
  253. return 0;
  254. }
  255. /*
  256. * appldata_interval_handler()
  257. *
  258. * Set (CPU) timer interval for collection of data (in milliseconds), show
  259. * current timer interval.
  260. */
  261. static int
  262. appldata_interval_handler(ctl_table *ctl, int write, struct file *filp,
  263. void __user *buffer, size_t *lenp, loff_t *ppos)
  264. {
  265. int len, interval;
  266. char buf[16];
  267. if (!*lenp || *ppos) {
  268. *lenp = 0;
  269. return 0;
  270. }
  271. if (!write) {
  272. len = sprintf(buf, "%i\n", appldata_interval);
  273. if (len > *lenp)
  274. len = *lenp;
  275. if (copy_to_user(buffer, buf, len))
  276. return -EFAULT;
  277. goto out;
  278. }
  279. len = *lenp;
  280. if (copy_from_user(buf, buffer, len > sizeof(buf) ? sizeof(buf) : len)) {
  281. return -EFAULT;
  282. }
  283. interval = 0;
  284. sscanf(buf, "%i", &interval);
  285. if (interval <= 0)
  286. return -EINVAL;
  287. get_online_cpus();
  288. spin_lock(&appldata_timer_lock);
  289. appldata_interval = interval;
  290. __appldata_vtimer_setup(APPLDATA_MOD_TIMER);
  291. spin_unlock(&appldata_timer_lock);
  292. put_online_cpus();
  293. out:
  294. *lenp = len;
  295. *ppos += len;
  296. return 0;
  297. }
  298. /*
  299. * appldata_generic_handler()
  300. *
  301. * Generic start/stop monitoring and DIAG, show status of
  302. * monitoring (0 = not in process, 1 = in process)
  303. */
  304. static int
  305. appldata_generic_handler(ctl_table *ctl, int write, struct file *filp,
  306. void __user *buffer, size_t *lenp, loff_t *ppos)
  307. {
  308. struct appldata_ops *ops = NULL, *tmp_ops;
  309. int rc, len, found;
  310. char buf[2];
  311. struct list_head *lh;
  312. found = 0;
  313. mutex_lock(&appldata_ops_mutex);
  314. list_for_each(lh, &appldata_ops_list) {
  315. tmp_ops = list_entry(lh, struct appldata_ops, list);
  316. if (&tmp_ops->ctl_table[2] == ctl) {
  317. found = 1;
  318. }
  319. }
  320. if (!found) {
  321. mutex_unlock(&appldata_ops_mutex);
  322. return -ENODEV;
  323. }
  324. ops = ctl->data;
  325. if (!try_module_get(ops->owner)) { // protect this function
  326. mutex_unlock(&appldata_ops_mutex);
  327. return -ENODEV;
  328. }
  329. mutex_unlock(&appldata_ops_mutex);
  330. if (!*lenp || *ppos) {
  331. *lenp = 0;
  332. module_put(ops->owner);
  333. return 0;
  334. }
  335. if (!write) {
  336. len = sprintf(buf, ops->active ? "1\n" : "0\n");
  337. if (len > *lenp)
  338. len = *lenp;
  339. if (copy_to_user(buffer, buf, len)) {
  340. module_put(ops->owner);
  341. return -EFAULT;
  342. }
  343. goto out;
  344. }
  345. len = *lenp;
  346. if (copy_from_user(buf, buffer,
  347. len > sizeof(buf) ? sizeof(buf) : len)) {
  348. module_put(ops->owner);
  349. return -EFAULT;
  350. }
  351. mutex_lock(&appldata_ops_mutex);
  352. if ((buf[0] == '1') && (ops->active == 0)) {
  353. // protect work queue callback
  354. if (!try_module_get(ops->owner)) {
  355. mutex_unlock(&appldata_ops_mutex);
  356. module_put(ops->owner);
  357. return -ENODEV;
  358. }
  359. ops->callback(ops->data); // init record
  360. rc = appldata_diag(ops->record_nr,
  361. APPLDATA_START_INTERVAL_REC,
  362. (unsigned long) ops->data, ops->size,
  363. ops->mod_lvl);
  364. if (rc != 0) {
  365. pr_err("Starting the data collection for %s "
  366. "failed with rc=%d\n", ops->name, rc);
  367. module_put(ops->owner);
  368. } else
  369. ops->active = 1;
  370. } else if ((buf[0] == '0') && (ops->active == 1)) {
  371. ops->active = 0;
  372. rc = appldata_diag(ops->record_nr, APPLDATA_STOP_REC,
  373. (unsigned long) ops->data, ops->size,
  374. ops->mod_lvl);
  375. if (rc != 0)
  376. pr_err("Stopping the data collection for %s "
  377. "failed with rc=%d\n", ops->name, rc);
  378. module_put(ops->owner);
  379. }
  380. mutex_unlock(&appldata_ops_mutex);
  381. out:
  382. *lenp = len;
  383. *ppos += len;
  384. module_put(ops->owner);
  385. return 0;
  386. }
  387. /*************************** /proc stuff <END> *******************************/
  388. /************************* module-ops management *****************************/
  389. /*
  390. * appldata_register_ops()
  391. *
  392. * update ops list, register /proc/sys entries
  393. */
  394. int appldata_register_ops(struct appldata_ops *ops)
  395. {
  396. if (ops->size > APPLDATA_MAX_REC_SIZE)
  397. return -EINVAL;
  398. ops->ctl_table = kzalloc(4 * sizeof(struct ctl_table), GFP_KERNEL);
  399. if (!ops->ctl_table)
  400. return -ENOMEM;
  401. mutex_lock(&appldata_ops_mutex);
  402. list_add(&ops->list, &appldata_ops_list);
  403. mutex_unlock(&appldata_ops_mutex);
  404. ops->ctl_table[0].procname = appldata_proc_name;
  405. ops->ctl_table[0].maxlen = 0;
  406. ops->ctl_table[0].mode = S_IRUGO | S_IXUGO;
  407. ops->ctl_table[0].child = &ops->ctl_table[2];
  408. ops->ctl_table[2].procname = ops->name;
  409. ops->ctl_table[2].mode = S_IRUGO | S_IWUSR;
  410. ops->ctl_table[2].proc_handler = appldata_generic_handler;
  411. ops->ctl_table[2].data = ops;
  412. ops->sysctl_header = register_sysctl_table(ops->ctl_table);
  413. if (!ops->sysctl_header)
  414. goto out;
  415. return 0;
  416. out:
  417. mutex_lock(&appldata_ops_mutex);
  418. list_del(&ops->list);
  419. mutex_unlock(&appldata_ops_mutex);
  420. kfree(ops->ctl_table);
  421. return -ENOMEM;
  422. }
  423. /*
  424. * appldata_unregister_ops()
  425. *
  426. * update ops list, unregister /proc entries, stop DIAG if necessary
  427. */
void appldata_unregister_ops(struct appldata_ops *ops)
{
	/* remove from the ops list first, so the work function can no
	 * longer call this module's callback ... */
	mutex_lock(&appldata_ops_mutex);
	list_del(&ops->list);
	mutex_unlock(&appldata_ops_mutex);
	/* ... then tear down the sysctl entry and free the table.
	 * NOTE(review): the kfree assumes unregister_sysctl_table()
	 * returns only after in-flight proc handlers are done — confirm
	 * for this kernel version. */
	unregister_sysctl_table(ops->sysctl_header);
	kfree(ops->ctl_table);
}
  436. /********************** module-ops management <END> **************************/
  437. /**************************** suspend / resume *******************************/
  438. static int appldata_freeze(struct device *dev)
  439. {
  440. struct appldata_ops *ops;
  441. int rc;
  442. struct list_head *lh;
  443. get_online_cpus();
  444. spin_lock(&appldata_timer_lock);
  445. if (appldata_timer_active) {
  446. __appldata_vtimer_setup(APPLDATA_DEL_TIMER);
  447. appldata_timer_suspended = 1;
  448. }
  449. spin_unlock(&appldata_timer_lock);
  450. put_online_cpus();
  451. mutex_lock(&appldata_ops_mutex);
  452. list_for_each(lh, &appldata_ops_list) {
  453. ops = list_entry(lh, struct appldata_ops, list);
  454. if (ops->active == 1) {
  455. rc = appldata_diag(ops->record_nr, APPLDATA_STOP_REC,
  456. (unsigned long) ops->data, ops->size,
  457. ops->mod_lvl);
  458. if (rc != 0)
  459. pr_err("Stopping the data collection for %s "
  460. "failed with rc=%d\n", ops->name, rc);
  461. }
  462. }
  463. mutex_unlock(&appldata_ops_mutex);
  464. return 0;
  465. }
  466. static int appldata_restore(struct device *dev)
  467. {
  468. struct appldata_ops *ops;
  469. int rc;
  470. struct list_head *lh;
  471. get_online_cpus();
  472. spin_lock(&appldata_timer_lock);
  473. if (appldata_timer_suspended) {
  474. __appldata_vtimer_setup(APPLDATA_ADD_TIMER);
  475. appldata_timer_suspended = 0;
  476. }
  477. spin_unlock(&appldata_timer_lock);
  478. put_online_cpus();
  479. mutex_lock(&appldata_ops_mutex);
  480. list_for_each(lh, &appldata_ops_list) {
  481. ops = list_entry(lh, struct appldata_ops, list);
  482. if (ops->active == 1) {
  483. ops->callback(ops->data); // init record
  484. rc = appldata_diag(ops->record_nr,
  485. APPLDATA_START_INTERVAL_REC,
  486. (unsigned long) ops->data, ops->size,
  487. ops->mod_lvl);
  488. if (rc != 0) {
  489. pr_err("Starting the data collection for %s "
  490. "failed with rc=%d\n", ops->name, rc);
  491. }
  492. }
  493. }
  494. mutex_unlock(&appldata_ops_mutex);
  495. return 0;
  496. }
/* thaw is identical to restore: re-arm timers, restart active collectors */
static int appldata_thaw(struct device *dev)
{
	return appldata_restore(dev);
}
/* power-management callbacks wired up via the dummy platform driver */
static struct dev_pm_ops appldata_pm_ops = {
	.freeze		= appldata_freeze,
	.thaw		= appldata_thaw,
	.restore	= appldata_restore,
};
/* dummy platform driver — exists only so appldata_pm_ops gets invoked
 * on suspend/resume transitions */
static struct platform_driver appldata_pdrv = {
	.driver = {
		.name	= "appldata",
		.owner	= THIS_MODULE,
		.pm	= &appldata_pm_ops,
	},
};
  513. /************************* suspend / resume <END> ****************************/
  514. /******************************* init / exit *********************************/
  515. static void __cpuinit appldata_online_cpu(int cpu)
  516. {
  517. init_virt_timer(&per_cpu(appldata_timer, cpu));
  518. per_cpu(appldata_timer, cpu).function = appldata_timer_function;
  519. per_cpu(appldata_timer, cpu).data = (unsigned long)
  520. &appldata_work;
  521. atomic_inc(&appldata_expire_count);
  522. spin_lock(&appldata_timer_lock);
  523. __appldata_vtimer_setup(APPLDATA_MOD_TIMER);
  524. spin_unlock(&appldata_timer_lock);
  525. }
  526. static void __cpuinit appldata_offline_cpu(int cpu)
  527. {
  528. del_virt_timer(&per_cpu(appldata_timer, cpu));
  529. if (atomic_dec_and_test(&appldata_expire_count)) {
  530. atomic_set(&appldata_expire_count, num_online_cpus());
  531. queue_work(appldata_wq, &appldata_work);
  532. }
  533. spin_lock(&appldata_timer_lock);
  534. __appldata_vtimer_setup(APPLDATA_MOD_TIMER);
  535. spin_unlock(&appldata_timer_lock);
  536. }
  537. static int __cpuinit appldata_cpu_notify(struct notifier_block *self,
  538. unsigned long action,
  539. void *hcpu)
  540. {
  541. switch (action) {
  542. case CPU_ONLINE:
  543. case CPU_ONLINE_FROZEN:
  544. appldata_online_cpu((long) hcpu);
  545. break;
  546. case CPU_DEAD:
  547. case CPU_DEAD_FROZEN:
  548. appldata_offline_cpu((long) hcpu);
  549. break;
  550. default:
  551. break;
  552. }
  553. return NOTIFY_OK;
  554. }
/* notifier block for cpu hotplug events */
static struct notifier_block __cpuinitdata appldata_nb = {
	.notifier_call = appldata_cpu_notify,
};
  558. /*
  559. * appldata_init()
  560. *
  561. * init timer, register /proc entries
  562. */
static int __init appldata_init(void)
{
	int i, rc;

	/* the platform driver/device pair exists only to hook the
	 * suspend/resume callbacks in appldata_pm_ops */
	rc = platform_driver_register(&appldata_pdrv);
	if (rc)
		return rc;

	appldata_pdev = platform_device_register_simple("appldata", -1, NULL,
							0);
	if (IS_ERR(appldata_pdev)) {
		rc = PTR_ERR(appldata_pdev);
		goto out_driver;
	}
	/* single-threaded workqueue that runs the data-gathering callbacks */
	appldata_wq = create_singlethread_workqueue("appldata");
	if (!appldata_wq) {
		rc = -ENOMEM;
		goto out_device;
	}

	/* initialize the per-cpu vtimers for all currently online cpus */
	get_online_cpus();
	for_each_online_cpu(i)
		appldata_online_cpu(i);
	put_online_cpus();

	/* Register cpu hotplug notifier */
	/* NOTE(review): a cpu coming online between put_online_cpus() and
	 * this call would be missed by the notifier — confirm this window
	 * is acceptable here */
	register_hotcpu_notifier(&appldata_nb);

	/* NOTE(review): return value unchecked — on failure the header is
	 * NULL and /proc/sys/appldata is simply absent; confirm intended */
	appldata_sysctl_header = register_sysctl_table(appldata_dir_table);
	return 0;

out_device:
	platform_device_unregister(appldata_pdev);
out_driver:
	platform_driver_unregister(&appldata_pdrv);
	return rc;
}
  594. __initcall(appldata_init);
  595. /**************************** init / exit <END> ******************************/
  596. EXPORT_SYMBOL_GPL(appldata_register_ops);
  597. EXPORT_SYMBOL_GPL(appldata_unregister_ops);
  598. EXPORT_SYMBOL_GPL(appldata_diag);
  599. #ifdef CONFIG_SWAP
  600. EXPORT_SYMBOL_GPL(si_swapinfo);
  601. #endif
  602. EXPORT_SYMBOL_GPL(nr_threads);
  603. EXPORT_SYMBOL_GPL(nr_running);
  604. EXPORT_SYMBOL_GPL(nr_iowait);