/*
 * arch/s390/appldata/appldata_base.c
 *
 * Base infrastructure for Linux-z/VM Monitor Stream, Stage 1.
 * Exports appldata_register_ops() and appldata_unregister_ops() for the
 * data gathering modules.
 *
 * Copyright (C) 2003,2006 IBM Corporation, IBM Deutschland Entwicklung GmbH.
 *
 * Author: Gerald Schaefer <gerald.schaefer@de.ibm.com>
 */
  12. #include <linux/module.h>
  13. #include <linux/init.h>
  14. #include <linux/slab.h>
  15. #include <linux/errno.h>
  16. #include <linux/interrupt.h>
  17. #include <linux/proc_fs.h>
  18. #include <linux/mm.h>
  19. #include <linux/swap.h>
  20. #include <linux/pagemap.h>
  21. #include <linux/sysctl.h>
  22. #include <linux/notifier.h>
  23. #include <linux/cpu.h>
  24. #include <linux/workqueue.h>
  25. #include <asm/appldata.h>
  26. #include <asm/timer.h>
  27. #include <asm/uaccess.h>
  28. #include <asm/io.h>
  29. #include <asm/smp.h>
  30. #include "appldata.h"
#define MY_PRINT_NAME	"appldata"	/* for debug messages, etc. */
#define APPLDATA_CPU_INTERVAL	10000	/* default (CPU) time for
					   sampling interval in
					   milliseconds */

#define TOD_MICRO	0x01000		/* nr. of TOD clock units
					   for 1 microsecond */
/*
 * /proc entries (sysctl)
 */
static const char appldata_proc_name[APPLDATA_PROC_NAME_LENGTH] = "appldata";
static int appldata_timer_handler(ctl_table *ctl, int write, struct file *filp,
				  void __user *buffer, size_t *lenp, loff_t *ppos);
static int appldata_interval_handler(ctl_table *ctl, int write,
					 struct file *filp,
					 void __user *buffer,
					 size_t *lenp, loff_t *ppos);

static struct ctl_table_header *appldata_sysctl_header;

/* /proc/sys/appldata/{timer,interval}; handlers defined further below */
static struct ctl_table appldata_table[] = {
	{
		.ctl_name	= CTL_APPLDATA_TIMER,
		.procname	= "timer",
		.mode		= S_IRUGO | S_IWUSR,
		.proc_handler	= &appldata_timer_handler,
	},
	{
		.ctl_name	= CTL_APPLDATA_INTERVAL,
		.procname	= "interval",
		.mode		= S_IRUGO | S_IWUSR,
		.proc_handler	= &appldata_interval_handler,
	},
	{ .ctl_name = 0 }	/* sentinel */
};

/* parent directory entry: /proc/sys/appldata */
static struct ctl_table appldata_dir_table[] = {
	{
		.ctl_name	= CTL_APPLDATA,
		.procname	= appldata_proc_name,
		.maxlen		= 0,
		.mode		= S_IRUGO | S_IXUGO,
		.child		= appldata_table,
	},
	{ .ctl_name = 0 }	/* sentinel */
};
/*
 * Timer
 */
static DEFINE_PER_CPU(struct vtimer_list, appldata_timer);	/* one virtual CPU timer per cpu */
static atomic_t appldata_expire_count = ATOMIC_INIT(0);		/* cpus still to expire before work is queued */

static DEFINE_SPINLOCK(appldata_timer_lock);	/* protects timer setup/teardown */
static int appldata_interval = APPLDATA_CPU_INTERVAL;	/* sampling interval in ms */
static int appldata_timer_active;		/* 1 while the per-cpu timers run */

/*
 * Work queue
 */
static struct workqueue_struct *appldata_wq;
static void appldata_work_fn(struct work_struct *work);
static DECLARE_WORK(appldata_work, appldata_work_fn);

/*
 * Ops list
 */
static DEFINE_SPINLOCK(appldata_ops_lock);	/* protects appldata_ops_list */
static LIST_HEAD(appldata_ops_list);
  92. /*************************** timer, work, DIAG *******************************/
  93. /*
  94. * appldata_timer_function()
  95. *
  96. * schedule work and reschedule timer
  97. */
  98. static void appldata_timer_function(unsigned long data)
  99. {
  100. P_DEBUG(" -= Timer =-\n");
  101. P_DEBUG("CPU: %i, expire_count: %i\n", smp_processor_id(),
  102. atomic_read(&appldata_expire_count));
  103. if (atomic_dec_and_test(&appldata_expire_count)) {
  104. atomic_set(&appldata_expire_count, num_online_cpus());
  105. queue_work(appldata_wq, (struct work_struct *) data);
  106. }
  107. }
  108. /*
  109. * appldata_work_fn()
  110. *
  111. * call data gathering function for each (active) module
  112. */
  113. static void appldata_work_fn(struct work_struct *work)
  114. {
  115. struct list_head *lh;
  116. struct appldata_ops *ops;
  117. int i;
  118. P_DEBUG(" -= Work Queue =-\n");
  119. i = 0;
  120. spin_lock(&appldata_ops_lock);
  121. list_for_each(lh, &appldata_ops_list) {
  122. ops = list_entry(lh, struct appldata_ops, list);
  123. P_DEBUG("list_for_each loop: %i) active = %u, name = %s\n",
  124. ++i, ops->active, ops->name);
  125. if (ops->active == 1) {
  126. ops->callback(ops->data);
  127. }
  128. }
  129. spin_unlock(&appldata_ops_lock);
  130. }
  131. /*
  132. * appldata_diag()
  133. *
  134. * prepare parameter list, issue DIAG 0xDC
  135. */
  136. int appldata_diag(char record_nr, u16 function, unsigned long buffer,
  137. u16 length, char *mod_lvl)
  138. {
  139. struct appldata_product_id id = {
  140. .prod_nr = {0xD3, 0xC9, 0xD5, 0xE4,
  141. 0xE7, 0xD2, 0xD9}, /* "LINUXKR" */
  142. .prod_fn = 0xD5D3, /* "NL" */
  143. .version_nr = 0xF2F6, /* "26" */
  144. .release_nr = 0xF0F1, /* "01" */
  145. };
  146. id.record_nr = record_nr;
  147. id.mod_lvl = (mod_lvl[0]) << 8 | mod_lvl[1];
  148. return appldata_asm(&id, function, (void *) buffer, length);
  149. }
/************************ timer, work, DIAG <END> ****************************/


/****************************** /proc stuff **********************************/

/*
 * appldata_mod_vtimer_wrap()
 *
 * wrapper function for mod_virt_timer(), because smp_call_function_on()
 * accepts only one parameter.
 */
static void __appldata_mod_vtimer_wrap(void *p) {
	/* layout must match the anonymous args struct built in
	   __appldata_vtimer_setup() (APPLDATA_MOD_TIMER case) */
	struct {
		struct vtimer_list *timer;
		u64 expires;
	} *args = p;
	mod_virt_timer(args->timer, args->expires);
}
#define APPLDATA_ADD_TIMER	0
#define APPLDATA_DEL_TIMER	1
#define APPLDATA_MOD_TIMER	2

/*
 * __appldata_vtimer_setup()
 *
 * Add, delete or modify virtual timers on all online cpus.
 * The caller needs to get the appldata_timer_lock spinlock.
 */
static void
__appldata_vtimer_setup(int cmd)
{
	u64 per_cpu_interval;
	int i;

	switch (cmd) {
	case APPLDATA_ADD_TIMER:
		if (appldata_timer_active)
			break;
		/* spread the total interval (ms) over the online cpus and
		   convert to TOD clock units
		   NOTE(review): appldata_interval*1000 is int arithmetic and
		   can overflow for very large intervals — confirm acceptable */
		per_cpu_interval = (u64) (appldata_interval*1000 /
					  num_online_cpus()) * TOD_MICRO;
		for_each_online_cpu(i) {
			per_cpu(appldata_timer, i).expires = per_cpu_interval;
			/* arm the periodic timer on its own cpu */
			smp_call_function_on(add_virt_timer_periodic,
					     &per_cpu(appldata_timer, i),
					     0, 1, i);
		}
		appldata_timer_active = 1;
		P_INFO("Monitoring timer started.\n");
		break;
	case APPLDATA_DEL_TIMER:
		/* timers are deleted even if the "active" flag is already
		   clear (e.g. during cpu hotplug teardown) */
		for_each_online_cpu(i)
			del_virt_timer(&per_cpu(appldata_timer, i));
		if (!appldata_timer_active)
			break;
		appldata_timer_active = 0;
		/* reset the countdown so a later restart begins a full round */
		atomic_set(&appldata_expire_count, num_online_cpus());
		P_INFO("Monitoring timer stopped.\n");
		break;
	case APPLDATA_MOD_TIMER:
		per_cpu_interval = (u64) (appldata_interval*1000 /
					  num_online_cpus()) * TOD_MICRO;
		if (!appldata_timer_active)
			break;
		for_each_online_cpu(i) {
			/* smp_call_function_on() passes a single pointer, so
			   bundle timer + expiry for __appldata_mod_vtimer_wrap */
			struct {
				struct vtimer_list *timer;
				u64 expires;
			} args;
			args.timer = &per_cpu(appldata_timer, i);
			args.expires = per_cpu_interval;
			smp_call_function_on(__appldata_mod_vtimer_wrap,
					     &args, 0, 1, i);
		}
	}
}
  220. /*
  221. * appldata_timer_handler()
  222. *
  223. * Start/Stop timer, show status of timer (0 = not active, 1 = active)
  224. */
  225. static int
  226. appldata_timer_handler(ctl_table *ctl, int write, struct file *filp,
  227. void __user *buffer, size_t *lenp, loff_t *ppos)
  228. {
  229. int len;
  230. char buf[2];
  231. if (!*lenp || *ppos) {
  232. *lenp = 0;
  233. return 0;
  234. }
  235. if (!write) {
  236. len = sprintf(buf, appldata_timer_active ? "1\n" : "0\n");
  237. if (len > *lenp)
  238. len = *lenp;
  239. if (copy_to_user(buffer, buf, len))
  240. return -EFAULT;
  241. goto out;
  242. }
  243. len = *lenp;
  244. if (copy_from_user(buf, buffer, len > sizeof(buf) ? sizeof(buf) : len))
  245. return -EFAULT;
  246. spin_lock(&appldata_timer_lock);
  247. if (buf[0] == '1')
  248. __appldata_vtimer_setup(APPLDATA_ADD_TIMER);
  249. else if (buf[0] == '0')
  250. __appldata_vtimer_setup(APPLDATA_DEL_TIMER);
  251. spin_unlock(&appldata_timer_lock);
  252. out:
  253. *lenp = len;
  254. *ppos += len;
  255. return 0;
  256. }
  257. /*
  258. * appldata_interval_handler()
  259. *
  260. * Set (CPU) timer interval for collection of data (in milliseconds), show
  261. * current timer interval.
  262. */
  263. static int
  264. appldata_interval_handler(ctl_table *ctl, int write, struct file *filp,
  265. void __user *buffer, size_t *lenp, loff_t *ppos)
  266. {
  267. int len, interval;
  268. char buf[16];
  269. if (!*lenp || *ppos) {
  270. *lenp = 0;
  271. return 0;
  272. }
  273. if (!write) {
  274. len = sprintf(buf, "%i\n", appldata_interval);
  275. if (len > *lenp)
  276. len = *lenp;
  277. if (copy_to_user(buffer, buf, len))
  278. return -EFAULT;
  279. goto out;
  280. }
  281. len = *lenp;
  282. if (copy_from_user(buf, buffer, len > sizeof(buf) ? sizeof(buf) : len)) {
  283. return -EFAULT;
  284. }
  285. interval = 0;
  286. sscanf(buf, "%i", &interval);
  287. if (interval <= 0) {
  288. P_ERROR("Timer CPU interval has to be > 0!\n");
  289. return -EINVAL;
  290. }
  291. spin_lock(&appldata_timer_lock);
  292. appldata_interval = interval;
  293. __appldata_vtimer_setup(APPLDATA_MOD_TIMER);
  294. spin_unlock(&appldata_timer_lock);
  295. P_INFO("Monitoring CPU interval set to %u milliseconds.\n",
  296. interval);
  297. out:
  298. *lenp = len;
  299. *ppos += len;
  300. return 0;
  301. }
  302. /*
  303. * appldata_generic_handler()
  304. *
  305. * Generic start/stop monitoring and DIAG, show status of
  306. * monitoring (0 = not in process, 1 = in process)
  307. */
  308. static int
  309. appldata_generic_handler(ctl_table *ctl, int write, struct file *filp,
  310. void __user *buffer, size_t *lenp, loff_t *ppos)
  311. {
  312. struct appldata_ops *ops = NULL, *tmp_ops;
  313. int rc, len, found;
  314. char buf[2];
  315. struct list_head *lh;
  316. found = 0;
  317. spin_lock(&appldata_ops_lock);
  318. list_for_each(lh, &appldata_ops_list) {
  319. tmp_ops = list_entry(lh, struct appldata_ops, list);
  320. if (&tmp_ops->ctl_table[2] == ctl) {
  321. found = 1;
  322. }
  323. }
  324. if (!found) {
  325. spin_unlock(&appldata_ops_lock);
  326. return -ENODEV;
  327. }
  328. ops = ctl->data;
  329. if (!try_module_get(ops->owner)) { // protect this function
  330. spin_unlock(&appldata_ops_lock);
  331. return -ENODEV;
  332. }
  333. spin_unlock(&appldata_ops_lock);
  334. if (!*lenp || *ppos) {
  335. *lenp = 0;
  336. module_put(ops->owner);
  337. return 0;
  338. }
  339. if (!write) {
  340. len = sprintf(buf, ops->active ? "1\n" : "0\n");
  341. if (len > *lenp)
  342. len = *lenp;
  343. if (copy_to_user(buffer, buf, len)) {
  344. module_put(ops->owner);
  345. return -EFAULT;
  346. }
  347. goto out;
  348. }
  349. len = *lenp;
  350. if (copy_from_user(buf, buffer,
  351. len > sizeof(buf) ? sizeof(buf) : len)) {
  352. module_put(ops->owner);
  353. return -EFAULT;
  354. }
  355. spin_lock(&appldata_ops_lock);
  356. if ((buf[0] == '1') && (ops->active == 0)) {
  357. // protect work queue callback
  358. if (!try_module_get(ops->owner)) {
  359. spin_unlock(&appldata_ops_lock);
  360. module_put(ops->owner);
  361. return -ENODEV;
  362. }
  363. ops->callback(ops->data); // init record
  364. rc = appldata_diag(ops->record_nr,
  365. APPLDATA_START_INTERVAL_REC,
  366. (unsigned long) ops->data, ops->size,
  367. ops->mod_lvl);
  368. if (rc != 0) {
  369. P_ERROR("START DIAG 0xDC for %s failed, "
  370. "return code: %d\n", ops->name, rc);
  371. module_put(ops->owner);
  372. } else {
  373. P_INFO("Monitoring %s data enabled, "
  374. "DIAG 0xDC started.\n", ops->name);
  375. ops->active = 1;
  376. }
  377. } else if ((buf[0] == '0') && (ops->active == 1)) {
  378. ops->active = 0;
  379. rc = appldata_diag(ops->record_nr, APPLDATA_STOP_REC,
  380. (unsigned long) ops->data, ops->size,
  381. ops->mod_lvl);
  382. if (rc != 0) {
  383. P_ERROR("STOP DIAG 0xDC for %s failed, "
  384. "return code: %d\n", ops->name, rc);
  385. } else {
  386. P_INFO("Monitoring %s data disabled, "
  387. "DIAG 0xDC stopped.\n", ops->name);
  388. }
  389. module_put(ops->owner);
  390. }
  391. spin_unlock(&appldata_ops_lock);
  392. out:
  393. *lenp = len;
  394. *ppos += len;
  395. module_put(ops->owner);
  396. return 0;
  397. }
  398. /*************************** /proc stuff <END> *******************************/
  399. /************************* module-ops management *****************************/
  400. /*
  401. * appldata_register_ops()
  402. *
  403. * update ops list, register /proc/sys entries
  404. */
  405. int appldata_register_ops(struct appldata_ops *ops)
  406. {
  407. struct list_head *lh;
  408. struct appldata_ops *tmp_ops;
  409. int i;
  410. i = 0;
  411. if ((ops->size > APPLDATA_MAX_REC_SIZE) ||
  412. (ops->size < 0)){
  413. P_ERROR("Invalid size of %s record = %i, maximum = %i!\n",
  414. ops->name, ops->size, APPLDATA_MAX_REC_SIZE);
  415. return -ENOMEM;
  416. }
  417. if ((ops->ctl_nr == CTL_APPLDATA) ||
  418. (ops->ctl_nr == CTL_APPLDATA_TIMER) ||
  419. (ops->ctl_nr == CTL_APPLDATA_INTERVAL)) {
  420. P_ERROR("ctl_nr %i already in use!\n", ops->ctl_nr);
  421. return -EBUSY;
  422. }
  423. ops->ctl_table = kzalloc(4*sizeof(struct ctl_table), GFP_KERNEL);
  424. if (ops->ctl_table == NULL) {
  425. P_ERROR("Not enough memory for %s ctl_table!\n", ops->name);
  426. return -ENOMEM;
  427. }
  428. spin_lock(&appldata_ops_lock);
  429. list_for_each(lh, &appldata_ops_list) {
  430. tmp_ops = list_entry(lh, struct appldata_ops, list);
  431. P_DEBUG("register_ops loop: %i) name = %s, ctl = %i\n",
  432. ++i, tmp_ops->name, tmp_ops->ctl_nr);
  433. P_DEBUG("Comparing %s (ctl %i) with %s (ctl %i)\n",
  434. tmp_ops->name, tmp_ops->ctl_nr, ops->name,
  435. ops->ctl_nr);
  436. if (strncmp(tmp_ops->name, ops->name,
  437. APPLDATA_PROC_NAME_LENGTH) == 0) {
  438. P_ERROR("Name \"%s\" already registered!\n", ops->name);
  439. kfree(ops->ctl_table);
  440. spin_unlock(&appldata_ops_lock);
  441. return -EBUSY;
  442. }
  443. if (tmp_ops->ctl_nr == ops->ctl_nr) {
  444. P_ERROR("ctl_nr %i already registered!\n", ops->ctl_nr);
  445. kfree(ops->ctl_table);
  446. spin_unlock(&appldata_ops_lock);
  447. return -EBUSY;
  448. }
  449. }
  450. list_add(&ops->list, &appldata_ops_list);
  451. spin_unlock(&appldata_ops_lock);
  452. ops->ctl_table[0].ctl_name = CTL_APPLDATA;
  453. ops->ctl_table[0].procname = appldata_proc_name;
  454. ops->ctl_table[0].maxlen = 0;
  455. ops->ctl_table[0].mode = S_IRUGO | S_IXUGO;
  456. ops->ctl_table[0].child = &ops->ctl_table[2];
  457. ops->ctl_table[1].ctl_name = 0;
  458. ops->ctl_table[2].ctl_name = ops->ctl_nr;
  459. ops->ctl_table[2].procname = ops->name;
  460. ops->ctl_table[2].mode = S_IRUGO | S_IWUSR;
  461. ops->ctl_table[2].proc_handler = appldata_generic_handler;
  462. ops->ctl_table[2].data = ops;
  463. ops->ctl_table[3].ctl_name = 0;
  464. ops->sysctl_header = register_sysctl_table(ops->ctl_table,1);
  465. P_INFO("%s-ops registered!\n", ops->name);
  466. return 0;
  467. }
/*
 * appldata_unregister_ops()
 *
 * update ops list, unregister /proc entries, stop DIAG if necessary
 */
void appldata_unregister_ops(struct appldata_ops *ops)
{
	void *table;

	spin_lock(&appldata_ops_lock);
	list_del(&ops->list);
	/* at that point any incoming access will fail */
	/* clear the pointer under the lock; keep the old value so the
	   table can still be freed after the sysctl entry is gone */
	table = ops->ctl_table;
	ops->ctl_table = NULL;
	spin_unlock(&appldata_ops_lock);
	unregister_sysctl_table(ops->sysctl_header);
	kfree(table);
	P_INFO("%s-ops unregistered!\n", ops->name);
}
/********************** module-ops management <END> **************************/


/******************************* init / exit *********************************/

/*
 * appldata_online_cpu()
 *
 * Set up the virtual CPU timer for a newly onlined cpu and fold it into
 * the expire countdown; then refresh all running timers.
 */
static void
appldata_online_cpu(int cpu)
{
	init_virt_timer(&per_cpu(appldata_timer, cpu));
	per_cpu(appldata_timer, cpu).function = appldata_timer_function;
	/* timer callback gets the work struct as its (unsigned long) data */
	per_cpu(appldata_timer, cpu).data = (unsigned long)
		&appldata_work;
	atomic_inc(&appldata_expire_count);
	spin_lock(&appldata_timer_lock);
	/* re-spread the interval over the new number of online cpus */
	__appldata_vtimer_setup(APPLDATA_MOD_TIMER);
	spin_unlock(&appldata_timer_lock);
}
/*
 * appldata_offline_cpu()
 *
 * Tear down the virtual CPU timer of an offlined cpu. If that cpu was the
 * last one pending in the countdown, queue the work now so a gathering
 * round is not lost; then refresh the remaining timers.
 */
static void
appldata_offline_cpu(int cpu)
{
	del_virt_timer(&per_cpu(appldata_timer, cpu));
	if (atomic_dec_and_test(&appldata_expire_count)) {
		atomic_set(&appldata_expire_count, num_online_cpus());
		queue_work(appldata_wq, &appldata_work);
	}
	spin_lock(&appldata_timer_lock);
	__appldata_vtimer_setup(APPLDATA_MOD_TIMER);
	spin_unlock(&appldata_timer_lock);
}
/*
 * appldata_cpu_notify()
 *
 * CPU hotplug notifier: mirror cpu online/offline events into the per-cpu
 * timer bookkeeping above. All other events are ignored.
 */
static int __cpuinit
appldata_cpu_notify(struct notifier_block *self,
		    unsigned long action, void *hcpu)
{
	switch (action) {
	case CPU_ONLINE:
		appldata_online_cpu((long) hcpu);
		break;
	case CPU_DEAD:
		appldata_offline_cpu((long) hcpu);
		break;
	default:
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block appldata_nb = {
	.notifier_call = appldata_cpu_notify,
};
/*
 * appldata_init()
 *
 * init timer, register /proc entries
 */
static int __init appldata_init(void)
{
	int i;

	P_DEBUG("sizeof(parameter_list) = %lu\n",
		sizeof(struct appldata_parameter_list));

	appldata_wq = create_singlethread_workqueue("appldata");
	if (!appldata_wq) {
		P_ERROR("Could not create work queue\n");
		return -ENOMEM;
	}

	/* set up per-cpu timers for all cpus that are already online */
	for_each_online_cpu(i)
		appldata_online_cpu(i);

	/* Register cpu hotplug notifier */
	register_hotcpu_notifier(&appldata_nb);

	/* NOTE(review): return value unchecked — on failure the header stays
	   NULL and the #ifdef MODULE block below would dereference ->de;
	   confirm whether failure is possible/acceptable here */
	appldata_sysctl_header = register_sysctl_table(appldata_dir_table, 1);
#ifdef MODULE
	/* built as a module (testing only): tie /proc entries to the module */
	appldata_dir_table[0].de->owner = THIS_MODULE;
	appldata_table[0].de->owner = THIS_MODULE;
	appldata_table[1].de->owner = THIS_MODULE;
#endif
	P_DEBUG("Base interface initialized.\n");
	return 0;
}
  559. /*
  560. * appldata_exit()
  561. *
  562. * stop timer, unregister /proc entries
  563. */
  564. static void __exit appldata_exit(void)
  565. {
  566. struct list_head *lh;
  567. struct appldata_ops *ops;
  568. int rc, i;
  569. P_DEBUG("Unloading module ...\n");
  570. /*
  571. * ops list should be empty, but just in case something went wrong...
  572. */
  573. spin_lock(&appldata_ops_lock);
  574. list_for_each(lh, &appldata_ops_list) {
  575. ops = list_entry(lh, struct appldata_ops, list);
  576. rc = appldata_diag(ops->record_nr, APPLDATA_STOP_REC,
  577. (unsigned long) ops->data, ops->size,
  578. ops->mod_lvl);
  579. if (rc != 0) {
  580. P_ERROR("STOP DIAG 0xDC for %s failed, "
  581. "return code: %d\n", ops->name, rc);
  582. }
  583. }
  584. spin_unlock(&appldata_ops_lock);
  585. for_each_online_cpu(i)
  586. appldata_offline_cpu(i);
  587. appldata_timer_active = 0;
  588. unregister_sysctl_table(appldata_sysctl_header);
  589. destroy_workqueue(appldata_wq);
  590. P_DEBUG("... module unloaded!\n");
  591. }
/**************************** init / exit <END> ******************************/

module_init(appldata_init);
module_exit(appldata_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Gerald Schaefer");
MODULE_DESCRIPTION("Linux-VM Monitor Stream, base infrastructure");

/* public interface for the appldata_mem / appldata_os / appldata_net_sum
   data gathering modules */
EXPORT_SYMBOL_GPL(appldata_register_ops);
EXPORT_SYMBOL_GPL(appldata_unregister_ops);
EXPORT_SYMBOL_GPL(appldata_diag);
#ifdef MODULE
/*
 * Kernel symbols needed by appldata_mem and appldata_os modules.
 * However, if this file is compiled as a module (for testing only), these
 * symbols are not exported. In this case, we define them locally and export
 * those.
 */

/* dummy: report swap totals as "not available" */
void si_swapinfo(struct sysinfo *val)
{
	val->freeswap = -1ul;
	val->totalswap = -1ul;
}

/* dummy load averages in the same FIXED_1 fixpoint format as the real
   avenrun[] */
unsigned long avenrun[3] = {-1 - FIXED_1/200, -1 - FIXED_1/200,
				-1 - FIXED_1/200};
int nr_threads = -1;

/* dummy: mark every page-state counter as "not available" */
void get_full_page_state(struct page_state *ps)
{
	memset(ps, -1, sizeof(struct page_state));
}

unsigned long nr_running(void)
{
	return -1;
}

unsigned long nr_iowait(void)
{
	return -1;
}

/*unsigned long nr_context_switches(void)
{
	return -1;
}*/
#endif /* MODULE */
/* NOTE(review): these exports sit outside the #ifdef MODULE block above, so
   in the built-in case they export the kernel's own definitions — confirm
   that this is intended */
EXPORT_SYMBOL_GPL(si_swapinfo);
EXPORT_SYMBOL_GPL(nr_threads);
EXPORT_SYMBOL_GPL(nr_running);
EXPORT_SYMBOL_GPL(nr_iowait);
//EXPORT_SYMBOL_GPL(nr_context_switches);