/*
 * taskstats.c - Export per-task statistics to userland
 *
 * Copyright (C) Shailabh Nagar, IBM Corp. 2006
 *           (C) Balbir Singh,   IBM Corp. 2006
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */
#include <linux/kernel.h>
#include <linux/taskstats_kern.h>
#include <linux/tsacct_kern.h>
#include <linux/delayacct.h>
#include <linux/cpumask.h>
#include <linux/percpu.h>
#include <linux/cgroupstats.h>
#include <linux/cgroup.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <net/genetlink.h>
#include <asm/atomic.h>
/*
 * Maximum length of a cpumask that can be specified in
 * the TASKSTATS_CMD_ATTR_REGISTER/DEREGISTER_CPUMASK attribute
 */
#define TASKSTATS_CPUMASK_MAXLEN	(100 + 6 * NR_CPUS)

static DEFINE_PER_CPU(__u32, taskstats_seqnum);
static int family_registered;
struct kmem_cache *taskstats_cache;

static struct genl_family family = {
	.id		= GENL_ID_GENERATE,
	.name		= TASKSTATS_GENL_NAME,
	.version	= TASKSTATS_GENL_VERSION,
	.maxattr	= TASKSTATS_CMD_ATTR_MAX,
};

static struct nla_policy taskstats_cmd_get_policy[TASKSTATS_CMD_ATTR_MAX+1]
__read_mostly = {
	[TASKSTATS_CMD_ATTR_PID]  = { .type = NLA_U32 },
	[TASKSTATS_CMD_ATTR_TGID] = { .type = NLA_U32 },
	[TASKSTATS_CMD_ATTR_REGISTER_CPUMASK] = { .type = NLA_STRING },
	[TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK] = { .type = NLA_STRING },
};

static struct nla_policy
cgroupstats_cmd_get_policy[CGROUPSTATS_CMD_ATTR_MAX+1] __read_mostly = {
	[CGROUPSTATS_CMD_ATTR_FD] = { .type = NLA_U32 },
};

struct listener {
	struct list_head list;
	pid_t pid;
	char valid;
};

struct listener_list {
	struct rw_semaphore sem;
	struct list_head list;
};
static DEFINE_PER_CPU(struct listener_list, listener_array);

enum actions {
	REGISTER,
	DEREGISTER,
	CPU_DONT_CARE
};
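
/*
 * Allocate a genetlink message of @size bytes and open a reply of type
 * @cmd in it. If @info is NULL the message is a standalone packet
 * stamped with a per-cpu sequence number; otherwise it is a direct
 * reply to the request described by @info. On success *@skbp holds
 * the new skb.
 */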
static int prepare_reply(struct genl_info *info, u8 cmd, struct sk_buff **skbp,
			 size_t size)
{
	struct sk_buff *skb;
	void *reply;

	/*
	 * If new attributes are added, please revisit this allocation
	 */
	skb = genlmsg_new(size, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	if (!info) {
		int seq = get_cpu_var(taskstats_seqnum)++;
		put_cpu_var(taskstats_seqnum);

		reply = genlmsg_put(skb, 0, seq, &family, 0, cmd);
	} else
		reply = genlmsg_put_reply(skb, info, &family, 0, cmd);
	if (reply == NULL) {
		nlmsg_free(skb);
		return -EINVAL;
	}

	*skbp = skb;
	return 0;
}
/*
 * Send taskstats data in @skb to listener with nl_pid @pid
 */
static int send_reply(struct sk_buff *skb, pid_t pid)
{
	struct genlmsghdr *genlhdr = nlmsg_data(nlmsg_hdr(skb));
	void *reply = genlmsg_data(genlhdr);
	int rc;

	rc = genlmsg_end(skb, reply);
	if (rc < 0) {
		nlmsg_free(skb);
		return rc;
	}

	return genlmsg_unicast(skb, pid);
}
/*
 * Send taskstats data in @skb to listeners registered for @cpu's exit data
 */
static void send_cpu_listeners(struct sk_buff *skb,
					struct listener_list *listeners)
{
	struct genlmsghdr *genlhdr = nlmsg_data(nlmsg_hdr(skb));
	struct listener *s, *tmp;
	struct sk_buff *skb_next, *skb_cur = skb;
	void *reply = genlmsg_data(genlhdr);
	int rc, delcount = 0;

	rc = genlmsg_end(skb, reply);
	if (rc < 0) {
		nlmsg_free(skb);
		return;
	}

	rc = 0;
	down_read(&listeners->sem);
	list_for_each_entry(s, &listeners->list, list) {
		skb_next = NULL;
		if (!list_is_last(&s->list, &listeners->list)) {
			skb_next = skb_clone(skb_cur, GFP_KERNEL);
			if (!skb_next)
				break;
		}
		rc = genlmsg_unicast(skb_cur, s->pid);
		if (rc == -ECONNREFUSED) {
			s->valid = 0;
			delcount++;
		}
		skb_cur = skb_next;
	}
	up_read(&listeners->sem);

	if (skb_cur)
		nlmsg_free(skb_cur);

	if (!delcount)
		return;

	/* Delete invalidated entries */
	down_write(&listeners->sem);
	list_for_each_entry_safe(s, tmp, &listeners->list, list) {
		if (!s->valid) {
			list_del(&s->list);
			kfree(s);
		}
	}
	up_write(&listeners->sem);
}
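
/*
 * Fill @stats with the accounting data for a single task: @tsk if it
 * is non-NULL, otherwise the task identified by @pid. Takes and drops
 * a reference on the task_struct around the accounting calls.
 */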
static int fill_pid(pid_t pid, struct task_struct *tsk,
		struct taskstats *stats)
{
	int rc = 0;

	if (!tsk) {
		rcu_read_lock();
		tsk = find_task_by_vpid(pid);
		if (tsk)
			get_task_struct(tsk);
		rcu_read_unlock();
		if (!tsk)
			return -ESRCH;
	} else
		get_task_struct(tsk);

	memset(stats, 0, sizeof(*stats));
	/*
	 * Each accounting subsystem adds calls to its functions to
	 * fill in relevant parts of struct taskstats as follows
	 *
	 *	per-task-foo(stats, tsk);
	 */
	delayacct_add_tsk(stats, tsk);

	/* fill in basic acct fields */
	stats->version = TASKSTATS_VERSION;
	stats->nvcsw = tsk->nvcsw;
	stats->nivcsw = tsk->nivcsw;
	bacct_add_tsk(stats, tsk);

	/* fill in extended acct fields */
	xacct_add_tsk(stats, tsk);

	/* Define err: label here if needed */
	put_task_struct(tsk);
	return rc;
}
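
/*
 * Fill @stats with aggregated accounting data for the whole thread
 * group identified by @tgid (or led by @first if non-NULL): start from
 * the stats already accumulated for dead threads in signal->stats,
 * then add the contribution of each live thread on top.
 */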
static int fill_tgid(pid_t tgid, struct task_struct *first,
		struct taskstats *stats)
{
	struct task_struct *tsk;
	unsigned long flags;
	int rc = -ESRCH;

	/*
	 * Add additional stats from live tasks except zombie thread group
	 * leaders who are already counted with the dead tasks
	 */
	rcu_read_lock();
	if (!first)
		first = find_task_by_vpid(tgid);

	if (!first || !lock_task_sighand(first, &flags))
		goto out;

	if (first->signal->stats)
		memcpy(stats, first->signal->stats, sizeof(*stats));
	else
		memset(stats, 0, sizeof(*stats));

	tsk = first;
	do {
		if (tsk->exit_state)
			continue;
		/*
		 * Accounting subsystem can call its functions here to
		 * fill in relevant parts of struct taskstats as follows
		 *
		 *	per-task-foo(stats, tsk);
		 */
		delayacct_add_tsk(stats, tsk);

		stats->nvcsw += tsk->nvcsw;
		stats->nivcsw += tsk->nivcsw;
	} while_each_thread(first, tsk);

	unlock_task_sighand(first, &flags);
	rc = 0;
out:
	rcu_read_unlock();

	stats->version = TASKSTATS_VERSION;
	/*
	 * Accounting subsystems can also add calls here to modify
	 * fields of taskstats.
	 */
	return rc;
}
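
/*
 * Fold the exiting task's per-task statistics into the per-tgid
 * structure hanging off tsk->signal, under siglock.
 */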
static void fill_tgid_exit(struct task_struct *tsk)
{
	unsigned long flags;

	spin_lock_irqsave(&tsk->sighand->siglock, flags);
	if (!tsk->signal->stats)
		goto ret;

	/*
	 * Each accounting subsystem calls its functions here to
	 * accumulate its per-task stats for tsk, into the per-tgid structure
	 *
	 *	per-task-foo(tsk->signal->stats, tsk);
	 */
	delayacct_add_tsk(tsk->signal->stats, tsk);
ret:
	spin_unlock_irqrestore(&tsk->sighand->siglock, flags);
	return;
}
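
/*
 * Register or deregister @pid as a listener for exit data on every cpu
 * in @mask, depending on @isadd. The deregister path doubles as the
 * cleanup path when registration fails partway through.
 */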
static int add_del_listener(pid_t pid, const struct cpumask *mask, int isadd)
{
	struct listener_list *listeners;
	struct listener *s, *tmp;
	unsigned int cpu;

	if (!cpumask_subset(mask, cpu_possible_mask))
		return -EINVAL;

	if (isadd == REGISTER) {
		for_each_cpu(cpu, mask) {
			s = kmalloc_node(sizeof(struct listener), GFP_KERNEL,
					 cpu_to_node(cpu));
			if (!s)
				goto cleanup;
			s->pid = pid;
			INIT_LIST_HEAD(&s->list);
			s->valid = 1;

			listeners = &per_cpu(listener_array, cpu);
			down_write(&listeners->sem);
			list_add(&s->list, &listeners->list);
			up_write(&listeners->sem);
		}
		return 0;
	}

	/* Deregister or cleanup */
cleanup:
	for_each_cpu(cpu, mask) {
		listeners = &per_cpu(listener_array, cpu);
		down_write(&listeners->sem);
		list_for_each_entry_safe(s, tmp, &listeners->list, list) {
			if (s->pid == pid) {
				list_del(&s->list);
				kfree(s);
				break;
			}
		}
		up_write(&listeners->sem);
	}
	return 0;
}
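
/*
 * Parse the cpulist string carried in attribute @na into @mask.
 * Returns 1 if the attribute is absent, 0 on success, negative errno
 * on error.
 */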
static int parse(struct nlattr *na, struct cpumask *mask)
{
	char *data;
	int len;
	int ret;

	if (na == NULL)
		return 1;
	len = nla_len(na);
	if (len > TASKSTATS_CPUMASK_MAXLEN)
		return -E2BIG;
	if (len < 1)
		return -EINVAL;
	data = kmalloc(len, GFP_KERNEL);
	if (!data)
		return -ENOMEM;
	nla_strlcpy(data, na, len);
	ret = cpulist_parse(data, mask);
	kfree(data);
	return ret;
}
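
/*
 * Append a nested PID/TGID aggregate attribute to @skb and reserve
 * room inside it for the taskstats payload. Returns a pointer to the
 * reserved struct taskstats, or NULL if the skb ran out of room.
 */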
static struct taskstats *mk_reply(struct sk_buff *skb, int type, u32 pid)
{
	struct nlattr *na, *ret;
	int aggr;

	aggr = (type == TASKSTATS_TYPE_PID)
			? TASKSTATS_TYPE_AGGR_PID
			: TASKSTATS_TYPE_AGGR_TGID;

	na = nla_nest_start(skb, aggr);
	if (!na)
		goto err;
	if (nla_put(skb, type, sizeof(pid), &pid) < 0)
		goto err;
	ret = nla_reserve(skb, TASKSTATS_TYPE_STATS, sizeof(struct taskstats));
	if (!ret)
		goto err;
	nla_nest_end(skb, na);

	return nla_data(ret);
err:
	return NULL;
}
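
/*
 * Handle CGROUPSTATS_CMD_GET: look up the cgroup directory given by
 * the CGROUPSTATS_CMD_ATTR_FD attribute, build a cgroupstats reply
 * and unicast it back to the requester.
 */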
static int cgroupstats_user_cmd(struct sk_buff *skb, struct genl_info *info)
{
	int rc = 0;
	struct sk_buff *rep_skb;
	struct cgroupstats *stats;
	struct nlattr *na;
	size_t size;
	u32 fd;
	struct file *file;
	int fput_needed;

	na = info->attrs[CGROUPSTATS_CMD_ATTR_FD];
	if (!na)
		return -EINVAL;

	fd = nla_get_u32(info->attrs[CGROUPSTATS_CMD_ATTR_FD]);
	file = fget_light(fd, &fput_needed);
	if (!file)
		return 0;

	size = nla_total_size(sizeof(struct cgroupstats));

	rc = prepare_reply(info, CGROUPSTATS_CMD_NEW, &rep_skb,
				size);
	if (rc < 0)
		goto err;

	na = nla_reserve(rep_skb, CGROUPSTATS_TYPE_CGROUP_STATS,
				sizeof(struct cgroupstats));
	/* nla_reserve() returns NULL when the skb has no room left;
	 * bail out rather than dereferencing a NULL attribute below */
	if (na == NULL) {
		nlmsg_free(rep_skb);
		rc = -EMSGSIZE;
		goto err;
	}

	stats = nla_data(na);
	memset(stats, 0, sizeof(*stats));

	rc = cgroupstats_build(stats, file->f_dentry);
	if (rc < 0) {
		nlmsg_free(rep_skb);
		goto err;
	}

	rc = send_reply(rep_skb, info->snd_pid);

err:
	fput_light(file, fput_needed);
	return rc;
}
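
/*
 * Handle TASKSTATS_CMD_GET: either (de)register the sender as a
 * per-cpu exit-data listener if a cpumask attribute is present, or
 * build and send a one-shot reply for the requested pid or tgid.
 */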
static int taskstats_user_cmd(struct sk_buff *skb, struct genl_info *info)
{
	int rc;
	struct sk_buff *rep_skb;
	struct taskstats *stats;
	size_t size;
	cpumask_var_t mask;

	if (!alloc_cpumask_var(&mask, GFP_KERNEL))
		return -ENOMEM;

	rc = parse(info->attrs[TASKSTATS_CMD_ATTR_REGISTER_CPUMASK], mask);
	if (rc < 0)
		goto free_return_rc;
	if (rc == 0) {
		rc = add_del_listener(info->snd_pid, mask, REGISTER);
		goto free_return_rc;
	}

	rc = parse(info->attrs[TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK], mask);
	if (rc < 0)
		goto free_return_rc;
	if (rc == 0) {
		rc = add_del_listener(info->snd_pid, mask, DEREGISTER);
free_return_rc:
		free_cpumask_var(mask);
		return rc;
	}
	free_cpumask_var(mask);

	/*
	 * Size includes space for nested attributes
	 */
	size = nla_total_size(sizeof(u32)) +
		nla_total_size(sizeof(struct taskstats)) + nla_total_size(0);

	rc = prepare_reply(info, TASKSTATS_CMD_NEW, &rep_skb, size);
	if (rc < 0)
		return rc;

	rc = -EINVAL;
	if (info->attrs[TASKSTATS_CMD_ATTR_PID]) {
		u32 pid = nla_get_u32(info->attrs[TASKSTATS_CMD_ATTR_PID]);
		stats = mk_reply(rep_skb, TASKSTATS_TYPE_PID, pid);
		if (!stats)
			goto err;

		rc = fill_pid(pid, NULL, stats);
		if (rc < 0)
			goto err;
	} else if (info->attrs[TASKSTATS_CMD_ATTR_TGID]) {
		u32 tgid = nla_get_u32(info->attrs[TASKSTATS_CMD_ATTR_TGID]);
		stats = mk_reply(rep_skb, TASKSTATS_TYPE_TGID, tgid);
		if (!stats)
			goto err;

		rc = fill_tgid(tgid, NULL, stats);
		if (rc < 0)
			goto err;
	} else
		goto err;

	return send_reply(rep_skb, info->snd_pid);
err:
	nlmsg_free(rep_skb);
	return rc;
}
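
/*
 * Lazily allocate the per-thread-group taskstats structure on first
 * use, resolving a race with a concurrent allocator under siglock.
 * Returns sig->stats, which stays NULL if the group is empty or the
 * allocation failed.
 */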
static struct taskstats *taskstats_tgid_alloc(struct task_struct *tsk)
{
	struct signal_struct *sig = tsk->signal;
	struct taskstats *stats;

	if (sig->stats || thread_group_empty(tsk))
		goto ret;

	/* No problem if kmem_cache_zalloc() fails */
	stats = kmem_cache_zalloc(taskstats_cache, GFP_KERNEL);

	spin_lock_irq(&tsk->sighand->siglock);
	if (!sig->stats) {
		sig->stats = stats;
		stats = NULL;
	}
	spin_unlock_irq(&tsk->sighand->siglock);

	if (stats)
		kmem_cache_free(taskstats_cache, stats);
ret:
	return sig->stats;
}
/* Send pid data out on exit */
void taskstats_exit(struct task_struct *tsk, int group_dead)
{
	int rc;
	struct listener_list *listeners;
	struct taskstats *stats;
	struct sk_buff *rep_skb;
	size_t size;
	int is_thread_group;

	if (!family_registered)
		return;

	/*
	 * Size includes space for nested attributes
	 */
	size = nla_total_size(sizeof(u32)) +
		nla_total_size(sizeof(struct taskstats)) + nla_total_size(0);

	is_thread_group = !!taskstats_tgid_alloc(tsk);
	if (is_thread_group) {
		/* PID + STATS + TGID + STATS */
		size = 2 * size;
		/* fill the tsk->signal->stats structure */
		fill_tgid_exit(tsk);
	}

	listeners = &__raw_get_cpu_var(listener_array);
	if (list_empty(&listeners->list))
		return;

	rc = prepare_reply(NULL, TASKSTATS_CMD_NEW, &rep_skb, size);
	if (rc < 0)
		return;

	stats = mk_reply(rep_skb, TASKSTATS_TYPE_PID, tsk->pid);
	if (!stats)
		goto err;

	rc = fill_pid(-1, tsk, stats);
	if (rc < 0)
		goto err;

	/*
	 * Doesn't matter if tsk is the leader or the last group member leaving
	 */
	if (!is_thread_group || !group_dead)
		goto send;

	stats = mk_reply(rep_skb, TASKSTATS_TYPE_TGID, tsk->tgid);
	if (!stats)
		goto err;

	memcpy(stats, tsk->signal->stats, sizeof(*stats));

send:
	send_cpu_listeners(rep_skb, listeners);
	return;
err:
	nlmsg_free(rep_skb);
}
static struct genl_ops taskstats_ops = {
	.cmd		= TASKSTATS_CMD_GET,
	.doit		= taskstats_user_cmd,
	.policy		= taskstats_cmd_get_policy,
};

static struct genl_ops cgroupstats_ops = {
	.cmd		= CGROUPSTATS_CMD_GET,
	.doit		= cgroupstats_user_cmd,
	.policy		= cgroupstats_cmd_get_policy,
};
/* Needed early in initialization */
void __init taskstats_init_early(void)
{
	unsigned int i;

	taskstats_cache = KMEM_CACHE(taskstats, SLAB_PANIC);
	for_each_possible_cpu(i) {
		INIT_LIST_HEAD(&(per_cpu(listener_array, i).list));
		init_rwsem(&(per_cpu(listener_array, i).sem));
	}
}
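
/*
 * Register the taskstats genetlink family and its two operations with
 * the netlink layer, unwinding registrations on partial failure.
 */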
static int __init taskstats_init(void)
{
	int rc;

	rc = genl_register_family(&family);
	if (rc)
		return rc;

	rc = genl_register_ops(&family, &taskstats_ops);
	if (rc < 0)
		goto err;

	rc = genl_register_ops(&family, &cgroupstats_ops);
	if (rc < 0)
		goto err_cgroup_ops;

	family_registered = 1;
	printk("registered taskstats version %d\n", TASKSTATS_GENL_VERSION);
	return 0;
err_cgroup_ops:
	genl_unregister_ops(&family, &taskstats_ops);
err:
	genl_unregister_family(&family);
	return rc;
}

/*
 * late initcall ensures initialization of statistics collection
 * mechanisms precedes initialization of the taskstats interface
 */
late_initcall(taskstats_init);