taskstats.c
/*
 * taskstats.c - Export per-task statistics to userland
 *
 * Copyright (C) Shailabh Nagar, IBM Corp. 2006
 * (C) Balbir Singh, IBM Corp. 2006
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */
#include <linux/kernel.h>
#include <linux/taskstats_kern.h>
#include <linux/tsacct_kern.h>
#include <linux/delayacct.h>
#include <linux/cpumask.h>
#include <linux/percpu.h>
#include <net/genetlink.h>
#include <asm/atomic.h>

/*
 * Maximum length of a cpumask that can be specified in
 * the TASKSTATS_CMD_ATTR_REGISTER/DEREGISTER_CPUMASK attribute
 */
#define TASKSTATS_CPUMASK_MAXLEN	(100 + 6 * NR_CPUS)

static DEFINE_PER_CPU(__u32, taskstats_seqnum) = { 0 };
static int family_registered;
kmem_cache_t *taskstats_cache;

static struct genl_family family = {
	.id		= GENL_ID_GENERATE,
	.name		= TASKSTATS_GENL_NAME,
	.version	= TASKSTATS_GENL_VERSION,
	.maxattr	= TASKSTATS_CMD_ATTR_MAX,
};

static struct nla_policy taskstats_cmd_get_policy[TASKSTATS_CMD_ATTR_MAX+1]
__read_mostly = {
	[TASKSTATS_CMD_ATTR_PID] = { .type = NLA_U32 },
	[TASKSTATS_CMD_ATTR_TGID] = { .type = NLA_U32 },
	[TASKSTATS_CMD_ATTR_REGISTER_CPUMASK] = { .type = NLA_STRING },
	[TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK] = { .type = NLA_STRING },};
struct listener {
	struct list_head list;
	pid_t pid;
	char valid;
};

struct listener_list {
	struct rw_semaphore sem;
	struct list_head list;
};
static DEFINE_PER_CPU(struct listener_list, listener_array);

enum actions {
	REGISTER,
	DEREGISTER,
	CPU_DONT_CARE
};
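/*
 * Allocate a new sk_buff and start a generic netlink message in it.
 * If @info is NULL the message is kernel-generated (used for exit
 * events) and gets a per-cpu sequence number; otherwise the requester's
 * pid and sequence number from @info are used.  On success *skbp and
 * *replyp are set for the caller to fill in and send.
 */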
static int prepare_reply(struct genl_info *info, u8 cmd, struct sk_buff **skbp,
				void **replyp, size_t size)
{
	struct sk_buff *skb;
	void *reply;

	/*
	 * If new attributes are added, please revisit this allocation
	 */
	size = nlmsg_total_size(genlmsg_total_size(size));
	skb = nlmsg_new(size, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	if (!info) {
		int seq = get_cpu_var(taskstats_seqnum)++;
		put_cpu_var(taskstats_seqnum);

		reply = genlmsg_put(skb, 0, seq,
				family.id, 0, 0,
				cmd, family.version);
	} else
		reply = genlmsg_put(skb, info->snd_pid, info->snd_seq,
				family.id, 0, 0,
				cmd, family.version);
	if (reply == NULL) {
		nlmsg_free(skb);
		return -EINVAL;
	}

	*skbp = skb;
	*replyp = reply;
	return 0;
}
/*
 * Send taskstats data in @skb to listener with nl_pid @pid
 */
static int send_reply(struct sk_buff *skb, pid_t pid)
{
	struct genlmsghdr *genlhdr = nlmsg_data((struct nlmsghdr *)skb->data);
	void *reply = genlmsg_data(genlhdr);
	int rc;

	rc = genlmsg_end(skb, reply);
	if (rc < 0) {
		nlmsg_free(skb);
		return rc;
	}

	return genlmsg_unicast(skb, pid);
}
/*
 * Send taskstats data in @skb to listeners registered for @cpu's exit data
 */
static void send_cpu_listeners(struct sk_buff *skb, unsigned int cpu)
{
	struct genlmsghdr *genlhdr = nlmsg_data((struct nlmsghdr *)skb->data);
	struct listener_list *listeners;
	struct listener *s, *tmp;
	struct sk_buff *skb_next, *skb_cur = skb;
	void *reply = genlmsg_data(genlhdr);
	int rc, delcount = 0;

	rc = genlmsg_end(skb, reply);
	if (rc < 0) {
		nlmsg_free(skb);
		return;
	}

	rc = 0;
	listeners = &per_cpu(listener_array, cpu);
	down_read(&listeners->sem);
	list_for_each_entry(s, &listeners->list, list) {
		skb_next = NULL;
		if (!list_is_last(&s->list, &listeners->list)) {
			skb_next = skb_clone(skb_cur, GFP_KERNEL);
			if (!skb_next)
				break;
		}
		rc = genlmsg_unicast(skb_cur, s->pid);
		if (rc == -ECONNREFUSED) {
			s->valid = 0;
			delcount++;
		}
		skb_cur = skb_next;
	}
	up_read(&listeners->sem);

	if (skb_cur)
		nlmsg_free(skb_cur);

	if (!delcount)
		return;

	/* Delete invalidated entries */
	down_write(&listeners->sem);
	list_for_each_entry_safe(s, tmp, &listeners->list, list) {
		if (!s->valid) {
			list_del(&s->list);
			kfree(s);
		}
	}
	up_write(&listeners->sem);
}
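/*
 * Fill @stats with the accounting data of a single task, looked up by
 * @pid when the caller does not already hold a reference in @tsk.
 */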
static int fill_pid(pid_t pid, struct task_struct *tsk,
		struct taskstats *stats)
{
	int rc = 0;

	if (!tsk) {
		rcu_read_lock();
		tsk = find_task_by_pid(pid);
		if (tsk)
			get_task_struct(tsk);
		rcu_read_unlock();
		if (!tsk)
			return -ESRCH;
	} else
		get_task_struct(tsk);

	/*
	 * Each accounting subsystem adds calls to its functions to
	 * fill in relevant parts of struct taskstats as follows
	 *
	 *	per-task-foo(stats, tsk);
	 */
	delayacct_add_tsk(stats, tsk);

	/* fill in basic acct fields */
	stats->version = TASKSTATS_VERSION;
	bacct_add_tsk(stats, tsk);

	/* fill in extended acct fields */
	xacct_add_tsk(stats, tsk);

	/* Define err: label here if needed */
	put_task_struct(tsk);
	return rc;
}
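/*
 * Fill @stats with the accumulated statistics of thread group @tgid:
 * start from the per-tgid stats saved for already-exited members (if
 * any) and add the delay accounting of each live thread.
 */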
static int fill_tgid(pid_t tgid, struct task_struct *first,
		struct taskstats *stats)
{
	struct task_struct *tsk;
	unsigned long flags;
	int rc = -ESRCH;

	/*
	 * Add additional stats from live tasks except zombie thread group
	 * leaders who are already counted with the dead tasks
	 */
	rcu_read_lock();
	if (!first)
		first = find_task_by_pid(tgid);

	if (!first || !lock_task_sighand(first, &flags))
		goto out;

	if (first->signal->stats)
		memcpy(stats, first->signal->stats, sizeof(*stats));

	tsk = first;
	do {
		if (tsk->exit_state)
			continue;
		/*
		 * Accounting subsystem can call its functions here to
		 * fill in relevant parts of struct taskstats as follows
		 *
		 *	per-task-foo(stats, tsk);
		 */
		delayacct_add_tsk(stats, tsk);

	} while_each_thread(first, tsk);

	unlock_task_sighand(first, &flags);
	rc = 0;
out:
	rcu_read_unlock();

	stats->version = TASKSTATS_VERSION;
	/*
	 * Accounting subsystems can also add calls here to modify
	 * fields of taskstats.
	 */
	return rc;
}
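/*
 * Fold an exiting task's per-task statistics into the per-tgid stats
 * kept in its signal struct, under siglock.
 */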
static void fill_tgid_exit(struct task_struct *tsk)
{
	unsigned long flags;

	spin_lock_irqsave(&tsk->sighand->siglock, flags);
	if (!tsk->signal->stats)
		goto ret;

	/*
	 * Each accounting subsystem calls its functions here to
	 * accumulate its per-task stats for tsk, into the per-tgid structure
	 *
	 *	per-task-foo(tsk->signal->stats, tsk);
	 */
	delayacct_add_tsk(tsk->signal->stats, tsk);
ret:
	spin_unlock_irqrestore(&tsk->sighand->siglock, flags);
	return;
}
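/*
 * Register (isadd == REGISTER) or deregister listener @pid on the
 * per-cpu listener lists of every cpu in @maskp.
 */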
static int add_del_listener(pid_t pid, cpumask_t *maskp, int isadd)
{
	struct listener_list *listeners;
	struct listener *s, *tmp;
	unsigned int cpu;
	cpumask_t mask = *maskp;

	if (!cpus_subset(mask, cpu_possible_map))
		return -EINVAL;

	if (isadd == REGISTER) {
		for_each_cpu_mask(cpu, mask) {
			s = kmalloc_node(sizeof(struct listener), GFP_KERNEL,
					 cpu_to_node(cpu));
			if (!s)
				goto cleanup;
			s->pid = pid;
			INIT_LIST_HEAD(&s->list);
			s->valid = 1;

			listeners = &per_cpu(listener_array, cpu);
			down_write(&listeners->sem);
			list_add(&s->list, &listeners->list);
			up_write(&listeners->sem);
		}
		return 0;
	}

	/* Deregister or cleanup */
cleanup:
	for_each_cpu_mask(cpu, mask) {
		listeners = &per_cpu(listener_array, cpu);
		down_write(&listeners->sem);
		list_for_each_entry_safe(s, tmp, &listeners->list, list) {
			if (s->pid == pid) {
				list_del(&s->list);
				kfree(s);
				break;
			}
		}
		up_write(&listeners->sem);
	}
	return 0;
}
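/*
 * Parse a cpulist string supplied in netlink attribute @na into @mask.
 * Returns 0 on success, 1 if the attribute is absent, negative on error.
 */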
static int parse(struct nlattr *na, cpumask_t *mask)
{
	char *data;
	int len;
	int ret;

	if (na == NULL)
		return 1;
	len = nla_len(na);
	if (len > TASKSTATS_CPUMASK_MAXLEN)
		return -E2BIG;
	if (len < 1)
		return -EINVAL;
	data = kmalloc(len, GFP_KERNEL);
	if (!data)
		return -ENOMEM;
	nla_strlcpy(data, na, len);
	ret = cpulist_parse(data, *mask);
	kfree(data);
	return ret;
}
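/*
 * Handle a TASKSTATS_CMD_GET request: either (de)register the sender as
 * a listener for the given cpumask, or reply with the statistics of the
 * requested pid or tgid.
 */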
static int taskstats_user_cmd(struct sk_buff *skb, struct genl_info *info)
{
	int rc = 0;
	struct sk_buff *rep_skb;
	struct taskstats stats;
	void *reply;
	size_t size;
	struct nlattr *na;
	cpumask_t mask;

	rc = parse(info->attrs[TASKSTATS_CMD_ATTR_REGISTER_CPUMASK], &mask);
	if (rc < 0)
		return rc;
	if (rc == 0)
		return add_del_listener(info->snd_pid, &mask, REGISTER);

	rc = parse(info->attrs[TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK], &mask);
	if (rc < 0)
		return rc;
	if (rc == 0)
		return add_del_listener(info->snd_pid, &mask, DEREGISTER);

	/*
	 * Size includes space for nested attributes
	 */
	size = nla_total_size(sizeof(u32)) +
		nla_total_size(sizeof(struct taskstats)) + nla_total_size(0);

	memset(&stats, 0, sizeof(stats));

	rc = prepare_reply(info, TASKSTATS_CMD_NEW, &rep_skb, &reply, size);
	if (rc < 0)
		return rc;

	if (info->attrs[TASKSTATS_CMD_ATTR_PID]) {
		u32 pid = nla_get_u32(info->attrs[TASKSTATS_CMD_ATTR_PID]);
		rc = fill_pid(pid, NULL, &stats);
		if (rc < 0)
			goto err;

		na = nla_nest_start(rep_skb, TASKSTATS_TYPE_AGGR_PID);
		NLA_PUT_U32(rep_skb, TASKSTATS_TYPE_PID, pid);
		NLA_PUT_TYPE(rep_skb, struct taskstats, TASKSTATS_TYPE_STATS,
				stats);
	} else if (info->attrs[TASKSTATS_CMD_ATTR_TGID]) {
		u32 tgid = nla_get_u32(info->attrs[TASKSTATS_CMD_ATTR_TGID]);
		rc = fill_tgid(tgid, NULL, &stats);
		if (rc < 0)
			goto err;

		na = nla_nest_start(rep_skb, TASKSTATS_TYPE_AGGR_TGID);
		NLA_PUT_U32(rep_skb, TASKSTATS_TYPE_TGID, tgid);
		NLA_PUT_TYPE(rep_skb, struct taskstats, TASKSTATS_TYPE_STATS,
				stats);
	} else {
		rc = -EINVAL;
		goto err;
	}

	nla_nest_end(rep_skb, na);

	return send_reply(rep_skb, info->snd_pid);

nla_put_failure:
	rc = genlmsg_cancel(rep_skb, reply);
err:
	nlmsg_free(rep_skb);
	return rc;
}
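/*
 * Allocate a taskstats structure for an exiting task, but only if some
 * listener is registered on the cpu recorded in *mycpu; otherwise leave
 * *ptidstats NULL so no exit data is sent.
 */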
void taskstats_exit_alloc(struct taskstats **ptidstats, unsigned int *mycpu)
{
	struct listener_list *listeners;
	struct taskstats *tmp;

	/*
	 * This is the cpu on which the task is exiting currently and will
	 * be the one for which the exit event is sent, even if the cpu
	 * on which this function is running changes later.
	 */
	*mycpu = raw_smp_processor_id();

	*ptidstats = NULL;
	tmp = kmem_cache_zalloc(taskstats_cache, SLAB_KERNEL);
	if (!tmp)
		return;

	listeners = &per_cpu(listener_array, *mycpu);
	down_read(&listeners->sem);
	if (!list_empty(&listeners->list)) {
		*ptidstats = tmp;
		tmp = NULL;
	}
	up_read(&listeners->sem);
	kfree(tmp);
}
/* Send pid data out on exit */
void taskstats_exit_send(struct task_struct *tsk, struct taskstats *tidstats,
			int group_dead, unsigned int mycpu)
{
	int rc;
	struct sk_buff *rep_skb;
	void *reply;
	size_t size;
	int is_thread_group;
	struct nlattr *na;

	if (!family_registered)
		return;

	/*
	 * Size includes space for nested attributes
	 */
	size = nla_total_size(sizeof(u32)) +
		nla_total_size(sizeof(struct taskstats)) + nla_total_size(0);

	is_thread_group = (tsk->signal->stats != NULL);
	if (is_thread_group) {
		/* PID + STATS + TGID + STATS */
		size = 2 * size;
		/* fill the tsk->signal->stats structure */
		fill_tgid_exit(tsk);
	}

	if (!tidstats)
		return;

	rc = prepare_reply(NULL, TASKSTATS_CMD_NEW, &rep_skb, &reply, size);
	if (rc < 0)
		goto ret;

	rc = fill_pid(tsk->pid, tsk, tidstats);
	if (rc < 0)
		goto err_skb;

	na = nla_nest_start(rep_skb, TASKSTATS_TYPE_AGGR_PID);
	NLA_PUT_U32(rep_skb, TASKSTATS_TYPE_PID, (u32)tsk->pid);
	NLA_PUT_TYPE(rep_skb, struct taskstats, TASKSTATS_TYPE_STATS,
			*tidstats);
	nla_nest_end(rep_skb, na);

	if (!is_thread_group)
		goto send;

	/*
	 * Doesn't matter if tsk is the leader or the last group member leaving
	 */
	if (!group_dead)
		goto send;

	na = nla_nest_start(rep_skb, TASKSTATS_TYPE_AGGR_TGID);
	NLA_PUT_U32(rep_skb, TASKSTATS_TYPE_TGID, (u32)tsk->tgid);
	/* No locking needed for tsk->signal->stats since group is dead */
	NLA_PUT_TYPE(rep_skb, struct taskstats, TASKSTATS_TYPE_STATS,
			*tsk->signal->stats);
	nla_nest_end(rep_skb, na);

send:
	send_cpu_listeners(rep_skb, mycpu);
	return;

nla_put_failure:
	genlmsg_cancel(rep_skb, reply);
err_skb:
	nlmsg_free(rep_skb);
ret:
	return;
}
static struct genl_ops taskstats_ops = {
	.cmd		= TASKSTATS_CMD_GET,
	.doit		= taskstats_user_cmd,
	.policy		= taskstats_cmd_get_policy,
};

/* Needed early in initialization */
void __init taskstats_init_early(void)
{
	unsigned int i;

	taskstats_cache = kmem_cache_create("taskstats_cache",
						sizeof(struct taskstats),
						0, SLAB_PANIC, NULL, NULL);
	for_each_possible_cpu(i) {
		INIT_LIST_HEAD(&(per_cpu(listener_array, i).list));
		init_rwsem(&(per_cpu(listener_array, i).sem));
	}
}
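/*
 * Register the taskstats generic netlink family and its only operation,
 * TASKSTATS_CMD_GET.
 */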
static int __init taskstats_init(void)
{
	int rc;

	rc = genl_register_family(&family);
	if (rc)
		return rc;

	rc = genl_register_ops(&family, &taskstats_ops);
	if (rc < 0)
		goto err;

	family_registered = 1;
	return 0;
err:
	genl_unregister_family(&family);
	return rc;
}

/*
 * late initcall ensures initialization of statistics collection
 * mechanisms precedes initialization of the taskstats interface
 */
late_initcall(taskstats_init);