/*
 * taskstats.c - Export per-task statistics to userland
 *
 * Copyright (C) Shailabh Nagar, IBM Corp. 2006
 *           (C) Balbir Singh,   IBM Corp. 2006
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */
#include <linux/kernel.h>
#include <linux/taskstats_kern.h>
#include <linux/tsacct_kern.h>
#include <linux/delayacct.h>
#include <linux/cpumask.h>
#include <linux/percpu.h>
#include <net/genetlink.h>
#include <asm/atomic.h>
/*
 * Maximum length of a cpumask that can be specified in
 * the TASKSTATS_CMD_ATTR_REGISTER/DEREGISTER_CPUMASK attribute
 */
#define TASKSTATS_CPUMASK_MAXLEN	(100+6*NR_CPUS)
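/*
 * Note: the attribute carries a cpulist string as understood by
 * cpulist_parse() (see parse() below); e.g. "0-3,8" selects CPUs
 * 0, 1, 2, 3 and 8.
 */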
static DEFINE_PER_CPU(__u32, taskstats_seqnum) = { 0 };
static int family_registered;
kmem_cache_t *taskstats_cache;

static struct genl_family family = {
	.id		= GENL_ID_GENERATE,
	.name		= TASKSTATS_GENL_NAME,
	.version	= TASKSTATS_GENL_VERSION,
	.maxattr	= TASKSTATS_CMD_ATTR_MAX,
};

static struct nla_policy taskstats_cmd_get_policy[TASKSTATS_CMD_ATTR_MAX+1]
__read_mostly = {
	[TASKSTATS_CMD_ATTR_PID]  = { .type = NLA_U32 },
	[TASKSTATS_CMD_ATTR_TGID] = { .type = NLA_U32 },
	[TASKSTATS_CMD_ATTR_REGISTER_CPUMASK] = { .type = NLA_STRING },
	[TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK] = { .type = NLA_STRING },
};
struct listener {
	struct list_head list;
	pid_t pid;
	char valid;
};

struct listener_list {
	struct rw_semaphore sem;
	struct list_head list;
};
static DEFINE_PER_CPU(struct listener_list, listener_array);

enum actions {
	REGISTER,
	DEREGISTER,
	CPU_DONT_CARE
};
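/*
 * prepare_reply - allocate a message and start its genetlink payload.
 * When @info is NULL (the exit path, where there is no request being
 * answered), a per-cpu sequence number is used in place of the
 * requester's; otherwise the reply is correlated with @info.
 */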
static int prepare_reply(struct genl_info *info, u8 cmd, struct sk_buff **skbp,
				void **replyp, size_t size)
{
	struct sk_buff *skb;
	void *reply;

	/*
	 * If new attributes are added, please revisit this allocation
	 */
	skb = genlmsg_new(size, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	if (!info) {
		int seq = get_cpu_var(taskstats_seqnum)++;
		put_cpu_var(taskstats_seqnum);

		reply = genlmsg_put(skb, 0, seq, &family, 0, cmd);
	} else
		reply = genlmsg_put_reply(skb, info, &family, 0, cmd);
	if (reply == NULL) {
		nlmsg_free(skb);
		return -EINVAL;
	}

	*skbp = skb;
	*replyp = reply;
	return 0;
}
/*
 * Send taskstats data in @skb to listener with nl_pid @pid
 */
static int send_reply(struct sk_buff *skb, pid_t pid)
{
	struct genlmsghdr *genlhdr = nlmsg_data((struct nlmsghdr *)skb->data);
	void *reply = genlmsg_data(genlhdr);
	int rc;

	rc = genlmsg_end(skb, reply);
	if (rc < 0) {
		nlmsg_free(skb);
		return rc;
	}

	return genlmsg_unicast(skb, pid);
}
/*
 * Send taskstats data in @skb to listeners registered for @cpu's exit data
 */
static void send_cpu_listeners(struct sk_buff *skb, unsigned int cpu)
{
	struct genlmsghdr *genlhdr = nlmsg_data((struct nlmsghdr *)skb->data);
	struct listener_list *listeners;
	struct listener *s, *tmp;
	struct sk_buff *skb_next, *skb_cur = skb;
	void *reply = genlmsg_data(genlhdr);
	int rc, delcount = 0;

	rc = genlmsg_end(skb, reply);
	if (rc < 0) {
		nlmsg_free(skb);
		return;
	}

	rc = 0;
	listeners = &per_cpu(listener_array, cpu);
	down_read(&listeners->sem);
	list_for_each_entry(s, &listeners->list, list) {
		skb_next = NULL;
		if (!list_is_last(&s->list, &listeners->list)) {
			skb_next = skb_clone(skb_cur, GFP_KERNEL);
			if (!skb_next)
				break;
		}
		rc = genlmsg_unicast(skb_cur, s->pid);
		if (rc == -ECONNREFUSED) {
			s->valid = 0;
			delcount++;
		}
		skb_cur = skb_next;
	}
	up_read(&listeners->sem);

	if (skb_cur)
		nlmsg_free(skb_cur);

	if (!delcount)
		return;

	/* Delete invalidated entries */
	down_write(&listeners->sem);
	list_for_each_entry_safe(s, tmp, &listeners->list, list) {
		if (!s->valid) {
			list_del(&s->list);
			kfree(s);
		}
	}
	up_write(&listeners->sem);
}
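/*
 * fill_pid - fill *stats for a single task.  If @tsk is NULL the task
 * is looked up by @pid under rcu_read_lock(); either way a task
 * reference is held across the accounting calls and dropped before
 * returning.
 */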
static int fill_pid(pid_t pid, struct task_struct *tsk,
		struct taskstats *stats)
{
	int rc = 0;

	if (!tsk) {
		rcu_read_lock();
		tsk = find_task_by_pid(pid);
		if (tsk)
			get_task_struct(tsk);
		rcu_read_unlock();
		if (!tsk)
			return -ESRCH;
	} else
		get_task_struct(tsk);

	/*
	 * Each accounting subsystem adds calls to its functions to
	 * fill in relevant parts of struct taskstats as follows
	 *
	 *	per-task-foo(stats, tsk);
	 */
	delayacct_add_tsk(stats, tsk);

	/* fill in basic acct fields */
	stats->version = TASKSTATS_VERSION;
	bacct_add_tsk(stats, tsk);

	/* fill in extended acct fields */
	xacct_add_tsk(stats, tsk);

	/* Define err: label here if needed */
	put_task_struct(tsk);
	return rc;
}
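/*
 * fill_tgid - aggregate stats for a whole thread group.  The baseline
 * is first->signal->stats, into which already-dead threads have been
 * accumulated; the live threads are then added under the group's
 * siglock.
 */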
static int fill_tgid(pid_t tgid, struct task_struct *first,
		struct taskstats *stats)
{
	struct task_struct *tsk;
	unsigned long flags;
	int rc = -ESRCH;

	/*
	 * Add additional stats from live tasks except zombie thread group
	 * leaders who are already counted with the dead tasks
	 */
	rcu_read_lock();
	if (!first)
		first = find_task_by_pid(tgid);

	if (!first || !lock_task_sighand(first, &flags))
		goto out;

	if (first->signal->stats)
		memcpy(stats, first->signal->stats, sizeof(*stats));

	tsk = first;
	do {
		if (tsk->exit_state)
			continue;
		/*
		 * Accounting subsystem can call its functions here to
		 * fill in relevant parts of struct taskstats as follows
		 *
		 *	per-task-foo(stats, tsk);
		 */
		delayacct_add_tsk(stats, tsk);

	} while_each_thread(first, tsk);

	unlock_task_sighand(first, &flags);
	rc = 0;
out:
	rcu_read_unlock();

	stats->version = TASKSTATS_VERSION;
	/*
	 * Accounting subsystems can also add calls here to modify
	 * fields of taskstats.
	 */
	return rc;
}
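/*
 * fill_tgid_exit - fold the exiting task's per-task stats into the
 * per-tgid structure (tsk->signal->stats), under the thread group's
 * siglock.
 */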
static void fill_tgid_exit(struct task_struct *tsk)
{
	unsigned long flags;

	spin_lock_irqsave(&tsk->sighand->siglock, flags);
	if (!tsk->signal->stats)
		goto ret;

	/*
	 * Each accounting subsystem calls its functions here to
	 * accumulate its per-task stats for tsk, into the per-tgid structure
	 *
	 *	per-task-foo(tsk->signal->stats, tsk);
	 */
	delayacct_add_tsk(tsk->signal->stats, tsk);
ret:
	spin_unlock_irqrestore(&tsk->sighand->siglock, flags);
	return;
}
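/*
 * add_del_listener - register (@isadd == REGISTER) or deregister the
 * netlink socket @pid as an exit-data listener on every CPU in *maskp.
 * If an allocation fails during registration, the cleanup path removes
 * any entries already added for @pid.
 */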
static int add_del_listener(pid_t pid, cpumask_t *maskp, int isadd)
{
	struct listener_list *listeners;
	struct listener *s, *tmp;
	unsigned int cpu;
	cpumask_t mask = *maskp;

	if (!cpus_subset(mask, cpu_possible_map))
		return -EINVAL;

	if (isadd == REGISTER) {
		for_each_cpu_mask(cpu, mask) {
			s = kmalloc_node(sizeof(struct listener), GFP_KERNEL,
					 cpu_to_node(cpu));
			if (!s)
				goto cleanup;
			s->pid = pid;
			INIT_LIST_HEAD(&s->list);
			s->valid = 1;

			listeners = &per_cpu(listener_array, cpu);
			down_write(&listeners->sem);
			list_add(&s->list, &listeners->list);
			up_write(&listeners->sem);
		}
		return 0;
	}

	/* Deregister or cleanup */
cleanup:
	for_each_cpu_mask(cpu, mask) {
		listeners = &per_cpu(listener_array, cpu);
		down_write(&listeners->sem);
		list_for_each_entry_safe(s, tmp, &listeners->list, list) {
			if (s->pid == pid) {
				list_del(&s->list);
				kfree(s);
				break;
			}
		}
		up_write(&listeners->sem);
	}
	return 0;
}
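/*
 * parse - extract a cpulist string from attribute @na into *mask.
 * Returns 1 if the attribute is absent, 0 on success and a negative
 * errno on failure; e.g. the string "1-3,5" sets the mask bits for
 * CPUs 1, 2, 3 and 5.
 */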
static int parse(struct nlattr *na, cpumask_t *mask)
{
	char *data;
	int len;
	int ret;

	if (na == NULL)
		return 1;
	len = nla_len(na);
	if (len > TASKSTATS_CPUMASK_MAXLEN)
		return -E2BIG;
	if (len < 1)
		return -EINVAL;
	data = kmalloc(len, GFP_KERNEL);
	if (!data)
		return -ENOMEM;
	nla_strlcpy(data, na, len);
	ret = cpulist_parse(data, *mask);
	kfree(data);
	return ret;
}
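/*
 * taskstats_user_cmd - handle TASKSTATS_CMD_GET from userspace.  A
 * (DE)REGISTER_CPUMASK attribute updates the per-cpu listener lists;
 * a PID or TGID attribute triggers a TASKSTATS_CMD_NEW reply carrying
 * the requested stats in a nested attribute.
 */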
static int taskstats_user_cmd(struct sk_buff *skb, struct genl_info *info)
{
	int rc = 0;
	struct sk_buff *rep_skb;
	struct taskstats stats;
	void *reply;
	size_t size;
	struct nlattr *na;
	cpumask_t mask;

	rc = parse(info->attrs[TASKSTATS_CMD_ATTR_REGISTER_CPUMASK], &mask);
	if (rc < 0)
		return rc;
	if (rc == 0)
		return add_del_listener(info->snd_pid, &mask, REGISTER);

	rc = parse(info->attrs[TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK], &mask);
	if (rc < 0)
		return rc;
	if (rc == 0)
		return add_del_listener(info->snd_pid, &mask, DEREGISTER);

	/*
	 * Size includes space for nested attributes
	 */
	size = nla_total_size(sizeof(u32)) +
		nla_total_size(sizeof(struct taskstats)) + nla_total_size(0);

	memset(&stats, 0, sizeof(stats));

	rc = prepare_reply(info, TASKSTATS_CMD_NEW, &rep_skb, &reply, size);
	if (rc < 0)
		return rc;

	if (info->attrs[TASKSTATS_CMD_ATTR_PID]) {
		u32 pid = nla_get_u32(info->attrs[TASKSTATS_CMD_ATTR_PID]);
		rc = fill_pid(pid, NULL, &stats);
		if (rc < 0)
			goto err;

		na = nla_nest_start(rep_skb, TASKSTATS_TYPE_AGGR_PID);
		NLA_PUT_U32(rep_skb, TASKSTATS_TYPE_PID, pid);
		NLA_PUT_TYPE(rep_skb, struct taskstats, TASKSTATS_TYPE_STATS,
				stats);
	} else if (info->attrs[TASKSTATS_CMD_ATTR_TGID]) {
		u32 tgid = nla_get_u32(info->attrs[TASKSTATS_CMD_ATTR_TGID]);
		rc = fill_tgid(tgid, NULL, &stats);
		if (rc < 0)
			goto err;

		na = nla_nest_start(rep_skb, TASKSTATS_TYPE_AGGR_TGID);
		NLA_PUT_U32(rep_skb, TASKSTATS_TYPE_TGID, tgid);
		NLA_PUT_TYPE(rep_skb, struct taskstats, TASKSTATS_TYPE_STATS,
				stats);
	} else {
		rc = -EINVAL;
		goto err;
	}

	nla_nest_end(rep_skb, na);

	return send_reply(rep_skb, info->snd_pid);

nla_put_failure:
	rc = genlmsg_cancel(rep_skb, reply);
err:
	nlmsg_free(rep_skb);
	return rc;
}
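/*
 * taskstats_exit_alloc - called on the exit path to preallocate a
 * zeroed struct taskstats and record the exiting CPU.  The allocation
 * is kept only if that CPU currently has listeners; otherwise it is
 * freed immediately and *ptidstats stays NULL.
 */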
void taskstats_exit_alloc(struct taskstats **ptidstats, unsigned int *mycpu)
{
	struct listener_list *listeners;
	struct taskstats *tmp;

	/*
	 * This is the cpu on which the task is exiting currently and will
	 * be the one for which the exit event is sent, even if the cpu
	 * on which this function is running changes later.
	 */
	*mycpu = raw_smp_processor_id();

	*ptidstats = NULL;
	tmp = kmem_cache_zalloc(taskstats_cache, SLAB_KERNEL);
	if (!tmp)
		return;

	listeners = &per_cpu(listener_array, *mycpu);
	down_read(&listeners->sem);
	if (!list_empty(&listeners->list)) {
		*ptidstats = tmp;
		tmp = NULL;
	}
	up_read(&listeners->sem);
	kfree(tmp);
}
/* Send pid data out on exit */
void taskstats_exit_send(struct task_struct *tsk, struct taskstats *tidstats,
			int group_dead, unsigned int mycpu)
{
	int rc;
	struct sk_buff *rep_skb;
	void *reply;
	size_t size;
	int is_thread_group;
	struct nlattr *na;

	if (!family_registered)
		return;

	/*
	 * Size includes space for nested attributes
	 */
	size = nla_total_size(sizeof(u32)) +
		nla_total_size(sizeof(struct taskstats)) + nla_total_size(0);

	is_thread_group = (tsk->signal->stats != NULL);
	if (is_thread_group) {
		/* PID + STATS + TGID + STATS */
		size = 2 * size;
		/* fill the tsk->signal->stats structure */
		fill_tgid_exit(tsk);
	}

	if (!tidstats)
		return;

	rc = prepare_reply(NULL, TASKSTATS_CMD_NEW, &rep_skb, &reply, size);
	if (rc < 0)
		goto ret;

	rc = fill_pid(tsk->pid, tsk, tidstats);
	if (rc < 0)
		goto err_skb;

	na = nla_nest_start(rep_skb, TASKSTATS_TYPE_AGGR_PID);
	NLA_PUT_U32(rep_skb, TASKSTATS_TYPE_PID, (u32)tsk->pid);
	NLA_PUT_TYPE(rep_skb, struct taskstats, TASKSTATS_TYPE_STATS,
			*tidstats);
	nla_nest_end(rep_skb, na);

	if (!is_thread_group)
		goto send;

	/*
	 * Doesn't matter if tsk is the leader or the last group member leaving
	 */
	if (!group_dead)
		goto send;

	na = nla_nest_start(rep_skb, TASKSTATS_TYPE_AGGR_TGID);
	NLA_PUT_U32(rep_skb, TASKSTATS_TYPE_TGID, (u32)tsk->tgid);
	/* No locking needed for tsk->signal->stats since group is dead */
	NLA_PUT_TYPE(rep_skb, struct taskstats, TASKSTATS_TYPE_STATS,
			*tsk->signal->stats);
	nla_nest_end(rep_skb, na);

send:
	send_cpu_listeners(rep_skb, mycpu);
	return;

nla_put_failure:
	genlmsg_cancel(rep_skb, reply);
err_skb:
	nlmsg_free(rep_skb);
ret:
	return;
}
static struct genl_ops taskstats_ops = {
	.cmd		= TASKSTATS_CMD_GET,
	.doit		= taskstats_user_cmd,
	.policy		= taskstats_cmd_get_policy,
};

/* Needed early in initialization */
void __init taskstats_init_early(void)
{
	unsigned int i;

	taskstats_cache = kmem_cache_create("taskstats_cache",
						sizeof(struct taskstats),
						0, SLAB_PANIC, NULL, NULL);
	for_each_possible_cpu(i) {
		INIT_LIST_HEAD(&(per_cpu(listener_array, i).list));
		init_rwsem(&(per_cpu(listener_array, i).sem));
	}
}
static int __init taskstats_init(void)
{
	int rc;

	rc = genl_register_family(&family);
	if (rc)
		return rc;

	rc = genl_register_ops(&family, &taskstats_ops);
	if (rc < 0)
		goto err;

	family_registered = 1;
	return 0;
err:
	genl_unregister_family(&family);
	return rc;
}

/*
 * late initcall ensures initialization of statistics collection
 * mechanisms precedes initialization of the taskstats interface
 */
late_initcall(taskstats_init);
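/*
 * Userspace usage (a minimal sketch, not part of this file; the
 * attribute layout follows the code above, see also the taskstats
 * documentation under Documentation/accounting/):
 *
 *	int fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_GENERIC);
 *
 * 1. Resolve the dynamically assigned family id: send CTRL_CMD_GETFAMILY
 *    to the GENL_ID_CTRL family with CTRL_ATTR_FAMILY_NAME set to
 *    TASKSTATS_GENL_NAME ("TASKSTATS") and read CTRL_ATTR_FAMILY_ID
 *    from the reply.
 * 2. Query: send TASKSTATS_CMD_GET carrying TASKSTATS_CMD_ATTR_PID (or
 *    _TGID), or register with TASKSTATS_CMD_ATTR_REGISTER_CPUMASK to
 *    receive exit records for tasks that exit on the given CPUs.
 * 3. Each TASKSTATS_CMD_NEW message holds a nested
 *    TASKSTATS_TYPE_AGGR_PID/_TGID attribute containing a
 *    TASKSTATS_TYPE_PID/_TGID value followed by a TASKSTATS_TYPE_STATS
 *    attribute whose payload is a struct taskstats.
 */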