/*
 * taskstats.c - Export per-task statistics to userland
 *
 * Copyright (C) Shailabh Nagar, IBM Corp. 2006
 *           (C) Balbir Singh,   IBM Corp. 2006
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */

#include <linux/kernel.h>
#include <linux/taskstats_kern.h>
#include <linux/tsacct_kern.h>
#include <linux/delayacct.h>
#include <linux/cpumask.h>
#include <linux/percpu.h>
#include <net/genetlink.h>
#include <asm/atomic.h>

/*
 * Maximum length of a cpumask that can be specified in
 * the TASKSTATS_CMD_ATTR_REGISTER/DEREGISTER_CPUMASK attribute
 */
#define TASKSTATS_CPUMASK_MAXLEN	(100 + 6 * NR_CPUS)

static DEFINE_PER_CPU(__u32, taskstats_seqnum) = { 0 };
static int family_registered;
struct kmem_cache *taskstats_cache;

static struct genl_family family = {
        .id             = GENL_ID_GENERATE,
        .name           = TASKSTATS_GENL_NAME,
        .version        = TASKSTATS_GENL_VERSION,
        .maxattr        = TASKSTATS_CMD_ATTR_MAX,
};

static struct nla_policy taskstats_cmd_get_policy[TASKSTATS_CMD_ATTR_MAX+1]
__read_mostly = {
        [TASKSTATS_CMD_ATTR_PID]  = { .type = NLA_U32 },
        [TASKSTATS_CMD_ATTR_TGID] = { .type = NLA_U32 },
        [TASKSTATS_CMD_ATTR_REGISTER_CPUMASK] = { .type = NLA_STRING },
        [TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK] = { .type = NLA_STRING },
};

struct listener {
        struct list_head list;
        pid_t pid;
        char valid;
};

struct listener_list {
        struct rw_semaphore sem;
        struct list_head list;
};
static DEFINE_PER_CPU(struct listener_list, listener_array);

enum actions {
        REGISTER,
        DEREGISTER,
        CPU_DONT_CARE
};
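
/*
 * Allocate a netlink reply skb of @size and write a genetlink message
 * header for @cmd into it.  When called without @info (the exit path)
 * a per-cpu sequence number is used; otherwise the reply is addressed
 * to the requester described by @info.
 */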
static int prepare_reply(struct genl_info *info, u8 cmd, struct sk_buff **skbp,
                                size_t size)
{
        struct sk_buff *skb;
        void *reply;

        /*
         * If new attributes are added, please revisit this allocation
         */
        skb = genlmsg_new(size, GFP_KERNEL);
        if (!skb)
                return -ENOMEM;

        if (!info) {
                int seq = get_cpu_var(taskstats_seqnum)++;
                put_cpu_var(taskstats_seqnum);

                reply = genlmsg_put(skb, 0, seq, &family, 0, cmd);
        } else
                reply = genlmsg_put_reply(skb, info, &family, 0, cmd);
        if (reply == NULL) {
                nlmsg_free(skb);
                return -EINVAL;
        }

        *skbp = skb;
        return 0;
}

/*
 * Send taskstats data in @skb to listener with nl_pid @pid
 */
static int send_reply(struct sk_buff *skb, pid_t pid)
{
        struct genlmsghdr *genlhdr = nlmsg_data(nlmsg_hdr(skb));
        void *reply = genlmsg_data(genlhdr);
        int rc;

        rc = genlmsg_end(skb, reply);
        if (rc < 0) {
                nlmsg_free(skb);
                return rc;
        }

        return genlmsg_unicast(skb, pid);
}

/*
 * Send taskstats data in @skb to listeners registered for @cpu's exit data
 */
static void send_cpu_listeners(struct sk_buff *skb,
                                        struct listener_list *listeners)
{
        struct genlmsghdr *genlhdr = nlmsg_data(nlmsg_hdr(skb));
        struct listener *s, *tmp;
        struct sk_buff *skb_next, *skb_cur = skb;
        void *reply = genlmsg_data(genlhdr);
        int rc, delcount = 0;

        rc = genlmsg_end(skb, reply);
        if (rc < 0) {
                nlmsg_free(skb);
                return;
        }

        rc = 0;
        down_read(&listeners->sem);
        list_for_each_entry(s, &listeners->list, list) {
                skb_next = NULL;
                if (!list_is_last(&s->list, &listeners->list)) {
                        skb_next = skb_clone(skb_cur, GFP_KERNEL);
                        if (!skb_next)
                                break;
                }
                rc = genlmsg_unicast(skb_cur, s->pid);
                if (rc == -ECONNREFUSED) {
                        s->valid = 0;
                        delcount++;
                }
                skb_cur = skb_next;
        }
        up_read(&listeners->sem);

        if (skb_cur)
                nlmsg_free(skb_cur);

        if (!delcount)
                return;

        /* Delete invalidated entries */
        down_write(&listeners->sem);
        list_for_each_entry_safe(s, tmp, &listeners->list, list) {
                if (!s->valid) {
                        list_del(&s->list);
                        kfree(s);
                }
        }
        up_write(&listeners->sem);
}
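
/*
 * Fill @stats with per-task statistics for @pid.  If @tsk is NULL the
 * task is looked up by pid under RCU; a reference is held on the task
 * while the delay, basic and extended accounting fields are filled in.
 */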
static int fill_pid(pid_t pid, struct task_struct *tsk,
                struct taskstats *stats)
{
        int rc = 0;

        if (!tsk) {
                rcu_read_lock();
                tsk = find_task_by_pid(pid);
                if (tsk)
                        get_task_struct(tsk);
                rcu_read_unlock();
                if (!tsk)
                        return -ESRCH;
        } else
                get_task_struct(tsk);

        memset(stats, 0, sizeof(*stats));
        /*
         * Each accounting subsystem adds calls to its functions to
         * fill in relevant parts of struct taskstats as follows
         *
         *      per-task-foo(stats, tsk);
         */
        delayacct_add_tsk(stats, tsk);

        /* fill in basic acct fields */
        stats->version = TASKSTATS_VERSION;
        stats->nvcsw = tsk->nvcsw;
        stats->nivcsw = tsk->nivcsw;
        bacct_add_tsk(stats, tsk);

        /* fill in extended acct fields */
        xacct_add_tsk(stats, tsk);

        /* Define err: label here if needed */
        put_task_struct(tsk);
        return rc;
}
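
/*
 * Fill @stats with accumulated statistics for thread group @tgid:
 * start from the totals of already-exited members saved in
 * signal->stats, then add the live threads under the sighand lock.
 */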
static int fill_tgid(pid_t tgid, struct task_struct *first,
                struct taskstats *stats)
{
        struct task_struct *tsk;
        unsigned long flags;
        int rc = -ESRCH;

        /*
         * Add additional stats from live tasks except zombie thread group
         * leaders who are already counted with the dead tasks
         */
        rcu_read_lock();
        if (!first)
                first = find_task_by_pid(tgid);

        if (!first || !lock_task_sighand(first, &flags))
                goto out;

        if (first->signal->stats)
                memcpy(stats, first->signal->stats, sizeof(*stats));
        else
                memset(stats, 0, sizeof(*stats));

        tsk = first;
        do {
                if (tsk->exit_state)
                        continue;
                /*
                 * Accounting subsystem can call its functions here to
                 * fill in relevant parts of struct taskstats as follows
                 *
                 *      per-task-foo(stats, tsk);
                 */
                delayacct_add_tsk(stats, tsk);

                stats->nvcsw += tsk->nvcsw;
                stats->nivcsw += tsk->nivcsw;
        } while_each_thread(first, tsk);

        unlock_task_sighand(first, &flags);
        rc = 0;
out:
        rcu_read_unlock();

        stats->version = TASKSTATS_VERSION;
        /*
         * Accounting subsystems can also add calls here to modify
         * fields of taskstats.
         */
        return rc;
}
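
/*
 * Accumulate an exiting task's statistics into its thread group's
 * signal->stats, if that per-tgid structure has been allocated.
 */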
static void fill_tgid_exit(struct task_struct *tsk)
{
        unsigned long flags;

        spin_lock_irqsave(&tsk->sighand->siglock, flags);
        if (!tsk->signal->stats)
                goto ret;

        /*
         * Each accounting subsystem calls its functions here to
         * accumulate its per-task stats for tsk, into the per-tgid structure
         *
         *      per-task-foo(tsk->signal->stats, tsk);
         */
        delayacct_add_tsk(tsk->signal->stats, tsk);
ret:
        spin_unlock_irqrestore(&tsk->sighand->siglock, flags);
        return;
}
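
/*
 * Register or deregister @pid as an exit-data listener on every CPU in
 * @maskp.  An allocation failure during registration falls through to
 * the same cleanup path used for deregistration.
 */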
static int add_del_listener(pid_t pid, cpumask_t *maskp, int isadd)
{
        struct listener_list *listeners;
        struct listener *s, *tmp;
        unsigned int cpu;
        cpumask_t mask = *maskp;

        if (!cpus_subset(mask, cpu_possible_map))
                return -EINVAL;

        if (isadd == REGISTER) {
                for_each_cpu_mask(cpu, mask) {
                        s = kmalloc_node(sizeof(struct listener), GFP_KERNEL,
                                         cpu_to_node(cpu));
                        if (!s)
                                goto cleanup;
                        s->pid = pid;
                        INIT_LIST_HEAD(&s->list);
                        s->valid = 1;

                        listeners = &per_cpu(listener_array, cpu);
                        down_write(&listeners->sem);
                        list_add(&s->list, &listeners->list);
                        up_write(&listeners->sem);
                }
                return 0;
        }

        /* Deregister or cleanup */
cleanup:
        for_each_cpu_mask(cpu, mask) {
                listeners = &per_cpu(listener_array, cpu);
                down_write(&listeners->sem);
                list_for_each_entry_safe(s, tmp, &listeners->list, list) {
                        if (s->pid == pid) {
                                list_del(&s->list);
                                kfree(s);
                                break;
                        }
                }
                up_write(&listeners->sem);
        }
        return 0;
}
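
/*
 * Parse a cpumask list attribute ("0-3,5" style) into @mask.  Returns
 * 1 when the attribute is absent, 0 on success, or a negative errno.
 */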
static int parse(struct nlattr *na, cpumask_t *mask)
{
        char *data;
        int len;
        int ret;

        if (na == NULL)
                return 1;
        len = nla_len(na);
        if (len > TASKSTATS_CPUMASK_MAXLEN)
                return -E2BIG;
        if (len < 1)
                return -EINVAL;
        data = kmalloc(len, GFP_KERNEL);
        if (!data)
                return -ENOMEM;
        nla_strlcpy(data, na, len);
        ret = cpulist_parse(data, *mask);
        kfree(data);
        return ret;
}
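
/*
 * Start a nested PID/TGID aggregate attribute in @skb, add the id and
 * reserve space for the taskstats payload.  Returns a pointer to the
 * reserved struct taskstats, or NULL if the skb ran out of room.
 */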
static struct taskstats *mk_reply(struct sk_buff *skb, int type, u32 pid)
{
        struct nlattr *na, *ret;
        int aggr;

        aggr = (type == TASKSTATS_TYPE_PID)
                        ? TASKSTATS_TYPE_AGGR_PID
                        : TASKSTATS_TYPE_AGGR_TGID;

        na = nla_nest_start(skb, aggr);
        if (!na)
                goto err;
        if (nla_put(skb, type, sizeof(pid), &pid) < 0)
                goto err;
        ret = nla_reserve(skb, TASKSTATS_TYPE_STATS, sizeof(struct taskstats));
        if (!ret)
                goto err;
        nla_nest_end(skb, na);

        return nla_data(ret);
err:
        return NULL;
}
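
/*
 * Handle TASKSTATS_CMD_GET requests from userspace: cpumask attributes
 * register/deregister the sender as an exit-data listener, while
 * PID/TGID attributes cause a one-shot TASKSTATS_CMD_NEW reply.
 */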
static int taskstats_user_cmd(struct sk_buff *skb, struct genl_info *info)
{
        int rc = 0;
        struct sk_buff *rep_skb;
        struct taskstats *stats;
        size_t size;
        cpumask_t mask;

        rc = parse(info->attrs[TASKSTATS_CMD_ATTR_REGISTER_CPUMASK], &mask);
        if (rc < 0)
                return rc;
        if (rc == 0)
                return add_del_listener(info->snd_pid, &mask, REGISTER);

        rc = parse(info->attrs[TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK], &mask);
        if (rc < 0)
                return rc;
        if (rc == 0)
                return add_del_listener(info->snd_pid, &mask, DEREGISTER);

        /*
         * Size includes space for nested attributes
         */
        size = nla_total_size(sizeof(u32)) +
                nla_total_size(sizeof(struct taskstats)) + nla_total_size(0);

        rc = prepare_reply(info, TASKSTATS_CMD_NEW, &rep_skb, size);
        if (rc < 0)
                return rc;

        rc = -EINVAL;
        if (info->attrs[TASKSTATS_CMD_ATTR_PID]) {
                u32 pid = nla_get_u32(info->attrs[TASKSTATS_CMD_ATTR_PID]);
                stats = mk_reply(rep_skb, TASKSTATS_TYPE_PID, pid);
                if (!stats)
                        goto err;

                rc = fill_pid(pid, NULL, stats);
                if (rc < 0)
                        goto err;
        } else if (info->attrs[TASKSTATS_CMD_ATTR_TGID]) {
                u32 tgid = nla_get_u32(info->attrs[TASKSTATS_CMD_ATTR_TGID]);
                stats = mk_reply(rep_skb, TASKSTATS_TYPE_TGID, tgid);
                if (!stats)
                        goto err;

                rc = fill_tgid(tgid, NULL, stats);
                if (rc < 0)
                        goto err;
        } else
                goto err;

        return send_reply(rep_skb, info->snd_pid);
err:
        nlmsg_free(rep_skb);
        return rc;
}
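
/*
 * Lazily allocate the per-thread-group taskstats structure, using the
 * sighand lock to guard against a concurrent allocation.  Returns
 * sig->stats, which may be NULL if allocation failed or the group is
 * single-threaded.
 */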
static struct taskstats *taskstats_tgid_alloc(struct task_struct *tsk)
{
        struct signal_struct *sig = tsk->signal;
        struct taskstats *stats;

        if (sig->stats || thread_group_empty(tsk))
                goto ret;

        /* No problem if kmem_cache_zalloc() fails */
        stats = kmem_cache_zalloc(taskstats_cache, GFP_KERNEL);

        spin_lock_irq(&tsk->sighand->siglock);
        if (!sig->stats) {
                sig->stats = stats;
                stats = NULL;
        }
        spin_unlock_irq(&tsk->sighand->siglock);

        if (stats)
                kmem_cache_free(taskstats_cache, stats);
ret:
        return sig->stats;
}

/* Send pid data out on exit */
void taskstats_exit(struct task_struct *tsk, int group_dead)
{
        int rc;
        struct listener_list *listeners;
        struct taskstats *stats;
        struct sk_buff *rep_skb;
        size_t size;
        int is_thread_group;

        if (!family_registered)
                return;

        /*
         * Size includes space for nested attributes
         */
        size = nla_total_size(sizeof(u32)) +
                nla_total_size(sizeof(struct taskstats)) + nla_total_size(0);

        is_thread_group = !!taskstats_tgid_alloc(tsk);
        if (is_thread_group) {
                /* PID + STATS + TGID + STATS */
                size = 2 * size;
                /* fill the tsk->signal->stats structure */
                fill_tgid_exit(tsk);
        }

        listeners = &__raw_get_cpu_var(listener_array);
        if (list_empty(&listeners->list))
                return;

        rc = prepare_reply(NULL, TASKSTATS_CMD_NEW, &rep_skb, size);
        if (rc < 0)
                return;

        stats = mk_reply(rep_skb, TASKSTATS_TYPE_PID, tsk->pid);
        if (!stats)
                goto err;

        rc = fill_pid(tsk->pid, tsk, stats);
        if (rc < 0)
                goto err;

        /*
         * Doesn't matter if tsk is the leader or the last group member leaving
         */
        if (!is_thread_group || !group_dead)
                goto send;

        stats = mk_reply(rep_skb, TASKSTATS_TYPE_TGID, tsk->tgid);
        if (!stats)
                goto err;

        memcpy(stats, tsk->signal->stats, sizeof(*stats));

send:
        send_cpu_listeners(rep_skb, listeners);
        return;
err:
        nlmsg_free(rep_skb);
}

static struct genl_ops taskstats_ops = {
        .cmd            = TASKSTATS_CMD_GET,
        .doit           = taskstats_user_cmd,
        .policy         = taskstats_cmd_get_policy,
};

/* Needed early in initialization */
void __init taskstats_init_early(void)
{
        unsigned int i;

        taskstats_cache = KMEM_CACHE(taskstats, SLAB_PANIC);
        for_each_possible_cpu(i) {
                INIT_LIST_HEAD(&(per_cpu(listener_array, i).list));
                init_rwsem(&(per_cpu(listener_array, i).sem));
        }
}
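
/*
 * Register the taskstats genetlink family and its GET operation;
 * exit-time reporting stays disabled until this has succeeded.
 */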
static int __init taskstats_init(void)
{
        int rc;

        rc = genl_register_family(&family);
        if (rc)
                return rc;

        rc = genl_register_ops(&family, &taskstats_ops);
        if (rc < 0)
                goto err;

        family_registered = 1;
        return 0;
err:
        genl_unregister_family(&family);
        return rc;
}

/*
 * late initcall ensures initialization of statistics collection
 * mechanisms precedes initialization of the taskstats interface
 */
late_initcall(taskstats_init);