/*
 * taskstats.c - Export per-task statistics to userland
 *
 * Copyright (C) Shailabh Nagar, IBM Corp. 2006
 *           (C) Balbir Singh,   IBM Corp. 2006
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

#include <linux/kernel.h>
#include <linux/taskstats_kern.h>
#include <linux/tsacct_kern.h>
#include <linux/delayacct.h>
#include <linux/cpumask.h>
#include <linux/percpu.h>
#include <linux/slab.h>
#include <linux/cgroupstats.h>
#include <linux/cgroup.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/pid_namespace.h>
#include <net/genetlink.h>
#include <linux/atomic.h>

/*
 * Maximum length of a cpumask that can be specified in
 * the TASKSTATS_CMD_ATTR_REGISTER/DEREGISTER_CPUMASK attribute
 */
#define TASKSTATS_CPUMASK_MAXLEN	(100 + 6 * NR_CPUS)

static DEFINE_PER_CPU(__u32, taskstats_seqnum);
static int family_registered;
struct kmem_cache *taskstats_cache;

static struct genl_family family = {
	.id		= GENL_ID_GENERATE,
	.name		= TASKSTATS_GENL_NAME,
	.version	= TASKSTATS_GENL_VERSION,
	.maxattr	= TASKSTATS_CMD_ATTR_MAX,
};

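/*
 * The family id is allocated dynamically (GENL_ID_GENERATE), so userspace
 * must resolve TASKSTATS_GENL_NAME to a numeric id at run time via the
 * generic netlink controller (CTRL_CMD_GETFAMILY) before issuing any of
 * the commands handled below.
 */
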
static const struct nla_policy taskstats_cmd_get_policy[TASKSTATS_CMD_ATTR_MAX+1] = {
	[TASKSTATS_CMD_ATTR_PID]  = { .type = NLA_U32 },
	[TASKSTATS_CMD_ATTR_TGID] = { .type = NLA_U32 },
	[TASKSTATS_CMD_ATTR_REGISTER_CPUMASK] = { .type = NLA_STRING },
	[TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK] = { .type = NLA_STRING },
};

static const struct nla_policy cgroupstats_cmd_get_policy[CGROUPSTATS_CMD_ATTR_MAX+1] = {
	[CGROUPSTATS_CMD_ATTR_FD] = { .type = NLA_U32 },
};

struct listener {
	struct list_head list;
	pid_t pid;
	char valid;
};

struct listener_list {
	struct rw_semaphore sem;
	struct list_head list;
};
static DEFINE_PER_CPU(struct listener_list, listener_array);

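/*
 * Exit records are delivered per CPU: a listener registered through
 * TASKSTATS_CMD_ATTR_REGISTER_CPUMASK for a given CPU only receives the
 * stats of tasks that exit on that CPU, so each possible CPU keeps its
 * own rwsem-protected list of listeners.
 */
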
enum actions {
	REGISTER,
	DEREGISTER,
	CPU_DONT_CARE
};

static int prepare_reply(struct genl_info *info, u8 cmd, struct sk_buff **skbp,
				size_t size)
{
	struct sk_buff *skb;
	void *reply;

	/*
	 * If new attributes are added, please revisit this allocation
	 */
	skb = genlmsg_new(size, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	if (!info) {
		int seq = this_cpu_inc_return(taskstats_seqnum) - 1;

		reply = genlmsg_put(skb, 0, seq, &family, 0, cmd);
	} else
		reply = genlmsg_put_reply(skb, info, &family, 0, cmd);
	if (reply == NULL) {
		nlmsg_free(skb);
		return -EINVAL;
	}

	*skbp = skb;
	return 0;
}

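/*
 * prepare_reply() serves both message sources: replies to a userspace
 * TASKSTATS_CMD_GET carry the requester's sequence number via
 * genlmsg_put_reply(), while kernel-initiated exit records (info == NULL)
 * draw from the per-cpu taskstats_seqnum counter instead.
 */
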
/*
 * Send taskstats data in @skb back to the listener that sent the
 * request described by @info
 */
static int send_reply(struct sk_buff *skb, struct genl_info *info)
{
	struct genlmsghdr *genlhdr = nlmsg_data(nlmsg_hdr(skb));
	void *reply = genlmsg_data(genlhdr);
	int rc;

	rc = genlmsg_end(skb, reply);
	if (rc < 0) {
		nlmsg_free(skb);
		return rc;
	}

	return genlmsg_reply(skb, info);
}

/*
 * Send taskstats data in @skb to listeners registered for @cpu's exit data
 */
static void send_cpu_listeners(struct sk_buff *skb,
					struct listener_list *listeners)
{
	struct genlmsghdr *genlhdr = nlmsg_data(nlmsg_hdr(skb));
	struct listener *s, *tmp;
	struct sk_buff *skb_next, *skb_cur = skb;
	void *reply = genlmsg_data(genlhdr);
	int rc, delcount = 0;

	rc = genlmsg_end(skb, reply);
	if (rc < 0) {
		nlmsg_free(skb);
		return;
	}

	rc = 0;
	down_read(&listeners->sem);
	list_for_each_entry(s, &listeners->list, list) {
		skb_next = NULL;
		if (!list_is_last(&s->list, &listeners->list)) {
			skb_next = skb_clone(skb_cur, GFP_KERNEL);
			if (!skb_next)
				break;
		}
		rc = genlmsg_unicast(&init_net, skb_cur, s->pid);
		if (rc == -ECONNREFUSED) {
			s->valid = 0;
			delcount++;
		}
		skb_cur = skb_next;
	}
	up_read(&listeners->sem);

	if (skb_cur)
		nlmsg_free(skb_cur);

	if (!delcount)
		return;

	/* Delete invalidated entries */
	down_write(&listeners->sem);
	list_for_each_entry_safe(s, tmp, &listeners->list, list) {
		if (!s->valid) {
			list_del(&s->list);
			kfree(s);
		}
	}
	up_write(&listeners->sem);
}

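/*
 * Note the fan-out pattern above: every listener except the last gets a
 * clone of the skb and the final listener consumes the original. A
 * -ECONNREFUSED unicast means the listener's socket is gone, so the
 * entry is only flagged under the read lock and reaped afterwards under
 * the write side of the semaphore.
 */
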
static void fill_stats(struct user_namespace *user_ns,
		       struct pid_namespace *pid_ns,
		       struct task_struct *tsk, struct taskstats *stats)
{
	memset(stats, 0, sizeof(*stats));
	/*
	 * Each accounting subsystem adds calls to its functions to
	 * fill in relevant parts of struct taskstats as follows
	 *
	 *	per-task-foo(stats, tsk);
	 */
	delayacct_add_tsk(stats, tsk);

	/* fill in basic acct fields */
	stats->version = TASKSTATS_VERSION;
	stats->nvcsw = tsk->nvcsw;
	stats->nivcsw = tsk->nivcsw;
	bacct_add_tsk(user_ns, pid_ns, stats, tsk);

	/* fill in extended acct fields */
	xacct_add_tsk(stats, tsk);
}

static int fill_stats_for_pid(pid_t pid, struct taskstats *stats)
{
	struct task_struct *tsk;

	rcu_read_lock();
	tsk = find_task_by_vpid(pid);
	if (tsk)
		get_task_struct(tsk);
	rcu_read_unlock();
	if (!tsk)
		return -ESRCH;
	fill_stats(current_user_ns(), task_active_pid_ns(current), tsk, stats);
	put_task_struct(tsk);
	return 0;
}

static int fill_stats_for_tgid(pid_t tgid, struct taskstats *stats)
{
	struct task_struct *tsk, *first;
	unsigned long flags;
	int rc = -ESRCH;

	/*
	 * Add additional stats from live tasks except zombie thread group
	 * leaders who are already counted with the dead tasks
	 */
	rcu_read_lock();
	first = find_task_by_vpid(tgid);

	if (!first || !lock_task_sighand(first, &flags))
		goto out;

	if (first->signal->stats)
		memcpy(stats, first->signal->stats, sizeof(*stats));
	else
		memset(stats, 0, sizeof(*stats));

	tsk = first;
	do {
		if (tsk->exit_state)
			continue;
		/*
		 * Accounting subsystem can call its functions here to
		 * fill in relevant parts of struct taskstats as follows
		 *
		 *	per-task-foo(stats, tsk);
		 */
		delayacct_add_tsk(stats, tsk);

		stats->nvcsw += tsk->nvcsw;
		stats->nivcsw += tsk->nivcsw;
	} while_each_thread(first, tsk);

	unlock_task_sighand(first, &flags);
	rc = 0;
out:
	rcu_read_unlock();

	stats->version = TASKSTATS_VERSION;
	/*
	 * Accounting subsystems can also add calls here to modify
	 * fields of taskstats.
	 */
	return rc;
}

static void fill_tgid_exit(struct task_struct *tsk)
{
	unsigned long flags;

	spin_lock_irqsave(&tsk->sighand->siglock, flags);
	if (!tsk->signal->stats)
		goto ret;

	/*
	 * Each accounting subsystem calls its functions here to
	 * accumulate its per-task stats for tsk into the per-tgid structure
	 *
	 *	per-task-foo(tsk->signal->stats, tsk);
	 */
	delayacct_add_tsk(tsk->signal->stats, tsk);
ret:
	spin_unlock_irqrestore(&tsk->sighand->siglock, flags);
	return;
}

static int add_del_listener(pid_t pid, const struct cpumask *mask, int isadd)
{
	struct listener_list *listeners;
	struct listener *s, *tmp, *s2;
	unsigned int cpu;

	if (!cpumask_subset(mask, cpu_possible_mask))
		return -EINVAL;

	if (current_user_ns() != &init_user_ns)
		return -EINVAL;

	if (task_active_pid_ns(current) != &init_pid_ns)
		return -EINVAL;

	if (isadd == REGISTER) {
		for_each_cpu(cpu, mask) {
			s = kmalloc_node(sizeof(struct listener),
					GFP_KERNEL, cpu_to_node(cpu));
			if (!s)
				goto cleanup;

			s->pid = pid;
			s->valid = 1;

			listeners = &per_cpu(listener_array, cpu);
			down_write(&listeners->sem);
			list_for_each_entry(s2, &listeners->list, list) {
				if (s2->pid == pid && s2->valid)
					goto exists;
			}
			list_add(&s->list, &listeners->list);
			s = NULL;
exists:
			up_write(&listeners->sem);
			kfree(s); /* nop if NULL */
		}
		return 0;
	}

	/* Deregister or cleanup */
cleanup:
	for_each_cpu(cpu, mask) {
		listeners = &per_cpu(listener_array, cpu);
		down_write(&listeners->sem);
		list_for_each_entry_safe(s, tmp, &listeners->list, list) {
			if (s->pid == pid) {
				list_del(&s->list);
				kfree(s);
				break;
			}
		}
		up_write(&listeners->sem);
	}
	return 0;
}

static int parse(struct nlattr *na, struct cpumask *mask)
{
	char *data;
	int len;
	int ret;

	if (na == NULL)
		return 1;
	len = nla_len(na);
	if (len > TASKSTATS_CPUMASK_MAXLEN)
		return -E2BIG;
	if (len < 1)
		return -EINVAL;
	data = kmalloc(len, GFP_KERNEL);
	if (!data)
		return -ENOMEM;
	nla_strlcpy(data, na, len);
	ret = cpulist_parse(data, mask);
	kfree(data);
	return ret;
}

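/*
 * The cpumask attribute is an ASCII cpulist in the format accepted by
 * cpulist_parse(), e.g. "0-3,8" for CPUs 0, 1, 2, 3 and 8.
 */
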
#if defined(CONFIG_64BIT) && !defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
#define TASKSTATS_NEEDS_PADDING 1
#endif

static struct taskstats *mk_reply(struct sk_buff *skb, int type, u32 pid)
{
	struct nlattr *na, *ret;
	int aggr;

	aggr = (type == TASKSTATS_TYPE_PID)
			? TASKSTATS_TYPE_AGGR_PID
			: TASKSTATS_TYPE_AGGR_TGID;

	/*
	 * The taskstats structure is internally aligned on 8 byte
	 * boundaries but the layout of the aggregate reply, with
	 * two NLA headers and the pid (each 4 bytes), actually
	 * forces the entire structure to be unaligned. This causes
	 * the kernel to issue unaligned access warnings on some
	 * architectures like ia64. Unfortunately, some software out there
	 * doesn't properly unroll the NLA packet and assumes that the start
	 * of the taskstats structure will always be 20 bytes from the start
	 * of the netlink payload. Aligning the start of the taskstats
	 * structure breaks this software, which we don't want. So, for now
	 * the alignment only happens on architectures that require it
	 * and those users will have to update to fixed versions of those
	 * packages. Space is reserved in the packet only when needed.
	 * This ifdef should be removed in several years e.g. 2012 once
	 * we can be confident that fixed versions are installed on most
	 * systems. We add the padding before the aggregate since the
	 * aggregate is already a defined type.
	 */
#ifdef TASKSTATS_NEEDS_PADDING
	if (nla_put(skb, TASKSTATS_TYPE_NULL, 0, NULL) < 0)
		goto err;
#endif
	na = nla_nest_start(skb, aggr);
	if (!na)
		goto err;

	if (nla_put(skb, type, sizeof(pid), &pid) < 0)
		goto err;
	ret = nla_reserve(skb, TASKSTATS_TYPE_STATS, sizeof(struct taskstats));
	if (!ret)
		goto err;
	nla_nest_end(skb, na);

	return nla_data(ret);
err:
	return NULL;
}

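/*
 * For the unpadded case the payload built above looks like this (byte
 * offsets within the netlink payload, genlmsghdr first):
 *
 *	 0	genlmsghdr				(4 bytes)
 *	 4	nlattr TASKSTATS_TYPE_AGGR_PID/TGID	(nest header, 4 bytes)
 *	 8	nlattr TASKSTATS_TYPE_PID/TGID		(4 bytes)
 *	12	u32 pid/tgid				(4 bytes)
 *	16	nlattr TASKSTATS_TYPE_STATS		(4 bytes)
 *	20	struct taskstats
 *
 * i.e. the struct lands at the 20-byte offset some userspace hardcodes,
 * and 20 % 8 != 0 is exactly the misalignment described above; the
 * 4-byte TASKSTATS_TYPE_NULL pad moves it to offset 24, which is
 * 8-byte aligned.
 */
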
static int cgroupstats_user_cmd(struct sk_buff *skb, struct genl_info *info)
{
	int rc = 0;
	struct sk_buff *rep_skb;
	struct cgroupstats *stats;
	struct nlattr *na;
	size_t size;
	u32 fd;
	struct fd f;

	na = info->attrs[CGROUPSTATS_CMD_ATTR_FD];
	if (!na)
		return -EINVAL;

	fd = nla_get_u32(info->attrs[CGROUPSTATS_CMD_ATTR_FD]);
	f = fdget(fd);
	if (!f.file)
		return 0;

	size = nla_total_size(sizeof(struct cgroupstats));

	rc = prepare_reply(info, CGROUPSTATS_CMD_NEW, &rep_skb,
				size);
	if (rc < 0)
		goto err;

	na = nla_reserve(rep_skb, CGROUPSTATS_TYPE_CGROUP_STATS,
				sizeof(struct cgroupstats));
	if (na == NULL) {
		nlmsg_free(rep_skb);
		rc = -EMSGSIZE;
		goto err;
	}

	stats = nla_data(na);
	memset(stats, 0, sizeof(*stats));

	rc = cgroupstats_build(stats, f.file->f_dentry);
	if (rc < 0) {
		nlmsg_free(rep_skb);
		goto err;
	}

	rc = send_reply(rep_skb, info);

err:
	fdput(f);
	return rc;
}

static int cmd_attr_register_cpumask(struct genl_info *info)
{
	cpumask_var_t mask;
	int rc;

	if (!alloc_cpumask_var(&mask, GFP_KERNEL))
		return -ENOMEM;
	rc = parse(info->attrs[TASKSTATS_CMD_ATTR_REGISTER_CPUMASK], mask);
	if (rc < 0)
		goto out;
	rc = add_del_listener(info->snd_portid, mask, REGISTER);
out:
	free_cpumask_var(mask);
	return rc;
}

static int cmd_attr_deregister_cpumask(struct genl_info *info)
{
	cpumask_var_t mask;
	int rc;

	if (!alloc_cpumask_var(&mask, GFP_KERNEL))
		return -ENOMEM;
	rc = parse(info->attrs[TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK], mask);
	if (rc < 0)
		goto out;
	rc = add_del_listener(info->snd_portid, mask, DEREGISTER);
out:
	free_cpumask_var(mask);
	return rc;
}

static size_t taskstats_packet_size(void)
{
	size_t size;

	size = nla_total_size(sizeof(u32)) +
		nla_total_size(sizeof(struct taskstats)) + nla_total_size(0);
#ifdef TASKSTATS_NEEDS_PADDING
	size += nla_total_size(0); /* Padding for alignment */
#endif
	return size;
}

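/*
 * The three nla_total_size() terms mirror exactly what mk_reply() emits:
 * the u32 pid/tgid attribute, the taskstats payload attribute and the
 * zero-length aggregate nest header (plus, when TASKSTATS_NEEDS_PADDING,
 * one more empty attribute for the alignment pad).
 */
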
static int cmd_attr_pid(struct genl_info *info)
{
	struct taskstats *stats;
	struct sk_buff *rep_skb;
	size_t size;
	u32 pid;
	int rc;

	size = taskstats_packet_size();

	rc = prepare_reply(info, TASKSTATS_CMD_NEW, &rep_skb, size);
	if (rc < 0)
		return rc;

	rc = -EINVAL;
	pid = nla_get_u32(info->attrs[TASKSTATS_CMD_ATTR_PID]);
	stats = mk_reply(rep_skb, TASKSTATS_TYPE_PID, pid);
	if (!stats)
		goto err;

	rc = fill_stats_for_pid(pid, stats);
	if (rc < 0)
		goto err;
	return send_reply(rep_skb, info);
err:
	nlmsg_free(rep_skb);
	return rc;
}

static int cmd_attr_tgid(struct genl_info *info)
{
	struct taskstats *stats;
	struct sk_buff *rep_skb;
	size_t size;
	u32 tgid;
	int rc;

	size = taskstats_packet_size();

	rc = prepare_reply(info, TASKSTATS_CMD_NEW, &rep_skb, size);
	if (rc < 0)
		return rc;

	rc = -EINVAL;
	tgid = nla_get_u32(info->attrs[TASKSTATS_CMD_ATTR_TGID]);
	stats = mk_reply(rep_skb, TASKSTATS_TYPE_TGID, tgid);
	if (!stats)
		goto err;

	rc = fill_stats_for_tgid(tgid, stats);
	if (rc < 0)
		goto err;
	return send_reply(rep_skb, info);
err:
	nlmsg_free(rep_skb);
	return rc;
}

static int taskstats_user_cmd(struct sk_buff *skb, struct genl_info *info)
{
	if (info->attrs[TASKSTATS_CMD_ATTR_REGISTER_CPUMASK])
		return cmd_attr_register_cpumask(info);
	else if (info->attrs[TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK])
		return cmd_attr_deregister_cpumask(info);
	else if (info->attrs[TASKSTATS_CMD_ATTR_PID])
		return cmd_attr_pid(info);
	else if (info->attrs[TASKSTATS_CMD_ATTR_TGID])
		return cmd_attr_tgid(info);
	else
		return -EINVAL;
}

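/*
 * A rough userspace sketch of driving TASKSTATS_CMD_GET, using libnl-3
 * (illustration only, assumed library, error handling omitted):
 *
 *	struct nl_sock *sk = nl_socket_alloc();
 *	genl_connect(sk);
 *	int fam = genl_ctrl_resolve(sk, TASKSTATS_GENL_NAME);
 *	struct nl_msg *msg = nlmsg_alloc();
 *	genlmsg_put(msg, NL_AUTO_PORT, NL_AUTO_SEQ, fam, 0, 0,
 *		    TASKSTATS_CMD_GET, TASKSTATS_GENL_VERSION);
 *	nla_put_u32(msg, TASKSTATS_CMD_ATTR_PID, getpid());
 *	nl_send_auto(sk, msg);
 */
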
static struct taskstats *taskstats_tgid_alloc(struct task_struct *tsk)
{
	struct signal_struct *sig = tsk->signal;
	struct taskstats *stats;

	if (sig->stats || thread_group_empty(tsk))
		goto ret;

	/* No problem if kmem_cache_zalloc() fails */
	stats = kmem_cache_zalloc(taskstats_cache, GFP_KERNEL);

	spin_lock_irq(&tsk->sighand->siglock);
	if (!sig->stats) {
		sig->stats = stats;
		stats = NULL;
	}
	spin_unlock_irq(&tsk->sighand->siglock);

	if (stats)
		kmem_cache_free(taskstats_cache, stats);
ret:
	return sig->stats;
}

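/*
 * The allocation above is deliberately racy-but-safe: the zeroed stats
 * block is allocated without the siglock, installed under it only if no
 * other thread won the race, and freed again otherwise. A failed
 * allocation simply leaves sig->stats NULL, which callers tolerate.
 */
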
/* Send pid data out on exit */
void taskstats_exit(struct task_struct *tsk, int group_dead)
{
	int rc;
	struct listener_list *listeners;
	struct taskstats *stats;
	struct sk_buff *rep_skb;
	size_t size;
	int is_thread_group;

	if (!family_registered)
		return;

	/*
	 * Size includes space for nested attributes
	 */
	size = taskstats_packet_size();

	is_thread_group = !!taskstats_tgid_alloc(tsk);
	if (is_thread_group) {
		/* PID + STATS + TGID + STATS */
		size = 2 * size;
		/* fill the tsk->signal->stats structure */
		fill_tgid_exit(tsk);
	}

	listeners = __this_cpu_ptr(&listener_array);
	if (list_empty(&listeners->list))
		return;

	rc = prepare_reply(NULL, TASKSTATS_CMD_NEW, &rep_skb, size);
	if (rc < 0)
		return;

	stats = mk_reply(rep_skb, TASKSTATS_TYPE_PID,
			 task_pid_nr_ns(tsk, &init_pid_ns));
	if (!stats)
		goto err;

	fill_stats(&init_user_ns, &init_pid_ns, tsk, stats);

	/*
	 * Doesn't matter if tsk is the leader or the last group member leaving
	 */
	if (!is_thread_group || !group_dead)
		goto send;

	stats = mk_reply(rep_skb, TASKSTATS_TYPE_TGID,
			 task_tgid_nr_ns(tsk, &init_pid_ns));
	if (!stats)
		goto err;

	memcpy(stats, tsk->signal->stats, sizeof(*stats));

send:
	send_cpu_listeners(rep_skb, listeners);
	return;
err:
	nlmsg_free(rep_skb);
}

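/*
 * So an exit event carries one PID record for the exiting task and, when
 * the last member of a thread group goes (group_dead), a second TGID
 * record holding the accumulated per-group stats, hence the doubled
 * packet size reserved above.
 */
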
static struct genl_ops taskstats_ops = {
	.cmd		= TASKSTATS_CMD_GET,
	.doit		= taskstats_user_cmd,
	.policy		= taskstats_cmd_get_policy,
	.flags		= GENL_ADMIN_PERM,
};

static struct genl_ops cgroupstats_ops = {
	.cmd		= CGROUPSTATS_CMD_GET,
	.doit		= cgroupstats_user_cmd,
	.policy		= cgroupstats_cmd_get_policy,
};

/* Needed early in initialization */
void __init taskstats_init_early(void)
{
	unsigned int i;

	taskstats_cache = KMEM_CACHE(taskstats, SLAB_PANIC);
	for_each_possible_cpu(i) {
		INIT_LIST_HEAD(&(per_cpu(listener_array, i).list));
		init_rwsem(&(per_cpu(listener_array, i).sem));
	}
}

static int __init taskstats_init(void)
{
	int rc;

	rc = genl_register_family(&family);
	if (rc)
		return rc;

	rc = genl_register_ops(&family, &taskstats_ops);
	if (rc < 0)
		goto err;

	rc = genl_register_ops(&family, &cgroupstats_ops);
	if (rc < 0)
		goto err_cgroup_ops;

	family_registered = 1;
	pr_info("registered taskstats version %d\n", TASKSTATS_GENL_VERSION);
	return 0;
err_cgroup_ops:
	genl_unregister_ops(&family, &taskstats_ops);
err:
	genl_unregister_family(&family);
	return rc;
}

/*
 * late initcall ensures initialization of statistics collection
 * mechanisms precedes initialization of the taskstats interface
 */
late_initcall(taskstats_init);