/*
 *  linux/kernel/profile.c
 *  Simple profiling. Manages a direct-mapped profile hit count buffer,
 *  with configurable resolution, support for restricting the cpus on
 *  which profiling is done, and switching between cpu time and
 *  schedule() calls via kernel command line parameters passed at boot.
 *
 *  Scheduler profiling support, Arjan van de Ven and Ingo Molnar,
 *	Red Hat, July 2004
 *  Consolidation of architecture support code for profiling,
 *	William Irwin, Oracle, July 2004
 *  Amortized hit count accounting via per-cpu open-addressed hashtables
 *	to resolve timer interrupt livelocks, William Irwin, Oracle, 2004
 */

#include <linux/module.h>
#include <linux/profile.h>
#include <linux/bootmem.h>
#include <linux/notifier.h>
#include <linux/mm.h>
#include <linux/cpumask.h>
#include <linux/cpu.h>
#include <linux/highmem.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <asm/sections.h>
#include <asm/irq_regs.h>
#include <asm/ptrace.h>

struct profile_hit {
	u32 pc, hits;
};
#define PROFILE_GRPSHIFT 3
#define PROFILE_GRPSZ (1 << PROFILE_GRPSHIFT)
#define NR_PROFILE_HIT (PAGE_SIZE/sizeof(struct profile_hit))
#define NR_PROFILE_GRP (NR_PROFILE_HIT/PROFILE_GRPSZ)

/* Oprofile timer tick hook */
static int (*timer_hook)(struct pt_regs *) __read_mostly;

static atomic_t *prof_buffer;
static unsigned long prof_len, prof_shift;

int prof_on __read_mostly;
EXPORT_SYMBOL_GPL(prof_on);

static cpumask_t prof_cpu_mask = CPU_MASK_ALL;
#ifdef CONFIG_SMP
static DEFINE_PER_CPU(struct profile_hit *[2], cpu_profile_hits);
static DEFINE_PER_CPU(int, cpu_profile_flip);
static DEFINE_MUTEX(profile_flip_mutex);
#endif /* CONFIG_SMP */

int profile_setup(char *str)
{
	static char schedstr[] = "schedule";
	static char sleepstr[] = "sleep";
	static char kvmstr[] = "kvm";
	int par;

	if (!strncmp(str, sleepstr, strlen(sleepstr))) {
#ifdef CONFIG_SCHEDSTATS
		prof_on = SLEEP_PROFILING;
		if (str[strlen(sleepstr)] == ',')
			str += strlen(sleepstr) + 1;
		if (get_option(&str, &par))
			prof_shift = par;
		printk(KERN_INFO
			"kernel sleep profiling enabled (shift: %ld)\n",
			prof_shift);
#else
		printk(KERN_WARNING
			"kernel sleep profiling requires CONFIG_SCHEDSTATS\n");
#endif /* CONFIG_SCHEDSTATS */
	} else if (!strncmp(str, schedstr, strlen(schedstr))) {
		prof_on = SCHED_PROFILING;
		if (str[strlen(schedstr)] == ',')
			str += strlen(schedstr) + 1;
		if (get_option(&str, &par))
			prof_shift = par;
		printk(KERN_INFO
			"kernel schedule profiling enabled (shift: %ld)\n",
			prof_shift);
	} else if (!strncmp(str, kvmstr, strlen(kvmstr))) {
		prof_on = KVM_PROFILING;
		if (str[strlen(kvmstr)] == ',')
			str += strlen(kvmstr) + 1;
		if (get_option(&str, &par))
			prof_shift = par;
		printk(KERN_INFO
			"kernel KVM profiling enabled (shift: %ld)\n",
			prof_shift);
	} else if (get_option(&str, &par)) {
		prof_shift = par;
		prof_on = CPU_PROFILING;
		printk(KERN_INFO "kernel profiling enabled (shift: %ld)\n",
			prof_shift);
	}
	return 1;
}
__setup("profile=", profile_setup);
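
/*
 * Boot-line examples (illustrative, matching the parsing above):
 * "profile=2" enables cpu-time profiling with prof_shift = 2, i.e. one
 * counter per 4 bytes of kernel text, while "profile=schedule,4" enables
 * schedule() profiling at one counter per 16 bytes.  The shift trades
 * profile buffer size against resolution.
 */
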
int __ref profile_init(void)
{
	int buffer_bytes;

	if (!prof_on)
		return 0;

	/* only text is profiled */
	prof_len = (_etext - _stext) >> prof_shift;
	buffer_bytes = prof_len * sizeof(atomic_t);

	if (!slab_is_available()) {
		prof_buffer = alloc_bootmem(buffer_bytes);
		return 0;
	}

	prof_buffer = kzalloc(buffer_bytes, GFP_KERNEL);
	if (prof_buffer)
		return 0;

	prof_buffer = alloc_pages_exact(buffer_bytes, GFP_KERNEL|__GFP_ZERO);
	if (prof_buffer)
		return 0;

	prof_buffer = vmalloc(buffer_bytes);
	if (prof_buffer)
		return 0;

	return -ENOMEM;
}
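
/*
 * Sizing sketch (illustrative numbers only): with 8 MB of kernel text
 * and prof_shift = 2, prof_len is (8 MB >> 2) = 2M slots, i.e. an 8 MB
 * buffer at the usual 4-byte atomic_t.  Buffers that size can exceed
 * what kzalloc will hand out, hence the fallback chain above through
 * alloc_pages_exact to vmalloc.
 */
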
/* Profile event notifications */

static BLOCKING_NOTIFIER_HEAD(task_exit_notifier);
static ATOMIC_NOTIFIER_HEAD(task_free_notifier);
static BLOCKING_NOTIFIER_HEAD(munmap_notifier);

void profile_task_exit(struct task_struct *task)
{
	blocking_notifier_call_chain(&task_exit_notifier, 0, task);
}

int profile_handoff_task(struct task_struct *task)
{
	int ret;
	ret = atomic_notifier_call_chain(&task_free_notifier, 0, task);
	return (ret == NOTIFY_OK) ? 1 : 0;
}

void profile_munmap(unsigned long addr)
{
	blocking_notifier_call_chain(&munmap_notifier, 0, (void *)addr);
}

int task_handoff_register(struct notifier_block *n)
{
	return atomic_notifier_chain_register(&task_free_notifier, n);
}
EXPORT_SYMBOL_GPL(task_handoff_register);

int task_handoff_unregister(struct notifier_block *n)
{
	return atomic_notifier_chain_unregister(&task_free_notifier, n);
}
EXPORT_SYMBOL_GPL(task_handoff_unregister);

int profile_event_register(enum profile_type type, struct notifier_block *n)
{
	int err = -EINVAL;

	switch (type) {
	case PROFILE_TASK_EXIT:
		err = blocking_notifier_chain_register(
				&task_exit_notifier, n);
		break;
	case PROFILE_MUNMAP:
		err = blocking_notifier_chain_register(
				&munmap_notifier, n);
		break;
	}
	return err;
}
EXPORT_SYMBOL_GPL(profile_event_register);

int profile_event_unregister(enum profile_type type, struct notifier_block *n)
{
	int err = -EINVAL;

	switch (type) {
	case PROFILE_TASK_EXIT:
		err = blocking_notifier_chain_unregister(
				&task_exit_notifier, n);
		break;
	case PROFILE_MUNMAP:
		err = blocking_notifier_chain_unregister(
				&munmap_notifier, n);
		break;
	}
	return err;
}
EXPORT_SYMBOL_GPL(profile_event_unregister);

int register_timer_hook(int (*hook)(struct pt_regs *))
{
	if (timer_hook)
		return -EBUSY;
	timer_hook = hook;
	return 0;
}
EXPORT_SYMBOL_GPL(register_timer_hook);

void unregister_timer_hook(int (*hook)(struct pt_regs *))
{
	WARN_ON(hook != timer_hook);
	timer_hook = NULL;
	/* make sure all CPUs see the NULL hook */
	synchronize_sched();  /* Allow ongoing interrupts to complete. */
}
EXPORT_SYMBOL_GPL(unregister_timer_hook);
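
/*
 * Registration sketch (hypothetical client; oprofile is the in-tree
 * user of this hook):
 *
 *	static int my_timer_notify(struct pt_regs *regs)
 *	{
 *		... sample regs ...
 *		return 0;
 *	}
 *
 *	err = register_timer_hook(my_timer_notify);
 *	...
 *	unregister_timer_hook(my_timer_notify);
 *
 * Only one hook can be installed at a time; a second registration
 * fails with -EBUSY until the first is removed.
 */
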
#ifdef CONFIG_SMP
/*
 * Each cpu has a pair of open-addressed hashtables for pending
 * profile hits. read_profile() IPI's all cpus to request them
 * to flip buffers and flushes their contents to prof_buffer itself.
 * Flip requests are serialized by the profile_flip_mutex. The sole
 * use of having a second hashtable is for avoiding cacheline
 * contention that would otherwise happen during flushes of pending
 * profile hits required for the accuracy of reported profile hits
 * and so resurrect the interrupt livelock issue.
 *
 * The open-addressed hashtables are indexed by profile buffer slot
 * and hold the number of pending hits to that profile buffer slot on
 * a cpu in an entry. When the hashtable overflows, all pending hits
 * are accounted to their corresponding profile buffer slots with
 * atomic_add() and the hashtable emptied. As numerous pending hits
 * may be accounted to a profile buffer slot in a hashtable entry,
 * this amortizes a number of atomic profile buffer increments likely
 * to be far larger than the number of entries in the hashtable,
 * particularly given that the number of distinct profile buffer
 * positions to which hits are accounted during short intervals (e.g.
 * several seconds) is usually very small. Exclusion from buffer
 * flipping is provided by interrupt disablement (note that for
 * SCHED_PROFILING or SLEEP_PROFILING profile_hit() may be called from
 * process context).
 *
 * The hash function is meant to be lightweight as opposed to strong,
 * and was vaguely inspired by ppc64 firmware-supported inverted
 * pagetable hash functions, but uses a full hashtable full of finite
 * collision chains, not just pairs of them.
 *
 * -- wli
 */
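
/*
 * Concrete sizes, assuming a 4 KB PAGE_SIZE (illustrative): struct
 * profile_hit is 8 bytes, so each per-cpu hashtable is one page of
 * NR_PROFILE_HIT = 512 entries in NR_PROFILE_GRP = 64 groups of
 * PROFILE_GRPSZ = 8 slots.  profile_hits() below scans a hit's primary
 * group for a matching pc or a free slot, then steps by the secondary
 * stride; if it wraps back around to the primary group, the whole
 * table is flushed into prof_buffer and emptied.
 */
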
static void __profile_flip_buffers(void *unused)
{
	int cpu = smp_processor_id();

	per_cpu(cpu_profile_flip, cpu) = !per_cpu(cpu_profile_flip, cpu);
}

static void profile_flip_buffers(void)
{
	int i, j, cpu;

	mutex_lock(&profile_flip_mutex);
	j = per_cpu(cpu_profile_flip, get_cpu());
	put_cpu();
	on_each_cpu(__profile_flip_buffers, NULL, 1);
	for_each_online_cpu(cpu) {
		struct profile_hit *hits = per_cpu(cpu_profile_hits, cpu)[j];
		for (i = 0; i < NR_PROFILE_HIT; ++i) {
			if (!hits[i].hits) {
				if (hits[i].pc)
					hits[i].pc = 0;
				continue;
			}
			atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
			hits[i].hits = hits[i].pc = 0;
		}
	}
	mutex_unlock(&profile_flip_mutex);
}

static void profile_discard_flip_buffers(void)
{
	int i, cpu;

	mutex_lock(&profile_flip_mutex);
	i = per_cpu(cpu_profile_flip, get_cpu());
	put_cpu();
	on_each_cpu(__profile_flip_buffers, NULL, 1);
	for_each_online_cpu(cpu) {
		struct profile_hit *hits = per_cpu(cpu_profile_hits, cpu)[i];
		memset(hits, 0, NR_PROFILE_HIT*sizeof(struct profile_hit));
	}
	mutex_unlock(&profile_flip_mutex);
}

void profile_hits(int type, void *__pc, unsigned int nr_hits)
{
	unsigned long primary, secondary, flags, pc = (unsigned long)__pc;
	int i, j, cpu;
	struct profile_hit *hits;

	if (prof_on != type || !prof_buffer)
		return;
	pc = min((pc - (unsigned long)_stext) >> prof_shift, prof_len - 1);
	i = primary = (pc & (NR_PROFILE_GRP - 1)) << PROFILE_GRPSHIFT;
	secondary = (~(pc << 1) & (NR_PROFILE_GRP - 1)) << PROFILE_GRPSHIFT;
	cpu = get_cpu();
	hits = per_cpu(cpu_profile_hits, cpu)[per_cpu(cpu_profile_flip, cpu)];
	if (!hits) {
		put_cpu();
		return;
	}
	/*
	 * We buffer the global profiler buffer into a per-CPU
	 * queue and thus reduce the number of global (and possibly
	 * NUMA-alien) accesses. The write-queue is self-coalescing:
	 */
	local_irq_save(flags);
	do {
		for (j = 0; j < PROFILE_GRPSZ; ++j) {
			if (hits[i + j].pc == pc) {
				hits[i + j].hits += nr_hits;
				goto out;
			} else if (!hits[i + j].hits) {
				hits[i + j].pc = pc;
				hits[i + j].hits = nr_hits;
				goto out;
			}
		}
		i = (i + secondary) & (NR_PROFILE_HIT - 1);
	} while (i != primary);

	/*
	 * Add the current hit(s) and flush the write-queue out
	 * to the global buffer:
	 */
	atomic_add(nr_hits, &prof_buffer[pc]);
	for (i = 0; i < NR_PROFILE_HIT; ++i) {
		atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
		hits[i].pc = hits[i].hits = 0;
	}
out:
	local_irq_restore(flags);
	put_cpu();
}
static int __devinit profile_cpu_callback(struct notifier_block *info,
					unsigned long action, void *__cpu)
{
	int node, cpu = (unsigned long)__cpu;
	struct page *page;

	switch (action) {
	case CPU_UP_PREPARE:
	case CPU_UP_PREPARE_FROZEN:
		node = cpu_to_node(cpu);
		per_cpu(cpu_profile_flip, cpu) = 0;
		if (!per_cpu(cpu_profile_hits, cpu)[1]) {
			page = alloc_pages_node(node,
					GFP_KERNEL | __GFP_ZERO,
					0);
			if (!page)
				return NOTIFY_BAD;
			per_cpu(cpu_profile_hits, cpu)[1] = page_address(page);
		}
		if (!per_cpu(cpu_profile_hits, cpu)[0]) {
			page = alloc_pages_node(node,
					GFP_KERNEL | __GFP_ZERO,
					0);
			if (!page)
				goto out_free;
			per_cpu(cpu_profile_hits, cpu)[0] = page_address(page);
		}
		break;
out_free:
		page = virt_to_page(per_cpu(cpu_profile_hits, cpu)[1]);
		per_cpu(cpu_profile_hits, cpu)[1] = NULL;
		__free_page(page);
		return NOTIFY_BAD;
	case CPU_ONLINE:
	case CPU_ONLINE_FROZEN:
		cpu_set(cpu, prof_cpu_mask);
		break;
	case CPU_UP_CANCELED:
	case CPU_UP_CANCELED_FROZEN:
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		cpu_clear(cpu, prof_cpu_mask);
		if (per_cpu(cpu_profile_hits, cpu)[0]) {
			page = virt_to_page(per_cpu(cpu_profile_hits, cpu)[0]);
			per_cpu(cpu_profile_hits, cpu)[0] = NULL;
			__free_page(page);
		}
		if (per_cpu(cpu_profile_hits, cpu)[1]) {
			page = virt_to_page(per_cpu(cpu_profile_hits, cpu)[1]);
			per_cpu(cpu_profile_hits, cpu)[1] = NULL;
			__free_page(page);
		}
		break;
	}
	return NOTIFY_OK;
}
#else /* !CONFIG_SMP */
#define profile_flip_buffers()		do { } while (0)
#define profile_discard_flip_buffers()	do { } while (0)
#define profile_cpu_callback		NULL

void profile_hits(int type, void *__pc, unsigned int nr_hits)
{
	unsigned long pc;

	if (prof_on != type || !prof_buffer)
		return;
	pc = ((unsigned long)__pc - (unsigned long)_stext) >> prof_shift;
	atomic_add(nr_hits, &prof_buffer[min(pc, prof_len - 1)]);
}
#endif /* !CONFIG_SMP */
EXPORT_SYMBOL_GPL(profile_hits);

void profile_tick(int type)
{
	struct pt_regs *regs = get_irq_regs();

	if (type == CPU_PROFILING && timer_hook)
		timer_hook(regs);
	if (!user_mode(regs) && cpu_isset(smp_processor_id(), prof_cpu_mask))
		profile_hit(type, (void *)profile_pc(regs));
}

#ifdef CONFIG_PROC_FS
#include <linux/proc_fs.h>
#include <asm/uaccess.h>
#include <asm/ptrace.h>

static int prof_cpu_mask_read_proc(char *page, char **start, off_t off,
			int count, int *eof, void *data)
{
	int len = cpumask_scnprintf(page, count, *(cpumask_t *)data);
	if (count - len < 2)
		return -EINVAL;
	len += sprintf(page + len, "\n");
	return len;
}

static int prof_cpu_mask_write_proc(struct file *file,
	const char __user *buffer, unsigned long count, void *data)
{
	cpumask_t *mask = (cpumask_t *)data;
	unsigned long full_count = count, err;
	cpumask_t new_value;

	err = cpumask_parse_user(buffer, count, new_value);
	if (err)
		return err;

	*mask = new_value;
	return full_count;
}

void create_prof_cpu_mask(struct proc_dir_entry *root_irq_dir)
{
	struct proc_dir_entry *entry;

	/* create /proc/irq/prof_cpu_mask */
	entry = create_proc_entry("prof_cpu_mask", 0600, root_irq_dir);
	if (!entry)
		return;
	entry->data = (void *)&prof_cpu_mask;
	entry->read_proc = prof_cpu_mask_read_proc;
	entry->write_proc = prof_cpu_mask_write_proc;
}

/*
 * This function accesses profiling information. The returned data is
 * binary: the sampling step and the actual contents of the profile
 * buffer. Use of the program readprofile is recommended in order to
 * get meaningful info out of these data.
 */
static ssize_t
read_profile(struct file *file, char __user *buf, size_t count, loff_t *ppos)
{
	unsigned long p = *ppos;
	ssize_t read;
	char *pnt;
	unsigned int sample_step = 1 << prof_shift;

	profile_flip_buffers();
	if (p >= (prof_len+1)*sizeof(unsigned int))
		return 0;
	if (count > (prof_len+1)*sizeof(unsigned int) - p)
		count = (prof_len+1)*sizeof(unsigned int) - p;
	read = 0;

	/* the first sizeof(unsigned int) bytes of the file are sample_step */
	while (p < sizeof(unsigned int) && count > 0) {
		if (put_user(*((char *)(&sample_step)+p), buf))
			return -EFAULT;
		buf++; p++; count--; read++;
	}
	/* the rest is the buffer itself, offset by that header */
	pnt = (char *)prof_buffer + p - sizeof(atomic_t);
	if (copy_to_user(buf, (void *)pnt, count))
		return -EFAULT;
	read += count;
	*ppos += read;
	return read;
}
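
/*
 * Userspace sketch (assuming the readprofile(8) tool from util-linux):
 *
 *	readprofile -m /boot/System.map		# decode hits per symbol
 *	readprofile -r				# reset the counters
 *
 * The -r option simply writes to /proc/profile, which lands in
 * write_profile() below.
 */
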
/*
 * Writing to /proc/profile resets the counters
 *
 * Writing a 'profiling multiplier' value into it also re-sets the profiling
 * interrupt frequency, on architectures that support this.
 */
static ssize_t write_profile(struct file *file, const char __user *buf,
			     size_t count, loff_t *ppos)
{
#ifdef CONFIG_SMP
	extern int setup_profiling_timer(unsigned int multiplier);

	if (count == sizeof(int)) {
		unsigned int multiplier;

		if (copy_from_user(&multiplier, buf, sizeof(int)))
			return -EFAULT;

		if (setup_profiling_timer(multiplier))
			return -EINVAL;
	}
#endif
	profile_discard_flip_buffers();
	memset(prof_buffer, 0, prof_len * sizeof(atomic_t));
	return count;
}

static const struct file_operations proc_profile_operations = {
	.read		= read_profile,
	.write		= write_profile,
};

#ifdef CONFIG_SMP
static inline void profile_nop(void *unused)
{
}

static int create_hash_tables(void)
{
	int cpu;

	for_each_online_cpu(cpu) {
		int node = cpu_to_node(cpu);
		struct page *page;

		page = alloc_pages_node(node,
				GFP_KERNEL | __GFP_ZERO | GFP_THISNODE,
				0);
		if (!page)
			goto out_cleanup;
		per_cpu(cpu_profile_hits, cpu)[1]
				= (struct profile_hit *)page_address(page);
		page = alloc_pages_node(node,
				GFP_KERNEL | __GFP_ZERO | GFP_THISNODE,
				0);
		if (!page)
			goto out_cleanup;
		per_cpu(cpu_profile_hits, cpu)[0]
				= (struct profile_hit *)page_address(page);
	}
	return 0;
out_cleanup:
	prof_on = 0;
	smp_mb();
	/* quiesce all cpus so no profile_hits() is still using the pages */
	on_each_cpu(profile_nop, NULL, 1);
	for_each_online_cpu(cpu) {
		struct page *page;

		if (per_cpu(cpu_profile_hits, cpu)[0]) {
			page = virt_to_page(per_cpu(cpu_profile_hits, cpu)[0]);
			per_cpu(cpu_profile_hits, cpu)[0] = NULL;
			__free_page(page);
		}
		if (per_cpu(cpu_profile_hits, cpu)[1]) {
			page = virt_to_page(per_cpu(cpu_profile_hits, cpu)[1]);
			per_cpu(cpu_profile_hits, cpu)[1] = NULL;
			__free_page(page);
		}
	}
	return -1;
}
#else
#define create_hash_tables()			({ 0; })
#endif

int create_proc_profile(void)
{
	struct proc_dir_entry *entry;

	if (!prof_on)
		return 0;
	if (create_hash_tables())
		return -ENOMEM;
	entry = proc_create("profile", S_IWUSR | S_IRUGO,
			NULL, &proc_profile_operations);
	if (!entry)
		return 0;
	entry->size = (1 + prof_len) * sizeof(atomic_t);
	hotcpu_notifier(profile_cpu_callback, 0);
	return 0;
}
module_init(create_proc_profile);
#endif /* CONFIG_PROC_FS */