/*
 * process.c — architecture-specific process/thread management for SuperH (sh).
 *
 * Handles per-task extended FPU state (xstate) duplication and release,
 * thread_info allocation, and boot-time cache setup.
 */
  1. #include <linux/mm.h>
  2. #include <linux/kernel.h>
  3. #include <linux/slab.h>
  4. #include <linux/sched.h>
  5. #include <linux/export.h>
  6. #include <linux/stackprotector.h>
/* Slab cache backing per-task xstate buffers; created in arch_task_cache_init(). */
struct kmem_cache *task_xstate_cachep = NULL;
/* Size in bytes of the xstate save area; 0 when there is no FPU state to keep
 * (set once at boot by init_thread_xstate()). */
unsigned int xstate_size;

#ifdef CONFIG_CC_STACKPROTECTOR
/* Stack canary value used by -fstack-protector; exported so modules can
 * reference it too. */
unsigned long __stack_chk_guard __read_mostly;
EXPORT_SYMBOL(__stack_chk_guard);
#endif
  13. int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
  14. {
  15. *dst = *src;
  16. if (src->thread.xstate) {
  17. dst->thread.xstate = kmem_cache_alloc(task_xstate_cachep,
  18. GFP_KERNEL);
  19. if (!dst->thread.xstate)
  20. return -ENOMEM;
  21. memcpy(dst->thread.xstate, src->thread.xstate, xstate_size);
  22. }
  23. return 0;
  24. }
  25. void free_thread_xstate(struct task_struct *tsk)
  26. {
  27. if (tsk->thread.xstate) {
  28. kmem_cache_free(task_xstate_cachep, tsk->thread.xstate);
  29. tsk->thread.xstate = NULL;
  30. }
  31. }
#if THREAD_SHIFT < PAGE_SHIFT
/* thread_info is smaller than a page: carve allocations from a slab cache. */
static struct kmem_cache *thread_info_cache;
  34. struct thread_info *alloc_thread_info_node(struct task_struct *tsk, int node)
  35. {
  36. struct thread_info *ti;
  37. #ifdef CONFIG_DEBUG_STACK_USAGE
  38. gfp_t mask = GFP_KERNEL | __GFP_ZERO;
  39. #else
  40. gfp_t mask = GFP_KERNEL;
  41. #endif
  42. ti = kmem_cache_alloc_node(thread_info_cache, mask, node);
  43. return ti;
  44. }
  45. void free_thread_info(struct thread_info *ti)
  46. {
  47. free_thread_xstate(ti->task);
  48. kmem_cache_free(thread_info_cache, ti);
  49. }
/*
 * Boot-time creation of the thread_info slab cache.  Objects are
 * THREAD_SIZE bytes aligned to THREAD_SIZE; SLAB_PANIC makes the boot
 * fail loudly if the cache cannot be created.
 */
void thread_info_cache_init(void)
{
	thread_info_cache = kmem_cache_create("thread_info", THREAD_SIZE,
					      THREAD_SIZE, SLAB_PANIC, NULL);
}
  55. #else
  56. struct thread_info *alloc_thread_info_node(struct task_struct *tsk, int node)
  57. {
  58. #ifdef CONFIG_DEBUG_STACK_USAGE
  59. gfp_t mask = GFP_KERNEL | __GFP_ZERO;
  60. #else
  61. gfp_t mask = GFP_KERNEL;
  62. #endif
  63. struct page *page = alloc_pages_node(node, mask, THREAD_SIZE_ORDER);
  64. return page ? page_address(page) : NULL;
  65. }
  66. void free_thread_info(struct thread_info *ti)
  67. {
  68. free_thread_xstate(ti->task);
  69. free_pages((unsigned long)ti, THREAD_SIZE_ORDER);
  70. }
  71. #endif /* THREAD_SHIFT < PAGE_SHIFT */
  72. void arch_task_cache_init(void)
  73. {
  74. if (!xstate_size)
  75. return;
  76. task_xstate_cachep = kmem_cache_create("task_xstate", xstate_size,
  77. __alignof__(union thread_xstate),
  78. SLAB_PANIC | SLAB_NOTRACK, NULL);
  79. }
/* HAVE_SOFTFP: non-zero when the SH FPU emulator is built into the kernel. */
#ifdef CONFIG_SH_FPU_EMU
# define HAVE_SOFTFP 1
#else
# define HAVE_SOFTFP 0
#endif
  85. void __cpuinit init_thread_xstate(void)
  86. {
  87. if (boot_cpu_data.flags & CPU_HAS_FPU)
  88. xstate_size = sizeof(struct sh_fpu_hard_struct);
  89. else if (HAVE_SOFTFP)
  90. xstate_size = sizeof(struct sh_fpu_soft_struct);
  91. else
  92. xstate_size = 0;
  93. }