process.c

#include <linux/mm.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/slab.h>	/* kmem_cache_create()/alloc()/free() */

struct kmem_cache *task_xstate_cachep = NULL;
unsigned int xstate_size;
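
/*
 * Copy the parent's task_struct into the child and, if the parent carries
 * extended FPU state (xstate), allocate a private buffer for the child and
 * duplicate that state so the two tasks never share one buffer.
 */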
int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
{
	*dst = *src;

	if (src->thread.xstate) {
		dst->thread.xstate = kmem_cache_alloc(task_xstate_cachep,
						      GFP_KERNEL);
		if (!dst->thread.xstate)
			return -ENOMEM;
		memcpy(dst->thread.xstate, src->thread.xstate, xstate_size);
	}

	return 0;
}
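
/* Release a task's xstate buffer back to the task_xstate slab cache. */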
void free_thread_xstate(struct task_struct *tsk)
{
	if (tsk->thread.xstate) {
		kmem_cache_free(task_xstate_cachep, tsk->thread.xstate);
		tsk->thread.xstate = NULL;
	}
}
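
/*
 * thread_info allocation: when THREAD_SIZE is smaller than a page, a
 * dedicated slab cache is used; otherwise whole pages come straight from
 * the page allocator.  With CONFIG_DEBUG_STACK_USAGE the area is zeroed
 * so stack usage can be measured later.
 */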
#if THREAD_SHIFT < PAGE_SHIFT
static struct kmem_cache *thread_info_cache;

struct thread_info *alloc_thread_info(struct task_struct *tsk)
{
	struct thread_info *ti;

	ti = kmem_cache_alloc(thread_info_cache, GFP_KERNEL);
	if (unlikely(ti == NULL))
		return NULL;
#ifdef CONFIG_DEBUG_STACK_USAGE
	memset(ti, 0, THREAD_SIZE);
#endif
	return ti;
}

void free_thread_info(struct thread_info *ti)
{
	free_thread_xstate(ti->task);
	kmem_cache_free(thread_info_cache, ti);
}

void thread_info_cache_init(void)
{
	thread_info_cache = kmem_cache_create("thread_info", THREAD_SIZE,
					      THREAD_SIZE, SLAB_PANIC, NULL);
}
#else
struct thread_info *alloc_thread_info(struct task_struct *tsk)
{
#ifdef CONFIG_DEBUG_STACK_USAGE
	gfp_t mask = GFP_KERNEL | __GFP_ZERO;
#else
	gfp_t mask = GFP_KERNEL;
#endif
	return (struct thread_info *)__get_free_pages(mask, THREAD_SIZE_ORDER);
}

void free_thread_info(struct thread_info *ti)
{
	free_thread_xstate(ti->task);
	free_pages((unsigned long)ti, THREAD_SIZE_ORDER);
}
#endif /* THREAD_SHIFT < PAGE_SHIFT */
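
/*
 * Create the slab cache for xstate buffers.  Skipped when xstate_size is 0,
 * i.e. the CPU has no hardware FPU and no FPU emulation is configured.
 */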
void arch_task_cache_init(void)
{
	if (!xstate_size)
		return;

	task_xstate_cachep = kmem_cache_create("task_xstate", xstate_size,
					       __alignof__(union thread_xstate),
					       SLAB_PANIC | SLAB_NOTRACK, NULL);
}
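
/*
 * HAVE_SOFTFP flags whether FPU emulation (CONFIG_SH_FPU_EMU) is built in.
 * init_thread_xstate() then sizes the xstate buffer accordingly: hardware
 * FPU state if the boot CPU has an FPU, software FPU state under emulation,
 * otherwise none.
 */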
#ifdef CONFIG_SH_FPU_EMU
# define HAVE_SOFTFP	1
#else
# define HAVE_SOFTFP	0
#endif

void init_thread_xstate(void)
{
	if (boot_cpu_data.flags & CPU_HAS_FPU)
		xstate_size = sizeof(struct sh_fpu_hard_struct);
	else if (HAVE_SOFTFP)
		xstate_size = sizeof(struct sh_fpu_soft_struct);
	else
		xstate_size = 0;
}