process.c

#include <linux/mm.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/sched.h>

/* Slab cache and object size for per-task extended (FPU) state. */
struct kmem_cache *task_xstate_cachep = NULL;
unsigned int xstate_size;
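
/*
 * Fork-time hook: copy the whole task_struct, then give the child its own
 * xstate buffer if the parent carries extended (FPU) state.
 */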
int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
{
        *dst = *src;

        if (src->thread.xstate) {
                dst->thread.xstate = kmem_cache_alloc(task_xstate_cachep,
                                                      GFP_KERNEL);
                if (!dst->thread.xstate)
                        return -ENOMEM;
                memcpy(dst->thread.xstate, src->thread.xstate, xstate_size);
        }

        return 0;
}
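
/* Release a task's extended state back to the slab cache, if it has any. */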
void free_thread_xstate(struct task_struct *tsk)
{
        if (tsk->thread.xstate) {
                kmem_cache_free(task_xstate_cachep, tsk->thread.xstate);
                tsk->thread.xstate = NULL;
        }
}
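
/*
 * thread_info allocation: when THREAD_SIZE is below a page
 * (THREAD_SHIFT < PAGE_SHIFT), thread_info comes from a dedicated slab
 * cache; otherwise whole pages are used (see the #else branch below).
 */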
#if THREAD_SHIFT < PAGE_SHIFT
static struct kmem_cache *thread_info_cache;

struct thread_info *alloc_thread_info(struct task_struct *tsk)
{
        struct thread_info *ti;

        ti = kmem_cache_alloc(thread_info_cache, GFP_KERNEL);
        if (unlikely(ti == NULL))
                return NULL;
#ifdef CONFIG_DEBUG_STACK_USAGE
        memset(ti, 0, THREAD_SIZE);
#endif
        return ti;
}

void free_thread_info(struct thread_info *ti)
{
        free_thread_xstate(ti->task);
        kmem_cache_free(thread_info_cache, ti);
}

void thread_info_cache_init(void)
{
        thread_info_cache = kmem_cache_create("thread_info", THREAD_SIZE,
                                              THREAD_SIZE, SLAB_PANIC, NULL);
}
#else
struct thread_info *alloc_thread_info(struct task_struct *tsk)
{
#ifdef CONFIG_DEBUG_STACK_USAGE
        gfp_t mask = GFP_KERNEL | __GFP_ZERO;
#else
        gfp_t mask = GFP_KERNEL;
#endif
        return (struct thread_info *)__get_free_pages(mask, THREAD_SIZE_ORDER);
}

void free_thread_info(struct thread_info *ti)
{
        free_thread_xstate(ti->task);
        free_pages((unsigned long)ti, THREAD_SIZE_ORDER);
}
#endif /* THREAD_SHIFT < PAGE_SHIFT */
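
/*
 * Create the xstate slab cache; a zero xstate_size means there is no FPU
 * state to track, so no cache is needed.
 */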
void arch_task_cache_init(void)
{
        if (!xstate_size)
                return;

        task_xstate_cachep = kmem_cache_create("task_xstate", xstate_size,
                                               __alignof__(union thread_xstate),
                                               SLAB_PANIC | SLAB_NOTRACK, NULL);
}
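
/*
 * HAVE_SOFTFP mirrors CONFIG_SH_FPU_EMU: with the FPU emulator built in,
 * tasks still need room for a software FP register image, so
 * init_thread_xstate() sizes the per-task xstate area accordingly.
 */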
#ifdef CONFIG_SH_FPU_EMU
# define HAVE_SOFTFP	1
#else
# define HAVE_SOFTFP	0
#endif

void __cpuinit init_thread_xstate(void)
{
        if (boot_cpu_data.flags & CPU_HAS_FPU)
                xstate_size = sizeof(struct sh_fpu_hard_struct);
        else if (HAVE_SOFTFP)
                xstate_size = sizeof(struct sh_fpu_soft_struct);
        else
                xstate_size = 0;
}