process.c

#include <linux/mm.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/sched.h>

/*
 * Per-task extended FPU state ("xstate"). init_thread_xstate() below picks
 * the size, and arch_task_cache_init() creates the backing slab cache.
 */
struct kmem_cache *task_xstate_cachep = NULL;
unsigned int xstate_size;
int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
{
	/* Start from a byte-for-byte copy of the parent's task_struct. */
	*dst = *src;

	/*
	 * The copy above duplicated the xstate pointer, so give the child
	 * its own buffer and copy the contents across.
	 */
	if (src->thread.xstate) {
		dst->thread.xstate = kmem_cache_alloc(task_xstate_cachep,
						      GFP_KERNEL);
		if (!dst->thread.xstate)
			return -ENOMEM;
		memcpy(dst->thread.xstate, src->thread.xstate, xstate_size);
	}

	return 0;
}
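/*
 * Usage sketch (an assumption drawn from the generic fork path in
 * kernel/fork.c, not from this file): dup_task_struct() invokes this hook
 * right after allocating the child's task_struct, roughly:
 *
 *	tsk = alloc_task_struct_node(node);
 *	err = arch_dup_task_struct(tsk, orig);
 *	if (err)
 *		goto free_tsk;
 *
 * so returning -ENOMEM here aborts the fork cleanly.
 */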
/* Release a task's xstate buffer, if it ever had one. */
void free_thread_xstate(struct task_struct *tsk)
{
	if (tsk->thread.xstate) {
		kmem_cache_free(task_xstate_cachep, tsk->thread.xstate);
		tsk->thread.xstate = NULL;
	}
}
#if THREAD_SHIFT < PAGE_SHIFT
/*
 * Kernel stacks are smaller than a page, so back thread_info with a
 * dedicated slab cache rather than whole pages.
 */
static struct kmem_cache *thread_info_cache;

struct thread_info *alloc_thread_info_node(struct task_struct *tsk, int node)
{
	struct thread_info *ti;
#ifdef CONFIG_DEBUG_STACK_USAGE
	/* Zeroed stacks let the debug code measure stack high-water marks. */
	gfp_t mask = GFP_KERNEL | __GFP_ZERO;
#else
	gfp_t mask = GFP_KERNEL;
#endif
	ti = kmem_cache_alloc_node(thread_info_cache, mask, node);
	return ti;
}

void free_thread_info(struct thread_info *ti)
{
	free_thread_xstate(ti->task);
	kmem_cache_free(thread_info_cache, ti);
}

void thread_info_cache_init(void)
{
	thread_info_cache = kmem_cache_create("thread_info", THREAD_SIZE,
					      THREAD_SIZE, SLAB_PANIC, NULL);
}
#else
/* Stacks take one or more whole pages; use the page allocator directly. */
struct thread_info *alloc_thread_info_node(struct task_struct *tsk, int node)
{
#ifdef CONFIG_DEBUG_STACK_USAGE
	gfp_t mask = GFP_KERNEL | __GFP_ZERO;
#else
	gfp_t mask = GFP_KERNEL;
#endif
	struct page *page = alloc_pages_node(node, mask, THREAD_SIZE_ORDER);

	return page ? page_address(page) : NULL;
}

void free_thread_info(struct thread_info *ti)
{
	free_thread_xstate(ti->task);
	free_pages((unsigned long)ti, THREAD_SIZE_ORDER);
}
#endif /* THREAD_SHIFT < PAGE_SHIFT */
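/*
 * Worked example (illustrative numbers, not taken from this file): with
 * 4 KiB pages, PAGE_SHIFT is 12. A THREAD_SHIFT of 13 gives
 * THREAD_SIZE = 8 KiB and THREAD_SIZE_ORDER = 1, so the page-allocator
 * branch above hands out two contiguous pages per kernel stack; a
 * THREAD_SHIFT below 12 would select the slab-backed branch instead.
 */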
void arch_task_cache_init(void)
{
	/* No FPU state to track on this CPU, so skip creating the cache. */
	if (!xstate_size)
		return;

	task_xstate_cachep = kmem_cache_create("task_xstate", xstate_size,
					       __alignof__(union thread_xstate),
					       SLAB_PANIC | SLAB_NOTRACK, NULL);
}
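/*
 * Note on error handling: SLAB_PANIC makes kmem_cache_create() panic on
 * failure instead of returning NULL, which is why neither cache-creation
 * call in this file checks its result.
 */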
#ifdef CONFIG_SH_FPU_EMU
# define HAVE_SOFTFP	1
#else
# define HAVE_SOFTFP	0
#endif

/*
 * Pick the xstate size for this CPU: hardware FPU context,
 * software-emulated FPU context, or none at all.
 */
void __cpuinit init_thread_xstate(void)
{
	if (boot_cpu_data.flags & CPU_HAS_FPU)
		xstate_size = sizeof(struct sh_fpu_hard_struct);
	else if (HAVE_SOFTFP)
		xstate_size = sizeof(struct sh_fpu_soft_struct);
	else
		xstate_size = 0;
}
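/*
 * Boot-ordering sketch (inferred from how the symbols are used above, not
 * code in this file):
 *
 *	init_thread_xstate();	// choose hard/soft FPU state size, or 0
 *	arch_task_cache_init();	// create task_xstate_cachep iff size != 0
 *
 * Running these in the opposite order would leave task_xstate_cachep NULL
 * while xstate_size is nonzero, and arch_dup_task_struct() would then pass
 * a NULL cache to kmem_cache_alloc().
 */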