/*
 * This control block defines the PACA which defines the processor
 * specific data for each logical processor on the system.
 * There are some pointers defined that are utilized by PLIC.
 *
 * C 2001 PPC 64 Team, IBM Corp
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#ifndef _ASM_POWERPC_PACA_H
#define _ASM_POWERPC_PACA_H
#ifdef __KERNEL__

#ifdef CONFIG_PPC64

#include <linux/string.h>
#include <asm/types.h>
#include <asm/lppaca.h>
#include <asm/mmu.h>
#include <asm/page.h>
#include <asm/exception-64e.h>
#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
#include <asm/kvm_book3s_asm.h>
#endif
#include <asm/accounting.h>
#include <asm/hmi.h>

register struct paca_struct *local_paca asm("r13");

#if defined(CONFIG_DEBUG_PREEMPT) && defined(CONFIG_SMP)
extern unsigned int debug_smp_processor_id(void); /* from linux/smp.h */
/*
 * Add standard checks that preemption cannot occur when using get_paca():
 * otherwise the paca_struct it points to may be the wrong one just after.
 */
#define get_paca()      ((void) debug_smp_processor_id(), local_paca)
#else
#define get_paca()      local_paca
#endif

#define get_lppaca()    (get_paca()->lppaca_ptr)
#define get_slb_shadow()        (get_paca()->slb_shadow_ptr)
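
/*
 * Usage sketch (editor's note, not part of the original header): code that
 * runs with preemption disabled reads per-CPU state through these
 * accessors, e.g.
 *
 *      if (get_paca()->soft_enabled)
 *              ...;
 *
 * get_lppaca() likewise returns this CPU's lppaca, the area shared with the
 * hypervisor.  If preemption were enabled, r13 could point at a different
 * CPU's PACA by the time the result is dereferenced, which is exactly what
 * the debug_smp_processor_id() check above is meant to catch.
 */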

struct task_struct;

/*
 * Defines the layout of the paca.
 *
 * This structure is not directly accessed by firmware or the service
 * processor.
 */
struct paca_struct {
#ifdef CONFIG_PPC_BOOK3S
        /*
         * Because hw_cpu_id, unlike other paca fields, is accessed
         * routinely from other CPUs (from the IRQ code), we stick to
         * read-only (after boot) fields in the first cacheline to
         * avoid cacheline bouncing.
         */
        struct lppaca *lppaca_ptr;      /* Pointer to LpPaca for PLIC */
#endif /* CONFIG_PPC_BOOK3S */

        /*
         * MAGIC: the spinlock functions in arch/powerpc/lib/locks.c
         * load lock_token and paca_index with a single lwz
         * instruction.  They must travel together and be properly
         * aligned.
         */
#ifdef __BIG_ENDIAN__
        u16 lock_token;                 /* Constant 0x8000, used in locks */
        u16 paca_index;                 /* Logical processor number */
#else
        u16 paca_index;                 /* Logical processor number */
        u16 lock_token;                 /* Constant 0x8000, used in locks */
#endif
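
        /*
         * Editor's illustration (an assumption about asm/spinlock.h, not
         * text from this header): the lock code builds its per-CPU lock
         * value from these two halfwords with a macro along the lines of
         *
         *      #define LOCK_TOKEN (*(u32 *)(&get_paca()->lock_token))
         *
         * so one 32-bit load yields 0x8000 in one halfword and the logical
         * CPU number in the other, giving each CPU a distinct non-zero
         * lock owner value.
         */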

        u64 kernel_toc;                 /* Kernel TOC address */
        u64 kernelbase;                 /* Base address of kernel */
        u64 kernel_msr;                 /* MSR while running in kernel */
        void *emergency_sp;             /* pointer to emergency stack */
        u64 data_offset;                /* per cpu data offset */
        s16 hw_cpu_id;                  /* Physical processor number */
        u8 cpu_start;                   /* At startup, processor spins until */
                                        /* this becomes non-zero. */
        u8 kexec_state;                 /* set when kexec down has irqs off */
#ifdef CONFIG_PPC_STD_MMU_64
        struct slb_shadow *slb_shadow_ptr;
        struct dtl_entry *dispatch_log;
        struct dtl_entry *dispatch_log_end;
#endif /* CONFIG_PPC_STD_MMU_64 */
        u64 dscr_default;               /* per-CPU default DSCR */

#ifdef CONFIG_PPC_STD_MMU_64
        /*
         * Now, starting in cacheline 2, the exception save areas
         */

        /* used for most interrupts/exceptions */
        u64 exgen[13] __attribute__((aligned(0x80)));
        u64 exmc[13];                   /* used for machine checks */
        u64 exslb[13];                  /* used for SLB/segment table misses
                                         * on the linear mapping */
        /* SLB related definitions */
        u16 vmalloc_sllp;
        u16 slb_cache_ptr;
        u32 slb_cache[SLB_CACHE_ENTRIES];
#endif /* CONFIG_PPC_STD_MMU_64 */

#ifdef CONFIG_PPC_BOOK3E
        u64 exgen[8] __aligned(0x40);
        /* Keep pgd in the same cacheline as the start of extlb */
        pgd_t *pgd __aligned(0x40);     /* Current PGD */
        pgd_t *kernel_pgd;              /* Kernel PGD */

        /* Shared by all threads of a core -- points to tcd of first thread */
        struct tlb_core_data *tcd_ptr;

        /*
         * We can have up to 3 levels of reentrancy in the TLB miss handler,
         * in each of four exception levels (normal, crit, mcheck, debug).
         */
        u64 extlb[12][EX_TLB_SIZE / sizeof(u64)];
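        /* i.e. 12 save areas = 4 exception classes x 3 reentrancy levels */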
        u64 exmc[8];                    /* used for machine checks */
        u64 excrit[8];                  /* used for crit interrupts */
        u64 exdbg[8];                   /* used for debug interrupts */

        /* Kernel stack pointers for use by special exceptions */
        void *mc_kstack;
        void *crit_kstack;
        void *dbg_kstack;

        struct tlb_core_data tcd;
#endif /* CONFIG_PPC_BOOK3E */

#ifdef CONFIG_PPC_BOOK3S
        mm_context_id_t mm_ctx_id;
#ifdef CONFIG_PPC_MM_SLICES
        u64 mm_ctx_low_slices_psize;
        unsigned char mm_ctx_high_slices_psize[SLICE_ARRAY_SIZE];
#else
        u16 mm_ctx_user_psize;
        u16 mm_ctx_sllp;
#endif
#endif

        /*
         * then miscellaneous read-write fields
         */
        struct task_struct *__current;  /* Pointer to current */
        u64 kstack;                     /* Saved Kernel stack addr */
        u64 stab_rr;                    /* stab/slb round-robin counter */
        u64 saved_r1;                   /* r1 save for RTAS calls or PM */
        u64 saved_msr;                  /* MSR saved here by enter_rtas */
        u16 trap_save;                  /* Used when bad stack is encountered */
        u8 soft_enabled;                /* irq soft-enable flag */
        u8 irq_happened;                /* irq happened while soft-disabled */
        u8 io_sync;                     /* writel() needs spin_unlock sync */
        u8 irq_work_pending;            /* IRQ_WORK interrupt while soft-disabled */
        u8 nap_state_lost;              /* NV GPR values lost in power7_idle */
        u64 sprg_vdso;                  /* Saved user-visible sprg */
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
        u64 tm_scratch;                 /* TM scratch area for reclaim */
#endif

#ifdef CONFIG_PPC_POWERNV
        /* Per-core mask tracking idle threads and a lock bit-[L][TTTTTTTT] */
        u32 *core_idle_state_ptr;
        u8 thread_idle_state;           /* PNV_THREAD_RUNNING/NAP/SLEEP */
        /* Mask to indicate thread id in core */
        u8 thread_mask;
        /* Mask to denote subcore sibling threads */
        u8 subcore_sibling_mask;
#endif
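        /*
         * Editor's note on the [L][TTTTTTTT] layout above (an assumption
         * based on the POWERNV idle code, not text from this header): the
         * low eight bits of the word at *core_idle_state_ptr carry one flag
         * per hardware thread of the core, and the next bit is a lock taken
         * while that mask is updated; thread_mask selects this thread's bit.
         */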

#ifdef CONFIG_PPC_BOOK3S_64
        /* Exclusive emergency stack pointer for machine check exception. */
        void *mc_emergency_sp;
        /*
         * Flag to check whether we are in machine check early handler
         * and already using emergency stack.
         */
        u16 in_mce;
        u8 hmi_event_available;         /* HMI event is available */
#endif

        /* Stuff for accurate time accounting */
        struct cpu_accounting_data accounting;
        u64 stolen_time;                /* TB ticks taken by hypervisor */
        u64 dtl_ridx;                   /* read index in dispatch log */
        struct dtl_entry *dtl_curr;     /* pointer corresponding to dtl_ridx */

#ifdef CONFIG_KVM_BOOK3S_HANDLER
#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
        /* We use this to store guest state in */
        struct kvmppc_book3s_shadow_vcpu shadow_vcpu;
#endif
        struct kvmppc_host_state kvm_hstate;
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
        /*
         * Bitmap for sibling subcore status. See kvm/book3s_hv_ras.c for
         * more details
         */
        struct sibling_subcore_state *sibling_subcore_state;
#endif
#endif

#ifdef CONFIG_PPC_BOOK3S_64
        /*
         * rfi fallback flush must be in its own cacheline to prevent
         * other paca data leaking into the L1d
         */
        u64 exrfi[13] __aligned(0x80);
        void *rfi_flush_fallback_area;
        u64 l1d_flush_size;
#endif
};

#ifdef CONFIG_PPC_BOOK3S
static inline void copy_mm_to_paca(mm_context_t *context)
{
        get_paca()->mm_ctx_id = context->id;
#ifdef CONFIG_PPC_MM_SLICES
        get_paca()->mm_ctx_low_slices_psize = context->low_slices_psize;
        memcpy(&get_paca()->mm_ctx_high_slices_psize,
               &context->high_slices_psize, SLICE_ARRAY_SIZE);
#else
        get_paca()->mm_ctx_user_psize = context->user_psize;
        get_paca()->mm_ctx_sllp = context->sllp;
#endif
}
#else
static inline void copy_mm_to_paca(mm_context_t *context) { }
#endif
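
/*
 * Usage sketch (editor's note, an assumption about the callers rather than
 * text from this header): copy_mm_to_paca() is meant to be called on the
 * context-switch path, with preemption disabled, so that low-level SLB and
 * segment handling can read the incoming mm's context id and page-size
 * information straight from the PACA instead of chasing current->mm, e.g.
 *
 *      copy_mm_to_paca(&mm->context);
 *
 * where mm is the mm_struct being switched to.
 */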

extern struct paca_struct *paca;
extern void initialise_paca(struct paca_struct *new_paca, int cpu);
extern void setup_paca(struct paca_struct *new_paca);
extern void allocate_pacas(void);
extern void free_unused_pacas(void);
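
/*
 * Boot ordering sketch (editor's note, an assumption about the early setup
 * code rather than text from this header): allocate_pacas() allocates one
 * paca_struct per possible CPU and runs initialise_paca() on each,
 * setup_paca() points r13 at the PACA of the CPU that is currently coming
 * up, and free_unused_pacas() later returns the memory reserved for CPU
 * ids that ended up not being possible.
 */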

#else /* CONFIG_PPC64 */

static inline void allocate_pacas(void) { }
static inline void free_unused_pacas(void) { }

#endif /* CONFIG_PPC64 */
#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_PACA_H */