cputhreads.h
#ifndef _ASM_POWERPC_CPUTHREADS_H
#define _ASM_POWERPC_CPUTHREADS_H

#ifndef __ASSEMBLY__
#include <linux/cpumask.h>
#include <asm/cpu_has_feature.h>

/*
 * Mapping of threads to cores
 *
 * Note: This implementation is limited to a power-of-2 number of
 * threads per core, and the same number for each core in the system
 * (though it would work if some processors had fewer threads, as long
 * as the CPU numbers are still allocated, just not brought online).
 *
 * However, the API allows for a different implementation in the future
 * if needed, as long as you only use the functions and not the variables
 * directly.
 */
#ifdef CONFIG_SMP
extern int threads_per_core;
extern int threads_per_subcore;
extern int threads_shift;
extern cpumask_t threads_core_mask;
#else
#define threads_per_core	1
#define threads_per_subcore	1
#define threads_shift		0
#define threads_core_mask	(*get_cpu_mask(0))
#endif

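/*
 * Illustrative note (added as a sketch, not part of the original header):
 * with the power-of-2 mapping above and, say, threads_per_core = 8
 * (threads_shift = 3), CPUs 0-7 are the threads of core 0, CPUs 8-15
 * those of core 1, and so on.  CPU 13 is therefore thread 13 & 7 = 5
 * of core 13 >> 3 = 1, which is what the helpers below compute.
 */
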
/*
 * cpu_thread_mask_to_cores - Return a cpumask with one bit set per core
 *                            hit by the argument
 *
 * @threads: a cpumask of online threads
 *
 * This function returns a cpumask which will have one online CPU's
 * bit set for each core that has at least one thread set in the argument.
 *
 * This can typically be used for things like IPIs for TLB invalidations,
 * since those need to be done only once per core/TLB.
 */
static inline cpumask_t cpu_thread_mask_to_cores(const struct cpumask *threads)
{
	cpumask_t	tmp, res;
	int		i, cpu;

	cpumask_clear(&res);
	for (i = 0; i < NR_CPUS; i += threads_per_core) {
		cpumask_shift_left(&tmp, &threads_core_mask, i);
		if (cpumask_intersects(threads, &tmp)) {
			cpu = cpumask_next_and(-1, &tmp, cpu_online_mask);
			if (cpu < nr_cpu_ids)
				cpumask_set_cpu(cpu, &res);
		}
	}
	return res;
}

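/*
 * Illustrative sketch (not part of the original header): one way a caller
 * might use cpu_thread_mask_to_cores() for the "IPI once per core" case
 * mentioned above.  The function and callback names are hypothetical;
 * a real caller would also need <linux/smp.h> for smp_call_function_many().
 */
#if 0	/* example only */
static void example_flush_core(void *info)
{
	/* per-core work, e.g. a local TLB invalidation */
}

static void example_ipi_once_per_core(const struct cpumask *threads)
{
	/* 'cores' has exactly one online CPU set for each core in 'threads' */
	cpumask_t cores = cpu_thread_mask_to_cores(threads);

	smp_call_function_many(&cores, example_flush_core, NULL, 1);
}
#endif
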
static inline int cpu_nr_cores(void)
{
	return nr_cpu_ids >> threads_shift;
}

static inline cpumask_t cpu_online_cores_map(void)
{
	return cpu_thread_mask_to_cores(cpu_online_mask);
}

#ifdef CONFIG_SMP
int cpu_core_index_of_thread(int cpu);
int cpu_first_thread_of_core(int core);
#else
static inline int cpu_core_index_of_thread(int cpu) { return cpu; }
static inline int cpu_first_thread_of_core(int core) { return core; }
#endif

static inline int cpu_thread_in_core(int cpu)
{
	return cpu & (threads_per_core - 1);
}

static inline int cpu_thread_in_subcore(int cpu)
{
	return cpu & (threads_per_subcore - 1);
}

static inline int cpu_first_thread_sibling(int cpu)
{
	return cpu & ~(threads_per_core - 1);
}

static inline int cpu_last_thread_sibling(int cpu)
{
	return cpu | (threads_per_core - 1);
}

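/*
 * Illustrative sketch (not part of the original header): walking the
 * sibling threads of a CPU with the helpers above.  With
 * threads_per_core = 8 and cpu = 13: first = 8, last = 15, so the loop
 * visits CPUs 8..15.  The function name is hypothetical and pr_info()
 * would need <linux/printk.h>.
 */
#if 0	/* example only */
static void example_walk_siblings(int cpu)
{
	int first = cpu_first_thread_sibling(cpu);
	int last  = cpu_last_thread_sibling(cpu);
	int i;

	for (i = first; i <= last; i++)
		pr_info("cpu %d is thread %d of its core\n", i, cpu_thread_in_core(i));
}
#endif
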
static inline u32 get_tensr(void)
{
#ifdef CONFIG_BOOKE
	if (cpu_has_feature(CPU_FTR_SMT))
		return mfspr(SPRN_TENSR);
#endif
	return 1;
}

void book3e_start_thread(int thread, unsigned long addr);
void book3e_stop_thread(int thread);

#endif /* __ASSEMBLY__ */

#define INVALID_THREAD_HWID	0x0fff

#endif /* _ASM_POWERPC_CPUTHREADS_H */