/* include/linux/cache.h — cache-alignment helper macros */
  1. #ifndef __LINUX_CACHE_H
  2. #define __LINUX_CACHE_H
  3. #include <uapi/linux/kernel.h>
  4. #include <asm/cache.h>
  5. #ifndef L1_CACHE_ALIGN
  6. #define L1_CACHE_ALIGN(x) __ALIGN_KERNEL(x, L1_CACHE_BYTES)
  7. #endif
  8. #ifndef SMP_CACHE_BYTES
  9. #define SMP_CACHE_BYTES L1_CACHE_BYTES
  10. #endif
  11. /*
  12. * __read_mostly is used to keep rarely changing variables out of frequently
  13. * updated cachelines. If an architecture doesn't support it, ignore the
  14. * hint.
  15. */
  16. #ifndef __read_mostly
  17. #define __read_mostly
  18. #endif
  19. /*
  20. * __ro_after_init is used to mark things that are read-only after init (i.e.
  21. * after mark_rodata_ro() has been called). These are effectively read-only,
  22. * but may get written to during init, so can't live in .rodata (via "const").
  23. */
  24. #ifndef __ro_after_init
  25. #define __ro_after_init __attribute__((__section__(".data..ro_after_init")))
  26. #endif
  27. #ifndef ____cacheline_aligned
  28. #define ____cacheline_aligned __attribute__((__aligned__(SMP_CACHE_BYTES)))
  29. #endif
  30. #ifndef ____cacheline_aligned_in_smp
  31. #ifdef CONFIG_SMP
  32. #define ____cacheline_aligned_in_smp ____cacheline_aligned
  33. #else
  34. #define ____cacheline_aligned_in_smp
  35. #endif /* CONFIG_SMP */
  36. #endif
  37. #ifndef __cacheline_aligned
  38. #define __cacheline_aligned \
  39. __attribute__((__aligned__(SMP_CACHE_BYTES), \
  40. __section__(".data..cacheline_aligned")))
  41. #endif /* __cacheline_aligned */
  42. #ifndef __cacheline_aligned_in_smp
  43. #ifdef CONFIG_SMP
  44. #define __cacheline_aligned_in_smp __cacheline_aligned
  45. #else
  46. #define __cacheline_aligned_in_smp
  47. #endif /* CONFIG_SMP */
  48. #endif
  49. /*
  50. * The maximum alignment needed for some critical structures
  51. * These could be inter-node cacheline sizes/L3 cacheline
  52. * size etc. Define this in asm/cache.h for your arch
  53. */
  54. #ifndef INTERNODE_CACHE_SHIFT
  55. #define INTERNODE_CACHE_SHIFT L1_CACHE_SHIFT
  56. #endif
  57. #if !defined(____cacheline_internodealigned_in_smp)
  58. #if defined(CONFIG_SMP)
  59. #define ____cacheline_internodealigned_in_smp \
  60. __attribute__((__aligned__(1 << (INTERNODE_CACHE_SHIFT))))
  61. #else
  62. #define ____cacheline_internodealigned_in_smp
  63. #endif
  64. #endif
  65. #ifndef CONFIG_ARCH_HAS_CACHE_LINE_SIZE
  66. #define cache_line_size() L1_CACHE_BYTES
  67. #endif
  68. #endif /* __LINUX_CACHE_H */