sync-r4k.c

/*
 * Count register synchronisation.
 *
 * All CPUs will have their count registers synchronised to the CPU0 next time
 * value. This can cause a small timewarp for CPU0. All other CPUs should
 * not have done anything significant (but they may have had interrupts
 * enabled briefly - prom_smp_finish() should not be responsible for enabling
 * interrupts...)
 */
#include <linux/kernel.h>
#include <linux/irqflags.h>
#include <linux/cpumask.h>

#include <asm/r4k-timer.h>
#include <linux/atomic.h>
#include <asm/barrier.h>
#include <asm/mipsregs.h>
static unsigned int initcount = 0;
static atomic_t count_count_start = ATOMIC_INIT(0);
static atomic_t count_count_stop = ATOMIC_INIT(0);

#define COUNTON 100
#define NR_LOOPS 3
void synchronise_count_master(int cpu)
{
        int i;
        unsigned long flags;

        printk(KERN_INFO "Synchronize counters for CPU %u: ", cpu);

        local_irq_save(flags);

        /*
         * We loop a few times to get a primed instruction cache,
         * then the last pass is more or less synchronised and
         * the master and slaves each set their cycle counters to a known
         * value all at once. This reduces the chance of having random offsets
         * between the processors, and guarantees that the maximum
         * delay between the cycle counters is never bigger than
         * the latency of information-passing (cachelines) between
         * two CPUs.
         */
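
        /*
         * Each pass is a two-stage handshake: the slave checks in on
         * count_count_start and spins until the master joins it, the master
         * re-arms count_count_stop and releases the slave, both write the
         * shared initcount value on the final pass, and the same rendezvous
         * is then repeated on count_count_stop before the flags are reset
         * for the next pass.
         */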

        for (i = 0; i < NR_LOOPS; i++) {
                /* slaves loop on '!= 2', so wait for the slave to check in */
                while (atomic_read(&count_count_start) != 1)
                        mb();
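                /* Re-arm the stop flag for this pass before releasing the slave */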
                atomic_set(&count_count_stop, 0);
                smp_wmb();

                /* Let the slave write its count register */
                atomic_inc(&count_count_start);

                /* Count will be initialised to current timer */
                if (i == 1)
                        initcount = read_c0_count();

                /*
                 * Everyone initialises count in the last loop:
                 */
                if (i == NR_LOOPS-1)
                        write_c0_count(initcount);

                /*
                 * Wait for slave to leave the synchronization point:
                 */
                while (atomic_read(&count_count_stop) != 1)
                        mb();
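                /* Reset the start flag, then let the slave out of the stop barrier */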
                atomic_set(&count_count_start, 0);
                smp_wmb();
                atomic_inc(&count_count_stop);
        }
        /* Arrange for an interrupt in a short while */
        write_c0_compare(read_c0_count() + COUNTON);

        local_irq_restore(flags);

        /*
         * i386 code reported the skew here, but the
         * count registers were almost certainly out of sync
         * so no point in alarming people
         */
        printk("done.\n");
}

void synchronise_count_slave(int cpu)
{
        int i;

        /*
         * Not every cpu is online at the time this gets called,
         * so we first wait for the master to say everyone is ready
         */

        for (i = 0; i < NR_LOOPS; i++) {
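                /* Check in, then spin until the master has also arrived */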
                atomic_inc(&count_count_start);
                while (atomic_read(&count_count_start) != 2)
                        mb();

                /*
                 * Everyone initialises count in the last loop:
                 */
                if (i == NR_LOOPS-1)
                        write_c0_count(initcount);
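
                /* Signal that this pass is done and wait for the master to release us */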
                atomic_inc(&count_count_stop);
                while (atomic_read(&count_count_stop) != 2)
                        mb();
        }
        /* Arrange for an interrupt in a short while */
        write_c0_compare(read_c0_count() + COUNTON);
}
#undef NR_LOOPS
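
/*
 * Usage note: both entry points are declared in <asm/r4k-timer.h>, and when
 * the count synchronisation support (CONFIG_SYNC_R4K) is not enabled that
 * header is expected to provide empty static-inline stubs so the SMP bring-up
 * code can call them unconditionally, roughly:
 *
 *      #ifdef CONFIG_SYNC_R4K
 *      extern void synchronise_count_master(int cpu);
 *      extern void synchronise_count_slave(int cpu);
 *      #else
 *      static inline void synchronise_count_master(int cpu) { }
 *      static inline void synchronise_count_slave(int cpu) { }
 *      #endif
 */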