// dram_perf_model.h
#ifndef __DRAM_PERF_MODEL_H__
#define __DRAM_PERF_MODEL_H__

#include "queue_model.h"
#include "fixed_types.h"
#include "subsecond_time.h"
#include "dram_cntlr_interface.h"

class ShmemPerf;
// Note: each DRAM controller owns a single DramPerfModel object.
// Hence, m_dram_bandwidth is the bandwidth of a single DRAM controller:
//   total bandwidth = m_dram_bandwidth * number of DRAM controllers
// The number of DRAM controllers presently equals the number of cores.
// m_dram_bandwidth is expressed in GB/s; assuming a core frequency of
// 1 GHz, it is equivalently expressed in bytes per clock cycle.
//
// This DRAM model is not entirely accurate: it inflates the queueing
// delay to a very large value when the arrival times of adjacent
// packets are spread over a large simulated time period.
  19. class DramPerfModel
  20. {
  21. protected:
  22. bool m_enabled;
  23. UInt64 m_num_accesses;
  24. public:
  25. static DramPerfModel* createDramPerfModel(core_id_t core_id, UInt32 cache_block_size);
  26. DramPerfModel(core_id_t core_id, UInt64 cache_block_size) : m_enabled(false), m_num_accesses(0) {}
  27. virtual ~DramPerfModel() {}
  28. virtual SubsecondTime getAccessLatency(SubsecondTime pkt_time, UInt64 pkt_size, core_id_t requester, IntPtr address, DramCntlrInterface::access_t access_type, ShmemPerf *perf) = 0;
  29. void enable() { m_enabled = true; }
  30. void disable() { m_enabled = false; }
  31. UInt64 getTotalAccesses() { return m_num_accesses; }
  32. };
#endif /* __DRAM_PERF_MODEL_H__ */