#include "../../../../core/exception.hpp"

using namespace libjmmcg;
        _emit 0x0f    ; cpuid - serialise the processor
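        ; CPUID (opcode 0x0f 0xa2) is emitted purely as a serialising instruction:
        ; it stops out-of-order execution from reordering earlier work past the
        ; RDTSC (opcode 0x0f 0x31) read of the time-stamp counter that this class
        ; relies on.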
inline unsigned __int64
CPUTicker::CPUCountFn::GetCPUCountFnNT::operator()(void) const {
    volatile register ULARGE_INTEGER ts;
inline unsigned __int64
CPUTicker::CPUCountFn::GetCPUCountFn9X::operator()(void) const {
    const unsigned __int64 ret=GetCPUCountFnNT::operator()();
#pragma optimize("",off)
CPUTicker::HasRDTSC(void) {
    SYSTEM_INFO sys_info;
    GetSystemInfo(&sys_info);
    if (sys_info.dwProcessorType==PROCESSOR_INTEL_PENTIUM) {

        volatile ULARGE_INTEGER ts1,ts2;

        if (ts2.HighPart==ts1.HighPart) {
            if (ts2.LowPart>ts1.LowPart) {
                JMMCG_TRACE(_T("RDTSC instruction probably present."));

        } else if (ts2.HighPart>ts1.HighPart) {
            JMMCG_TRACE(_T("RDTSC instruction probably present."));
#pragma optimize("",on)
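// Usage sketch (illustrative only, assuming HasRDTSC() may be called as a static
// member; the default constructor below throws if RDTSC is not supported):
//
//     if (CPUTicker::HasRDTSC()) {
//         CPUTicker ticker;
//         ticker.GetCPUCount();    // sample the time-stamp counter
//     }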
CPUTicker::CPUTicker(void)

        throw exception_type(_T("RDTSC not supported."),JMMCG_FUNCTION(*this),__REV_INFO__);
CPUTicker::CPUTicker(const CPUTicker &ticker)
    TickCount(ticker.TickCount),
    deviation(ticker.deviation),
CPUTicker::~CPUTicker(void) {
CPUTicker::operator=(const CPUTicker &ticker) {
    TickCount=ticker.TickCount;
    deviation=ticker.deviation;
CPUTicker::GetCPUCount(void) noexcept(true) {
    TickCount=counter.GetCount();
CPUTicker::GetTickCountS(void) const {
    register unsigned __int64 ticks;
    if ((ticks=TickCount)>=static_cast<unsigned __int64>(std::numeric_limits<__int64>::max())) {
        throw exception_type(_T("__int64 range error"),JMMCG_FUNCTION(&CPUTicker::GetTickCountS),__REV_INFO__);

    return static_cast<signed __int64>(ticks);
CPUTicker::operator-(const CPUTicker &ticker) const {
    register CPUTicker tmp(*this);
    tmp.TickCount=TickCount-ticker.TickCount;
    if (TickCount<ticker.TickCount) {
        throw exception_type(_T("__int64 range error"),JMMCG_FUNCTION(&CPUTicker::operator-),__REV_INFO__);
#pragma optimize("",off)
CPUTicker::GetAndCalcCPUFrequency(
    double &frequency,
    double &target_ave_dev,
    const unsigned long interval,
    const unsigned int max_loops) {
    register LARGE_INTEGER goal,period,current;
    register unsigned int ctr=0;
    double curr_freq,ave_freq;
    double ave_dev,tmp=0;
    if (!QueryPerformanceFrequency(&period)) {

    period.QuadPart*=interval;
    period.QuadPart/=1000;
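    // period now holds the number of performance-counter ticks in 'interval'
    // milliseconds: QueryPerformanceFrequency reports ticks per second.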
    QueryPerformanceCounter(&goal);
    goal.QuadPart+=period.QuadPart;

        QueryPerformanceCounter(&current);
    } while (current.QuadPart<goal.QuadPart);
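    // The loop above busy-waits until the performance counter passes 'goal', i.e.
    // until roughly 'interval' milliseconds have elapsed; the surrounding (elided)
    // code presumably samples the time-stamp counter into 's' and 'f' so that the
    // tick difference below spans a known wall-clock interval.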
    register unsigned __int64 ticks;
    if ((ticks=f.TickCount-s.TickCount)>=static_cast<unsigned __int64>(std::numeric_limits<__int64>::max())) {
        throw exception_type(_T("__int64 range error"),JMMCG_FUNCTION(&CPUTicker::GetAndCalcCPUFrequency),__REV_INFO__);

    ave_freq=1000*static_cast<double>(static_cast<signed __int64>(ticks))/interval;
        QueryPerformanceCounter(&goal);
        goal.QuadPart+=period.QuadPart;

            QueryPerformanceCounter(&current);
        } while (current.QuadPart<goal.QuadPart);
        if ((ticks=f.TickCount-s.TickCount)>=static_cast<unsigned __int64>(std::numeric_limits<__int64>::max())) {
            throw exception_type(_T("__int64 range error"),JMMCG_FUNCTION(&CPUTicker::GetAndCalcCPUFrequency),__REV_INFO__);

        curr_freq=1000*static_cast<double>(static_cast<signed __int64>(ticks))/interval;
        ave_freq=(curr_freq+ave_freq)/2;

        tmp+=fabs(curr_freq-ave_freq);

    } while (ave_dev>target_ave_dev && ctr<max_loops);
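    // Record the converged results: the achieved average deviation and the
    // estimated frequency are stored both in the object's members and in the
    // caller's reference arguments.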
    deviation=target_ave_dev=ave_dev;
    freq=frequency=ave_freq;
    JMMCG_TRACE(_T("Estimated the processor clock frequency =")<<ave_freq<<_T("Hz, dev.=±")<<ave_dev<<_T("Hz."));
#pragma optimize("",on)
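// Usage sketch (illustrative only; the interval is taken to be in milliseconds,
// consistent with the /1000 conversion above, and the parameter values here are
// arbitrary examples):
//
//     CPUTicker ticker;
//     double frequency, deviation(1000);    // accept up to 1 kHz average deviation
//     ticker.GetAndCalcCPUFrequency(frequency, deviation, 50, 10);
//     // 'frequency' now holds the estimated core clock in Hz, 'deviation' the
//     // achieved average deviation of the individual estimates.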