benchmark 1.9.5
 
Loading...
Searching...
No Matches
cycleclock.h
// ----------------------------------------------------------------------
// CycleClock
// A CycleClock tells you the current time in Cycles. The "time"
// is actually time since power-on. This is like time() but doesn't
// involve a system call and is much more precise.
//
// NOTE: Not all cpu/platform/kernel combinations guarantee that this
// clock increments at a constant rate or is synchronized across all logical
// cpus in a system.
//
// If you need the above guarantees, please consider using a different
// API. There are efforts to provide an interface which provides a millisecond
// granularity and implemented as a memory read. A memory read is generally
// cheaper than the CycleClock for many architectures.
//
// Also, in some out of order CPU implementations, the CycleClock is not
// serializing. So if you're trying to count at cycles granularity, your
// data might be inaccurate due to out of order instruction execution.
// ----------------------------------------------------------------------
20
#ifndef BENCHMARK_CYCLECLOCK_H_
#define BENCHMARK_CYCLECLOCK_H_

#include <cstdint>

#include "benchmark/benchmark.h"
#include "internal_macros.h"

#if defined(BENCHMARK_OS_MACOSX)
#include <mach/mach_time.h>
#endif
// For MSVC, we want to use '_asm rdtsc' when possible (since it works
// with even ancient MSVC compilers), and when not possible the
// __rdtsc intrinsic, declared in <intrin.h>. Unfortunately, in some
// environments, <windows.h> and <intrin.h> have conflicting
// declarations of some other intrinsics, breaking compilation.
// Therefore, we simply declare __rdtsc ourselves. See also
// http://connect.microsoft.com/VisualStudio/feedback/details/262047
//
// Note that MSVC defines the x64 preprocessor macros when building
// for Arm64EC, despite it using Arm64 assembly instructions.
#if defined(COMPILER_MSVC) && !defined(_M_IX86) && !defined(_M_ARM64) && \
    !defined(_M_ARM64EC)
extern "C" uint64_t __rdtsc();
#pragma intrinsic(__rdtsc)
#endif

// POSIX time headers for the gettimeofday()/clock_gettime() fallback
// branches used by Now() below.
#if !defined(BENCHMARK_OS_WINDOWS) || defined(BENCHMARK_OS_MINGW)
#include <sys/time.h>
#include <time.h>
#endif

#ifdef BENCHMARK_OS_EMSCRIPTEN
#include <emscripten.h>
#endif
namespace benchmark {
// NOTE: only i386 and x86_64 have been well tested.
// PPC, sparc, alpha, and ia64 are based on
// http://peter.kuscsik.com/wordpress/?p=14
// with modifications by m3b. See also
// https://setisvn.ssl.berkeley.edu/svn/lib/fftw-3.0.1/kernel/cycle.h
namespace cycleclock {
// This should return the number of cycles since power-on. Thread-safe.
// The unit of the returned value is architecture-dependent: a true cycle
// counter on x86, a fixed-frequency timer tick on ARMv8/PPC/Mac, or a
// wall-clock fallback (ns or us) where no counter is user-readable.
inline BENCHMARK_ALWAYS_INLINE int64_t Now() {
#if defined(BENCHMARK_OS_MACOSX)
  // this goes at the top because we need ALL Macs, regardless of
  // architecture, to return the number of "mach time units" that
  // have passed since startup. See sysinfo.cc where
  // InitializeSystemInfo() sets the supposed cpu clock frequency of
  // macs to the number of mach time units per second, not actual
  // CPU clock frequency (which can change in the face of CPU
  // frequency scaling). Also note that when the Mac sleeps, this
  // counter pauses; it does not continue counting, nor does it
  // reset to zero.
  return static_cast<int64_t>(mach_absolute_time());
#elif defined(BENCHMARK_OS_EMSCRIPTEN)
  // this goes above x86-specific code because old versions of Emscripten
  // define __x86_64__, although they have nothing to do with it.
  // emscripten_get_now() reports milliseconds; multiplying by 1e6 scales
  // the result to nanoseconds.
  return static_cast<int64_t>(emscripten_get_now() * 1e+6);
#elif defined(__i386__)
  // On 32-bit x86, rdtsc writes the 64-bit counter into edx:eax; the "=A"
  // constraint maps that register pair onto a single 64-bit variable.
  int64_t ret;
  __asm__ volatile("rdtsc" : "=A"(ret));
  return ret;

// Note that Clang, like MSVC, defines the x64 preprocessor macros when building
// for Arm64EC, despite it using Arm64 assembly instructions.
#elif (defined(__x86_64__) || defined(__amd64__)) && !defined(__arm64ec__)
  uint64_t low, high;
  __asm__ volatile("rdtsc" : "=a"(low), "=d"(high));
  return static_cast<int64_t>((high << 32) | low);
#elif defined(__powerpc__) || defined(__ppc__)
  // This returns a time-base, which is not always precisely a cycle-count.
#if defined(__powerpc64__) || defined(__ppc64__)
  // SPR 268 is the 64-bit time-base register, read in one instruction.
  int64_t tb;
  asm volatile("mfspr %0, 268" : "=r"(tb));
  return tb;
#else
  // 32-bit PPC must read the time-base in two halves: upper, lower, then
  // upper again. If the two upper reads differ, the low word wrapped
  // between them; the branch-free mask below zeroes tbl in that case
  // (the mask is all-ones only when tbu0 == tbu1).
  uint32_t tbl, tbu0, tbu1;
  asm volatile(
      "mftbu %0\n"
      "mftb %1\n"
      "mftbu %2"
      : "=r"(tbu0), "=r"(tbl), "=r"(tbu1));
  tbl &= -static_cast<int32_t>(tbu0 == tbu1);
  // high 32 bits in tbu1; low 32 bits in tbl (tbu0 is no longer needed)
  return (static_cast<uint64_t>(tbu1) << 32) | tbl;
#endif
#elif defined(__sparc__)
  // Raw opcode bytes -- presumably encoding `rd %tick, %g1` for assemblers
  // lacking the mnemonic -- then copy %g1 into a C variable.
  int64_t tick;
  asm(".byte 0x83, 0x41, 0x00, 0x00");
  asm("mov %%g1, %0" : "=r"(tick));
  return tick;
#elif defined(__ia64__)
  // ar.itc is the Itanium interval time counter application register.
  int64_t itc;
  asm("mov %0 = ar.itc" : "=r"(itc));
  return itc;
#elif defined(COMPILER_MSVC) && defined(_M_IX86)
  // Older MSVC compilers (like 7.x) don't seem to support the
  // __rdtsc intrinsic properly, so I prefer to use _asm instead
  // when I know it will work. Otherwise, I'll use __rdtsc and hope
  // the code is being compiled with a non-ancient compiler.
  // NOTE: no explicit return statement -- rdtsc leaves the result in
  // edx:eax, which is where MSVC's x86 ABI expects an int64_t return.
  _asm rdtsc
#elif defined(COMPILER_MSVC) && (defined(_M_ARM64) || defined(_M_ARM64EC))
  // See https://docs.microsoft.com/en-us/cpp/intrinsics/arm64-intrinsics
  // and https://reviews.llvm.org/D53115
  int64_t virtual_timer_value;
  virtual_timer_value = _ReadStatusReg(ARM64_CNTVCT);
  return virtual_timer_value;
#elif defined(COMPILER_MSVC)
  return __rdtsc();
#elif defined(BENCHMARK_OS_NACL)
  // Native Client validator on x86/x86-64 allows RDTSC instructions,
  // and this case is handled above. Native Client validator on ARM
  // rejects MRC instructions (used in the ARM-specific sequence below),
  // so we handle it here. Portable Native Client compiles to
  // architecture-agnostic bytecode, which doesn't provide any
  // cycle counter access mnemonics.

  // Native Client does not provide any API to access cycle counter.
  // Use clock_gettime(CLOCK_MONOTONIC, ...) instead of gettimeofday
  // because it provides nanosecond resolution (which is noticeable at
  // least for PNaCl modules running on x86 Mac & Linux).
  // Initialize to always return 0 if clock_gettime fails.
  struct timespec ts = {0, 0};
  clock_gettime(CLOCK_MONOTONIC, &ts);
  return static_cast<int64_t>(ts.tv_sec) * 1000000000 + ts.tv_nsec;
#elif defined(__aarch64__) || defined(__arm64ec__)
  // System timer of ARMv8 runs at a different frequency than the CPU's.
  // The frequency is fixed, typically in the range 1-50MHz. It can be
  // read at CNTFRQ special register. We assume the OS has set up
  // the virtual timer properly.
  int64_t virtual_timer_value;
  asm volatile("mrs %0, cntvct_el0" : "=r"(virtual_timer_value));
  return virtual_timer_value;
#elif defined(__ARM_ARCH)
  // V6 is the earliest arch that has a standard cyclecount
  // Native Client validator doesn't allow MRC instructions.
#if (__ARM_ARCH >= 6)
  uint32_t pmccntr;
  uint32_t pmuseren;
  uint32_t pmcntenset;
  // Read the user mode perf monitor counter access permissions.
  asm volatile("mrc p15, 0, %0, c9, c14, 0" : "=r"(pmuseren));
  if (pmuseren & 1) {  // Allows reading perfmon counters for user mode code.
    asm volatile("mrc p15, 0, %0, c9, c12, 1" : "=r"(pmcntenset));
    if (pmcntenset & 0x80000000ul) {  // Is it counting?
      asm volatile("mrc p15, 0, %0, c9, c13, 0" : "=r"(pmccntr));
      // The counter is set up to count every 64th cycle
      return static_cast<int64_t>(pmccntr) * 64;  // Should optimize to << 6
    }
  }
#endif
  // Cycle counter not readable from user mode: fall back to wall time
  // in microseconds.
  struct timeval tv;
  gettimeofday(&tv, nullptr);
  return static_cast<int64_t>(tv.tv_sec) * 1000000 + tv.tv_usec;
#elif defined(__mips__) || defined(__m68k__)
  // mips apparently only allows rdtsc for superusers, so we fall
  // back to gettimeofday. It's possible clock_gettime would be better.
  struct timeval tv;
  gettimeofday(&tv, nullptr);
  return static_cast<int64_t>(tv.tv_sec) * 1000000 + tv.tv_usec;
#elif defined(__loongarch__) || defined(__csky__)
  // Wall-clock fallback in microseconds.
  struct timeval tv;
  gettimeofday(&tv, nullptr);
  return static_cast<int64_t>(tv.tv_sec) * 1000000 + tv.tv_usec;
#elif defined(__s390__)  // Covers both s390 and s390x.
  // Return the CPU clock.
  uint64_t tsc;
#if defined(BENCHMARK_OS_ZOS)
  // z/OS HLASM syntax.
  asm(" stck %0" : "=m"(tsc) : : "cc");
#else
  // Linux on Z syntax.
  asm("stck %0" : "=Q"(tsc) : : "cc");
#endif
  return tsc;
#elif defined(__riscv)  // RISC-V
  // Use RDTIME (and RDTIMEH on riscv32).
  // RDCYCLE is a privileged instruction since Linux 6.6.
#if __riscv_xlen == 32
  uint32_t cycles_lo, cycles_hi0, cycles_hi1;
  // This asm also includes the PowerPC overflow handling strategy, as above.
  // Implemented in assembly because Clang insisted on branching.
  // (hi0 - hi1 is zero iff no wrap occurred; seqz/sub build the all-ones
  // mask that keeps cycles_lo, else it is zeroed.)
  asm volatile(
      "rdtimeh %0\n"
      "rdtime %1\n"
      "rdtimeh %2\n"
      "sub %0, %0, %2\n"
      "seqz %0, %0\n"
      "sub %0, zero, %0\n"
      "and %1, %1, %0\n"
      : "=r"(cycles_hi0), "=r"(cycles_lo), "=r"(cycles_hi1));
  return static_cast<int64_t>((static_cast<uint64_t>(cycles_hi1) << 32) |
                              cycles_lo);
#else
  uint64_t cycles;
  asm volatile("rdtime %0" : "=r"(cycles));
  return static_cast<int64_t>(cycles);
#endif
#elif defined(__e2k__) || defined(__elbrus__)
  // Wall-clock fallback in microseconds.
  struct timeval tv;
  gettimeofday(&tv, nullptr);
  return static_cast<int64_t>(tv.tv_sec) * 1000000 + tv.tv_usec;
#elif defined(__hexagon__)
  // Hexagon exposes a 64-bit cycle counter in the C15:14 register pair.
  uint64_t pcycle;
  asm volatile("%0 = C15:14" : "=r"(pcycle));
  return static_cast<int64_t>(pcycle);
#elif defined(__alpha__)
  // Alpha has a cycle counter, the PCC register, but it is an unsigned 32-bit
  // integer and thus wraps every ~4s, making using it for tick counts
  // unreliable beyond this time range. The real-time clock is low-precision,
  // roughly ~1ms, but it is the only option that can reasonably count
  // indefinitely.
  struct timeval tv;
  gettimeofday(&tv, nullptr);
  return static_cast<int64_t>(tv.tv_sec) * 1000000 + tv.tv_usec;
#elif defined(__hppa__) || defined(__linux__)
  // Fallback for all other architectures with a recent Linux kernel, e.g.:
  // HP PA-RISC provides a user-readable clock counter (cr16), but
  // it's not synchronized across CPUs and only 32-bit wide when programs
  // are built as 32-bit binaries.
  // Same for SH-4 and possibly others.
  // Use clock_gettime(CLOCK_MONOTONIC, ...) instead of gettimeofday
  // because it provides nanosecond resolution.
  // Initialize to always return 0 if clock_gettime fails.
  struct timespec ts = {0, 0};
  clock_gettime(CLOCK_MONOTONIC, &ts);
  return static_cast<int64_t>(ts.tv_sec) * 1000000000 + ts.tv_nsec;
#else
  // The soft failover to a generic implementation is automatic only for ARM.
  // For other platforms the developer is expected to make an attempt to create
  // a fast implementation and use generic version if nothing better is
  // available.
#error You need to define CycleTimer for your OS and CPU
#endif
}
}  // end namespace cycleclock
}  // end namespace benchmark
260
261#endif // BENCHMARK_CYCLECLOCK_H_