LLVM OpenMP* Runtime Library
kmp_os.h
/*
 * kmp_os.h -- KPTS runtime header file.
 */

//===----------------------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef KMP_OS_H
#define KMP_OS_H

#include "kmp_config.h"
#include <atomic>
#include <stdarg.h>
#include <stdlib.h>
#include <string.h>

#define KMP_FTN_PLAIN 1
#define KMP_FTN_APPEND 2
#define KMP_FTN_UPPER 3
/*
#define KMP_FTN_PREPEND 4
#define KMP_FTN_UAPPEND 5
*/

#define KMP_PTR_SKIP (sizeof(void *))

/* -------------------------- Compiler variations ------------------------ */

#define KMP_OFF 0
#define KMP_ON 1

#define KMP_MEM_CONS_VOLATILE 0
#define KMP_MEM_CONS_FENCE 1

#ifndef KMP_MEM_CONS_MODEL
#define KMP_MEM_CONS_MODEL KMP_MEM_CONS_VOLATILE
#endif

#ifndef __has_cpp_attribute
#define __has_cpp_attribute(x) 0
#endif

#ifndef __has_attribute
#define __has_attribute(x) 0
#endif

/* ------------------------- Compiler recognition ---------------------- */
#define KMP_COMPILER_ICC 0
#define KMP_COMPILER_GCC 0
#define KMP_COMPILER_CLANG 0
#define KMP_COMPILER_MSVC 0
#define KMP_COMPILER_ICX 0

#if __INTEL_CLANG_COMPILER
#undef KMP_COMPILER_ICX
#define KMP_COMPILER_ICX 1
#elif defined(__INTEL_COMPILER)
#undef KMP_COMPILER_ICC
#define KMP_COMPILER_ICC 1
#elif defined(__clang__)
#undef KMP_COMPILER_CLANG
#define KMP_COMPILER_CLANG 1
#elif defined(__GNUC__)
#undef KMP_COMPILER_GCC
#define KMP_COMPILER_GCC 1
#elif defined(_MSC_VER)
#undef KMP_COMPILER_MSVC
#define KMP_COMPILER_MSVC 1
#else
#error Unknown compiler
#endif

#if (KMP_OS_LINUX || KMP_OS_WINDOWS || KMP_OS_FREEBSD)
#define KMP_AFFINITY_SUPPORTED 1
#if KMP_OS_WINDOWS && KMP_ARCH_X86_64
#define KMP_GROUP_AFFINITY 1
#else
#define KMP_GROUP_AFFINITY 0
#endif
#else
#define KMP_AFFINITY_SUPPORTED 0
#define KMP_GROUP_AFFINITY 0
#endif

#if (KMP_OS_LINUX || (KMP_OS_FREEBSD && __FreeBSD_version >= 1301000))
#define KMP_HAVE_SCHED_GETCPU 1
#else
#define KMP_HAVE_SCHED_GETCPU 0
#endif

/* Check for quad-precision extension. */
#define KMP_HAVE_QUAD 0
#if KMP_ARCH_X86 || KMP_ARCH_X86_64
#if KMP_COMPILER_ICC || KMP_COMPILER_ICX
/* _Quad is already defined for icc */
#undef KMP_HAVE_QUAD
#define KMP_HAVE_QUAD 1
#elif KMP_COMPILER_CLANG
/* Clang doesn't support a software-implemented
   128-bit extended precision type yet */
typedef long double _Quad;
#elif KMP_COMPILER_GCC
/* GCC on NetBSD lacks __multc3/__divtc3 builtins needed for quad until
   NetBSD 10.0 which ships with GCC 10.5 */
#if (!KMP_OS_NETBSD || __GNUC__ >= 10)
typedef __float128 _Quad;
#undef KMP_HAVE_QUAD
#define KMP_HAVE_QUAD 1
#endif
#elif KMP_COMPILER_MSVC
typedef long double _Quad;
#endif
#else
#if __LDBL_MAX_EXP__ >= 16384 && KMP_COMPILER_GCC
typedef long double _Quad;
#undef KMP_HAVE_QUAD
#define KMP_HAVE_QUAD 1
#endif
#endif /* KMP_ARCH_X86 || KMP_ARCH_X86_64 */

#define KMP_USE_X87CONTROL 0
#if KMP_OS_WINDOWS
#define KMP_END_OF_LINE "\r\n"
typedef char kmp_int8;
typedef unsigned char kmp_uint8;
typedef short kmp_int16;
typedef unsigned short kmp_uint16;
typedef int kmp_int32;
typedef unsigned int kmp_uint32;
#define KMP_INT32_SPEC "d"
#define KMP_UINT32_SPEC "u"
#ifndef KMP_STRUCT64
typedef __int64 kmp_int64;
typedef unsigned __int64 kmp_uint64;
#define KMP_INT64_SPEC "I64d"
#define KMP_UINT64_SPEC "I64u"
#else
struct kmp_struct64 {
  kmp_int32 a, b;
};
typedef struct kmp_struct64 kmp_int64;
typedef struct kmp_struct64 kmp_uint64;
/* Not sure what to use for KMP_[U]INT64_SPEC here */
#endif
#if KMP_ARCH_X86 && KMP_MSVC_COMPAT
#undef KMP_USE_X87CONTROL
#define KMP_USE_X87CONTROL 1
#endif
#if KMP_ARCH_X86_64 || KMP_ARCH_AARCH64
#define KMP_INTPTR 1
typedef __int64 kmp_intptr_t;
typedef unsigned __int64 kmp_uintptr_t;
#define KMP_INTPTR_SPEC "I64d"
#define KMP_UINTPTR_SPEC "I64u"
#endif
#endif /* KMP_OS_WINDOWS */

#if KMP_OS_UNIX
#define KMP_END_OF_LINE "\n"
typedef char kmp_int8;
typedef unsigned char kmp_uint8;
typedef short kmp_int16;
typedef unsigned short kmp_uint16;
typedef int kmp_int32;
typedef unsigned int kmp_uint32;
typedef long long kmp_int64;
typedef unsigned long long kmp_uint64;
#define KMP_INT32_SPEC "d"
#define KMP_UINT32_SPEC "u"
#define KMP_INT64_SPEC "lld"
#define KMP_UINT64_SPEC "llu"
#endif /* KMP_OS_UNIX */

#if KMP_ARCH_X86 || KMP_ARCH_ARM || KMP_ARCH_MIPS
#define KMP_SIZE_T_SPEC KMP_UINT32_SPEC
#elif KMP_ARCH_X86_64 || KMP_ARCH_PPC64 || KMP_ARCH_AARCH64 ||                 \
    KMP_ARCH_MIPS64 || KMP_ARCH_RISCV64 || KMP_ARCH_LOONGARCH64 ||             \
    KMP_ARCH_VE || KMP_ARCH_S390X
#define KMP_SIZE_T_SPEC KMP_UINT64_SPEC
#else
#error "Can't determine size_t printf format specifier."
#endif

#if KMP_ARCH_X86 || KMP_ARCH_ARM
#define KMP_SIZE_T_MAX (0xFFFFFFFF)
#else
#define KMP_SIZE_T_MAX (0xFFFFFFFFFFFFFFFF)
#endif

typedef size_t kmp_size_t;
typedef float kmp_real32;
typedef double kmp_real64;

#ifndef KMP_INTPTR
#define KMP_INTPTR 1
typedef long kmp_intptr_t;
typedef unsigned long kmp_uintptr_t;
#define KMP_INTPTR_SPEC "ld"
#define KMP_UINTPTR_SPEC "lu"
#endif

#ifdef BUILD_I8
typedef kmp_int64 kmp_int;
typedef kmp_uint64 kmp_uint;
#else
typedef kmp_int32 kmp_int;
typedef kmp_uint32 kmp_uint;
#endif /* BUILD_I8 */
#define KMP_INT_MAX ((kmp_int32)0x7FFFFFFF)
#define KMP_INT_MIN ((kmp_int32)0x80000000)

// stdarg handling
#if (KMP_ARCH_ARM || KMP_ARCH_X86_64 || KMP_ARCH_AARCH64) &&                   \
    (KMP_OS_FREEBSD || KMP_OS_LINUX)
typedef va_list *kmp_va_list;
#define kmp_va_deref(ap) (*(ap))
#define kmp_va_addr_of(ap) (&(ap))
#else
typedef va_list kmp_va_list;
#define kmp_va_deref(ap) (ap)
#define kmp_va_addr_of(ap) (ap)
#endif
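
// Illustrative sketch (not compiled, not part of the runtime API): a varargs
// consumer written against the kmp_va_list abstraction works unchanged whether
// kmp_va_list is va_list or va_list *. The helper name is hypothetical.
#if 0
static int __kmp_example_sum_ints(int n, kmp_va_list ap) {
  int sum = 0;
  for (int i = 0; i < n; ++i)
    sum += va_arg(kmp_va_deref(ap), int); // kmp_va_deref hides the indirection
  return sum;
}
#endif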

#ifdef __cplusplus
// macros to cast out qualifiers and to re-interpret types
#define CCAST(type, var) const_cast<type>(var)
#define RCAST(type, var) reinterpret_cast<type>(var)
//-------------------------------------------------------------------------
// template for debug prints specification ( d, u, lld, llu ), and to obtain
// signed/unsigned flavors of a type
template <typename T> struct traits_t {};
// int
template <> struct traits_t<signed int> {
  typedef signed int signed_t;
  typedef unsigned int unsigned_t;
  typedef double floating_t;
  static char const *spec;
  static const signed_t max_value = 0x7fffffff;
  static const signed_t min_value = 0x80000000;
  static const int type_size = sizeof(signed_t);
};
// unsigned int
template <> struct traits_t<unsigned int> {
  typedef signed int signed_t;
  typedef unsigned int unsigned_t;
  typedef double floating_t;
  static char const *spec;
  static const unsigned_t max_value = 0xffffffff;
  static const unsigned_t min_value = 0x00000000;
  static const int type_size = sizeof(unsigned_t);
};
// long
template <> struct traits_t<signed long> {
  typedef signed long signed_t;
  typedef unsigned long unsigned_t;
  typedef long double floating_t;
  static char const *spec;
  static const int type_size = sizeof(signed_t);
};
// long long
template <> struct traits_t<signed long long> {
  typedef signed long long signed_t;
  typedef unsigned long long unsigned_t;
  typedef long double floating_t;
  static char const *spec;
  static const signed_t max_value = 0x7fffffffffffffffLL;
  static const signed_t min_value = 0x8000000000000000LL;
  static const int type_size = sizeof(signed_t);
};
// unsigned long long
template <> struct traits_t<unsigned long long> {
  typedef signed long long signed_t;
  typedef unsigned long long unsigned_t;
  typedef long double floating_t;
  static char const *spec;
  static const unsigned_t max_value = 0xffffffffffffffffLL;
  static const unsigned_t min_value = 0x0000000000000000LL;
  static const int type_size = sizeof(unsigned_t);
};
//-------------------------------------------------------------------------
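
// Illustrative sketch (not compiled, not part of the runtime API): traits_t
// lets generic debug code pick the right printf specifier for a template
// parameter; the spec strings themselves are defined elsewhere in the library.
#if 0
#include <stdio.h>
template <typename T> void __kmp_example_dump(char const *name, T value) {
  char fmt[64];
  // Builds e.g. "%s = %lld\n" for T = signed long long.
  snprintf(fmt, sizeof(fmt), "%%s = %%%s\n", traits_t<T>::spec);
  printf(fmt, name, value);
}
#endif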
#else
#define CCAST(type, var) (type)(var)
#define RCAST(type, var) (type)(var)
#endif // __cplusplus

#define KMP_EXPORT extern /* export declaration in guide libraries */

#if __GNUC__ >= 4 && !defined(__MINGW32__)
#define __forceinline __inline
#endif

/* Check if the OS/arch can support user-level mwait */
// All mwait code tests for UMWAIT first, so it should only fall back to ring3
// MWAIT for KNL.
#define KMP_HAVE_MWAIT                                                         \
  ((KMP_ARCH_X86 || KMP_ARCH_X86_64) && (KMP_OS_LINUX || KMP_OS_WINDOWS) &&    \
   !KMP_MIC2)
#define KMP_HAVE_UMWAIT                                                        \
  ((KMP_ARCH_X86 || KMP_ARCH_X86_64) && (KMP_OS_LINUX || KMP_OS_WINDOWS) &&    \
   !KMP_MIC)

#if KMP_OS_WINDOWS
#include <windows.h>

static inline int KMP_GET_PAGE_SIZE(void) {
  SYSTEM_INFO si;
  GetSystemInfo(&si);
  return si.dwPageSize;
}
#else
#define KMP_GET_PAGE_SIZE() getpagesize()
#endif

#define PAGE_ALIGNED(_addr)                                                    \
  (!((size_t)_addr & (size_t)(KMP_GET_PAGE_SIZE() - 1)))
#define ALIGN_TO_PAGE(x)                                                       \
  (void *)(((size_t)(x)) & ~((size_t)(KMP_GET_PAGE_SIZE() - 1)))
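
// Illustrative sketch (not compiled, not part of the runtime API):
// PAGE_ALIGNED tests whether an address sits on a page boundary;
// ALIGN_TO_PAGE rounds an address down to the start of its page by masking
// off the low bits.
#if 0
static void __kmp_example_page_math(void *addr) {
  void *page = ALIGN_TO_PAGE(addr); // e.g. 0x7f001234 -> 0x7f001000 (4K pages)
  (void)page;
  if (!PAGE_ALIGNED(addr)) {
    /* addr is somewhere inside the page, not at its start */
  }
}
#endif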

/* ---------- Support for cache alignment, padding, etc. ----------------*/

#ifdef __cplusplus
extern "C" {
#endif // __cplusplus

#define INTERNODE_CACHE_LINE 4096 /* for multi-node systems */

/* Define the default size of the cache line */
#ifndef CACHE_LINE
#define CACHE_LINE 128 /* cache line size in bytes */
#else
#if (CACHE_LINE < 64) && !defined(KMP_OS_DARWIN)
// 2006-02-13: This produces too many warnings on OS X*. Disable for now
#warning CACHE_LINE is too small.
#endif
#endif /* CACHE_LINE */

#define KMP_CACHE_PREFETCH(ADDR) /* nothing */

// Define attribute that indicates that the fall through from the previous
// case label is intentional and should not be diagnosed by a compiler
// Code from libcxx/include/__config
// Use a function like macro to imply that it must be followed by a semicolon
#if __cplusplus > 201402L && __has_cpp_attribute(fallthrough)
#define KMP_FALLTHROUGH() [[fallthrough]]
// icc cannot properly tell this attribute is absent so force off
#elif KMP_COMPILER_ICC
#define KMP_FALLTHROUGH() ((void)0)
#elif __has_cpp_attribute(clang::fallthrough)
#define KMP_FALLTHROUGH() [[clang::fallthrough]]
#elif __has_attribute(fallthrough) || __GNUC__ >= 7
#define KMP_FALLTHROUGH() __attribute__((__fallthrough__))
#else
#define KMP_FALLTHROUGH() ((void)0)
#endif
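
// Illustrative sketch (not compiled, not part of the runtime API):
// KMP_FALLTHROUGH() sits where a break would go, documenting intent and
// silencing -Wimplicit-fallthrough. The function-like form means the usual
// trailing semicolon still parses.
#if 0
static int __kmp_example_classify(int x) {
  switch (x) {
  case 0:
    x += 1;
    KMP_FALLTHROUGH(); // deliberate: case 0 also runs the case 1 work
  case 1:
    return x + 10;
  default:
    return x;
  }
}
#endif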

#if KMP_HAVE_ATTRIBUTE_WAITPKG
#define KMP_ATTRIBUTE_TARGET_WAITPKG __attribute__((target("waitpkg")))
#else
#define KMP_ATTRIBUTE_TARGET_WAITPKG /* Nothing */
#endif

#if KMP_HAVE_ATTRIBUTE_RTM
#define KMP_ATTRIBUTE_TARGET_RTM __attribute__((target("rtm")))
#else
#define KMP_ATTRIBUTE_TARGET_RTM /* Nothing */
#endif

// Define attribute that indicates a function does not return
#if __cplusplus >= 201103L
#define KMP_NORETURN [[noreturn]]
#elif KMP_OS_WINDOWS
#define KMP_NORETURN __declspec(noreturn)
#else
#define KMP_NORETURN __attribute__((noreturn))
#endif

#if KMP_OS_WINDOWS && KMP_MSVC_COMPAT
#define KMP_ALIGN(bytes) __declspec(align(bytes))
#define KMP_THREAD_LOCAL __declspec(thread)
#define KMP_ALIAS /* Nothing */
#else
#define KMP_ALIGN(bytes) __attribute__((aligned(bytes)))
#define KMP_THREAD_LOCAL __thread
#define KMP_ALIAS(alias_of) __attribute__((alias(alias_of)))
#endif

#if KMP_HAVE_WEAK_ATTRIBUTE && !KMP_DYNAMIC_LIB
#define KMP_WEAK_ATTRIBUTE_EXTERNAL __attribute__((weak))
#else
#define KMP_WEAK_ATTRIBUTE_EXTERNAL /* Nothing */
#endif

#if KMP_HAVE_WEAK_ATTRIBUTE
#define KMP_WEAK_ATTRIBUTE_INTERNAL __attribute__((weak))
#else
#define KMP_WEAK_ATTRIBUTE_INTERNAL /* Nothing */
#endif

// Define KMP_VERSION_SYMBOL and KMP_EXPAND_NAME
#ifndef KMP_STR
#define KMP_STR(x) _KMP_STR(x)
#define _KMP_STR(x) #x
#endif

#ifdef KMP_USE_VERSION_SYMBOLS
// If using versioned symbols, KMP_EXPAND_NAME prepends
// __kmp_api_ to the real API name
#define KMP_EXPAND_NAME(api_name) _KMP_EXPAND_NAME(api_name)
#define _KMP_EXPAND_NAME(api_name) __kmp_api_##api_name
#define KMP_VERSION_SYMBOL(api_name, ver_num, ver_str)                         \
  _KMP_VERSION_SYMBOL(api_name, ver_num, ver_str, "VERSION")
#define _KMP_VERSION_SYMBOL(api_name, ver_num, ver_str, default_ver)           \
  __typeof__(__kmp_api_##api_name) __kmp_api_##api_name##_##ver_num##_alias    \
      __attribute__((alias(KMP_STR(__kmp_api_##api_name))));                   \
  __asm__(                                                                     \
      ".symver " KMP_STR(__kmp_api_##api_name##_##ver_num##_alias) "," KMP_STR(\
          api_name) "@" ver_str "\n\t");                                       \
  __asm__(".symver " KMP_STR(__kmp_api_##api_name) "," KMP_STR(                \
      api_name) "@@" default_ver "\n\t")

#define KMP_VERSION_OMPC_SYMBOL(apic_name, api_name, ver_num, ver_str)         \
  _KMP_VERSION_OMPC_SYMBOL(apic_name, api_name, ver_num, ver_str, "VERSION")
#define _KMP_VERSION_OMPC_SYMBOL(apic_name, api_name, ver_num, ver_str,        \
                                 default_ver)                                  \
  __typeof__(__kmp_api_##apic_name) __kmp_api_##apic_name##_##ver_num##_alias  \
      __attribute__((alias(KMP_STR(__kmp_api_##apic_name))));                  \
  __asm__(".symver " KMP_STR(__kmp_api_##apic_name) "," KMP_STR(               \
      apic_name) "@@" default_ver "\n\t");                                     \
  __asm__(                                                                     \
      ".symver " KMP_STR(__kmp_api_##apic_name##_##ver_num##_alias) "," KMP_STR(\
          api_name) "@" ver_str "\n\t")

#else // KMP_USE_VERSION_SYMBOLS
#define KMP_EXPAND_NAME(api_name) api_name
#define KMP_VERSION_SYMBOL(api_name, ver_num, ver_str) /* Nothing */
#define KMP_VERSION_OMPC_SYMBOL(apic_name, api_name, ver_num,                  \
                                ver_str) /* Nothing */
#endif // KMP_USE_VERSION_SYMBOLS
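
// Illustrative sketch (not compiled, not part of the runtime API): with
// version symbols enabled, an entry point is defined under its mangled
// __kmp_api_ name and then bound to a versioned ELF alias. The function name
// and version string below are hypothetical.
#if 0
int KMP_EXPAND_NAME(omp_example_call)(void) { // __kmp_api_omp_example_call
  return 0;
}
// Emits omp_example_call@OMP_1.0 (old version, via the alias) and
// omp_example_call@@VERSION (default version):
KMP_VERSION_SYMBOL(omp_example_call, 10, "OMP_1.0");
#endif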

/* Temporary note: if performance testing of this passes, we can remove
   all references to KMP_DO_ALIGN and replace with KMP_ALIGN. */
#define KMP_DO_ALIGN(bytes) KMP_ALIGN(bytes)
#define KMP_ALIGN_CACHE KMP_ALIGN(CACHE_LINE)
#define KMP_ALIGN_CACHE_INTERNODE KMP_ALIGN(INTERNODE_CACHE_LINE)

/* General purpose fence types for memory operations */
enum kmp_mem_fence_type {
  kmp_no_fence, /* No memory fence */
  kmp_acquire_fence, /* Acquire (read) memory fence */
  kmp_release_fence, /* Release (write) memory fence */
  kmp_full_fence /* Full (read+write) memory fence */
};

// Synchronization primitives

#if KMP_ASM_INTRINS && KMP_OS_WINDOWS &&                                       \
    !((KMP_ARCH_AARCH64 || KMP_ARCH_ARM) &&                                    \
      (KMP_COMPILER_CLANG || KMP_COMPILER_GCC))

#if KMP_MSVC_COMPAT && !KMP_COMPILER_CLANG
#pragma intrinsic(InterlockedExchangeAdd)
#pragma intrinsic(InterlockedCompareExchange)
#pragma intrinsic(InterlockedExchange)
#if !(KMP_COMPILER_ICX && KMP_32_BIT_ARCH)
#pragma intrinsic(InterlockedExchange64)
#endif
#endif

// Using InterlockedIncrement / InterlockedDecrement causes a library loading
// ordering problem, so we use InterlockedExchangeAdd instead.
#define KMP_TEST_THEN_INC32(p) InterlockedExchangeAdd((volatile long *)(p), 1)
#define KMP_TEST_THEN_INC_ACQ32(p)                                             \
  InterlockedExchangeAdd((volatile long *)(p), 1)
#define KMP_TEST_THEN_ADD4_32(p) InterlockedExchangeAdd((volatile long *)(p), 4)
#define KMP_TEST_THEN_ADD4_ACQ32(p)                                            \
  InterlockedExchangeAdd((volatile long *)(p), 4)
#define KMP_TEST_THEN_DEC32(p) InterlockedExchangeAdd((volatile long *)(p), -1)
#define KMP_TEST_THEN_DEC_ACQ32(p)                                             \
  InterlockedExchangeAdd((volatile long *)(p), -1)
#define KMP_TEST_THEN_ADD32(p, v)                                              \
  InterlockedExchangeAdd((volatile long *)(p), (v))
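// Illustrative sketch (not compiled, not part of the runtime API): the
// TEST_THEN_* macros are fetch-and-op primitives; they return the value the
// location held *before* the update, like InterlockedExchangeAdd or
// __sync_fetch_and_add.
#if 0
static void __kmp_example_ticket(volatile kmp_int32 *counter) {
  kmp_int32 my_ticket = KMP_TEST_THEN_INC32(counter); // old value, then +1
  (void)my_ticket;
}
#endif
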
#define KMP_COMPARE_AND_STORE_RET32(p, cv, sv)                                 \
  InterlockedCompareExchange((volatile long *)(p), (long)(sv), (long)(cv))

#define KMP_XCHG_FIXED32(p, v)                                                 \
  InterlockedExchange((volatile long *)(p), (long)(v))
#define KMP_XCHG_FIXED64(p, v)                                                 \
  InterlockedExchange64((volatile kmp_int64 *)(p), (kmp_int64)(v))

inline kmp_real32 KMP_XCHG_REAL32(volatile kmp_real32 *p, kmp_real32 v) {
  kmp_int32 tmp = InterlockedExchange((volatile long *)p, *(long *)&v);
  return *(kmp_real32 *)&tmp;
}

#define KMP_TEST_THEN_OR8(p, v) __kmp_test_then_or8((p), (v))
#define KMP_TEST_THEN_AND8(p, v) __kmp_test_then_and8((p), (v))
#define KMP_TEST_THEN_OR32(p, v) __kmp_test_then_or32((p), (v))
#define KMP_TEST_THEN_AND32(p, v) __kmp_test_then_and32((p), (v))
#define KMP_TEST_THEN_OR64(p, v) __kmp_test_then_or64((p), (v))
#define KMP_TEST_THEN_AND64(p, v) __kmp_test_then_and64((p), (v))

extern kmp_int8 __kmp_test_then_or8(volatile kmp_int8 *p, kmp_int8 v);
extern kmp_int8 __kmp_test_then_and8(volatile kmp_int8 *p, kmp_int8 v);
extern kmp_int32 __kmp_test_then_add32(volatile kmp_int32 *p, kmp_int32 v);
extern kmp_uint32 __kmp_test_then_or32(volatile kmp_uint32 *p, kmp_uint32 v);
extern kmp_uint32 __kmp_test_then_and32(volatile kmp_uint32 *p, kmp_uint32 v);
extern kmp_int64 __kmp_test_then_add64(volatile kmp_int64 *p, kmp_int64 v);
extern kmp_uint64 __kmp_test_then_or64(volatile kmp_uint64 *p, kmp_uint64 v);
extern kmp_uint64 __kmp_test_then_and64(volatile kmp_uint64 *p, kmp_uint64 v);

#if KMP_ARCH_AARCH64 && KMP_COMPILER_MSVC && !KMP_COMPILER_CLANG
#define KMP_TEST_THEN_INC64(p) _InterlockedExchangeAdd64((p), 1LL)
#define KMP_TEST_THEN_INC_ACQ64(p) _InterlockedExchangeAdd64_acq((p), 1LL)
#define KMP_TEST_THEN_ADD4_64(p) _InterlockedExchangeAdd64((p), 4LL)
// #define KMP_TEST_THEN_ADD4_ACQ64(p) _InterlockedExchangeAdd64_acq((p), 4LL)
// #define KMP_TEST_THEN_DEC64(p) _InterlockedExchangeAdd64((p), -1LL)
// #define KMP_TEST_THEN_DEC_ACQ64(p) _InterlockedExchangeAdd64_acq((p), -1LL)
// #define KMP_TEST_THEN_ADD8(p, v) _InterlockedExchangeAdd8((p), (v))
#define KMP_TEST_THEN_ADD64(p, v) _InterlockedExchangeAdd64((p), (v))

#define KMP_COMPARE_AND_STORE_ACQ8(p, cv, sv)                                  \
  __kmp_compare_and_store_acq8((p), (cv), (sv))
#define KMP_COMPARE_AND_STORE_REL8(p, cv, sv)                                  \
  __kmp_compare_and_store_rel8((p), (cv), (sv))
#define KMP_COMPARE_AND_STORE_ACQ16(p, cv, sv)                                 \
  __kmp_compare_and_store_acq16((p), (cv), (sv))
/*
#define KMP_COMPARE_AND_STORE_REL16(p, cv, sv)                                 \
  __kmp_compare_and_store_rel16((p), (cv), (sv))
*/
#define KMP_COMPARE_AND_STORE_ACQ32(p, cv, sv)                                 \
  __kmp_compare_and_store_acq32((volatile kmp_int32 *)(p), (kmp_int32)(cv),    \
                                (kmp_int32)(sv))
#define KMP_COMPARE_AND_STORE_REL32(p, cv, sv)                                 \
  __kmp_compare_and_store_rel32((volatile kmp_int32 *)(p), (kmp_int32)(cv),    \
                                (kmp_int32)(sv))
#define KMP_COMPARE_AND_STORE_ACQ64(p, cv, sv)                                 \
  __kmp_compare_and_store_acq64((volatile kmp_int64 *)(p), (kmp_int64)(cv),    \
                                (kmp_int64)(sv))
#define KMP_COMPARE_AND_STORE_REL64(p, cv, sv)                                 \
  __kmp_compare_and_store_rel64((volatile kmp_int64 *)(p), (kmp_int64)(cv),    \
                                (kmp_int64)(sv))
#define KMP_COMPARE_AND_STORE_PTR(p, cv, sv)                                   \
  __kmp_compare_and_store_ptr((void *volatile *)(p), (void *)(cv), (void *)(sv))

// KMP_COMPARE_AND_STORE expects this order: pointer, compare, exchange
// _InterlockedCompareExchange expects this order: pointer, exchange, compare
// KMP_COMPARE_AND_STORE also returns a bool indicating a successful write. A
// write is successful if the return value of _InterlockedCompareExchange is the
// same as the compare value.
inline kmp_int8 __kmp_compare_and_store_acq8(volatile kmp_int8 *p, kmp_int8 cv,
                                             kmp_int8 sv) {
  return _InterlockedCompareExchange8_acq(p, sv, cv) == cv;
}

inline kmp_int8 __kmp_compare_and_store_rel8(volatile kmp_int8 *p, kmp_int8 cv,
                                             kmp_int8 sv) {
  return _InterlockedCompareExchange8_rel(p, sv, cv) == cv;
}

inline kmp_int16 __kmp_compare_and_store_acq16(volatile kmp_int16 *p,
                                               kmp_int16 cv, kmp_int16 sv) {
  return _InterlockedCompareExchange16_acq(p, sv, cv) == cv;
}

inline kmp_int16 __kmp_compare_and_store_rel16(volatile kmp_int16 *p,
                                               kmp_int16 cv, kmp_int16 sv) {
  return _InterlockedCompareExchange16_rel(p, sv, cv) == cv;
}

inline kmp_int32 __kmp_compare_and_store_acq32(volatile kmp_int32 *p,
                                               kmp_int32 cv, kmp_int32 sv) {
  return _InterlockedCompareExchange_acq((volatile long *)p, sv, cv) == cv;
}

inline kmp_int32 __kmp_compare_and_store_rel32(volatile kmp_int32 *p,
                                               kmp_int32 cv, kmp_int32 sv) {
  return _InterlockedCompareExchange_rel((volatile long *)p, sv, cv) == cv;
}

inline kmp_int32 __kmp_compare_and_store_acq64(volatile kmp_int64 *p,
                                               kmp_int64 cv, kmp_int64 sv) {
  return _InterlockedCompareExchange64_acq(p, sv, cv) == cv;
}

inline kmp_int32 __kmp_compare_and_store_rel64(volatile kmp_int64 *p,
                                               kmp_int64 cv, kmp_int64 sv) {
  return _InterlockedCompareExchange64_rel(p, sv, cv) == cv;
}

inline kmp_int32 __kmp_compare_and_store_ptr(void *volatile *p, void *cv,
                                             void *sv) {
  return _InterlockedCompareExchangePointer(p, sv, cv) == cv;
}

// The _RET versions return the value instead of a bool

#define KMP_COMPARE_AND_STORE_RET8(p, cv, sv)                                  \
  _InterlockedCompareExchange8((p), (sv), (cv))
#define KMP_COMPARE_AND_STORE_RET16(p, cv, sv)                                 \
  _InterlockedCompareExchange16((p), (sv), (cv))

#define KMP_COMPARE_AND_STORE_RET64(p, cv, sv)                                 \
  _InterlockedCompareExchange64((volatile kmp_int64 *)(p), (kmp_int64)(sv),    \
                                (kmp_int64)(cv))

#define KMP_XCHG_FIXED8(p, v)                                                  \
  _InterlockedExchange8((volatile kmp_int8 *)(p), (kmp_int8)(v));
#define KMP_XCHG_FIXED16(p, v) _InterlockedExchange16((p), (v));
#define KMP_XCHG_REAL64(p, v) __kmp_xchg_real64((p), (v));

inline kmp_real64 __kmp_xchg_real64(volatile kmp_real64 *p, kmp_real64 v) {
  kmp_int64 tmp =
      _InterlockedExchange64((volatile kmp_int64 *)p, *(kmp_int64 *)&v);
  return *(kmp_real64 *)&tmp;
}

#else // !KMP_ARCH_AARCH64

// Routines that we still need to implement in assembly.
extern kmp_int8 __kmp_test_then_add8(volatile kmp_int8 *p, kmp_int8 v);

extern kmp_int8 __kmp_compare_and_store8(volatile kmp_int8 *p, kmp_int8 cv,
                                         kmp_int8 sv);
extern kmp_int16 __kmp_compare_and_store16(volatile kmp_int16 *p, kmp_int16 cv,
                                           kmp_int16 sv);
extern kmp_int32 __kmp_compare_and_store32(volatile kmp_int32 *p, kmp_int32 cv,
                                           kmp_int32 sv);
extern kmp_int32 __kmp_compare_and_store64(volatile kmp_int64 *p, kmp_int64 cv,
                                           kmp_int64 sv);
extern kmp_int8 __kmp_compare_and_store_ret8(volatile kmp_int8 *p, kmp_int8 cv,
                                             kmp_int8 sv);
extern kmp_int16 __kmp_compare_and_store_ret16(volatile kmp_int16 *p,
                                               kmp_int16 cv, kmp_int16 sv);
extern kmp_int32 __kmp_compare_and_store_ret32(volatile kmp_int32 *p,
                                               kmp_int32 cv, kmp_int32 sv);
extern kmp_int64 __kmp_compare_and_store_ret64(volatile kmp_int64 *p,
                                               kmp_int64 cv, kmp_int64 sv);

extern kmp_int8 __kmp_xchg_fixed8(volatile kmp_int8 *p, kmp_int8 v);
extern kmp_int16 __kmp_xchg_fixed16(volatile kmp_int16 *p, kmp_int16 v);
extern kmp_int32 __kmp_xchg_fixed32(volatile kmp_int32 *p, kmp_int32 v);
extern kmp_int64 __kmp_xchg_fixed64(volatile kmp_int64 *p, kmp_int64 v);
extern kmp_real32 __kmp_xchg_real32(volatile kmp_real32 *p, kmp_real32 v);
extern kmp_real64 __kmp_xchg_real64(volatile kmp_real64 *p, kmp_real64 v);

//#define KMP_TEST_THEN_INC32(p) __kmp_test_then_add32((p), 1)
//#define KMP_TEST_THEN_INC_ACQ32(p) __kmp_test_then_add32((p), 1)
#define KMP_TEST_THEN_INC64(p) __kmp_test_then_add64((p), 1LL)
#define KMP_TEST_THEN_INC_ACQ64(p) __kmp_test_then_add64((p), 1LL)
//#define KMP_TEST_THEN_ADD4_32(p) __kmp_test_then_add32((p), 4)
//#define KMP_TEST_THEN_ADD4_ACQ32(p) __kmp_test_then_add32((p), 4)
#define KMP_TEST_THEN_ADD4_64(p) __kmp_test_then_add64((p), 4LL)
#define KMP_TEST_THEN_ADD4_ACQ64(p) __kmp_test_then_add64((p), 4LL)
//#define KMP_TEST_THEN_DEC32(p) __kmp_test_then_add32((p), -1)
//#define KMP_TEST_THEN_DEC_ACQ32(p) __kmp_test_then_add32((p), -1)
#define KMP_TEST_THEN_DEC64(p) __kmp_test_then_add64((p), -1LL)
#define KMP_TEST_THEN_DEC_ACQ64(p) __kmp_test_then_add64((p), -1LL)
//#define KMP_TEST_THEN_ADD32(p, v) __kmp_test_then_add32((p), (v))
#define KMP_TEST_THEN_ADD8(p, v) __kmp_test_then_add8((p), (v))
#define KMP_TEST_THEN_ADD64(p, v) __kmp_test_then_add64((p), (v))

#define KMP_COMPARE_AND_STORE_ACQ8(p, cv, sv)                                  \
  __kmp_compare_and_store8((p), (cv), (sv))
#define KMP_COMPARE_AND_STORE_REL8(p, cv, sv)                                  \
  __kmp_compare_and_store8((p), (cv), (sv))
#define KMP_COMPARE_AND_STORE_ACQ16(p, cv, sv)                                 \
  __kmp_compare_and_store16((p), (cv), (sv))
#define KMP_COMPARE_AND_STORE_REL16(p, cv, sv)                                 \
  __kmp_compare_and_store16((p), (cv), (sv))
#define KMP_COMPARE_AND_STORE_ACQ32(p, cv, sv)                                 \
  __kmp_compare_and_store32((volatile kmp_int32 *)(p), (kmp_int32)(cv),        \
                            (kmp_int32)(sv))
#define KMP_COMPARE_AND_STORE_REL32(p, cv, sv)                                 \
  __kmp_compare_and_store32((volatile kmp_int32 *)(p), (kmp_int32)(cv),        \
                            (kmp_int32)(sv))
#define KMP_COMPARE_AND_STORE_ACQ64(p, cv, sv)                                 \
  __kmp_compare_and_store64((volatile kmp_int64 *)(p), (kmp_int64)(cv),        \
                            (kmp_int64)(sv))
#define KMP_COMPARE_AND_STORE_REL64(p, cv, sv)                                 \
  __kmp_compare_and_store64((volatile kmp_int64 *)(p), (kmp_int64)(cv),        \
                            (kmp_int64)(sv))

#if KMP_ARCH_X86
#define KMP_COMPARE_AND_STORE_PTR(p, cv, sv)                                   \
  __kmp_compare_and_store32((volatile kmp_int32 *)(p), (kmp_int32)(cv),        \
                            (kmp_int32)(sv))
#else /* 64 bit pointers */
#define KMP_COMPARE_AND_STORE_PTR(p, cv, sv)                                   \
  __kmp_compare_and_store64((volatile kmp_int64 *)(p), (kmp_int64)(cv),        \
                            (kmp_int64)(sv))
#endif /* KMP_ARCH_X86 */

#define KMP_COMPARE_AND_STORE_RET8(p, cv, sv)                                  \
  __kmp_compare_and_store_ret8((p), (cv), (sv))
#define KMP_COMPARE_AND_STORE_RET16(p, cv, sv)                                 \
  __kmp_compare_and_store_ret16((p), (cv), (sv))
#define KMP_COMPARE_AND_STORE_RET64(p, cv, sv)                                 \
  __kmp_compare_and_store_ret64((volatile kmp_int64 *)(p), (kmp_int64)(cv),    \
                                (kmp_int64)(sv))

#define KMP_XCHG_FIXED8(p, v)                                                  \
  __kmp_xchg_fixed8((volatile kmp_int8 *)(p), (kmp_int8)(v));
#define KMP_XCHG_FIXED16(p, v) __kmp_xchg_fixed16((p), (v));
//#define KMP_XCHG_FIXED32(p, v) __kmp_xchg_fixed32((p), (v));
//#define KMP_XCHG_FIXED64(p, v) __kmp_xchg_fixed64((p), (v));
//#define KMP_XCHG_REAL32(p, v) __kmp_xchg_real32((p), (v));
#define KMP_XCHG_REAL64(p, v) __kmp_xchg_real64((p), (v));
#endif

#elif (KMP_ASM_INTRINS && KMP_OS_UNIX) || !(KMP_ARCH_X86 || KMP_ARCH_X86_64)

/* cast p to correct type so that proper intrinsic will be used */
#define KMP_TEST_THEN_INC32(p)                                                 \
  __sync_fetch_and_add((volatile kmp_int32 *)(p), 1)
#define KMP_TEST_THEN_INC_ACQ32(p)                                             \
  __sync_fetch_and_add((volatile kmp_int32 *)(p), 1)
#if KMP_ARCH_MIPS
#define KMP_TEST_THEN_INC64(p)                                                 \
  __atomic_fetch_add((volatile kmp_int64 *)(p), 1LL, __ATOMIC_SEQ_CST)
#define KMP_TEST_THEN_INC_ACQ64(p)                                             \
  __atomic_fetch_add((volatile kmp_int64 *)(p), 1LL, __ATOMIC_SEQ_CST)
#else
#define KMP_TEST_THEN_INC64(p)                                                 \
  __sync_fetch_and_add((volatile kmp_int64 *)(p), 1LL)
#define KMP_TEST_THEN_INC_ACQ64(p)                                             \
  __sync_fetch_and_add((volatile kmp_int64 *)(p), 1LL)
#endif
#define KMP_TEST_THEN_ADD4_32(p)                                               \
  __sync_fetch_and_add((volatile kmp_int32 *)(p), 4)
#define KMP_TEST_THEN_ADD4_ACQ32(p)                                            \
  __sync_fetch_and_add((volatile kmp_int32 *)(p), 4)
#if KMP_ARCH_MIPS
#define KMP_TEST_THEN_ADD4_64(p)                                               \
  __atomic_fetch_add((volatile kmp_int64 *)(p), 4LL, __ATOMIC_SEQ_CST)
#define KMP_TEST_THEN_ADD4_ACQ64(p)                                            \
  __atomic_fetch_add((volatile kmp_int64 *)(p), 4LL, __ATOMIC_SEQ_CST)
#define KMP_TEST_THEN_DEC64(p)                                                 \
  __atomic_fetch_sub((volatile kmp_int64 *)(p), 1LL, __ATOMIC_SEQ_CST)
#define KMP_TEST_THEN_DEC_ACQ64(p)                                             \
  __atomic_fetch_sub((volatile kmp_int64 *)(p), 1LL, __ATOMIC_SEQ_CST)
#else
#define KMP_TEST_THEN_ADD4_64(p)                                               \
  __sync_fetch_and_add((volatile kmp_int64 *)(p), 4LL)
#define KMP_TEST_THEN_ADD4_ACQ64(p)                                            \
  __sync_fetch_and_add((volatile kmp_int64 *)(p), 4LL)
#define KMP_TEST_THEN_DEC64(p)                                                 \
  __sync_fetch_and_sub((volatile kmp_int64 *)(p), 1LL)
#define KMP_TEST_THEN_DEC_ACQ64(p)                                             \
  __sync_fetch_and_sub((volatile kmp_int64 *)(p), 1LL)
#endif
#define KMP_TEST_THEN_DEC32(p)                                                 \
  __sync_fetch_and_sub((volatile kmp_int32 *)(p), 1)
#define KMP_TEST_THEN_DEC_ACQ32(p)                                             \
  __sync_fetch_and_sub((volatile kmp_int32 *)(p), 1)
#define KMP_TEST_THEN_ADD8(p, v)                                               \
  __sync_fetch_and_add((volatile kmp_int8 *)(p), (kmp_int8)(v))
#define KMP_TEST_THEN_ADD32(p, v)                                              \
  __sync_fetch_and_add((volatile kmp_int32 *)(p), (kmp_int32)(v))
#if KMP_ARCH_MIPS
#define KMP_TEST_THEN_ADD64(p, v)                                              \
  __atomic_fetch_add((volatile kmp_uint64 *)(p), (kmp_uint64)(v),              \
                     __ATOMIC_SEQ_CST)
#else
#define KMP_TEST_THEN_ADD64(p, v)                                              \
  __sync_fetch_and_add((volatile kmp_int64 *)(p), (kmp_int64)(v))
#endif

#define KMP_TEST_THEN_OR8(p, v)                                                \
  __sync_fetch_and_or((volatile kmp_int8 *)(p), (kmp_int8)(v))
#define KMP_TEST_THEN_AND8(p, v)                                               \
  __sync_fetch_and_and((volatile kmp_int8 *)(p), (kmp_int8)(v))
#define KMP_TEST_THEN_OR32(p, v)                                               \
  __sync_fetch_and_or((volatile kmp_uint32 *)(p), (kmp_uint32)(v))
#define KMP_TEST_THEN_AND32(p, v)                                              \
  __sync_fetch_and_and((volatile kmp_uint32 *)(p), (kmp_uint32)(v))
#if KMP_ARCH_MIPS
#define KMP_TEST_THEN_OR64(p, v)                                               \
  __atomic_fetch_or((volatile kmp_uint64 *)(p), (kmp_uint64)(v),               \
                    __ATOMIC_SEQ_CST)
#define KMP_TEST_THEN_AND64(p, v)                                              \
  __atomic_fetch_and((volatile kmp_uint64 *)(p), (kmp_uint64)(v),              \
                     __ATOMIC_SEQ_CST)
#else
#define KMP_TEST_THEN_OR64(p, v)                                               \
  __sync_fetch_and_or((volatile kmp_uint64 *)(p), (kmp_uint64)(v))
#define KMP_TEST_THEN_AND64(p, v)                                              \
  __sync_fetch_and_and((volatile kmp_uint64 *)(p), (kmp_uint64)(v))
#endif

#define KMP_COMPARE_AND_STORE_ACQ8(p, cv, sv)                                  \
  __sync_bool_compare_and_swap((volatile kmp_uint8 *)(p), (kmp_uint8)(cv),     \
                               (kmp_uint8)(sv))
#define KMP_COMPARE_AND_STORE_REL8(p, cv, sv)                                  \
  __sync_bool_compare_and_swap((volatile kmp_uint8 *)(p), (kmp_uint8)(cv),     \
                               (kmp_uint8)(sv))
#define KMP_COMPARE_AND_STORE_ACQ16(p, cv, sv)                                 \
  __sync_bool_compare_and_swap((volatile kmp_uint16 *)(p), (kmp_uint16)(cv),   \
                               (kmp_uint16)(sv))
#define KMP_COMPARE_AND_STORE_REL16(p, cv, sv)                                 \
  __sync_bool_compare_and_swap((volatile kmp_uint16 *)(p), (kmp_uint16)(cv),   \
                               (kmp_uint16)(sv))
#define KMP_COMPARE_AND_STORE_ACQ32(p, cv, sv)                                 \
  __sync_bool_compare_and_swap((volatile kmp_uint32 *)(p), (kmp_uint32)(cv),   \
                               (kmp_uint32)(sv))
#define KMP_COMPARE_AND_STORE_REL32(p, cv, sv)                                 \
  __sync_bool_compare_and_swap((volatile kmp_uint32 *)(p), (kmp_uint32)(cv),   \
                               (kmp_uint32)(sv))
#define KMP_COMPARE_AND_STORE_PTR(p, cv, sv)                                   \
  __sync_bool_compare_and_swap((void *volatile *)(p), (void *)(cv),            \
                               (void *)(sv))

#define KMP_COMPARE_AND_STORE_RET8(p, cv, sv)                                  \
  __sync_val_compare_and_swap((volatile kmp_uint8 *)(p), (kmp_uint8)(cv),      \
                              (kmp_uint8)(sv))
#define KMP_COMPARE_AND_STORE_RET16(p, cv, sv)                                 \
  __sync_val_compare_and_swap((volatile kmp_uint16 *)(p), (kmp_uint16)(cv),    \
                              (kmp_uint16)(sv))
#define KMP_COMPARE_AND_STORE_RET32(p, cv, sv)                                 \
  __sync_val_compare_and_swap((volatile kmp_uint32 *)(p), (kmp_uint32)(cv),    \
                              (kmp_uint32)(sv))
#if KMP_ARCH_MIPS
static inline bool mips_sync_bool_compare_and_swap(volatile kmp_uint64 *p,
                                                   kmp_uint64 cv,
                                                   kmp_uint64 sv) {
  return __atomic_compare_exchange(p, &cv, &sv, false, __ATOMIC_SEQ_CST,
                                   __ATOMIC_SEQ_CST);
}
// Returns the value observed at *p (the _RET contract), not a bool: on
// failure, __atomic_compare_exchange writes the observed value back into cv.
static inline kmp_uint64 mips_sync_val_compare_and_swap(volatile kmp_uint64 *p,
                                                        kmp_uint64 cv,
                                                        kmp_uint64 sv) {
  __atomic_compare_exchange(p, &cv, &sv, false, __ATOMIC_SEQ_CST,
                            __ATOMIC_SEQ_CST);
  return cv;
}
#define KMP_COMPARE_AND_STORE_ACQ64(p, cv, sv)                                 \
  mips_sync_bool_compare_and_swap((volatile kmp_uint64 *)(p),                  \
                                  (kmp_uint64)(cv), (kmp_uint64)(sv))
#define KMP_COMPARE_AND_STORE_REL64(p, cv, sv)                                 \
  mips_sync_bool_compare_and_swap((volatile kmp_uint64 *)(p),                  \
                                  (kmp_uint64)(cv), (kmp_uint64)(sv))
#define KMP_COMPARE_AND_STORE_RET64(p, cv, sv)                                 \
  mips_sync_val_compare_and_swap((volatile kmp_uint64 *)(p), (kmp_uint64)(cv), \
                                 (kmp_uint64)(sv))
#else
#define KMP_COMPARE_AND_STORE_ACQ64(p, cv, sv)                                 \
  __sync_bool_compare_and_swap((volatile kmp_uint64 *)(p), (kmp_uint64)(cv),   \
                               (kmp_uint64)(sv))
#define KMP_COMPARE_AND_STORE_REL64(p, cv, sv)                                 \
  __sync_bool_compare_and_swap((volatile kmp_uint64 *)(p), (kmp_uint64)(cv),   \
                               (kmp_uint64)(sv))
#define KMP_COMPARE_AND_STORE_RET64(p, cv, sv)                                 \
  __sync_val_compare_and_swap((volatile kmp_uint64 *)(p), (kmp_uint64)(cv),    \
                              (kmp_uint64)(sv))
#endif

#if KMP_OS_DARWIN && defined(__INTEL_COMPILER) && __INTEL_COMPILER >= 1800
#define KMP_XCHG_FIXED8(p, v)                                                  \
  __atomic_exchange_1((volatile kmp_uint8 *)(p), (kmp_uint8)(v),               \
                      __ATOMIC_SEQ_CST)
#else
#define KMP_XCHG_FIXED8(p, v)                                                  \
  __sync_lock_test_and_set((volatile kmp_uint8 *)(p), (kmp_uint8)(v))
#endif
#define KMP_XCHG_FIXED16(p, v)                                                 \
  __sync_lock_test_and_set((volatile kmp_uint16 *)(p), (kmp_uint16)(v))
#define KMP_XCHG_FIXED32(p, v)                                                 \
  __sync_lock_test_and_set((volatile kmp_uint32 *)(p), (kmp_uint32)(v))
#define KMP_XCHG_FIXED64(p, v)                                                 \
  __sync_lock_test_and_set((volatile kmp_uint64 *)(p), (kmp_uint64)(v))

inline kmp_real32 KMP_XCHG_REAL32(volatile kmp_real32 *p, kmp_real32 v) {
  volatile kmp_uint32 *up;
  kmp_uint32 uv;
  memcpy(&up, &p, sizeof(up));
  memcpy(&uv, &v, sizeof(uv));
  kmp_int32 tmp = __sync_lock_test_and_set(up, uv);
  kmp_real32 ftmp;
  memcpy(&ftmp, &tmp, sizeof(tmp));
  return ftmp;
}

inline kmp_real64 KMP_XCHG_REAL64(volatile kmp_real64 *p, kmp_real64 v) {
  volatile kmp_uint64 *up;
  kmp_uint64 uv;
  memcpy(&up, &p, sizeof(up));
  memcpy(&uv, &v, sizeof(uv));
  kmp_int64 tmp = __sync_lock_test_and_set(up, uv);
  kmp_real64 dtmp;
  memcpy(&dtmp, &tmp, sizeof(tmp));
  return dtmp;
}

#else

extern kmp_int8 __kmp_test_then_add8(volatile kmp_int8 *p, kmp_int8 v);
extern kmp_int8 __kmp_test_then_or8(volatile kmp_int8 *p, kmp_int8 v);
extern kmp_int8 __kmp_test_then_and8(volatile kmp_int8 *p, kmp_int8 v);
extern kmp_int32 __kmp_test_then_add32(volatile kmp_int32 *p, kmp_int32 v);
extern kmp_uint32 __kmp_test_then_or32(volatile kmp_uint32 *p, kmp_uint32 v);
extern kmp_uint32 __kmp_test_then_and32(volatile kmp_uint32 *p, kmp_uint32 v);
extern kmp_int64 __kmp_test_then_add64(volatile kmp_int64 *p, kmp_int64 v);
extern kmp_uint64 __kmp_test_then_or64(volatile kmp_uint64 *p, kmp_uint64 v);
extern kmp_uint64 __kmp_test_then_and64(volatile kmp_uint64 *p, kmp_uint64 v);

extern kmp_int8 __kmp_compare_and_store8(volatile kmp_int8 *p, kmp_int8 cv,
                                         kmp_int8 sv);
extern kmp_int16 __kmp_compare_and_store16(volatile kmp_int16 *p, kmp_int16 cv,
                                           kmp_int16 sv);
extern kmp_int32 __kmp_compare_and_store32(volatile kmp_int32 *p, kmp_int32 cv,
                                           kmp_int32 sv);
extern kmp_int32 __kmp_compare_and_store64(volatile kmp_int64 *p, kmp_int64 cv,
                                           kmp_int64 sv);
extern kmp_int8 __kmp_compare_and_store_ret8(volatile kmp_int8 *p, kmp_int8 cv,
                                             kmp_int8 sv);
extern kmp_int16 __kmp_compare_and_store_ret16(volatile kmp_int16 *p,
                                               kmp_int16 cv, kmp_int16 sv);
extern kmp_int32 __kmp_compare_and_store_ret32(volatile kmp_int32 *p,
                                               kmp_int32 cv, kmp_int32 sv);
extern kmp_int64 __kmp_compare_and_store_ret64(volatile kmp_int64 *p,
                                               kmp_int64 cv, kmp_int64 sv);

extern kmp_int8 __kmp_xchg_fixed8(volatile kmp_int8 *p, kmp_int8 v);
extern kmp_int16 __kmp_xchg_fixed16(volatile kmp_int16 *p, kmp_int16 v);
extern kmp_int32 __kmp_xchg_fixed32(volatile kmp_int32 *p, kmp_int32 v);
extern kmp_int64 __kmp_xchg_fixed64(volatile kmp_int64 *p, kmp_int64 v);
extern kmp_real32 __kmp_xchg_real32(volatile kmp_real32 *p, kmp_real32 v);
extern kmp_real64 __kmp_xchg_real64(volatile kmp_real64 *p, kmp_real64 v);

#define KMP_TEST_THEN_INC32(p)                                                 \
  __kmp_test_then_add32((volatile kmp_int32 *)(p), 1)
#define KMP_TEST_THEN_INC_ACQ32(p)                                             \
  __kmp_test_then_add32((volatile kmp_int32 *)(p), 1)
#define KMP_TEST_THEN_INC64(p)                                                 \
  __kmp_test_then_add64((volatile kmp_int64 *)(p), 1LL)
#define KMP_TEST_THEN_INC_ACQ64(p)                                             \
  __kmp_test_then_add64((volatile kmp_int64 *)(p), 1LL)
#define KMP_TEST_THEN_ADD4_32(p)                                               \
  __kmp_test_then_add32((volatile kmp_int32 *)(p), 4)
#define KMP_TEST_THEN_ADD4_ACQ32(p)                                            \
  __kmp_test_then_add32((volatile kmp_int32 *)(p), 4)
#define KMP_TEST_THEN_ADD4_64(p)                                               \
  __kmp_test_then_add64((volatile kmp_int64 *)(p), 4LL)
#define KMP_TEST_THEN_ADD4_ACQ64(p)                                            \
  __kmp_test_then_add64((volatile kmp_int64 *)(p), 4LL)
#define KMP_TEST_THEN_DEC32(p)                                                 \
  __kmp_test_then_add32((volatile kmp_int32 *)(p), -1)
#define KMP_TEST_THEN_DEC_ACQ32(p)                                             \
  __kmp_test_then_add32((volatile kmp_int32 *)(p), -1)
#define KMP_TEST_THEN_DEC64(p)                                                 \
  __kmp_test_then_add64((volatile kmp_int64 *)(p), -1LL)
#define KMP_TEST_THEN_DEC_ACQ64(p)                                             \
  __kmp_test_then_add64((volatile kmp_int64 *)(p), -1LL)
#define KMP_TEST_THEN_ADD8(p, v)                                               \
  __kmp_test_then_add8((volatile kmp_int8 *)(p), (kmp_int8)(v))
#define KMP_TEST_THEN_ADD32(p, v)                                              \
  __kmp_test_then_add32((volatile kmp_int32 *)(p), (kmp_int32)(v))
#define KMP_TEST_THEN_ADD64(p, v)                                              \
  __kmp_test_then_add64((volatile kmp_int64 *)(p), (kmp_int64)(v))

#define KMP_TEST_THEN_OR8(p, v)                                                \
  __kmp_test_then_or8((volatile kmp_int8 *)(p), (kmp_int8)(v))
#define KMP_TEST_THEN_AND8(p, v)                                               \
  __kmp_test_then_and8((volatile kmp_int8 *)(p), (kmp_int8)(v))
#define KMP_TEST_THEN_OR32(p, v)                                               \
  __kmp_test_then_or32((volatile kmp_uint32 *)(p), (kmp_uint32)(v))
#define KMP_TEST_THEN_AND32(p, v)                                              \
  __kmp_test_then_and32((volatile kmp_uint32 *)(p), (kmp_uint32)(v))
#define KMP_TEST_THEN_OR64(p, v)                                               \
  __kmp_test_then_or64((volatile kmp_uint64 *)(p), (kmp_uint64)(v))
#define KMP_TEST_THEN_AND64(p, v)                                              \
  __kmp_test_then_and64((volatile kmp_uint64 *)(p), (kmp_uint64)(v))

#define KMP_COMPARE_AND_STORE_ACQ8(p, cv, sv)                                  \
  __kmp_compare_and_store8((volatile kmp_int8 *)(p), (kmp_int8)(cv),           \
                           (kmp_int8)(sv))
#define KMP_COMPARE_AND_STORE_REL8(p, cv, sv)                                  \
  __kmp_compare_and_store8((volatile kmp_int8 *)(p), (kmp_int8)(cv),           \
                           (kmp_int8)(sv))
#define KMP_COMPARE_AND_STORE_ACQ16(p, cv, sv)                                 \
  __kmp_compare_and_store16((volatile kmp_int16 *)(p), (kmp_int16)(cv),        \
                            (kmp_int16)(sv))
#define KMP_COMPARE_AND_STORE_REL16(p, cv, sv)                                 \
  __kmp_compare_and_store16((volatile kmp_int16 *)(p), (kmp_int16)(cv),        \
                            (kmp_int16)(sv))
#define KMP_COMPARE_AND_STORE_ACQ32(p, cv, sv)                                 \
  __kmp_compare_and_store32((volatile kmp_int32 *)(p), (kmp_int32)(cv),        \
                            (kmp_int32)(sv))
#define KMP_COMPARE_AND_STORE_REL32(p, cv, sv)                                 \
  __kmp_compare_and_store32((volatile kmp_int32 *)(p), (kmp_int32)(cv),        \
                            (kmp_int32)(sv))
#define KMP_COMPARE_AND_STORE_ACQ64(p, cv, sv)                                 \
  __kmp_compare_and_store64((volatile kmp_int64 *)(p), (kmp_int64)(cv),        \
                            (kmp_int64)(sv))
#define KMP_COMPARE_AND_STORE_REL64(p, cv, sv)                                 \
  __kmp_compare_and_store64((volatile kmp_int64 *)(p), (kmp_int64)(cv),        \
                            (kmp_int64)(sv))

#if KMP_ARCH_X86
#define KMP_COMPARE_AND_STORE_PTR(p, cv, sv)                                   \
  __kmp_compare_and_store32((volatile kmp_int32 *)(p), (kmp_int32)(cv),        \
                            (kmp_int32)(sv))
#else /* 64 bit pointers */
#define KMP_COMPARE_AND_STORE_PTR(p, cv, sv)                                   \
  __kmp_compare_and_store64((volatile kmp_int64 *)(p), (kmp_int64)(cv),        \
                            (kmp_int64)(sv))
#endif /* KMP_ARCH_X86 */

#define KMP_COMPARE_AND_STORE_RET8(p, cv, sv)                                  \
  __kmp_compare_and_store_ret8((p), (cv), (sv))
#define KMP_COMPARE_AND_STORE_RET16(p, cv, sv)                                 \
  __kmp_compare_and_store_ret16((p), (cv), (sv))
#define KMP_COMPARE_AND_STORE_RET32(p, cv, sv)                                 \
  __kmp_compare_and_store_ret32((volatile kmp_int32 *)(p), (kmp_int32)(cv),    \
                                (kmp_int32)(sv))
#define KMP_COMPARE_AND_STORE_RET64(p, cv, sv)                                 \
  __kmp_compare_and_store_ret64((volatile kmp_int64 *)(p), (kmp_int64)(cv),    \
                                (kmp_int64)(sv))

#define KMP_XCHG_FIXED8(p, v)                                                  \
  __kmp_xchg_fixed8((volatile kmp_int8 *)(p), (kmp_int8)(v));
#define KMP_XCHG_FIXED16(p, v) __kmp_xchg_fixed16((p), (v));
#define KMP_XCHG_FIXED32(p, v) __kmp_xchg_fixed32((p), (v));
#define KMP_XCHG_FIXED64(p, v) __kmp_xchg_fixed64((p), (v));
#define KMP_XCHG_REAL32(p, v) __kmp_xchg_real32((p), (v));
#define KMP_XCHG_REAL64(p, v) __kmp_xchg_real64((p), (v));

#endif /* KMP_ASM_INTRINS */

/* ------------- relaxed consistency memory model stuff ------------------ */

#if KMP_OS_WINDOWS
#ifdef __ABSOFT_WIN
#define KMP_MB() asm("nop")
#define KMP_IMB() asm("nop")
#else
#define KMP_MB() /* _asm{ nop } */
#define KMP_IMB() /* _asm{ nop } */
#endif
#endif /* KMP_OS_WINDOWS */

#if KMP_ARCH_PPC64 || KMP_ARCH_ARM || KMP_ARCH_AARCH64 || KMP_ARCH_MIPS ||     \
    KMP_ARCH_MIPS64 || KMP_ARCH_RISCV64 || KMP_ARCH_LOONGARCH64 ||             \
    KMP_ARCH_VE || KMP_ARCH_S390X
#if KMP_OS_WINDOWS
#undef KMP_MB
#define KMP_MB() std::atomic_thread_fence(std::memory_order_seq_cst)
#else /* !KMP_OS_WINDOWS */
#define KMP_MB() __sync_synchronize()
#endif
#endif

#ifndef KMP_MB
#define KMP_MB() /* nothing to do */
#endif

#if KMP_ARCH_X86 || KMP_ARCH_X86_64
#if KMP_MIC
// fence-style instructions do not exist, but lock; xaddl $0,(%rsp) can be used.
// We shouldn't need it, though, since the ABI rules require that
// * If the compiler generates NGO stores it also generates the fence
// * If users hand-code NGO stores they should insert the fence
// therefore no incomplete unordered stores should be visible.
#define KMP_MFENCE() /* Nothing */
#define KMP_SFENCE() /* Nothing */
#else
#if KMP_COMPILER_ICC || KMP_COMPILER_ICX
#define KMP_MFENCE_() _mm_mfence()
#define KMP_SFENCE_() _mm_sfence()
#elif KMP_COMPILER_MSVC
#define KMP_MFENCE_() MemoryBarrier()
#define KMP_SFENCE_() MemoryBarrier()
#else
#define KMP_MFENCE_() __sync_synchronize()
#define KMP_SFENCE_() __sync_synchronize()
#endif
#define KMP_MFENCE()                                                           \
  if (UNLIKELY(!__kmp_cpuinfo.initialized)) {                                  \
    __kmp_query_cpuid(&__kmp_cpuinfo);                                         \
  }                                                                            \
  if (__kmp_cpuinfo.flags.sse2) {                                              \
    KMP_MFENCE_();                                                             \
  }
#define KMP_SFENCE() KMP_SFENCE_()
#endif
#else
#define KMP_MFENCE() KMP_MB()
#define KMP_SFENCE() KMP_MB()
#endif

#ifndef KMP_IMB
#define KMP_IMB() /* nothing to do */
#endif

#ifndef KMP_ST_REL32
#define KMP_ST_REL32(A, D) (*(A) = (D))
#endif

#ifndef KMP_ST_REL64
#define KMP_ST_REL64(A, D) (*(A) = (D))
#endif

#ifndef KMP_LD_ACQ32
#define KMP_LD_ACQ32(A) (*(A))
#endif

#ifndef KMP_LD_ACQ64
#define KMP_LD_ACQ64(A) (*(A))
#endif

/* ------------------------------------------------------------------------ */
// FIXME - maybe this should be
//
// #define TCR_4(a) (*(volatile kmp_int32 *)(&a))
// #define TCW_4(a, b) (a) = (*(volatile kmp_int32 *)&(b))
//
// #define TCR_8(a) (*(volatile kmp_int64 *)(a))
// #define TCW_8(a, b) (a) = (*(volatile kmp_int64 *)(&b))
//
// I'm fairly certain this is the correct thing to do, but I'm afraid
// of performance regressions.

#define TCR_1(a) (a)
#define TCW_1(a, b) (a) = (b)
#define TCR_4(a) (a)
#define TCW_4(a, b) (a) = (b)
#define TCI_4(a) (++(a))
#define TCD_4(a) (--(a))
#define TCR_8(a) (a)
#define TCW_8(a, b) (a) = (b)
#define TCI_8(a) (++(a))
#define TCD_8(a) (--(a))
#define TCR_SYNC_4(a) (a)
#define TCW_SYNC_4(a, b) (a) = (b)
#define TCX_SYNC_4(a, b, c)                                                    \
  KMP_COMPARE_AND_STORE_REL32((volatile kmp_int32 *)(volatile void *)&(a),     \
                              (kmp_int32)(b), (kmp_int32)(c))
#define TCR_SYNC_8(a) (a)
#define TCW_SYNC_8(a, b) (a) = (b)
#define TCX_SYNC_8(a, b, c)                                                    \
  KMP_COMPARE_AND_STORE_REL64((volatile kmp_int64 *)(volatile void *)&(a),     \
                              (kmp_int64)(b), (kmp_int64)(c))
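
// Illustrative sketch (not compiled, not part of the runtime API): TCR_*/TCW_*
// are the "thread-consistent" read/write wrappers the FIXME above discusses;
// TCR_PTR/TCW_PTR below pick the 4- or 8-byte flavor to match pointer size.
#if 0
static void __kmp_example_tc(volatile kmp_int32 *flag) {
  kmp_int32 seen = TCR_4(*flag); // read that may race with other threads
  if (seen == 0)
    TCW_4(*flag, 1);             // matching thread-consistent write
}
#endif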

#if KMP_ARCH_X86 || KMP_ARCH_MIPS
// What about ARM?
#define TCR_PTR(a) ((void *)TCR_4(a))
#define TCW_PTR(a, b) TCW_4((a), (b))
#define TCR_SYNC_PTR(a) ((void *)TCR_SYNC_4(a))
#define TCW_SYNC_PTR(a, b) TCW_SYNC_4((a), (b))
#define TCX_SYNC_PTR(a, b, c) ((void *)TCX_SYNC_4((a), (b), (c)))

#else /* 64 bit pointers */

#define TCR_PTR(a) ((void *)TCR_8(a))
#define TCW_PTR(a, b) TCW_8((a), (b))
#define TCR_SYNC_PTR(a) ((void *)TCR_SYNC_8(a))
#define TCW_SYNC_PTR(a, b) TCW_SYNC_8((a), (b))
#define TCX_SYNC_PTR(a, b, c) ((void *)TCX_SYNC_8((a), (b), (c)))

#endif /* KMP_ARCH_X86 */

/* If these FTN_{TRUE,FALSE} values change, may need to change several places
   where they are used to check that language is Fortran, not C. */

#ifndef FTN_TRUE
#define FTN_TRUE TRUE
#endif

#ifndef FTN_FALSE
#define FTN_FALSE FALSE
#endif

typedef void (*microtask_t)(int *gtid, int *npr, ...);

#ifdef USE_VOLATILE_CAST
#define VOLATILE_CAST(x) (volatile x)
#else
#define VOLATILE_CAST(x) (x)
#endif

#define KMP_WAIT __kmp_wait_4
#define KMP_WAIT_PTR __kmp_wait_4_ptr
#define KMP_EQ __kmp_eq_4
#define KMP_NEQ __kmp_neq_4
#define KMP_LT __kmp_lt_4
#define KMP_GE __kmp_ge_4
#define KMP_LE __kmp_le_4

/* Workaround for Intel(R) 64 code gen bug when taking address of static array
 * (Intel(R) 64 Tracker #138) */
#if (KMP_ARCH_X86_64 || KMP_ARCH_PPC64) && KMP_OS_LINUX
#define STATIC_EFI2_WORKAROUND
#else
#define STATIC_EFI2_WORKAROUND static
#endif

// Support of BGET usage
#ifndef KMP_USE_BGET
#define KMP_USE_BGET 1
#endif

// Switches for OSS builds
#ifndef USE_CMPXCHG_FIX
#define USE_CMPXCHG_FIX 1
#endif

// Enable dynamic user lock
#define KMP_USE_DYNAMIC_LOCK 1

// Enable Intel(R) Transactional Synchronization Extensions (Intel(R) TSX) if
// dynamic user lock is turned on
#if KMP_USE_DYNAMIC_LOCK
// Visual studio can't handle the asm sections in this code
#define KMP_USE_TSX ((KMP_ARCH_X86 || KMP_ARCH_X86_64) && !KMP_COMPILER_MSVC)
#ifdef KMP_USE_ADAPTIVE_LOCKS
#undef KMP_USE_ADAPTIVE_LOCKS
#endif
#define KMP_USE_ADAPTIVE_LOCKS KMP_USE_TSX
#endif

// Enable tick time conversion of ticks to seconds
#if KMP_STATS_ENABLED
#define KMP_HAVE_TICK_TIME                                                     \
  (KMP_OS_LINUX && (KMP_MIC || KMP_ARCH_X86 || KMP_ARCH_X86_64))
#endif

// Warning levels
enum kmp_warnings_level {
  kmp_warnings_off = 0, /* No warnings */
  kmp_warnings_low, /* Minimal warnings (default) */
  kmp_warnings_explicit = 6, /* Explicitly set to ON - more warnings */
  kmp_warnings_verbose /* reserved */
};

#ifdef __cplusplus
} // extern "C"
#endif // __cplusplus

// Safe C API
#include "kmp_safe_c_api.h"

// Macros for C++11 atomic functions
#define KMP_ATOMIC_LD(p, order) (p)->load(std::memory_order_##order)
#define KMP_ATOMIC_OP(op, p, v, order) (p)->op(v, std::memory_order_##order)

// For non-default load/store
#define KMP_ATOMIC_LD_ACQ(p) KMP_ATOMIC_LD(p, acquire)
#define KMP_ATOMIC_LD_RLX(p) KMP_ATOMIC_LD(p, relaxed)
#define KMP_ATOMIC_ST_REL(p, v) KMP_ATOMIC_OP(store, p, v, release)
#define KMP_ATOMIC_ST_RLX(p, v) KMP_ATOMIC_OP(store, p, v, relaxed)

// For non-default fetch_<op>
#define KMP_ATOMIC_ADD(p, v) KMP_ATOMIC_OP(fetch_add, p, v, acq_rel)
#define KMP_ATOMIC_SUB(p, v) KMP_ATOMIC_OP(fetch_sub, p, v, acq_rel)
#define KMP_ATOMIC_AND(p, v) KMP_ATOMIC_OP(fetch_and, p, v, acq_rel)
#define KMP_ATOMIC_OR(p, v) KMP_ATOMIC_OP(fetch_or, p, v, acq_rel)
#define KMP_ATOMIC_INC(p) KMP_ATOMIC_OP(fetch_add, p, 1, acq_rel)
#define KMP_ATOMIC_DEC(p) KMP_ATOMIC_OP(fetch_sub, p, 1, acq_rel)
#define KMP_ATOMIC_ADD_RLX(p, v) KMP_ATOMIC_OP(fetch_add, p, v, relaxed)
#define KMP_ATOMIC_INC_RLX(p) KMP_ATOMIC_OP(fetch_add, p, 1, relaxed)
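
// Illustrative sketch (not compiled, not part of the runtime API): the
// KMP_ATOMIC_* macros are thin wrappers over std::atomic member functions
// with the memory order spelled out in the macro name.
#if 0
static void __kmp_example_atomics(std::atomic<kmp_int32> *ctr) {
  KMP_ATOMIC_INC(ctr);                    // ctr->fetch_add(1, acq_rel)
  kmp_int32 cur = KMP_ATOMIC_LD_ACQ(ctr); // ctr->load(acquire)
  KMP_ATOMIC_ST_REL(ctr, cur + 1);        // ctr->store(cur + 1, release)
}
#endif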

// Callers of the following functions cannot see the side effect on "expected".
template <typename T>
bool __kmp_atomic_compare_store(std::atomic<T> *p, T expected, T desired) {
  return p->compare_exchange_strong(
      expected, desired, std::memory_order_acq_rel, std::memory_order_relaxed);
}

template <typename T>
bool __kmp_atomic_compare_store_acq(std::atomic<T> *p, T expected, T desired) {
  return p->compare_exchange_strong(
      expected, desired, std::memory_order_acquire, std::memory_order_relaxed);
}

template <typename T>
bool __kmp_atomic_compare_store_rel(std::atomic<T> *p, T expected, T desired) {
  return p->compare_exchange_strong(
      expected, desired, std::memory_order_release, std::memory_order_relaxed);
}
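
// Illustrative sketch (not compiled, not part of the runtime API): because
// "expected" is taken by value, a failed exchange does not report the
// observed value back to the caller; the helpers reduce to a plain
// succeeded/failed answer.
#if 0
static bool __kmp_example_try_claim(std::atomic<kmp_int32> *owner, int gtid) {
  // Succeeds only if *owner is still -1 (unclaimed); on failure the caller
  // retries or gives up, but never learns the current owner.
  return __kmp_atomic_compare_store(owner, (kmp_int32)-1, (kmp_int32)gtid);
}
#endif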

// Symbol lookup on Linux/Windows
#if KMP_OS_WINDOWS
extern void *__kmp_lookup_symbol(const char *name, bool next = false);
#define KMP_DLSYM(name) __kmp_lookup_symbol(name)
#define KMP_DLSYM_NEXT(name) __kmp_lookup_symbol(name, true)
#else
#define KMP_DLSYM(name) dlsym(RTLD_DEFAULT, name)
#define KMP_DLSYM_NEXT(name) dlsym(RTLD_NEXT, name)
#endif

#endif /* KMP_OS_H */