LLVM OpenMP* Runtime Library
kmp_os.h
/*
 * kmp_os.h -- KPTS runtime header file.
 */

//===----------------------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef KMP_OS_H
#define KMP_OS_H

#include "kmp_config.h"
#include <atomic>
#include <stdarg.h>
#include <stdlib.h>
#include <string.h>

#define KMP_FTN_PLAIN 1
#define KMP_FTN_APPEND 2
#define KMP_FTN_UPPER 3
/*
#define KMP_FTN_PREPEND 4
#define KMP_FTN_UAPPEND 5
*/

#define KMP_PTR_SKIP (sizeof(void *))

/* -------------------------- Compiler variations ------------------------ */

#define KMP_OFF 0
#define KMP_ON 1

#define KMP_MEM_CONS_VOLATILE 0
#define KMP_MEM_CONS_FENCE 1

#ifndef KMP_MEM_CONS_MODEL
#define KMP_MEM_CONS_MODEL KMP_MEM_CONS_VOLATILE
#endif

#ifndef __has_cpp_attribute
#define __has_cpp_attribute(x) 0
#endif

#ifndef __has_attribute
#define __has_attribute(x) 0
#endif

/* ------------------------- Compiler recognition ---------------------- */
#define KMP_COMPILER_ICC 0
#define KMP_COMPILER_GCC 0
#define KMP_COMPILER_CLANG 0
#define KMP_COMPILER_MSVC 0
#define KMP_COMPILER_ICX 0

#if __INTEL_CLANG_COMPILER
#undef KMP_COMPILER_ICX
#define KMP_COMPILER_ICX 1
#elif defined(__INTEL_COMPILER)
#undef KMP_COMPILER_ICC
#define KMP_COMPILER_ICC 1
#elif defined(__clang__)
#undef KMP_COMPILER_CLANG
#define KMP_COMPILER_CLANG 1
#elif defined(__GNUC__)
#undef KMP_COMPILER_GCC
#define KMP_COMPILER_GCC 1
#elif defined(_MSC_VER)
#undef KMP_COMPILER_MSVC
#define KMP_COMPILER_MSVC 1
#else
#error Unknown compiler
#endif

#if (KMP_OS_LINUX || KMP_OS_WINDOWS || KMP_OS_FREEBSD) && !KMP_OS_WASI
#define KMP_AFFINITY_SUPPORTED 1
#if KMP_OS_WINDOWS && KMP_ARCH_X86_64
#define KMP_GROUP_AFFINITY 1
#else
#define KMP_GROUP_AFFINITY 0
#endif
#else
#define KMP_AFFINITY_SUPPORTED 0
#define KMP_GROUP_AFFINITY 0
#endif

#if (KMP_OS_LINUX || (KMP_OS_FREEBSD && __FreeBSD_version >= 1301000))
#define KMP_HAVE_SCHED_GETCPU 1
#else
#define KMP_HAVE_SCHED_GETCPU 0
#endif

/* Check for quad-precision extension. */
#define KMP_HAVE_QUAD 0
#if KMP_ARCH_X86 || KMP_ARCH_X86_64
#if KMP_COMPILER_ICC || KMP_COMPILER_ICX
/* _Quad is already defined for icc */
#undef KMP_HAVE_QUAD
#define KMP_HAVE_QUAD 1
#elif KMP_COMPILER_CLANG
/* Clang doesn't support a software-implemented
   128-bit extended precision type yet */
typedef long double _Quad;
#elif KMP_COMPILER_GCC
/* GCC on NetBSD lacks __multc3/__divtc3 builtins needed for quad until
   NetBSD 10.0 which ships with GCC 10.5 */
#if (!KMP_OS_NETBSD || __GNUC__ >= 10)
typedef __float128 _Quad;
#undef KMP_HAVE_QUAD
#define KMP_HAVE_QUAD 1
#endif
#elif KMP_COMPILER_MSVC
typedef long double _Quad;
#endif
#else
#if __LDBL_MAX_EXP__ >= 16384 && KMP_COMPILER_GCC
typedef long double _Quad;
#undef KMP_HAVE_QUAD
#define KMP_HAVE_QUAD 1
#endif
#endif /* KMP_ARCH_X86 || KMP_ARCH_X86_64 */

#define KMP_USE_X87CONTROL 0
#if KMP_OS_WINDOWS
#define KMP_END_OF_LINE "\r\n"
typedef char kmp_int8;
typedef unsigned char kmp_uint8;
typedef short kmp_int16;
typedef unsigned short kmp_uint16;
typedef int kmp_int32;
typedef unsigned int kmp_uint32;
#define KMP_INT32_SPEC "d"
#define KMP_UINT32_SPEC "u"
#ifndef KMP_STRUCT64
typedef __int64 kmp_int64;
typedef unsigned __int64 kmp_uint64;
#define KMP_INT64_SPEC "I64d"
#define KMP_UINT64_SPEC "I64u"
#else
struct kmp_struct64 {
  kmp_int32 a, b;
};
typedef struct kmp_struct64 kmp_int64;
typedef struct kmp_struct64 kmp_uint64;
/* Not sure what to use for KMP_[U]INT64_SPEC here */
#endif
#if KMP_ARCH_X86 && KMP_MSVC_COMPAT
#undef KMP_USE_X87CONTROL
#define KMP_USE_X87CONTROL 1
#endif
#if KMP_ARCH_X86_64 || KMP_ARCH_AARCH64
#define KMP_INTPTR 1
typedef __int64 kmp_intptr_t;
typedef unsigned __int64 kmp_uintptr_t;
#define KMP_INTPTR_SPEC "I64d"
#define KMP_UINTPTR_SPEC "I64u"
#endif
#endif /* KMP_OS_WINDOWS */

#if KMP_OS_UNIX
#define KMP_END_OF_LINE "\n"
typedef char kmp_int8;
typedef unsigned char kmp_uint8;
typedef short kmp_int16;
typedef unsigned short kmp_uint16;
typedef int kmp_int32;
typedef unsigned int kmp_uint32;
typedef long long kmp_int64;
typedef unsigned long long kmp_uint64;
#define KMP_INT32_SPEC "d"
#define KMP_UINT32_SPEC "u"
#define KMP_INT64_SPEC "lld"
#define KMP_UINT64_SPEC "llu"
#endif /* KMP_OS_UNIX */

#if KMP_ARCH_X86 || KMP_ARCH_ARM || KMP_ARCH_MIPS || KMP_ARCH_WASM || \
    KMP_ARCH_PPC
#define KMP_SIZE_T_SPEC KMP_UINT32_SPEC
#elif KMP_ARCH_X86_64 || KMP_ARCH_PPC64 || KMP_ARCH_AARCH64 || \
    KMP_ARCH_MIPS64 || KMP_ARCH_RISCV64 || KMP_ARCH_LOONGARCH64 || \
    KMP_ARCH_VE || KMP_ARCH_S390X
#define KMP_SIZE_T_SPEC KMP_UINT64_SPEC
#else
#error "Can't determine size_t printf format specifier."
#endif

#if KMP_ARCH_X86 || KMP_ARCH_ARM || KMP_ARCH_WASM || KMP_ARCH_PPC
#define KMP_SIZE_T_MAX (0xFFFFFFFF)
#else
#define KMP_SIZE_T_MAX (0xFFFFFFFFFFFFFFFF)
#endif

typedef size_t kmp_size_t;
typedef float kmp_real32;
typedef double kmp_real64;

#ifndef KMP_INTPTR
#define KMP_INTPTR 1
typedef long kmp_intptr_t;
typedef unsigned long kmp_uintptr_t;
#define KMP_INTPTR_SPEC "ld"
#define KMP_UINTPTR_SPEC "lu"
#endif

#ifdef BUILD_I8
typedef kmp_int64 kmp_int;
typedef kmp_uint64 kmp_uint;
#else
typedef kmp_int32 kmp_int;
typedef kmp_uint32 kmp_uint;
#endif /* BUILD_I8 */
#define KMP_INT_MAX ((kmp_int32)0x7FFFFFFF)
#define KMP_INT_MIN ((kmp_int32)0x80000000)

// stdarg handling
#if (KMP_ARCH_ARM || KMP_ARCH_X86_64 || KMP_ARCH_AARCH64 || KMP_ARCH_WASM) && \
    (KMP_OS_FREEBSD || KMP_OS_LINUX || KMP_OS_WASI)
typedef va_list *kmp_va_list;
#define kmp_va_deref(ap) (*(ap))
#define kmp_va_addr_of(ap) (&(ap))
#else
typedef va_list kmp_va_list;
#define kmp_va_deref(ap) (ap)
#define kmp_va_addr_of(ap) (ap)
#endif
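
// Illustrative sketch (not part of the upstream header): a varargs entry
// point forwards its argument list to an internal routine through
// kmp_va_list; on ABIs where va_list is an array type, the pointer flavor
// above keeps the forwarding well-defined. The function names here are
// hypothetical.
static inline void __kmp_example_invoke(int gtid, kmp_va_list ap) {
  int first = va_arg(kmp_va_deref(ap), int); // unwrap with kmp_va_deref
  (void)gtid;
  (void)first;
}
static inline void __kmp_example_entry(int gtid, ...) {
  va_list ap;
  va_start(ap, gtid);
  __kmp_example_invoke(gtid, kmp_va_addr_of(ap)); // pass the list, or its address
  va_end(ap);
}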

#ifdef __cplusplus
// macros to cast out qualifiers and to re-interpret types
#define CCAST(type, var) const_cast<type>(var)
#define RCAST(type, var) reinterpret_cast<type>(var)
//-------------------------------------------------------------------------
// template for debug prints specification ( d, u, lld, llu ), and to obtain
// signed/unsigned flavors of a type
template <typename T> struct traits_t {};
// int
template <> struct traits_t<signed int> {
  typedef signed int signed_t;
  typedef unsigned int unsigned_t;
  typedef double floating_t;
  static char const *spec;
  static const signed_t max_value = 0x7fffffff;
  static const signed_t min_value = 0x80000000;
  static const int type_size = sizeof(signed_t);
};
// unsigned int
template <> struct traits_t<unsigned int> {
  typedef signed int signed_t;
  typedef unsigned int unsigned_t;
  typedef double floating_t;
  static char const *spec;
  static const unsigned_t max_value = 0xffffffff;
  static const unsigned_t min_value = 0x00000000;
  static const int type_size = sizeof(unsigned_t);
};
// long
template <> struct traits_t<signed long> {
  typedef signed long signed_t;
  typedef unsigned long unsigned_t;
  typedef long double floating_t;
  static char const *spec;
  static const int type_size = sizeof(signed_t);
};
// long long
template <> struct traits_t<signed long long> {
  typedef signed long long signed_t;
  typedef unsigned long long unsigned_t;
  typedef long double floating_t;
  static char const *spec;
  static const signed_t max_value = 0x7fffffffffffffffLL;
  static const signed_t min_value = 0x8000000000000000LL;
  static const int type_size = sizeof(signed_t);
};
// unsigned long long
template <> struct traits_t<unsigned long long> {
  typedef signed long long signed_t;
  typedef unsigned long long unsigned_t;
  typedef long double floating_t;
  static char const *spec;
  static const unsigned_t max_value = 0xffffffffffffffffLL;
  static const unsigned_t min_value = 0x0000000000000000LL;
  static const int type_size = sizeof(unsigned_t);
};
//-------------------------------------------------------------------------
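
// Illustrative sketch (not part of the upstream header; assumes <stdio.h>):
// traits_t lets type-generic code pick the matching printf specifier and the
// signed/unsigned flavor of a type at compile time. A hypothetical debug
// helper:
//
//   template <typename T> void __kmp_example_dump(char const *name, T value) {
//     char fmt[32];
//     // Build e.g. "%s = %lld\n" from the per-type spec string.
//     snprintf(fmt, sizeof(fmt), "%%s = %%%s\n", traits_t<T>::spec);
//     printf(fmt, name, value);
//   }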
#else
#define CCAST(type, var) (type)(var)
#define RCAST(type, var) (type)(var)
#endif // __cplusplus

#define KMP_EXPORT extern /* export declaration in guide libraries */

#if __GNUC__ >= 4 && !defined(__MINGW32__)
#define __forceinline __inline
#endif

/* Check if the OS/arch can support user-level mwait */
// All mwait code tests for UMWAIT first, so it should only fall back to ring3
// MWAIT for KNL.
#define KMP_HAVE_MWAIT \
  ((KMP_ARCH_X86 || KMP_ARCH_X86_64) && (KMP_OS_LINUX || KMP_OS_WINDOWS) && \
   !KMP_MIC2)
#define KMP_HAVE_UMWAIT \
  ((KMP_ARCH_X86 || KMP_ARCH_X86_64) && (KMP_OS_LINUX || KMP_OS_WINDOWS) && \
   !KMP_MIC)

#if KMP_OS_WINDOWS
// Don't include everything related to NT status codes; we'll do that
// explicitly
#define WIN32_NO_STATUS
#include <windows.h>

static inline int KMP_GET_PAGE_SIZE(void) {
  SYSTEM_INFO si;
  GetSystemInfo(&si);
  return si.dwPageSize;
}
#else
#define KMP_GET_PAGE_SIZE() getpagesize()
#endif

#define PAGE_ALIGNED(_addr) \
  (!((size_t)_addr & (size_t)(KMP_GET_PAGE_SIZE() - 1)))
#define ALIGN_TO_PAGE(x) \
  (void *)(((size_t)(x)) & ~((size_t)(KMP_GET_PAGE_SIZE() - 1)))

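// Illustrative sketch (not part of the upstream header): rounding a pointer
// up to the next page boundary with the macros above, which assume the page
// size is a power of two. The helper name is hypothetical, and the POSIX
// branch assumes getpagesize() has been declared (e.g. via <unistd.h>).
static inline void *__kmp_example_page_round_up(void *p) {
  if (PAGE_ALIGNED(p))
    return p; // already on a page boundary
  // Step into the next page, then round down to its start.
  return ALIGN_TO_PAGE((char *)p + KMP_GET_PAGE_SIZE());
}
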
/* ---------- Support for cache alignment, padding, etc. ----------------*/

#ifdef __cplusplus
extern "C" {
#endif // __cplusplus

#define INTERNODE_CACHE_LINE 4096 /* for multi-node systems */

/* Define the default size of the cache line */
#ifndef CACHE_LINE
#define CACHE_LINE 128 /* cache line size in bytes */
#else
#if (CACHE_LINE < 64) && !defined(KMP_OS_DARWIN)
// 2006-02-13: This produces too many warnings on OS X*. Disable for now
#warning CACHE_LINE is too small.
#endif
#endif /* CACHE_LINE */

#define KMP_CACHE_PREFETCH(ADDR) /* nothing */

// Define attribute that indicates that the fall through from the previous
// case label is intentional and should not be diagnosed by a compiler
// Code from libcxx/include/__config
// Use a function-like macro to imply that it must be followed by a semicolon
#if __cplusplus > 201402L && __has_cpp_attribute(fallthrough)
#define KMP_FALLTHROUGH() [[fallthrough]]
// icc cannot properly tell this attribute is absent so force off
#elif KMP_COMPILER_ICC
#define KMP_FALLTHROUGH() ((void)0)
#elif __has_cpp_attribute(clang::fallthrough)
#define KMP_FALLTHROUGH() [[clang::fallthrough]]
#elif __has_attribute(fallthrough) || __GNUC__ >= 7
#define KMP_FALLTHROUGH() __attribute__((__fallthrough__))
#else
#define KMP_FALLTHROUGH() ((void)0)
#endif
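
// Illustrative sketch (not part of the upstream header): marking a deliberate
// fall through so -Wimplicit-fallthrough stays quiet. The function is
// hypothetical.
static inline int __kmp_example_classify(int x) {
  switch (x) {
  case 0:
    x += 10;
    KMP_FALLTHROUGH(); // intentional: case 0 continues into case 1
  case 1:
    return x + 1;
  default:
    return x;
  }
}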

#if KMP_HAVE_ATTRIBUTE_WAITPKG
#define KMP_ATTRIBUTE_TARGET_WAITPKG __attribute__((target("waitpkg")))
#else
#define KMP_ATTRIBUTE_TARGET_WAITPKG /* Nothing */
#endif

#if KMP_HAVE_ATTRIBUTE_RTM
#define KMP_ATTRIBUTE_TARGET_RTM __attribute__((target("rtm")))
#else
#define KMP_ATTRIBUTE_TARGET_RTM /* Nothing */
#endif

// Define attribute that indicates a function does not return
#if __cplusplus >= 201103L
#define KMP_NORETURN [[noreturn]]
#elif KMP_OS_WINDOWS
#define KMP_NORETURN __declspec(noreturn)
#else
#define KMP_NORETURN __attribute__((noreturn))
#endif

#if KMP_OS_WINDOWS && KMP_MSVC_COMPAT
#define KMP_ALIGN(bytes) __declspec(align(bytes))
#define KMP_THREAD_LOCAL __declspec(thread)
#define KMP_ALIAS /* Nothing */
#else
#define KMP_ALIGN(bytes) __attribute__((aligned(bytes)))
#define KMP_THREAD_LOCAL __thread
#define KMP_ALIAS(alias_of) __attribute__((alias(alias_of)))
#endif

#if KMP_HAVE_WEAK_ATTRIBUTE && !KMP_DYNAMIC_LIB
#define KMP_WEAK_ATTRIBUTE_EXTERNAL __attribute__((weak))
#else
#define KMP_WEAK_ATTRIBUTE_EXTERNAL /* Nothing */
#endif

#if KMP_HAVE_WEAK_ATTRIBUTE
#define KMP_WEAK_ATTRIBUTE_INTERNAL __attribute__((weak))
#else
#define KMP_WEAK_ATTRIBUTE_INTERNAL /* Nothing */
#endif

// Define KMP_VERSION_SYMBOL and KMP_EXPAND_NAME
#ifndef KMP_STR
#define KMP_STR(x) _KMP_STR(x)
#define _KMP_STR(x) #x
#endif

#ifdef KMP_USE_VERSION_SYMBOLS
// If using versioned symbols, KMP_EXPAND_NAME prepends
// __kmp_api_ to the real API name
#define KMP_EXPAND_NAME(api_name) _KMP_EXPAND_NAME(api_name)
#define _KMP_EXPAND_NAME(api_name) __kmp_api_##api_name
#define KMP_VERSION_SYMBOL(api_name, ver_num, ver_str) \
  _KMP_VERSION_SYMBOL(api_name, ver_num, ver_str, "VERSION")
#define _KMP_VERSION_SYMBOL(api_name, ver_num, ver_str, default_ver) \
  __typeof__(__kmp_api_##api_name) __kmp_api_##api_name##_##ver_num##_alias \
      __attribute__((alias(KMP_STR(__kmp_api_##api_name)))); \
  __asm__( \
      ".symver " KMP_STR(__kmp_api_##api_name##_##ver_num##_alias) "," KMP_STR( \
          api_name) "@" ver_str "\n\t"); \
  __asm__(".symver " KMP_STR(__kmp_api_##api_name) "," KMP_STR( \
      api_name) "@@" default_ver "\n\t")

#define KMP_VERSION_OMPC_SYMBOL(apic_name, api_name, ver_num, ver_str) \
  _KMP_VERSION_OMPC_SYMBOL(apic_name, api_name, ver_num, ver_str, "VERSION")
#define _KMP_VERSION_OMPC_SYMBOL(apic_name, api_name, ver_num, ver_str, \
                                 default_ver) \
  __typeof__(__kmp_api_##apic_name) __kmp_api_##apic_name##_##ver_num##_alias \
      __attribute__((alias(KMP_STR(__kmp_api_##apic_name)))); \
  __asm__(".symver " KMP_STR(__kmp_api_##apic_name) "," KMP_STR( \
      apic_name) "@@" default_ver "\n\t"); \
  __asm__( \
      ".symver " KMP_STR(__kmp_api_##apic_name##_##ver_num##_alias) "," KMP_STR( \
          api_name) "@" ver_str "\n\t")

#else // KMP_USE_VERSION_SYMBOLS
#define KMP_EXPAND_NAME(api_name) api_name
#define KMP_VERSION_SYMBOL(api_name, ver_num, ver_str) /* Nothing */
#define KMP_VERSION_OMPC_SYMBOL(apic_name, api_name, ver_num, \
                                ver_str) /* Nothing */
#endif // KMP_USE_VERSION_SYMBOLS
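
// Illustrative sketch (not part of the upstream header): with version symbols
// enabled, an exported entry point is defined under its __kmp_api_ name, and
// KMP_VERSION_SYMBOL then binds both a versioned alias and the default
// version. A hypothetical API "omp_example" versioned at "OMP_1.0":
//
//   int KMP_EXPAND_NAME(omp_example)(void) { return 0; }
//   KMP_VERSION_SYMBOL(omp_example, 10, "OMP_1.0");
//
// Reading the .symver lines above, this emits omp_example@OMP_1.0 (bound to
// the 10_alias) alongside the default binding omp_example@@VERSION.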

/* Temporary note: if performance testing of this passes, we can remove
   all references to KMP_DO_ALIGN and replace with KMP_ALIGN. */
#define KMP_DO_ALIGN(bytes) KMP_ALIGN(bytes)
#define KMP_ALIGN_CACHE KMP_ALIGN(CACHE_LINE)
#define KMP_ALIGN_CACHE_INTERNODE KMP_ALIGN(INTERNODE_CACHE_LINE)

/* General purpose fence types for memory operations */
enum kmp_mem_fence_type {
  kmp_no_fence, /* No memory fence */
  kmp_acquire_fence, /* Acquire (read) memory fence */
  kmp_release_fence, /* Release (write) memory fence */
  kmp_full_fence /* Full (read+write) memory fence */
};

// Synchronization primitives

#if KMP_ASM_INTRINS && KMP_OS_WINDOWS && \
    !((KMP_ARCH_AARCH64 || KMP_ARCH_ARM) && \
      (KMP_COMPILER_CLANG || KMP_COMPILER_GCC))

#if KMP_MSVC_COMPAT && !KMP_COMPILER_CLANG
#pragma intrinsic(InterlockedExchangeAdd)
#pragma intrinsic(InterlockedCompareExchange)
#pragma intrinsic(InterlockedExchange)
#if !KMP_32_BIT_ARCH
#pragma intrinsic(InterlockedExchange64)
#endif
#endif

// Using InterlockedIncrement / InterlockedDecrement causes a library loading
// ordering problem, so we use InterlockedExchangeAdd instead.
#define KMP_TEST_THEN_INC32(p) InterlockedExchangeAdd((volatile long *)(p), 1)
#define KMP_TEST_THEN_INC_ACQ32(p) \
  InterlockedExchangeAdd((volatile long *)(p), 1)
#define KMP_TEST_THEN_ADD4_32(p) InterlockedExchangeAdd((volatile long *)(p), 4)
#define KMP_TEST_THEN_ADD4_ACQ32(p) \
  InterlockedExchangeAdd((volatile long *)(p), 4)
#define KMP_TEST_THEN_DEC32(p) InterlockedExchangeAdd((volatile long *)(p), -1)
#define KMP_TEST_THEN_DEC_ACQ32(p) \
  InterlockedExchangeAdd((volatile long *)(p), -1)
#define KMP_TEST_THEN_ADD32(p, v) \
  InterlockedExchangeAdd((volatile long *)(p), (v))

#define KMP_COMPARE_AND_STORE_RET32(p, cv, sv) \
  InterlockedCompareExchange((volatile long *)(p), (long)(sv), (long)(cv))

#define KMP_XCHG_FIXED32(p, v) \
  InterlockedExchange((volatile long *)(p), (long)(v))
#define KMP_XCHG_FIXED64(p, v) \
  InterlockedExchange64((volatile kmp_int64 *)(p), (kmp_int64)(v))

inline kmp_real32 KMP_XCHG_REAL32(volatile kmp_real32 *p, kmp_real32 v) {
  kmp_int32 tmp = InterlockedExchange((volatile long *)p, *(long *)&v);
  return *(kmp_real32 *)&tmp;
}

#define KMP_TEST_THEN_OR8(p, v) __kmp_test_then_or8((p), (v))
#define KMP_TEST_THEN_AND8(p, v) __kmp_test_then_and8((p), (v))
#define KMP_TEST_THEN_OR32(p, v) __kmp_test_then_or32((p), (v))
#define KMP_TEST_THEN_AND32(p, v) __kmp_test_then_and32((p), (v))
#define KMP_TEST_THEN_OR64(p, v) __kmp_test_then_or64((p), (v))
#define KMP_TEST_THEN_AND64(p, v) __kmp_test_then_and64((p), (v))

extern kmp_int8 __kmp_test_then_or8(volatile kmp_int8 *p, kmp_int8 v);
extern kmp_int8 __kmp_test_then_and8(volatile kmp_int8 *p, kmp_int8 v);
extern kmp_int32 __kmp_test_then_add32(volatile kmp_int32 *p, kmp_int32 v);
extern kmp_uint32 __kmp_test_then_or32(volatile kmp_uint32 *p, kmp_uint32 v);
extern kmp_uint32 __kmp_test_then_and32(volatile kmp_uint32 *p, kmp_uint32 v);
extern kmp_int64 __kmp_test_then_add64(volatile kmp_int64 *p, kmp_int64 v);
extern kmp_uint64 __kmp_test_then_or64(volatile kmp_uint64 *p, kmp_uint64 v);
extern kmp_uint64 __kmp_test_then_and64(volatile kmp_uint64 *p, kmp_uint64 v);

#if KMP_ARCH_AARCH64 && KMP_COMPILER_MSVC && !KMP_COMPILER_CLANG
#define KMP_TEST_THEN_INC64(p) _InterlockedExchangeAdd64((p), 1LL)
#define KMP_TEST_THEN_INC_ACQ64(p) _InterlockedExchangeAdd64_acq((p), 1LL)
#define KMP_TEST_THEN_ADD4_64(p) _InterlockedExchangeAdd64((p), 4LL)
// #define KMP_TEST_THEN_ADD4_ACQ64(p) _InterlockedExchangeAdd64_acq((p), 4LL)
// #define KMP_TEST_THEN_DEC64(p) _InterlockedExchangeAdd64((p), -1LL)
// #define KMP_TEST_THEN_DEC_ACQ64(p) _InterlockedExchangeAdd64_acq((p), -1LL)
// #define KMP_TEST_THEN_ADD8(p, v) _InterlockedExchangeAdd8((p), (v))
#define KMP_TEST_THEN_ADD64(p, v) _InterlockedExchangeAdd64((p), (v))

#define KMP_COMPARE_AND_STORE_ACQ8(p, cv, sv) \
  __kmp_compare_and_store_acq8((p), (cv), (sv))
#define KMP_COMPARE_AND_STORE_REL8(p, cv, sv) \
  __kmp_compare_and_store_rel8((p), (cv), (sv))
#define KMP_COMPARE_AND_STORE_ACQ16(p, cv, sv) \
  __kmp_compare_and_store_acq16((p), (cv), (sv))
/*
#define KMP_COMPARE_AND_STORE_REL16(p, cv, sv) \
  __kmp_compare_and_store_rel16((p), (cv), (sv))
*/
#define KMP_COMPARE_AND_STORE_ACQ32(p, cv, sv) \
  __kmp_compare_and_store_acq32((volatile kmp_int32 *)(p), (kmp_int32)(cv), \
                                (kmp_int32)(sv))
#define KMP_COMPARE_AND_STORE_REL32(p, cv, sv) \
  __kmp_compare_and_store_rel32((volatile kmp_int32 *)(p), (kmp_int32)(cv), \
                                (kmp_int32)(sv))
#define KMP_COMPARE_AND_STORE_ACQ64(p, cv, sv) \
  __kmp_compare_and_store_acq64((volatile kmp_int64 *)(p), (kmp_int64)(cv), \
                                (kmp_int64)(sv))
#define KMP_COMPARE_AND_STORE_REL64(p, cv, sv) \
  __kmp_compare_and_store_rel64((volatile kmp_int64 *)(p), (kmp_int64)(cv), \
                                (kmp_int64)(sv))
#define KMP_COMPARE_AND_STORE_PTR(p, cv, sv) \
  __kmp_compare_and_store_ptr((void *volatile *)(p), (void *)(cv), (void *)(sv))

// KMP_COMPARE_AND_STORE expects this order: pointer, compare, exchange
// _InterlockedCompareExchange expects this order: pointer, exchange, compare
// KMP_COMPARE_AND_STORE also returns a bool indicating a successful write. A
// write is successful if the return value of _InterlockedCompareExchange is the
// same as the compare value.
inline kmp_int8 __kmp_compare_and_store_acq8(volatile kmp_int8 *p, kmp_int8 cv,
                                             kmp_int8 sv) {
  return _InterlockedCompareExchange8_acq(p, sv, cv) == cv;
}

inline kmp_int8 __kmp_compare_and_store_rel8(volatile kmp_int8 *p, kmp_int8 cv,
                                             kmp_int8 sv) {
  return _InterlockedCompareExchange8_rel(p, sv, cv) == cv;
}

inline kmp_int16 __kmp_compare_and_store_acq16(volatile kmp_int16 *p,
                                               kmp_int16 cv, kmp_int16 sv) {
  return _InterlockedCompareExchange16_acq(p, sv, cv) == cv;
}

inline kmp_int16 __kmp_compare_and_store_rel16(volatile kmp_int16 *p,
                                               kmp_int16 cv, kmp_int16 sv) {
  return _InterlockedCompareExchange16_rel(p, sv, cv) == cv;
}

inline kmp_int32 __kmp_compare_and_store_acq32(volatile kmp_int32 *p,
                                               kmp_int32 cv, kmp_int32 sv) {
  return _InterlockedCompareExchange_acq((volatile long *)p, sv, cv) == cv;
}

inline kmp_int32 __kmp_compare_and_store_rel32(volatile kmp_int32 *p,
                                               kmp_int32 cv, kmp_int32 sv) {
  return _InterlockedCompareExchange_rel((volatile long *)p, sv, cv) == cv;
}

inline kmp_int32 __kmp_compare_and_store_acq64(volatile kmp_int64 *p,
                                               kmp_int64 cv, kmp_int64 sv) {
  return _InterlockedCompareExchange64_acq(p, sv, cv) == cv;
}

inline kmp_int32 __kmp_compare_and_store_rel64(volatile kmp_int64 *p,
                                               kmp_int64 cv, kmp_int64 sv) {
  return _InterlockedCompareExchange64_rel(p, sv, cv) == cv;
}

inline kmp_int32 __kmp_compare_and_store_ptr(void *volatile *p, void *cv,
                                             void *sv) {
  return _InterlockedCompareExchangePointer(p, sv, cv) == cv;
}

// The _RET versions return the value instead of a bool

#define KMP_COMPARE_AND_STORE_RET8(p, cv, sv) \
  _InterlockedCompareExchange8((p), (sv), (cv))
#define KMP_COMPARE_AND_STORE_RET16(p, cv, sv) \
  _InterlockedCompareExchange16((p), (sv), (cv))

#define KMP_COMPARE_AND_STORE_RET64(p, cv, sv) \
  _InterlockedCompareExchange64((volatile kmp_int64 *)(p), (kmp_int64)(sv), \
                                (kmp_int64)(cv))

#define KMP_XCHG_FIXED8(p, v) \
  _InterlockedExchange8((volatile kmp_int8 *)(p), (kmp_int8)(v));
#define KMP_XCHG_FIXED16(p, v) _InterlockedExchange16((p), (v));
#define KMP_XCHG_REAL64(p, v) __kmp_xchg_real64((p), (v));

inline kmp_real64 __kmp_xchg_real64(volatile kmp_real64 *p, kmp_real64 v) {
  kmp_int64 tmp =
      _InterlockedExchange64((volatile kmp_int64 *)p, *(kmp_int64 *)&v);
  return *(kmp_real64 *)&tmp;
}

#else // !KMP_ARCH_AARCH64

// Routines that we still need to implement in assembly.
extern kmp_int8 __kmp_test_then_add8(volatile kmp_int8 *p, kmp_int8 v);

extern kmp_int8 __kmp_compare_and_store8(volatile kmp_int8 *p, kmp_int8 cv,
                                         kmp_int8 sv);
extern kmp_int16 __kmp_compare_and_store16(volatile kmp_int16 *p, kmp_int16 cv,
                                           kmp_int16 sv);
extern kmp_int32 __kmp_compare_and_store32(volatile kmp_int32 *p, kmp_int32 cv,
                                           kmp_int32 sv);
extern kmp_int32 __kmp_compare_and_store64(volatile kmp_int64 *p, kmp_int64 cv,
                                           kmp_int64 sv);
extern kmp_int8 __kmp_compare_and_store_ret8(volatile kmp_int8 *p, kmp_int8 cv,
                                             kmp_int8 sv);
extern kmp_int16 __kmp_compare_and_store_ret16(volatile kmp_int16 *p,
                                               kmp_int16 cv, kmp_int16 sv);
extern kmp_int32 __kmp_compare_and_store_ret32(volatile kmp_int32 *p,
                                               kmp_int32 cv, kmp_int32 sv);
extern kmp_int64 __kmp_compare_and_store_ret64(volatile kmp_int64 *p,
                                               kmp_int64 cv, kmp_int64 sv);

extern kmp_int8 __kmp_xchg_fixed8(volatile kmp_int8 *p, kmp_int8 v);
extern kmp_int16 __kmp_xchg_fixed16(volatile kmp_int16 *p, kmp_int16 v);
extern kmp_int32 __kmp_xchg_fixed32(volatile kmp_int32 *p, kmp_int32 v);
extern kmp_int64 __kmp_xchg_fixed64(volatile kmp_int64 *p, kmp_int64 v);
extern kmp_real32 __kmp_xchg_real32(volatile kmp_real32 *p, kmp_real32 v);
extern kmp_real64 __kmp_xchg_real64(volatile kmp_real64 *p, kmp_real64 v);

//#define KMP_TEST_THEN_INC32(p) __kmp_test_then_add32((p), 1)
//#define KMP_TEST_THEN_INC_ACQ32(p) __kmp_test_then_add32((p), 1)
#define KMP_TEST_THEN_INC64(p) __kmp_test_then_add64((p), 1LL)
#define KMP_TEST_THEN_INC_ACQ64(p) __kmp_test_then_add64((p), 1LL)
//#define KMP_TEST_THEN_ADD4_32(p) __kmp_test_then_add32((p), 4)
//#define KMP_TEST_THEN_ADD4_ACQ32(p) __kmp_test_then_add32((p), 4)
#define KMP_TEST_THEN_ADD4_64(p) __kmp_test_then_add64((p), 4LL)
#define KMP_TEST_THEN_ADD4_ACQ64(p) __kmp_test_then_add64((p), 4LL)
//#define KMP_TEST_THEN_DEC32(p) __kmp_test_then_add32((p), -1)
//#define KMP_TEST_THEN_DEC_ACQ32(p) __kmp_test_then_add32((p), -1)
#define KMP_TEST_THEN_DEC64(p) __kmp_test_then_add64((p), -1LL)
#define KMP_TEST_THEN_DEC_ACQ64(p) __kmp_test_then_add64((p), -1LL)
//#define KMP_TEST_THEN_ADD32(p, v) __kmp_test_then_add32((p), (v))
#define KMP_TEST_THEN_ADD8(p, v) __kmp_test_then_add8((p), (v))
#define KMP_TEST_THEN_ADD64(p, v) __kmp_test_then_add64((p), (v))

#define KMP_COMPARE_AND_STORE_ACQ8(p, cv, sv) \
  __kmp_compare_and_store8((p), (cv), (sv))
#define KMP_COMPARE_AND_STORE_REL8(p, cv, sv) \
  __kmp_compare_and_store8((p), (cv), (sv))
#define KMP_COMPARE_AND_STORE_ACQ16(p, cv, sv) \
  __kmp_compare_and_store16((p), (cv), (sv))
#define KMP_COMPARE_AND_STORE_REL16(p, cv, sv) \
  __kmp_compare_and_store16((p), (cv), (sv))
#define KMP_COMPARE_AND_STORE_ACQ32(p, cv, sv) \
  __kmp_compare_and_store32((volatile kmp_int32 *)(p), (kmp_int32)(cv), \
                            (kmp_int32)(sv))
#define KMP_COMPARE_AND_STORE_REL32(p, cv, sv) \
  __kmp_compare_and_store32((volatile kmp_int32 *)(p), (kmp_int32)(cv), \
                            (kmp_int32)(sv))
#define KMP_COMPARE_AND_STORE_ACQ64(p, cv, sv) \
  __kmp_compare_and_store64((volatile kmp_int64 *)(p), (kmp_int64)(cv), \
                            (kmp_int64)(sv))
#define KMP_COMPARE_AND_STORE_REL64(p, cv, sv) \
  __kmp_compare_and_store64((volatile kmp_int64 *)(p), (kmp_int64)(cv), \
                            (kmp_int64)(sv))

#if KMP_ARCH_X86
#define KMP_COMPARE_AND_STORE_PTR(p, cv, sv) \
  __kmp_compare_and_store32((volatile kmp_int32 *)(p), (kmp_int32)(cv), \
                            (kmp_int32)(sv))
#else /* 64 bit pointers */
#define KMP_COMPARE_AND_STORE_PTR(p, cv, sv) \
  __kmp_compare_and_store64((volatile kmp_int64 *)(p), (kmp_int64)(cv), \
                            (kmp_int64)(sv))
#endif /* KMP_ARCH_X86 */

#define KMP_COMPARE_AND_STORE_RET8(p, cv, sv) \
  __kmp_compare_and_store_ret8((p), (cv), (sv))
#define KMP_COMPARE_AND_STORE_RET16(p, cv, sv) \
  __kmp_compare_and_store_ret16((p), (cv), (sv))
#define KMP_COMPARE_AND_STORE_RET64(p, cv, sv) \
  __kmp_compare_and_store_ret64((volatile kmp_int64 *)(p), (kmp_int64)(cv), \
                                (kmp_int64)(sv))

#define KMP_XCHG_FIXED8(p, v) \
  __kmp_xchg_fixed8((volatile kmp_int8 *)(p), (kmp_int8)(v));
#define KMP_XCHG_FIXED16(p, v) __kmp_xchg_fixed16((p), (v));
//#define KMP_XCHG_FIXED32(p, v) __kmp_xchg_fixed32((p), (v));
//#define KMP_XCHG_FIXED64(p, v) __kmp_xchg_fixed64((p), (v));
//#define KMP_XCHG_REAL32(p, v) __kmp_xchg_real32((p), (v));
#define KMP_XCHG_REAL64(p, v) __kmp_xchg_real64((p), (v));
#endif

#elif (KMP_ASM_INTRINS && KMP_OS_UNIX) || !(KMP_ARCH_X86 || KMP_ARCH_X86_64)

/* cast p to correct type so that proper intrinsic will be used */
#define KMP_TEST_THEN_INC32(p) \
  __sync_fetch_and_add((volatile kmp_int32 *)(p), 1)
#define KMP_TEST_THEN_INC_ACQ32(p) \
  __sync_fetch_and_add((volatile kmp_int32 *)(p), 1)
#if KMP_ARCH_MIPS
#define KMP_TEST_THEN_INC64(p) \
  __atomic_fetch_add((volatile kmp_int64 *)(p), 1LL, __ATOMIC_SEQ_CST)
#define KMP_TEST_THEN_INC_ACQ64(p) \
  __atomic_fetch_add((volatile kmp_int64 *)(p), 1LL, __ATOMIC_SEQ_CST)
#else
#define KMP_TEST_THEN_INC64(p) \
  __sync_fetch_and_add((volatile kmp_int64 *)(p), 1LL)
#define KMP_TEST_THEN_INC_ACQ64(p) \
  __sync_fetch_and_add((volatile kmp_int64 *)(p), 1LL)
#endif
#define KMP_TEST_THEN_ADD4_32(p) \
  __sync_fetch_and_add((volatile kmp_int32 *)(p), 4)
#define KMP_TEST_THEN_ADD4_ACQ32(p) \
  __sync_fetch_and_add((volatile kmp_int32 *)(p), 4)
#if KMP_ARCH_MIPS
#define KMP_TEST_THEN_ADD4_64(p) \
  __atomic_fetch_add((volatile kmp_int64 *)(p), 4LL, __ATOMIC_SEQ_CST)
#define KMP_TEST_THEN_ADD4_ACQ64(p) \
  __atomic_fetch_add((volatile kmp_int64 *)(p), 4LL, __ATOMIC_SEQ_CST)
#define KMP_TEST_THEN_DEC64(p) \
  __atomic_fetch_sub((volatile kmp_int64 *)(p), 1LL, __ATOMIC_SEQ_CST)
#define KMP_TEST_THEN_DEC_ACQ64(p) \
  __atomic_fetch_sub((volatile kmp_int64 *)(p), 1LL, __ATOMIC_SEQ_CST)
#else
#define KMP_TEST_THEN_ADD4_64(p) \
  __sync_fetch_and_add((volatile kmp_int64 *)(p), 4LL)
#define KMP_TEST_THEN_ADD4_ACQ64(p) \
  __sync_fetch_and_add((volatile kmp_int64 *)(p), 4LL)
#define KMP_TEST_THEN_DEC64(p) \
  __sync_fetch_and_sub((volatile kmp_int64 *)(p), 1LL)
#define KMP_TEST_THEN_DEC_ACQ64(p) \
  __sync_fetch_and_sub((volatile kmp_int64 *)(p), 1LL)
#endif
#define KMP_TEST_THEN_DEC32(p) \
  __sync_fetch_and_sub((volatile kmp_int32 *)(p), 1)
#define KMP_TEST_THEN_DEC_ACQ32(p) \
  __sync_fetch_and_sub((volatile kmp_int32 *)(p), 1)
#define KMP_TEST_THEN_ADD8(p, v) \
  __sync_fetch_and_add((volatile kmp_int8 *)(p), (kmp_int8)(v))
#define KMP_TEST_THEN_ADD32(p, v) \
  __sync_fetch_and_add((volatile kmp_int32 *)(p), (kmp_int32)(v))
#if KMP_ARCH_MIPS
#define KMP_TEST_THEN_ADD64(p, v) \
  __atomic_fetch_add((volatile kmp_uint64 *)(p), (kmp_uint64)(v), \
                     __ATOMIC_SEQ_CST)
#else
#define KMP_TEST_THEN_ADD64(p, v) \
  __sync_fetch_and_add((volatile kmp_int64 *)(p), (kmp_int64)(v))
#endif

#define KMP_TEST_THEN_OR8(p, v) \
  __sync_fetch_and_or((volatile kmp_int8 *)(p), (kmp_int8)(v))
#define KMP_TEST_THEN_AND8(p, v) \
  __sync_fetch_and_and((volatile kmp_int8 *)(p), (kmp_int8)(v))
#define KMP_TEST_THEN_OR32(p, v) \
  __sync_fetch_and_or((volatile kmp_uint32 *)(p), (kmp_uint32)(v))
#define KMP_TEST_THEN_AND32(p, v) \
  __sync_fetch_and_and((volatile kmp_uint32 *)(p), (kmp_uint32)(v))
#if KMP_ARCH_MIPS
#define KMP_TEST_THEN_OR64(p, v) \
  __atomic_fetch_or((volatile kmp_uint64 *)(p), (kmp_uint64)(v), \
                    __ATOMIC_SEQ_CST)
#define KMP_TEST_THEN_AND64(p, v) \
  __atomic_fetch_and((volatile kmp_uint64 *)(p), (kmp_uint64)(v), \
                     __ATOMIC_SEQ_CST)
#else
#define KMP_TEST_THEN_OR64(p, v) \
  __sync_fetch_and_or((volatile kmp_uint64 *)(p), (kmp_uint64)(v))
#define KMP_TEST_THEN_AND64(p, v) \
  __sync_fetch_and_and((volatile kmp_uint64 *)(p), (kmp_uint64)(v))
#endif

#define KMP_COMPARE_AND_STORE_ACQ8(p, cv, sv) \
  __sync_bool_compare_and_swap((volatile kmp_uint8 *)(p), (kmp_uint8)(cv), \
                               (kmp_uint8)(sv))
#define KMP_COMPARE_AND_STORE_REL8(p, cv, sv) \
  __sync_bool_compare_and_swap((volatile kmp_uint8 *)(p), (kmp_uint8)(cv), \
                               (kmp_uint8)(sv))
#define KMP_COMPARE_AND_STORE_ACQ16(p, cv, sv) \
  __sync_bool_compare_and_swap((volatile kmp_uint16 *)(p), (kmp_uint16)(cv), \
                               (kmp_uint16)(sv))
#define KMP_COMPARE_AND_STORE_REL16(p, cv, sv) \
  __sync_bool_compare_and_swap((volatile kmp_uint16 *)(p), (kmp_uint16)(cv), \
                               (kmp_uint16)(sv))
#define KMP_COMPARE_AND_STORE_ACQ32(p, cv, sv) \
  __sync_bool_compare_and_swap((volatile kmp_uint32 *)(p), (kmp_uint32)(cv), \
                               (kmp_uint32)(sv))
#define KMP_COMPARE_AND_STORE_REL32(p, cv, sv) \
  __sync_bool_compare_and_swap((volatile kmp_uint32 *)(p), (kmp_uint32)(cv), \
                               (kmp_uint32)(sv))
#define KMP_COMPARE_AND_STORE_PTR(p, cv, sv) \
  __sync_bool_compare_and_swap((void *volatile *)(p), (void *)(cv), \
                               (void *)(sv))

#define KMP_COMPARE_AND_STORE_RET8(p, cv, sv) \
  __sync_val_compare_and_swap((volatile kmp_uint8 *)(p), (kmp_uint8)(cv), \
                              (kmp_uint8)(sv))
#define KMP_COMPARE_AND_STORE_RET16(p, cv, sv) \
  __sync_val_compare_and_swap((volatile kmp_uint16 *)(p), (kmp_uint16)(cv), \
                              (kmp_uint16)(sv))
#define KMP_COMPARE_AND_STORE_RET32(p, cv, sv) \
  __sync_val_compare_and_swap((volatile kmp_uint32 *)(p), (kmp_uint32)(cv), \
                              (kmp_uint32)(sv))
#if KMP_ARCH_MIPS
static inline bool mips_sync_bool_compare_and_swap(volatile kmp_uint64 *p,
                                                   kmp_uint64 cv,
                                                   kmp_uint64 sv) {
  return __atomic_compare_exchange(p, &cv, &sv, false, __ATOMIC_SEQ_CST,
                                   __ATOMIC_SEQ_CST);
}
static inline bool mips_sync_val_compare_and_swap(volatile kmp_uint64 *p,
                                                  kmp_uint64 cv,
                                                  kmp_uint64 sv) {
  __atomic_compare_exchange(p, &cv, &sv, false, __ATOMIC_SEQ_CST,
                            __ATOMIC_SEQ_CST);
  return cv;
}
#define KMP_COMPARE_AND_STORE_ACQ64(p, cv, sv) \
  mips_sync_bool_compare_and_swap((volatile kmp_uint64 *)(p), \
                                  (kmp_uint64)(cv), (kmp_uint64)(sv))
#define KMP_COMPARE_AND_STORE_REL64(p, cv, sv) \
  mips_sync_bool_compare_and_swap((volatile kmp_uint64 *)(p), \
                                  (kmp_uint64)(cv), (kmp_uint64)(sv))
#define KMP_COMPARE_AND_STORE_RET64(p, cv, sv) \
  mips_sync_val_compare_and_swap((volatile kmp_uint64 *)(p), (kmp_uint64)(cv), \
                                 (kmp_uint64)(sv))
#else
#define KMP_COMPARE_AND_STORE_ACQ64(p, cv, sv) \
  __sync_bool_compare_and_swap((volatile kmp_uint64 *)(p), (kmp_uint64)(cv), \
                               (kmp_uint64)(sv))
#define KMP_COMPARE_AND_STORE_REL64(p, cv, sv) \
  __sync_bool_compare_and_swap((volatile kmp_uint64 *)(p), (kmp_uint64)(cv), \
                               (kmp_uint64)(sv))
#define KMP_COMPARE_AND_STORE_RET64(p, cv, sv) \
  __sync_val_compare_and_swap((volatile kmp_uint64 *)(p), (kmp_uint64)(cv), \
                              (kmp_uint64)(sv))
#endif

#if KMP_OS_DARWIN && defined(__INTEL_COMPILER) && __INTEL_COMPILER >= 1800
#define KMP_XCHG_FIXED8(p, v) \
  __atomic_exchange_1((volatile kmp_uint8 *)(p), (kmp_uint8)(v), \
                      __ATOMIC_SEQ_CST)
#else
#define KMP_XCHG_FIXED8(p, v) \
  __sync_lock_test_and_set((volatile kmp_uint8 *)(p), (kmp_uint8)(v))
#endif
#define KMP_XCHG_FIXED16(p, v) \
  __sync_lock_test_and_set((volatile kmp_uint16 *)(p), (kmp_uint16)(v))
#define KMP_XCHG_FIXED32(p, v) \
  __sync_lock_test_and_set((volatile kmp_uint32 *)(p), (kmp_uint32)(v))
#define KMP_XCHG_FIXED64(p, v) \
  __sync_lock_test_and_set((volatile kmp_uint64 *)(p), (kmp_uint64)(v))

inline kmp_real32 KMP_XCHG_REAL32(volatile kmp_real32 *p, kmp_real32 v) {
  volatile kmp_uint32 *up;
  kmp_uint32 uv;
  memcpy(&up, &p, sizeof(up));
  memcpy(&uv, &v, sizeof(uv));
  kmp_int32 tmp = __sync_lock_test_and_set(up, uv);
  kmp_real32 ftmp;
  memcpy(&ftmp, &tmp, sizeof(tmp));
  return ftmp;
}

inline kmp_real64 KMP_XCHG_REAL64(volatile kmp_real64 *p, kmp_real64 v) {
  volatile kmp_uint64 *up;
  kmp_uint64 uv;
  memcpy(&up, &p, sizeof(up));
  memcpy(&uv, &v, sizeof(uv));
  kmp_int64 tmp = __sync_lock_test_and_set(up, uv);
  kmp_real64 dtmp;
  memcpy(&dtmp, &tmp, sizeof(tmp));
  return dtmp;
}

#else

extern kmp_int8 __kmp_test_then_add8(volatile kmp_int8 *p, kmp_int8 v);
extern kmp_int8 __kmp_test_then_or8(volatile kmp_int8 *p, kmp_int8 v);
extern kmp_int8 __kmp_test_then_and8(volatile kmp_int8 *p, kmp_int8 v);
extern kmp_int32 __kmp_test_then_add32(volatile kmp_int32 *p, kmp_int32 v);
extern kmp_uint32 __kmp_test_then_or32(volatile kmp_uint32 *p, kmp_uint32 v);
extern kmp_uint32 __kmp_test_then_and32(volatile kmp_uint32 *p, kmp_uint32 v);
extern kmp_int64 __kmp_test_then_add64(volatile kmp_int64 *p, kmp_int64 v);
extern kmp_uint64 __kmp_test_then_or64(volatile kmp_uint64 *p, kmp_uint64 v);
extern kmp_uint64 __kmp_test_then_and64(volatile kmp_uint64 *p, kmp_uint64 v);

extern kmp_int8 __kmp_compare_and_store8(volatile kmp_int8 *p, kmp_int8 cv,
                                         kmp_int8 sv);
extern kmp_int16 __kmp_compare_and_store16(volatile kmp_int16 *p, kmp_int16 cv,
                                           kmp_int16 sv);
extern kmp_int32 __kmp_compare_and_store32(volatile kmp_int32 *p, kmp_int32 cv,
                                           kmp_int32 sv);
extern kmp_int32 __kmp_compare_and_store64(volatile kmp_int64 *p, kmp_int64 cv,
                                           kmp_int64 sv);
extern kmp_int8 __kmp_compare_and_store_ret8(volatile kmp_int8 *p, kmp_int8 cv,
                                             kmp_int8 sv);
extern kmp_int16 __kmp_compare_and_store_ret16(volatile kmp_int16 *p,
                                               kmp_int16 cv, kmp_int16 sv);
extern kmp_int32 __kmp_compare_and_store_ret32(volatile kmp_int32 *p,
                                               kmp_int32 cv, kmp_int32 sv);
extern kmp_int64 __kmp_compare_and_store_ret64(volatile kmp_int64 *p,
                                               kmp_int64 cv, kmp_int64 sv);

extern kmp_int8 __kmp_xchg_fixed8(volatile kmp_int8 *p, kmp_int8 v);
extern kmp_int16 __kmp_xchg_fixed16(volatile kmp_int16 *p, kmp_int16 v);
extern kmp_int32 __kmp_xchg_fixed32(volatile kmp_int32 *p, kmp_int32 v);
extern kmp_int64 __kmp_xchg_fixed64(volatile kmp_int64 *p, kmp_int64 v);
extern kmp_real32 __kmp_xchg_real32(volatile kmp_real32 *p, kmp_real32 v);
extern kmp_real64 __kmp_xchg_real64(volatile kmp_real64 *p, kmp_real64 v);

#define KMP_TEST_THEN_INC32(p) \
  __kmp_test_then_add32((volatile kmp_int32 *)(p), 1)
#define KMP_TEST_THEN_INC_ACQ32(p) \
  __kmp_test_then_add32((volatile kmp_int32 *)(p), 1)
#define KMP_TEST_THEN_INC64(p) \
  __kmp_test_then_add64((volatile kmp_int64 *)(p), 1LL)
#define KMP_TEST_THEN_INC_ACQ64(p) \
  __kmp_test_then_add64((volatile kmp_int64 *)(p), 1LL)
#define KMP_TEST_THEN_ADD4_32(p) \
  __kmp_test_then_add32((volatile kmp_int32 *)(p), 4)
#define KMP_TEST_THEN_ADD4_ACQ32(p) \
  __kmp_test_then_add32((volatile kmp_int32 *)(p), 4)
#define KMP_TEST_THEN_ADD4_64(p) \
  __kmp_test_then_add64((volatile kmp_int64 *)(p), 4LL)
#define KMP_TEST_THEN_ADD4_ACQ64(p) \
  __kmp_test_then_add64((volatile kmp_int64 *)(p), 4LL)
#define KMP_TEST_THEN_DEC32(p) \
  __kmp_test_then_add32((volatile kmp_int32 *)(p), -1)
#define KMP_TEST_THEN_DEC_ACQ32(p) \
  __kmp_test_then_add32((volatile kmp_int32 *)(p), -1)
#define KMP_TEST_THEN_DEC64(p) \
  __kmp_test_then_add64((volatile kmp_int64 *)(p), -1LL)
#define KMP_TEST_THEN_DEC_ACQ64(p) \
  __kmp_test_then_add64((volatile kmp_int64 *)(p), -1LL)
#define KMP_TEST_THEN_ADD8(p, v) \
  __kmp_test_then_add8((volatile kmp_int8 *)(p), (kmp_int8)(v))
#define KMP_TEST_THEN_ADD32(p, v) \
  __kmp_test_then_add32((volatile kmp_int32 *)(p), (kmp_int32)(v))
#define KMP_TEST_THEN_ADD64(p, v) \
  __kmp_test_then_add64((volatile kmp_int64 *)(p), (kmp_int64)(v))

#define KMP_TEST_THEN_OR8(p, v) \
  __kmp_test_then_or8((volatile kmp_int8 *)(p), (kmp_int8)(v))
#define KMP_TEST_THEN_AND8(p, v) \
  __kmp_test_then_and8((volatile kmp_int8 *)(p), (kmp_int8)(v))
#define KMP_TEST_THEN_OR32(p, v) \
  __kmp_test_then_or32((volatile kmp_uint32 *)(p), (kmp_uint32)(v))
#define KMP_TEST_THEN_AND32(p, v) \
  __kmp_test_then_and32((volatile kmp_uint32 *)(p), (kmp_uint32)(v))
#define KMP_TEST_THEN_OR64(p, v) \
  __kmp_test_then_or64((volatile kmp_uint64 *)(p), (kmp_uint64)(v))
#define KMP_TEST_THEN_AND64(p, v) \
  __kmp_test_then_and64((volatile kmp_uint64 *)(p), (kmp_uint64)(v))

#define KMP_COMPARE_AND_STORE_ACQ8(p, cv, sv) \
  __kmp_compare_and_store8((volatile kmp_int8 *)(p), (kmp_int8)(cv), \
                           (kmp_int8)(sv))
#define KMP_COMPARE_AND_STORE_REL8(p, cv, sv) \
  __kmp_compare_and_store8((volatile kmp_int8 *)(p), (kmp_int8)(cv), \
                           (kmp_int8)(sv))
#define KMP_COMPARE_AND_STORE_ACQ16(p, cv, sv) \
  __kmp_compare_and_store16((volatile kmp_int16 *)(p), (kmp_int16)(cv), \
                            (kmp_int16)(sv))
#define KMP_COMPARE_AND_STORE_REL16(p, cv, sv) \
  __kmp_compare_and_store16((volatile kmp_int16 *)(p), (kmp_int16)(cv), \
                            (kmp_int16)(sv))
#define KMP_COMPARE_AND_STORE_ACQ32(p, cv, sv) \
  __kmp_compare_and_store32((volatile kmp_int32 *)(p), (kmp_int32)(cv), \
                            (kmp_int32)(sv))
#define KMP_COMPARE_AND_STORE_REL32(p, cv, sv) \
  __kmp_compare_and_store32((volatile kmp_int32 *)(p), (kmp_int32)(cv), \
                            (kmp_int32)(sv))
#define KMP_COMPARE_AND_STORE_ACQ64(p, cv, sv) \
  __kmp_compare_and_store64((volatile kmp_int64 *)(p), (kmp_int64)(cv), \
                            (kmp_int64)(sv))
#define KMP_COMPARE_AND_STORE_REL64(p, cv, sv) \
  __kmp_compare_and_store64((volatile kmp_int64 *)(p), (kmp_int64)(cv), \
                            (kmp_int64)(sv))

#if KMP_ARCH_X86
#define KMP_COMPARE_AND_STORE_PTR(p, cv, sv) \
  __kmp_compare_and_store32((volatile kmp_int32 *)(p), (kmp_int32)(cv), \
                            (kmp_int32)(sv))
#else /* 64 bit pointers */
#define KMP_COMPARE_AND_STORE_PTR(p, cv, sv) \
  __kmp_compare_and_store64((volatile kmp_int64 *)(p), (kmp_int64)(cv), \
                            (kmp_int64)(sv))
#endif /* KMP_ARCH_X86 */

#define KMP_COMPARE_AND_STORE_RET8(p, cv, sv) \
  __kmp_compare_and_store_ret8((p), (cv), (sv))
#define KMP_COMPARE_AND_STORE_RET16(p, cv, sv) \
  __kmp_compare_and_store_ret16((p), (cv), (sv))
#define KMP_COMPARE_AND_STORE_RET32(p, cv, sv) \
  __kmp_compare_and_store_ret32((volatile kmp_int32 *)(p), (kmp_int32)(cv), \
                                (kmp_int32)(sv))
#define KMP_COMPARE_AND_STORE_RET64(p, cv, sv) \
  __kmp_compare_and_store_ret64((volatile kmp_int64 *)(p), (kmp_int64)(cv), \
                                (kmp_int64)(sv))

#define KMP_XCHG_FIXED8(p, v) \
  __kmp_xchg_fixed8((volatile kmp_int8 *)(p), (kmp_int8)(v));
#define KMP_XCHG_FIXED16(p, v) __kmp_xchg_fixed16((p), (v));
#define KMP_XCHG_FIXED32(p, v) __kmp_xchg_fixed32((p), (v));
#define KMP_XCHG_FIXED64(p, v) __kmp_xchg_fixed64((p), (v));
#define KMP_XCHG_REAL32(p, v) __kmp_xchg_real32((p), (v));
#define KMP_XCHG_REAL64(p, v) __kmp_xchg_real64((p), (v));

#endif /* KMP_ASM_INTRINS */

/* ------------- relaxed consistency memory model stuff ------------------ */

#if KMP_OS_WINDOWS
#ifdef __ABSOFT_WIN
#define KMP_MB() asm("nop")
#define KMP_IMB() asm("nop")
#else
#define KMP_MB() /* _asm{ nop } */
#define KMP_IMB() /* _asm{ nop } */
#endif
#endif /* KMP_OS_WINDOWS */

#if KMP_ARCH_PPC64 || KMP_ARCH_ARM || KMP_ARCH_AARCH64 || KMP_ARCH_MIPS || \
    KMP_ARCH_MIPS64 || KMP_ARCH_RISCV64 || KMP_ARCH_LOONGARCH64 || \
    KMP_ARCH_VE || KMP_ARCH_S390X || KMP_ARCH_PPC
#if KMP_OS_WINDOWS
#undef KMP_MB
#define KMP_MB() std::atomic_thread_fence(std::memory_order_seq_cst)
#else /* !KMP_OS_WINDOWS */
#define KMP_MB() __sync_synchronize()
#endif
#endif

#ifndef KMP_MB
#define KMP_MB() /* nothing to do */
#endif

#if KMP_ARCH_X86 || KMP_ARCH_X86_64
#if KMP_MIC
// fence-style instructions do not exist, but lock; xaddl $0,(%rsp) can be used.
// We shouldn't need it, though, since the ABI rules require that
// * If the compiler generates NGO stores it also generates the fence
// * If users hand-code NGO stores they should insert the fence
// therefore no incomplete unordered stores should be visible.
#define KMP_MFENCE() /* Nothing */
#define KMP_SFENCE() /* Nothing */
#else
#if KMP_COMPILER_ICC || KMP_COMPILER_ICX
#define KMP_MFENCE_() _mm_mfence()
#define KMP_SFENCE_() _mm_sfence()
#elif KMP_COMPILER_MSVC
#define KMP_MFENCE_() MemoryBarrier()
#define KMP_SFENCE_() MemoryBarrier()
#else
#define KMP_MFENCE_() __sync_synchronize()
#define KMP_SFENCE_() __sync_synchronize()
#endif
#define KMP_MFENCE() \
  if (UNLIKELY(!__kmp_cpuinfo.initialized)) { \
    __kmp_query_cpuid(&__kmp_cpuinfo); \
  } \
  if (__kmp_cpuinfo.flags.sse2) { \
    KMP_MFENCE_(); \
  }
#define KMP_SFENCE() KMP_SFENCE_()
#endif
#else
#define KMP_MFENCE() KMP_MB()
#define KMP_SFENCE() KMP_MB()
#endif

#ifndef KMP_IMB
#define KMP_IMB() /* nothing to do */
#endif

#ifndef KMP_ST_REL32
#define KMP_ST_REL32(A, D) (*(A) = (D))
#endif

#ifndef KMP_ST_REL64
#define KMP_ST_REL64(A, D) (*(A) = (D))
#endif

#ifndef KMP_LD_ACQ32
#define KMP_LD_ACQ32(A) (*(A))
#endif

#ifndef KMP_LD_ACQ64
#define KMP_LD_ACQ64(A) (*(A))
#endif

/* ------------------------------------------------------------------------ */
// FIXME - maybe this should be
//
// #define TCR_4(a) (*(volatile kmp_int32 *)(&a))
// #define TCW_4(a,b) (a) = (*(volatile kmp_int32 *)&(b))
//
// #define TCR_8(a) (*(volatile kmp_int64 *)(a))
// #define TCW_8(a,b) (a) = (*(volatile kmp_int64 *)(&b))
//
// I'm fairly certain this is the correct thing to do, but I'm afraid
// of performance regressions.

#define TCR_1(a) (a)
#define TCW_1(a, b) (a) = (b)
#define TCR_4(a) (a)
#define TCW_4(a, b) (a) = (b)
#define TCI_4(a) (++(a))
#define TCD_4(a) (--(a))
#define TCR_8(a) (a)
#define TCW_8(a, b) (a) = (b)
#define TCI_8(a) (++(a))
#define TCD_8(a) (--(a))
#define TCR_SYNC_4(a) (a)
#define TCW_SYNC_4(a, b) (a) = (b)
#define TCX_SYNC_4(a, b, c) \
  KMP_COMPARE_AND_STORE_REL32((volatile kmp_int32 *)(volatile void *)&(a), \
                              (kmp_int32)(b), (kmp_int32)(c))
#define TCR_SYNC_8(a) (a)
#define TCW_SYNC_8(a, b) (a) = (b)
#define TCX_SYNC_8(a, b, c) \
  KMP_COMPARE_AND_STORE_REL64((volatile kmp_int64 *)(volatile void *)&(a), \
                              (kmp_int64)(b), (kmp_int64)(c))

#if KMP_ARCH_X86 || KMP_ARCH_MIPS || KMP_ARCH_WASM || KMP_ARCH_PPC
// What about ARM?
#define TCR_PTR(a) ((void *)TCR_4(a))
#define TCW_PTR(a, b) TCW_4((a), (b))
#define TCR_SYNC_PTR(a) ((void *)TCR_SYNC_4(a))
#define TCW_SYNC_PTR(a, b) TCW_SYNC_4((a), (b))
#define TCX_SYNC_PTR(a, b, c) ((void *)TCX_SYNC_4((a), (b), (c)))

#else /* 64 bit pointers */

#define TCR_PTR(a) ((void *)TCR_8(a))
#define TCW_PTR(a, b) TCW_8((a), (b))
#define TCR_SYNC_PTR(a) ((void *)TCR_SYNC_8(a))
#define TCW_SYNC_PTR(a, b) TCW_SYNC_8((a), (b))
#define TCX_SYNC_PTR(a, b, c) ((void *)TCX_SYNC_8((a), (b), (c)))

#endif /* KMP_ARCH_X86 */
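
// Illustrative sketch (not part of the upstream header): TCR_*/TCW_* wrap
// reads and writes of shared scalars so one definition point controls whether
// they become volatile (or fenced) accesses later. The helper is hypothetical.
static inline void __kmp_example_handshake(volatile kmp_int32 *flag) {
  TCW_4(*flag, 1); // thread-coherent write: signal the peer
  while (TCR_4(*flag) != 2) { // thread-coherent read: wait for the reply
  }
}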

/* If these FTN_{TRUE,FALSE} values change, may need to change several places
   where they are used to check that language is Fortran, not C. */

#ifndef FTN_TRUE
#define FTN_TRUE TRUE
#endif

#ifndef FTN_FALSE
#define FTN_FALSE FALSE
#endif

typedef void (*microtask_t)(int *gtid, int *npr, ...);

#ifdef USE_VOLATILE_CAST
#define VOLATILE_CAST(x) (volatile x)
#else
#define VOLATILE_CAST(x) (x)
#endif

#define KMP_WAIT __kmp_wait_4
#define KMP_WAIT_PTR __kmp_wait_4_ptr
#define KMP_EQ __kmp_eq_4
#define KMP_NEQ __kmp_neq_4
#define KMP_LT __kmp_lt_4
#define KMP_GE __kmp_ge_4
#define KMP_LE __kmp_le_4

/* Workaround for Intel(R) 64 code gen bug when taking address of static array
 * (Intel(R) 64 Tracker #138) */
#if (KMP_ARCH_X86_64 || KMP_ARCH_PPC64) && KMP_OS_LINUX
#define STATIC_EFI2_WORKAROUND
#else
#define STATIC_EFI2_WORKAROUND static
#endif

// Support of BGET usage
#ifndef KMP_USE_BGET
#define KMP_USE_BGET 1
#endif

// Switches for OSS builds
#ifndef USE_CMPXCHG_FIX
#define USE_CMPXCHG_FIX 1
#endif

// Enable dynamic user lock
#define KMP_USE_DYNAMIC_LOCK 1

// Enable Intel(R) Transactional Synchronization Extensions (Intel(R) TSX) if
// dynamic user lock is turned on
#if KMP_USE_DYNAMIC_LOCK
// Visual studio can't handle the asm sections in this code
#define KMP_USE_TSX ((KMP_ARCH_X86 || KMP_ARCH_X86_64) && !KMP_COMPILER_MSVC)
#ifdef KMP_USE_ADAPTIVE_LOCKS
#undef KMP_USE_ADAPTIVE_LOCKS
#endif
#define KMP_USE_ADAPTIVE_LOCKS KMP_USE_TSX
#endif

// Enable tick time conversion of ticks to seconds
#if KMP_STATS_ENABLED
#define KMP_HAVE_TICK_TIME \
  (KMP_OS_LINUX && (KMP_MIC || KMP_ARCH_X86 || KMP_ARCH_X86_64))
#endif

// Warning levels
enum kmp_warnings_level {
  kmp_warnings_off = 0, /* No warnings */
  kmp_warnings_low, /* Minimal warnings (default) */
  kmp_warnings_explicit = 6, /* Explicitly set to ON - more warnings */
  kmp_warnings_verbose /* reserved */
};

#ifdef __cplusplus
} // extern "C"
#endif // __cplusplus

// Safe C API
#include "kmp_safe_c_api.h"

// Macros for C++11 atomic functions
#define KMP_ATOMIC_LD(p, order) (p)->load(std::memory_order_##order)
#define KMP_ATOMIC_OP(op, p, v, order) (p)->op(v, std::memory_order_##order)

// For non-default load/store
#define KMP_ATOMIC_LD_ACQ(p) KMP_ATOMIC_LD(p, acquire)
#define KMP_ATOMIC_LD_RLX(p) KMP_ATOMIC_LD(p, relaxed)
#define KMP_ATOMIC_ST_REL(p, v) KMP_ATOMIC_OP(store, p, v, release)
#define KMP_ATOMIC_ST_RLX(p, v) KMP_ATOMIC_OP(store, p, v, relaxed)

// For non-default fetch_<op>
#define KMP_ATOMIC_ADD(p, v) KMP_ATOMIC_OP(fetch_add, p, v, acq_rel)
#define KMP_ATOMIC_SUB(p, v) KMP_ATOMIC_OP(fetch_sub, p, v, acq_rel)
#define KMP_ATOMIC_AND(p, v) KMP_ATOMIC_OP(fetch_and, p, v, acq_rel)
#define KMP_ATOMIC_OR(p, v) KMP_ATOMIC_OP(fetch_or, p, v, acq_rel)
#define KMP_ATOMIC_INC(p) KMP_ATOMIC_OP(fetch_add, p, 1, acq_rel)
#define KMP_ATOMIC_DEC(p) KMP_ATOMIC_OP(fetch_sub, p, 1, acq_rel)
#define KMP_ATOMIC_ADD_RLX(p, v) KMP_ATOMIC_OP(fetch_add, p, v, relaxed)
#define KMP_ATOMIC_INC_RLX(p) KMP_ATOMIC_OP(fetch_add, p, 1, relaxed)

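// Illustrative sketch (not part of the upstream header): the wrappers expand
// directly to std::atomic member calls. The helper below is hypothetical.
static inline kmp_int32 __kmp_example_bump(std::atomic<kmp_int32> *c) {
  KMP_ATOMIC_INC(c); // c->fetch_add(1, std::memory_order_acq_rel)
  return KMP_ATOMIC_LD_ACQ(c); // c->load(std::memory_order_acquire)
}
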
// Callers of the following functions cannot see the side effect on "expected".
template <typename T>
bool __kmp_atomic_compare_store(std::atomic<T> *p, T expected, T desired) {
  return p->compare_exchange_strong(
      expected, desired, std::memory_order_acq_rel, std::memory_order_relaxed);
}

template <typename T>
bool __kmp_atomic_compare_store_acq(std::atomic<T> *p, T expected, T desired) {
  return p->compare_exchange_strong(
      expected, desired, std::memory_order_acquire, std::memory_order_relaxed);
}

template <typename T>
bool __kmp_atomic_compare_store_rel(std::atomic<T> *p, T expected, T desired) {
  return p->compare_exchange_strong(
      expected, desired, std::memory_order_release, std::memory_order_relaxed);
}
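
// Illustrative sketch (not part of the upstream header): a try-lock built on
// the acquire flavor. Because "expected" is taken by value, a failed exchange
// never writes back into the caller's variable. The function is hypothetical.
static inline bool __kmp_example_try_acquire(std::atomic<kmp_int32> *lk) {
  // Succeeds only on a 0 -> 1 transition.
  return __kmp_atomic_compare_store_acq(lk, (kmp_int32)0, (kmp_int32)1);
}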

// Symbol lookup on Linux/Windows
#if KMP_OS_WINDOWS
extern void *__kmp_lookup_symbol(const char *name, bool next = false);
#define KMP_DLSYM(name) __kmp_lookup_symbol(name)
#define KMP_DLSYM_NEXT(name) __kmp_lookup_symbol(name, true)
#elif KMP_OS_WASI
#define KMP_DLSYM(name) nullptr
#define KMP_DLSYM_NEXT(name) nullptr
#else
#define KMP_DLSYM(name) dlsym(RTLD_DEFAULT, name)
#define KMP_DLSYM_NEXT(name) dlsym(RTLD_NEXT, name)
#endif
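
// Illustrative sketch (not part of the upstream header): KMP_DLSYM_NEXT is
// the interposition idiom: find the next definition of a symbol after the
// current library, e.g. to chain to an implementation this one shadows. The
// typedef and symbol name below are hypothetical.
//
//   typedef void (*tool_init_t)(void);
//   tool_init_t next_init = (tool_init_t)KMP_DLSYM_NEXT("example_tool_init");
//   if (next_init)
//     next_init();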

// MSVC doesn't have this, but clang/clang-cl does.
#ifndef __has_builtin
#define __has_builtin(x) 0
#endif

// Same as LLVM_BUILTIN_UNREACHABLE. States that it is UB to reach this point.
#if __has_builtin(__builtin_unreachable) || defined(__GNUC__)
#define KMP_BUILTIN_UNREACHABLE __builtin_unreachable()
#elif defined(_MSC_VER)
#define KMP_BUILTIN_UNREACHABLE __assume(false)
#else
#define KMP_BUILTIN_UNREACHABLE
#endif
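
// Illustrative sketch (not part of the upstream header): after a switch that
// handles every enumerator, the macro tells the compiler the tail is dead.
// The function is hypothetical.
static inline int __kmp_example_fence_rank(enum kmp_mem_fence_type t) {
  switch (t) {
  case kmp_no_fence:
    return 0;
  case kmp_acquire_fence:
    return 1;
  case kmp_release_fence:
    return 2;
  case kmp_full_fence:
    return 3;
  }
  KMP_BUILTIN_UNREACHABLE; // all enumerators handled above
}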

#endif /* KMP_OS_H */