#include "kmp_config.h"
#ifndef KMP_STATIC_STEAL_ENABLED
#define KMP_STATIC_STEAL_ENABLED 1
#define TASK_CURRENT_NOT_QUEUED 0
#define TASK_CURRENT_QUEUED 1
#ifdef BUILD_TIED_TASK_STACK
#define TASK_STACK_EMPTY 0
#define TASK_STACK_BLOCK_BITS 5
#define TASK_STACK_BLOCK_SIZE (1 << TASK_STACK_BLOCK_BITS)
#define TASK_STACK_INDEX_MASK (TASK_STACK_BLOCK_SIZE - 1)
#define TASK_NOT_PUSHED 1
#define TASK_SUCCESSFULLY_PUSHED 0
#define TASK_EXPLICIT 1
#define TASK_IMPLICIT 0
#define TASK_DETACHABLE 1
#define TASK_UNDETACHABLE 0
#define KMP_CANCEL_THREADS
#define KMP_THREAD_ATTR
#if defined(__ANDROID__)
#undef KMP_CANCEL_THREADS
#include "kmp_safe_c_api.h"
#undef KMP_USE_HIER_SCHED
#define KMP_USE_HIER_SCHED KMP_AFFINITY_SUPPORTED
#if KMP_USE_HWLOC && KMP_AFFINITY_SUPPORTED
#ifndef HWLOC_OBJ_NUMANODE
#define HWLOC_OBJ_NUMANODE HWLOC_OBJ_NODE
#ifndef HWLOC_OBJ_PACKAGE
#define HWLOC_OBJ_PACKAGE HWLOC_OBJ_SOCKET
#if KMP_ARCH_X86 || KMP_ARCH_X86_64
#include <xmmintrin.h>
#define KMP_INTERNAL_MALLOC(sz) malloc(sz)
#define KMP_INTERNAL_FREE(p) free(p)
#define KMP_INTERNAL_REALLOC(p, sz) realloc((p), (sz))
#define KMP_INTERNAL_CALLOC(n, sz) calloc((n), (sz))
#include "kmp_debug.h"
#include "kmp_version.h"
#include "kmp_barrier.h"
#include "kmp_debugger.h"
#define KMP_HANDLE_SIGNALS (KMP_OS_UNIX || KMP_OS_WINDOWS)
#include "kmp_wrapper_malloc.h"
#if !defined NSIG && defined _NSIG
#pragma weak clock_gettime
#include "ompt-internal.h"
#include "ompd-specific.h"
#define UNLIKELY(x) (x)
#ifndef USE_FAST_MEMORY
#define USE_FAST_MEMORY 3
#ifndef KMP_NESTED_HOT_TEAMS
#define KMP_NESTED_HOT_TEAMS 0
#define USE_NESTED_HOT_ARG(x)
#if KMP_NESTED_HOT_TEAMS
#define USE_NESTED_HOT_ARG(x) , x
#define USE_NESTED_HOT_ARG(x)
#ifndef USE_CMP_XCHG_FOR_BGET
#define USE_CMP_XCHG_FOR_BGET 1
#define KMP_NSEC_PER_SEC 1000000000L
#define KMP_USEC_PER_SEC 1000000L
  KMP_IDENT_BARRIER_IMPL_MASK = 0x01C0,
  KMP_IDENT_BARRIER_IMPL_FOR = 0x0040,
  KMP_IDENT_BARRIER_IMPL_SECTIONS = 0x00C0,
  KMP_IDENT_BARRIER_IMPL_SINGLE = 0x0140,
  KMP_IDENT_BARRIER_IMPL_WORKSHARE = 0x01C0,
  KMP_IDENT_ATOMIC_HINT_UNCONTENDED = 0x010000,
  KMP_IDENT_ATOMIC_HINT_CONTENDED = 0x020000,
  KMP_IDENT_ATOMIC_HINT_NONSPECULATIVE = 0x040000,
  KMP_IDENT_ATOMIC_HINT_SPECULATIVE = 0x080000,
  KMP_IDENT_OPENMP_SPEC_VERSION_MASK = 0xFF000000
  kmp_int32 get_openmp_version() {
    return (((flags & KMP_IDENT_OPENMP_SPEC_VERSION_MASK) >> 24) & 0xFF);
  }
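// Illustrative sketch (not part of the original header): the OpenMP spec
// version lives in the top byte of ident_t::flags; the flag value below is
// hypothetical.
//   kmp_int32 flags = 0x32000000 | KMP_IDENT_BARRIER_IMPL_FOR;
//   kmp_int32 ver = (flags & KMP_IDENT_OPENMP_SPEC_VERSION_MASK) >> 24;
//   // ver == 50, i.e. OpenMP 5.0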
typedef union kmp_team kmp_team_t;
typedef struct kmp_taskdata kmp_taskdata_t;
typedef union kmp_task_team kmp_task_team_t;
typedef union kmp_team kmp_team_p;
typedef union kmp_info kmp_info_p;
typedef union kmp_root kmp_root_p;
template <bool C = false, bool S = true> class kmp_flag_32;
template <bool C = false, bool S = true> class kmp_flag_64;
template <bool C = false, bool S = true> class kmp_atomic_flag_64;
class kmp_flag_oncore;
#define KMP_PACK_64(HIGH_32, LOW_32) \
  ((kmp_int64)((((kmp_uint64)(HIGH_32)) << 32) | (kmp_uint64)(LOW_32)))
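// Illustrative sketch (not part of the original header): KMP_PACK_64 packs two
// 32-bit halves into one kmp_int64; the sample values are hypothetical.
//   kmp_int64 packed = KMP_PACK_64(0x12345678, 0x9ABCDEF0);
//   kmp_uint32 hi = (kmp_uint32)(((kmp_uint64)packed) >> 32); // 0x12345678
//   kmp_uint32 lo = (kmp_uint32)((kmp_uint64)packed);         // 0x9ABCDEF0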
#define SKIP_WS(_x) \
  while (*(_x) == ' ' || *(_x) == '\t') \
    (_x)++
#define SKIP_DIGITS(_x) \
  while (*(_x) >= '0' && *(_x) <= '9') \
    (_x)++
#define SKIP_TOKEN(_x) \
  while ((*(_x) >= '0' && *(_x) <= '9') || (*(_x) >= 'a' && *(_x) <= 'z') || \
         (*(_x) >= 'A' && *(_x) <= 'Z') || *(_x) == '_') \
    (_x)++
#define SKIP_TO(_x, _c) \
  while (*(_x) != '\0' && *(_x) != (_c)) \
    (_x)++
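// Illustrative sketch (not part of the original header): walking a settings
// string with the SKIP_* helpers; the input string is hypothetical.
//   char buf[] = "granularity = core";
//   char *p = buf;
//   SKIP_TOKEN(p);   // p -> " = core"
//   SKIP_WS(p);      // p -> "= core"
//   SKIP_TO(p, 'c'); // p -> "core"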
#define KMP_MAX(x, y) ((x) > (y) ? (x) : (y))
#define KMP_MIN(x, y) ((x) < (y) ? (x) : (y))
enum kmp_state_timer {
#ifdef USE_LOAD_BALANCE
  dynamic_load_balance,
  dynamic_thread_limit,
#ifndef KMP_SCHED_TYPE_DEFINED
#define KMP_SCHED_TYPE_DEFINED
typedef enum kmp_sched {
  kmp_sched_static = 1,
  kmp_sched_dynamic = 2,
  kmp_sched_guided = 3,
  kmp_sched_upper_std = 5,
  kmp_sched_lower_ext = 100,
  kmp_sched_trapezoidal = 101,
#if KMP_STATIC_STEAL_ENABLED
  kmp_sched_static_steal = 102,
  kmp_sched_default = kmp_sched_static,
  kmp_sched_monotonic = 0x80000000
  kmp_sch_static_chunked = 33,
  kmp_sch_dynamic_chunked = 35,
  kmp_sch_runtime = 37,
  kmp_sch_trapezoidal = 39,
  kmp_sch_static_greedy = 40,
  kmp_sch_static_balanced = 41,
  kmp_sch_guided_iterative_chunked = 42,
  kmp_sch_guided_analytical_chunked = 43,
  kmp_sch_static_steal = 44,
  kmp_sch_static_balanced_chunked = 45,
  kmp_ord_static_chunked = 65,
  kmp_ord_dynamic_chunked = 67,
  kmp_ord_guided_chunked = 68,
  kmp_ord_runtime = 69,
  kmp_ord_trapezoidal = 71,
  kmp_nm_static_chunked =
      (kmp_sch_static_chunked - kmp_sch_lower + kmp_nm_lower),
  kmp_nm_dynamic_chunked = 163,
  kmp_nm_runtime = 165,
  kmp_nm_trapezoidal = 167,
  kmp_nm_static_greedy = 168,
  kmp_nm_static_balanced = 169,
  kmp_nm_guided_iterative_chunked = 170,
  kmp_nm_guided_analytical_chunked = 171,
  kmp_nm_static_steal = 172,
  kmp_nm_ord_static_chunked = 193,
  kmp_nm_ord_dynamic_chunked = 195,
  kmp_nm_ord_guided_chunked = 196,
  kmp_nm_ord_runtime = 197,
  kmp_nm_ord_trapezoidal = 199,
#define SCHEDULE_WITHOUT_MODIFIERS(s) \
  (enum sched_type)( \
      (s) & ~(kmp_sch_modifier_nonmonotonic | kmp_sch_modifier_monotonic))
#define SCHEDULE_HAS_MONOTONIC(s) (((s)&kmp_sch_modifier_monotonic) != 0)
#define SCHEDULE_HAS_NONMONOTONIC(s) (((s)&kmp_sch_modifier_nonmonotonic) != 0)
#define SCHEDULE_HAS_NO_MODIFIERS(s) \
  (((s) & (kmp_sch_modifier_nonmonotonic | kmp_sch_modifier_monotonic)) == 0)
#define SCHEDULE_GET_MODIFIERS(s) \
  ((enum sched_type)( \
      (s) & (kmp_sch_modifier_nonmonotonic | kmp_sch_modifier_monotonic)))
#define SCHEDULE_SET_MODIFIERS(s, m) \
  (s = (enum sched_type)((kmp_int32)s | (kmp_int32)m))
#define SCHEDULE_NONMONOTONIC 0
#define SCHEDULE_MONOTONIC 1
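// Illustrative sketch (not part of the original header): setting, testing and
// stripping schedule modifiers on a sched_type value; the starting value is
// hypothetical.
//   enum sched_type s = kmp_sch_dynamic_chunked;
//   SCHEDULE_SET_MODIFIERS(s, kmp_sch_modifier_monotonic);
//   bool mono = SCHEDULE_HAS_MONOTONIC(s);                 // true
//   enum sched_type base = SCHEDULE_WITHOUT_MODIFIERS(s);  // kmp_sch_dynamic_chunked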
static inline void
__kmp_sched_apply_mods_stdkind(kmp_sched_t *kind,
                               enum sched_type internal_kind) {
  if (SCHEDULE_HAS_MONOTONIC(internal_kind)) {
    *kind = (kmp_sched_t)((int)*kind | (int)kmp_sched_monotonic);
  }
}

static inline void
__kmp_sched_apply_mods_intkind(kmp_sched_t kind,
                               enum sched_type *internal_kind) {
  if ((int)kind & (int)kmp_sched_monotonic) {
    *internal_kind = (enum sched_type)((int)*internal_kind |
                                       (int)kmp_sch_modifier_monotonic);
  }
}

static inline kmp_sched_t __kmp_sched_without_mods(kmp_sched_t kind) {
  return (kmp_sched_t)((int)kind & ~((int)kmp_sched_monotonic));
}
typedef union kmp_r_sched {
enum clock_function_type {
  clock_function_gettimeofday,
  clock_function_clock_gettime
enum mic_type { non_mic, mic1, mic2, mic3, dummy };
#undef KMP_FAST_REDUCTION_BARRIER
#define KMP_FAST_REDUCTION_BARRIER 1
#undef KMP_FAST_REDUCTION_CORE_DUO
#if KMP_ARCH_X86 || KMP_ARCH_X86_64
#define KMP_FAST_REDUCTION_CORE_DUO 1
enum _reduction_method {
  reduction_method_not_defined = 0,
  critical_reduce_block = (1 << 8),
  atomic_reduce_block = (2 << 8),
  tree_reduce_block = (3 << 8),
  empty_reduce_block = (4 << 8)
#if KMP_FAST_REDUCTION_BARRIER
#define PACK_REDUCTION_METHOD_AND_BARRIER(reduction_method, barrier_type) \
  ((reduction_method) | (barrier_type))
#define UNPACK_REDUCTION_METHOD(packed_reduction_method) \
  ((enum _reduction_method)((packed_reduction_method) & (0x0000FF00)))
#define UNPACK_REDUCTION_BARRIER(packed_reduction_method) \
  ((enum barrier_type)((packed_reduction_method) & (0x000000FF)))
#define PACK_REDUCTION_METHOD_AND_BARRIER(reduction_method, barrier_type) \
  (reduction_method)
#define UNPACK_REDUCTION_METHOD(packed_reduction_method) \
  (packed_reduction_method)
#define UNPACK_REDUCTION_BARRIER(packed_reduction_method) (bs_plain_barrier)
#define TEST_REDUCTION_METHOD(packed_reduction_method, which_reduction_block) \
  ((UNPACK_REDUCTION_METHOD(packed_reduction_method)) == \
   (which_reduction_block))
#if KMP_FAST_REDUCTION_BARRIER
#define TREE_REDUCE_BLOCK_WITH_REDUCTION_BARRIER \
  (PACK_REDUCTION_METHOD_AND_BARRIER(tree_reduce_block, bs_reduction_barrier))
#define TREE_REDUCE_BLOCK_WITH_PLAIN_BARRIER \
  (PACK_REDUCTION_METHOD_AND_BARRIER(tree_reduce_block, bs_plain_barrier))
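// Illustrative sketch (not part of the original header): the reduction method
// occupies the second byte and the barrier type the low byte, so both travel
// in one int.
//   PACKED_REDUCTION_METHOD_T prm =
//       PACK_REDUCTION_METHOD_AND_BARRIER(tree_reduce_block, bs_reduction_barrier);
//   // UNPACK_REDUCTION_METHOD(prm)  == tree_reduce_block
//   // UNPACK_REDUCTION_BARRIER(prm) == bs_reduction_barrier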
typedef int PACKED_REDUCTION_METHOD_T;
#pragma warning(disable : 271 310)
typedef enum kmp_hw_core_type_t {
  KMP_HW_CORE_TYPE_UNKNOWN = 0x0,
#if KMP_ARCH_X86 || KMP_ARCH_X86_64
  KMP_HW_CORE_TYPE_ATOM = 0x20,
  KMP_HW_CORE_TYPE_CORE = 0x40,
  KMP_HW_MAX_NUM_CORE_TYPES = 3,
  KMP_HW_MAX_NUM_CORE_TYPES = 1,
#define KMP_HW_MAX_NUM_CORE_EFFS 8
#define KMP_DEBUG_ASSERT_VALID_HW_TYPE(type) \
  KMP_DEBUG_ASSERT(type >= (kmp_hw_t)0 && type < KMP_HW_LAST)
#define KMP_ASSERT_VALID_HW_TYPE(type) \
  KMP_ASSERT(type >= (kmp_hw_t)0 && type < KMP_HW_LAST)
#define KMP_FOREACH_HW_TYPE(type) \
  for (kmp_hw_t type = (kmp_hw_t)0; type < KMP_HW_LAST; \
       type = (kmp_hw_t)((int)type + 1))
const char *__kmp_hw_get_keyword(kmp_hw_t type, bool plural = false);
const char *__kmp_hw_get_catalog_string(kmp_hw_t type, bool plural = false);
const char *__kmp_hw_get_core_type_string(kmp_hw_core_type_t type);
#if KMP_AFFINITY_SUPPORTED
#if _MSC_VER < 1600 && KMP_MSVC_COMPAT
typedef struct GROUP_AFFINITY {
#if KMP_GROUP_AFFINITY
extern int __kmp_num_proc_groups;
static const int __kmp_num_proc_groups = 1;
typedef DWORD (*kmp_GetActiveProcessorCount_t)(WORD);
extern kmp_GetActiveProcessorCount_t __kmp_GetActiveProcessorCount;
typedef WORD (*kmp_GetActiveProcessorGroupCount_t)(void);
extern kmp_GetActiveProcessorGroupCount_t __kmp_GetActiveProcessorGroupCount;
typedef BOOL (*kmp_GetThreadGroupAffinity_t)(HANDLE, GROUP_AFFINITY *);
extern kmp_GetThreadGroupAffinity_t __kmp_GetThreadGroupAffinity;
typedef BOOL (*kmp_SetThreadGroupAffinity_t)(HANDLE, const GROUP_AFFINITY *,
                                             GROUP_AFFINITY *);
extern kmp_SetThreadGroupAffinity_t __kmp_SetThreadGroupAffinity;
extern hwloc_topology_t __kmp_hwloc_topology;
extern int __kmp_hwloc_error;
extern size_t __kmp_affin_mask_size;
#define KMP_AFFINITY_CAPABLE() (__kmp_affin_mask_size > 0)
#define KMP_AFFINITY_DISABLE() (__kmp_affin_mask_size = 0)
#define KMP_AFFINITY_ENABLE(mask_size) (__kmp_affin_mask_size = mask_size)
#define KMP_CPU_SET_ITERATE(i, mask) \
  for (i = (mask)->begin(); (int)i != (mask)->end(); i = (mask)->next(i))
#define KMP_CPU_SET(i, mask) (mask)->set(i)
#define KMP_CPU_ISSET(i, mask) (mask)->is_set(i)
#define KMP_CPU_CLR(i, mask) (mask)->clear(i)
#define KMP_CPU_ZERO(mask) (mask)->zero()
#define KMP_CPU_COPY(dest, src) (dest)->copy(src)
#define KMP_CPU_AND(dest, src) (dest)->bitwise_and(src)
#define KMP_CPU_COMPLEMENT(max_bit_number, mask) (mask)->bitwise_not()
#define KMP_CPU_UNION(dest, src) (dest)->bitwise_or(src)
#define KMP_CPU_ALLOC(ptr) (ptr = __kmp_affinity_dispatch->allocate_mask())
#define KMP_CPU_FREE(ptr) __kmp_affinity_dispatch->deallocate_mask(ptr)
#define KMP_CPU_ALLOC_ON_STACK(ptr) KMP_CPU_ALLOC(ptr)
#define KMP_CPU_FREE_FROM_STACK(ptr) KMP_CPU_FREE(ptr)
#define KMP_CPU_INTERNAL_ALLOC(ptr) KMP_CPU_ALLOC(ptr)
#define KMP_CPU_INTERNAL_FREE(ptr) KMP_CPU_FREE(ptr)
#define KMP_CPU_INDEX(arr, i) __kmp_affinity_dispatch->index_mask_array(arr, i)
#define KMP_CPU_ALLOC_ARRAY(arr, n) \
  (arr = __kmp_affinity_dispatch->allocate_mask_array(n))
#define KMP_CPU_FREE_ARRAY(arr, n) \
  __kmp_affinity_dispatch->deallocate_mask_array(arr)
#define KMP_CPU_INTERNAL_ALLOC_ARRAY(arr, n) KMP_CPU_ALLOC_ARRAY(arr, n)
#define KMP_CPU_INTERNAL_FREE_ARRAY(arr, n) KMP_CPU_FREE_ARRAY(arr, n)
#define __kmp_get_system_affinity(mask, abort_bool) \
  (mask)->get_system_affinity(abort_bool)
#define __kmp_set_system_affinity(mask, abort_bool) \
  (mask)->set_system_affinity(abort_bool)
#define __kmp_get_proc_group(mask) (mask)->get_proc_group()
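// Illustrative sketch (not part of the original header): typical lifecycle of
// an affinity mask through the KMP_CPU_* wrappers; the proc id is
// hypothetical.
//   kmp_affin_mask_t *mask;
//   KMP_CPU_ALLOC(mask);
//   KMP_CPU_ZERO(mask);
//   KMP_CPU_SET(3, mask);                     // add logical proc 3
//   if (KMP_CPU_ISSET(3, mask)) { /* ... */ }
//   KMP_CPU_FREE(mask);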
  void *operator new(size_t n);
  void operator delete(void *p);
  void *operator new[](size_t n);
  void operator delete[](void *p);
  virtual void set(int i) {}
  virtual bool is_set(int i) const { return false; }
  virtual void clear(int i) {}
  virtual void zero() {}
  virtual void copy(const Mask *src) {}
  virtual void bitwise_and(const Mask *rhs) {}
  virtual void bitwise_or(const Mask *rhs) {}
  virtual void bitwise_not() {}
  virtual int begin() const { return 0; }
  virtual int end() const { return 0; }
  virtual int next(int previous) const { return 0; }
  virtual int set_process_affinity(bool abort_on_error) const { return -1; }
  virtual int set_system_affinity(bool abort_on_error) const { return -1; }
  virtual int get_system_affinity(bool abort_on_error) { return -1; }
  virtual int get_proc_group() const { return -1; }
  void *operator new(size_t n);
  void operator delete(void *p);
  virtual ~KMPAffinity() = default;
  virtual void determine_capable(const char *env_var) {}
  virtual void bind_thread(int proc) {}
  virtual Mask *allocate_mask() { return nullptr; }
  virtual void deallocate_mask(Mask *m) {}
  virtual Mask *allocate_mask_array(int num) { return nullptr; }
  virtual void deallocate_mask_array(Mask *m) {}
  virtual Mask *index_mask_array(Mask *m, int index) { return nullptr; }
  static void pick_api();
  static void destroy_api();
  virtual api_type get_api_type() const {
  static bool picked_api;
typedef KMPAffinity::Mask kmp_affin_mask_t;
extern KMPAffinity *__kmp_affinity_dispatch;
#define KMP_AFFIN_MASK_PRINT_LEN 1024
enum affinity_top_method {
  affinity_top_method_all = 0,
#if KMP_ARCH_X86 || KMP_ARCH_X86_64
  affinity_top_method_apicid,
  affinity_top_method_x2apicid,
  affinity_top_method_x2apicid_1f,
  affinity_top_method_cpuinfo,
#if KMP_GROUP_AFFINITY
  affinity_top_method_group,
  affinity_top_method_flat,
  affinity_top_method_hwloc,
  affinity_top_method_default
#define affinity_respect_mask_default (-1)
extern enum affinity_type __kmp_affinity_type;
extern kmp_hw_t __kmp_affinity_gran;
extern int __kmp_affinity_gran_levels;
extern int __kmp_affinity_dups;
extern enum affinity_top_method __kmp_affinity_top_method;
extern int __kmp_affinity_compact;
extern int __kmp_affinity_offset;
extern int __kmp_affinity_verbose;
extern int __kmp_affinity_warnings;
extern int __kmp_affinity_respect_mask;
extern char *__kmp_affinity_proclist;
extern kmp_affin_mask_t *__kmp_affinity_masks;
extern unsigned __kmp_affinity_num_masks;
extern void __kmp_affinity_bind_thread(int which);
extern kmp_affin_mask_t *__kmp_affin_fullMask;
extern kmp_affin_mask_t *__kmp_affin_origMask;
extern char *__kmp_cpuinfo_file;
extern bool __kmp_affin_reset;
typedef enum kmp_proc_bind_t {
typedef struct kmp_nested_proc_bind_t {
  kmp_proc_bind_t *bind_types;
} kmp_nested_proc_bind_t;
extern kmp_nested_proc_bind_t __kmp_nested_proc_bind;
extern kmp_proc_bind_t __kmp_teams_proc_bind;
extern int __kmp_display_affinity;
extern char *__kmp_affinity_format;
static const size_t KMP_AFFINITY_FORMAT_SIZE = 512;
extern int __kmp_tool;
extern char *__kmp_tool_libraries;
#if KMP_AFFINITY_SUPPORTED
#define KMP_PLACE_ALL (-1)
#define KMP_PLACE_UNDEFINED (-2)
#define KMP_AFFINITY_NON_PROC_BIND \
  ((__kmp_nested_proc_bind.bind_types[0] == proc_bind_false || \
    __kmp_nested_proc_bind.bind_types[0] == proc_bind_intel) && \
   (__kmp_affinity_num_masks > 0 || __kmp_affinity_type == affinity_balanced))
extern int __kmp_affinity_num_places;
typedef enum kmp_cancel_kind_t {
typedef struct kmp_hws_item {
extern kmp_hws_item_t __kmp_hws_socket;
extern kmp_hws_item_t __kmp_hws_die;
extern kmp_hws_item_t __kmp_hws_node;
extern kmp_hws_item_t __kmp_hws_tile;
extern kmp_hws_item_t __kmp_hws_core;
extern kmp_hws_item_t __kmp_hws_proc;
extern int __kmp_hws_requested;
extern int __kmp_hws_abs_flag;
#define KMP_PAD(type, sz) \
  (sizeof(type) + (sz - ((sizeof(type) - 1) % (sz)) - 1))
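// Illustrative sketch (not part of the original header): KMP_PAD rounds a
// type's size up to a multiple of sz, so padded unions land on cache-line
// boundaries. With a hypothetical 24-byte type T and CACHE_LINE == 64:
//   KMP_PAD(T, 64) == 24 + (64 - ((24 - 1) % 64) - 1) == 24 + 40 == 64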
#define KMP_GTID_DNE (-2)
#define KMP_GTID_SHUTDOWN (-3)
#define KMP_GTID_MONITOR (-4)
#define KMP_GTID_UNKNOWN (-5)
#define KMP_GTID_MIN (-6)
typedef uintptr_t omp_uintptr_t;
  omp_atk_sync_hint = 1,
  omp_atk_alignment = 2,
  omp_atk_pool_size = 4,
  omp_atk_fallback = 5,
  omp_atk_partition = 8
} omp_alloctrait_key_t;
  omp_atv_contended = 3,
  omp_atv_uncontended = 4,
  omp_atv_serialized = 5,
  omp_atv_sequential = omp_atv_serialized,
  omp_atv_default_mem_fb = 11,
  omp_atv_null_fb = 12,
  omp_atv_abort_fb = 13,
  omp_atv_allocator_fb = 14,
  omp_atv_environment = 15,
  omp_atv_nearest = 16,
  omp_atv_blocked = 17,
  omp_atv_interleaved = 18
} omp_alloctrait_value_t;
#define omp_atv_default ((omp_uintptr_t)-1)
typedef void *omp_memspace_handle_t;
extern omp_memspace_handle_t const omp_default_mem_space;
extern omp_memspace_handle_t const omp_large_cap_mem_space;
extern omp_memspace_handle_t const omp_const_mem_space;
extern omp_memspace_handle_t const omp_high_bw_mem_space;
extern omp_memspace_handle_t const omp_low_lat_mem_space;
extern omp_memspace_handle_t const llvm_omp_target_host_mem_space;
extern omp_memspace_handle_t const llvm_omp_target_shared_mem_space;
extern omp_memspace_handle_t const llvm_omp_target_device_mem_space;
  omp_alloctrait_key_t key;
typedef void *omp_allocator_handle_t;
extern omp_allocator_handle_t const omp_null_allocator;
extern omp_allocator_handle_t const omp_default_mem_alloc;
extern omp_allocator_handle_t const omp_large_cap_mem_alloc;
extern omp_allocator_handle_t const omp_const_mem_alloc;
extern omp_allocator_handle_t const omp_high_bw_mem_alloc;
extern omp_allocator_handle_t const omp_low_lat_mem_alloc;
extern omp_allocator_handle_t const omp_cgroup_mem_alloc;
extern omp_allocator_handle_t const omp_pteam_mem_alloc;
extern omp_allocator_handle_t const omp_thread_mem_alloc;
extern omp_allocator_handle_t const llvm_omp_target_host_mem_alloc;
extern omp_allocator_handle_t const llvm_omp_target_shared_mem_alloc;
extern omp_allocator_handle_t const llvm_omp_target_device_mem_alloc;
extern omp_allocator_handle_t const kmp_max_mem_alloc;
extern omp_allocator_handle_t __kmp_def_allocator;
extern int __kmp_memkind_available;
typedef omp_memspace_handle_t kmp_memspace_t;
typedef struct kmp_allocator_t {
  omp_memspace_handle_t memspace;
  omp_alloctrait_value_t fb;
  kmp_allocator_t *fb_data;
  kmp_uint64 pool_size;
  kmp_uint64 pool_used;
extern omp_allocator_handle_t __kmpc_init_allocator(int gtid,
                                                    omp_memspace_handle_t,
                                                    int ntraits,
                                                    omp_alloctrait_t traits[]);
extern void __kmpc_destroy_allocator(int gtid, omp_allocator_handle_t al);
extern void __kmpc_set_default_allocator(int gtid, omp_allocator_handle_t al);
extern omp_allocator_handle_t __kmpc_get_default_allocator(int gtid);
extern void *__kmpc_alloc(int gtid, size_t sz, omp_allocator_handle_t al);
extern void *__kmpc_aligned_alloc(int gtid, size_t align, size_t sz,
                                  omp_allocator_handle_t al);
extern void *__kmpc_calloc(int gtid, size_t nmemb, size_t sz,
                           omp_allocator_handle_t al);
extern void *__kmpc_realloc(int gtid, void *ptr, size_t sz,
                            omp_allocator_handle_t al,
                            omp_allocator_handle_t free_al);
extern void __kmpc_free(int gtid, void *ptr, omp_allocator_handle_t al);
extern void *__kmp_alloc(int gtid, size_t align, size_t sz,
                         omp_allocator_handle_t al);
extern void *__kmp_calloc(int gtid, size_t align, size_t nmemb, size_t sz,
                          omp_allocator_handle_t al);
extern void *__kmp_realloc(int gtid, void *ptr, size_t sz,
                           omp_allocator_handle_t al,
                           omp_allocator_handle_t free_al);
extern void ___kmpc_free(int gtid, void *ptr, omp_allocator_handle_t al);
extern void __kmp_init_memkind();
extern void __kmp_fini_memkind();
extern void __kmp_init_target_mem();
#define KMP_UINT64_MAX \
  (~((kmp_uint64)1 << ((sizeof(kmp_uint64) * (1 << 3)) - 1)))
#define KMP_MIN_NTH 1
#if defined(PTHREAD_THREADS_MAX) && PTHREAD_THREADS_MAX < INT_MAX
#define KMP_MAX_NTH PTHREAD_THREADS_MAX
#define KMP_MAX_NTH INT_MAX
#ifdef PTHREAD_STACK_MIN
#define KMP_MIN_STKSIZE PTHREAD_STACK_MIN
#define KMP_MIN_STKSIZE ((size_t)(32 * 1024))
#define KMP_MAX_STKSIZE (~((size_t)1 << ((sizeof(size_t) * (1 << 3)) - 1)))
#define KMP_DEFAULT_STKSIZE ((size_t)(2 * 1024 * 1024))
#elif KMP_ARCH_X86_64
#define KMP_DEFAULT_STKSIZE ((size_t)(4 * 1024 * 1024))
#define KMP_BACKUP_STKSIZE ((size_t)(2 * 1024 * 1024))
#define KMP_DEFAULT_STKSIZE ((size_t)(1024 * 1024))
#define KMP_DEFAULT_MALLOC_POOL_INCR ((size_t)(1024 * 1024))
#define KMP_MIN_MALLOC_POOL_INCR ((size_t)(4 * 1024))
#define KMP_MAX_MALLOC_POOL_INCR \
  (~((size_t)1 << ((sizeof(size_t) * (1 << 3)) - 1)))
#define KMP_MIN_STKOFFSET (0)
#define KMP_MAX_STKOFFSET KMP_MAX_STKSIZE
#define KMP_DEFAULT_STKOFFSET KMP_MIN_STKOFFSET
#define KMP_DEFAULT_STKOFFSET CACHE_LINE
#define KMP_MIN_STKPADDING (0)
#define KMP_MAX_STKPADDING (2 * 1024 * 1024)
#define KMP_BLOCKTIME_MULTIPLIER \
  (1000) /* number of blocktime units per second */
#define KMP_MIN_BLOCKTIME (0)
#define KMP_MAX_BLOCKTIME \
  (INT_MAX) /* must be this large for "infinite" setting to work */
#define KMP_DEFAULT_BLOCKTIME (__kmp_is_hybrid_cpu() ? (0) : (200))
#define KMP_DEFAULT_MONITOR_STKSIZE ((size_t)(64 * 1024))
#define KMP_MIN_MONITOR_WAKEUPS (1)
#define KMP_MAX_MONITOR_WAKEUPS (1000)
#define KMP_WAKEUPS_FROM_BLOCKTIME(blocktime, monitor_wakeups) \
  (((blocktime) == KMP_MAX_BLOCKTIME)   ? (monitor_wakeups) \
   : ((blocktime) == KMP_MIN_BLOCKTIME) ? KMP_MAX_MONITOR_WAKEUPS \
   : ((monitor_wakeups) > (KMP_BLOCKTIME_MULTIPLIER / (blocktime))) \
       ? (monitor_wakeups) \
       : (KMP_BLOCKTIME_MULTIPLIER) / (blocktime))
#define KMP_INTERVALS_FROM_BLOCKTIME(blocktime, monitor_wakeups) \
  (((blocktime) + (KMP_BLOCKTIME_MULTIPLIER / (monitor_wakeups)) - 1) / \
   (KMP_BLOCKTIME_MULTIPLIER / (monitor_wakeups)))
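// Illustrative sketch (not part of the original header): with a hypothetical
// blocktime of 200 ms and 8 monitor wakeups per second,
//   KMP_INTERVALS_FROM_BLOCKTIME(200, 8)
//     == (200 + (1000 / 8) - 1) / (1000 / 8)
//     == 324 / 125 == 2 monitor intervals before a thread goes to sleep.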
#define KMP_BLOCKTIME(team, tid) \
  (get__bt_set(team, tid) ? get__blocktime(team, tid) : __kmp_dflt_blocktime)
#if KMP_OS_UNIX && (KMP_ARCH_X86 || KMP_ARCH_X86_64)
extern kmp_uint64 __kmp_ticks_per_msec;
#if KMP_COMPILER_ICC || KMP_COMPILER_ICX
#define KMP_NOW() ((kmp_uint64)_rdtsc())
#define KMP_NOW() __kmp_hardware_timestamp()
#define KMP_NOW_MSEC() (KMP_NOW() / __kmp_ticks_per_msec)
#define KMP_BLOCKTIME_INTERVAL(team, tid) \
  (KMP_BLOCKTIME(team, tid) * __kmp_ticks_per_msec)
#define KMP_BLOCKING(goal, count) ((goal) > KMP_NOW())
extern kmp_uint64 __kmp_now_nsec();
#define KMP_NOW() __kmp_now_nsec()
#define KMP_NOW_MSEC() (KMP_NOW() / KMP_USEC_PER_SEC)
#define KMP_BLOCKTIME_INTERVAL(team, tid) \
  (KMP_BLOCKTIME(team, tid) * KMP_USEC_PER_SEC)
#define KMP_BLOCKING(goal, count) ((count) % 1000 != 0 || (goal) > KMP_NOW())
#define KMP_MIN_STATSCOLS 40
#define KMP_MAX_STATSCOLS 4096
#define KMP_DEFAULT_STATSCOLS 80
#define KMP_MIN_INTERVAL 0
#define KMP_MAX_INTERVAL (INT_MAX - 1)
#define KMP_DEFAULT_INTERVAL 0
#define KMP_MIN_CHUNK 1
#define KMP_MAX_CHUNK (INT_MAX - 1)
#define KMP_DEFAULT_CHUNK 1
#define KMP_MIN_DISP_NUM_BUFF 1
#define KMP_DFLT_DISP_NUM_BUFF 7
#define KMP_MAX_DISP_NUM_BUFF 4096
#define KMP_MAX_ORDERED 8
#define KMP_MAX_FIELDS 32
#define KMP_MAX_BRANCH_BITS 31
#define KMP_MAX_ACTIVE_LEVELS_LIMIT INT_MAX
#define KMP_MAX_DEFAULT_DEVICE_LIMIT INT_MAX
#define KMP_MAX_TASK_PRIORITY_LIMIT INT_MAX
#if KMP_ARCH_X86 || KMP_ARCH_X86_64
#define KMP_TLS_GTID_MIN 5
#define KMP_TLS_GTID_MIN INT_MAX
#define KMP_MASTER_TID(tid) (0 == (tid))
#define KMP_WORKER_TID(tid) (0 != (tid))
#define KMP_MASTER_GTID(gtid) (0 == __kmp_tid_from_gtid((gtid)))
#define KMP_WORKER_GTID(gtid) (0 != __kmp_tid_from_gtid((gtid)))
#define KMP_INITIAL_GTID(gtid) (0 == (gtid))
#define TRUE (!FALSE)
#define KMP_INIT_WAIT 64U
#define KMP_NEXT_WAIT 32U
#define KMP_INIT_WAIT 1024U
#define KMP_NEXT_WAIT 512U
#define KMP_INIT_WAIT 1024U
#define KMP_NEXT_WAIT 512U
#elif KMP_OS_DRAGONFLY
#define KMP_INIT_WAIT 1024U
#define KMP_NEXT_WAIT 512U
#define KMP_INIT_WAIT 1024U
#define KMP_NEXT_WAIT 512U
#define KMP_INIT_WAIT 1024U
#define KMP_NEXT_WAIT 512U
#define KMP_INIT_WAIT 1024U
#define KMP_NEXT_WAIT 512U
#define KMP_INIT_WAIT 1024U
#define KMP_NEXT_WAIT 512U
#if KMP_ARCH_X86 || KMP_ARCH_X86_64
typedef struct kmp_cpuid {
typedef struct kmp_cpuinfo_flags_t {
  unsigned hybrid : 1;
  unsigned reserved : 29;
} kmp_cpuinfo_flags_t;
typedef struct kmp_cpuinfo {
  kmp_cpuinfo_flags_t flags;
  kmp_uint64 frequency;
  char name[3 * sizeof(kmp_cpuid_t)];
extern void __kmp_query_cpuid(kmp_cpuinfo_t *p);
static inline void __kmp_x86_cpuid(int leaf, int subleaf,
                                   struct kmp_cpuid *p) {
  __asm__ __volatile__("cpuid"
                       : "=a"(p->eax), "=b"(p->ebx), "=c"(p->ecx), "=d"(p->edx)
                       : "a"(leaf), "c"(subleaf));
}
static inline void __kmp_load_x87_fpu_control_word(const kmp_int16 *p) {
  __asm__ __volatile__("fldcw %0" : : "m"(*p));
}
static inline void __kmp_store_x87_fpu_control_word(kmp_int16 *p) {
  __asm__ __volatile__("fstcw %0" : "=m"(*p));
}
static inline void __kmp_clear_x87_fpu_status_word() {
  struct x87_fpu_state {
  struct x87_fpu_state fpu_state = {0, 0, 0, 0, 0, 0, 0};
  __asm__ __volatile__("fstenv %0\n\t"
                       "andw $0x7f00, %1\n\t"
                       : "+m"(fpu_state), "+m"(fpu_state.sw));
  __asm__ __volatile__("fnclex");
static inline void __kmp_load_mxcsr(const kmp_uint32 *p) { _mm_setcsr(*p); }
static inline void __kmp_store_mxcsr(kmp_uint32 *p) { *p = _mm_getcsr(); }
static inline void __kmp_load_mxcsr(const kmp_uint32 *p) {}
static inline void __kmp_store_mxcsr(kmp_uint32 *p) { *p = 0; }
extern void __kmp_x86_cpuid(int mode, int mode2, struct kmp_cpuid *p);
extern void __kmp_load_x87_fpu_control_word(const kmp_int16 *p);
extern void __kmp_store_x87_fpu_control_word(kmp_int16 *p);
extern void __kmp_clear_x87_fpu_status_word();
static inline void __kmp_load_mxcsr(const kmp_uint32 *p) { _mm_setcsr(*p); }
static inline void __kmp_store_mxcsr(kmp_uint32 *p) { *p = _mm_getcsr(); }
#define KMP_X86_MXCSR_MASK 0xffffffc0
#if KMP_HAVE_WAITPKG_INTRINSICS
#if KMP_HAVE_IMMINTRIN_H
#include <immintrin.h>
#elif KMP_HAVE_INTRIN_H
KMP_ATTRIBUTE_TARGET_WAITPKG
static inline int __kmp_tpause(uint32_t hint, uint64_t counter) {
#if !KMP_HAVE_WAITPKG_INTRINSICS
  uint32_t timeHi = uint32_t(counter >> 32);
  uint32_t timeLo = uint32_t(counter & 0xffffffff);
  __asm__ volatile("#tpause\n.byte 0x66, 0x0F, 0xAE, 0xF1\n"
                   : "a"(timeLo), "d"(timeHi), "c"(hint)
  return _tpause(hint, counter);
KMP_ATTRIBUTE_TARGET_WAITPKG
static inline void __kmp_umonitor(void *cacheline) {
#if !KMP_HAVE_WAITPKG_INTRINSICS
  __asm__ volatile("# umonitor\n.byte 0xF3, 0x0F, 0xAE, 0x01 "
  _umonitor(cacheline);
KMP_ATTRIBUTE_TARGET_WAITPKG
static inline int __kmp_umwait(uint32_t hint, uint64_t counter) {
#if !KMP_HAVE_WAITPKG_INTRINSICS
  uint32_t timeHi = uint32_t(counter >> 32);
  uint32_t timeLo = uint32_t(counter & 0xffffffff);
  __asm__ volatile("#umwait\n.byte 0xF2, 0x0F, 0xAE, 0xF1\n"
                   : "a"(timeLo), "d"(timeHi), "c"(hint)
  return _umwait(hint, counter);
#include <pmmintrin.h>
__attribute__((target("sse3")))
static inline void
__kmp_mm_monitor(void *cacheline, unsigned extensions, unsigned hints) {
  _mm_monitor(cacheline, extensions, hints);
}
__attribute__((target("sse3")))
static inline void
__kmp_mm_mwait(unsigned extensions, unsigned hints) {
  _mm_mwait(extensions, hints);
}
extern void __kmp_x86_pause(void);
static inline void __kmp_x86_pause(void) { _mm_delay_32(300); }
static inline void __kmp_x86_pause(void) { _mm_pause(); }
#define KMP_CPU_PAUSE() __kmp_x86_pause()
#define KMP_PPC64_PRI_LOW() __asm__ volatile("or 1, 1, 1")
#define KMP_PPC64_PRI_MED() __asm__ volatile("or 2, 2, 2")
#define KMP_PPC64_PRI_LOC_MB() __asm__ volatile("" : : : "memory")
#define KMP_CPU_PAUSE() \
  do { \
    KMP_PPC64_PRI_LOW(); \
    KMP_PPC64_PRI_MED(); \
    KMP_PPC64_PRI_LOC_MB(); \
  } while (0)
#define KMP_CPU_PAUSE()
#define KMP_INIT_YIELD(count) \
  { (count) = __kmp_yield_init; }
#define KMP_INIT_BACKOFF(time) \
  { (time) = __kmp_pause_init; }
#define KMP_OVERSUBSCRIBED \
  (TCR_4(__kmp_nth) > (__kmp_avail_proc ? __kmp_avail_proc : __kmp_xproc))
#define KMP_TRY_YIELD \
  ((__kmp_use_yield == 1) || (__kmp_use_yield == 2 && (KMP_OVERSUBSCRIBED)))
#define KMP_TRY_YIELD_OVERSUB \
  ((__kmp_use_yield == 1 || __kmp_use_yield == 2) && (KMP_OVERSUBSCRIBED))
#define KMP_YIELD(cond) \
  if ((cond) && (KMP_TRY_YIELD)) \
    __kmp_yield();
#define KMP_YIELD_OVERSUB() \
  if ((KMP_TRY_YIELD_OVERSUB)) \
    __kmp_yield();
#define KMP_YIELD_SPIN(count) \
  if (KMP_TRY_YIELD) { \
    __kmp_yield(); \
    (count) = __kmp_yield_next; \
  }
#define KMP_YIELD_OVERSUB_ELSE_SPIN(count, time) \
  if (__kmp_tpause_enabled) { \
    if (KMP_OVERSUBSCRIBED) { \
      __kmp_tpause(0, (time)); \
    } else { \
      __kmp_tpause(__kmp_tpause_hint, (time)); \
    } \
  } else if ((KMP_TRY_YIELD_OVERSUB)) { \
    __kmp_yield(); \
  } else if (__kmp_use_yield == 1) { \
    (count) = __kmp_yield_next; \
  }
#define KMP_YIELD_OVERSUB_ELSE_SPIN(count, time) \
  if ((KMP_TRY_YIELD_OVERSUB)) \
    __kmp_yield(); \
  else if (__kmp_use_yield == 1) { \
    (count) = __kmp_yield_next; \
  }
  ct_ordered_in_parallel,
#define IS_CONS_TYPE_ORDERED(ct) ((ct) == ct_pdo_ordered)
  enum cons_type type;
  int p_top, w_top, s_top;
  int stack_size, stack_top;
  struct cons_data *stack_data;
struct kmp_region_info {
  int offset[KMP_MAX_FIELDS];
  int length[KMP_MAX_FIELDS];
typedef HANDLE kmp_thread_t;
typedef DWORD kmp_key_t;
typedef pthread_t kmp_thread_t;
typedef pthread_key_t kmp_key_t;
extern kmp_key_t __kmp_gtid_threadprivate_key;
typedef struct kmp_sys_info {
typedef int kmp_itt_mark_t;
#define KMP_ITT_DEBUG 0
typedef kmp_int32 kmp_critical_name[8];
typedef void (*kmpc_micro)(kmp_int32 *global_tid, kmp_int32 *bound_tid, ...);
typedef void (*kmpc_micro_bound)(kmp_int32 *bound_tid, kmp_int32 *bound_nth,
typedef void *(*kmpc_ctor)(void *);
typedef void *(*kmpc_cctor)(void *, void *);
typedef void *(*kmpc_ctor_vec)(void *, size_t);
typedef void *(*kmpc_cctor_vec)(void *, void *, size_t);
typedef struct kmp_cached_addr {
  void ***compiler_cache;
  struct kmp_cached_addr *next;
struct private_data {
  struct private_data *next;
struct private_common {
  struct private_common *next;
  struct private_common *link;
struct shared_common {
  struct shared_common *next;
  struct private_data *pod_init;
#define KMP_HASH_TABLE_LOG2 9
#define KMP_HASH_TABLE_SIZE (1 << KMP_HASH_TABLE_LOG2)
#define KMP_HASH_SHIFT 3
#define KMP_HASH(x) \
  ((((kmp_uintptr_t)x) >> KMP_HASH_SHIFT) & (KMP_HASH_TABLE_SIZE - 1))
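// Illustrative sketch (not part of the original header): KMP_HASH buckets a
// threadprivate variable's address into the 512-entry common table; the
// address is hypothetical.
//   kmp_uintptr_t addr = 0x7fffdeadbe40;
//   size_t bucket = KMP_HASH(addr); // (addr >> 3) & 511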
struct common_table {
  struct private_common *data[KMP_HASH_TABLE_SIZE];
struct shared_table {
  struct shared_common *data[KMP_HASH_TABLE_SIZE];
#if KMP_USE_HIER_SCHED
typedef struct kmp_hier_private_bdata_t {
  kmp_int32 num_active;
  kmp_uint64 wait_val[2];
} kmp_hier_private_bdata_t;
typedef struct kmp_sched_flags {
  unsigned ordered : 1;
  unsigned nomerge : 1;
  unsigned contains_last : 1;
#if KMP_USE_HIER_SCHED
  unsigned use_hier : 1;
  unsigned unused : 28;
  unsigned unused : 29;
KMP_BUILD_ASSERT(sizeof(kmp_sched_flags_t) == 4);
#if KMP_STATIC_STEAL_ENABLED
typedef struct KMP_ALIGN_CACHE dispatch_private_info32 {
  kmp_lock_t *steal_lock;
  struct KMP_ALIGN(32) {
  kmp_uint32 ordered_lower;
  kmp_uint32 ordered_upper;
  kmp_int32 last_upper;
} dispatch_private_info32_t;
typedef struct KMP_ALIGN_CACHE dispatch_private_info64 {
  kmp_lock_t *steal_lock;
  struct KMP_ALIGN(32) {
  kmp_uint64 ordered_lower;
  kmp_uint64 ordered_upper;
  kmp_int64 last_upper;
} dispatch_private_info64_t;
typedef struct KMP_ALIGN_CACHE dispatch_private_info32 {
  kmp_uint32 ordered_lower;
  kmp_uint32 ordered_upper;
  kmp_int32 last_upper;
} dispatch_private_info32_t;
typedef struct KMP_ALIGN_CACHE dispatch_private_info64 {
  kmp_uint64 ordered_lower;
  kmp_uint64 ordered_upper;
  kmp_int64 last_upper;
} dispatch_private_info64_t;
typedef struct KMP_ALIGN_CACHE dispatch_private_info {
  union private_info {
    dispatch_private_info32_t p32;
    dispatch_private_info64_t p64;
  kmp_sched_flags_t flags;
  std::atomic<kmp_uint32> steal_flag;
  kmp_int32 ordered_bumped;
  struct dispatch_private_info *next;
  kmp_int32 type_size;
#if KMP_USE_HIER_SCHED
  enum cons_type pushed_ws;
} dispatch_private_info_t;
typedef struct dispatch_shared_info32 {
  volatile kmp_uint32 iteration;
  volatile kmp_int32 num_done;
  volatile kmp_uint32 ordered_iteration;
  kmp_int32 ordered_dummy[KMP_MAX_ORDERED - 1];
} dispatch_shared_info32_t;
typedef struct dispatch_shared_info64 {
  volatile kmp_uint64 iteration;
  volatile kmp_int64 num_done;
  volatile kmp_uint64 ordered_iteration;
  kmp_int64 ordered_dummy[KMP_MAX_ORDERED - 3];
} dispatch_shared_info64_t;
typedef struct dispatch_shared_info {
  dispatch_shared_info32_t s32;
  dispatch_shared_info64_t s64;
  volatile kmp_uint32 buffer_index;
  volatile kmp_int32 doacross_buf_idx;
  volatile kmp_uint32 *doacross_flags;
  kmp_int32 doacross_num_done;
#if KMP_USE_HIER_SCHED
} dispatch_shared_info_t;
typedef struct kmp_disp {
  void (*th_deo_fcn)(int *gtid, int *cid, ident_t *);
  void (*th_dxo_fcn)(int *gtid, int *cid, ident_t *);
  dispatch_shared_info_t *th_dispatch_sh_current;
  dispatch_private_info_t *th_dispatch_pr_current;
  dispatch_private_info_t *th_disp_buffer;
  kmp_uint32 th_disp_index;
  kmp_int32 th_doacross_buf_idx;
  volatile kmp_uint32 *th_doacross_flags;
  kmp_int64 *th_doacross_info;
#if KMP_USE_INTERNODE_ALIGNMENT
  char more_padding[INTERNODE_CACHE_LINE];
#define KMP_INIT_BARRIER_STATE 0
#define KMP_BARRIER_SLEEP_BIT 0
#define KMP_BARRIER_UNUSED_BIT 1
#define KMP_BARRIER_BUMP_BIT 2
#define KMP_BARRIER_SLEEP_STATE (1 << KMP_BARRIER_SLEEP_BIT)
#define KMP_BARRIER_UNUSED_STATE (1 << KMP_BARRIER_UNUSED_BIT)
#define KMP_BARRIER_STATE_BUMP (1 << KMP_BARRIER_BUMP_BIT)
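// Illustrative sketch (not part of the original header): a barrier flag word
// carries a per-phase count in the upper bits (advanced by
// KMP_BARRIER_STATE_BUMP) plus status bits below; the value is hypothetical.
//   kmp_uint64 b_go = KMP_INIT_BARRIER_STATE;
//   b_go += KMP_BARRIER_STATE_BUMP;                         // next phase
//   bool sleeping = (b_go & KMP_BARRIER_SLEEP_STATE) != 0;  // false here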
#if (KMP_BARRIER_SLEEP_BIT >= KMP_BARRIER_BUMP_BIT)
#error "Barrier sleep bit must be smaller than barrier bump bit"
#if (KMP_BARRIER_UNUSED_BIT >= KMP_BARRIER_BUMP_BIT)
#error "Barrier unused bit must be smaller than barrier bump bit"
#define KMP_BARRIER_NOT_WAITING 0
#define KMP_BARRIER_OWN_FLAG \
  1 // Normal state; worker waiting on own b_go flag in release
#define KMP_BARRIER_PARENT_FLAG \
  2 // Special state; worker waiting on parent's b_go flag in release
#define KMP_BARRIER_SWITCH_TO_OWN_FLAG \
  3 // Special state; tells worker to shift from parent to own b_go
#define KMP_BARRIER_SWITCHING \
  4 // Special state; worker resets appropriate flag on wake-up
#define KMP_NOT_SAFE_TO_REAP \
  0 // Thread th_reap_state: not safe to reap (tasking)
#define KMP_SAFE_TO_REAP 1
  bs_plain_barrier = 0,
  bs_forkjoin_barrier,
#if KMP_FAST_REDUCTION_BARRIER
  bs_reduction_barrier,
#if !KMP_FAST_REDUCTION_BARRIER
#define bs_reduction_barrier bs_plain_barrier
typedef enum kmp_bar_pat {
  bp_hierarchical_bar = 3,
#define KMP_BARRIER_ICV_PUSH 1
typedef struct kmp_internal_control {
  int serial_nesting_level;
  int max_active_levels;
  kmp_proc_bind_t proc_bind;
  kmp_int32 default_device;
  struct kmp_internal_control *next;
} kmp_internal_control_t;
static inline void copy_icvs(kmp_internal_control_t *dst,
                             kmp_internal_control_t *src) {
typedef struct KMP_ALIGN_CACHE kmp_bstate {
  kmp_internal_control_t th_fixed_icvs;
  volatile kmp_uint64 b_go;
  KMP_ALIGN_CACHE volatile kmp_uint64
      b_arrived; // STATE => task reached synch point
  kmp_uint32 *skip_per_level;
  kmp_uint32 my_level;
  kmp_int32 parent_tid;
  struct kmp_bstate *parent_bar;
  kmp_uint64 leaf_state;
  kmp_uint8 base_leaf_kids;
  kmp_uint8 leaf_kids;
  kmp_uint8 wait_flag;
  kmp_uint8 use_oncore_barrier;
  KMP_ALIGN_CACHE kmp_uint b_worker_arrived;
union KMP_ALIGN_CACHE kmp_barrier_union {
  char b_pad[KMP_PAD(kmp_bstate_t, CACHE_LINE)];
typedef union kmp_barrier_union kmp_balign_t;
union KMP_ALIGN_CACHE kmp_barrier_team_union {
  char b_pad[CACHE_LINE];
  kmp_uint64 b_arrived;
  kmp_uint b_master_arrived;
  kmp_uint b_team_arrived;
typedef union kmp_barrier_team_union kmp_balign_team_t;
typedef struct kmp_win32_mutex {
  CRITICAL_SECTION cs;
typedef struct kmp_win32_cond {
  kmp_win32_mutex_t waiters_count_lock_;
  int wait_generation_count_;
union KMP_ALIGN_CACHE kmp_cond_union {
  char c_pad[CACHE_LINE];
  pthread_cond_t c_cond;
typedef union kmp_cond_union kmp_cond_align_t;
union KMP_ALIGN_CACHE kmp_mutex_union {
  char m_pad[CACHE_LINE];
  pthread_mutex_t m_mutex;
typedef union kmp_mutex_union kmp_mutex_align_t;
typedef struct kmp_desc_base {
  size_t ds_stacksize;
  kmp_thread_t ds_thread;
  volatile int ds_tid;
  volatile int ds_alive;
typedef union KMP_ALIGN_CACHE kmp_desc {
  char ds_pad[KMP_PAD(kmp_desc_base_t, CACHE_LINE)];
typedef struct kmp_local {
  volatile int this_construct;
#if !USE_CMP_XCHG_FOR_BGET
#ifdef USE_QUEUING_LOCK_FOR_BGET
  kmp_lock_t bget_lock;
  kmp_bootstrap_lock_t bget_lock;
  PACKED_REDUCTION_METHOD_T packed_reduction_method;
#define KMP_CHECK_UPDATE(a, b) \
  if ((a) != (b)) \
  (a) = (b)
#define KMP_CHECK_UPDATE_SYNC(a, b) \
  if ((a) != (b)) \
  TCW_SYNC_PTR((a), (b))
#define get__blocktime(xteam, xtid) \
  ((xteam)->t.t_threads[(xtid)]->th.th_current_task->td_icvs.blocktime)
#define get__bt_set(xteam, xtid) \
  ((xteam)->t.t_threads[(xtid)]->th.th_current_task->td_icvs.bt_set)
#define get__bt_intervals(xteam, xtid) \
  ((xteam)->t.t_threads[(xtid)]->th.th_current_task->td_icvs.bt_intervals)
#define get__dynamic_2(xteam, xtid) \
  ((xteam)->t.t_threads[(xtid)]->th.th_current_task->td_icvs.dynamic)
#define get__nproc_2(xteam, xtid) \
  ((xteam)->t.t_threads[(xtid)]->th.th_current_task->td_icvs.nproc)
#define get__sched_2(xteam, xtid) \
  ((xteam)->t.t_threads[(xtid)]->th.th_current_task->td_icvs.sched)
#define set__blocktime_team(xteam, xtid, xval) \
  (((xteam)->t.t_threads[(xtid)]->th.th_current_task->td_icvs.blocktime) = \
       (xval))
#define set__bt_intervals_team(xteam, xtid, xval) \
  (((xteam)->t.t_threads[(xtid)]->th.th_current_task->td_icvs.bt_intervals) = \
       (xval))
#define set__bt_set_team(xteam, xtid, xval) \
  (((xteam)->t.t_threads[(xtid)]->th.th_current_task->td_icvs.bt_set) = (xval))
#define set__dynamic(xthread, xval) \
  (((xthread)->th.th_current_task->td_icvs.dynamic) = (xval))
#define get__dynamic(xthread) \
  (((xthread)->th.th_current_task->td_icvs.dynamic) ? (FTN_TRUE) : (FTN_FALSE))
#define set__nproc(xthread, xval) \
  (((xthread)->th.th_current_task->td_icvs.nproc) = (xval))
#define set__thread_limit(xthread, xval) \
  (((xthread)->th.th_current_task->td_icvs.thread_limit) = (xval))
#define set__max_active_levels(xthread, xval) \
  (((xthread)->th.th_current_task->td_icvs.max_active_levels) = (xval))
#define get__max_active_levels(xthread) \
  ((xthread)->th.th_current_task->td_icvs.max_active_levels)
#define set__sched(xthread, xval) \
  (((xthread)->th.th_current_task->td_icvs.sched) = (xval))
#define set__proc_bind(xthread, xval) \
  (((xthread)->th.th_current_task->td_icvs.proc_bind) = (xval))
#define get__proc_bind(xthread) \
  ((xthread)->th.th_current_task->td_icvs.proc_bind)
typedef enum kmp_tasking_mode {
  tskm_immediate_exec = 0,
  tskm_extra_barrier = 1,
  tskm_task_teams = 2,
} kmp_tasking_mode_t;
extern kmp_tasking_mode_t
    __kmp_tasking_mode; /* determines how/when to execute tasks */
extern int __kmp_task_stealing_constraint;
extern int __kmp_enable_task_throttling;
extern kmp_int32 __kmp_default_device;
extern kmp_int32 __kmp_max_task_priority;
extern kmp_uint64 __kmp_taskloop_min_tasks;
#define KMP_TASK_TO_TASKDATA(task) (((kmp_taskdata_t *)task) - 1)
#define KMP_TASKDATA_TO_TASK(taskdata) (kmp_task_t *)(taskdata + 1)
#define KMP_TASKING_ENABLED(task_team) \
  (TRUE == TCR_SYNC_4((task_team)->tt.tt_found_tasks))
typedef kmp_int32 (*kmp_routine_entry_t)(kmp_int32, void *);
typedef union kmp_cmplrdata {
typedef struct kmp_task {
  kmp_cmplrdata_t data2;
typedef struct kmp_taskgroup {
  std::atomic<kmp_int32> count;
  std::atomic<kmp_int32>
      cancel_request; // request for cancellation of this taskgroup
  struct kmp_taskgroup *parent;
  kmp_int32 reduce_num_data;
  uintptr_t *gomp_data;
typedef union kmp_depnode kmp_depnode_t;
typedef struct kmp_depnode_list kmp_depnode_list_t;
typedef struct kmp_dephash_entry kmp_dephash_entry_t;
#define KMP_DEP_IN 0x1
#define KMP_DEP_OUT 0x2
#define KMP_DEP_INOUT 0x3
#define KMP_DEP_MTX 0x4
#define KMP_DEP_SET 0x8
#define KMP_DEP_ALL 0x80
typedef struct kmp_depend_info {
  kmp_intptr_t base_addr;
  unsigned unused : 3;
struct kmp_depnode_list {
  kmp_depnode_t *node;
  kmp_depnode_list_t *next;
#define MAX_MTX_DEPS 4
typedef struct kmp_base_depnode {
  kmp_depnode_list_t *successors;
  kmp_lock_t *mtx_locks[MAX_MTX_DEPS];
  kmp_int32 mtx_num_locks;
#if KMP_SUPPORT_GRAPH_OUTPUT
  std::atomic<kmp_int32> npredecessors;
  std::atomic<kmp_int32> nrefs;
} kmp_base_depnode_t;
union KMP_ALIGN_CACHE kmp_depnode {
  char dn_pad[KMP_PAD(kmp_base_depnode_t, CACHE_LINE)];
  kmp_base_depnode_t dn;
struct kmp_dephash_entry {
  kmp_depnode_t *last_out;
  kmp_depnode_list_t *last_set;
  kmp_depnode_list_t *prev_set;
  kmp_uint8 last_flag;
  kmp_lock_t *mtx_lock;
  kmp_dephash_entry_t *next_in_bucket;
typedef struct kmp_dephash {
  kmp_dephash_entry_t **buckets;
  kmp_depnode_t *last_all;
  kmp_uint32 nelements;
  kmp_uint32 nconflicts;
typedef struct kmp_task_affinity_info {
  kmp_intptr_t base_addr;
  kmp_int32 reserved : 30;
} kmp_task_affinity_info_t;
typedef enum kmp_event_type_t {
  KMP_EVENT_UNINITIALIZED = 0,
  KMP_EVENT_ALLOW_COMPLETION = 1
  kmp_event_type_t type;
  kmp_tas_lock_t lock;
#ifdef BUILD_TIED_TASK_STACK
typedef struct kmp_stack_block {
  kmp_taskdata_t *sb_block[TASK_STACK_BLOCK_SIZE];
  struct kmp_stack_block *sb_next;
  struct kmp_stack_block *sb_prev;
typedef struct kmp_task_stack {
  kmp_stack_block_t ts_first_block;
  kmp_taskdata_t **ts_top;
  kmp_int32 ts_entries;
typedef struct kmp_tasking_flags {
  unsigned tiedness : 1;
  unsigned merged_if0 : 1;
  unsigned destructors_thunk : 1;
  unsigned priority_specified : 1;
  unsigned detachable : 1;
  unsigned hidden_helper : 1;
  unsigned reserved : 8;
  unsigned tasktype : 1;
  unsigned task_serial : 1;
  unsigned tasking_ser : 1;
  unsigned team_serial : 1;
  unsigned started : 1;
  unsigned executing : 1;
  unsigned complete : 1;
  unsigned native : 1;
  unsigned reserved31 : 7;
} kmp_tasking_flags_t;
struct kmp_taskdata {
  kmp_int32 td_task_id;
  kmp_tasking_flags_t td_flags;
  kmp_team_t *td_team;
  kmp_info_p *td_alloc_thread;
  kmp_taskdata_t *td_parent;
  std::atomic<kmp_int32> td_untied_count;
  kmp_uint32 td_taskwait_counter;
  kmp_int32 td_taskwait_thread;
  KMP_ALIGN_CACHE kmp_internal_control_t
      td_icvs; // Internal control variables for the task
  KMP_ALIGN_CACHE std::atomic<kmp_int32>
      td_allocated_child_tasks;
  std::atomic<kmp_int32>
      td_incomplete_child_tasks;
  kmp_task_team_t *td_task_team;
  size_t td_size_alloc;
#if defined(KMP_GOMP_COMPAT)
  kmp_int32 td_size_loop_bounds;
  kmp_taskdata_t *td_last_tied;
#if defined(KMP_GOMP_COMPAT)
  void (*td_copy_func)(void *, void *);
  kmp_event_t td_allow_completion_event;
  ompt_task_info_t ompt_task_info;
KMP_BUILD_ASSERT(sizeof(kmp_taskdata_t) % sizeof(void *) == 0);
typedef struct kmp_base_thread_data {
  kmp_bootstrap_lock_t td_deque_lock;
  kmp_int32 td_deque_size;
  kmp_uint32 td_deque_head;
  kmp_uint32 td_deque_tail;
  kmp_int32 td_deque_ntasks;
  kmp_int32 td_deque_last_stolen;
#ifdef BUILD_TIED_TASK_STACK
  kmp_task_stack_t td_susp_tied_tasks;
} kmp_base_thread_data_t;
#define TASK_DEQUE_BITS 8
#define INITIAL_TASK_DEQUE_SIZE (1 << TASK_DEQUE_BITS)
#define TASK_DEQUE_SIZE(td) ((td).td_deque_size)
#define TASK_DEQUE_MASK(td) ((td).td_deque_size - 1)
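// Illustrative sketch (not part of the original header): because the deque
// size is a power of two, head/tail indices wrap with TASK_DEQUE_MASK instead
// of a modulo; the values are hypothetical.
//   kmp_base_thread_data_t td;
//   td.td_deque_size = INITIAL_TASK_DEQUE_SIZE;  // 256
//   kmp_uint32 tail = 255;
//   tail = (tail + 1) & TASK_DEQUE_MASK(td);     // wraps to 0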
typedef union KMP_ALIGN_CACHE kmp_thread_data {
  kmp_base_thread_data_t td;
  char td_pad[KMP_PAD(kmp_base_thread_data_t, CACHE_LINE)];
typedef struct kmp_task_pri {
  kmp_thread_data_t td;
typedef struct kmp_base_task_team {
  kmp_bootstrap_lock_t
      tt_threads_lock; /* Lock used to allocate per-thread part of task team */
  kmp_bootstrap_lock_t tt_task_pri_lock;
  kmp_task_pri_t *tt_task_pri_list;
  kmp_task_team_t *tt_next;
  kmp_int32 tt_found_tasks;
  kmp_int32 tt_max_threads;
  kmp_int32 tt_found_proxy_tasks;
  kmp_int32 tt_untied_task_encountered;
  std::atomic<kmp_int32> tt_num_task_pri;
  kmp_int32 tt_hidden_helper_task_encountered;
  std::atomic<kmp_int32> tt_unfinished_threads;
} kmp_base_task_team_t;
union KMP_ALIGN_CACHE kmp_task_team {
  kmp_base_task_team_t tt;
  char tt_pad[KMP_PAD(kmp_base_task_team_t, CACHE_LINE)];
#if (USE_FAST_MEMORY == 3) || (USE_FAST_MEMORY == 5)
typedef struct kmp_free_list {
  void *th_free_list_self;
  void *th_free_list_sync;
  void *th_free_list_other;
#if KMP_NESTED_HOT_TEAMS
typedef struct kmp_hot_team_ptr {
  kmp_team_p *hot_team;
  kmp_int32 hot_team_nth;
} kmp_hot_team_ptr_t;
typedef struct kmp_teams_size {
typedef struct kmp_cg_root {
  kmp_info_p *cg_root;
  kmp_int32 cg_thread_limit;
  kmp_int32 cg_nthreads;
  struct kmp_cg_root *up;
typedef struct KMP_ALIGN_CACHE kmp_base_info {
  kmp_team_p *th_team;
  kmp_root_p *th_root;
  kmp_info_p *th_next_pool;
  kmp_disp_t *th_dispatch;
  kmp_info_p *th_team_master;
  int th_team_serialized;
  microtask_t th_teams_microtask;
  int th_team_bt_intervals;
  kmp_uint64 th_team_bt_intervals;
#if KMP_AFFINITY_SUPPORTED
  kmp_affin_mask_t *th_affin_mask;
  omp_allocator_handle_t th_def_allocator;
#if KMP_NESTED_HOT_TEAMS
  kmp_hot_team_ptr_t *th_hot_teams;
#if KMP_AFFINITY_SUPPORTED
  int th_current_place;
  int th_prev_num_threads;
  kmp_uint64 th_bar_arrive_time;
  kmp_uint64 th_bar_min_time;
  kmp_uint64 th_frame_time;
  kmp_local_t th_local;
  struct private_common *th_pri_head;
  KMP_ALIGN_CACHE kmp_team_p
      *th_serial_team; /* serialized team held in reserve */
  ompt_thread_info_t ompt_thread_info;
  struct common_table *th_pri_common;
  volatile kmp_uint32 th_spin_here;
  volatile void *th_sleep_loc;
  flag_type th_sleep_loc_type;
  kmp_task_team_t *th_task_team;
  kmp_taskdata_t *th_current_task;
  kmp_uint8 th_task_state;
  kmp_uint8 *th_task_state_memo_stack;
  kmp_uint32 th_task_state_top;
  kmp_uint32 th_task_state_stack_sz;
  kmp_uint32 th_reap_state;
  kmp_uint8 th_active_in_pool;
  std::atomic<kmp_uint32> th_used_in_team;
  struct cons_header *th_cons;
#if KMP_USE_HIER_SCHED
  kmp_hier_private_bdata_t *th_hier_bar_data;
  KMP_ALIGN_CACHE kmp_balign_t th_bar[bs_last_barrier];
  KMP_ALIGN_CACHE volatile kmp_int32
      th_next_waiting; /* gtid+1 of next thread on lock wait queue, 0 if none */
#if (USE_FAST_MEMORY == 3) || (USE_FAST_MEMORY == 5)
  kmp_free_list_t th_free_lists[NUM_LISTS];
  kmp_win32_cond_t th_suspend_cv;
  kmp_win32_mutex_t th_suspend_mx;
  std::atomic<int> th_suspend_init;
  kmp_cond_align_t th_suspend_cv;
  kmp_mutex_align_t th_suspend_mx;
  std::atomic<int> th_suspend_init_count;
  kmp_itt_mark_t th_itt_mark_single;
#if KMP_STATS_ENABLED
  kmp_stats_list *th_stats;
  std::atomic<bool> th_blocking;
  kmp_cg_root_t *th_cg_roots;
typedef union KMP_ALIGN_CACHE kmp_info {
  char th_pad[KMP_PAD(kmp_base_info_t, CACHE_LINE)];
typedef struct kmp_base_data {
  volatile kmp_uint32 t_value;
typedef union KMP_ALIGN_CACHE kmp_sleep_team {
  char dt_pad[KMP_PAD(kmp_base_data_t, CACHE_LINE)];
typedef union KMP_ALIGN_CACHE kmp_ordered_team {
  char dt_pad[KMP_PAD(kmp_base_data_t, CACHE_LINE)];
} kmp_ordered_team_t;
typedef int (*launch_t)(int gtid);
#define KMP_MIN_MALLOC_ARGV_ENTRIES 100
#if KMP_ARCH_X86 || KMP_ARCH_X86_64
#define KMP_INLINE_ARGV_BYTES \
  ((3 * KMP_PTR_SKIP + 2 * sizeof(int) + 2 * sizeof(kmp_int8) + \
    sizeof(kmp_int16) + sizeof(kmp_uint32)) % \
#define KMP_INLINE_ARGV_BYTES \
  (2 * CACHE_LINE - ((3 * KMP_PTR_SKIP + 2 * sizeof(int)) % CACHE_LINE))
#define KMP_INLINE_ARGV_ENTRIES (int)(KMP_INLINE_ARGV_BYTES / KMP_PTR_SKIP)
typedef struct KMP_ALIGN_CACHE kmp_base_team {
  KMP_ALIGN_CACHE kmp_ordered_team_t t_ordered;
  kmp_balign_team_t t_bar[bs_last_barrier];
  std::atomic<int> t_construct;
  char pad[sizeof(kmp_lock_t)];
  std::atomic<void *> t_tg_reduce_data[2];
  std::atomic<int> t_tg_fini_counter[2];
  KMP_ALIGN_CACHE int t_master_tid;
  int t_master_this_cons;
  kmp_team_p *t_parent;
  kmp_team_p *t_next_pool;
  kmp_disp_t *t_dispatch;
  kmp_task_team_t *t_task_team[2];
  kmp_proc_bind_t t_proc_bind;
  kmp_uint64 t_region_time;
  KMP_ALIGN_CACHE void **t_argv;
  ompt_team_info_t ompt_team_info;
  ompt_lw_taskteam_t *ompt_serialized_team_info;
#if KMP_ARCH_X86 || KMP_ARCH_X86_64
  kmp_int8 t_fp_control_saved;
  kmp_int16 t_x87_fpu_control_word;
  void *t_inline_argv[KMP_INLINE_ARGV_ENTRIES];
  KMP_ALIGN_CACHE kmp_info_t **t_threads;
  kmp_taskdata_t
      *t_implicit_task_taskdata; // Taskdata for the thread's implicit task
  KMP_ALIGN_CACHE int t_max_argc;
  dispatch_shared_info_t *t_disp_buffer;
  kmp_r_sched_t t_sched;
#if KMP_AFFINITY_SUPPORTED
  int t_display_affinity;
  omp_allocator_handle_t t_def_allocator;
#if (KMP_ARCH_X86 || KMP_ARCH_X86_64)
  char dummy_padding[1024];
  KMP_ALIGN_CACHE kmp_internal_control_t *t_control_stack_top;
  std::atomic<kmp_int32> t_cancel_request;
  int t_master_active;
  void *t_copypriv_data;
  std::atomic<kmp_uint32> t_copyin_counter;
  distributedBarrier *b;
union KMP_ALIGN_CACHE kmp_team {
  char t_pad[KMP_PAD(kmp_base_team_t, CACHE_LINE)];
typedef union KMP_ALIGN_CACHE kmp_time_global {
  char dt_pad[KMP_PAD(kmp_base_data_t, CACHE_LINE)];
typedef struct kmp_base_global {
  kmp_time_global_t g_time;
  volatile int g_abort;
  volatile int g_done;
  enum dynamic_mode g_dynamic_mode;
typedef union KMP_ALIGN_CACHE kmp_global {
  kmp_base_global_t g;
  char g_pad[KMP_PAD(kmp_base_global_t, CACHE_LINE)];
typedef struct kmp_base_root {
  volatile int r_active;
  std::atomic<int> r_in_parallel;
  kmp_team_t *r_root_team;
  kmp_team_t *r_hot_team;
  kmp_info_t *r_uber_thread;
  kmp_lock_t r_begin_lock;
  volatile int r_begin;
#if KMP_AFFINITY_SUPPORTED
  int r_affinity_assigned;
typedef union KMP_ALIGN_CACHE kmp_root {
  char r_pad[KMP_PAD(kmp_base_root_t, CACHE_LINE)];
struct fortran_inx_info {
typedef struct kmp_old_threads_list_t {
  kmp_info_t **threads;
  struct kmp_old_threads_list_t *next;
} kmp_old_threads_list_t;
extern int __kmp_settings;
extern int __kmp_duplicate_library_ok;
extern int __kmp_forkjoin_frames;
extern int __kmp_forkjoin_frames_mode;
extern PACKED_REDUCTION_METHOD_T __kmp_force_reduction_method;
extern int __kmp_determ_red;
extern int kmp_a_debug;
extern int kmp_b_debug;
extern int kmp_c_debug;
extern int kmp_d_debug;
extern int kmp_e_debug;
extern int kmp_f_debug;
#define KMP_DEBUG_BUF_LINES_INIT 512
#define KMP_DEBUG_BUF_LINES_MIN 1
#define KMP_DEBUG_BUF_CHARS_INIT 128
#define KMP_DEBUG_BUF_CHARS_MIN 2
extern int __kmp_debug_buf_lines;
extern int __kmp_debug_buf_chars;
extern int __kmp_debug_buf_atomic;
extern char *__kmp_debug_buffer;
extern std::atomic<int> __kmp_debug_count;
extern int __kmp_debug_buf_warn_chars;
extern int __kmp_par_range;
#define KMP_PAR_RANGE_ROUTINE_LEN 1024
extern char __kmp_par_range_routine[KMP_PAR_RANGE_ROUTINE_LEN];
#define KMP_PAR_RANGE_FILENAME_LEN 1024
extern char __kmp_par_range_filename[KMP_PAR_RANGE_FILENAME_LEN];
extern int __kmp_par_range_lb;
extern int __kmp_par_range_ub;
extern int __kmp_storage_map_verbose;
extern int __kmp_storage_map_verbose_specified;
#if KMP_ARCH_X86 || KMP_ARCH_X86_64
extern kmp_cpuinfo_t __kmp_cpuinfo;
static inline bool __kmp_is_hybrid_cpu() { return __kmp_cpuinfo.flags.hybrid; }
#elif KMP_OS_DARWIN && KMP_ARCH_AARCH64
static inline bool __kmp_is_hybrid_cpu() { return true; }
static inline bool __kmp_is_hybrid_cpu() { return false; }
extern volatile int __kmp_init_serial;
extern volatile int __kmp_init_gtid;
extern volatile int __kmp_init_common;
extern volatile int __kmp_need_register_serial;
extern volatile int __kmp_init_middle;
extern volatile int __kmp_init_parallel;
extern volatile int __kmp_init_monitor;
extern volatile int __kmp_init_user_locks;
extern volatile int __kmp_init_hidden_helper_threads;
extern int __kmp_init_counter;
extern int __kmp_root_counter;
extern int __kmp_version;
extern kmp_cached_addr_t *__kmp_threadpriv_cache_list;
extern kmp_uint32 __kmp_barrier_gather_bb_dflt;
extern kmp_uint32 __kmp_barrier_release_bb_dflt;
extern kmp_bar_pat_e __kmp_barrier_gather_pat_dflt;
extern kmp_bar_pat_e __kmp_barrier_release_pat_dflt;
extern kmp_uint32 __kmp_barrier_gather_branch_bits[bs_last_barrier];
extern kmp_uint32 __kmp_barrier_release_branch_bits[bs_last_barrier];
extern kmp_bar_pat_e __kmp_barrier_gather_pattern[bs_last_barrier];
extern kmp_bar_pat_e __kmp_barrier_release_pattern[bs_last_barrier];
extern char const *__kmp_barrier_branch_bit_env_name[bs_last_barrier];
extern char const *__kmp_barrier_pattern_env_name[bs_last_barrier];
extern char const *__kmp_barrier_type_name[bs_last_barrier];
extern char const *__kmp_barrier_pattern_name[bp_last_bar];
extern kmp_bootstrap_lock_t __kmp_initz_lock;
extern kmp_bootstrap_lock_t __kmp_forkjoin_lock;
extern kmp_bootstrap_lock_t __kmp_task_team_lock;
extern kmp_bootstrap_lock_t
    __kmp_exit_lock; /* exit() is not always thread-safe */
extern kmp_bootstrap_lock_t
    __kmp_monitor_lock; /* control monitor thread creation */
extern kmp_bootstrap_lock_t
    __kmp_tp_cached_lock;
extern kmp_lock_t __kmp_global_lock;
extern kmp_queuing_lock_t __kmp_dispatch_lock;
extern kmp_lock_t __kmp_debug_lock;
extern enum library_type __kmp_library;
extern int __kmp_chunk;
extern int __kmp_force_monotonic;
extern size_t __kmp_stksize;
extern size_t __kmp_monitor_stksize;
extern size_t __kmp_stkoffset;
extern int __kmp_stkpadding;
extern size_t __kmp_malloc_pool_incr;
extern int __kmp_env_stksize;
extern int __kmp_env_blocktime;
extern int __kmp_env_checks;
extern int __kmp_env_consistency_check;
extern int __kmp_generate_warnings;
extern int __kmp_reserve_warn;
extern int __kmp_suspend_count;
extern kmp_int32 __kmp_use_yield;
extern kmp_int32 __kmp_use_yield_exp_set;
extern kmp_uint32 __kmp_yield_init;
extern kmp_uint32 __kmp_yield_next;
extern kmp_uint64 __kmp_pause_init;
extern int __kmp_allThreadsSpecified;
extern size_t __kmp_align_alloc;
extern int __kmp_xproc;
extern int __kmp_avail_proc;
extern size_t __kmp_sys_min_stksize;
extern int __kmp_sys_max_nth;
extern int __kmp_max_nth;
extern int __kmp_cg_max_nth;
extern int __kmp_teams_max_nth;
extern int __kmp_threads_capacity;
extern int __kmp_dflt_team_nth;
extern int __kmp_dflt_team_nth_ub;
extern int __kmp_tp_capacity;
extern int __kmp_tp_cached;
extern int __kmp_dflt_blocktime;
extern bool __kmp_wpolicy_passive;
extern int __kmp_monitor_wakeups;
extern int __kmp_bt_intervals;
#ifdef KMP_ADJUST_BLOCKTIME
extern int __kmp_zero_bt;
#ifdef KMP_DFLT_NTH_CORES
extern int __kmp_ncores;
extern int __kmp_abort_delay;
extern int __kmp_need_register_atfork_specified;
extern int __kmp_need_register_atfork;
extern int __kmp_gtid_mode;
extern int __kmp_adjust_gtid_mode;
#ifdef KMP_TDATA_GTID
extern KMP_THREAD_LOCAL int __kmp_gtid;
3214extern int __kmp_tls_gtid_min;
3215extern int __kmp_foreign_tp;
#if KMP_ARCH_X86 || KMP_ARCH_X86_64
extern int __kmp_inherit_fp_control; /* copy fp control from primary thread */
extern kmp_int16 __kmp_init_x87_fpu_control_word; /* init thread's FP word */
extern kmp_uint32 __kmp_init_mxcsr; /* init thread's mxcsr */
#endif /* KMP_ARCH_X86 || KMP_ARCH_X86_64 */

// max_active_levels for nested parallelism enabled by default via
// OMP_MAX_ACTIVE_LEVELS, OMP_NESTED, OMP_NUM_THREADS, and OMP_PROC_BIND
extern int __kmp_dflt_max_active_levels;
// Indicates whether value of __kmp_dflt_max_active_levels was already
// explicitly set by OMP_MAX_ACTIVE_LEVELS or OMP_NESTED=false
extern bool __kmp_dflt_max_active_levels_set;
extern int __kmp_dispatch_num_buffers; /* max possible dynamic loops in
                                          concurrent execution per team */
#if KMP_NESTED_HOT_TEAMS
extern int __kmp_hot_teams_mode;
extern int __kmp_hot_teams_max_level;
#endif

#if KMP_OS_LINUX
extern enum clock_function_type __kmp_clock_function;
extern int __kmp_clock_function_param;
#endif /* KMP_OS_LINUX */

#if KMP_MIC_SUPPORTED
extern enum mic_type __kmp_mic_type;
#endif

#ifdef USE_LOAD_BALANCE
extern double __kmp_load_balance_interval; /* load balance algorithm interval */
#endif /* USE_LOAD_BALANCE */
// OpenMP 3.1 - Nested num threads array
typedef struct kmp_nested_nthreads_t {
  int *nth;
  int size;
  int used;
} kmp_nested_nthreads_t;

extern kmp_nested_nthreads_t __kmp_nested_nth;
#if KMP_USE_ADAPTIVE_LOCKS

// Parameters for the speculative lock backoff system.
struct kmp_adaptive_backoff_params_t {
  // Number of soft retries before it counts as a hard retry.
  kmp_uint32 max_soft_retries;
  // Badness is a bit mask: 0,1,3,7,15,... on each hard failure we move one to
  // the right.
  kmp_uint32 max_badness;
};

extern kmp_adaptive_backoff_params_t __kmp_adaptive_backoff_params;

#if KMP_DEBUG_ADAPTIVE_LOCKS
extern const char *__kmp_speculative_statsfile;
#endif

#endif // KMP_USE_ADAPTIVE_LOCKS
extern int __kmp_display_env; /* TRUE or FALSE */
extern int __kmp_display_env_verbose; /* TRUE if OMP_DISPLAY_ENV=VERBOSE */
extern int __kmp_omp_cancellation; /* TRUE or FALSE */
extern int __kmp_nteams;
extern int __kmp_teams_thread_limit;
/* the following are protected by the fork/join lock */
/* write: lock  read: anytime */
extern kmp_info_t **__kmp_threads; /* descriptors for the threads */
/* Descriptors for the old (freed-index) threads; same lock protects it */
extern kmp_old_threads_list_t *__kmp_old_threads_list;
/* read/write: lock */
extern volatile kmp_team_t *__kmp_team_pool;
extern volatile kmp_info_t *__kmp_thread_pool;
extern kmp_info_t *__kmp_thread_pool_insert_pt;

// total num threads reachable from some root thread including all root threads
extern volatile int __kmp_nth;
/* total number of threads reachable from some root thread including all root
   threads, and those in the thread pool */
extern volatile int __kmp_all_nth;
extern std::atomic<int> __kmp_thread_pool_active_nth;

extern kmp_root_t **__kmp_root; /* root of thread hierarchy */
/* end data protected by fork/join lock */
#define __kmp_get_gtid() __kmp_get_global_thread_id()
#define __kmp_entry_gtid() __kmp_get_global_thread_id_reg()
#define __kmp_get_tid() (__kmp_tid_from_gtid(__kmp_get_gtid()))
#define __kmp_get_team() (__kmp_threads[(__kmp_get_gtid())]->th.th_team)
#define __kmp_get_thread() (__kmp_thread_from_gtid(__kmp_get_gtid()))

#define __kmp_get_team_num_threads(gtid)                                       \
  (__kmp_threads[(gtid)]->th.th_team->t.t_nproc)
static inline bool KMP_UBER_GTID(int gtid) {
  KMP_DEBUG_ASSERT(gtid >= KMP_GTID_MIN);
  KMP_DEBUG_ASSERT(gtid < __kmp_threads_capacity);
  return (gtid >= 0 && __kmp_root[gtid] && __kmp_threads[gtid] &&
          __kmp_threads[gtid] == __kmp_root[gtid]->r.r_uber_thread);
}

static inline int __kmp_tid_from_gtid(int gtid) {
  KMP_DEBUG_ASSERT(gtid >= 0);
  return __kmp_threads[gtid]->th.th_info.ds.ds_tid;
}

static inline int __kmp_gtid_from_tid(int tid, const kmp_team_t *team) {
  KMP_DEBUG_ASSERT(tid >= 0 && team);
  return team->t.t_threads[tid]->th.th_info.ds.ds_gtid;
}

static inline int __kmp_gtid_from_thread(const kmp_info_t *thr) {
  KMP_DEBUG_ASSERT(thr);
  return thr->th.th_info.ds.ds_gtid;
}

static inline kmp_info_t *__kmp_thread_from_gtid(int gtid) {
  KMP_DEBUG_ASSERT(gtid >= 0);
  return __kmp_threads[gtid];
}

static inline kmp_team_t *__kmp_team_from_gtid(int gtid) {
  KMP_DEBUG_ASSERT(gtid >= 0);
  return __kmp_threads[gtid]->th.th_team;
}

static inline void __kmp_assert_valid_gtid(kmp_int32 gtid) {
  if (UNLIKELY(gtid < 0 || gtid >= __kmp_threads_capacity))
    KMP_FATAL(ThreadIdentInvalid);
}
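// --- Illustrative sketch (editorial addition, not in upstream kmp.h): how the
// gtid/tid helpers above compose. Only the declarations above are assumed;
// "example_check_ids" is a hypothetical name.
static inline void example_check_ids() {
  int gtid = __kmp_get_gtid(); // global thread id, negative if not registered
  if (gtid >= 0) {
    __kmp_assert_valid_gtid(gtid);
    kmp_info_t *thr = __kmp_thread_from_gtid(gtid);
    int tid = __kmp_tid_from_gtid(gtid); // rank within the current team
    // Round-tripping through the team descriptor recovers the same gtid.
    KMP_DEBUG_ASSERT(__kmp_gtid_from_tid(tid, __kmp_team_from_gtid(gtid)) ==
                     gtid);
    KMP_DEBUG_ASSERT(__kmp_gtid_from_thread(thr) == gtid);
  }
}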
#if KMP_HAVE_MWAIT || KMP_HAVE_UMWAIT
extern int __kmp_user_level_mwait; // TRUE or FALSE; from KMP_USER_LEVEL_MWAIT
extern int __kmp_umwait_enabled; // runtime check if user-level mwait enabled
extern int __kmp_mwait_enabled; // runtime check if ring3 mwait is enabled
extern int __kmp_mwait_hints; // hints to pass in to mwait
#endif

#if KMP_HAVE_UMWAIT
extern int __kmp_waitpkg_enabled; // runtime check if waitpkg exists
extern int __kmp_tpause_state; // 0 (default), 1=C0.1, 2=C0.2; from KMP_TPAUSE
extern int __kmp_tpause_hint; // 1=C0.1 (default), 0=C0.2; from KMP_TPAUSE
extern int __kmp_tpause_enabled; // 0 (default), 1 (KMP_TPAUSE is non-zero)
#endif
/* ------------------------------------------------------------------------- */

extern kmp_global_t __kmp_global; /* global status */

extern kmp_info_t __kmp_monitor;
// For Debugging Support Library
extern std::atomic<kmp_int32> __kmp_team_counter;
// For Debugging Support Library
extern std::atomic<kmp_int32> __kmp_task_counter;

#if USE_DEBUGGER
#define _KMP_GEN_ID(counter)                                                   \
  (__kmp_debugging ? KMP_ATOMIC_INC(&counter) + 1 : ~0)
#else
#define _KMP_GEN_ID(counter) (~0)
#endif /* USE_DEBUGGER */

#define KMP_GEN_TASK_ID() _KMP_GEN_ID(__kmp_task_counter)
#define KMP_GEN_TEAM_ID() _KMP_GEN_ID(__kmp_team_counter)
extern void __kmp_print_storage_map_gtid(int gtid, void *p1, void *p2,
                                         size_t size, char const *format, ...);
extern void __kmp_serial_initialize(void);
extern void __kmp_middle_initialize(void);
extern void __kmp_parallel_initialize(void);

extern void __kmp_internal_begin(void);
extern void __kmp_internal_end_library(int gtid);
extern void __kmp_internal_end_thread(int gtid);
extern void __kmp_internal_end_atexit(void);
extern void __kmp_internal_end_dtor(void);
extern void __kmp_internal_end_dest(void *);

extern int __kmp_register_root(int initial_thread);
extern void __kmp_unregister_root(int gtid);
extern void __kmp_unregister_library(void); // called by __kmp_internal_end()

extern int __kmp_ignore_mppbeg(void);
extern int __kmp_ignore_mppend(void);
extern int __kmp_enter_single(int gtid, ident_t *id_ref, int push_ws);
extern void __kmp_exit_single(int gtid);

extern void __kmp_parallel_deo(int *gtid_ref, int *cid_ref, ident_t *loc_ref);
extern void __kmp_parallel_dxo(int *gtid_ref, int *cid_ref, ident_t *loc_ref);
#ifdef USE_LOAD_BALANCE
extern int __kmp_get_load_balance(int);
#endif

extern int __kmp_get_global_thread_id(void);
extern int __kmp_get_global_thread_id_reg(void);
extern void __kmp_exit_thread(int exit_status);
extern void __kmp_abort(char const *format, ...);
extern void __kmp_abort_thread(void);
KMP_NORETURN extern void __kmp_abort_process(void);
extern void __kmp_warn(char const *format, ...);

extern void __kmp_set_num_threads(int new_nth, int gtid);
// Returns current thread (pointer to kmp_info_t). Unlike __kmp_get_thread(),
// it also works for threads that are not yet registered.
static inline kmp_info_t *__kmp_entry_thread() {
  int gtid = __kmp_entry_gtid();
  return __kmp_threads[gtid];
}
extern void __kmp_set_max_active_levels(int gtid, int new_max_active_levels);
extern int __kmp_get_max_active_levels(int gtid);
extern int __kmp_get_ancestor_thread_num(int gtid, int level);
extern int __kmp_get_team_size(int gtid, int level);
extern void __kmp_set_schedule(int gtid, kmp_sched_t new_sched, int chunk);
extern void __kmp_get_schedule(int gtid, kmp_sched_t *sched, int *chunk);

extern unsigned short __kmp_get_random(kmp_info_t *thread);
extern void __kmp_init_random(kmp_info_t *thread);

extern kmp_r_sched_t __kmp_get_schedule_global(void);
extern void __kmp_adjust_num_threads(int new_nproc);
extern void __kmp_check_stksize(size_t *val);
extern void *___kmp_allocate(size_t size KMP_SRC_LOC_DECL);
extern void *___kmp_page_allocate(size_t size KMP_SRC_LOC_DECL);
extern void ___kmp_free(void *ptr KMP_SRC_LOC_DECL);
#define __kmp_allocate(size) ___kmp_allocate((size)KMP_SRC_LOC_CURR)
#define __kmp_page_allocate(size) ___kmp_page_allocate((size)KMP_SRC_LOC_CURR)
#define __kmp_free(ptr) ___kmp_free((ptr)KMP_SRC_LOC_CURR)
#if USE_FAST_MEMORY
extern void *___kmp_fast_allocate(kmp_info_t *this_thr,
                                  size_t size KMP_SRC_LOC_DECL);
extern void ___kmp_fast_free(kmp_info_t *this_thr, void *ptr KMP_SRC_LOC_DECL);
extern void __kmp_free_fast_memory(kmp_info_t *this_thr);
extern void __kmp_initialize_fast_memory(kmp_info_t *this_thr);
#define __kmp_fast_allocate(this_thr, size)                                    \
  ___kmp_fast_allocate((this_thr), (size)KMP_SRC_LOC_CURR)
#define __kmp_fast_free(this_thr, ptr)                                         \
  ___kmp_fast_free((this_thr), (ptr)KMP_SRC_LOC_CURR)
#endif
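// --- Illustrative sketch (editorial addition, not in upstream kmp.h): the
// __kmp_allocate/__kmp_free macros append the caller's source location
// (KMP_SRC_LOC_CURR) so the debug allocator can attribute each block to a
// file/line. "example_alloc_counters" is a hypothetical helper.
static inline kmp_int32 *example_alloc_counters(int n) {
  kmp_int32 *buf = (kmp_int32 *)__kmp_allocate(n * sizeof(kmp_int32));
  // ... use buf ...
  return buf; // the caller must release it with __kmp_free(buf)
}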
extern void *___kmp_thread_malloc(kmp_info_t *th, size_t size KMP_SRC_LOC_DECL);
extern void *___kmp_thread_calloc(kmp_info_t *th, size_t nelem,
                                  size_t elsize KMP_SRC_LOC_DECL);
extern void *___kmp_thread_realloc(kmp_info_t *th, void *ptr,
                                   size_t size KMP_SRC_LOC_DECL);
extern void ___kmp_thread_free(kmp_info_t *th, void *ptr KMP_SRC_LOC_DECL);
#define __kmp_thread_malloc(th, size)                                          \
  ___kmp_thread_malloc((th), (size)KMP_SRC_LOC_CURR)
#define __kmp_thread_calloc(th, nelem, elsize)                                 \
  ___kmp_thread_calloc((th), (nelem), (elsize)KMP_SRC_LOC_CURR)
#define __kmp_thread_realloc(th, ptr, size)                                    \
  ___kmp_thread_realloc((th), (ptr), (size)KMP_SRC_LOC_CURR)
#define __kmp_thread_free(th, ptr)                                             \
  ___kmp_thread_free((th), (ptr)KMP_SRC_LOC_CURR)
extern void __kmp_push_num_threads(ident_t *loc, int gtid, int num_threads);
extern void __kmp_push_proc_bind(ident_t *loc, int gtid,
                                 kmp_proc_bind_t proc_bind);
extern void __kmp_push_num_teams(ident_t *loc, int gtid, int num_teams,
                                 int num_threads);
extern void __kmp_push_num_teams_51(ident_t *loc, int gtid, int num_teams_lb,
                                    int num_teams_ub, int num_threads);
extern void __kmp_yield();

extern void __kmpc_dispatch_init_4(ident_t *loc, kmp_int32 gtid,
                                   enum sched_type schedule, kmp_int32 lb,
                                   kmp_int32 ub, kmp_int32 st, kmp_int32 chunk);
extern void __kmpc_dispatch_init_4u(ident_t *loc, kmp_int32 gtid,
                                    enum sched_type schedule, kmp_uint32 lb,
                                    kmp_uint32 ub, kmp_int32 st,
                                    kmp_int32 chunk);
extern void __kmpc_dispatch_init_8(ident_t *loc, kmp_int32 gtid,
                                   enum sched_type schedule, kmp_int64 lb,
                                   kmp_int64 ub, kmp_int64 st, kmp_int64 chunk);
extern void __kmpc_dispatch_init_8u(ident_t *loc, kmp_int32 gtid,
                                    enum sched_type schedule, kmp_uint64 lb,
                                    kmp_uint64 ub, kmp_int64 st,
                                    kmp_int64 chunk);
extern int __kmpc_dispatch_next_4(ident_t *loc, kmp_int32 gtid,
                                  kmp_int32 *p_last, kmp_int32 *p_lb,
                                  kmp_int32 *p_ub, kmp_int32 *p_st);
extern int __kmpc_dispatch_next_4u(ident_t *loc, kmp_int32 gtid,
                                   kmp_int32 *p_last, kmp_uint32 *p_lb,
                                   kmp_uint32 *p_ub, kmp_int32 *p_st);
extern int __kmpc_dispatch_next_8(ident_t *loc, kmp_int32 gtid,
                                  kmp_int32 *p_last, kmp_int64 *p_lb,
                                  kmp_int64 *p_ub, kmp_int64 *p_st);
extern int __kmpc_dispatch_next_8u(ident_t *loc, kmp_int32 gtid,
                                   kmp_int32 *p_last, kmp_uint64 *p_lb,
                                   kmp_uint64 *p_ub, kmp_int64 *p_st);
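// --- Illustrative sketch (editorial addition, not in upstream kmp.h): the
// dynamic-dispatch entry points above are what a compiler emits for
// "#pragma omp for schedule(dynamic)". A hand-written equivalent for a 32-bit
// signed loop over [lb, ub]; "example_dynamic_loop" is hypothetical.
static inline void example_dynamic_loop(ident_t *loc, kmp_int32 gtid,
                                        kmp_int32 lb, kmp_int32 ub) {
  kmp_int32 p_last, p_lb, p_ub, p_st;
  // kmp_sch_dynamic_chunked is a value of enum sched_type.
  __kmpc_dispatch_init_4(loc, gtid, kmp_sch_dynamic_chunked, lb, ub,
                         /*st=*/1, /*chunk=*/1);
  // Each successful next() call hands this thread one chunk [p_lb, p_ub].
  while (__kmpc_dispatch_next_4(loc, gtid, &p_last, &p_lb, &p_ub, &p_st)) {
    for (kmp_int32 i = p_lb; i <= p_ub; i += p_st) {
      // ... loop body for iteration i ...
    }
  }
}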
#ifdef KMP_GOMP_COMPAT

extern void __kmp_aux_dispatch_init_4(ident_t *loc, kmp_int32 gtid,
                                      enum sched_type schedule, kmp_int32 lb,
                                      kmp_int32 ub, kmp_int32 st,
                                      kmp_int32 chunk, int push_ws);
extern void __kmp_aux_dispatch_init_4u(ident_t *loc, kmp_int32 gtid,
                                       enum sched_type schedule, kmp_uint32 lb,
                                       kmp_uint32 ub, kmp_int32 st,
                                       kmp_int32 chunk, int push_ws);
extern void __kmp_aux_dispatch_init_8(ident_t *loc, kmp_int32 gtid,
                                      enum sched_type schedule, kmp_int64 lb,
                                      kmp_int64 ub, kmp_int64 st,
                                      kmp_int64 chunk, int push_ws);
extern void __kmp_aux_dispatch_init_8u(ident_t *loc, kmp_int32 gtid,
                                       enum sched_type schedule, kmp_uint64 lb,
                                       kmp_uint64 ub, kmp_int64 st,
                                       kmp_int64 chunk, int push_ws);
extern void __kmp_aux_dispatch_fini_chunk_4(ident_t *loc, kmp_int32 gtid);
extern void __kmp_aux_dispatch_fini_chunk_8(ident_t *loc, kmp_int32 gtid);
extern void __kmp_aux_dispatch_fini_chunk_4u(ident_t *loc, kmp_int32 gtid);
extern void __kmp_aux_dispatch_fini_chunk_8u(ident_t *loc, kmp_int32 gtid);

#endif /* KMP_GOMP_COMPAT */
extern kmp_uint32 __kmp_eq_4(kmp_uint32 value, kmp_uint32 checker);
extern kmp_uint32 __kmp_neq_4(kmp_uint32 value, kmp_uint32 checker);
extern kmp_uint32 __kmp_lt_4(kmp_uint32 value, kmp_uint32 checker);
extern kmp_uint32 __kmp_ge_4(kmp_uint32 value, kmp_uint32 checker);
extern kmp_uint32 __kmp_le_4(kmp_uint32 value, kmp_uint32 checker);
extern kmp_uint32 __kmp_wait_4(kmp_uint32 volatile *spinner, kmp_uint32 checker,
                               kmp_uint32 (*pred)(kmp_uint32, kmp_uint32),
                               void *obj);
extern void __kmp_wait_4_ptr(void *spinner, kmp_uint32 checker,
                             kmp_uint32 (*pred)(void *, kmp_uint32), void *obj);

extern void __kmp_wait_64(kmp_info_t *this_thr, kmp_flag_64<> *flag,
                          int final_spin
#if USE_ITT_BUILD
                          ,
                          void *itt_sync_obj
#endif
);
extern void __kmp_release_64(kmp_flag_64<> *flag);
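// --- Illustrative sketch (editorial addition, not in upstream kmp.h):
// __kmp_wait_4 spins until pred(*spinner, checker) is true, applying the
// runtime's yield/backoff policy. Paired with __kmp_eq_4 it waits for a flag
// word to reach a value; "example_wait_until_equal" is hypothetical.
static inline void example_wait_until_equal(kmp_uint32 volatile *flag_word,
                                            kmp_uint32 expected) {
  __kmp_wait_4(flag_word, expected, __kmp_eq_4, NULL);
}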
extern void __kmp_infinite_loop(void);

extern void __kmp_cleanup(void);

#if KMP_HANDLE_SIGNALS
extern int __kmp_handle_signals;
extern void __kmp_install_signals(int parallel_init);
extern void __kmp_remove_signals(void);
#endif

extern void __kmp_clear_system_time(void);
extern void __kmp_read_system_time(double *delta);
extern void __kmp_check_stack_overlap(kmp_info_t *thr);

extern void __kmp_expand_host_name(char *buffer, size_t size);
extern void __kmp_expand_file_name(char *result, size_t rlen, char *pattern);

#if KMP_ARCH_X86 || KMP_ARCH_X86_64 || (KMP_OS_WINDOWS && KMP_ARCH_AARCH64)
extern void
__kmp_initialize_system_tick(void); /* Initialize timer tick value */
#endif

extern void
__kmp_runtime_initialize(void); /* machine specific initialization */
extern void __kmp_runtime_destroy(void);
#if KMP_AFFINITY_SUPPORTED
extern char *__kmp_affinity_print_mask(char *buf, int buf_len,
                                       kmp_affin_mask_t *mask);
extern kmp_str_buf_t *__kmp_affinity_str_buf_mask(kmp_str_buf_t *buf,
                                                  kmp_affin_mask_t *mask);
extern void __kmp_affinity_initialize(void);
extern void __kmp_affinity_uninitialize(void);
extern void __kmp_affinity_set_init_mask(int gtid, int isa_root);
extern void __kmp_affinity_set_place(int gtid);
extern void __kmp_affinity_determine_capable(const char *env_var);
extern int __kmp_aux_set_affinity(void **mask);
extern int __kmp_aux_get_affinity(void **mask);
extern int __kmp_aux_get_affinity_max_proc();
extern int __kmp_aux_set_affinity_mask_proc(int proc, void **mask);
extern int __kmp_aux_unset_affinity_mask_proc(int proc, void **mask);
extern int __kmp_aux_get_affinity_mask_proc(int proc, void **mask);
extern void __kmp_balanced_affinity(kmp_info_t *th, int team_size);
#if KMP_OS_LINUX || KMP_OS_FREEBSD
extern int kmp_set_thread_affinity_mask_initial(void);
#endif
static inline void __kmp_assign_root_init_mask() {
  int gtid = __kmp_entry_gtid();
  kmp_root_t *r = __kmp_threads[gtid]->th.th_root;
  if (r->r.r_uber_thread == __kmp_threads[gtid] && !r->r.r_affinity_assigned) {
    __kmp_affinity_set_init_mask(gtid, TRUE);
    r->r.r_affinity_assigned = TRUE;
  }
}
static inline void __kmp_reset_root_init_mask(int gtid) {
  kmp_info_t *th = __kmp_threads[gtid];
  kmp_root_t *r = th->th.th_root;
  if (r->r.r_uber_thread == th && r->r.r_affinity_assigned) {
    __kmp_set_system_affinity(__kmp_affin_origMask, FALSE);
    KMP_CPU_COPY(th->th.th_affin_mask, __kmp_affin_origMask);
    r->r.r_affinity_assigned = FALSE;
  }
}
#else /* KMP_AFFINITY_SUPPORTED */
#define __kmp_assign_root_init_mask() /* Nothing */
static inline void __kmp_reset_root_init_mask(int gtid) {}
#endif /* KMP_AFFINITY_SUPPORTED */
extern size_t __kmp_aux_capture_affinity(int gtid, const char *format,
                                         kmp_str_buf_t *buffer);
extern void __kmp_aux_display_affinity(int gtid, const char *format);

extern void __kmp_cleanup_hierarchy();
extern void __kmp_get_hierarchy(kmp_uint32 nproc, kmp_bstate_t *thr_bar);
#if KMP_USE_FUTEX
extern int __kmp_futex_determine_capable(void);
#endif // KMP_USE_FUTEX

extern void __kmp_gtid_set_specific(int gtid);
extern int __kmp_gtid_get_specific(void);
extern double __kmp_read_cpu_time(void);

extern int __kmp_read_system_info(struct kmp_sys_info *info);

#if KMP_USE_MONITOR
extern void __kmp_create_monitor(kmp_info_t *th);
#endif

extern void *__kmp_launch_thread(kmp_info_t *thr);

extern void __kmp_create_worker(int gtid, kmp_info_t *th, size_t stack_size);

#if KMP_OS_WINDOWS
extern int __kmp_still_running(kmp_info_t *th);
extern int __kmp_is_thread_alive(kmp_info_t *th, DWORD *exit_val);
extern void __kmp_free_handle(kmp_thread_t tHandle);
#endif

#if KMP_USE_MONITOR
extern void __kmp_reap_monitor(kmp_info_t *th);
#endif
extern void __kmp_reap_worker(kmp_info_t *th);
extern void __kmp_terminate_thread(int gtid);

extern int __kmp_try_suspend_mx(kmp_info_t *th);
extern void __kmp_lock_suspend_mx(kmp_info_t *th);
extern void __kmp_unlock_suspend_mx(kmp_info_t *th);
extern void __kmp_elapsed(double *);
extern void __kmp_elapsed_tick(double *);

extern void __kmp_enable(int old_state);
extern void __kmp_disable(int *old_state);

extern void __kmp_thread_sleep(int millis);

extern void __kmp_common_initialize(void);
extern void __kmp_common_destroy(void);
extern void __kmp_common_destroy_gtid(int gtid);

#if KMP_OS_UNIX
extern void __kmp_register_atfork(void);
#endif
extern void __kmp_suspend_initialize(void);
extern void __kmp_suspend_initialize_thread(kmp_info_t *th);
extern void __kmp_suspend_uninitialize_thread(kmp_info_t *th);
extern kmp_info_t *__kmp_allocate_thread(kmp_root_t *root, kmp_team_t *team,
                                         int tid);
extern kmp_team_t *
__kmp_allocate_team(kmp_root_t *root, int new_nproc, int max_nproc,
#if OMPT_SUPPORT
                    ompt_data_t ompt_parallel_data,
#endif
                    kmp_proc_bind_t proc_bind, kmp_internal_control_t *new_icvs,
                    int argc USE_NESTED_HOT_ARG(kmp_info_t *thr));
extern void __kmp_free_thread(kmp_info_t *);
extern void __kmp_free_team(kmp_root_t *,
                            kmp_team_t *USE_NESTED_HOT_ARG(kmp_info_t *));
extern kmp_team_t *__kmp_reap_team(kmp_team_t *);
extern void __kmp_initialize_bget(kmp_info_t *th);
extern void __kmp_finalize_bget(kmp_info_t *th);

KMP_EXPORT void *kmpc_malloc(size_t size);
KMP_EXPORT void *kmpc_aligned_malloc(size_t size, size_t alignment);
KMP_EXPORT void *kmpc_calloc(size_t nelem, size_t elsize);
KMP_EXPORT void *kmpc_realloc(void *ptr, size_t size);
KMP_EXPORT void kmpc_free(void *ptr);
/* declarations for internal use */

extern int __kmp_barrier(enum barrier_type bt, int gtid, int is_split,
                         size_t reduce_size, void *reduce_data,
                         void (*reduce)(void *, void *));
extern void __kmp_end_split_barrier(enum barrier_type bt, int gtid);
extern int __kmp_barrier_gomp_cancel(int gtid);
/*!
 * Tell the fork call which compiler generated the fork call, and therefore how
 * to deal with the call.
 */
enum fork_context_e {
  fork_context_gnu, /**< Called from GNU generated code, so must not invoke the
                       microtask internally. */
  fork_context_intel, /**< Called from Intel generated code. */
  fork_context_last
};
extern int __kmp_fork_call(ident_t *loc, int gtid,
                           enum fork_context_e fork_context, kmp_int32 argc,
                           microtask_t microtask, launch_t invoker,
                           kmp_va_list ap);

extern void __kmp_join_call(ident_t *loc, int gtid
#if OMPT_SUPPORT
                            ,
                            enum fork_context_e fork_context
#endif
                            ,
                            int exit_teams = 0);
extern void __kmp_serialized_parallel(ident_t *id, kmp_int32 gtid);
extern void __kmp_internal_fork(ident_t *id, int gtid, kmp_team_t *team);
extern void __kmp_internal_join(ident_t *id, int gtid, kmp_team_t *team);
extern int __kmp_invoke_task_func(int gtid);
extern void __kmp_run_before_invoked_task(int gtid, int tid,
                                          kmp_info_t *this_thr,
                                          kmp_team_t *team);
extern void __kmp_run_after_invoked_task(int gtid, int tid,
                                         kmp_info_t *this_thr,
                                         kmp_team_t *team);
KMP_EXPORT int __kmpc_invoke_task_func(int gtid);
extern int __kmp_invoke_teams_master(int gtid);
extern void __kmp_teams_master(int gtid);
extern int __kmp_aux_get_team_num();
extern int __kmp_aux_get_num_teams();
extern void __kmp_save_internal_controls(kmp_info_t *thread);
extern void __kmp_user_set_library(enum library_type arg);
extern void __kmp_aux_set_library(enum library_type arg);
extern void __kmp_aux_set_stacksize(size_t arg);
extern void __kmp_aux_set_blocktime(int arg, kmp_info_t *thread, int tid);
extern void __kmp_aux_set_defaults(char const *str, size_t len);
/* Functions called from __kmp_aux_env_initialize() in kmp_settings.cpp */
void kmpc_set_blocktime(int arg);
void ompc_set_nested(int flag);
void ompc_set_dynamic(int flag);
void ompc_set_num_threads(int arg);
extern void __kmp_push_current_task_to_thread(kmp_info_t *this_thr,
                                              kmp_team_t *team, int tid);
extern void __kmp_pop_current_task_from_thread(kmp_info_t *this_thr);
extern kmp_task_t *__kmp_task_alloc(ident_t *loc_ref, kmp_int32 gtid,
                                    kmp_tasking_flags_t *flags,
                                    size_t sizeof_kmp_task_t,
                                    size_t sizeof_shareds,
                                    kmp_routine_entry_t task_entry);
extern void __kmp_init_implicit_task(ident_t *loc_ref, kmp_info_t *this_thr,
                                     kmp_team_t *team, int tid,
                                     int set_curr_task);
extern void __kmp_finish_implicit_task(kmp_info_t *this_thr);
extern void __kmp_free_implicit_task(kmp_info_t *this_thr);

extern kmp_event_t *__kmpc_task_allow_completion_event(ident_t *loc_ref,
                                                       int gtid,
                                                       kmp_task_t *task);
extern void __kmp_fulfill_event(kmp_event_t *event);
extern void __kmp_free_task_team(kmp_info_t *thread,
                                 kmp_task_team_t *task_team);
extern void __kmp_reap_task_teams(void);
extern void __kmp_wait_to_unref_task_teams(void);
extern void __kmp_task_team_setup(kmp_info_t *this_thr, kmp_team_t *team,
                                  int always);
extern void __kmp_task_team_sync(kmp_info_t *this_thr, kmp_team_t *team);
extern void __kmp_task_team_wait(kmp_info_t *this_thr, kmp_team_t *team
#if USE_ITT_BUILD
                                 ,
                                 void *itt_sync_obj
#endif
                                 ,
                                 int wait = 1);
extern void __kmp_tasking_barrier(kmp_team_t *team, kmp_info_t *thread,
                                  int gtid);
extern int __kmp_is_address_mapped(void *addr);
extern kmp_uint64 __kmp_hardware_timestamp(void);

#if KMP_OS_UNIX
extern int __kmp_read_from_file(char const *path, char const *format, ...);
#endif

/* ------------------------------------------------------------------------ */
// Assembly routines that have no compiler intrinsic replacement

extern int __kmp_invoke_microtask(microtask_t pkfn, int gtid, int npr, int argc,
                                  void *argv[]
#if OMPT_SUPPORT
                                  ,
                                  void **exit_frame_ptr
#endif
);

KMP_EXPORT void __kmpc_threadprivate_register_vec(ident_t *, void *data,
                                                  kmpc_ctor_vec ctor,
                                                  kmpc_cctor_vec cctor,
                                                  kmpc_dtor_vec dtor,
                                                  size_t vector_length);
KMP_EXPORT void *__kmpc_threadprivate(ident_t *, kmp_int32 global_tid,
                                      void *data, size_t size);

KMP_EXPORT void __kmpc_critical(ident_t *, kmp_int32 global_tid,
                                kmp_critical_name *);
KMP_EXPORT void __kmpc_end_critical(ident_t *, kmp_int32 global_tid,
                                    kmp_critical_name *);
KMP_EXPORT void __kmpc_critical_with_hint(ident_t *, kmp_int32 global_tid,
                                          kmp_critical_name *, uint32_t hint);
KMP_EXPORT kmp_int32 __kmpc_sections_init(ident_t *loc, kmp_int32 global_tid);
KMP_EXPORT kmp_int32 __kmpc_next_section(ident_t *loc, kmp_int32 global_tid,
                                         kmp_int32 numberOfSections);
KMP_EXPORT void KMPC_FOR_STATIC_INIT(ident_t *loc, kmp_int32 global_tid,
                                     kmp_int32 schedtype, kmp_int32 *plastiter,
                                     kmp_int *plower, kmp_int *pupper,
                                     kmp_int *pstride, kmp_int incr,
                                     kmp_int chunk);

KMP_EXPORT void __kmpc_copyprivate(ident_t *loc, kmp_int32 global_tid,
                                   size_t cpy_size, void *cpy_data,
                                   void (*cpy_func)(void *, void *),
                                   kmp_int32 didit);
extern void KMPC_SET_NUM_THREADS(int arg);
extern void KMPC_SET_DYNAMIC(int flag);
extern void KMPC_SET_NESTED(int flag);
/* OMP 3.0 tasking interface routines */
KMP_EXPORT kmp_int32 __kmpc_omp_task(ident_t *loc_ref, kmp_int32 gtid,
                                     kmp_task_t *new_task);
KMP_EXPORT kmp_task_t *__kmpc_omp_task_alloc(ident_t *loc_ref, kmp_int32 gtid,
                                             kmp_int32 flags,
                                             size_t sizeof_kmp_task_t,
                                             size_t sizeof_shareds,
                                             kmp_routine_entry_t task_entry);
KMP_EXPORT kmp_task_t *__kmpc_omp_target_task_alloc(
    ident_t *loc_ref, kmp_int32 gtid, kmp_int32 flags, size_t sizeof_kmp_task_t,
    size_t sizeof_shareds, kmp_routine_entry_t task_entry, kmp_int64 device_id);
KMP_EXPORT void __kmpc_omp_task_begin_if0(ident_t *loc_ref, kmp_int32 gtid,
                                          kmp_task_t *task);
KMP_EXPORT void __kmpc_omp_task_complete_if0(ident_t *loc_ref, kmp_int32 gtid,
                                             kmp_task_t *task);
KMP_EXPORT kmp_int32 __kmpc_omp_task_parts(ident_t *loc_ref, kmp_int32 gtid,
                                           kmp_task_t *new_task);
KMP_EXPORT kmp_int32 __kmpc_omp_taskwait(ident_t *loc_ref, kmp_int32 gtid);

KMP_EXPORT kmp_int32 __kmpc_omp_taskyield(ident_t *loc_ref, kmp_int32 gtid,
                                          int end_part);

#if TASK_UNUSED
void __kmpc_omp_task_begin(ident_t *loc_ref, kmp_int32 gtid, kmp_task_t *task);
void __kmpc_omp_task_complete(ident_t *loc_ref, kmp_int32 gtid,
                              kmp_task_t *task);
#endif // TASK_UNUSED

/* ------------------------------------------------------------------------ */

KMP_EXPORT void __kmpc_taskgroup(ident_t *loc, int gtid);
KMP_EXPORT void __kmpc_end_taskgroup(ident_t *loc, int gtid);

KMP_EXPORT kmp_int32 __kmpc_omp_task_with_deps(
    ident_t *loc_ref, kmp_int32 gtid, kmp_task_t *new_task, kmp_int32 ndeps,
    kmp_depend_info_t *dep_list, kmp_int32 ndeps_noalias,
    kmp_depend_info_t *noalias_dep_list);
KMP_EXPORT void __kmpc_omp_wait_deps(ident_t *loc_ref, kmp_int32 gtid,
                                     kmp_int32 ndeps,
                                     kmp_depend_info_t *dep_list,
                                     kmp_int32 ndeps_noalias,
                                     kmp_depend_info_t *noalias_dep_list);
extern kmp_int32 __kmp_omp_task(kmp_int32 gtid, kmp_task_t *new_task,
                                bool serialize_immediate);
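// --- Illustrative sketch (editorial addition, not in upstream kmp.h): the
// shape of compiler-generated lowering for "#pragma omp task". The flags
// value 1 marks a tied task; in real codegen sizeof_kmp_task_t covers the
// task struct plus private data. "example_*" names are hypothetical.
static kmp_int32 example_task_entry(kmp_int32 gtid, void *part) {
  kmp_task_t *task = (kmp_task_t *)part;
  // ... task body, reading captured values via task->shareds ...
  (void)task;
  return 0;
}
static inline void example_spawn_task(ident_t *loc, kmp_int32 gtid) {
  kmp_task_t *task = __kmpc_omp_task_alloc(loc, gtid, /*flags=*/1,
                                           sizeof(kmp_task_t),
                                           /*sizeof_shareds=*/0,
                                           &example_task_entry);
  __kmpc_omp_task(loc, gtid, task); // enqueue, or execute immediately
  __kmpc_omp_taskwait(loc, gtid); // wait on children of the current task
}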
KMP_EXPORT kmp_int32 __kmpc_cancel(ident_t *loc_ref, kmp_int32 gtid,
                                   kmp_int32 cncl_kind);
KMP_EXPORT kmp_int32 __kmpc_cancellationpoint(ident_t *loc_ref, kmp_int32 gtid,
                                              kmp_int32 cncl_kind);
KMP_EXPORT kmp_int32 __kmpc_cancel_barrier(ident_t *loc_ref, kmp_int32 gtid);
KMP_EXPORT int __kmp_get_cancellation_status(int cancel_kind);
KMP_EXPORT void __kmpc_taskloop(ident_t *loc, kmp_int32 gtid, kmp_task_t *task,
                                kmp_int32 if_val, kmp_uint64 *lb,
                                kmp_uint64 *ub, kmp_int64 st, kmp_int32 nogroup,
                                kmp_int32 sched, kmp_uint64 grainsize,
                                void *task_dup);
KMP_EXPORT void __kmpc_taskloop_5(ident_t *loc, kmp_int32 gtid,
                                  kmp_task_t *task, kmp_int32 if_val,
                                  kmp_uint64 *lb, kmp_uint64 *ub, kmp_int64 st,
                                  kmp_int32 nogroup, kmp_int32 sched,
                                  kmp_uint64 grainsize, kmp_int32 modifier,
                                  void *task_dup);
KMP_EXPORT void *__kmpc_task_reduction_modifier_init(ident_t *loc, int gtid,
                                                     int is_ws, int num,
                                                     void *data);
KMP_EXPORT kmp_int32 __kmpc_omp_reg_task_with_affinity(
    ident_t *loc_ref, kmp_int32 gtid, kmp_task_t *new_task, kmp_int32 naffins,
    kmp_task_affinity_info_t *affin_list);
KMP_EXPORT void __kmp_set_num_teams(int num_teams);
KMP_EXPORT int __kmp_get_max_teams(void);
KMP_EXPORT void __kmp_set_teams_thread_limit(int limit);
KMP_EXPORT int __kmp_get_teams_thread_limit(void);
/* Lock interface routines (fast versions with gtid passed in) */
KMP_EXPORT void __kmpc_init_lock(ident_t *loc, kmp_int32 gtid,
                                 void **user_lock);
KMP_EXPORT void __kmpc_init_nest_lock(ident_t *loc, kmp_int32 gtid,
                                      void **user_lock);
KMP_EXPORT void __kmpc_destroy_lock(ident_t *loc, kmp_int32 gtid,
                                    void **user_lock);
KMP_EXPORT void __kmpc_destroy_nest_lock(ident_t *loc, kmp_int32 gtid,
                                         void **user_lock);
KMP_EXPORT void __kmpc_set_lock(ident_t *loc, kmp_int32 gtid,
                                void **user_lock);
KMP_EXPORT void __kmpc_set_nest_lock(ident_t *loc, kmp_int32 gtid,
                                     void **user_lock);
KMP_EXPORT void __kmpc_unset_lock(ident_t *loc, kmp_int32 gtid,
                                  void **user_lock);
KMP_EXPORT void __kmpc_unset_nest_lock(ident_t *loc, kmp_int32 gtid,
                                       void **user_lock);
KMP_EXPORT int __kmpc_test_lock(ident_t *loc, kmp_int32 gtid,
                                void **user_lock);
KMP_EXPORT int __kmpc_test_nest_lock(ident_t *loc, kmp_int32 gtid,
                                     void **user_lock);
KMP_EXPORT void __kmpc_init_lock_with_hint(ident_t *loc, kmp_int32 gtid,
                                           void **user_lock, uintptr_t hint);
KMP_EXPORT void __kmpc_init_nest_lock_with_hint(ident_t *loc, kmp_int32 gtid,
                                                void **user_lock,
                                                uintptr_t hint);
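// --- Illustrative sketch (editorial addition, not in upstream kmp.h): the
// lifetime of an OpenMP lock as the compiler drives it through the entry
// points above. "example_lock" is a hypothetical user-lock slot.
static inline void example_locked_region(ident_t *loc, kmp_int32 gtid,
                                         void **example_lock) {
  __kmpc_init_lock(loc, gtid, example_lock);
  __kmpc_set_lock(loc, gtid, example_lock); // blocks until acquired
  // ... critical work ...
  __kmpc_unset_lock(loc, gtid, example_lock);
  __kmpc_destroy_lock(loc, gtid, example_lock);
}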
/* Interface to fast scalable reduce methods routines */

KMP_EXPORT kmp_int32 __kmpc_reduce_nowait(
    ident_t *loc, kmp_int32 global_tid, kmp_int32 num_vars, size_t reduce_size,
    void *reduce_data, void (*reduce_func)(void *lhs_data, void *rhs_data),
    kmp_critical_name *lck);
KMP_EXPORT void __kmpc_end_reduce_nowait(ident_t *loc, kmp_int32 global_tid,
                                         kmp_critical_name *lck);
KMP_EXPORT kmp_int32 __kmpc_reduce(
    ident_t *loc, kmp_int32 global_tid, kmp_int32 num_vars, size_t reduce_size,
    void *reduce_data, void (*reduce_func)(void *lhs_data, void *rhs_data),
    kmp_critical_name *lck);
KMP_EXPORT void __kmpc_end_reduce(ident_t *loc, kmp_int32 global_tid,
                                  kmp_critical_name *lck);

/* Internal fast reduction routines */

extern PACKED_REDUCTION_METHOD_T __kmp_determine_reduction_method(
    ident_t *loc, kmp_int32 global_tid, kmp_int32 num_vars, size_t reduce_size,
    void *reduce_data, void (*reduce_func)(void *lhs_data, void *rhs_data),
    kmp_critical_name *lck);

// this function is for testing set/get/determine reduce method
KMP_EXPORT kmp_int32 __kmp_get_reduce_method(void);
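// --- Illustrative sketch (editorial addition, not in upstream kmp.h): shape
// of a compiler-generated reduction epilogue. reduce_data packs the private
// copies; reduce_func folds rhs into lhs. Per the usual contract (assumption
// stated here, not guaranteed by this header alone): return 1 means this
// thread combines and must call the end routine; 2 means use the atomic path;
// 0 means nothing to do. "example_reduce" is hypothetical.
static inline void example_reduce(ident_t *loc, kmp_int32 gtid,
                                  void *reduce_data, size_t reduce_size,
                                  void (*reduce_func)(void *, void *),
                                  kmp_critical_name *lck) {
  kmp_int32 rc = __kmpc_reduce_nowait(loc, gtid, /*num_vars=*/1, reduce_size,
                                      reduce_data, reduce_func, lck);
  if (rc == 1) {
    // ... combine private copies into the shared result ...
    __kmpc_end_reduce_nowait(loc, gtid, lck);
  } else if (rc == 2) {
    // ... combine using atomics; no end call on this path ...
  }
}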
KMP_EXPORT kmp_uint64 __kmpc_get_taskid();
KMP_EXPORT kmp_uint64 __kmpc_get_parent_taskid();
KMP_EXPORT void __kmpc_pop_num_threads(ident_t *loc, kmp_int32 global_tid);
KMP_EXPORT void __kmpc_push_num_threads(ident_t *loc, kmp_int32 global_tid,
                                        kmp_int32 num_threads);

KMP_EXPORT void __kmpc_push_proc_bind(ident_t *loc, kmp_int32 global_tid,
                                      int proc_bind);
KMP_EXPORT void __kmpc_push_num_teams(ident_t *loc, kmp_int32 global_tid,
                                      kmp_int32 num_teams,
                                      kmp_int32 num_threads);
KMP_EXPORT void __kmpc_push_num_teams_51(ident_t *loc, kmp_int32 global_tid,
                                         kmp_int32 num_teams_lb,
                                         kmp_int32 num_teams_ub,
                                         kmp_int32 num_threads);

KMP_EXPORT void __kmpc_doacross_init(ident_t *loc, kmp_int32 gtid,
                                     kmp_int32 num_dims,
                                     const struct kmp_dim *dims);
KMP_EXPORT void __kmpc_doacross_wait(ident_t *loc, kmp_int32 gtid,
                                     const kmp_int64 *vec);
KMP_EXPORT void __kmpc_doacross_post(ident_t *loc, kmp_int32 gtid,
                                     const kmp_int64 *vec);
KMP_EXPORT void __kmpc_doacross_fini(ident_t *loc, kmp_int32 gtid);
KMP_EXPORT void *__kmpc_threadprivate_cached(ident_t *loc, kmp_int32 global_tid,
                                             void *data, size_t size,
                                             void ***cache);

/* Symbols for MS mutual detection */
extern int _You_must_link_with_exactly_one_OpenMP_library;
extern int _You_must_link_with_Intel_OpenMP_library;
#if KMP_OS_WINDOWS && (KMP_VERSION_MAJOR > 4)
extern int _You_must_link_with_Microsoft_OpenMP_library;
#endif
// The routines below are not exported.
// Consider making them 'static' in corresponding source files.
void kmp_threadprivate_insert_private_data(int gtid, void *pc_addr,
                                           void *data_addr, size_t pc_size);
struct private_common *kmp_threadprivate_insert(int gtid, void *pc_addr,
                                                void *data_addr,
                                                size_t pc_size);
void __kmp_threadprivate_resize_cache(int newCapacity);
void __kmp_cleanup_threadprivate_caches();
// ompc_, kmpc_ entries moved from omp.h.
#if KMP_OS_WINDOWS
#define KMPC_CONVENTION __cdecl
#else
#define KMPC_CONVENTION
#endif

#ifndef __OMP_H
typedef enum omp_sched_t {
  omp_sched_static = 1,
  omp_sched_dynamic = 2,
  omp_sched_guided = 3,
  omp_sched_auto = 4
} omp_sched_t;
typedef void *kmp_affinity_mask_t;
#endif
KMP_EXPORT void KMPC_CONVENTION ompc_set_max_active_levels(int);
KMP_EXPORT void KMPC_CONVENTION ompc_set_schedule(omp_sched_t, int);
KMP_EXPORT int KMPC_CONVENTION ompc_get_ancestor_thread_num(int);
KMP_EXPORT int KMPC_CONVENTION ompc_get_team_size(int);
KMP_EXPORT int KMPC_CONVENTION
kmpc_set_affinity_mask_proc(int, kmp_affinity_mask_t *);
KMP_EXPORT int KMPC_CONVENTION
kmpc_unset_affinity_mask_proc(int, kmp_affinity_mask_t *);
KMP_EXPORT int KMPC_CONVENTION
kmpc_get_affinity_mask_proc(int, kmp_affinity_mask_t *);

KMP_EXPORT void KMPC_CONVENTION kmpc_set_stacksize(int);
KMP_EXPORT void KMPC_CONVENTION kmpc_set_stacksize_s(size_t);
KMP_EXPORT void KMPC_CONVENTION kmpc_set_library(int);
KMP_EXPORT void KMPC_CONVENTION kmpc_set_defaults(char const *);
KMP_EXPORT void KMPC_CONVENTION kmpc_set_disp_num_buffers(int);
void KMP_EXPAND_NAME(ompc_set_affinity_format)(char const *format);
size_t KMP_EXPAND_NAME(ompc_get_affinity_format)(char *buffer, size_t size);
void KMP_EXPAND_NAME(ompc_display_affinity)(char const *format);
size_t KMP_EXPAND_NAME(ompc_capture_affinity)(char *buffer, size_t buf_size,
                                              char const *format);
enum kmp_target_offload_kind {
  tgt_disabled = 0,
  tgt_default = 1,
  tgt_mandatory = 2
};
typedef enum kmp_target_offload_kind kmp_target_offload_kind_t;
// Set via OMP_TARGET_OFFLOAD if specified, defaults to tgt_default otherwise
extern kmp_target_offload_kind_t __kmp_target_offload;
extern int __kmpc_get_target_offload();

// Constants used in libomptarget
#define KMP_DEVICE_DEFAULT -1 // This is libomptarget's default device.
#define KMP_DEVICE_ALL -11 // This is libomptarget's "all devices".
// OMP Pause Resource
// The following enum is used both to set the status in __kmp_pause_status, and
// as the internal equivalent of the externally-visible omp_pause_resource_t.
typedef enum kmp_pause_status_t {
  kmp_not_paused = 0, // status is not paused, or, requesting resume
  kmp_soft_paused = 1, // status is soft-paused, or, requesting soft pause
  kmp_hard_paused = 2 // status is hard-paused, or, requesting hard pause
} kmp_pause_status_t;

// This stores the pause state of the runtime
extern kmp_pause_status_t __kmp_pause_status;
extern int __kmpc_pause_resource(kmp_pause_status_t level);
extern int __kmp_pause_resource(kmp_pause_status_t level);
// Soft resume sets __kmp_pause_status, and wakes up all threads.
extern void __kmp_resume_if_soft_paused();
// Hard resume simply resets the status to not paused. Library will recover in
// the next parallel region (if same thread numbers are used).
static inline void __kmp_resume_if_hard_paused() {
  if (__kmp_pause_status == kmp_hard_paused) {
    __kmp_pause_status = kmp_not_paused;
  }
}
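// --- Illustrative sketch (editorial addition, not in upstream kmp.h): a
// resource-releasing soft pause and the matching resume, using the entry
// points above. The return convention (0 on success) is an assumption taken
// from the omp_pause_resource semantics, not from this header.
static inline void example_pause_cycle() {
  if (__kmp_pause_resource(kmp_soft_paused) == 0) {
    // Worker threads sleep and CPU resources are released until resumed.
    __kmp_resume_if_soft_paused();
  }
}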
extern void __kmp_omp_display_env(int verbose);
// 1: it is initializing hidden helper team
extern volatile int __kmp_init_hidden_helper;
// 1: the hidden helper team is done
extern volatile int __kmp_hidden_helper_team_done;
// 1: enable hidden helper task
extern kmp_int32 __kmp_enable_hidden_helper;
// Main thread of hidden helper team
extern kmp_info_t *__kmp_hidden_helper_main_thread;
// Descriptors for the hidden helper threads
extern kmp_info_t **__kmp_hidden_helper_threads;
// Number of hidden helper threads
extern kmp_int32 __kmp_hidden_helper_threads_num;
// Number of hidden helper tasks that have not been executed yet
extern std::atomic<kmp_int32> __kmp_unexecuted_hidden_helper_tasks;

extern void __kmp_hidden_helper_initialize();
extern void __kmp_hidden_helper_threads_initz_routine();
extern void __kmp_do_initialize_hidden_helper_threads();
extern void __kmp_hidden_helper_threads_initz_wait();
extern void __kmp_hidden_helper_initz_release();
extern void __kmp_hidden_helper_threads_deinitz_wait();
extern void __kmp_hidden_helper_threads_deinitz_release();
extern void __kmp_hidden_helper_main_thread_wait();
extern void __kmp_hidden_helper_worker_thread_wait();
extern void __kmp_hidden_helper_worker_thread_signal();
extern void __kmp_hidden_helper_main_thread_release();
// Check whether a given thread is a hidden helper thread
#define KMP_HIDDEN_HELPER_THREAD(gtid)                                         \
  ((gtid) >= 1 && (gtid) <= __kmp_hidden_helper_threads_num)

#define KMP_HIDDEN_HELPER_WORKER_THREAD(gtid)                                  \
  ((gtid) > 1 && (gtid) <= __kmp_hidden_helper_threads_num)

#define KMP_HIDDEN_HELPER_TEAM(team)                                           \
  (team->t.t_threads[0] == __kmp_hidden_helper_main_thread)

// Map a gtid to a hidden helper thread. The first hidden helper thread, a.k.a
// the main thread of hidden helper team, is skipped.
#define KMP_GTID_TO_SHADOW_GTID(gtid)                                          \
  ((gtid) % (__kmp_hidden_helper_threads_num - 1) + 2)
// Return the adjusted gtid value by subtracting from gtid the number of hidden
// helper threads
static inline int __kmp_adjust_gtid_for_hidden_helpers(int gtid) {
  int adjusted_gtid = gtid;
  if (__kmp_hidden_helper_threads_num > 0 && gtid > 0 &&
      gtid - __kmp_hidden_helper_threads_num >= 0) {
    adjusted_gtid -= __kmp_hidden_helper_threads_num;
  }
  return adjusted_gtid;
}
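// --- Worked example (editorial addition, not in upstream kmp.h): with N = 8
// hidden helper threads occupying gtids 1..8, the adjustment above yields
//   __kmp_adjust_gtid_for_hidden_helpers(0) == 0  (initial thread)
//   __kmp_adjust_gtid_for_hidden_helpers(9) == 1  (first regular worker)
// while KMP_GTID_TO_SHADOW_GTID maps regular gtids onto helper slots 2..8.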
// Support for error directive
typedef enum kmp_severity_t {
  severity_warning = 1,
  severity_fatal = 2
} kmp_severity_t;
extern void __kmpc_error(ident_t *loc, int severity, const char *message);
// Support for scope directive
KMP_EXPORT void __kmpc_scope(ident_t *loc, kmp_int32 gtid, void *reserved);
KMP_EXPORT void __kmpc_end_scope(ident_t *loc, kmp_int32 gtid, void *reserved);
template <bool C, bool S>
extern void __kmp_suspend_32(int th_gtid, kmp_flag_32<C, S> *flag);
template <bool C, bool S>
extern void __kmp_suspend_64(int th_gtid, kmp_flag_64<C, S> *flag);
template <bool C, bool S>
extern void __kmp_atomic_suspend_64(int th_gtid,
                                    kmp_atomic_flag_64<C, S> *flag);
extern void __kmp_suspend_oncore(int th_gtid, kmp_flag_oncore *flag);
#if KMP_HAVE_MWAIT || KMP_HAVE_UMWAIT
template <bool C, bool S>
extern void __kmp_mwait_32(int th_gtid, kmp_flag_32<C, S> *flag);
template <bool C, bool S>
extern void __kmp_mwait_64(int th_gtid, kmp_flag_64<C, S> *flag);
template <bool C, bool S>
extern void __kmp_atomic_mwait_64(int th_gtid, kmp_atomic_flag_64<C, S> *flag);
extern void __kmp_mwait_oncore(int th_gtid, kmp_flag_oncore *flag);
#endif
template <bool C, bool S>
extern void __kmp_resume_32(int target_gtid, kmp_flag_32<C, S> *flag);
template <bool C, bool S>
extern void __kmp_resume_64(int target_gtid, kmp_flag_64<C, S> *flag);
template <bool C, bool S>
extern void __kmp_atomic_resume_64(int target_gtid,
                                   kmp_atomic_flag_64<C, S> *flag);
extern void __kmp_resume_oncore(int target_gtid, kmp_flag_oncore *flag);

template <bool C, bool S>
int __kmp_execute_tasks_32(kmp_info_t *thread, kmp_int32 gtid,
                           kmp_flag_32<C, S> *flag, int final_spin,
                           int *thread_finished,
#if USE_ITT_BUILD
                           void *itt_sync_obj,
#endif
                           kmp_int32 is_constrained);
template <bool C, bool S>
int __kmp_execute_tasks_64(kmp_info_t *thread, kmp_int32 gtid,
                           kmp_flag_64<C, S> *flag, int final_spin,
                           int *thread_finished,
#if USE_ITT_BUILD
                           void *itt_sync_obj,
#endif
                           kmp_int32 is_constrained);
template <bool C, bool S>
int __kmp_atomic_execute_tasks_64(kmp_info_t *thread, kmp_int32 gtid,
                                  kmp_atomic_flag_64<C, S> *flag,
                                  int final_spin, int *thread_finished,
#if USE_ITT_BUILD
                                  void *itt_sync_obj,
#endif
                                  kmp_int32 is_constrained);
int __kmp_execute_tasks_oncore(kmp_info_t *thread, kmp_int32 gtid,
                               kmp_flag_oncore *flag, int final_spin,
                               int *thread_finished,
#if USE_ITT_BUILD
                               void *itt_sync_obj,
#endif
                               kmp_int32 is_constrained);
extern int __kmp_nesting_mode;
extern int __kmp_nesting_mode_nlevels;
extern int *__kmp_nesting_nth_level;
extern void __kmp_init_nesting_mode();
extern void __kmp_set_nesting_mode_threads();
// RAII wrapper for a FILE*; closes the file on destruction (stdout and stderr
// are never closed).
class kmp_safe_raii_file_t {
  FILE *f;
  void close() {
    if (f && f != stdout && f != stderr) {
      fclose(f);
      f = nullptr;
    }
  }

public:
  kmp_safe_raii_file_t() : f(nullptr) {}
  kmp_safe_raii_file_t(const char *filename, const char *mode,
                       const char *env_var = nullptr)
      : f(nullptr) {
    open(filename, mode, env_var);
  }
  ~kmp_safe_raii_file_t() { close(); }
  // Open filename using mode; env_var, if non-null, names the environment
  // variable the filename came from (used in the fatal error message).
  void open(const char *filename, const char *mode,
            const char *env_var = nullptr) {
    KMP_ASSERT(!f);
    f = fopen(filename, mode);
    if (!f) {
      int code = errno;
      if (env_var)
        __kmp_fatal(KMP_MSG(CantOpenFileForReading, filename), KMP_ERR(code),
                    KMP_HNT(CheckEnvVar, env_var, filename), __kmp_msg_null);
      else
        __kmp_fatal(KMP_MSG(CantOpenFileForReading, filename), KMP_ERR(code),
                    __kmp_msg_null);
    }
  }
  // Like open(), but returns errno instead of aborting on failure.
  int try_open(const char *filename, const char *mode) {
    KMP_ASSERT(!f);
    f = fopen(filename, mode);
    return f ? 0 : errno;
  }
  operator bool() { return bool(f); }
  operator FILE *() { return f; }
};
template <typename SourceType, typename TargetType,
          bool isSourceSmaller = (sizeof(SourceType) < sizeof(TargetType)),
          bool isSourceEqual = (sizeof(SourceType) == sizeof(TargetType)),
          bool isSourceSigned = std::is_signed<SourceType>::value,
          bool isTargetSigned = std::is_signed<TargetType>::value>
struct kmp_convert {};
// Both types are signed; Source smaller
template <typename SourceType, typename TargetType>
struct kmp_convert<SourceType, TargetType, true, false, true, true> {
  static TargetType to(SourceType src) { return (TargetType)src; }
};
// Source equal
template <typename SourceType, typename TargetType>
struct kmp_convert<SourceType, TargetType, false, true, true, true> {
  static TargetType to(SourceType src) { return src; }
};
// Source bigger
template <typename SourceType, typename TargetType>
struct kmp_convert<SourceType, TargetType, false, false, true, true> {
  static TargetType to(SourceType src) {
    KMP_ASSERT(src <= static_cast<SourceType>(
                          (std::numeric_limits<TargetType>::max)()));
    KMP_ASSERT(src >= static_cast<SourceType>(
                          (std::numeric_limits<TargetType>::min)()));
    return (TargetType)src;
  }
};
// Source signed, Target unsigned
// Source smaller
template <typename SourceType, typename TargetType>
struct kmp_convert<SourceType, TargetType, true, false, true, false> {
  static TargetType to(SourceType src) {
    KMP_ASSERT(src >= 0);
    return (TargetType)src;
  }
};
// Source equal
template <typename SourceType, typename TargetType>
struct kmp_convert<SourceType, TargetType, false, true, true, false> {
  static TargetType to(SourceType src) {
    KMP_ASSERT(src >= 0);
    return (TargetType)src;
  }
};
// Source bigger
template <typename SourceType, typename TargetType>
struct kmp_convert<SourceType, TargetType, false, false, true, false> {
  static TargetType to(SourceType src) {
    KMP_ASSERT(src >= 0);
    KMP_ASSERT(src <= static_cast<SourceType>(
                          (std::numeric_limits<TargetType>::max)()));
    return (TargetType)src;
  }
};
// Source unsigned, Target signed
// Source smaller
template <typename SourceType, typename TargetType>
struct kmp_convert<SourceType, TargetType, true, false, false, true> {
  static TargetType to(SourceType src) { return (TargetType)src; }
};
// Source equal
template <typename SourceType, typename TargetType>
struct kmp_convert<SourceType, TargetType, false, true, false, true> {
  static TargetType to(SourceType src) {
    KMP_ASSERT(src <= static_cast<SourceType>(
                          (std::numeric_limits<TargetType>::max)()));
    return (TargetType)src;
  }
};
// Source bigger
template <typename SourceType, typename TargetType>
struct kmp_convert<SourceType, TargetType, false, false, false, true> {
  static TargetType to(SourceType src) {
    KMP_ASSERT(src <= static_cast<SourceType>(
                          (std::numeric_limits<TargetType>::max)()));
    return (TargetType)src;
  }
};
// Source unsigned, Target unsigned
// Source smaller
template <typename SourceType, typename TargetType>
struct kmp_convert<SourceType, TargetType, true, false, false, false> {
  static TargetType to(SourceType src) { return (TargetType)src; }
};
// Source equal
template <typename SourceType, typename TargetType>
struct kmp_convert<SourceType, TargetType, false, true, false, false> {
  static TargetType to(SourceType src) { return src; }
};
// Source bigger
template <typename SourceType, typename TargetType>
struct kmp_convert<SourceType, TargetType, false, false, false, false> {
  static TargetType to(SourceType src) {
    KMP_ASSERT(src <= static_cast<SourceType>(
                          (std::numeric_limits<TargetType>::max)()));
    return (TargetType)src;
  }
};
template <typename T1, typename T2>
static inline void __kmp_type_convert(T1 src, T2 *dest) {
  *dest = kmp_convert<T1, T2>::to(src);
}
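// --- Illustrative sketch (editorial addition, not in upstream kmp.h):
// __kmp_type_convert picks the kmp_convert specialization from the sizes and
// signedness of the two types, asserting when a narrowing or sign change
// would lose the value. "example_type_convert" is hypothetical.
static inline void example_type_convert() {
  kmp_int64 big = 42;
  kmp_int32 small;
  __kmp_type_convert(big, &small); // narrowing: asserts 42 fits in kmp_int32
  kmp_uint32 u;
  __kmp_type_convert(small, &u); // signed->unsigned: asserts small >= 0
}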