// LLVM OpenMP* Runtime Library
// z_Windows_NT-586_util.cpp
1 /*
2  * z_Windows_NT-586_util.cpp -- platform specific routines.
3  */
4 
5 //===----------------------------------------------------------------------===//
6 //
7 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
8 // See https://llvm.org/LICENSE.txt for license information.
9 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
10 //
11 //===----------------------------------------------------------------------===//
12 
13 #include "kmp.h"
14 
15 #if (KMP_ARCH_X86 || KMP_ARCH_X86_64 || KMP_ARCH_AARCH64 || KMP_ARCH_ARM || \
16  KMP_ARCH_ARM64EC)
17 /* Only 32-bit "add-exchange" instruction on IA-32 architecture causes us to
18  use compare_and_store for these routines */
19 
20 kmp_int8 __kmp_test_then_or8(volatile kmp_int8 *p, kmp_int8 d) {
21  kmp_int8 old_value, new_value;
22 
23  old_value = TCR_1(*p);
24  new_value = old_value | d;
25 
26  while (!KMP_COMPARE_AND_STORE_REL8(p, old_value, new_value)) {
27  KMP_CPU_PAUSE();
28  old_value = TCR_1(*p);
29  new_value = old_value | d;
30  }
31  return old_value;
32 }
33 
34 kmp_int8 __kmp_test_then_and8(volatile kmp_int8 *p, kmp_int8 d) {
35  kmp_int8 old_value, new_value;
36 
37  old_value = TCR_1(*p);
38  new_value = old_value & d;
39 
40  while (!KMP_COMPARE_AND_STORE_REL8(p, old_value, new_value)) {
41  KMP_CPU_PAUSE();
42  old_value = TCR_1(*p);
43  new_value = old_value & d;
44  }
45  return old_value;
46 }
47 
48 kmp_uint32 __kmp_test_then_or32(volatile kmp_uint32 *p, kmp_uint32 d) {
49  kmp_uint32 old_value, new_value;
50 
51  old_value = TCR_4(*p);
52  new_value = old_value | d;
53 
54  while (!KMP_COMPARE_AND_STORE_REL32((volatile kmp_int32 *)p, old_value,
55  new_value)) {
56  KMP_CPU_PAUSE();
57  old_value = TCR_4(*p);
58  new_value = old_value | d;
59  }
60  return old_value;
61 }
62 
63 kmp_uint32 __kmp_test_then_and32(volatile kmp_uint32 *p, kmp_uint32 d) {
64  kmp_uint32 old_value, new_value;
65 
66  old_value = TCR_4(*p);
67  new_value = old_value & d;
68 
69  while (!KMP_COMPARE_AND_STORE_REL32((volatile kmp_int32 *)p, old_value,
70  new_value)) {
71  KMP_CPU_PAUSE();
72  old_value = TCR_4(*p);
73  new_value = old_value & d;
74  }
75  return old_value;
76 }
77 
78 #if KMP_ARCH_X86 || KMP_ARCH_X86_64
79 kmp_int8 __kmp_test_then_add8(volatile kmp_int8 *p, kmp_int8 d) {
80  kmp_int64 old_value, new_value;
81 
82  old_value = TCR_1(*p);
83  new_value = old_value + d;
84  while (!__kmp_compare_and_store8(p, old_value, new_value)) {
85  KMP_CPU_PAUSE();
86  old_value = TCR_1(*p);
87  new_value = old_value + d;
88  }
89  return old_value;
90 }
91 
92 #if KMP_ARCH_X86
93 kmp_int64 __kmp_test_then_add64(volatile kmp_int64 *p, kmp_int64 d) {
94  kmp_int64 old_value, new_value;
95 
96  old_value = TCR_8(*p);
97  new_value = old_value + d;
98  while (!__kmp_compare_and_store64(p, old_value, new_value)) {
99  KMP_CPU_PAUSE();
100  old_value = TCR_8(*p);
101  new_value = old_value + d;
102  }
103  return old_value;
104 }
105 #endif /* KMP_ARCH_X86 */
106 #endif /* KMP_ARCH_X86 || KMP_ARCH_X86_64 */
107 
108 kmp_uint64 __kmp_test_then_or64(volatile kmp_uint64 *p, kmp_uint64 d) {
109  kmp_uint64 old_value, new_value;
110 
111  old_value = TCR_8(*p);
112  new_value = old_value | d;
113  while (!KMP_COMPARE_AND_STORE_REL64((volatile kmp_int64 *)p, old_value,
114  new_value)) {
115  KMP_CPU_PAUSE();
116  old_value = TCR_8(*p);
117  new_value = old_value | d;
118  }
119 
120  return old_value;
121 }
122 
123 kmp_uint64 __kmp_test_then_and64(volatile kmp_uint64 *p, kmp_uint64 d) {
124  kmp_uint64 old_value, new_value;
125 
126  old_value = TCR_8(*p);
127  new_value = old_value & d;
128  while (!KMP_COMPARE_AND_STORE_REL64((volatile kmp_int64 *)p, old_value,
129  new_value)) {
130  KMP_CPU_PAUSE();
131  old_value = TCR_8(*p);
132  new_value = old_value & d;
133  }
134 
135  return old_value;
136 }
137 
138 #if KMP_ARCH_AARCH64 && KMP_COMPILER_MSVC
// For !KMP_COMPILER_MSVC, this function is provided in assembly form
// by z_Linux_asm.S.
//
// Trampoline that invokes an outlined microtask (parallel-region body) with
// the global/local thread ids followed by argc user arguments from p_argv.
// Always returns 1.  With OMPT_SUPPORT, *exit_frame_ptr is set to this
// frame's address on entry (so tools can walk runtime frames) and cleared
// before returning.
int __kmp_invoke_microtask(microtask_t pkfn, int gtid, int tid, int argc,
                           void *p_argv[]
#if OMPT_SUPPORT
                           ,
                           void **exit_frame_ptr
#endif
) {
#if OMPT_SUPPORT
  *exit_frame_ptr = OMPT_GET_FRAME_ADDRESS(0);
#endif

  // Dispatch on the argument count so that up to 6 user arguments (plus the
  // two thread-id pointers, 8 values total) are passed in registers.
  switch (argc) {
  case 0:
    (*pkfn)(&gtid, &tid);
    break;
  case 1:
    (*pkfn)(&gtid, &tid, p_argv[0]);
    break;
  case 2:
    (*pkfn)(&gtid, &tid, p_argv[0], p_argv[1]);
    break;
  case 3:
    (*pkfn)(&gtid, &tid, p_argv[0], p_argv[1], p_argv[2]);
    break;
  case 4:
    (*pkfn)(&gtid, &tid, p_argv[0], p_argv[1], p_argv[2], p_argv[3]);
    break;
  case 5:
    (*pkfn)(&gtid, &tid, p_argv[0], p_argv[1], p_argv[2], p_argv[3], p_argv[4]);
    break;
  default: {
    // p_argv[6] and onwards must be passed on the stack since 8 registers are
    // already used.
    size_t len = (argc - 6) * sizeof(void *);
    void *argbuf = alloca(len);
    memcpy(argbuf, &p_argv[6], len);
    // NOTE(review): this relies on the alloca'd buffer landing exactly where
    // the calling convention expects stack-passed arguments for the call in
    // case 6 below -- compiler/stack-layout dependent; confirm against the
    // Windows ARM64 ABI before modifying this function.
  }
    [[fallthrough]];
  case 6:
    (*pkfn)(&gtid, &tid, p_argv[0], p_argv[1], p_argv[2], p_argv[3], p_argv[4],
            p_argv[5]);
    break;
  }

#if OMPT_SUPPORT
  // Clear the frame marker on the way out.
  *exit_frame_ptr = 0;
#endif

  return 1;
}
191 #endif
192 
193 #endif /* KMP_ARCH_X86 || KMP_ARCH_X86_64 || KMP_ARCH_AARCH64 || KMP_ARCH_ARM \
194  || KMP_ARCH_ARM64EC */