#ifdef THREAD_SYSTEM_DEPENDENT_IMPLEMENTATION

#define TIME_QUANTUM_USEC (10 * 1000)
#define RB_CONDATTR_CLOCK_MONOTONIC 1

#define native_thread_yield() Sleep(0)
#define unregister_ubf_list(th)
#define ubf_wakeup_all_threads() do {} while (0)
#define ubf_threads_empty() (1)
#define ubf_timer_disarm() do {} while (0)
#define ubf_list_atfork() do {} while (0)

static volatile DWORD ruby_native_thread_key = TLS_OUT_OF_INDEXES;
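/* The pthread implementation keeps a registry of threads whose
 * "unblocking functions" must be fired; on this port every thread waits
 * on its own interrupt event instead, so the ubf_* hooks above can be
 * no-op stubs.  The TLS slot maps an OS thread back to its rb_thread_t. */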
static void
w32_error(const char *func)
{
    LPVOID lpMsgBuf;
    DWORD err = GetLastError();
    if (FormatMessage(FORMAT_MESSAGE_ALLOCATE_BUFFER |
                      FORMAT_MESSAGE_FROM_SYSTEM |
                      FORMAT_MESSAGE_IGNORE_INSERTS,
                      NULL, err,
                      MAKELANGID(LANG_ENGLISH, SUBLANG_ENGLISH_US),
                      (LPTSTR)&lpMsgBuf, 0, NULL) == 0)
        FormatMessage(FORMAT_MESSAGE_ALLOCATE_BUFFER |
                      FORMAT_MESSAGE_FROM_SYSTEM |
                      FORMAT_MESSAGE_IGNORE_INSERTS,
                      NULL, err,
                      MAKELANGID(LANG_NEUTRAL, SUBLANG_DEFAULT),
                      (LPTSTR)&lpMsgBuf, 0, NULL);
    rb_bug("%s: %s", func, (char*)lpMsgBuf);
}
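/* Acquiring a Win32 mutex HANDLE goes through w32_wait_events rather
 * than a bare WaitForSingleObject, so the wait below can also be broken
 * by the calling thread's interrupt event. */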
static int
w32_mutex_lock(HANDLE lock)
{
    DWORD result;
    for (;;) {
        result = w32_wait_events(&lock, 1, INFINITE, 0);
        switch (result) {
          case WAIT_OBJECT_0:     return 0;                /* acquired */
          case WAIT_OBJECT_0 + 1: errno = EINTR; return 0; /* interrupted */
          case WAIT_TIMEOUT:      break;
          case WAIT_ABANDONED:
            rb_bug("win32_mutex_lock: WAIT_ABANDONED");
          default:
            rb_bug("win32_mutex_lock: unknown result (%ld)", result);
        }
    }
}
static HANDLE
w32_mutex_create(void)
{
    HANDLE lock = CreateMutex(NULL, FALSE, NULL);
    if (lock == NULL) w32_error("rb_native_mutex_initialize");
    return lock;
}
/* gvl_acquire */
    w32_mutex_lock(vm->gvl.lock);
    if (GVL_DEBUG) fprintf(stderr, "gvl acquire (%p): acquire\n", th);

/* gvl_yield: drop the lock, let another thread run, then retake it */
    native_thread_yield();

/* gvl_init */
    vm->gvl.lock = w32_mutex_create();
static rb_thread_t *
ruby_thread_from_native(void)
{
    return TlsGetValue(ruby_native_thread_key);
}

/* ruby_thread_set_native(th) */
    return TlsSetValue(ruby_native_thread_key, th);

/* Init_native_thread: the boot thread registers itself in the TLS slot
 * and turns the pseudo-handle from GetCurrentThread() into a real,
 * waitable handle */
    ruby_native_thread_key = TlsAlloc();
    ruby_thread_set_native(th);
    DuplicateHandle(GetCurrentProcess(), GetCurrentThread(),
                    GetCurrentProcess(), &th->thread_id,
                    0, FALSE, DUPLICATE_SAME_ACCESS);
    thread_debug("initial thread (th: %p, thid: %p, event: %p)\n",
                 th, th->thread_id, th->native_thread_data.interrupt_event);
static int
w32_wait_events(HANDLE *events, int count, DWORD timeout, rb_thread_t *th)
{
    HANDLE *targets = events;
    HANDLE intr;
    const int initcount = count;
    DWORD ret;

    thread_debug("  w32_wait_events events:%p, count:%d, timeout:%ld, th:%p\n",
                 events, count, timeout, th);
    if (th && (intr = th->native_thread_data.interrupt_event)) {
        if (ResetEvent(intr) && (!RUBY_VM_INTERRUPTED(th->ec) || SetEvent(intr))) {
            targets = ALLOCA_N(HANDLE, count + 1);
            memcpy(targets, events, sizeof(HANDLE) * count);
            targets[count++] = intr;
        }
        else if (intr == th->native_thread_data.interrupt_event) {
            w32_error("w32_wait_events");
        }
    }

    ret = WaitForMultipleObjects(count, targets, FALSE, timeout);
    thread_debug("  WaitForMultipleObjects end (ret: %lu)\n", ret);

    /* a wakeup via the extra slot means we were interrupted */
    if (ret == (DWORD)(WAIT_OBJECT_0 + initcount) && th) {
        errno = EINTR;
    }
    if (ret == WAIT_FAILED && THREAD_DEBUG) {
        int i;
        DWORD dmy;
        for (i = 0; i < count; i++) {
            thread_debug("  * error handle %d - %s\n", i,
                         GetHandleInformation(targets[i], &dmy) ? "OK" : "NG");
        }
    }
    return ret;
}
static void ubf_handle(void *ptr);
#define ubf_select ubf_handle

/* rb_w32_wait_events_blocking(events, num, timeout) */
    return w32_wait_events(events, num, timeout, ruby_thread_from_native());

/* rb_w32_wait_events: the blocking variant wrapped in BLOCKING_REGION so
 * the GVL is dropped and ubf_handle can break the wait */
    BLOCKING_REGION(th, ret = rb_w32_wait_events_blocking(events, num, timeout),
                    ubf_handle, ruby_thread_from_native(), FALSE);
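/* Usage sketch (hypothetical caller, not part of this file): wait up to
 * 100 ms on an event while staying interruptible by Thread#raise/#kill:
 *
 *     HANDLE ev = CreateEvent(0, FALSE, FALSE, 0);
 *     DWORD r = rb_w32_wait_events(&ev, 1, 100);
 *
 * The GVL is released for the duration, and the calling thread's
 * interrupt event is waited on alongside ev. */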
static void
w32_close_handle(HANDLE handle)
{
    if (CloseHandle(handle) == 0) {
        w32_error("w32_close_handle");
    }
}

static void
w32_resume_thread(HANDLE handle)
{
    if (ResumeThread(handle) == (DWORD)-1) {
        w32_error("w32_resume_thread");
    }
}
#ifdef _MSC_VER /* exact toolchain check elided in this excerpt */
#define HAVE__BEGINTHREADEX 1
#else
#undef HAVE__BEGINTHREADEX
#endif

#ifdef HAVE__BEGINTHREADEX
#define start_thread (HANDLE)_beginthreadex
#define thread_errno errno
typedef unsigned long (__stdcall *w32_thread_start_func)(void*);
#else
#define start_thread CreateThread
#define thread_errno rb_w32_map_errno(GetLastError())
typedef LPTHREAD_START_ROUTINE w32_thread_start_func;
#endif

static HANDLE
w32_create_thread(DWORD stack_size, w32_thread_start_func func, void *val)
{
    return start_thread(0, stack_size, func, val,
                        CREATE_SUSPENDED | STACK_SIZE_PARAM_IS_A_RESERVATION, 0);
}
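/* Every thread starts suspended (CREATE_SUSPENDED) so the caller can
 * finish its bookkeeping before w32_resume_thread lets it run, and
 * STACK_SIZE_PARAM_IS_A_RESERVATION makes stack_size a reservation
 * rather than an upfront commit. */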
/* rb_w32_sleep(unsigned long msec) */
    return w32_wait_events(0, 0, msec, ruby_thread_from_native());

/* rb_w32_Sleep: the same wait wrapped in BLOCKING_REGION so the GVL is
 * released and ubf_handle can interrupt it */
    BLOCKING_REGION(th, ret = rb_w32_sleep(msec),
                    ubf_handle, ruby_thread_from_native(), FALSE);

/* native_sleep(rb_thread_t *th, rb_hrtime_t *rel) */
    const volatile DWORD msec = rel ? hrtime2msec(*rel) : INFINITE;
    /* ... the unblock function is registered, then: */
    ret = w32_wait_events(0, 0, msec, th);
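/* The rb_native_mutex_* entry points below compile to one of two
 * implementations, selected by USE_WIN32_MUTEX: a kernel mutex HANDLE
 * (interruptible via w32_wait_events) or a plain CRITICAL_SECTION. */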
/* rb_native_mutex_lock */
#if USE_WIN32_MUTEX
    w32_mutex_lock(lock->mutex);
#else
    EnterCriticalSection(&lock->crit);
#endif

/* rb_native_mutex_unlock */
#if USE_WIN32_MUTEX
    ReleaseMutex(lock->mutex);
#else
    LeaveCriticalSection(&lock->crit);
#endif

/* native_mutex_trylock: a 1 ms wait doubles as the "try" */
    thread_debug("native_mutex_trylock: %p\n", lock->mutex);
    result = w32_wait_events(&lock->mutex, 1, 1, 0);
    thread_debug("native_mutex_trylock result: %d\n", result);

/* rb_native_mutex_initialize / rb_native_mutex_destroy */
#if USE_WIN32_MUTEX
    lock->mutex = w32_mutex_create();        /* destroy: w32_close_handle(lock->mutex); */
#else
    InitializeCriticalSection(&lock->crit);  /* destroy: DeleteCriticalSection(&lock->crit); */
#endif
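/* Condition variables: rb_nativethread_cond_t serves as the head of a
 * circular doubly-linked list of cond_event_entry nodes, one per waiter,
 * each carrying its own auto-reset event.  Signal wakes the first
 * waiter; broadcast walks the whole ring. */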
struct cond_event_entry {
    struct cond_event_entry* next;
    struct cond_event_entry* prev;
    HANDLE event;
};

/* rb_native_cond_signal: unlink the first waiter and wake it (the caller
 * holds the mutex guarding cond) */
    struct cond_event_entry *e = cond->next;
    struct cond_event_entry *head = (struct cond_event_entry*)cond;

    if (e != head) {
        struct cond_event_entry *next = e->next;
        struct cond_event_entry *prev = e->prev;
        prev->next = next;
        next->prev = prev;
        e->next = e->prev = e;
        SetEvent(e->event);
    }
/* rb_native_cond_broadcast: wake and unlink every waiter on the ring */
    struct cond_event_entry *e = cond->next;
    struct cond_event_entry *head = (struct cond_event_entry*)cond;

    while (e != head) {
        struct cond_event_entry *next = e->next;
        struct cond_event_entry *prev = e->prev;
        SetEvent(e->event);
        prev->next = next;
        next->prev = prev;
        e->next = e->prev = e;
        e = next;
    }
static int
native_cond_timedwait_ms(rb_nativethread_cond_t *cond, rb_nativethread_lock_t *mutex, unsigned long msec)
{
    DWORD r;
    struct cond_event_entry entry;
    struct cond_event_entry *head = (struct cond_event_entry*)cond;

    entry.event = CreateEvent(0, FALSE, FALSE, 0);

    /* link this waiter onto the tail of the ring (mutex is held) */
    entry.next = head;
    entry.prev = head->prev;
    head->prev->next = &entry;
    head->prev = &entry;

    rb_native_mutex_unlock(mutex);
    r = WaitForSingleObject(entry.event, msec);
    if ((r != WAIT_OBJECT_0) && (r != WAIT_TIMEOUT)) {
        rb_bug("rb_native_cond_wait: WaitForSingleObject returns %lu", r);
    }
    rb_native_mutex_lock(mutex);

    /* unlink; if a signal already removed us this is a harmless no-op */
    entry.prev->next = entry.next;
    entry.next->prev = entry.prev;

    w32_close_handle(entry.event);
    return (r == WAIT_OBJECT_0) ? 0 : ETIMEDOUT;
}
/* rb_native_cond_wait */
    native_cond_timedwait_ms(cond, mutex, INFINITE);

/* abs_timespec_to_timeout_ms(const struct timespec *ts): convert an
 * absolute deadline into the relative millisecond count Win32 expects */
    tv.tv_sec = ts->tv_sec;
    tv.tv_usec = ts->tv_nsec / 1000;
    /* ... subtract the current time; 0 means the deadline has passed ... */
    return (tv.tv_sec * 1000) + (tv.tv_usec / 1000);

/* native_cond_timedwait */
    timeout_ms = abs_timespec_to_timeout_ms(ts);
    if (!timeout_ms) return ETIMEDOUT;
    return native_cond_timedwait_ms(cond, mutex, timeout_ms);
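/* Win32 waits take relative milliseconds, so POSIX-style absolute
 * deadlines are re-derived from the current clock on each entry; a
 * deadline already in the past maps to ETIMEDOUT without waiting. */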
/* native_cond_timeout: compute now + timeout_rel as an absolute timespec */
    now.tv_sec = tv.tv_sec;
    now.tv_nsec = tv.tv_usec * 1000;

    timeout.tv_sec = now.tv_sec;
    timeout.tv_nsec = now.tv_nsec;
    timeout.tv_sec += timeout_rel.tv_sec;
    timeout.tv_nsec += timeout_rel.tv_nsec;

    if (timeout.tv_nsec >= 1000*1000*1000) {
        timeout.tv_sec++;
        timeout.tv_nsec -= 1000*1000*1000;
    }

    if (timeout.tv_sec < now.tv_sec)
        timeout.tv_sec = TIMET_MAX; /* clamp on time_t overflow */
/* rb_native_cond_initialize: an empty ring points back at its own head */
    cond->next = (struct cond_event_entry *)cond;
    cond->prev = (struct cond_event_entry *)cond;
#define CHECK_ERR(expr) \
    {if (!(expr)) {rb_bug("err: %lu - %s", GetLastError(), #expr);}}

/* native_thread_init_stack: locate the current stack region by querying
 * the address of a local variable */
    MEMORY_BASIC_INFORMATION mi;
    CHECK_ERR(VirtualQuery(&mi, &mi, sizeof(mi)));
    base = mi.AllocationBase;
    end = mi.BaseAddress;
    end += mi.RegionSize;
    size = end - base;
    space = size / 5;
    if (space > 1024*1024) space = 1024*1024; /* keep at most 1 MiB in reserve */
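/* The discovered bounds become the thread's machine stack start and
 * maxsize, which feed the conservative GC scan and the stack-overflow
 * check; one fifth of the region (capped above) is held back as
 * headroom. */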
#ifndef InterlockedExchangePointer
#define InterlockedExchangePointer(t, v) \
    (void *)InterlockedExchange((long *)(t), (long)(v))
#endif

/* native_thread_destroy: atomically detach the interrupt event before
 * closing it, defending against a concurrent ubf_handle */
    HANDLE intr = InterlockedExchangePointer(&th->native_thread_data.interrupt_event, 0);
    w32_close_handle(intr);
static unsigned long __stdcall
thread_start_func_1(void *th_ptr)
{
    rb_thread_t *th = th_ptr;
    volatile HANDLE thread_id = th->thread_id;

    native_thread_init_stack(th);
    th->native_thread_data.interrupt_event = CreateEvent(0, TRUE, FALSE, 0);

    thread_debug("thread created (th: %p, thid: %p, event: %p)\n", th,
                 th->thread_id, th->native_thread_data.interrupt_event);
    /* ... the Ruby-level thread body runs here ... */
    w32_close_handle(thread_id);
    return 0;
}

/* native_thread_create */
    th->thread_id = w32_create_thread(stack_size, thread_start_func_1, th);
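/* thread_id is copied into a volatile local up front, presumably because
 * th may already be recycled by the time the exiting thread closes its
 * own handle at the bottom of the trampoline. */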
static void
native_thread_join(HANDLE th)
{
    w32_wait_events(&th, 1, INFINITE, 0);
}
#if USE_NATIVE_THREAD_PRIORITY

/* native_thread_apply_priority: collapse Ruby's priority range onto the
 * three standard Win32 levels */
    if (th->priority > 0)
        priority = THREAD_PRIORITY_ABOVE_NORMAL;
    else if (th->priority < 0)
        priority = THREAD_PRIORITY_BELOW_NORMAL;
    else
        priority = THREAD_PRIORITY_NORMAL;
    SetThreadPriority(th->thread_id, priority);

#endif /* USE_NATIVE_THREAD_PRIORITY */
/* rb_w32_check_interrupt(th): a zero-timeout wait doubles as a poll for
 * a pending interrupt */
    return w32_wait_events(0, 0, 0, th);
static void
ubf_handle(void *ptr)
{
    rb_thread_t *th = (rb_thread_t *)ptr;
    thread_debug("ubf_handle: %p\n", th);

    if (!SetEvent(th->native_thread_data.interrupt_event)) {
        w32_error("ubf_handle");
    }
}
#define native_set_another_thread_name rb_w32_set_thread_description_str
static struct {
    HANDLE id;
    HANDLE lock;
} timer_thread;
#define TIMER_THREAD_CREATED_P() (timer_thread.id != 0)

static unsigned long __stdcall
timer_thread_func(void *dummy)
{
    thread_debug("timer_thread\n");
    while (WaitForSingleObject(timer_thread.lock,
                               TIME_QUANTUM_USEC/1000) == WAIT_TIMEOUT) {
        timer_thread_function();
    }
    thread_debug("timer killed\n");
    return 0;
}
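/* The shutdown event doubles as the tick source: while nobody sets
 * timer_thread.lock, WaitForSingleObject times out every 10 ms
 * (TIME_QUANTUM_USEC/1000) and drives timer_thread_function(); setting
 * the event makes the wait succeed and ends the loop. */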
/* rb_thread_start_unblock_thread(void) */

static void
rb_thread_create_timer_thread(void)
{
    if (timer_thread.id == 0) {
        if (!timer_thread.lock) {
            timer_thread.lock = CreateEvent(0, TRUE, FALSE, 0);
        }
        timer_thread.id = w32_create_thread(1024 + (THREAD_DEBUG ? BUFSIZ : 0),
                                            timer_thread_func, 0);
        w32_resume_thread(timer_thread.id);
    }
}
static int
native_stop_timer_thread(void)
{
    int stopped = --system_working <= 0;
    if (stopped) {
        SetEvent(timer_thread.lock);
        native_thread_join(timer_thread.id);
        CloseHandle(timer_thread.lock);
        timer_thread.lock = 0;
    }
    return stopped;
}

static void
native_reset_timer_thread(void)
{
    if (timer_thread.id) {
        CloseHandle(timer_thread.id);
        timer_thread.id = 0;
    }
}
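/* Stopping signals the event so timer_thread_func's wait returns
 * WAIT_OBJECT_0 and its loop exits; the join then waits for the thread
 * to finish before its handles are closed. */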
/* ruby_stack_overflowed_p(const rb_thread_t *th, const void *addr) */

#if defined(__MINGW32__)
LONG WINAPI
rb_w32_stack_overflow_handler(struct _EXCEPTION_POINTERS *exception)
{
    if (exception->ExceptionRecord->ExceptionCode == EXCEPTION_STACK_OVERFLOW) {
        rb_ec_raised_set(GET_EC(), RAISED_STACKOVERFLOW);
        raise(SIGSEGV);
    }
    return EXCEPTION_CONTINUE_SEARCH;
}
#endif
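/* On MinGW builds there is no MSVC-style __try/__except, so stack
 * overflow is caught by this handler (registered elsewhere in the
 * Windows support code as a vectored exception handler) and converted
 * into a Ruby-level error via the RAISED_STACKOVERFLOW flag. */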
#ifdef RUBY_ALLOCA_CHKSTK
void
ruby_alloca_chkstk(size_t len, void *sp)
{
    /* ... */
}
#endif

/* Two POSIX-only entry points (the sigwait-fd hooks) are hard stubs: */
    rb_bug("not implemented, should not be called");
    rb_bug("not implemented, should not be called");
/* rb_nativethread_self(void) */
    return GetCurrentThread();
/* mjit_worker: thread trampoline that simply invokes worker_func */
static unsigned long __stdcall
mjit_worker(void *arg)
{
    void (*worker_func)(void) = (void(*)(void))arg;
    worker_func();
    return 0;
}

int
rb_thread_create_mjit_thread(void (*worker_func)(void))
{
    size_t stack_size = 4 * 1024;
    HANDLE thread_id = w32_create_thread(stack_size, mjit_worker, worker_func);
    if (thread_id == 0) {
        return FALSE;
    }

    w32_resume_thread(thread_id);
    return TRUE;
}
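/* The MJIT worker asks for only a 4 KiB stack reservation, presumably
 * because it mostly waits on an external compiler process; like every
 * other thread here it starts suspended and only runs once
 * w32_resume_thread is called. */

#endif /* THREAD_SYSTEM_DEPENDENT_IMPLEMENTATION */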