Ruby  2.7.2p137(2020-10-01revision5445e0435260b449decf2ac16f9d09bae3cafe72)
thread_win32.c
Go to the documentation of this file.
1 /* -*-c-*- */
2 /**********************************************************************
3 
4  thread_win32.c -
5 
6  $Author$
7 
8  Copyright (C) 2004-2007 Koichi Sasada
9 
10 **********************************************************************/
11 
12 #ifdef THREAD_SYSTEM_DEPENDENT_IMPLEMENTATION
13 
14 #include <process.h>
15 
16 #define TIME_QUANTUM_USEC (10 * 1000)
17 #define RB_CONDATTR_CLOCK_MONOTONIC 1 /* no effect */
18 
19 #undef Sleep
20 
21 #define native_thread_yield() Sleep(0)
22 #define unregister_ubf_list(th)
23 #define ubf_wakeup_all_threads() do {} while (0)
24 #define ubf_threads_empty() (1)
25 #define ubf_timer_disarm() do {} while (0)
26 #define ubf_list_atfork() do {} while (0)
27 
28 static volatile DWORD ruby_native_thread_key = TLS_OUT_OF_INDEXES;
29 
30 static int w32_wait_events(HANDLE *events, int count, DWORD timeout, rb_thread_t *th);
33 
34 static void
35 w32_error(const char *func)
36 {
37  LPVOID lpMsgBuf;
38  DWORD err = GetLastError();
39  if (FormatMessage(FORMAT_MESSAGE_ALLOCATE_BUFFER |
40  FORMAT_MESSAGE_FROM_SYSTEM |
41  FORMAT_MESSAGE_IGNORE_INSERTS,
42  NULL,
43  err,
44  MAKELANGID(LANG_ENGLISH, SUBLANG_ENGLISH_US),
45  (LPTSTR) & lpMsgBuf, 0, NULL) == 0)
46  FormatMessage(FORMAT_MESSAGE_ALLOCATE_BUFFER |
47  FORMAT_MESSAGE_FROM_SYSTEM |
48  FORMAT_MESSAGE_IGNORE_INSERTS,
49  NULL,
50  err,
51  MAKELANGID(LANG_NEUTRAL, SUBLANG_DEFAULT),
52  (LPTSTR) & lpMsgBuf, 0, NULL);
53  rb_bug("%s: %s", func, (char*)lpMsgBuf);
54 }
55 
56 static int
57 w32_mutex_lock(HANDLE lock)
58 {
59  DWORD result;
60  while (1) {
61  thread_debug("rb_native_mutex_lock: %p\n", lock);
62  result = w32_wait_events(&lock, 1, INFINITE, 0);
63  switch (result) {
64  case WAIT_OBJECT_0:
65  /* get mutex object */
66  thread_debug("acquire mutex: %p\n", lock);
67  return 0;
68  case WAIT_OBJECT_0 + 1:
69  /* interrupt */
70  errno = EINTR;
71  thread_debug("acquire mutex interrupted: %p\n", lock);
72  return 0;
73  case WAIT_TIMEOUT:
74  thread_debug("timeout mutex: %p\n", lock);
75  break;
76  case WAIT_ABANDONED:
77  rb_bug("win32_mutex_lock: WAIT_ABANDONED");
78  break;
79  default:
80  rb_bug("win32_mutex_lock: unknown result (%ld)", result);
81  break;
82  }
83  }
84  return 0;
85 }
86 
87 static HANDLE
88 w32_mutex_create(void)
89 {
90  HANDLE lock = CreateMutex(NULL, FALSE, NULL);
91  if (lock == NULL) {
92  w32_error("rb_native_mutex_initialize");
93  }
94  return lock;
95 }
96 
97 #define GVL_DEBUG 0
98 
99 static void
100 gvl_acquire(rb_vm_t *vm, rb_thread_t *th)
101 {
102  w32_mutex_lock(vm->gvl.lock);
103  if (GVL_DEBUG) fprintf(stderr, "gvl acquire (%p): acquire\n", th);
104 }
105 
106 static void
107 gvl_release(rb_vm_t *vm)
108 {
109  ReleaseMutex(vm->gvl.lock);
110 }
111 
112 static void
113 gvl_yield(rb_vm_t *vm, rb_thread_t *th)
114 {
115  gvl_release(th->vm);
116  native_thread_yield();
117  gvl_acquire(vm, th);
118 }
119 
120 static void
121 gvl_init(rb_vm_t *vm)
122 {
123  if (GVL_DEBUG) fprintf(stderr, "gvl init\n");
124  vm->gvl.lock = w32_mutex_create();
125 }
126 
127 static void
128 gvl_destroy(rb_vm_t *vm)
129 {
130  if (GVL_DEBUG) fprintf(stderr, "gvl destroy\n");
131  CloseHandle(vm->gvl.lock);
132 }
133 
134 static rb_thread_t *
135 ruby_thread_from_native(void)
136 {
137  return TlsGetValue(ruby_native_thread_key);
138 }
139 
140 static int
141 ruby_thread_set_native(rb_thread_t *th)
142 {
143  return TlsSetValue(ruby_native_thread_key, th);
144 }
145 
146 void
148 {
149  ruby_native_thread_key = TlsAlloc();
150  ruby_thread_set_native(th);
151  DuplicateHandle(GetCurrentProcess(),
152  GetCurrentThread(),
153  GetCurrentProcess(),
154  &th->thread_id, 0, FALSE, DUPLICATE_SAME_ACCESS);
155 
156  th->native_thread_data.interrupt_event = CreateEvent(0, TRUE, FALSE, 0);
157 
158  thread_debug("initial thread (th: %p, thid: %p, event: %p)\n",
159  th, GET_THREAD()->thread_id,
161 }
162 
163 static int
164 w32_wait_events(HANDLE *events, int count, DWORD timeout, rb_thread_t *th)
165 {
166  HANDLE *targets = events;
167  HANDLE intr;
168  const int initcount = count;
169  DWORD ret;
170 
171  thread_debug(" w32_wait_events events:%p, count:%d, timeout:%ld, th:%p\n",
172  events, count, timeout, th);
173  if (th && (intr = th->native_thread_data.interrupt_event)) {
174  if (ResetEvent(intr) && (!RUBY_VM_INTERRUPTED(th->ec) || SetEvent(intr))) {
175  targets = ALLOCA_N(HANDLE, count + 1);
176  memcpy(targets, events, sizeof(HANDLE) * count);
177 
178  targets[count++] = intr;
179  thread_debug(" * handle: %p (count: %d, intr)\n", intr, count);
180  }
181  else if (intr == th->native_thread_data.interrupt_event) {
182  w32_error("w32_wait_events");
183  }
184  }
185 
186  thread_debug(" WaitForMultipleObjects start (count: %d)\n", count);
187  ret = WaitForMultipleObjects(count, targets, FALSE, timeout);
188  thread_debug(" WaitForMultipleObjects end (ret: %lu)\n", ret);
189 
190  if (ret == (DWORD)(WAIT_OBJECT_0 + initcount) && th) {
191  errno = EINTR;
192  }
193  if (ret == WAIT_FAILED && THREAD_DEBUG) {
194  int i;
195  DWORD dmy;
196  for (i = 0; i < count; i++) {
197  thread_debug(" * error handle %d - %s\n", i,
198  GetHandleInformation(targets[i], &dmy) ? "OK" : "NG");
199  }
200  }
201  return ret;
202 }
203 
204 static void ubf_handle(void *ptr);
205 #define ubf_select ubf_handle
206 
207 int
208 rb_w32_wait_events_blocking(HANDLE *events, int num, DWORD timeout)
209 {
210  return w32_wait_events(events, num, timeout, ruby_thread_from_native());
211 }
212 
213 int
214 rb_w32_wait_events(HANDLE *events, int num, DWORD timeout)
215 {
216  int ret;
217  rb_thread_t *th = GET_THREAD();
218 
219  BLOCKING_REGION(th, ret = rb_w32_wait_events_blocking(events, num, timeout),
220  ubf_handle, ruby_thread_from_native(), FALSE);
221  return ret;
222 }
223 
224 static void
225 w32_close_handle(HANDLE handle)
226 {
227  if (CloseHandle(handle) == 0) {
228  w32_error("w32_close_handle");
229  }
230 }
231 
232 static void
233 w32_resume_thread(HANDLE handle)
234 {
235  if (ResumeThread(handle) == (DWORD)-1) {
236  w32_error("w32_resume_thread");
237  }
238 }
239 
240 #ifdef _MSC_VER
241 #define HAVE__BEGINTHREADEX 1
242 #else
243 #undef HAVE__BEGINTHREADEX
244 #endif
245 
246 #ifdef HAVE__BEGINTHREADEX
247 #define start_thread (HANDLE)_beginthreadex
248 #define thread_errno errno
249 typedef unsigned long (__stdcall *w32_thread_start_func)(void*);
250 #else
251 #define start_thread CreateThread
252 #define thread_errno rb_w32_map_errno(GetLastError())
253 typedef LPTHREAD_START_ROUTINE w32_thread_start_func;
254 #endif
255 
256 static HANDLE
257 w32_create_thread(DWORD stack_size, w32_thread_start_func func, void *val)
258 {
259  return start_thread(0, stack_size, func, val, CREATE_SUSPENDED | STACK_SIZE_PARAM_IS_A_RESERVATION, 0);
260 }
261 
/* Interruptible sleep: waits on no handles, so only the timeout or the
 * current Ruby thread's interrupt event can end it. */
int
rb_w32_sleep(unsigned long msec)
{
    return w32_wait_events(0, 0, msec, ruby_thread_from_native());
}
267 
268 int WINAPI
269 rb_w32_Sleep(unsigned long msec)
270 {
271  int ret;
272  rb_thread_t *th = GET_THREAD();
273 
274  BLOCKING_REGION(th, ret = rb_w32_sleep(msec),
275  ubf_handle, ruby_thread_from_native(), FALSE);
276  return ret;
277 }
278 
279 static DWORD
280 hrtime2msec(rb_hrtime_t hrt)
281 {
282  return (DWORD)hrt / (DWORD)RB_HRTIME_PER_MSEC;
283 }
284 
285 static void
286 native_sleep(rb_thread_t *th, rb_hrtime_t *rel)
287 {
288  const volatile DWORD msec = rel ? hrtime2msec(*rel) : INFINITE;
289 
290  GVL_UNLOCK_BEGIN(th);
291  {
292  DWORD ret;
293 
295  th->unblock.func = ubf_handle;
296  th->unblock.arg = th;
298 
299  if (RUBY_VM_INTERRUPTED(th->ec)) {
300  /* interrupted. return immediate */
301  }
302  else {
303  thread_debug("native_sleep start (%lu)\n", msec);
304  ret = w32_wait_events(0, 0, msec, th);
305  thread_debug("native_sleep done (%lu)\n", ret);
306  }
307 
309  th->unblock.func = 0;
310  th->unblock.arg = 0;
312  }
313  GVL_UNLOCK_END(th);
314 }
315 
316 void
318 {
319 #if USE_WIN32_MUTEX
320  w32_mutex_lock(lock->mutex);
321 #else
322  EnterCriticalSection(&lock->crit);
323 #endif
324 }
325 
326 void
328 {
329 #if USE_WIN32_MUTEX
330  thread_debug("release mutex: %p\n", lock->mutex);
331  ReleaseMutex(lock->mutex);
332 #else
333  LeaveCriticalSection(&lock->crit);
334 #endif
335 }
336 
337 static int
338 native_mutex_trylock(rb_nativethread_lock_t *lock)
339 {
340 #if USE_WIN32_MUTEX
341  int result;
342  thread_debug("native_mutex_trylock: %p\n", lock->mutex);
343  result = w32_wait_events(&lock->mutex, 1, 1, 0);
344  thread_debug("native_mutex_trylock result: %d\n", result);
345  switch (result) {
346  case WAIT_OBJECT_0:
347  return 0;
348  case WAIT_TIMEOUT:
349  return EBUSY;
350  }
351  return EINVAL;
352 #else
353  return TryEnterCriticalSection(&lock->crit) == 0;
354 #endif
355 }
356 
357 void
359 {
360 #if USE_WIN32_MUTEX
361  lock->mutex = w32_mutex_create();
362  /* thread_debug("initialize mutex: %p\n", lock->mutex); */
363 #else
364  InitializeCriticalSection(&lock->crit);
365 #endif
366 }
367 
368 void
370 {
371 #if USE_WIN32_MUTEX
372  w32_close_handle(lock->mutex);
373 #else
374  DeleteCriticalSection(&lock->crit);
375 #endif
376 }
377 
378 struct cond_event_entry {
379  struct cond_event_entry* next;
380  struct cond_event_entry* prev;
381  HANDLE event;
382 };
383 
384 void
386 {
387  /* cond is guarded by mutex */
388  struct cond_event_entry *e = cond->next;
389  struct cond_event_entry *head = (struct cond_event_entry*)cond;
390 
391  if (e != head) {
392  struct cond_event_entry *next = e->next;
393  struct cond_event_entry *prev = e->prev;
394 
395  prev->next = next;
396  next->prev = prev;
397  e->next = e->prev = e;
398 
399  SetEvent(e->event);
400  }
401 }
402 
403 void
405 {
406  /* cond is guarded by mutex */
407  struct cond_event_entry *e = cond->next;
408  struct cond_event_entry *head = (struct cond_event_entry*)cond;
409 
410  while (e != head) {
411  struct cond_event_entry *next = e->next;
412  struct cond_event_entry *prev = e->prev;
413 
414  SetEvent(e->event);
415 
416  prev->next = next;
417  next->prev = prev;
418  e->next = e->prev = e;
419 
420  e = next;
421  }
422 }
423 
424 static int
425 native_cond_timedwait_ms(rb_nativethread_cond_t *cond, rb_nativethread_lock_t *mutex, unsigned long msec)
426 {
427  DWORD r;
428  struct cond_event_entry entry;
429  struct cond_event_entry *head = (struct cond_event_entry*)cond;
430 
431  entry.event = CreateEvent(0, FALSE, FALSE, 0);
432 
433  /* cond is guarded by mutex */
434  entry.next = head;
435  entry.prev = head->prev;
436  head->prev->next = &entry;
437  head->prev = &entry;
438 
439  rb_native_mutex_unlock(mutex);
440  {
441  r = WaitForSingleObject(entry.event, msec);
442  if ((r != WAIT_OBJECT_0) && (r != WAIT_TIMEOUT)) {
443  rb_bug("rb_native_cond_wait: WaitForSingleObject returns %lu", r);
444  }
445  }
446  rb_native_mutex_lock(mutex);
447 
448  entry.prev->next = entry.next;
449  entry.next->prev = entry.prev;
450 
451  w32_close_handle(entry.event);
452  return (r == WAIT_OBJECT_0) ? 0 : ETIMEDOUT;
453 }
454 
455 void
457 {
458  native_cond_timedwait_ms(cond, mutex, INFINITE);
459 }
460 
#if 0
/* Historical absolute-deadline condvar helpers, compiled out.  Kept for
 * reference only. */

/* Milliseconds from now until the absolute deadline *TS (0 if past). */
static unsigned long
abs_timespec_to_timeout_ms(const struct timespec *ts)
{
    struct timeval tv;
    struct timeval now;

    gettimeofday(&now, NULL);
    tv.tv_sec = ts->tv_sec;
    tv.tv_usec = ts->tv_nsec / 1000;

    if (!rb_w32_time_subtract(&tv, &now))
        return 0;

    return (tv.tv_sec * 1000) + (tv.tv_usec / 1000);
}

/* Condvar wait until the absolute deadline *TS; ETIMEDOUT if already past. */
static int
native_cond_timedwait(rb_nativethread_cond_t *cond, rb_nativethread_lock_t *mutex, const struct timespec *ts)
{
    unsigned long timeout_ms;

    timeout_ms = abs_timespec_to_timeout_ms(ts);
    if (!timeout_ms)
        return ETIMEDOUT;

    return native_cond_timedwait_ms(cond, mutex, timeout_ms);
}

/* Convert a relative timeout to an absolute deadline (now + rel),
 * clamping to TIMET_MAX on tv_sec overflow. */
static struct timespec
native_cond_timeout(rb_nativethread_cond_t *cond, struct timespec timeout_rel)
{
    int ret;
    struct timeval tv;
    struct timespec timeout;
    struct timespec now;

    ret = gettimeofday(&tv, 0);
    if (ret != 0)
        rb_sys_fail(0);
    now.tv_sec = tv.tv_sec;
    now.tv_nsec = tv.tv_usec * 1000;

    timeout.tv_sec = now.tv_sec;
    timeout.tv_nsec = now.tv_nsec;
    timeout.tv_sec += timeout_rel.tv_sec;
    timeout.tv_nsec += timeout_rel.tv_nsec;

    if (timeout.tv_nsec >= 1000*1000*1000) {
        timeout.tv_sec++;
        timeout.tv_nsec -= 1000*1000*1000;
    }

    if (timeout.tv_sec < now.tv_sec)
        timeout.tv_sec = TIMET_MAX;

    return timeout;
}
#endif
520 
521 void
523 {
524  cond->next = (struct cond_event_entry *)cond;
525  cond->prev = (struct cond_event_entry *)cond;
526 }
527 
528 void
530 {
531  /* */
532 }
533 
534 void
535 ruby_init_stack(volatile VALUE *addr)
536 {
537 }
538 
539 #define CHECK_ERR(expr) \
540  {if (!(expr)) {rb_bug("err: %lu - %s", GetLastError(), #expr);}}
541 
542 static void
543 native_thread_init_stack(rb_thread_t *th)
544 {
545  MEMORY_BASIC_INFORMATION mi;
546  char *base, *end;
547  DWORD size, space;
548 
549  CHECK_ERR(VirtualQuery(&mi, &mi, sizeof(mi)));
550  base = mi.AllocationBase;
551  end = mi.BaseAddress;
552  end += mi.RegionSize;
553  size = end - base;
554  space = size / 5;
555  if (space > 1024*1024) space = 1024*1024;
556  th->ec->machine.stack_start = (VALUE *)end - 1;
557  th->ec->machine.stack_maxsize = size - space;
558 }
559 
560 #ifndef InterlockedExchangePointer
561 #define InterlockedExchangePointer(t, v) \
562  (void *)InterlockedExchange((long *)(t), (long)(v))
563 #endif
564 static void
565 native_thread_destroy(rb_thread_t *th)
566 {
567  HANDLE intr = InterlockedExchangePointer(&th->native_thread_data.interrupt_event, 0);
568  thread_debug("close handle - intr: %p, thid: %p\n", intr, th->thread_id);
569  w32_close_handle(intr);
570 }
571 
572 static unsigned long __stdcall
573 thread_start_func_1(void *th_ptr)
574 {
575  rb_thread_t *th = th_ptr;
576  volatile HANDLE thread_id = th->thread_id;
577 
578  native_thread_init_stack(th);
579  th->native_thread_data.interrupt_event = CreateEvent(0, TRUE, FALSE, 0);
580 
581  /* run */
582  thread_debug("thread created (th: %p, thid: %p, event: %p)\n", th,
584 
585  thread_start_func_2(th, th->ec->machine.stack_start);
586 
587  w32_close_handle(thread_id);
588  thread_debug("thread deleted (th: %p)\n", th);
589  return 0;
590 }
591 
592 static int
593 native_thread_create(rb_thread_t *th)
594 {
596  th->thread_id = w32_create_thread(stack_size, thread_start_func_1, th);
597 
598  if ((th->thread_id) == 0) {
599  return thread_errno;
600  }
601 
602  w32_resume_thread(th->thread_id);
603 
604  if (THREAD_DEBUG) {
605  Sleep(0);
606  thread_debug("create: (th: %p, thid: %p, intr: %p), stack size: %"PRIuSIZE"\n",
607  th, th->thread_id,
608  th->native_thread_data.interrupt_event, stack_size);
609  }
610  return 0;
611 }
612 
613 static void
614 native_thread_join(HANDLE th)
615 {
616  w32_wait_events(&th, 1, INFINITE, 0);
617 }
618 
619 #if USE_NATIVE_THREAD_PRIORITY
620 
621 static void
622 native_thread_apply_priority(rb_thread_t *th)
623 {
624  int priority = th->priority;
625  if (th->priority > 0) {
626  priority = THREAD_PRIORITY_ABOVE_NORMAL;
627  }
628  else if (th->priority < 0) {
629  priority = THREAD_PRIORITY_BELOW_NORMAL;
630  }
631  else {
632  priority = THREAD_PRIORITY_NORMAL;
633  }
634 
635  SetThreadPriority(th->thread_id, priority);
636 }
637 
638 #endif /* USE_NATIVE_THREAD_PRIORITY */
639 
640 int rb_w32_select_with_thread(int, fd_set *, fd_set *, fd_set *, struct timeval *, void *); /* @internal */
641 
642 static int
643 native_fd_select(int n, rb_fdset_t *readfds, rb_fdset_t *writefds, rb_fdset_t *exceptfds, struct timeval *timeout, rb_thread_t *th)
644 {
645  fd_set *r = NULL, *w = NULL, *e = NULL;
646  if (readfds) {
647  rb_fd_resize(n - 1, readfds);
648  r = rb_fd_ptr(readfds);
649  }
650  if (writefds) {
651  rb_fd_resize(n - 1, writefds);
652  w = rb_fd_ptr(writefds);
653  }
654  if (exceptfds) {
655  rb_fd_resize(n - 1, exceptfds);
656  e = rb_fd_ptr(exceptfds);
657  }
658  return rb_w32_select_with_thread(n, r, w, e, timeout, th);
659 }
660 
661 /* @internal */
662 int
664 {
665  return w32_wait_events(0, 0, 0, th);
666 }
667 
668 static void
669 ubf_handle(void *ptr)
670 {
671  rb_thread_t *th = (rb_thread_t *)ptr;
672  thread_debug("ubf_handle: %p\n", th);
673 
674  if (!SetEvent(th->native_thread_data.interrupt_event)) {
675  w32_error("ubf_handle");
676  }
677 }
678 
679 int rb_w32_set_thread_description(HANDLE th, const WCHAR *name);
681 #define native_set_another_thread_name rb_w32_set_thread_description_str
682 
683 static struct {
684  HANDLE id;
685  HANDLE lock;
686 } timer_thread;
687 #define TIMER_THREAD_CREATED_P() (timer_thread.id != 0)
688 
689 static unsigned long __stdcall
690 timer_thread_func(void *dummy)
691 {
692  rb_vm_t *vm = GET_VM();
693  thread_debug("timer_thread\n");
694  rb_w32_set_thread_description(GetCurrentThread(), L"ruby-timer-thread");
695  while (WaitForSingleObject(timer_thread.lock, TIME_QUANTUM_USEC/1000) ==
696  WAIT_TIMEOUT) {
697  timer_thread_function();
698  ruby_sigchld_handler(vm); /* probably no-op */
700  }
701  thread_debug("timer killed\n");
702  return 0;
703 }
704 
/* No-op on Windows: the timer thread wakes itself every quantum. */
void
rb_thread_wakeup_timer_thread(int sig) /* NOTE(review): signature line restored; missing in extraction */
{
    /* do nothing */
}
710 
711 static VALUE
712 rb_thread_start_unblock_thread(void)
713 {
714  return Qfalse; /* no-op */
715 }
716 
717 static void
718 rb_thread_create_timer_thread(void)
719 {
720  if (timer_thread.id == 0) {
721  if (!timer_thread.lock) {
722  timer_thread.lock = CreateEvent(0, TRUE, FALSE, 0);
723  }
724  timer_thread.id = w32_create_thread(1024 + (THREAD_DEBUG ? BUFSIZ : 0),
725  timer_thread_func, 0);
726  w32_resume_thread(timer_thread.id);
727  }
728 }
729 
730 static int
731 native_stop_timer_thread(void)
732 {
733  int stopped = --system_working <= 0;
734  if (stopped) {
735  SetEvent(timer_thread.lock);
736  native_thread_join(timer_thread.id);
737  CloseHandle(timer_thread.lock);
738  timer_thread.lock = 0;
739  }
740  return stopped;
741 }
742 
743 static void
744 native_reset_timer_thread(void)
745 {
746  if (timer_thread.id) {
747  CloseHandle(timer_thread.id);
748  timer_thread.id = 0;
749  }
750 }
751 
752 int
753 ruby_stack_overflowed_p(const rb_thread_t *th, const void *addr)
754 {
756 }
757 
758 #if defined(__MINGW32__)
759 LONG WINAPI
760 rb_w32_stack_overflow_handler(struct _EXCEPTION_POINTERS *exception)
761 {
762  if (exception->ExceptionRecord->ExceptionCode == EXCEPTION_STACK_OVERFLOW) {
764  raise(SIGSEGV);
765  }
766  return EXCEPTION_CONTINUE_SEARCH;
767 }
768 #endif
769 
770 #ifdef RUBY_ALLOCA_CHKSTK
771 void
772 ruby_alloca_chkstk(size_t len, void *sp)
773 {
774  if (ruby_stack_length(NULL) * sizeof(VALUE) >= len) {
779  }
780  }
781 }
782 #endif
/* No file descriptors are reserved by the runtime on Windows. */
int
rb_reserved_fd_p(int fd)
{
    return 0;
}
788 
789 int
791 {
792  return -1; /* TODO */
793 }
794 
796 void
797 rb_sigwait_fd_put(rb_thread_t *th, int fd)
798 {
799  rb_bug("not implemented, should not be called");
800 }
801 
802 NORETURN(void rb_sigwait_sleep(const rb_thread_t *, int, const rb_hrtime_t *));
803 void
804 rb_sigwait_sleep(const rb_thread_t *th, int fd, const rb_hrtime_t *rel)
805 {
806  rb_bug("not implemented, should not be called");
807 }
808 
811 {
812  return GetCurrentThread();
813 }
814 
815 static void
816 native_set_thread_name(rb_thread_t *th)
817 {
818 }
819 
820 #if USE_MJIT
821 static unsigned long __stdcall
822 mjit_worker(void *arg)
823 {
824  void (*worker_func)(void) = arg;
825  rb_w32_set_thread_description(GetCurrentThread(), L"ruby-mjitworker");
826  worker_func();
827  return 0;
828 }
829 
830 /* Launch MJIT thread. Returns FALSE if it fails to create thread. */
831 int
832 rb_thread_create_mjit_thread(void (*worker_func)(void))
833 {
834  size_t stack_size = 4 * 1024; /* 4KB is the minimum commit size */
835  HANDLE thread_id = w32_create_thread(stack_size, mjit_worker, worker_func);
836  if (thread_id == 0) {
837  return FALSE;
838  }
839 
840  w32_resume_thread(thread_id);
841  return TRUE;
842 }
843 #endif
844 
845 #endif /* THREAD_SYSTEM_DEPENDENT_IMPLEMENTATION */
SIGSEGV
#define SIGSEGV
Definition: rb_mjit_min_header-2.7.2.h:2296
ruby_stack_length
size_t ruby_stack_length(VALUE **p)
Definition: gc.c:4634
rb_w32_check_interrupt
int rb_w32_check_interrupt(void *)
TRUE
#define TRUE
Definition: nkf.h:175
RUBY_VM_INTERRUPTED
#define RUBY_VM_INTERRUPTED(ec)
Definition: vm_core.h:1840
id
const int id
Definition: nkf.c:209
TIMET_MAX
#define TIMET_MAX
Definition: rb_mjit_min_header-2.7.2.h:6675
rb_nativethread_self
RUBY_SYMBOL_EXPORT_BEGIN rb_nativethread_id_t rb_nativethread_self()
native_thread_data_struct::interrupt_event
HANDLE interrupt_event
Definition: thread_win32.h:28
rb_vm_struct::thread_machine_stack_size
size_t thread_machine_stack_size
Definition: vm_core.h:664
sig
int sig
Definition: rb_mjit_min_header-2.7.2.h:10470
fd_set
Definition: rb_mjit_min_header-2.7.2.h:1269
i
uint32_t i
Definition: rb_mjit_min_header-2.7.2.h:5499
rb_w32_wait_events
int rb_w32_wait_events(HANDLE *events, int num, DWORD timeout)
NORETURN
#define NORETURN(x)
Definition: defines.h:528
VALUE
unsigned long VALUE
Definition: ruby.h:102
long
#define long
Definition: rb_mjit_min_header-2.7.2.h:2921
GET_VM
#define GET_VM()
Definition: vm_core.h:1764
sysstack_error
#define sysstack_error
Definition: vm_core.h:1735
rb_w32_time_subtract
int rb_w32_time_subtract(struct timeval *rest, const struct timeval *wait)
Definition: win32.c:3074
rb_hrtime_t
uint64_t rb_hrtime_t
Definition: hrtime.h:47
rb_thread_struct::thread_id
rb_nativethread_id_t thread_id
Definition: vm_core.h:927
DWORD
IUnknown DWORD
Definition: win32ole.c:33
ruby_sigchld_handler
void ruby_sigchld_handler(rb_vm_t *vm)
Definition: signal.c:1073
thread_debug
#define thread_debug
Definition: thread.c:330
GET_EC
#define GET_EC()
Definition: vm_core.h:1766
ptr
struct RIMemo * ptr
Definition: debug.c:65
Qfalse
#define Qfalse
Definition: ruby.h:467
NULL
#define NULL
Definition: _sdbm.c:101
rb_execution_context_struct::machine
struct rb_execution_context_struct::@13 machine
TryEnterCriticalSection
WINBASEAPI BOOL WINAPI TryEnterCriticalSection(IN OUT LPCRITICAL_SECTION lpCriticalSection)
rb_w32_Sleep
int WINAPI rb_w32_Sleep(unsigned long msec)
rb_thread_struct::ec
rb_execution_context_t * ec
Definition: vm_core.h:915
L
#define L(x)
Definition: asm.h:125
THREAD_DEBUG
#define THREAD_DEBUG
Definition: thread.c:92
timespec::tv_nsec
long tv_nsec
Definition: missing.h:62
rb_thread_struct::priority
int8_t priority
Definition: vm_core.h:937
void
void
Definition: rb_mjit_min_header-2.7.2.h:13321
rb_fdset_t
Definition: rb_mjit_min_header-2.7.2.h:5740
EBUSY
#define EBUSY
Definition: rb_mjit_min_header-2.7.2.h:11001
Init_native_thread
void Init_native_thread(rb_thread_t *th)
PRIuSIZE
#define PRIuSIZE
Definition: ruby.h:208
rb_native_cond_initialize
void rb_native_cond_initialize(rb_nativethread_cond_t *cond)
rb_threadptr_check_signal
void rb_threadptr_check_signal(rb_thread_t *mth)
Definition: thread.c:4318
GVL_UNLOCK_END
#define GVL_UNLOCK_END(th)
Definition: thread.c:174
__pthread_mutex_t
Definition: rb_mjit_min_header-2.7.2.h:1383
timespec::tv_sec
time_t tv_sec
Definition: missing.h:61
ruby_init_stack
void ruby_init_stack(volatile VALUE *)
rb_vm_struct::main_thread
struct rb_thread_struct * main_thread
Definition: vm_core.h:581
rb_vm_struct::gvl
rb_global_vm_lock_t gvl
Definition: vm_core.h:579
RB_HRTIME_PER_MSEC
#define RB_HRTIME_PER_MSEC
Definition: hrtime.h:36
RAISED_STACKOVERFLOW
@ RAISED_STACKOVERFLOW
Definition: eval_intern.h:255
mjit_worker
void mjit_worker(void)
Definition: mjit_worker.c:1194
rb_sys_fail
void rb_sys_fail(const char *mesg)
Definition: error.c:2795
BLOCKING_REGION
#define BLOCKING_REGION(th, exec, ubf, ubfarg, fail_if_interrupted)
Definition: thread.c:188
rb_vm_struct::thread_vm_stack_size
size_t thread_vm_stack_size
Definition: vm_core.h:663
rb_execution_context_struct::stack_start
VALUE * stack_start
Definition: vm_core.h:887
rb_native_cond_destroy
void rb_native_cond_destroy(rb_nativethread_cond_t *cond)
ALLOCA_N
#define ALLOCA_N(type, n)
Definition: ruby.h:1684
rb_native_cond_broadcast
void rb_native_cond_broadcast(rb_nativethread_cond_t *cond)
size
int size
Definition: encoding.c:58
rb_native_mutex_unlock
void rb_native_mutex_unlock(rb_nativethread_lock_t *lock)
FALSE
#define FALSE
Definition: nkf.h:174
rb_fd_resize
#define rb_fd_resize(n, f)
Definition: intern.h:410
rb_w32_sleep
int rb_w32_sleep(unsigned long msec)
arg
VALUE arg
Definition: rb_mjit_min_header-2.7.2.h:5636
rb_thread_wakeup_timer_thread
void rb_thread_wakeup_timer_thread(int)
rb_ec_raised_set
#define rb_ec_raised_set(ec, f)
Definition: eval_intern.h:258
rb_execution_context_struct::stack_maxsize
size_t stack_maxsize
Definition: vm_core.h:889
rb_w32_select_with_thread
int rb_w32_select_with_thread(int nfds, fd_set *rd, fd_set *wr, fd_set *ex, struct timeval *timeout, void *th)
Definition: win32.c:3113
rb_native_mutex_lock
void rb_native_mutex_lock(rb_nativethread_lock_t *lock)
rb_sigwait_sleep
void rb_sigwait_sleep(const rb_thread_t *, int fd, const rb_hrtime_t *)
rb_thread_struct::vm
rb_vm_t * vm
Definition: vm_core.h:913
__pthread_cond_t
Definition: rb_mjit_min_header-2.7.2.h:1388
rb_exc_raise
void rb_exc_raise(VALUE mesg)
Raises an exception in the current thread.
Definition: eval.c:668
ETIMEDOUT
#define ETIMEDOUT
Definition: win32.h:570
rb_bug
void rb_bug(const char *fmt,...)
Definition: error.c:636
rb_native_mutex_destroy
void rb_native_mutex_destroy(rb_nativethread_lock_t *lock)
timeval
Definition: missing.h:53
rb_thread_struct::unblock
struct rb_unblock_callback unblock
Definition: vm_core.h:957
GET_THREAD
#define GET_THREAD()
Definition: vm_core.h:1765
rb_w32_wait_events_blocking
int rb_w32_wait_events_blocking(HANDLE *events, int num, DWORD timeout)
memcpy
void * memcpy(void *__restrict, const void *__restrict, size_t)
BUFSIZ
#define BUFSIZ
Definition: rb_mjit_min_header-2.7.2.h:1511
rb_sigwait_fd_get
int rb_sigwait_fd_get(const rb_thread_t *)
rb_w32_set_thread_description_str
int rb_w32_set_thread_description_str(HANDLE th, VALUE name)
Definition: win32.c:8076
rb_thread_struct::native_thread_data
native_thread_data_t native_thread_data
Definition: vm_core.h:940
err
int err
Definition: win32.c:135
rb_vm_struct
Definition: vm_core.h:576
__pthread_t
Definition: rb_mjit_min_header-2.7.2.h:1382
count
int count
Definition: encoding.c:57
errno
int errno
EINTR
#define EINTR
Definition: rb_mjit_min_header-2.7.2.h:10989
len
uint8_t len
Definition: escape.c:17
rb_native_cond_wait
void rb_native_cond_wait(rb_nativethread_cond_t *cond, rb_nativethread_lock_t *mutex)
stderr
#define stderr
Definition: rb_mjit_min_header-2.7.2.h:1522
timespec
Definition: missing.h:60
rb_sigwait_fd_put
void rb_sigwait_fd_put(const rb_thread_t *, int fd)
rb_unblock_callback::arg
void * arg
Definition: vm_core.h:818
rb_thread_struct::interrupt_lock
rb_nativethread_lock_t interrupt_lock
Definition: vm_core.h:956
GVL_UNLOCK_BEGIN
#define GVL_UNLOCK_BEGIN(th)
Definition: thread.c:170
EINVAL
#define EINVAL
Definition: rb_mjit_min_header-2.7.2.h:11007
rb_vm_struct::default_params
struct rb_vm_struct::@10 default_params
rb_reserved_fd_p
int rb_reserved_fd_p(int fd)
rb_thread_struct
Definition: vm_core.h:910
fprintf
int fprintf(FILE *__restrict, const char *__restrict,...) __attribute__((__format__(__printf__
rb_w32_set_thread_description
int rb_w32_set_thread_description(HANDLE th, const WCHAR *name)
Definition: win32.c:8059
gettimeofday
int gettimeofday(struct timeval *, struct timezone *)
Definition: win32.c:4598
rb_ec_raised_p
#define rb_ec_raised_p(ec, f)
Definition: eval_intern.h:260
rb_native_mutex_initialize
void rb_native_mutex_initialize(rb_nativethread_lock_t *lock)
rb_native_cond_signal
void rb_native_cond_signal(rb_nativethread_cond_t *cond)
rb_unblock_callback::func
rb_unblock_function_t * func
Definition: vm_core.h:817
rb_fd_ptr
#define rb_fd_ptr(f)
Definition: intern.h:411
rb_global_vm_lock_struct::lock
rb_nativethread_lock_t lock
Definition: thread_pthread.h:51
name
const char * name
Definition: nkf.c:208
rb_execution_context_struct
Definition: vm_core.h:843
n
const char size_t n
Definition: rb_mjit_min_header-2.7.2.h:5491