#ifdef THREAD_SYSTEM_DEPENDENT_IMPLEMENTATION
#ifdef HAVE_SYS_RESOURCE_H
#include <sys/resource.h>
#ifdef HAVE_THR_STKSEGMENT
#elif HAVE_SYS_FCNTL_H
#include <sys/fcntl.h>
#ifdef HAVE_SYS_PRCTL_H
#include <sys/prctl.h>
#if defined(HAVE_SYS_TIME_H)
#if defined(__HAIKU__)
#include <kernel/OS.h>
#if defined(HAVE_SYS_EVENTFD_H) && defined(HAVE_EVENTFD)
# define USE_EVENTFD (1)
# include <sys/eventfd.h>
# define USE_EVENTFD (0)
#if defined(SIGVTALRM) && !defined(__CYGWIN__)
# define USE_UBF_LIST 1
#define UBF_TIMER_NONE    0
#define UBF_TIMER_POSIX   1
#define UBF_TIMER_PTHREAD 2
# if defined(HAVE_TIMER_SETTIME) && defined(HAVE_TIMER_CREATE) && \
     defined(CLOCK_MONOTONIC) && defined(USE_UBF_LIST)
#  define UBF_TIMER UBF_TIMER_POSIX
# elif defined(USE_UBF_LIST)
#  define UBF_TIMER UBF_TIMER_PTHREAD
#  define UBF_TIMER UBF_TIMER_NONE
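/*
 * Unblocking-function (UBF) timer backend selection, in outline: prefer a
 * POSIX per-process timer (timer_create/timer_settime on CLOCK_MONOTONIC)
 * when it is available together with the ubf_list (which needs SIGVTALRM);
 * otherwise fall back to a dedicated pthread driven through a pipe; with
 * neither available, no UBF timer is used at all.
 */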
#if UBF_TIMER == UBF_TIMER_POSIX
#elif UBF_TIMER == UBF_TIMER_PTHREAD
static void *timer_pthread_fn(void *);
static void clear_thread_cache_altstack(void);
static void ubf_wakeup_all_threads(void);
static int ubf_threads_empty(void);
static void ubf_timer_disarm(void);
static void threadptr_trap_interrupt(rb_thread_t *);
#define TIMER_THREAD_CREATED_P() (signal_self_pipe.owner_process == getpid())
#define BUSY_WAIT_SIGNALS (0)
#define THREAD_INVALID ((const rb_thread_t *)-1)
#ifdef HAVE_SCHED_YIELD
#define native_thread_yield() (void)sched_yield()
#define native_thread_yield() ((void)0)
#if defined(HAVE_PTHREAD_CONDATTR_SETCLOCK) && \
    defined(CLOCK_REALTIME) && defined(CLOCK_MONOTONIC) && \
    defined(HAVE_CLOCK_GETTIME)
static const void *const condattr_monotonic = NULL;
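/*
 * condattr_monotonic selects CLOCK_MONOTONIC for condition variables where
 * pthread_condattr_setclock() is usable; on other platforms it stays NULL
 * (the fallback above) and timed waits fall back to the realtime clock, as
 * the condattr_monotonic checks below suggest.
 */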
#define TIME_QUANTUM_MSEC (100)
#define TIME_QUANTUM_USEC (TIME_QUANTUM_MSEC * 1000)
#define TIME_QUANTUM_NSEC (TIME_QUANTUM_USEC * 1000)
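/*
 * The 100ms time quantum bounds how long a Ruby thread may hold the GVL
 * before timer logic intervenes.  Roughly: there is no dedicated GVL timer
 * thread here; one of the threads waiting on the GVL is designated as the
 * timer (designate_timer_thread/do_gvl_timer below) and wakes every quantum
 * to run timer_thread_function() against the current GVL owner.
 */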
designate_timer_thread(rb_vm_t *vm)
    abs = native_cond_timeout(&nd->cond.gvlq, TIME_QUANTUM_NSEC);
    ubf_wakeup_all_threads();
    if (vm->gvl.owner) timer_thread_function();
              "we must not be in ubf_list and GVL waitq at the same time");
    do_gvl_timer(vm, th);
    if (!designate_timer_thread(vm) && !ubf_threads_empty()) {
    gvl_acquire_common(vm, th);
gvl_release_common(rb_vm_t *vm)
    gvl_release_common(vm);
    ubf_wakeup_all_threads();
    next = gvl_release_common(vm);
    native_thread_yield();
    gvl_acquire_common(vm, th);
    clear_thread_cache_altstack();
#if defined(HAVE_WORKING_FORK)
static void thread_cache_reset(void);
    thread_cache_reset();
#define NATIVE_MUTEX_LOCK_DEBUG 0
mutex_debug(const char *msg, void *lock)
    if (NATIVE_MUTEX_LOCK_DEBUG) {
    mutex_debug("lock", lock);
    mutex_debug("unlock", lock);
    mutex_debug("trylock", lock);
    mutex_debug("init", lock);
    mutex_debug("destroy", lock);
    } while (r == EINTR);
    if (condattr_monotonic) {
    return rb_hrtime_add(rb_timespec2hrtime(&ts), rel);
#define native_cleanup_push pthread_cleanup_push
#define native_cleanup_pop  pthread_cleanup_pop
ruby_thread_from_native(void)
#if defined(HAVE_PTHREAD_CONDATTR_SETCLOCK)
    if (condattr_monotonic) {
        if (r) condattr_monotonic = NULL;
    native_thread_init(th);
    ruby_thread_set_native(th);
#ifndef USE_THREAD_CACHE
#define USE_THREAD_CACHE 1
    if (USE_THREAD_CACHE)
        ruby_thread_set_native(0);
static rb_thread_t *register_cached_thread_and_wait(void *);
#if defined HAVE_PTHREAD_GETATTR_NP || defined HAVE_PTHREAD_ATTR_GET_NP
#define STACKADDR_AVAILABLE 1
#elif defined HAVE_PTHREAD_GET_STACKADDR_NP && defined HAVE_PTHREAD_GET_STACKSIZE_NP
#define STACKADDR_AVAILABLE 1
#undef MAINSTACKADDR_AVAILABLE
#define MAINSTACKADDR_AVAILABLE 1
void *pthread_get_stackaddr_np(pthread_t);
size_t pthread_get_stacksize_np(pthread_t);
#elif defined HAVE_THR_STKSEGMENT || defined HAVE_PTHREAD_STACKSEG_NP
#define STACKADDR_AVAILABLE 1
#elif defined HAVE_PTHREAD_GETTHRDS_NP
#define STACKADDR_AVAILABLE 1
#elif defined __HAIKU__
#define STACKADDR_AVAILABLE 1
#ifndef MAINSTACKADDR_AVAILABLE
# ifdef STACKADDR_AVAILABLE
#   define MAINSTACKADDR_AVAILABLE 1
#   define MAINSTACKADDR_AVAILABLE 0
#if MAINSTACKADDR_AVAILABLE && !defined(get_main_stack)
# define get_main_stack(addr, size) get_stack(addr, size)
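/*
 * Stack-address discovery is platform specific: pthread_getattr_np (glibc),
 * pthread_attr_get_np (*BSD), pthread_get_stackaddr_np/stacksize_np (macOS),
 * thr_stksegment/pthread_stackseg_np, pthread_getthrds_np (AIX), or Haiku's
 * get_thread_info.  Where any of these is usable, get_stack() below reports
 * the current thread's stack base and size, and the main thread reuses it
 * through get_main_stack() unless a dedicated probe is defined.
 */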
#ifdef STACKADDR_AVAILABLE
get_stack(void **addr, size_t *size)
#define CHECK_ERR(expr) \
    {int err = (expr); if (err) return err;}
#ifdef HAVE_PTHREAD_GETATTR_NP
# ifdef HAVE_PTHREAD_ATTR_GETSTACK
    CHECK_ERR(pthread_attr_getstackaddr(&attr, addr));
# ifdef HAVE_PTHREAD_ATTR_GETGUARDSIZE
#elif defined HAVE_PTHREAD_ATTR_GET_NP
# ifdef HAVE_PTHREAD_ATTR_GETSTACK
    CHECK_ERR(pthread_attr_getstackaddr(&attr, addr));
#elif (defined HAVE_PTHREAD_GET_STACKADDR_NP && defined HAVE_PTHREAD_GET_STACKSIZE_NP)
    *addr = pthread_get_stackaddr_np(th);
    *size = pthread_get_stacksize_np(th);
#elif defined HAVE_THR_STKSEGMENT || defined HAVE_PTHREAD_STACKSEG_NP
# if defined HAVE_THR_STKSEGMENT
    CHECK_ERR(thr_stksegment(&stk));
#elif defined HAVE_PTHREAD_GETTHRDS_NP
    struct __pthrdsinfo thinfo;
    int regsiz = sizeof(reg);
    CHECK_ERR(pthread_getthrds_np(&th, PTHRDSINFO_QUERY_ALL,
                                  &thinfo, sizeof(thinfo),
    *addr = thinfo.__pi_stackaddr;
    *size = thinfo.__pi_stackend - thinfo.__pi_stackaddr;
#elif defined __HAIKU__
    CHECK_ERR(get_thread_info(find_thread(NULL), &info));
    *addr = info.stack_base;
#error STACKADDR_AVAILABLE is defined but not implemented.
    size_t stack_maxsize;
} native_main_thread;
#ifdef STACK_END_ADDRESS
extern void *STACK_END_ADDRESS;
    RUBY_STACK_SPACE_LIMIT = 1024 * 1024,
    RUBY_STACK_SPACE_RATIO = 5
space_size(size_t stack_size)
    size_t space_size = stack_size / RUBY_STACK_SPACE_RATIO;
    if (space_size > RUBY_STACK_SPACE_LIMIT) {
        return RUBY_STACK_SPACE_LIMIT;
reserve_stack(volatile char *limit, size_t size)
# error needs alloca()
    volatile char buf[0x100];
    enum {stack_check_margin = 0x1000};
    if (!getrlimit(RLIMIT_STACK, &rl) && rl.rlim_cur == RLIM_INFINITY)
    if (size < stack_check_margin) return;
    size -= stack_check_margin;
    const volatile char *end = buf + sizeof(buf);
        size_t sz = limit - end;
        size_t sz = buf - limit;
# define reserve_stack(limit, size) ((void)(limit), (void)(size))
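/*
 * reserve_stack() above, in outline: it does nothing when RLIMIT_STACK is
 * RLIM_INFINITY, trims a small stack_check_margin off the requested size,
 * and then touches the region between a local buffer and the given limit
 * (the sz computations) so the pages backing the main thread stack are
 * actually usable before Ruby starts.  Where this is unnecessary it
 * collapses to the no-op macro above.
 */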
#undef ruby_init_stack
#if MAINSTACKADDR_AVAILABLE
    if (native_main_thread.stack_maxsize) return;
    if (get_main_stack(&stackaddr, &size) == 0) {
        native_main_thread.stack_maxsize = size;
        native_main_thread.stack_start = stackaddr;
        reserve_stack(stackaddr, size);
#ifdef STACK_END_ADDRESS
    native_main_thread.stack_start = STACK_END_ADDRESS;
    if (!native_main_thread.stack_start ||
                    native_main_thread.stack_start > addr,
                    native_main_thread.stack_start < addr)) {
        native_main_thread.stack_start = (VALUE *)addr;
#if defined(HAVE_GETRLIMIT)
#if defined(PTHREAD_STACK_DEFAULT)
# if PTHREAD_STACK_DEFAULT < RUBY_STACK_SPACE*5
#  error "PTHREAD_STACK_DEFAULT is too small"
    size_t size = PTHREAD_STACK_DEFAULT;
    if (getrlimit(RLIMIT_STACK, &rlim) == 0) {
        addr = native_main_thread.stack_start;
            space = ((size_t)((char *)addr + size) / pagesize) * pagesize - (size_t)addr;
            space = (size_t)addr - ((size_t)((char *)addr - size) / pagesize + 1) * pagesize;
        native_main_thread.stack_maxsize = space;
#if MAINSTACKADDR_AVAILABLE
        start = native_main_thread.stack_start;
        end = (char *)native_main_thread.stack_start + native_main_thread.stack_maxsize;
        start = (char *)native_main_thread.stack_start - native_main_thread.stack_maxsize;
        end = native_main_thread.stack_start;
    if ((void *)addr < start || (void *)addr > end) {
        native_main_thread.stack_start = (VALUE *)addr;
        native_main_thread.stack_maxsize = 0;
#define CHECK_ERR(expr) \
    {int err = (expr); if (err) {rb_bug_errno(#expr, err);}}
#ifdef STACKADDR_AVAILABLE
    if (get_stack(&start, &size) == 0) {
#define USE_NATIVE_THREAD_INIT 1
thread_start_func_1(void *th_ptr)
#if !defined USE_NATIVE_THREAD_INIT
#if defined USE_NATIVE_THREAD_INIT
        native_thread_init_stack(th);
        native_thread_init(th);
#if defined USE_NATIVE_THREAD_INIT
        thread_start_func_2(th, &stack_start);
    if ((th = register_cached_thread_and_wait(RB_ALTSTACK(altstack))) != 0) {
struct cached_thread_entry {
# if defined(HAVE_WORKING_FORK)
thread_cache_reset(void)
    list_head_init(&cached_thread_head);
#ifndef THREAD_CACHE_TIME
#  define THREAD_CACHE_TIME ((rb_hrtime_t)3 * RB_HRTIME_PER_SEC)
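/*
 * Thread cache, in outline: when USE_THREAD_CACHE is enabled, a native
 * thread whose Ruby thread has finished parks itself on cached_thread_head
 * via register_cached_thread_and_wait() for up to THREAD_CACHE_TIME (3s).
 * If a new Ruby thread is created within that window, use_cached_thread()
 * hands it the parked native thread instead of calling pthread_create()
 * again.
 */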
register_cached_thread_and_wait(void *altstack)
    struct cached_thread_entry entry;
    entry.altstack = altstack;
    end = native_cond_timeout(&entry.cond, end);
    list_add(&cached_thread_head, &entry.node);
        native_cond_timedwait(&entry.cond, &thread_cache_lock, &end);
    if (entry.th == NULL) {
# if defined(HAVE_WORKING_FORK)
static void thread_cache_reset(void) { }
#if USE_THREAD_CACHE
    struct cached_thread_entry *entry;
    entry = list_pop(&cached_thread_head, struct cached_thread_entry, node);
clear_thread_cache_altstack(void)
#if USE_THREAD_CACHE
    struct cached_thread_entry *entry;
        entry->altstack = 0;
    if (use_cached_thread(th)) {
        thread_debug("create (use cached thread): %p\n", (void *)th);
        const size_t space = space_size(stack_size);
# ifdef PTHREAD_STACK_MIN
        thread_debug("create - stack size: %lu\n", (unsigned long)stack_size);
# ifdef HAVE_PTHREAD_ATTR_SETINHERITSCHED
#if USE_NATIVE_THREAD_PRIORITY
#if defined(_POSIX_PRIORITY_SCHEDULING) && (_POSIX_PRIORITY_SCHEDULING > 0)
    if (min > priority) {
    else if (max < priority) {
    sp.sched_priority = priority;
    return rb_fd_select(n, readfds, writefds, exceptfds, timeout);
ubf_pthread_cond_signal(void *ptr)
    thread_debug("ubf_pthread_cond_signal (%p)\n", (void *)th);
        thread_debug("native_sleep: interrupted before sleep\n");
            end = native_cond_timeout(cond, *rel);
            native_cond_timedwait(cond, lock, &end);
ubf_list_atfork(void)
    list_head_init(&ubf_list_head);
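/*
 * ubf_select() below is the unblocking function used for threads blocked in
 * select/ppoll and similar calls: it puts the target thread on ubf_list,
 * and unless the caller is already the GVL timer or the sigwait thread it
 * pokes the timer machinery, so ubf_wakeup_thread()/ubf_wakeup_all_threads()
 * keep waking the target until it leaves the blocking region.
 */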
ubf_select(void *ptr)
    const rb_thread_t *cur = ruby_thread_from_native();
    register_ubf_list(th);
    if (cur != vm->gvl.timer && cur != sigwait_th) {
        if (native_mutex_trylock(&vm->gvl.lock) == 0) {
    ubf_wakeup_thread(th);
ubf_threads_empty(void)
ubf_wakeup_all_threads(void)
    if (!ubf_threads_empty()) {
            ubf_wakeup_thread(th);
#define register_ubf_list(th) (void)(th)
#define unregister_ubf_list(th) (void)(th)
#define ubf_select 0
static void ubf_wakeup_all_threads(void) { return; }
static int ubf_threads_empty(void) { return 1; }
#define ubf_list_atfork() do {} while (0)
#define WRITE_CONST(fd, str) (void)(write((fd),(str),sizeof(str)-1)<0)
} signal_self_pipe = {
rb_thread_wakeup_timer_thread_fd(int fd)
    const char buff = '!';
    if ((result = write(fd, &buff, sizeof(buff))) <= 0) {
          case EINTR: goto retry;
#if defined(EWOULDBLOCK) && EWOULDBLOCK != EAGAIN
            async_bug_fd("rb_thread_wakeup_timer_thread: write", e, fd);
    if (TT_DEBUG) WRITE_CONST(2, "rb_thread_wakeup_timer_thread: write\n");
#if UBF_TIMER == UBF_TIMER_POSIX
    if ((!current || timer_posix.owner == current) &&
        !ATOMIC_CAS(timer_posix.state, RTIMER_DISARM, RTIMER_ARMING)) {
        it.it_interval.tv_nsec = it.it_value.tv_nsec = TIME_QUANTUM_NSEC;
        switch (ATOMIC_CAS(timer_posix.state, RTIMER_ARMING, RTIMER_ARMED)) {
          case RTIMER_ARMING: return;
#elif UBF_TIMER == UBF_TIMER_PTHREAD
    if (!current || current == timer_pthread.owner) {
        rb_thread_wakeup_timer_thread_fd(timer_pthread.low[1]);
    rb_thread_wakeup_timer_thread_fd(signal_self_pipe.normal[1]);
    if (signal_self_pipe.owner_process == current) {
        rb_thread_wakeup_timer_thread_fd(signal_self_pipe.normal[1]);
        if (system_working > 0) {
    if (!mth || system_working <= 0) return;
    ubf_timer_arm(current);
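/*
 * The arm path above is a small state machine over timer_posix.state:
 * an ATOMIC_CAS moves RTIMER_DISARM -> RTIMER_ARMING, the POSIX timer is
 * programmed with the TIME_QUANTUM interval (via timer_settime), and a
 * second CAS publishes RTIMER_ARMING -> RTIMER_ARMED.  With
 * UBF_TIMER_PTHREAD, "arming" is just a wakeup byte written to
 * timer_pthread.low[1].
 */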
#define CLOSE_INVALIDATE_PAIR(expr) \
    close_invalidate_pair(expr,"close_invalidate: "#expr)
close_invalidate(int *fdp, const char *msg)
    if (close(fd) < 0) {
        async_bug_fd(msg, errno, fd);
close_invalidate_pair(int fds[2], const char *msg)
        close_invalidate(&fds[0], msg);
        close_invalidate(&fds[0], msg);
        close_invalidate(&fds[1], msg);
set_nonblock(int fd)
    oflags = fcntl(fd, F_GETFL);
setup_communication_pipe_internal(int pipes[2])
    if (pipes[0] >= 0 || pipes[1] >= 0) {
#if USE_EVENTFD && defined(EFD_NONBLOCK) && defined(EFD_CLOEXEC)
    pipes[0] = pipes[1] = eventfd(0, EFD_NONBLOCK|EFD_CLOEXEC);
    if (pipes[0] >= 0) {
    rb_warn("pipe creation failed for timer: %s, scheduling broken",
    set_nonblock(pipes[0]);
    set_nonblock(pipes[1]);
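/*
 * setup_communication_pipe_internal() above builds the self-pipe used to
 * wake the timer/signal machinery.  When eventfd(2) is available
 * (USE_EVENTFD), a single EFD_NONBLOCK|EFD_CLOEXEC eventfd serves as both
 * ends, so pipes[0] == pipes[1]; otherwise an ordinary pipe is created and
 * both ends are made non-blocking via set_nonblock().
 */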
#if !defined(SET_CURRENT_THREAD_NAME) && defined(__linux__) && defined(PR_SET_NAME)
# define SET_CURRENT_THREAD_NAME(name) prctl(PR_SET_NAME, name)
#ifdef SET_CURRENT_THREAD_NAME
    else if ((loc = threadptr_invoke_proc_location(th)) != Qnil) {
        if (len >= sizeof(buf)) {
            buf[sizeof(buf)-2] = '*';
            buf[sizeof(buf)-1] = '\0';
#ifdef SET_ANOTHER_THREAD_NAME
ubf_timer_invalidate(void)
#if UBF_TIMER == UBF_TIMER_PTHREAD
    CLOSE_INVALIDATE_PAIR(timer_pthread.low);
ubf_timer_pthread_create(rb_pid_t current)
#if UBF_TIMER == UBF_TIMER_PTHREAD
    if (timer_pthread.owner == current)
    if (setup_communication_pipe_internal(timer_pthread.low) < 0)
        timer_pthread.owner = current;
        rb_warn("pthread_create failed for timer: %s, signals racy",
#if UBF_TIMER == UBF_TIMER_POSIX
# define UBF_TIMER_CLOCK CLOCK_REALTIME
# define UBF_TIMER_CLOCK CLOCK_MONOTONIC
    sev.sigev_value.sival_ptr = &timer_posix;
    if (!timer_create(UBF_TIMER_CLOCK, &sev, &timer_posix.timerid)) {
        if (prev != RTIMER_DEAD) {
            rb_bug("timer_posix was not dead: %u\n", (unsigned)prev);
        timer_posix.owner = current;
    if (UBF_TIMER == UBF_TIMER_PTHREAD)
        ubf_timer_pthread_create(current);
rb_thread_create_timer_thread(void)
    rb_pid_t owner = signal_self_pipe.owner_process;
    if (owner && owner != current) {
        CLOSE_INVALIDATE_PAIR(signal_self_pipe.normal);
        CLOSE_INVALIDATE_PAIR(signal_self_pipe.ub_main);
        ubf_timer_invalidate();
    if (setup_communication_pipe_internal(signal_self_pipe.normal) < 0) return;
    if (setup_communication_pipe_internal(signal_self_pipe.ub_main) < 0) return;
    ubf_timer_create(current);
    if (owner != current) {
        sigwait_th = THREAD_INVALID;
        signal_self_pipe.owner_process = current;
ubf_timer_disarm(void)
#if UBF_TIMER == UBF_TIMER_POSIX
    prev = ATOMIC_CAS(timer_posix.state, RTIMER_ARMED, RTIMER_DISARM);
      case RTIMER_DISARM: return;
      case RTIMER_ARMING: return;
        prev = ATOMIC_CAS(timer_posix.state, RTIMER_DISARM, RTIMER_DISARM);
        if (prev == RTIMER_DEAD) return;
      case RTIMER_DEAD: return;
        rb_bug("UBF_TIMER_POSIX bad state: %u\n", (unsigned)prev);
#elif UBF_TIMER == UBF_TIMER_PTHREAD
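/*
 * ubf_timer_destroy() below tears the POSIX timer down from the owning
 * process: it spins (yielding with native_thread_yield()) CAS-ing
 * timer_posix.state from its expected value (DISARM or ARMED) to
 * RTIMER_DEAD, and rb_bug()s if the state never settles.  The pthread
 * variant just clears timer_pthread.owner and wakes the helper thread so
 * its loop can exit.
 */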
ubf_timer_destroy(void)
#if UBF_TIMER == UBF_TIMER_POSIX
    if (timer_posix.owner == getpid()) {
        size_t i, max = 10000000;
        for (i = 0; i < max; i++) {
            switch (ATOMIC_CAS(timer_posix.state, expect, RTIMER_DEAD)) {
                if (expect == RTIMER_DISARM) goto done;
                expect = RTIMER_DISARM;
                native_thread_yield();
                expect = RTIMER_ARMED;
                if (expect == RTIMER_ARMED) {
                expect = RTIMER_ARMED;
                rb_bug("RTIMER_DEAD unexpected");
        rb_bug("timed out waiting for timer to arm");
#elif UBF_TIMER == UBF_TIMER_PTHREAD
    timer_pthread.owner = 0;
    rb_thread_wakeup_timer_thread_fd(timer_pthread.low[1]);
native_stop_timer_thread(void)
    stopped = --system_working <= 0;
        ubf_timer_destroy();
native_reset_timer_thread(void)
#ifdef HAVE_SIGALTSTACK
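/*
 * With sigaltstack available, ruby_stack_overflowed_p() below decides
 * whether a faulting address lies in the guard band just past the current
 * stack: the stack base and size come from get_stack()/getrlimit(), a water
 * mark of at most 1MB (size / RUBY_STACK_SPACE_RATIO) is computed, and the
 * address is tested against that band on the growth side of the stack.
 */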
ruby_stack_overflowed_p(const rb_thread_t *th, const void *addr)
    const size_t water_mark = 1024 * 1024;
#ifdef STACKADDR_AVAILABLE
    if (get_stack(&base, &size) == 0) {
        if (getrlimit(RLIMIT_STACK, &rlim) == 0 && rlim.rlim_cur > size) {
    size /= RUBY_STACK_SPACE_RATIO;
    if (size > water_mark) size = water_mark;
        if (size > ~(size_t)base+1) size = ~(size_t)base+1;
        if (addr > base && addr <= (void *)((char *)base + size)) return 1;
        if (size > (size_t)base) size = (size_t)base;
        if (addr > (void *)((char *)base - size) && addr <= base) return 1;
#if UBF_TIMER == UBF_TIMER_PTHREAD
    if (fd == timer_pthread.low[0] || fd == timer_pthread.low[1])
    if (fd == signal_self_pipe.normal[0] || fd == signal_self_pipe.normal[1])
    if (fd == signal_self_pipe.ub_main[0] || fd == signal_self_pipe.ub_main[1])
    if (signal_self_pipe.owner_process == getpid())
#ifdef SET_CURRENT_THREAD_NAME
rb_thread_create_mjit_thread(void (*worker_func)(void))
    if (signal_self_pipe.normal[0] >= 0) {
        if (ATOMIC_PTR_CAS(sigwait_th, THREAD_INVALID, th) == THREAD_INVALID) {
            return signal_self_pipe.normal[0];
    VM_ASSERT(signal_self_pipe.normal[0] == fd);
ruby_ppoll(struct pollfd *fds, nfds_t nfds,
    timeout_ms = (int)(tmp + tmp2);
    return poll(fds, nfds, timeout_ms);
# define ppoll(fds,nfds,ts,sigmask) ruby_ppoll((fds),(nfds),(ts),(sigmask))
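/*
 * On platforms without a native ppoll(2), ruby_ppoll() above emulates it
 * with poll(): the struct timespec timeout is converted to whole
 * milliseconds (timeout_ms) before the call is forwarded, which is adequate
 * for the sleeping done here.  The macro above routes ppoll() calls to it.
 */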
    pfd.fd = sigwait_fd;
    pfd.events = POLLIN;
    (void)ppoll(&pfd, 1, rb_hrtime2timespec(&ts, rel), 0);
    check_signals_nogvl(th, sigwait_fd);
        const rb_hrtime_t *sto = sigwait_timeout(th, sigwait_fd, &to, &n);
            n = ppoll(&pfd, 1, rb_hrtime2timespec(&ts, sto), 0);
            if (check_signals_nogvl(th, sigwait_fd))
        if (rel && hrtime_update_expire(&to, end))
ubf_ppoll_sleep(void *ignore)
    rb_thread_wakeup_timer_thread_fd(signal_self_pipe.ub_main[1]);
#define GVL_UNLOCK_BEGIN_YIELD(th) do { \
    const native_thread_data_t *next; \
    rb_vm_t *vm = th->vm; \
    RB_GC_SAVE_MACHINE_CONTEXT(th); \
    rb_native_mutex_lock(&vm->gvl.lock); \
    next = gvl_release_common(vm); \
    rb_native_mutex_unlock(&vm->gvl.lock); \
    if (!next && vm_living_thread_num(vm) > 1) { \
        native_thread_yield(); \
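/*
 * GVL_UNLOCK_BEGIN_YIELD releases the GVL around a native sleep: it saves
 * the machine context for GC, hands the lock to the next waiter via
 * gvl_release_common(), and, if nobody was waiting but other Ruby threads
 * exist, yields the CPU so they get a chance to run.
 */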
    GVL_UNLOCK_BEGIN_YIELD(th);
        struct pollfd pfd[2];
        pfd[0].fd = signal_self_pipe.normal[0];
        pfd[1].fd = signal_self_pipe.ub_main[0];
        pfd[0].events = pfd[1].events = POLLIN;
        if (ppoll(pfd, 2, rb_hrtime2timespec(&ts, rel), 0) > 0) {
            if (pfd[1].revents & POLLIN) {
                (void)consume_communication_pipe(pfd[1].fd);
    unblock_function_clear(th);
    if (sigwait_fd >= 0) {
        GVL_UNLOCK_BEGIN_YIELD(th);
        check_signals_nogvl(th, sigwait_fd);
        unblock_function_clear(th);
        native_ppoll_sleep(th, rel);
        native_cond_sleep(th, rel);
#if UBF_TIMER == UBF_TIMER_PTHREAD
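/*
 * With UBF_TIMER_PTHREAD, timer_pthread_fn() below is the body of the
 * helper thread: while system_working it polls timer_pthread.low[0], drains
 * the pipe with consume_communication_pipe(), and, while work remains,
 * re-polls with a TIME_QUANTUM_MSEC timeout so interrupts keep firing until
 * it is told to stop.
 */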
timer_pthread_fn(void *p)
    pfd.fd = timer_pthread.low[0];
    pfd.events = POLLIN;
    while (system_working > 0) {
        (void)poll(&pfd, 1, timeout);
        ccp = consume_communication_pipe(pfd.fd);
        if (system_working > 0) {
                    timeout = TIME_QUANTUM_MSEC;
ubf_caller(void *ignore)
rb_thread_start_unblock_thread(void)