Misc #14937 » 0001-thread_pthread-lazy-spawn-timer-thread-only-on-conte.patch
internal.h
----------

# define __has_extension __has_feature
#endif
/* Prevent compiler from reordering access */
#define ACCESS_ONCE(type,x) (*((volatile type *)&(x)))
#if defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 201112L)
# define STATIC_ASSERT(name, expr) _Static_assert(expr, #name ": " #expr)
#elif GCC_VERSION_SINCE(4, 6, 0) || __has_extension(c_static_assert)
process.c
---------

void rb_native_cond_wait(rb_nativethread_cond_t *, rb_nativethread_lock_t *);
rb_nativethread_cond_t *rb_sleep_cond_get(const rb_execution_context_t *);
void rb_sleep_cond_put(rb_nativethread_cond_t *);
int rb_sigwait_fd_get(const rb_thread_t *);
void rb_sigwait_sleep(const rb_thread_t *, int fd, const struct timespec *);
void rb_sigwait_fd_put(const rb_thread_t *, int fd);
/*
* When a thread is done using sigwait_fd and there are other threads
* sleeping on waitpid, we must kick one of the threads out of
* rb_native_cond_wait so it can switch to rb_sigwait_sleep
*/
static void
sigwait_fd_migrate_sleeper(rb_vm_t *vm)
{
struct waitpid_state *w = 0;
list_for_each(&vm->waiting_pids, w, wnode) {
if (!w->cond) continue; /* somebody else already got sigwait_fd */
rb_native_cond_signal(w->cond);
return;
}
list_for_each(&vm->waiting_grps, w, wnode) {
if (!w->cond) continue; /* somebody else already got sigwait_fd */
rb_native_cond_signal(w->cond);
return;
}
}
void
rb_sigwait_fd_migrate(rb_vm_t *vm)
{
rb_native_mutex_lock(&vm->waitpid_lock);
sigwait_fd_migrate_sleeper(vm);
rb_native_mutex_unlock(&vm->waitpid_lock);
}
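The declarations above are used in a fixed pattern: a sleeping thread tries to become the sigwait_fd owner, sleeps on it while processing signals, then hands it back and wakes another sleeper so the fd is never left unwatched. A rough sketch of that calling sequence (not part of the patch; locking of the waiter lists is omitted):

    static void
    sleep_and_watch_signals(rb_thread_t *th, const struct timespec *ts)
    {
        int fd = rb_sigwait_fd_get(th);      /* try to become the signal watcher */

        if (fd >= 0) {
            rb_sigwait_sleep(th, fd, ts);    /* sleep on the pipe, reaping signals */
            rb_sigwait_fd_put(th, fd);       /* give up ownership ... */
            rb_sigwait_fd_migrate(th->vm);   /* ... and kick another sleeper to take over */
        }
        else {
            /* somebody else owns sigwait_fd; wait on a condition variable instead */
        }
    }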
static void
waitpid_notify(struct waitpid_state *w, rb_pid_t ret)
{
w->ret = ret;
list_del_init(&w->wnode);
rb_native_cond_signal(w->cond);
if (w->cond) {
rb_native_cond_signal(w->cond);
}
else {
/* w is owned by this thread */
}
}
#ifdef _WIN32 /* for spawnvp result from mjit.c */
...
#endif
extern volatile unsigned int ruby_nocldwait; /* signal.c */
/* called by timer thread */
/* called by timer thread or thread which acquired sigwait_fd */
static void
waitpid_each(struct list_head *head)
{
...
w->options = options;
}
static const struct timespec *
sigwait_sleep_time(void)
{
if (SIGCHLD_LOSSY) {
static const struct timespec busy_wait = { 0, 100000000 };
return &busy_wait;
}
return 0;
}
/*
* must be called with vm->waitpid_lock held, this is not interruptible
*/
...
if (w.ret == -1) w.errnum = errno;
}
else {
w.cond = cond;
int sigwait_fd;
w.ec = 0;
list_add(w.pid > 0 ? &vm->waiting_pids : &vm->waiting_grps, &w.wnode);
do {
rb_native_cond_wait(w.cond, &vm->waitpid_lock);
sigwait_fd = rb_sigwait_fd_get(0);
if (sigwait_fd >= 0) {
w.cond = 0;
rb_native_mutex_unlock(&vm->waitpid_lock);
rb_sigwait_sleep(0, sigwait_fd, sigwait_sleep_time());
rb_native_mutex_lock(&vm->waitpid_lock);
rb_sigwait_fd_put(0, sigwait_fd);
}
else {
w.cond = cond;
rb_native_cond_wait(w.cond, &vm->waitpid_lock);
}
} while (!w.ret);
list_del(&w.wnode);
/* we're done, maybe other waitpid callers are not: */
if (sigwait_fd >= 0)
sigwait_fd_migrate_sleeper(vm);
}
if (status) {
*status = w.status;
...
struct waitpid_state *w = x;
/* th->interrupt_lock is already held by rb_threadptr_interrupt_common */
rb_native_cond_signal(w->cond);
if (w->cond)
rb_native_cond_signal(w->cond);
else
rb_thread_wakeup_timer_thread(0); /* kick sigwait_fd */
}
static void *
...
{
struct waitpid_state *w = x;
rb_thread_t *th = rb_ec_thread_ptr(w->ec);
int sigwait_fd = -1;
rb_native_mutex_lock(&th->interrupt_lock);
/*
...
* by the time we enter this. And we may also be interrupted.
*/
if (!w->ret && !RUBY_VM_INTERRUPTED_ANY(w->ec)) {
if (SIGCHLD_LOSSY) {
rb_thread_wakeup_timer_thread();
sigwait_fd = rb_sigwait_fd_get(th);
if (sigwait_fd >= 0) {
rb_nativethread_cond_t *cond = w->cond;
w->cond = 0;
rb_native_mutex_unlock(&th->interrupt_lock);
rb_sigwait_sleep(th, sigwait_fd, sigwait_sleep_time());
rb_native_mutex_lock(&th->interrupt_lock);
w->cond = cond;
rb_sigwait_fd_put(th, sigwait_fd);
}
else {
/* another thread calling rb_sigwait_sleep will process
* signals for us */
if (SIGCHLD_LOSSY) {
rb_thread_wakeup_timer_thread(0);
}
rb_native_cond_wait(w->cond, &th->interrupt_lock);
}
rb_native_cond_wait(w->cond, &th->interrupt_lock);
}
rb_native_mutex_unlock(&th->interrupt_lock);
if (sigwait_fd >= 0)
rb_sigwait_fd_migrate(th->vm);
return 0;
}
signal.c
--------

static rb_atomic_t sigchld_hit;
/* Prevent compiler from reordering access */
#define ACCESS_ONCE(type,x) (*((volatile type *)&(x)))
static RETSIGTYPE
sighandler(int sig)
{
...
else {
signal_enque(sig);
}
rb_thread_wakeup_timer_thread();
rb_thread_wakeup_timer_thread(sig);
#if !defined(BSD_SIGNAL) && !defined(POSIX_SIGNAL)
ruby_signal(sig, sighandler);
#endif
...
#ifdef HAVE_PTHREAD_SIGMASK
sigset_t mask;
sigemptyset(&mask);
sigaddset(&mask, RUBY_SIGCHLD); /* timer-thread handles this */
pthread_sigmask(SIG_SETMASK, &mask, NULL);
#endif
}
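rb_thread_wakeup_timer_thread() must stay async-signal-safe here, which is why the pthread implementation below reduces it to a single write() on a pipe. A minimal sketch of that self-pipe wakeup, with hypothetical names (the patch uses timer_thread_pipe.normal):

    #include <errno.h>
    #include <unistd.h>

    static int wakeup_pipe[2];   /* hypothetical non-blocking pipe */

    static void
    wakeup_from_signal_handler(void)
    {
        const char buf = '!';

        /* EAGAIN only means the pipe is full, i.e. a wakeup is already pending */
        if (write(wakeup_pipe[1], &buf, 1) < 0 && errno != EAGAIN) {
            /* nothing async-signal-safe left to do */
        }
    }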
...
void ruby_waitpid_all(rb_vm_t *); /* process.c */
/* only runs in the timer-thread */
void
ruby_sigchld_handler(rb_vm_t *vm)
{
test/ruby/test_process.rb
-------------------------

puts Dir.entries("/proc/self/task") - %W[. ..]
end
bug4920 = '[ruby-dev:43873]'
assert_equal(2, data.size, bug4920)
assert((1..2).include?(data.size), bug4920)
assert_not_include(data.map(&:to_i), pid)
end
else # darwin
thread.c
--------

static void timespec_sub(struct timespec *, const struct timespec *);
static int timespec_update_expire(struct timespec *, const struct timespec *);
static void getclockofday(struct timespec *);
NORETURN(static void async_bug_fd(const char *mesg, int errno_arg, int fd));
static void consume_communication_pipe(int fd);
static void check_signals_nogvl(rb_thread_t *, int sigwait_fd);
void rb_sigwait_fd_migrate(rb_vm_t *); /* process.c */
#define eKillSignal INT2FIX(0)
#define eTerminateSignal INT2FIX(1)
...
return FALSE;
}
#define restore_fdset(fds1, fds2) \
((fds1) ? rb_fd_dup(fds1, fds2) : (void)0)
struct select_set {
rb_fdset_t read;
rb_fdset_t write;
rb_fdset_t except;
int max;
int sigwait_fd;
rb_thread_t *th;
rb_fdset_t *rset;
rb_fdset_t *wset;
rb_fdset_t *eset;
rb_fdset_t orig_rset;
rb_fdset_t orig_wset;
rb_fdset_t orig_eset;
struct timeval *timeout;
};
static size_t
select_set_memsize(const void *p)
static VALUE
select_set_free(VALUE p)
{
return sizeof(struct select_set);
}
struct select_set *set = (struct select_set *)p;
static void
select_set_free(void *p)
{
struct select_set *orig = p;
if (set->sigwait_fd >= 0) {
rb_sigwait_fd_put(set->th, set->sigwait_fd);
rb_sigwait_fd_migrate(set->th->vm);
}
rb_fd_term(&orig->read);
rb_fd_term(&orig->write);
rb_fd_term(&orig->except);
xfree(orig);
}
rb_fd_term(&set->orig_rset);
rb_fd_term(&set->orig_wset);
rb_fd_term(&set->orig_eset);
static const rb_data_type_t select_set_type = {
"select_set",
{NULL, select_set_free, select_set_memsize,},
0, 0, RUBY_TYPED_FREE_IMMEDIATELY
};
return Qfalse;
}
static int
do_select(int n, rb_fdset_t *const readfds, rb_fdset_t *const writefds,
rb_fdset_t *const exceptfds, struct timeval *timeout)
static VALUE
do_select(VALUE p)
{
struct select_set *set = (struct select_set *)p;
int MAYBE_UNUSED(result);
int lerrno;
struct timespec ts, end, *tsp;
rb_thread_t *th = GET_THREAD();
VALUE o;
struct select_set *orig;
o = TypedData_Make_Struct(0, struct select_set, &select_set_type, orig);
timeout_prepare(&tsp, &ts, &end, timeout);
timeout_prepare(&tsp, &ts, &end, set->timeout);
#define restore_fdset(dst, src) \
((dst) ? rb_fd_dup(dst, src) : (void)0)
#define do_select_update() \
(restore_fdset(readfds, &orig->read), \
restore_fdset(writefds, &orig->write), \
restore_fdset(exceptfds, &orig->except), \
(restore_fdset(set->rset, &set->orig_rset), \
restore_fdset(set->wset, &set->orig_wset), \
restore_fdset(set->eset, &set->orig_eset), \
TRUE)
#define fd_init_copy(f) \
(f##fds) ? rb_fd_init_copy(&orig->f, f##fds) : rb_fd_no_init(&orig->f)
fd_init_copy(read);
fd_init_copy(write);
fd_init_copy(except);
#undef fd_init_copy
do {
lerrno = 0;
BLOCKING_REGION(th, {
result = native_fd_select(n, readfds, writefds, exceptfds,
timeval_for(timeout, tsp), th);
BLOCKING_REGION(set->th, {
result = native_fd_select(set->max, set->rset, set->wset, set->eset,
timeval_for(set->timeout, tsp), set->th);
if (result < 0) lerrno = errno;
}, ubf_select, th, FALSE);
}, ubf_select, set->th, FALSE);
RUBY_VM_CHECK_INTS_BLOCKING(th->ec); /* may raise */
} while (wait_retryable(&result, lerrno, tsp, &end) && do_select_update());
if (set->sigwait_fd >= 0 && rb_fd_isset(set->sigwait_fd, set->rset)) {
result--;
check_signals_nogvl(set->th, set->sigwait_fd);
}
/* didn't raise, perform cleanup ourselves */
select_set_free(orig);
rb_gc_force_recycle(o);
RUBY_VM_CHECK_INTS_BLOCKING(set->th->ec); /* may raise */
} while (wait_retryable(&result, lerrno, tsp, &end) && do_select_update());
if (result < 0) {
errno = lerrno;
}
return result;
return (VALUE)result;
}
static void
...
return TRUE;
}
static rb_fdset_t *
init_set_fd(int fd, rb_fdset_t *fds)
{
if (fd < 0) {
return 0;
}
rb_fd_init(fds);
rb_fd_set(fd, fds);
return fds;
}
int
rb_thread_fd_select(int max, rb_fdset_t * read, rb_fdset_t * write, rb_fdset_t * except,
struct timeval *timeout)
{
if (!read && !write && !except) {
struct select_set set;
set.th = GET_THREAD();
set.max = max;
set.sigwait_fd = rb_sigwait_fd_get(set.th);
set.rset = read;
set.wset = write;
set.eset = except;
set.timeout = timeout;
if (set.sigwait_fd >= 0) {
if (set.rset)
rb_fd_set(set.sigwait_fd, set.rset);
else
set.rset = init_set_fd(set.sigwait_fd, &set.orig_rset);
if (set.sigwait_fd > set.max) {
set.max = set.sigwait_fd + 1;
}
}
if (!set.rset && !set.wset && !set.eset) {
if (!timeout) {
rb_thread_sleep_forever();
return 0;
...
return 0;
}
if (read) {
rb_fd_resize(max - 1, read);
}
if (write) {
rb_fd_resize(max - 1, write);
}
if (except) {
rb_fd_resize(max - 1, except);
}
return do_select(max, read, write, except, timeout);
#define fd_init_copy(f) do { \
if (set.f) { \
rb_fd_resize(set.max - 1, set.f); \
if (&set.orig_##f != set.f) { /* sigwait_fd */ \
rb_fd_init_copy(&set.orig_##f, set.f); \
} \
} \
else { \
rb_fd_no_init(&set.orig_##f); \
} \
} while (0)
fd_init_copy(rset);
fd_init_copy(wset);
fd_init_copy(eset);
#undef fd_init_copy
return (int)rb_ensure(do_select, (VALUE)&set, select_set_free, (VALUE)&set);
}
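The rb_ensure() call above is what guarantees select_set_free() runs even when RUBY_VM_CHECK_INTS_BLOCKING() raises inside do_select(). A generic sketch of that pairing (hypothetical function names):

    static VALUE
    body_fn(VALUE arg)
    {
        /* ... work that may raise a Ruby exception ... */
        return Qnil;
    }

    static VALUE
    cleanup_fn(VALUE arg)
    {
        /* always runs, whether body_fn returned or raised */
        return Qnil;
    }

    static VALUE
    call_with_cleanup(VALUE arg)
    {
        return rb_ensure(body_fn, arg, cleanup_fn, arg);
    }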
#ifdef USE_POLL
...
int
rb_wait_for_single_fd(int fd, int events, struct timeval *timeout)
{
struct pollfd fds;
struct pollfd fds[2];
int result = 0, lerrno;
struct timespec ts, end, *tsp;
rb_thread_t *th = GET_THREAD();
nfds_t nfds;
timeout_prepare(&tsp, &ts, &end, timeout);
fds.fd = fd;
fds.events = (short)events;
fds[0].fd = fd;
fds[0].events = (short)events;
do {
fds.revents = 0;
fds[0].revents = 0;
fds[1].fd = rb_sigwait_fd_get(th);
if (fds[1].fd >= 0) {
fds[1].events = POLLIN;
fds[1].revents = 0;
nfds = 2;
}
else {
nfds = 1;
}
lerrno = 0;
BLOCKING_REGION(th, {
result = ppoll(&fds, 1, tsp, NULL);
result = ppoll(fds, nfds, tsp, NULL);
if (result < 0) lerrno = errno;
}, ubf_select, th, FALSE);
if (fds[1].fd >= 0) {
if (fds[1].revents) {
result--;
check_signals_nogvl(th, fds[1].fd);
}
rb_sigwait_fd_put(th, fds[1].fd);
rb_sigwait_fd_migrate(th->vm);
}
RUBY_VM_CHECK_INTS_BLOCKING(th->ec);
} while (wait_retryable(&result, lerrno, tsp, &end));
if (result < 0) {
...
return -1;
}
if (fds.revents & POLLNVAL) {
if (fds[0].revents & POLLNVAL) {
errno = EBADF;
return -1;
}
...
* Therefore we need to fix it up.
*/
result = 0;
if (fds.revents & POLLIN_SET)
if (fds[0].revents & POLLIN_SET)
result |= RB_WAITFD_IN;
if (fds.revents & POLLOUT_SET)
if (fds[0].revents & POLLOUT_SET)
result |= RB_WAITFD_OUT;
if (fds.revents & POLLEX_SET)
if (fds[0].revents & POLLEX_SET)
result |= RB_WAITFD_PRI;
/* all requested events are ready if there is an error */
if (fds.revents & POLLERR_SET)
if (fds[0].revents & POLLERR_SET)
result |= events;
return result;
}
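For reference, a short hypothetical caller of rb_wait_for_single_fd() from C extension code, waiting up to one second for a descriptor to become readable:

    #include <ruby/io.h>   /* rb_wait_for_single_fd, RB_WAITFD_IN */

    static int
    wait_readable_1s(int fd)
    {
        struct timeval tv = { 1, 0 };    /* one second */
        int rv = rb_wait_for_single_fd(fd, RB_WAITFD_IN, &tv);

        if (rv < 0) return -1;           /* error, errno is set */
        return rv & RB_WAITFD_IN;        /* nonzero if readable, 0 on timeout */
    }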
#else /* ! USE_POLL - implement rb_io_poll_fd() using select() */
static rb_fdset_t *
init_set_fd(int fd, rb_fdset_t *fds)
{
if (fd < 0) {
return 0;
}
rb_fd_init(fds);
rb_fd_set(fd, fds);
return fds;
}
struct select_args {
union {
int fd;
...
}
rb_native_mutex_unlock(&vm->thread_destruct_lock);
/* check signal */
#ifndef HAVE_PTHREAD_H
/*
* check signal, pthreads platforms do this from Ruby threads which
* have working rb_sigwait_fd_get
*/
ruby_sigchld_handler(vm);
rb_threadptr_check_signal(vm->main_thread);
#endif
#if 0
/* prove profiler */
...
#endif
}
static void
async_bug_fd(const char *mesg, int errno_arg, int fd)
{
char buff[64];
size_t n = strlcpy(buff, mesg, sizeof(buff));
if (n < sizeof(buff)-3) {
ruby_snprintf(buff+n, sizeof(buff)-n, "(%d)", fd);
}
rb_async_bug_errno(buff, errno_arg);
}
/* VM-dependent API is not available for this function */
static void
consume_communication_pipe(int fd)
{
#define CCP_READ_BUFF_SIZE 1024
/* buffer can be shared because no one refers to them. */
static char buff[CCP_READ_BUFF_SIZE];
ssize_t result;
while (1) {
result = read(fd, buff, sizeof(buff));
if (result == 0) {
return;
}
else if (result < 0) {
int e = errno;
switch (e) {
case EINTR:
continue; /* retry */
case EAGAIN:
#if defined(EWOULDBLOCK) && EWOULDBLOCK != EAGAIN
case EWOULDBLOCK:
#endif
return;
default:
async_bug_fd("consume_communication_pipe: read", e, fd);
}
}
}
}
static void
check_signals_nogvl(rb_thread_t *th, int sigwait_fd)
{
rb_vm_t *vm = GET_VM(); /* th may be 0 */
consume_communication_pipe(sigwait_fd);
ubf_wakeup_all_threads();
ruby_sigchld_handler(vm);
if (rb_signal_buff_size()) {
if (th == vm->main_thread)
/* no need to lock + wakeup if already in main thread */
RUBY_VM_SET_TRAP_INTERRUPT(th->ec);
else
threadptr_trap_interrupt(vm->main_thread);
}
}
void
rb_thread_stop_timer_thread(void)
{
thread_pthread.c
----------------

void rb_native_cond_wait(rb_nativethread_cond_t *cond, rb_nativethread_lock_t *mutex);
void rb_native_cond_initialize(rb_nativethread_cond_t *cond);
void rb_native_cond_destroy(rb_nativethread_cond_t *cond);
static void rb_thread_wakeup_timer_thread_low(void);
static void rb_thread_spawn_timer(rb_vm_t *);
static void rb_thread_wakeup_timer_thread_low(rb_vm_t *);
#define TIMER_THREAD_MASK (1)
#define TIMER_THREAD_SLEEPY (2|TIMER_THREAD_MASK)
#define TIMER_THREAD_BUSY (4|TIMER_THREAD_MASK)
#if defined(HAVE_POLL) && defined(HAVE_FCNTL) && defined(F_GETFL) && \
defined(F_SETFL) && defined(O_NONBLOCK) && \
defined(F_GETFD) && defined(F_SETFD) && defined(FD_CLOEXEC)
/* The timer thread sleeps while only one Ruby thread is running. */
# define TIMER_IMPL TIMER_THREAD_SLEEPY
#else
# define TIMER_IMPL TIMER_THREAD_BUSY
#endif
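Both implementation values carry the low TIMER_THREAD_MASK bit, so code that only needs "some timer thread exists" tests the mask, while implementation-specific code compares for equality, e.g.:

    /* Sketch: how the TIMER_IMPL values above are tested later in this file. */
    #if (TIMER_IMPL & TIMER_THREAD_MASK)
    /* built for both TIMER_THREAD_SLEEPY (3) and TIMER_THREAD_BUSY (5) */
    #endif

    #if TIMER_IMPL == TIMER_THREAD_SLEEPY
    /* built only for the pipe/poll based implementation */
    #endif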
static rb_nativethread_lock_t timer_thread_lock;
static rb_nativethread_cond_t timer_thread_idle;
static rb_nativethread_cond_t timer_thread_busy;
static struct {
pthread_t id;
int created;
int created; /* protected by vm->gvl.lock */
} timer_thread;
#define TIMER_THREAD_CREATED_P() (timer_thread.created != 0)
#define TIMER_THREAD_CREATED_P() (timer_thread_pipe.owner_process == getpid())
#if defined(HAVE_PTHREAD_CONDATTR_SETCLOCK) && \
defined(CLOCK_REALTIME) && defined(CLOCK_MONOTONIC) && \
...
#endif
static void
gvl_acquire_common(rb_vm_t *vm)
gvl_acquire_common(rb_vm_t *vm, const rb_thread_t *th)
{
if (vm->gvl.acquired) {
/*
* TODO: If [Feature #14717] is accepted, check preemptibility
* of thread and do not wake up timer thread unless running
* thread is preemptible. If no Threads are preemptible;
* timer thread will never exist for this process \o/
* cf. https://bugs.ruby-lang.org/issues/14717
*/
if (!vm->gvl.waiting++) {
/*
...
* When timer thread is polling mode, we don't want to
* make confusing timer thread interval time.
*/
rb_thread_wakeup_timer_thread_low();
rb_thread_wakeup_timer_thread_low(vm);
}
while (vm->gvl.acquired) {
...
}
static void
gvl_acquire(rb_vm_t *vm, rb_thread_t *th)
gvl_acquire(rb_vm_t *vm, const rb_thread_t *th)
{
rb_native_mutex_lock(&vm->gvl.lock);
gvl_acquire_common(vm);
gvl_acquire_common(vm, th);
rb_native_mutex_unlock(&vm->gvl.lock);
}
...
rb_native_cond_broadcast(&vm->gvl.switch_wait_cond);
acquire:
gvl_acquire_common(vm);
gvl_acquire_common(vm, th);
rb_native_mutex_unlock(&vm->gvl.lock);
}
...
return err;
}
#if (TIMER_IMPL & TIMER_THREAD_MASK)
static void
native_thread_join(pthread_t th)
{
...
rb_raise(rb_eThreadError, "native_thread_join() failed (%d)", err);
}
}
#endif /* TIMER_THREAD_MASK */
#if USE_NATIVE_THREAD_PRIORITY
...
}
static void
native_sleep(rb_thread_t *th, struct timespec *timeout_rel)
native_cond_sleep(rb_thread_t *th, struct timespec *timeout_rel)
{
struct timespec timeout;
rb_nativethread_lock_t *lock = &th->interrupt_lock;
...
ubf_select(void *ptr)
{
rb_thread_t *th = (rb_thread_t *)ptr;
register_ubf_list(th);
if (th == ruby_thread_from_native()) {
/* already awake */
return;
}
register_ubf_list(th);
rb_native_mutex_lock(&th->vm->gvl.lock);
/*
* ubf_wakeup_thread() doesn't guarantee to wake up a target thread.
* Therefore, we repeatedly call ubf_wakeup_thread() until a target thread
...
* In the other hands, we shouldn't call rb_thread_wakeup_timer_thread()
* if running on timer thread because it may make endless wakeups.
*/
if (!pthread_equal(pthread_self(), timer_thread.id))
rb_thread_wakeup_timer_thread();
if (!timer_thread.created || !pthread_equal(pthread_self(),
timer_thread.id)) {
rb_thread_wakeup_timer_thread_low(th->vm);
}
rb_native_mutex_unlock(&th->vm->gvl.lock);
ubf_wakeup_thread(th);
}
...
*/
#define TIME_QUANTUM_USEC (100 * 1000)
#if TIMER_IMPL == TIMER_THREAD_SLEEPY
static struct {
/*
* Read end of each pipe is closed inside timer thread for shutdown
* Write ends are closed by a normal Ruby thread during shutdown
*/
/* pipes are closed in forked children when owner_process does not match */
int normal[2];
int low[2];
/* volatile for signal handler use: */
volatile rb_pid_t owner_process;
} timer_thread_pipe = {
{-1, -1},
{-1, -1}, /* low priority */
};
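owner_process is what lets the pipe survive fork(): a forked child sees a recorded pid that is not its own, closes the inherited descriptors, recreates the pipe, and only then claims ownership. A condensed sketch of that check (hypothetical helper; the real logic lives in rb_thread_create_timer_thread below):

    static void
    reset_pipe_after_fork_sketch(void)
    {
        rb_pid_t current = getpid();
        rb_pid_t owner = timer_thread_pipe.owner_process;

        if (owner && owner != current) {
            /* we inherited the parent's pipe; it must not be reused */
            close(timer_thread_pipe.normal[0]);
            close(timer_thread_pipe.normal[1]);
            /* ... recreate the pipe here, then claim it: */
            timer_thread_pipe.owner_process = current;
        }
    }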
NORETURN(static void async_bug_fd(const char *mesg, int errno_arg, int fd));
static void
async_bug_fd(const char *mesg, int errno_arg, int fd)
{
char buff[64];
size_t n = strlcpy(buff, mesg, sizeof(buff));
if (n < sizeof(buff)-3) {
ruby_snprintf(buff+n, sizeof(buff)-n, "(%d)", fd);
}
rb_async_bug_errno(buff, errno_arg);
}
/* only use signal-safe system calls here */
static void
rb_thread_wakeup_timer_thread_fd(int fd)
...
}
void
rb_thread_wakeup_timer_thread(void)
rb_thread_wakeup_timer_thread(int sig)
{
/* must be safe inside sighandler, so no mutex */
if (timer_thread_pipe.owner_process == getpid()) {
rb_thread_wakeup_timer_thread_fd(timer_thread_pipe.normal[1]);
}
}
static void
rb_thread_wakeup_timer_thread_low(void)
{
if (timer_thread_pipe.owner_process == getpid()) {
rb_thread_wakeup_timer_thread_fd(timer_thread_pipe.low[1]);
}
}
/* VM-dependent API is not available for this function */
static void
consume_communication_pipe(int fd)
{
#define CCP_READ_BUFF_SIZE 1024
/* buffer can be shared because no one refers to them. */
static char buff[CCP_READ_BUFF_SIZE];
ssize_t result;
while (1) {
result = read(fd, buff, sizeof(buff));
if (result == 0) {
return;
}
else if (result < 0) {
int e = errno;
switch (e) {
case EINTR:
continue; /* retry */
case EAGAIN:
#if defined(EWOULDBLOCK) && EWOULDBLOCK != EAGAIN
case EWOULDBLOCK:
#endif
return;
default:
async_bug_fd("consume_communication_pipe: read", e, fd);
}
/*
* system_working check is required because vm and main_thread are
* freed during shutdown
*/
if (sig && system_working) {
volatile rb_execution_context_t *ec;
rb_vm_t *vm = GET_VM();
rb_thread_t *mth;
/*
* FIXME: root VM and main_thread should be static and not
* on heap for maximum safety (and startup/shutdown speed)
*/
if (!vm) return;
mth = vm->main_thread;
if (!mth || !system_working) return;
/* this relies on GC for grace period before cont_free */
ec = ACCESS_ONCE(rb_execution_context_t *, mth->ec);
if (ec) RUBY_VM_SET_TRAP_INTERRUPT(ec);
}
}
}
...
rb_sys_fail(0);
}
/* communication pipe with timer thread and signal handler */
static int
setup_communication_pipe_internal(int pipes[2])
{
...
return 0;
}
/* communication pipe with timer thread and signal handler */
static int
setup_communication_pipe(void)
static void
rb_thread_wakeup_timer_thread_low(rb_vm_t *vm)
{
rb_pid_t owner = timer_thread_pipe.owner_process;
if (owner && owner != getpid()) {
CLOSE_INVALIDATE(normal[0]);
CLOSE_INVALIDATE(normal[1]);
CLOSE_INVALIDATE(low[0]);
CLOSE_INVALIDATE(low[1]);
}
if (setup_communication_pipe_internal(timer_thread_pipe.normal) < 0) {
return errno;
/*
* this does not have to be async-signal safe, and is called
* while holding vm->gvl.lock (not the entire GVL)
*/
if (timer_thread.created) {
rb_native_mutex_lock(&timer_thread_lock);
rb_native_cond_signal(&timer_thread_idle);
rb_native_mutex_unlock(&timer_thread_lock);
}
if (setup_communication_pipe_internal(timer_thread_pipe.low) < 0) {
return errno;
else {
rb_thread_spawn_timer(vm);
}
return 0;
}
/**
* Let the timer thread sleep a while.
*
* The timer thread sleeps until woken up by rb_thread_wakeup_timer_thread() if only one Ruby thread is running.
* The timer thread sleeps until woken up by rb_thread_wakeup_timer_thread_low()
* if only one Ruby thread is running.
* @pre the calling context is in the timer thread.
*/
static inline void
static void
timer_thread_sleep(rb_vm_t *vm)
{
int result;
int need_polling;
struct pollfd pollfds[2];
pollfds[0].fd = timer_thread_pipe.normal[0];
pollfds[0].events = POLLIN;
pollfds[1].fd = timer_thread_pipe.low[0];
pollfds[1].events = POLLIN;
need_polling = !ubf_threads_empty();
if (SIGCHLD_LOSSY && !need_polling) {
rb_native_mutex_lock(&vm->waitpid_lock);
if (!list_empty(&vm->waiting_pids) || !list_empty(&vm->waiting_grps)) {
need_polling = 1;
}
rb_native_mutex_unlock(&vm->waitpid_lock);
}
if (vm->gvl.waiting > 0 || need_polling) {
/* polling (TIME_QUANTUM_USEC usec) */
result = poll(pollfds, 1, TIME_QUANTUM_USEC/1000);
if (vm->gvl.waiting > 0 || !ubf_threads_empty()) {
static const struct timespec ts = { 0, TIME_QUANTUM_USEC * 1000 };
native_cond_timedwait(&timer_thread_busy, &timer_thread_lock, &ts);
}
else {
/* wait (infinite) */
result = poll(pollfds, numberof(pollfds), -1);
}
if (result == 0) {
/* maybe timeout */
}
else if (result > 0) {
consume_communication_pipe(timer_thread_pipe.normal[0]);
consume_communication_pipe(timer_thread_pipe.low[0]);
}
else { /* result < 0 */
int e = errno;
switch (e) {
case EBADF:
case EINVAL:
case ENOMEM: /* from Linux man */
case EFAULT: /* from FreeBSD man */
rb_async_bug_errno("thread_timer: select", e);
default:
/* ignore */;
}
rb_native_cond_wait(&timer_thread_idle, &timer_thread_lock);
}
}
#endif /* TIMER_THREAD_SLEEPY */
#if TIMER_IMPL == TIMER_THREAD_BUSY
# define PER_NANO 1000000000
void rb_thread_wakeup_timer_thread(void) {}
static void rb_thread_wakeup_timer_thread_low(void) {}
static rb_nativethread_lock_t timer_thread_lock;
static rb_nativethread_cond_t timer_thread_cond;
static inline void
timer_thread_sleep(rb_vm_t *unused)
{
struct timespec ts;
ts.tv_sec = 0;
ts.tv_nsec = TIME_QUANTUM_USEC * 1000;
ts = native_cond_timeout(&timer_thread_cond, ts);
native_cond_timedwait(&timer_thread_cond, &timer_thread_lock, &ts);
}
#endif /* TIMER_IMPL == TIMER_THREAD_BUSY */
#if !defined(SET_CURRENT_THREAD_NAME) && defined(__linux__) && defined(PR_SET_NAME)
# define SET_CURRENT_THREAD_NAME(name) prctl(PR_SET_NAME, name)
...
thread_timer(void *p)
{
rb_vm_t *vm = p;
#ifdef HAVE_PTHREAD_SIGMASK /* mainly to enable SIGCHLD */
{
sigset_t mask;
sigemptyset(&mask);
pthread_sigmask(SIG_SETMASK, &mask, NULL);
}
#endif
if (TT_DEBUG) WRITE_CONST(2, "start timer thread\n");
...
SET_CURRENT_THREAD_NAME("ruby-timer-thr");
#endif
#if TIMER_IMPL == TIMER_THREAD_BUSY
rb_native_mutex_initialize(&timer_thread_lock);
rb_native_cond_initialize(&timer_thread_cond);
rb_native_mutex_lock(&timer_thread_lock);
#endif
while (system_working > 0) {
/* timer function */
...
/* wait */
timer_thread_sleep(vm);
}
#if TIMER_IMPL == TIMER_THREAD_BUSY
rb_native_mutex_unlock(&timer_thread_lock);
rb_native_cond_destroy(&timer_thread_cond);
rb_native_mutex_destroy(&timer_thread_lock);
#endif
if (TT_DEBUG) WRITE_CONST(2, "finish timer thread\n");
return NULL;
}
#if (TIMER_IMPL & TIMER_THREAD_MASK)
static void
rb_thread_create_timer_thread(void)
rb_thread_spawn_timer(rb_vm_t *vm)
{
if (!timer_thread.created) {
if (!timer_thread.created) { /* protected by vm->gvl.lock mutex */
size_t stack_size = 0;
int err;
pthread_attr_t attr;
rb_vm_t *vm = GET_VM();
int err = pthread_attr_init(&attr);
err = pthread_attr_init(&attr);
if (err != 0) {
rb_warn("pthread_attr_init failed for timer: %s, scheduling broken",
strerror(err));
return;
fprintf(stderr,
"pthread_attr_init failed for timer: %s, scheduling broken",
strerror(err));
return;
}
# ifdef PTHREAD_STACK_MIN
{
...
}
}
# endif
#if TIMER_IMPL == TIMER_THREAD_SLEEPY
err = setup_communication_pipe();
if (err) return;
#endif /* TIMER_THREAD_SLEEPY */
/* create timer thread */
if (timer_thread.created) {
rb_bug("rb_thread_create_timer_thread: Timer thread was already created\n");
rb_bug("rb_thread_spawn_timer: Timer thread was already created\n");
}
rb_native_mutex_initialize(&timer_thread_lock);
rb_native_cond_initialize(&timer_thread_idle);
rb_native_cond_initialize(&timer_thread_busy);
err = pthread_create(&timer_thread.id, &attr, thread_timer, vm);
pthread_attr_destroy(&attr);
if (err == EINVAL) {
/*
* Even if we are careful with our own stack use in thread_timer(),
* any third-party libraries (eg libkqueue) which rely on __thread
* storage can cause small stack sizes to fail. So lets hope the
* default stack size is enough for them:
*/
stack_size = 0;
err = pthread_create(&timer_thread.id, NULL, thread_timer, vm);
}
if (err != 0) {
rb_warn("pthread_create failed for timer: %s, scheduling broken",
strerror(err));
if (stack_size) {
rb_warn("timer thread stack size: %"PRIuSIZE, stack_size);
}
else {
rb_warn("timer thread stack size: system default");
}
VM_ASSERT(err == 0);
return;
}
#if TIMER_IMPL == TIMER_THREAD_SLEEPY
/* validate pipe on this process */
timer_thread_pipe.owner_process = getpid();
#endif /* TIMER_THREAD_SLEEPY */
timer_thread.created = 1;
if (err == EINVAL) {
/*
* Even if we are careful with our own stack use in thread_timer(),
* any third-party libraries (eg libkqueue) which rely on __thread
* storage can cause small stack sizes to fail. So lets hope the
* default stack size is enough for them:
*/
stack_size = 0;
err = pthread_create(&timer_thread.id, NULL, thread_timer, vm);
}
if (!err) {
timer_thread.created = 1;
}
else {
rb_native_mutex_destroy(&timer_thread_lock);
rb_native_cond_destroy(&timer_thread_idle);
rb_native_cond_destroy(&timer_thread_busy);
fprintf(stderr,
"pthread_create failed for timer: %s, scheduling broken",
strerror(err));
if (stack_size) {
fprintf(stderr,
"timer thread stack size: %"PRIuSIZE, stack_size);
}
else {
fprintf(stderr, "timer thread stack size: system default");
}
VM_ASSERT(err == 0);
}
}
}
#define SIGWAIT_NONE ((const rb_thread_t *)-1)
static const rb_thread_t *sigwait_th;
static void
rb_thread_create_timer_thread(void)
{
/* we only create the pipe, and lazy-spawn */
rb_pid_t current = getpid();
rb_pid_t owner = timer_thread_pipe.owner_process;
if (owner && owner != current) {
CLOSE_INVALIDATE(normal[0]);
CLOSE_INVALIDATE(normal[1]);
}
if (setup_communication_pipe_internal(timer_thread_pipe.normal) < 0) return;
if (owner != current) {
/* validate pipe on this process */
sigwait_th = SIGWAIT_NONE;
timer_thread_pipe.owner_process = current;
}
}
#endif /* TIMER_IMPL & TIMER_THREAD_MASK */
static int
native_stop_timer_thread(void)
...
if (TT_DEBUG) fprintf(stderr, "stop timer thread\n");
if (stopped) {
#if TIMER_IMPL == TIMER_THREAD_SLEEPY
/* kick timer thread out of sleep */
rb_thread_wakeup_timer_thread_fd(timer_thread_pipe.normal[1]);
#endif
/* timer thread will stop looping when system_working <= 0: */
native_thread_join(timer_thread.id);
rb_vm_t *vm = GET_VM();
/*
* don't care if timer_thread_pipe may fill up at this point.
* If we restart timer thread, signals will be processed, if
* we don't, it's because we're in a different child
*/
if (TT_DEBUG) fprintf(stderr, "joined timer thread\n");
timer_thread.created = 0;
rb_native_mutex_lock(&vm->gvl.lock);
if (timer_thread.created) {
/* kick timer thread out of sleep ASAP */
rb_native_mutex_lock(&timer_thread_lock);
rb_native_cond_signal(&timer_thread_idle);
rb_native_cond_signal(&timer_thread_busy);
rb_native_mutex_unlock(&timer_thread_lock);
/* timer thread will stop looping when system_working <= 0: */
native_thread_join(timer_thread.id);
rb_native_cond_destroy(&timer_thread_idle);
rb_native_cond_destroy(&timer_thread_busy);
rb_native_mutex_destroy(&timer_thread_lock);
if (TT_DEBUG) fprintf(stderr, "joined timer thread\n");
timer_thread.created = 0;
}
rb_native_mutex_unlock(&vm->gvl.lock);
}
return stopped;
}
...
int
rb_reserved_fd_p(int fd)
{
#if TIMER_IMPL == TIMER_THREAD_SLEEPY
if ((fd == timer_thread_pipe.normal[0] ||
fd == timer_thread_pipe.normal[1] ||
fd == timer_thread_pipe.low[0] ||
fd == timer_thread_pipe.low[1]) &&
fd == timer_thread_pipe.normal[1]) &&
timer_thread_pipe.owner_process == getpid()) { /* async-signal-safe */
return 1;
}
else {
return 0;
}
#else
return 0;
#endif
}
rb_nativethread_id_t
...
}
#endif /* USE_NATIVE_SLEEP_COND */
int
rb_sigwait_fd_get(const rb_thread_t *th)
{
if (timer_thread_pipe.owner_process == getpid() &&
timer_thread_pipe.normal[0] >= 0) {
if (ATOMIC_PTR_CAS(sigwait_th, SIGWAIT_NONE, th) == SIGWAIT_NONE) {
return timer_thread_pipe.normal[0];
}
}
return -1; /* avoid thundering herd */
}
void
rb_sigwait_fd_put(const rb_thread_t *th, int fd)
{
const rb_thread_t *old;
VM_ASSERT(timer_thread_pipe.normal[0] == fd);
old = ATOMIC_PTR_EXCHANGE(sigwait_th, SIGWAIT_NONE);
if (old != th) assert(old == th);
}
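The compare-and-swap in rb_sigwait_fd_get() makes sigwait_fd a lock-free, single-owner resource: whichever thread swings sigwait_th from SIGWAIT_NONE to itself wins, and everyone else gets -1 and falls back to a condition variable. The same pattern expressed with C11 atomics (hypothetical names, not the VM's ATOMIC_PTR_* macros):

    #include <assert.h>
    #include <stdatomic.h>
    #include <stddef.h>

    static _Atomic(void *) owner;        /* NULL means "free" */

    /* returns nonzero if `me` acquired ownership, 0 if somebody else holds it */
    static int
    try_acquire(void *me)
    {
        void *expected = NULL;
        return atomic_compare_exchange_strong(&owner, &expected, me);
    }

    static void
    release(void *me)
    {
        void *old = atomic_exchange(&owner, NULL);
        assert(old == me);               /* only the owner may release */
    }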
static void
ubf_sigwait(void *ignore)
{
rb_thread_wakeup_timer_thread(0);
}
void
rb_sigwait_sleep(rb_thread_t *th, int sigwait_fd, const struct timespec *ts)
{
struct pollfd pfd;
pfd.fd = sigwait_fd;
pfd.events = POLLIN;
(void)ppoll(&pfd, 1, ts, 0);
check_signals_nogvl(th, sigwait_fd);
}
static void
native_sleep(rb_thread_t *th, struct timespec *timeout_rel)
{
int sigwait_fd = rb_sigwait_fd_get(th);
if (sigwait_fd >= 0) {
GVL_UNLOCK_BEGIN(th);
rb_native_mutex_lock(&th->interrupt_lock);
th->unblock.func = ubf_sigwait;
rb_native_mutex_unlock(&th->interrupt_lock);
if (!RUBY_VM_INTERRUPTED(th->ec)) {
rb_sigwait_sleep(th, sigwait_fd, timeout_rel);
}
unblock_function_clear(th);
rb_sigwait_fd_put(th, sigwait_fd);
rb_sigwait_fd_migrate(th->vm);
GVL_UNLOCK_END(th);
}
else {
native_cond_sleep(th, timeout_rel);
}
}
#endif /* THREAD_SYSTEM_DEPENDENT_IMPLEMENTATION */
thread_win32.c
--------------

#define native_thread_yield() Sleep(0)
#define unregister_ubf_list(th)
#define ubf_wakeup_all_threads() do {} while (0)
static volatile DWORD ruby_native_thread_key = TLS_OUT_OF_INDEXES;
...
}
void
rb_thread_wakeup_timer_thread(void)
rb_thread_wakeup_timer_thread(int sig)
{
/* do nothing */
}
...
return 0;
}
int
rb_sigwait_fd_get(rb_thread_t *th)
{
return -1; /* TODO */
}
void
rb_sigwait_fd_put(rb_thread_t *th, int fd)
{
rb_bug("not implemented, should not be called");
}
void
rb_sigwait_sleep(const rb_thread_t *th, int fd, const struct timespec *ts)
{
rb_bug("not implemented, should not be called");
}
rb_nativethread_id_t
rb_nativethread_self(void)
{
vm_core.h
---------

void rb_thread_start_timer_thread(void);
void rb_thread_stop_timer_thread(void);
void rb_thread_reset_timer_thread(void);
void rb_thread_wakeup_timer_thread(void);
void rb_thread_wakeup_timer_thread(int);
static inline void
rb_vm_living_threads_init(rb_vm_t *vm)