Project

General

Profile

Feature #14723 » sleepy-gc-wip-v1.diff

normalperson (Eric Wong), 04/29/2018 03:57 AM

View differences:

gc.c
}
}
/*
 * Returns non-zero when this execution context's objspace still has
 * outstanding GC work (lazy sweep of the eden heap, or an incremental
 * mark phase in flight).
 */
int
rb_gc_inprogress(const rb_execution_context_t *ec)
{
    rb_objspace_t *os = rb_ec_vm_ptr(ec)->objspace;

    /* TODO: should this also check is_incremental_marking() ? */
    if (is_lazy_sweeping(&os->eden_heap)) return 1;
    return is_incremental_marking(os) ? 1 : 0;
}
/*
 * Perform any outstanding GC work for this execution context.
 * Returns non-zero if more work remains afterwards, zero otherwise.
 */
int
rb_gc_step(const rb_execution_context_t *ec)
{
    gc_rest(rb_ec_vm_ptr(ec)->objspace);

    return rb_gc_inprogress(ec);
}
struct objspace_and_reason {
rb_objspace_t *objspace;
int reason;
internal.h
void ruby_gc_set_params(int safe_level);
void rb_copy_wb_protected_attribute(VALUE dest, VALUE obj);

/* sleepy GC: allow an otherwise-idle thread to run incremental GC steps
 * (see rb_wait_for_single_fd in thread.c) */
struct rb_execution_context_struct;
int rb_gc_inprogress(const struct rb_execution_context_struct *);
int rb_gc_step(const struct rb_execution_context_struct *);

#if defined(HAVE_MALLOC_USABLE_SIZE) || defined(HAVE_MALLOC_SIZE) || defined(_WIN32)
#define ruby_sized_xrealloc(ptr, new_size, old_size) ruby_xrealloc(ptr, new_size)
/* BUG FIX: the 2-element variant must forward to ruby_xrealloc2;
 * ruby_xrealloc takes only (ptr, new_size), so the original 3-argument
 * expansion would not compile / silently misbehave. */
#define ruby_sized_xrealloc2(ptr, new_count, element_size, old_count) ruby_xrealloc2(ptr, new_count, element_size)
thread.c
return 0;
}
/*
 * Initialize the (tsp, ts, end) triple used by the timed-wait loops:
 * with a timeout, compute the absolute deadline in *end, convert the
 * relative timeout into *ts, and point *tsp at it; with no timeout,
 * *tsp is left NULL ("wait forever").
 */
static void
timeout_prepare(struct timespec **tsp,
                struct timespec *ts, struct timespec *end,
                const struct timeval *timeout)
{
    if (!timeout) {
        *tsp = 0;
        return;
    }
    getclockofday(end);
    timespec_add(end, timespec_for(ts, timeout));
    *tsp = ts;
}
#if THREAD_DEBUG
#ifdef HAVE_VA_ARGS_MACRO
void rb_thread_debug(const char *file, int line, const char *fmt, ...);
......
rb_fdset_t MAYBE_UNUSED(orig_read);
rb_fdset_t MAYBE_UNUSED(orig_write);
rb_fdset_t MAYBE_UNUSED(orig_except);
struct timespec end;
struct timespec *tsp = 0;
struct timespec ts
#if defined(__GNUC__) && (__GNUC__ == 7 || __GNUC__ == 8)
= {0, 0}
#endif
;
struct timespec ts, end, *tsp;
rb_thread_t *th = GET_THREAD();
timeout_prepare(&tsp, &ts, &end, timeout);
#define do_select_update() \
(restore_fdset(readfds, &orig_read), \
restore_fdset(writefds, &orig_write), \
restore_fdset(exceptfds, &orig_except), \
update_timespec(tsp, &end))
if (timeout) {
getclockofday(&end);
timespec_add(&end, timespec_for(&ts, timeout));
tsp = &ts;
}
#define fd_init_copy(f) \
(f##fds) ? rb_fd_init_copy(&orig_##f, f##fds) : rb_fd_no_init(&orig_##f)
fd_init_copy(read);
......
/* NOTE(review): this span is a unified-diff rendering -- removed and added
 * lines are interleaved (duplicate timespec declarations, two competing
 * timeout-setup blocks, and two BLOCKING_REGION variants below), so it is
 * not compilable as shown.  Comments only; code left byte-identical. */
int
rb_wait_for_single_fd(int fd, int events, struct timeval *timeout)
{
/* all-zero timespec: used as a non-blocking ppoll timeout below */
static const struct timespec zero;
struct pollfd fds;
int result = 0, lerrno;
/* new (added) declarations */
struct timespec ts;
struct timespec end;
struct timespec *tsp = 0;
/* old (removed) declaration superseded by the three above */
struct timespec ts, end, *tsp;
rb_thread_t *th = GET_THREAD();
/* sleepy GC: remember whether GC work was pending when we entered */
int do_gc = rb_gc_inprogress(th->ec);
/* old (removed) inline timeout setup, replaced by timeout_prepare() */
if (timeout) {
getclockofday(&end);
timespec_add(&end, timespec_for(&ts, timeout));
tsp = &ts;
}
/* new: factored-out helper does the same setup */
timeout_prepare(&tsp, &ts, &end, timeout);
fds.fd = fd;
fds.events = (short)events;
do {
fds.revents = 0;
lerrno = 0;
/* old (removed) loop body: always release the GVL around ppoll */
BLOCKING_REGION({
result = ppoll(&fds, 1, tsp, NULL);
if (result < 0) lerrno = errno;
}, ubf_select, th, FALSE);
RUBY_VM_CHECK_INTS_BLOCKING(th->ec);
} while (result < 0 && retryable(errno = lerrno) &&
/* new loop body: only release the GVL when GC is idle or some other
 * thread is contending for it */
if (!do_gc || gvl_contended_p(th->vm)) {
BLOCKING_REGION({
result = ppoll(&fds, 1, tsp, NULL);
if (result < 0) lerrno = errno;
}, ubf_select, th, FALSE);
RUBY_VM_CHECK_INTS_BLOCKING(th->ec);
}
else { /* no need to release GVL if nobody is waiting for it */
/* run one incremental GC step, then poll without blocking */
do_gc = rb_gc_step(th->ec);
result = ppoll(&fds, 1, &zero, NULL);
if (result < 0) lerrno = errno;
}
/* new: also retry on result == 0 (timeout not yet reached after a
 * non-blocking GC-step poll); update_timespec() stops at the deadline */
} while ((result == 0 || (result < 0 && retryable(errno = lerrno))) &&
update_timespec(tsp, &end));
if (result < 0) return -1;
thread_pthread.c
rb_native_mutex_unlock(&vm->gvl.lock);
}
/* Non-zero when at least one other thread is blocked waiting on the GVL. */
static int
gvl_contended_p(const rb_vm_t *vm)
{
    if (vm->gvl.waiting > 0) return 1;
    return 0;
}
static void
gvl_init(rb_vm_t *vm)
{
thread_win32.c
gvl_acquire(vm, th);
}
/*
 * Report whether another thread is waiting on the GVL.
 * BUG FIX: declared "static void" in the original even though it executes
 * "return 1;" and its result is used as a boolean at the call site in
 * rb_wait_for_single_fd -- returning a value from a void function is a
 * constraint violation.  Match the pthread implementation's "static int".
 */
static int
gvl_contended_p(const rb_vm_t *vm)
{
    (void)vm; /* no waiter count tracked on win32 yet */
    return 1; /* TODO for win32 maintainer */
}
static void
gvl_init(rb_vm_t *vm)
{
    (1-1/1)