sleepy-gc-wip-v1.diff

normalperson (Eric Wong), 04/29/2018 03:57 AM


gc.c

@@ -6518,6 +6518,27 @@
     }
 }
 
+int
+rb_gc_inprogress(const rb_execution_context_t *ec)
+{
+    rb_objspace_t *objspace = rb_ec_vm_ptr(ec)->objspace;
+
+    /* TODO: should this also check is_incremental_marking() ? */
+    return is_lazy_sweeping(&objspace->eden_heap) ||
+               is_incremental_marking(objspace);
+}
+
+/* returns true if there is more work to do, false if not */
+int
+rb_gc_step(const rb_execution_context_t *ec)
+{
+    rb_objspace_t *objspace = rb_ec_vm_ptr(ec)->objspace;
+
+    gc_rest(objspace);
+
+    return rb_gc_inprogress(ec);
+}
+
 struct objspace_and_reason {
     rb_objspace_t *objspace;
     int reason;
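
Together these give callers a cheap query ("is lazy sweeping or incremental marking mid-flight?") and a way to push that work forward one step at a time. A minimal sketch of the intended calling pattern, with a hypothetical idle-loop helper (the real consumer is rb_wait_for_single_fd() in thread.c below):

/* Hypothetical sketch only: drain pending GC work during idle time.
 * rb_gc_inprogress()/rb_gc_step() are the functions added above; the
 * helper name and loop are illustrative, not part of the patch. */
static void
drain_gc_while_idle(rb_execution_context_t *ec)
{
    /* rb_gc_step() runs gc_rest() and reports whether lazy sweeping
     * or incremental marking still has work left */
    while (rb_gc_inprogress(ec) && rb_gc_step(ec))
        ;
}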
internal.h

@@ -1290,6 +1290,10 @@
 void ruby_gc_set_params(int safe_level);
 void rb_copy_wb_protected_attribute(VALUE dest, VALUE obj);
 
+struct rb_execution_context_struct;
+int rb_gc_inprogress(const struct rb_execution_context_struct *);
+int rb_gc_step(const struct rb_execution_context_struct *);
+
 #if defined(HAVE_MALLOC_USABLE_SIZE) || defined(HAVE_MALLOC_SIZE) || defined(_WIN32)
 #define ruby_sized_xrealloc(ptr, new_size, old_size) ruby_xrealloc(ptr, new_size)
 #define ruby_sized_xrealloc2(ptr, new_count, element_size, old_count) ruby_xrealloc2(ptr, new_count, element_size)
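
Forward-declaring struct rb_execution_context_struct here lets internal.h expose the two prototypes without pulling in vm_core.h, where the full structure is defined.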
thread.c

@@ -232,6 +232,21 @@
     return 0;
 }
 
+static void
+timeout_prepare(struct timespec **tsp,
+            struct timespec *ts, struct timespec *end,
+            const struct timeval *timeout)
+{
+    if (timeout) {
+        getclockofday(end);
+        timespec_add(end, timespec_for(ts, timeout));
+        *tsp = ts;
+    }
+    else {
+        *tsp = 0;
+    }
+}
+
 #if THREAD_DEBUG
 #ifdef HAVE_VA_ARGS_MACRO
 void rb_thread_debug(const char *file, int line, const char *fmt, ...);
@@ -3808,27 +3823,16 @@
     rb_fdset_t MAYBE_UNUSED(orig_read);
     rb_fdset_t MAYBE_UNUSED(orig_write);
     rb_fdset_t MAYBE_UNUSED(orig_except);
-    struct timespec end;
-    struct timespec *tsp = 0;
-    struct timespec ts
-#if defined(__GNUC__) && (__GNUC__ == 7 || __GNUC__ == 8)
-        = {0, 0}
-#endif
-        ;
+    struct timespec ts, end, *tsp;
     rb_thread_t *th = GET_THREAD();
 
+    timeout_prepare(&tsp, &ts, &end, timeout);
 #define do_select_update() \
     (restore_fdset(readfds, &orig_read), \
      restore_fdset(writefds, &orig_write), \
      restore_fdset(exceptfds, &orig_except), \
      update_timespec(tsp, &end))
 
-    if (timeout) {
-        getclockofday(&end);
-        timespec_add(&end, timespec_for(&ts, timeout));
-        tsp = &ts;
-    }
-
 #define fd_init_copy(f) \
     (f##fds) ? rb_fd_init_copy(&orig_##f, f##fds) : rb_fd_no_init(&orig_##f)
     fd_init_copy(read);
@@ -3957,32 +3961,34 @@
 int
 rb_wait_for_single_fd(int fd, int events, struct timeval *timeout)
 {
+    static const struct timespec zero;
     struct pollfd fds;
     int result = 0, lerrno;
-    struct timespec ts;
-    struct timespec end;
-    struct timespec *tsp = 0;
+    struct timespec ts, end, *tsp;
     rb_thread_t *th = GET_THREAD();
+    int do_gc = rb_gc_inprogress(th->ec);
 
-    if (timeout) {
-        getclockofday(&end);
-        timespec_add(&end, timespec_for(&ts, timeout));
-        tsp = &ts;
-    }
-
+    timeout_prepare(&tsp, &ts, &end, timeout);
     fds.fd = fd;
     fds.events = (short)events;
 
     do {
         fds.revents = 0;
         lerrno = 0;
-        BLOCKING_REGION({
-            result = ppoll(&fds, 1, tsp, NULL);
-            if (result < 0) lerrno = errno;
-        }, ubf_select, th, FALSE);
 
-        RUBY_VM_CHECK_INTS_BLOCKING(th->ec);
-    } while (result < 0 && retryable(errno = lerrno) &&
+        if (!do_gc || gvl_contended_p(th->vm)) {
+            BLOCKING_REGION({
+                result = ppoll(&fds, 1, tsp, NULL);
+                if (result < 0) lerrno = errno;
+            }, ubf_select, th, FALSE);
+            RUBY_VM_CHECK_INTS_BLOCKING(th->ec);
+        }
+        else { /* no need to release GVL if nobody is waiting for it */
+            do_gc = rb_gc_step(th->ec);
+            result = ppoll(&fds, 1, &zero, NULL);
+            if (result < 0) lerrno = errno;
+        }
+    } while ((result == 0 || (result < 0 && retryable(errno = lerrno))) &&
             update_timespec(tsp, &end));
     if (result < 0) return -1;
 
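This loop is the heart of the patch: while GC has pending work (do_gc) and no other thread is waiting on the GVL, the thread keeps the GVL, performs one GC step, and peeks at the fd with a zero timeout instead of sleeping in the kernel. The same idea in portable, self-contained C, assuming a plain poll() with a 0ms timeout as the non-blocking peek; background_work_step() is a made-up stand-in for rb_gc_step(), and the deadline and GVL-contention checks of the real code are omitted:

#include <poll.h>

/* stand-in for rb_gc_step(): report work remaining for a few calls */
static int
background_work_step(void)
{
    static int remaining = 3;
    if (remaining > 0) remaining--;
    return remaining > 0;
}

/* Wait for fd to become readable, interleaving background work while
 * the fd is idle.  A zero poll() timeout checks readiness without
 * blocking; once the work is drained, fall back to a blocking wait. */
static int
wait_readable_while_working(int fd)
{
    struct pollfd fds = { .fd = fd, .events = POLLIN };
    int have_work = 1;

    for (;;) {
        int timeout_ms = have_work ? 0 : -1;  /* 0 = peek, -1 = block */
        int r = poll(&fds, 1, timeout_ms);

        if (r != 0)
            return r;  /* ready (r > 0) or error (r < 0) */
        have_work = background_work_step();   /* not ready: do one step */
    }
}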
thread_pthread.c

@@ -156,6 +156,12 @@
     rb_native_mutex_unlock(&vm->gvl.lock);
 }
 
+static int
+gvl_contended_p(const rb_vm_t *vm)
+{
+    return vm->gvl.waiting > 0;
+}
+
 static void
 gvl_init(rb_vm_t *vm)
 {
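
gvl_contended_p() can read vm->gvl.waiting without taking gvl.lock because the answer is only a hint: a stale value costs at most one extra blocking wait or one extra GC step. The waiter count follows a common condition-variable pattern, sketched here with a hypothetical lock type whose field names mirror the GVL rather than its exact implementation:

#include <pthread.h>

/* hypothetical lock with a waiter count, mirroring gvl.waiting */
struct contended_lock {
    pthread_mutex_t mutex;
    pthread_cond_t cond;
    int locked;
    unsigned long waiting;
};

static void
contended_lock_acquire(struct contended_lock *lk)
{
    pthread_mutex_lock(&lk->mutex);
    if (lk->locked) {
        lk->waiting++;   /* advertise contention before sleeping */
        do {
            pthread_cond_wait(&lk->cond, &lk->mutex);
        } while (lk->locked);
        lk->waiting--;
    }
    lk->locked = 1;
    pthread_mutex_unlock(&lk->mutex);
}

static void
contended_lock_release(struct contended_lock *lk)
{
    pthread_mutex_lock(&lk->mutex);
    lk->locked = 0;
    pthread_cond_signal(&lk->cond);
    pthread_mutex_unlock(&lk->mutex);
}

/* unlocked read: only a hint, exactly like gvl_contended_p() */
static int
contended_p(const struct contended_lock *lk)
{
    return lk->waiting > 0;
}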
thread_win32.c

@@ -113,6 +113,12 @@
   gvl_acquire(vm, th);
 }
 
+static int
+gvl_contended_p(const rb_vm_t *vm)
+{
+    return 1; /* TODO for win32 maintainer */
+}
+
 static void
 gvl_init(rb_vm_t *vm)
 {
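
Because this stub always reports contention, rb_wait_for_single_fd() always takes the BLOCKING_REGION path on Windows, so behavior there is unchanged until a real check is implemented.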