Feature #14767 » 0001-gc.c-use-monotonic-counters-for-objspace_malloc_incr.patch

normalperson (Eric Wong), 05/17/2018 12:14 AM


gc.c
gc_mode_sweeping
};
struct monoctr {
size_t add;
size_t sub;
};
typedef struct rb_objspace {
struct {
size_t limit;
size_t increase;
struct monoctr m;
#if MALLOC_ALLOCATED_SIZE
size_t allocated_size;
size_t allocations;
......
size_t old_objects_limit;
#if RGENGC_ESTIMATE_OLDMALLOC
size_t oldmalloc_increase;
struct monoctr oldmalloc;
size_t oldmalloc_increase_limit;
#endif
......
VALUE *ruby_initial_gc_stress_ptr = &ruby_initial_gc_stress;
#define malloc_limit objspace->malloc_params.limit
#define malloc_increase objspace->malloc_params.increase
#define malloc_allocated_size objspace->malloc_params.allocated_size
#define heap_pages_sorted objspace->heap_pages.sorted
#define heap_allocated_pages objspace->heap_pages.allocated_pages
......
static void
gc_marks_check(rb_objspace_t *objspace, int (*checker_func)(ANYARGS), const char *checker_name)
{
size_t saved_malloc_increase = objspace->malloc_params.increase;
struct monoctr saved_malloc = objspace->malloc_params.m;
#if RGENGC_ESTIMATE_OLDMALLOC
size_t saved_oldmalloc_increase = objspace->rgengc.oldmalloc_increase;
struct monoctr saved_oldmalloc = objspace->rgengc.oldmalloc;
#endif
VALUE already_disabled = rb_gc_disable();
......
objspace->rgengc.allrefs_table = 0;
if (already_disabled == Qfalse) rb_gc_enable();
objspace->malloc_params.increase = saved_malloc_increase;
objspace->malloc_params.m = saved_malloc;
#if RGENGC_ESTIMATE_OLDMALLOC
objspace->rgengc.oldmalloc_increase = saved_oldmalloc_increase;
objspace->rgengc.oldmalloc = saved_oldmalloc;
#endif
}
#endif /* RGENGC_CHECK_MODE >= 4 */
......
}
}
static size_t
monoctr_read(const struct monoctr *mc)
{
size_t add = mc->add;
size_t sub = mc->sub;
size_t diff = add - sub;
return (diff <= add) ? diff : 0;
}
static size_t
monoctr_xchg0(struct monoctr *mc)
{
size_t add = ATOMIC_SIZE_EXCHANGE(mc->add, 0);
size_t sub = ATOMIC_SIZE_EXCHANGE(mc->sub, 0);
size_t diff = add - sub;
return (diff <= add) ? diff : 0;
}
static size_t
malloc_increase(const rb_objspace_t *objspace)
{
return monoctr_read(&objspace->malloc_params.m);
}
#if RGENGC_ESTIMATE_OLDMALLOC
static size_t
oldmalloc_increase(const rb_objspace_t *objspace)
{
return monoctr_read(&objspace->rgengc.oldmalloc);
}
#endif
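
[Editor's note, not part of the patch: a tiny self-contained check of the clamp used by monoctr_read() and monoctr_xchg0() above. If the two counters are read non-atomically while another thread is updating them, sub can momentarily be observed ahead of add; the unsigned subtraction then wraps, and the (diff <= add) test returns 0 instead of a huge bogus increase. The names and values below are made up for illustration.]

/* Hypothetical standalone check, not part of gc.c: a copy of the
 * monoctr_read() logic above, exercised with made-up counter values. */
#include <assert.h>
#include <stddef.h>

struct monoctr { size_t add; size_t sub; };

static size_t
monoctr_read_copy(const struct monoctr *mc)
{
    size_t add = mc->add;
    size_t sub = mc->sub;
    size_t diff = add - sub;   /* unsigned subtraction wraps if sub > add */

    return (diff <= add) ? diff : 0;
}

int
main(void)
{
    struct monoctr steady = { 100, 30 };  /* 100 bytes added, 30 freed */
    struct monoctr racy   = { 100, 130 }; /* torn read: sub observed ahead of add */

    assert(monoctr_read_copy(&steady) == 70);
    assert(monoctr_read_copy(&racy) == 0);   /* wrapped diff is clamped to 0 */
    return 0;
}
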
static void
gc_reset_malloc_info(rb_objspace_t *objspace)
{
gc_prof_set_malloc_info(objspace);
{
size_t inc = ATOMIC_SIZE_EXCHANGE(malloc_increase, 0);
ssize_t inc = monoctr_xchg0(&objspace->malloc_params.m);
size_t old_limit = malloc_limit;
if (inc > malloc_limit) {
if (inc > (ssize_t)malloc_limit) {
malloc_limit = (size_t)(inc * gc_params.malloc_limit_growth_factor);
if (gc_params.malloc_limit_max > 0 && /* ignore max-check if 0 */
malloc_limit > gc_params.malloc_limit_max) {
......
/* reset oldmalloc info */
#if RGENGC_ESTIMATE_OLDMALLOC
if (!is_full_marking(objspace)) {
if (objspace->rgengc.oldmalloc_increase > objspace->rgengc.oldmalloc_increase_limit) {
if (oldmalloc_increase(objspace) > objspace->rgengc.oldmalloc_increase_limit) {
objspace->rgengc.need_major_gc |= GPR_FLAG_MAJOR_BY_OLDMALLOC;
objspace->rgengc.oldmalloc_increase_limit =
(size_t)(objspace->rgengc.oldmalloc_increase_limit * gc_params.oldmalloc_limit_growth_factor);
......
if (0) fprintf(stderr, "%d\t%d\t%u\t%u\t%d\n",
(int)rb_gc_count(),
(int)objspace->rgengc.need_major_gc,
(unsigned int)objspace->rgengc.oldmalloc_increase,
(unsigned int)oldmalloc_increase(objspace),
(unsigned int)objspace->rgengc.oldmalloc_increase_limit,
(unsigned int)gc_params.oldmalloc_limit_max);
}
else {
/* major GC */
objspace->rgengc.oldmalloc_increase = 0;
MEMZERO(&objspace->rgengc.oldmalloc, struct monoctr, 1);
if ((objspace->profile.latest_gc_info & GPR_FLAG_MAJOR_BY_OLDMALLOC) == 0) {
objspace->rgengc.oldmalloc_increase_limit =
......
SET(total_freed_pages, objspace->profile.total_freed_pages);
SET(total_allocated_objects, objspace->total_allocated_objects);
SET(total_freed_objects, objspace->profile.total_freed_objects);
SET(malloc_increase_bytes, malloc_increase);
SET(malloc_increase_bytes, malloc_increase(objspace));
SET(malloc_increase_bytes_limit, malloc_limit);
#if USE_RGENGC
SET(minor_gc_count, objspace->profile.minor_gc_count);
......
SET(old_objects, objspace->rgengc.old_objects);
SET(old_objects_limit, objspace->rgengc.old_objects_limit);
#if RGENGC_ESTIMATE_OLDMALLOC
SET(oldmalloc_increase_bytes, objspace->rgengc.oldmalloc_increase);
SET(oldmalloc_increase_bytes, oldmalloc_increase(objspace));
SET(oldmalloc_increase_bytes_limit, objspace->rgengc.oldmalloc_increase_limit);
#endif
......
MEMOP_TYPE_REALLOC = 3
};
#if MALLOC_ALLOCATED_SIZE
static inline void
atomic_sub_nounderflow(size_t *var, size_t sub)
{
......
if (ATOMIC_SIZE_CAS(*var, val, val-sub) == val) break;
}
}
#endif
static void
objspace_malloc_gc_stress(rb_objspace_t *objspace)
......
static void
objspace_malloc_increase(rb_objspace_t *objspace, void *mem, size_t new_size, size_t old_size, enum memop_type type)
{
if (new_size > old_size) {
ATOMIC_SIZE_ADD(malloc_increase, new_size - old_size);
#if RGENGC_ESTIMATE_OLDMALLOC
ATOMIC_SIZE_ADD(objspace->rgengc.oldmalloc_increase, new_size - old_size);
#endif
/* n.b. these checks for non-zero get inlined */
if (new_size) {
ATOMIC_SIZE_ADD(objspace->malloc_params.m.add, new_size);
ATOMIC_SIZE_ADD(objspace->rgengc.oldmalloc.add, new_size);
}
else {
atomic_sub_nounderflow(&malloc_increase, old_size - new_size);
#if RGENGC_ESTIMATE_OLDMALLOC
atomic_sub_nounderflow(&objspace->rgengc.oldmalloc_increase, old_size - new_size);
#endif
if (old_size) {
ATOMIC_SIZE_ADD(objspace->malloc_params.m.sub, old_size);
ATOMIC_SIZE_ADD(objspace->rgengc.oldmalloc.sub, old_size);
}
if (type == MEMOP_TYPE_MALLOC) {
retry:
if (malloc_increase > malloc_limit && ruby_native_thread_p() && !dont_gc) {
if (malloc_increase(objspace) > malloc_limit && ruby_native_thread_p() && !dont_gc) {
if (ruby_thread_has_gvl_p() && is_lazy_sweeping(heap_eden)) {
gc_rest(objspace); /* gc_rest can reduce malloc_increase */
goto retry;
......
#if GC_PROFILE_MORE_DETAIL
if (gc_prof_enabled(objspace)) {
gc_profile_record *record = gc_prof_record(objspace);
record->allocate_increase = malloc_increase;
record->allocate_increase = malloc_increase(objspace);
record->allocate_limit = malloc_limit;
}
#endif
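
[Editor's note, not part of the patch: a minimal standalone sketch of the write side of the same scheme, with hypothetical names and C11 <stdatomic.h> standing in for ruby_atomic.h's ATOMIC_SIZE_ADD/ATOMIC_SIZE_EXCHANGE. It mirrors how objspace_malloc_increase above only ever adds: new bytes go to m.add and freed bytes to m.sub, so neither counter can underflow, and the per-GC reset consumes both with plain exchanges as monoctr_xchg0() does.]

/* Hypothetical standalone sketch, not part of the patch. */
#include <stdatomic.h>
#include <stddef.h>
#include <stdio.h>

struct monoctr_c11 { atomic_size_t add; atomic_size_t sub; };

/* Record a malloc/realloc/free: both counters only ever grow,
 * so neither one can underflow regardless of call ordering. */
static void
account(struct monoctr_c11 *mc, size_t new_size, size_t old_size)
{
    if (new_size) atomic_fetch_add(&mc->add, new_size);
    if (old_size) atomic_fetch_add(&mc->sub, old_size);
}

/* Consume and reset, mirroring monoctr_xchg0() in the patch. */
static size_t
xchg0(struct monoctr_c11 *mc)
{
    size_t add = atomic_exchange(&mc->add, 0);
    size_t sub = atomic_exchange(&mc->sub, 0);
    size_t diff = add - sub;

    return (diff <= add) ? diff : 0;  /* clamp a momentarily negative diff */
}

int
main(void)
{
    struct monoctr_c11 mc = { 0, 0 };

    account(&mc, 4096, 0);     /* malloc(4096)        */
    account(&mc, 8192, 4096);  /* realloc up to 8192  */
    account(&mc, 0, 8192);     /* free the 8192 chunk */

    printf("net increase: %zu\n", xchg0(&mc)); /* prints 0: everything was freed */
    return 0;
}
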