Feature #14370 » direct_marking.diff
compile.c

#define APPEND_ELEM(anchor, before, elem) APPEND_ELEM_(iseq, (anchor), (before), (elem))
#endif

static int
iseq_add_mark_object(const rb_iseq_t *iseq, VALUE v)
{
    if (!SPECIAL_CONST_P(v)) {
        rb_iseq_add_mark_object(iseq, v);
    }
    return COMPILE_OK;
}

static int
iseq_add_mark_object_compile_time(const rb_iseq_t *iseq, VALUE v)
{
...
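The new iseq_add_mark_object_compile_time helper is the compile-side half of the change: objects that end up embedded in iseq_encoded are reached by the GC through the instruction operands at run time, so the compiler only has to keep them alive until they have been written into the sequence. Its body is cut off in this hunk; here is a sketch of the idea with an invented holder array (an assumption, not the patch's implementation):

/* Sketch only: pin a literal in a temporary array owned by the compile data
 * until compilation finishes; afterwards the GC reaches it through the
 * bytecode operand, so no per-iseq mark array entry is needed. */
static void
example_keep_alive_during_compile(VALUE compile_time_objects, VALUE v)
{
    if (!SPECIAL_CONST_P(v)) {   /* immediates are never collected */
        rb_ary_push(compile_time_objects, v);
    }
}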
        encoded[i] = (VALUE)table[insn];
        i += len;
    }
    FL_SET(iseq, ISEQ_TRANSLATED);
#endif
    return COMPILE_OK;
}
...
            rb_iseq_path(iseq), rb_iseq_realpath(iseq),
            INT2FIX(line_no), parent, type, ISEQ_COMPILE_DATA(iseq)->option);
    debugs("[new_child_iseq]< ---------------------------------------\n");
    iseq_add_mark_object(iseq, (VALUE)ret_iseq);
    iseq_add_mark_object_compile_time(iseq, (VALUE)ret_iseq);
    return ret_iseq;
}
...
            rb_iseq_path(iseq), rb_iseq_realpath(iseq),
            INT2FIX(line_no), parent, type, ISEQ_COMPILE_DATA(iseq)->option);
    debugs("[new_child_iseq_ifunc]< ---------------------------------------\n");
    iseq_add_mark_object(iseq, (VALUE)ret_iseq);
    iseq_add_mark_object_compile_time(iseq, (VALUE)ret_iseq);
    return ret_iseq;
}
...
    switch (nd_type(val_node)) {
      case NODE_LIT:
        dv = val_node->nd_lit;
        iseq_add_mark_object(iseq, dv);
        break;
      case NODE_NIL:
        dv = Qnil;
...
            rb_hash_rehash(map);
            freeze_hide_obj(map);
            generated_iseq[code_index + 1 + j] = map;
            FL_SET(iseq, ISEQ_MARKABLE_ISEQ);
            break;
        }
      case TS_LINDEX:
...
        {
            VALUE v = operands[j];
            generated_iseq[code_index + 1 + j] = v;
            if (!SPECIAL_CONST_P(v)) {
                FL_SET(iseq, ISEQ_MARKABLE_ISEQ);
            }
            break;
        }
      case TS_VALUE: /* VALUE */
...
            VALUE v = operands[j];
            generated_iseq[code_index + 1 + j] = v;
            /* to mark ruby object */
            iseq_add_mark_object(iseq, v);
            if (!SPECIAL_CONST_P(v)) {
                FL_SET(iseq, ISEQ_MARKABLE_ISEQ);
            }
            break;
        }
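Both operand branches above raise ISEQ_MARKABLE_ISEQ only when the stored value is not a special constant: immediates (Fixnum, nil, true, false, static Symbols) are not heap objects, so an iseq whose literal operands are all immediate never pays for the GC-time operand walk. A small standalone illustration of that test (example code, not part of the patch):

#include <assert.h>
#include <ruby.h>

/* Which literal operands keep an iseq off the "markable" path:
 * SPECIAL_CONST_P() is true for immediates, which the GC neither moves nor frees. */
static void
example_classify_operands(void)
{
    VALUE imm = INT2FIX(42);              /* Fixnum: immediate, nothing to mark  */
    VALUE nil = Qnil;                     /* nil: immediate as well              */
    VALUE str = rb_str_new_cstr("heap");  /* String: heap object, must be marked */

    assert(SPECIAL_CONST_P(imm));
    assert(SPECIAL_CONST_P(nil));
    assert(!SPECIAL_CONST_P(str));
}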
      case TS_IC: /* inline cache */
...
            generated_iseq[code_index + 1 + j] = (VALUE)ic;
            break;
        }
      case TS_ISE: /* inline storage entry */
        {
            unsigned int ic_index = FIX2UINT(operands[j]);
            IC ic = (IC)&iseq->body->is_entries[ic_index];
            if (UNLIKELY(ic_index >= iseq->body->is_size)) {
                rb_bug("iseq_set_sequence: ic_index overflow: index: %d, size: %d", ic_index, iseq->body->is_size);
            }
            generated_iseq[code_index + 1 + j] = (VALUE)ic;
            FL_SET(iseq, ISEQ_MARKABLE_ISEQ);
            break;
        }
      case TS_CALLINFO: /* call info */
        {
            struct rb_call_info *base_ci = (struct rb_call_info *)operands[j];
...
    entry->end = label_get_position((LABEL *)(ptr[2] & ~1));
    entry->iseq = (rb_iseq_t *)ptr[3];

    /* register iseq as mark object */
    if (entry->iseq != 0) {
        iseq_add_mark_object(iseq, (VALUE)entry->iseq);
    }

    /* stack depth */
    if (ptr[4]) {
        LABEL *lobj = (LABEL *)(ptr[4] & ~1);
...
    }
    if (only_special_literals) {
        iseq_add_mark_object(iseq, literals);
        iseq_add_mark_object_compile_time(iseq, literals);
        ADD_INSN(ret, nd_line(orig_node), dup);
        ADD_INSN2(ret, nd_line(orig_node), opt_case_dispatch, literals, elselabel);
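The literals operand of opt_case_dispatch is a CDHASH: an internal, hidden, frozen Hash mapping the case expression's literal when-values to jump offsets. Because that hash is stored directly in the instruction sequence, the TS_CDHASH branch of the operand handling reaches it at GC time, and only the compile-time registration remains necessary here. Roughly what such a table holds (offsets invented for the example; not code from the patch):

VALUE table = rb_hash_new();
rb_hash_aset(table, INT2FIX(1), INT2FIX(10));   /* when 1 then ... -> branch offset 10 */
rb_hash_aset(table, INT2FIX(2), INT2FIX(24));   /* when 2 then ... -> branch offset 24 */
rb_obj_hide(table);                             /* invisible from Ruby code            */
rb_obj_freeze(table);                           /* immutable, like freeze_hide_obj()   */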
...
            break;
        }
      case TS_IC: /* inline cache */
      case TS_ISE: /* inline storage entry */
        rb_str_catf(str, "<ic:%d>", FIX2INT(OPERAND_AT(iobj, j)));
        break;
      case TS_CALLINFO: /* call info */
...
    }
    loaded_iseq = rb_iseqw_to_iseq(iseqw);
    iseq_add_mark_object(iseq, (VALUE)loaded_iseq);
    return loaded_iseq;
}
...
            break;
          case TS_VALUE:
            argv[j] = op;
            iseq_add_mark_object(iseq, op);
            break;
          case TS_ISEQ:
            {
...
            argv[j] = (VALUE)rb_global_entry(SYM2ID(op));
            break;
          case TS_IC:
          case TS_ISE:
            argv[j] = op;
            if (NUM2UINT(op) >= iseq->body->is_size) {
                iseq->body->is_size = NUM2INT(op) + 1;
...
                }
                RB_GC_GUARD(op);
                argv[j] = map;
                rb_iseq_add_mark_object(iseq, map);
            }
            break;
          case TS_FUNCPTR:
...
            code[code_index] = (VALUE)ibf_dump_iseq(dump, (const rb_iseq_t *)op);
            break;
          case TS_IC:
          case TS_ISE:
            {
                unsigned int i;
                for (i=0; i<iseq->body->is_size; i++) {
...
            code[code_index] = (VALUE)ibf_load_iseq(load, (const rb_iseq_t *)op);
            break;
          case TS_IC:
          case TS_ISE:
            code[code_index] = (VALUE)&is_entries[(int)op];
            break;
          case TS_CALLINFO:
...
    dump_body.is_entries = NULL;
    dump_body.ci_entries = ibf_dump_ci_entries(dump, iseq);
    dump_body.cc_entries = NULL;
    dump_body.mark_ary = ISEQ_FLIP_CNT(iseq);
    dump_body.variable.coverage = Qnil;
    dump_body.variable.original_iseq = Qnil;

    return ibf_dump_write(dump, &dump_body, sizeof(dump_body));
}
...
    load_body->ci_kw_size = body->ci_kw_size;
    load_body->insns_info.size = body->insns_info.size;

    RB_OBJ_WRITE(iseq, &load_body->mark_ary, iseq_mark_ary_create((int)body->mark_ary));
    iseq_mark_ary_create(iseq, (int)body->variable.flip_count);

    {
        VALUE realpath = Qnil, path = ibf_load_object(load, body->location.pathobj);
...
            rb_ary_store(load->obj_list, (long)object_index, obj);
        }
        iseq_add_mark_object(load->iseq, obj);
        return obj;
    }
}
...
        ibf_load_iseq_complete(iseq);
#endif /* !USE_LAZY_LOAD */

        if (load->iseq) {
            iseq_add_mark_object(load->iseq, (VALUE)iseq);
        }
        return iseq;
    }
}
insns.def

/* run iseq only once */
DEFINE_INSN
once
(ISEQ iseq, IC ic)
(ISEQ iseq, ISE ise)
()
(VALUE val)
{
    val = vm_once_dispatch(ec, iseq, ic);
    val = vm_once_dispatch(ec, iseq, ise);
}

/* case dispatcher, jump by table if possible */
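The once instruction is why the new ISE operand type exists: its cached result lives in the iseq's inline storage, and with direct marking the GC finds that value by scanning TS_ISE operands instead of relying on a run-time mark-array push. For orientation, the part of the inline storage union that once and the TS_ISE marking case use looks roughly like this (abridged view of vm_core.h, not part of this diff):

/* Abridged: only the `once` arm of CRuby's inline storage union is shown. */
union iseq_inline_storage_entry_view {
    struct {
        struct rb_thread_struct *running_thread; /* NULL, the running thread, or a "done" sentinel */
        VALUE value;                             /* cached result; now marked via the TS_ISE scan  */
    } once;
    /* ... inline cache arm omitted ... */
};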
iseq.c

    RUBY_FREE_LEAVE("iseq");
}

#if OPT_DIRECT_THREADED_CODE || OPT_CALL_THREADED_CODE
static int
rb_vm_insn_addr2insn2(const void *addr)
{
    int insn;
    const void * const *table = rb_vm_get_insns_address_table();

    for (insn = 0; insn < VM_INSTRUCTION_SIZE; insn++) {
        if (table[insn] == addr) {
            return insn;
        }
    }
    rb_bug("rb_vm_insn_addr2insn: invalid insn address: %p", addr);
}
#endif
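Under direct threading (OPT_DIRECT_THREADED_CODE), each slot in iseq_encoded holds the address of the instruction's handler rather than its opcode number, so a GC-time walker must translate the address back before it can ask insn_len() and insn_op_types() about the operands; that is what rb_vm_insn_addr2insn2 does with a linear reverse lookup over the handler table. A standalone sketch of both encodings (illustrative names, not CRuby API):

#include <stdint.h>

/* Sketch: map an encoded instruction slot back to its opcode number.
 * "translated" mirrors ISEQ_TRANSLATED; addr_table mirrors the table
 * returned by rb_vm_get_insns_address_table(). */
static int
example_decode_opcode(uintptr_t slot, int translated,
                      const void *const *addr_table, int table_size)
{
    int i;
    if (!translated) {
        return (int)slot;                 /* slot already holds the opcode */
    }
    for (i = 0; i < table_size; i++) {
        if (addr_table[i] == (const void *)slot) {
            return i;                     /* reverse lookup, as in addr2insn2 */
        }
    }
    return -1;                            /* unknown handler address */
}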
static int
rb_vm_insn_null_translator(const void *addr)
{
    return (int)addr;
}

typedef void iseq_value_itr_t(void *ctx, VALUE obj);
typedef int rb_vm_insns_translator_t(const void *addr);

static int
iseq_extract_values(const VALUE *code, size_t pos, iseq_value_itr_t * func, void *data, rb_vm_insns_translator_t * translator)
{
    VALUE insn = translator((void *)code[pos]);
    int len = insn_len(insn);
    int op_no;
    const char *types = insn_op_types(insn);

    for (op_no = 0; types[op_no]; op_no++) {
        char type = types[op_no];
        switch (type) {
          case TS_CDHASH:
          case TS_ISEQ:
          case TS_VALUE:
            {
                VALUE op = code[pos + op_no + 1];
                if (!SPECIAL_CONST_P(op)) {
                    func(data, op);
                }
                break;
            }
          case TS_ISE:
            {
                union iseq_inline_storage_entry *const is = (union iseq_inline_storage_entry *)code[pos + op_no + 1];
                if (is->once.value) {
                    func(data, is->once.value);
                }
                break;
            }
          default:
            break;
        }
    }

    return len;
}

static void
rb_iseq_each_value(const rb_iseq_t *iseq, iseq_value_itr_t * func, void *data)
{
    unsigned int size;
    const VALUE *code;
    size_t n;
    rb_vm_insns_translator_t * translator;

    size = iseq->body->iseq_size;
    code = iseq->body->iseq_encoded;

#if OPT_DIRECT_THREADED_CODE || OPT_CALL_THREADED_CODE
    if (FL_TEST(iseq, ISEQ_TRANSLATED)) {
        translator = rb_vm_insn_addr2insn2;
    } else {
        translator = rb_vm_insn_null_translator;
    }
#else
    translator = rb_vm_insn_null_translator;
#endif

    for (n = 0; n < size;) {
        n += iseq_extract_values(code, n, func, data, translator);
    }
}
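rb_iseq_each_value is the generic walker that rb_iseq_mark plugs rb_gc_mark into, via the each_insn_value callback that follows. Because it takes an arbitrary iterator plus a context pointer, the same loop can drive other file-local tooling too, for example counting how many heap objects an iseq embeds in its bytecode. A usage sketch under this patch's internals (the walker is static to iseq.c, so such a helper would have to live in the same file):

/* Usage sketch only: reuse the operand walker with a counting iterator. */
static void
count_insn_value(void *ctx, VALUE obj)
{
    (void)obj;                /* the object itself is not needed here */
    ++*(size_t *)ctx;
}

static size_t
example_count_embedded_objects(const rb_iseq_t *iseq)
{
    size_t n = 0;
    if (FL_TEST(iseq, ISEQ_MARKABLE_ISEQ)) {   /* flag set only when operands need marking */
        rb_iseq_each_value(iseq, count_insn_value, &n);
    }
    return n;
}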
static void
each_insn_value(void *ctx, VALUE obj)
{
    rb_gc_mark(obj);
}

void
rb_iseq_mark(const rb_iseq_t *iseq)
{
...
    if (iseq->body) {
        const struct rb_iseq_constant_body *body = iseq->body;

        RUBY_MARK_UNLESS_NULL(body->mark_ary);
        if (FL_TEST(iseq, ISEQ_MARKABLE_ISEQ)) {
            rb_iseq_each_value(iseq, each_insn_value, NULL);
        }
        rb_gc_mark(body->variable.coverage);
        rb_gc_mark(body->variable.original_iseq);
        rb_gc_mark(body->location.label);
        rb_gc_mark(body->location.base_label);
        rb_gc_mark(body->location.pathobj);
        RUBY_MARK_UNLESS_NULL((VALUE)body->parent_iseq);

        if (body->catch_table) {
            const struct iseq_catch_table *table = body->catch_table;
            unsigned int i;
            for (i = 0; i < table->size; i++) {
                const struct iseq_catch_table_entry *entry;
                entry = &table->entries[i];
                if (entry->iseq) {
                    rb_gc_mark((VALUE)entry->iseq);
                }
            }
        }
    }

    if (FL_TEST(iseq, ISEQ_NOT_LOADED_YET)) {
        rb_gc_mark(iseq->aux.loader.obj);
    }
...
    }
}

void
rb_iseq_add_mark_object(const rb_iseq_t *iseq, VALUE obj)
{
    /* TODO: check dedup */
    rb_ary_push(ISEQ_MARK_ARY(iseq), obj);
}

static VALUE
prepare_iseq_build(rb_iseq_t *iseq,
                   VALUE name, VALUE path, VALUE realpath, VALUE first_lineno, const rb_code_location_t *code_location,
...
    if (iseq != iseq->body->local_iseq) {
        RB_OBJ_WRITE(iseq, &iseq->body->location.base_label, iseq->body->local_iseq->body->location.label);
    }
    RB_OBJ_WRITE(iseq, &iseq->body->mark_ary, iseq_mark_ary_create(0));
    iseq_mark_ary_create(iseq, 0);
    ISEQ_COMPILE_DATA_ALLOC(iseq);
    RB_OBJ_WRITE(iseq, &ISEQ_COMPILE_DATA(iseq)->err_info, err_info);
...
        break;
      case TS_IC:
      case TS_ISE:
        ret = rb_sprintf("<is:%"PRIdPTRDIFF">", (union iseq_inline_storage_entry *)op - iseq->body->is_entries);
        break;
...
        }
        break;
      case TS_IC:
      case TS_ISE:
        {
            union iseq_inline_storage_entry *is = (union iseq_inline_storage_entry *)*seq;
            rb_ary_push(ary, INT2FIX(is - iseq->body->is_entries));
iseq.h

    return sizeof(struct rb_call_info_kw_arg) + sizeof(VALUE) * (keyword_len - 1);
}

enum iseq_mark_ary_index {
    ISEQ_MARK_ARY_COVERAGE,
    ISEQ_MARK_ARY_FLIP_CNT,
    ISEQ_MARK_ARY_ORIGINAL_ISEQ,
    ISEQ_MARK_ARY_INITIAL_SIZE
};

static inline VALUE
iseq_mark_ary_create(int flip_cnt)
static inline void
iseq_mark_ary_create(rb_iseq_t *iseq, int flip_cnt)
{
    VALUE ary = rb_ary_tmp_new(ISEQ_MARK_ARY_INITIAL_SIZE);
    rb_ary_push(ary, Qnil);              /* ISEQ_MARK_ARY_COVERAGE */
    rb_ary_push(ary, INT2FIX(flip_cnt)); /* ISEQ_MARK_ARY_FLIP_CNT */
    rb_ary_push(ary, Qnil);              /* ISEQ_MARK_ARY_ORIGINAL_ISEQ */
    return ary;
    RB_OBJ_WRITE(iseq, &iseq->body->variable.coverage, Qnil);
    RB_OBJ_WRITE(iseq, &iseq->body->variable.original_iseq, Qnil);
    iseq->body->variable.flip_count = flip_cnt;
}

#define ISEQ_MARK_ARY(iseq)          (iseq)->body->mark_ary

#define ISEQ_COVERAGE(iseq)          RARRAY_AREF(ISEQ_MARK_ARY(iseq), ISEQ_MARK_ARY_COVERAGE)
#define ISEQ_COVERAGE_SET(iseq, cov) RARRAY_ASET(ISEQ_MARK_ARY(iseq), ISEQ_MARK_ARY_COVERAGE, cov)
#define ISEQ_COVERAGE(iseq)          iseq->body->variable.coverage
#define ISEQ_COVERAGE_SET(iseq, cov) RB_OBJ_WRITE(iseq, &iseq->body->variable.coverage, cov)
#define ISEQ_LINE_COVERAGE(iseq)     RARRAY_AREF(ISEQ_COVERAGE(iseq), COVERAGE_INDEX_LINES)
#define ISEQ_BRANCH_COVERAGE(iseq)   RARRAY_AREF(ISEQ_COVERAGE(iseq), COVERAGE_INDEX_BRANCHES)

#define ISEQ_FLIP_CNT(iseq) FIX2INT(RARRAY_AREF(ISEQ_MARK_ARY(iseq), ISEQ_MARK_ARY_FLIP_CNT))
#define ISEQ_FLIP_CNT(iseq) (iseq)->body->variable.flip_count
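Where a mark_ary slot becomes a plain struct field (coverage, original_iseq), stores have to go through RB_OBJ_WRITE so the generational GC records the new iseq-to-object reference; RARRAY_ASET did that implicitly because the array itself was a GC-managed object. That is the entire difference between the old and new ISEQ_COVERAGE_SET above. A minimal illustration with made-up names:

/* Sketch: moving a VALUE out of a Ruby Array into a raw struct field means the
 * write barrier must be issued by hand.  "owner" is the object that holds the
 * field (here it would be the iseq). */
struct example_holder {
    VALUE coverage;
};

static void
example_set_coverage(VALUE owner, struct example_holder *h, VALUE cov)
{
    RB_OBJ_WRITE(owner, &h->coverage, cov);   /* assignment + write barrier */
}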
static inline int
ISEQ_FLIP_CNT_INCREMENT(const rb_iseq_t *iseq)
{
    int cnt = ISEQ_FLIP_CNT(iseq);
    RARRAY_ASET(ISEQ_MARK_ARY(iseq), ISEQ_MARK_ARY_FLIP_CNT, INT2FIX(cnt+1));
    int cnt = iseq->body->variable.flip_count;
    iseq->body->variable.flip_count += 1;
    return cnt;
}

static inline VALUE *
ISEQ_ORIGINAL_ISEQ(const rb_iseq_t *iseq)
{
    VALUE str = RARRAY_AREF(ISEQ_MARK_ARY(iseq), ISEQ_MARK_ARY_ORIGINAL_ISEQ);
    VALUE str = iseq->body->variable.original_iseq;
    if (RTEST(str)) return (VALUE *)RSTRING_PTR(str);
    return NULL;
}
...
static inline void
ISEQ_ORIGINAL_ISEQ_CLEAR(const rb_iseq_t *iseq)
{
    RARRAY_ASET(ISEQ_MARK_ARY(iseq), ISEQ_MARK_ARY_ORIGINAL_ISEQ, Qnil);
    RB_OBJ_WRITE(iseq, &iseq->body->variable.original_iseq, Qnil);
}

static inline VALUE *
ISEQ_ORIGINAL_ISEQ_ALLOC(const rb_iseq_t *iseq, long size)
{
    VALUE str = rb_str_tmp_new(size * sizeof(VALUE));
    RARRAY_ASET(ISEQ_MARK_ARY(iseq), ISEQ_MARK_ARY_ORIGINAL_ISEQ, str);
    RB_OBJ_WRITE(iseq, &iseq->body->variable.original_iseq, str);
    return (VALUE *)RSTRING_PTR(str);
}
...
#define ISEQ_NOT_LOADED_YET   IMEMO_FL_USER1
#define ISEQ_USE_COMPILE_DATA IMEMO_FL_USER2
#define ISEQ_TRANSLATED       IMEMO_FL_USER3
#define ISEQ_MARKABLE_ISEQ    IMEMO_FL_USER4

struct iseq_compile_data {
    /* GC is needed */
...
                        VALUE exception, VALUE body);

/* iseq.c */
void rb_iseq_add_mark_object(const rb_iseq_t *iseq, VALUE obj);
VALUE rb_iseq_load(VALUE data, VALUE parent, VALUE opt);
VALUE rb_iseq_parameters(const rb_iseq_t *iseq, int is_proc);
struct st_table *ruby_insn_make_insn_table(void);
tool/ruby_vm/models/typemap.rb

      "GENTRY" => %w[G TS_GENTRY],
      "IC"     => %w[K TS_IC],
      "ID"     => %w[I TS_ID],
      "ISE"    => %w[T TS_ISE],
      "ISEQ"   => %w[S TS_ISEQ],
      "OFFSET" => %w[O TS_OFFSET],
      "VALUE"  => %w[V TS_VALUE],
vm_core.h

     */
    struct rb_call_cache *cc_entries; /* size is ci_size = ci_kw_size */

    VALUE mark_ary;     /* Array: includes operands which should be GC marked */
    struct {
        rb_num_t flip_count;
        VALUE coverage;
        VALUE original_iseq;
    } variable;

    unsigned int local_table_size;
    unsigned int is_size;
...
/* inline cache */
typedef struct iseq_inline_cache_entry *IC;
typedef union iseq_inline_storage_entry *ISE;
typedef struct rb_call_info *CALL_INFO;
typedef struct rb_call_cache *CALL_CACHE;
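This rb_iseq_constant_body change is the heart of the patch: the catch-all mark_ary gives way to a small variable struct whose three well-known slots become plain fields, while everything else the array used to pin is now reached either through the instruction operands or through explicit marks in rb_iseq_mark. Condensed (field names taken from the diff, comments mine):

struct example_iseq_variable_view {   /* condensed view, not a drop-in definition */
    rb_num_t flip_count;     /* was the ISEQ_MARK_ARY_FLIP_CNT Fixnum slot     */
    VALUE    coverage;       /* was the ISEQ_MARK_ARY_COVERAGE slot            */
    VALUE    original_iseq;  /* was the ISEQ_MARK_ARY_ORIGINAL_ISEQ slot       */
};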
vm_insnhelper.c

}

static VALUE
vm_once_dispatch(rb_execution_context_t *ec, ISEQ iseq, IC ic)
vm_once_dispatch(rb_execution_context_t *ec, ISEQ iseq, ISE is)
{
    rb_thread_t *th = rb_ec_thread_ptr(ec);
    rb_thread_t *const RUNNING_THREAD_ONCE_DONE = (rb_thread_t *)(0x1);
    union iseq_inline_storage_entry *const is = (union iseq_inline_storage_entry *)ic;

  again:
    if (is->once.running_thread == RUNNING_THREAD_ONCE_DONE) {
...
    else if (is->once.running_thread == NULL) {
        VALUE val;
        is->once.running_thread = th;
        val = is->once.value = rb_ensure(vm_once_exec, (VALUE)iseq, vm_once_clear, (VALUE)is);
        val = rb_ensure(vm_once_exec, (VALUE)iseq, vm_once_clear, (VALUE)is);
        RB_OBJ_WRITE(ec->cfp->iseq, &is->once.value, val);
        /* is->once.running_thread is cleared by vm_once_clear() */
        is->once.running_thread = RUNNING_THREAD_ONCE_DONE; /* success */
        rb_iseq_add_mark_object(ec->cfp->iseq, val);
        return val;
    }
    else if (is->once.running_thread == th) {