Feature #9614 » 0002-ihash-initial-implementation-method-table-conversion.patch

commit message here has more info - normalperson (Eric Wong), 03/09/2014 02:22 AM

class.c

 #define id_attached id__attached__

+static void
+RCLASS_M_TBL_INIT(VALUE c)
+{
+    struct method_table_wrapper *wrapper;
+    wrapper = ALLOC(struct method_table_wrapper);
+    wrapper->tbl = rb_ihash_new(&rb_ihash_method_entry, 0);
+    wrapper->serial = 0;
+    RCLASS_M_TBL_WRAPPER(c) = wrapper;
+}
+
 void
 rb_class_subclass_add(VALUE super, VALUE klass)
 {
......
     }
 }

-static int
-clone_method_i(st_data_t key, st_data_t value, st_data_t data)
+static enum rb_ihash_next
+clone_method_i(struct rb_ihash_node *node, void *arg)
 {
-    clone_method((VALUE)data, (ID)key, (const rb_method_entry_t *)value);
-    return ST_CONTINUE;
+    const rb_method_entry_t *me = rb_method_entry_of(node);
+    clone_method((VALUE)arg, me->called_id, me);
+    return RB_IHASH_CONTINUE;
 }

 struct clone_const_arg {
......
 	    rb_free_m_tbl_wrapper(RCLASS_M_TBL_WRAPPER(clone));
 	}
 	RCLASS_M_TBL_INIT(clone);
-	st_foreach(RCLASS_M_TBL(orig), clone_method_i, (st_data_t)clone);
+	rb_ihash_foreach(RCLASS_M_TBLP(orig), clone_method_i, (void *)clone);
     }

     return clone;
......
 	    rb_singleton_class_attached(clone, attach);
 	}
 	RCLASS_M_TBL_INIT(clone);
-	st_foreach(RCLASS_M_TBL(klass), clone_method_i, (st_data_t)clone);
+	rb_ihash_foreach(RCLASS_M_TBLP(klass), clone_method_i, (void *)clone);
 	rb_singleton_class_attached(RBASIC(clone)->klass, clone);
 	FL_SET(clone, FL_SINGLETON);

......
 	rb_raise(rb_eArgError, "cyclic include detected");
 }

-static int
-add_refined_method_entry_i(st_data_t key, st_data_t value, st_data_t data)
+static enum rb_ihash_next
+add_refined_method_entry_i(struct rb_ihash_node *node, void *arg)
 {
-    rb_add_refined_method_entry((VALUE) data, (ID) key);
-    return ST_CONTINUE;
+    const rb_method_entry_t *me = rb_method_entry_of(node);
+    rb_add_refined_method_entry((VALUE) arg, me->called_id);
+    return RB_IHASH_CONTINUE;
 }

 static int
......
 {
     VALUE p, iclass;
     int method_changed = 0, constant_changed = 0;
-    const st_table *const klass_m_tbl = RCLASS_M_TBL(RCLASS_ORIGIN(klass));
+    const struct rb_ihash_tbl *const klass_m_tbl = RCLASS_M_TBL(RCLASS_ORIGIN(klass));

     while (module) {
 	int superclass_seen = FALSE;
......
 	    VALUE refined_class =
 		rb_refinement_module_get_refined_class(klass);

-	    st_foreach(RMODULE_M_TBL(module), add_refined_method_entry_i,
-		       (st_data_t) refined_class);
+	    rb_ihash_foreach(RMODULE_M_TBLP(module), add_refined_method_entry_i,
+		       (void *)refined_class);
 	    FL_SET(c, RMODULE_INCLUDED_INTO_REFINEMENT);
 	}
 	if (RMODULE_M_TBL(module) && RMODULE_M_TBL(module)->num_entries)
......
     return method_changed;
 }

-static int
-move_refined_method(st_data_t key, st_data_t value, st_data_t data)
+static enum rb_ihash_next
+move_refined_method(struct rb_ihash_node *node, void *arg)
 {
-    rb_method_entry_t *me = (rb_method_entry_t *) value;
-    st_table *tbl = (st_table *) data;
+    rb_method_entry_t *me = rb_method_entry_of(node);
+    struct rb_ihash_tbl **tblp = arg;

     if (me->def->type == VM_METHOD_TYPE_REFINED) {
 	if (me->def->body.orig_me) {
......
 	    me->def->body.orig_me = NULL;
 	    new_me = ALLOC(rb_method_entry_t);
 	    *new_me = *me;
-	    st_add_direct(tbl, key, (st_data_t) new_me);
+	    orig_me->mtbl_node = me->mtbl_node; /* careful */
+	    rb_ihash_add_direct(tblp, &new_me->mtbl_node);
 	    *me = *orig_me;
 	    xfree(orig_me);
-	    return ST_CONTINUE;
+	    return RB_IHASH_CONTINUE;
 	}
 	else {
-	    st_add_direct(tbl, key, (st_data_t) me);
-	    return ST_DELETE;
+	    rb_ihash_add_direct(tblp, &me->mtbl_node);
+	    return RB_IHASH_UNLINKED;
 	}
     }
     else {
-	return ST_CONTINUE;
+	return RB_IHASH_CONTINUE;
     }
 }

......
 	RCLASS_ORIGIN(klass) = origin;
 	RCLASS_M_TBL_WRAPPER(origin) = RCLASS_M_TBL_WRAPPER(klass);
 	RCLASS_M_TBL_INIT(klass);
-	st_foreach(RCLASS_M_TBL(origin), move_refined_method,
-		   (st_data_t) RCLASS_M_TBL(klass));
+	rb_ihash_foreach(RCLASS_M_TBLP(origin), move_refined_method,
+		   (void *)RCLASS_M_TBLP(klass));
     }
     changed = include_modules_at(klass, klass, module);
     if (changed < 0)
......
     return ins_methods_push((ID)name, (long)type, (VALUE)ary, NOEX_PUBLIC);
 }

-static int
-method_entry_i(st_data_t key, st_data_t value, st_data_t data)
+static enum rb_ihash_next
+method_entry_i(struct rb_ihash_node *node, void *arg)
 {
-    const rb_method_entry_t *me = (const rb_method_entry_t *)value;
-    st_table *list = (st_table *)data;
+    const rb_method_entry_t *me = rb_method_entry_of(node);
+    st_table *list = arg;
     long type;

     if (me && me->def->type == VM_METHOD_TYPE_REFINED) {
 	me = rb_resolve_refined_method(Qnil, me, NULL);
-	if (!me) return ST_CONTINUE;
+	if (!me) return RB_IHASH_CONTINUE;
     }
-    if (!st_lookup(list, key, 0)) {
+    if (!st_lookup(list, me->called_id, 0)) {
 	if (UNDEFINED_METHOD_ENTRY_P(me)) {
 	    type = -1; /* none */
 	}
 	else {
 	    type = VISI(me->flag);
 	}
-	st_add_direct(list, key, type);
+	st_add_direct(list, me->called_id, type);
     }
-    return ST_CONTINUE;
+    return RB_IHASH_CONTINUE;
 }

 static VALUE
......

     list = st_init_numtable();
     for (; mod; mod = RCLASS_SUPER(mod)) {
-	if (RCLASS_M_TBL(mod)) st_foreach(RCLASS_M_TBL(mod), method_entry_i, (st_data_t)list);
+	if (RCLASS_M_TBL(mod)) rb_ihash_foreach(RCLASS_M_TBLP(mod), method_entry_i, list);
 	if (BUILTIN_TYPE(mod) == T_ICLASS && !prepended) continue;
 	if (obj && FL_TEST(mod, FL_SINGLETON)) continue;
 	if (!recur) break;
......
 rb_obj_singleton_methods(int argc, VALUE *argv, VALUE obj)
 {
     VALUE recur, ary, klass, origin;
-    st_table *list, *mtbl;
+    st_table *list;

     if (argc == 0) {
 	recur = Qtrue;
......
     origin = RCLASS_ORIGIN(klass);
     list = st_init_numtable();
     if (klass && FL_TEST(klass, FL_SINGLETON)) {
-	if ((mtbl = RCLASS_M_TBL(origin)) != 0)
-	    st_foreach(mtbl, method_entry_i, (st_data_t)list);
+	if (RCLASS_M_TBL(origin))
+	    rb_ihash_foreach(RCLASS_M_TBLP(origin), method_entry_i, list);
 	klass = RCLASS_SUPER(klass);
     }
     if (RTEST(recur)) {
 	while (klass && (FL_TEST(klass, FL_SINGLETON) || RB_TYPE_P(klass, T_ICLASS))) {
-	    if (klass != origin && (mtbl = RCLASS_M_TBL(klass)) != 0)
-		st_foreach(mtbl, method_entry_i, (st_data_t)list);
+	    if (klass != origin && RCLASS_M_TBL(klass))
+		rb_ihash_foreach(RCLASS_M_TBLP(klass), method_entry_i, list);
 	    klass = RCLASS_SUPER(klass);
 	}
     }
common.mk
 		enumerator.$(OBJEXT) \
 		error.$(OBJEXT) \
 		eval.$(OBJEXT) \
+		ihash.$(OBJEXT) \
 		load.$(OBJEXT) \
 		proc.$(OBJEXT) \
 		file.$(OBJEXT) \
......
 PROBES_H_INCLUDES  = {$(VPATH)}probes.h
 VM_CORE_H_INCLUDES = {$(VPATH)}vm_core.h {$(VPATH)}thread_$(THREAD_MODEL).h \
 		     {$(VPATH)}node.h {$(VPATH)}method.h {$(VPATH)}ruby_atomic.h \
-	             {$(VPATH)}vm_debug.h {$(VPATH)}id.h {$(VPATH)}thread_native.h
+	             {$(VPATH)}vm_debug.h {$(VPATH)}id.h {$(VPATH)}thread_native.h \
+	             {$(VPATH)}ihash.h

 ###

......
   $(RUBY_H_INCLUDES) $(VM_CORE_H_INCLUDES) {$(VPATH)}eval_error.c \
   {$(VPATH)}eval_jump.c {$(VPATH)}gc.h {$(VPATH)}iseq.h \
   $(ENCODING_H_INCLUDES) {$(VPATH)}internal.h $(PROBES_H_INCLUDES) {$(VPATH)}vm_opts.h {$(VPATH)}probes_helper.h
+ihash.$(OBJEXT): {$(VPATH)}ihash.c $(RUBY_H_INCLUDES) {$(VPATH)}ihash.h
 load.$(OBJEXT): {$(VPATH)}load.c {$(VPATH)}eval_intern.h \
   {$(VPATH)}util.h $(RUBY_H_INCLUDES) $(VM_CORE_H_INCLUDES) \
   {$(VPATH)}dln.h {$(VPATH)}internal.h $(PROBES_H_INCLUDES) {$(VPATH)}vm_opts.h
gc.c
     return FALSE;
 }

-static int
-free_method_entry_i(ID key, rb_method_entry_t *me, st_data_t data)
+static enum rb_ihash_next
+free_method_entry_i(struct rb_ihash_node *node, void *unused)
 {
+    rb_method_entry_t *me = rb_method_entry_of(node);
+
     if (!me->mark) {
 	rb_free_method_entry(me);
     }
-    return ST_CONTINUE;
+    return RB_IHASH_UNLINKED;
 }

 void
-rb_free_m_tbl(st_table *tbl)
+rb_free_m_tbl(struct rb_ihash_tbl **tblp)
 {
-    st_foreach(tbl, free_method_entry_i, 0);
-    st_free_table(tbl);
+    rb_ihash_foreach(tblp, free_method_entry_i, 0);
+    rb_ihash_free(*tblp);
 }

 void
 rb_free_m_tbl_wrapper(struct method_table_wrapper *wrapper)
 {
     if (wrapper->tbl) {
-	rb_free_m_tbl(wrapper->tbl);
+	rb_free_m_tbl(&wrapper->tbl);
     }
     xfree(wrapper);
 }
......
 	    size += sizeof(struct method_table_wrapper);
 	}
 	if (RCLASS_M_TBL(obj)) {
-	    size += st_memsize(RCLASS_M_TBL(obj));
+	    size += rb_ihash_memsize(RCLASS_M_TBL(obj));
 	}
 	if (RCLASS_EXT(obj)) {
 	    if (RCLASS_IV_TBL(obj)) {
......
     mark_method_entry(&rb_objspace, me);
 }

-static int
-mark_method_entry_i(ID key, const rb_method_entry_t *me, st_data_t data)
+static enum rb_ihash_next
+mark_method_entry_i(struct rb_ihash_node *node, void *mta)
 {
-    struct mark_tbl_arg *arg = (void*)data;
+    const rb_method_entry_t *me = rb_method_entry_of(node);
+    struct mark_tbl_arg *arg = mta;
     mark_method_entry(arg->objspace, me);
     return ST_CONTINUE;
 }
......
 	wrapper->serial = serial;
     }
     arg.objspace = objspace;
-    st_foreach(wrapper->tbl, mark_method_entry_i, (st_data_t)&arg);
+    rb_ihash_foreach(&wrapper->tbl, mark_method_entry_i, &arg);
 }

 static int
hash.c
 #include "internal.h"
 #include <errno.h>
 #include "probes.h"
+#include "ihash.h"

 #ifdef __APPLE__
 # ifdef HAVE_CRT_EXTERNS_H
......
     const VALUE base = rb_cHash;
     VALUE c = klass;
     while (c != base) {
-	st_table *mtbl = RCLASS_M_TBL(c);
+	struct rb_ihash_tbl *mtbl = RCLASS_M_TBL(c);
 	if (mtbl && mtbl->num_entries) return klass;
 	c = RCLASS_SUPER(c);
     }
ihash.c
+#include "ihash.h"
+#include "internal.h"
+
+/* accounts for the compiler-agnostic flexible array byte */
+#define SIZEOF_IHASH_TBL (sizeof(struct rb_ihash_tbl) - 1)
+
+static inline size_t
+rb_ihash_nbins(const struct rb_ihash_tbl *tbl)
+{
+    return tbl->hash_mask + 1;
+}
+
+static inline size_t
+rb_ihash_binpos(const struct rb_ihash_tbl *tbl, st_index_t hash)
+{
+    return ((size_t)hash & (size_t)tbl->hash_mask);
+}
+
+static inline struct rb_ihash_node **
+rb_ihash_bins(const struct rb_ihash_tbl *tbl)
+{
+    return (struct rb_ihash_node **)tbl->flexible_array;
+}
+
+size_t
+rb_ihash_memsize(const struct rb_ihash_tbl *tbl)
+{
+    size_t nbins = rb_ihash_nbins(tbl);
+    size_t n = SIZEOF_IHASH_TBL + nbins * sizeof(struct rb_ihash_node *);
+
+    return n + sizeof(struct rb_ihash_node) * tbl->num_entries;
+}
+
+static struct rb_ihash_tbl *
+rb_ihash_alloc(const struct rb_ihash_type *type, size_t nbins)
+{
+    size_t nbytes = SIZEOF_IHASH_TBL + nbins * sizeof(struct rb_ihash_node *);
+    struct rb_ihash_tbl *tbl = xcalloc(1, nbytes);
+
+    tbl->type = type;
+    tbl->hash_mask = nbins - 1;
+
+    if ((size_t)tbl->hash_mask != (nbins - 1)) {
+	rb_bug("ihash table too big");
+    }
+
+    return tbl;
+}
+
+struct rb_ihash_tbl *
+rb_ihash_new(const struct rb_ihash_type *type, size_t power)
+{
+    return rb_ihash_alloc(type, 1 << power);
+}
+
+int
+rb_ihash_foreach(struct rb_ihash_tbl **tblp, rb_ihash_iterate fn, void *arg)
+{
+    struct rb_ihash_tbl *tbl = *tblp;
+    size_t nbins = rb_ihash_nbins(tbl);
+    struct rb_ihash_node **bins = rb_ihash_bins(tbl);
+    size_t i;
+
+    for (i = 0; i < nbins; i++) {
+	struct rb_ihash_node *cur = bins[i];
+	struct rb_ihash_node *last = 0;
+
+	while (cur) {
+	    struct rb_ihash_node *next = cur->ihash_next;
+	    struct rb_ihash_node *tmp;
+
+	    switch (fn(cur, arg)) {
+	      case RB_IHASH_UNLINKED:
+		if (last) last->ihash_next = next;
+		else bins[i] = next;
+		cur = next;
+		tbl->num_entries--;
+		break;
+	      case RB_IHASH_CHECK:
+		/* check if hash is modified during iteration */
+		tmp = 0;
+		tbl = *tblp;
+		nbins = rb_ihash_nbins(tbl);
+		bins = rb_ihash_bins(tbl);
+
+		if (i < nbins) {
+		    for (tmp = bins[i]; tmp; tmp = tmp->ihash_next) {
+			if (tmp == cur) break;
+		    }
+		}
+		if (!tmp) return 1;
+		last = cur;
+		cur = cur->ihash_next;
+		break;
+	      case RB_IHASH_CONTINUE:
+		last = cur;
+		cur = next;
+		break;
+	      case RB_IHASH_STOP:
+		return 0;
+	    }
+	}
+    }
+    return 0;
+}
+
+struct rb_ihash_node *
+rb_ihash_lookup(const struct rb_ihash_tbl *tbl, const struct rb_ihash_node *key)
+{
+    st_index_t hval = tbl->type->hash(key);
+    struct rb_ihash_node **bins = rb_ihash_bins(tbl);
+    size_t pos = rb_ihash_binpos(tbl, hval);
+    struct rb_ihash_node *cur = bins[pos];
+
+    for (; cur; cur = cur->ihash_next) {
+	if (tbl->type->cmp(key, cur) == 0) {
+	    return cur;
+	}
+    }
+    return 0;
+}
+
+static inline void
+rb_ihash_add_pos(struct rb_ihash_tbl *tbl,
+		struct rb_ihash_node *ins, size_t pos)
+{
+    struct rb_ihash_node **bins = rb_ihash_bins(tbl);
+
+    ins->ihash_next = bins[pos];
+    bins[pos] = ins;
+}
+
+static enum rb_ihash_next
+relink_i(struct rb_ihash_node *node, void *ptr)
+{
+    struct rb_ihash_tbl *new_tbl = ptr;
+    st_index_t hval = new_tbl->type->hash(node);
+    size_t pos = rb_ihash_binpos(new_tbl, hval);
+
+    rb_ihash_add_pos(new_tbl, node, pos);
+
+    return RB_IHASH_UNLINKED;
+}
+
+static struct rb_ihash_tbl *
+rb_ihash_realloc(struct rb_ihash_tbl **oldp, size_t nbins)
+{
+    struct rb_ihash_tbl *old_tbl = *oldp;
+    struct rb_ihash_tbl *new_tbl = rb_ihash_alloc(old_tbl->type, nbins);
+
+    rb_ihash_foreach(oldp, relink_i, new_tbl);
+    rb_ihash_free(old_tbl);
+    return new_tbl;
+}
+
+static void
+rb_ihash_added(struct rb_ihash_tbl **tblp)
+{
+    struct rb_ihash_tbl *tbl = *tblp;
+    size_t nbins = rb_ihash_nbins(tbl);
+    static const size_t max_density = 6;
+
+    tbl->num_entries++;
+    if (tbl->num_entries > (max_density * nbins)) {
+	*tblp = rb_ihash_realloc(tblp, nbins << 1);
+    }
+}
+
+void
+rb_ihash_add_direct(struct rb_ihash_tbl **tblp, struct rb_ihash_node *ins)
+{
+    struct rb_ihash_tbl *tbl = *tblp;
+    st_index_t hval = tbl->type->hash(ins);
+    size_t pos = rb_ihash_binpos(tbl, hval);
+
+    rb_ihash_add_pos(tbl, ins, pos);
+    rb_ihash_added(tblp);
+}
+
+/* returns pointer to replaced node, 0 if nothing was replaced */
+struct rb_ihash_node *
+rb_ihash_insert(struct rb_ihash_tbl **tblp, struct rb_ihash_node *ins)
+{
+    struct rb_ihash_tbl *tbl = *tblp;
+    st_index_t hval = tbl->type->hash(ins);
+    struct rb_ihash_node **bins = rb_ihash_bins(tbl);
+    size_t i = rb_ihash_binpos(tbl, hval);
+    struct rb_ihash_node *last = NULL;
+    struct rb_ihash_node *cur = bins[i];
+
+    while (cur) {
+	if (tbl->type->cmp(ins, cur) == 0) {
+	    /* replace and return existing entry */
+	    ins->ihash_next = cur->ihash_next;
+	    if (last) last->ihash_next = ins;
+	    else bins[i] = ins;
+	    return cur;
+	}
+	last = cur;
+	cur = cur->ihash_next;
+    }
+    rb_ihash_add_pos(tbl, ins, i);
+    rb_ihash_added(tblp);
+    return 0;
+}
+
+/* returns pointer to unlinked node, 0 if nothing was unlinked */
+struct rb_ihash_node *
+rb_ihash_unlink(struct rb_ihash_tbl *tbl, const struct rb_ihash_node *key)
+{
+    st_index_t hval = tbl->type->hash(key);
+    struct rb_ihash_node **bins = rb_ihash_bins(tbl);
+    size_t i = rb_ihash_binpos(tbl, hval);
+    struct rb_ihash_node *last = 0;
+    struct rb_ihash_node *cur = bins[i];
+
+    while (cur) {
+	if (tbl->type->cmp(key, cur) == 0) {
+	    if (last) last->ihash_next = cur->ihash_next;
+	    else bins[i] = cur->ihash_next;
+	    tbl->num_entries--;
+	    return cur;
+	}
+	last = cur;
+	cur = cur->ihash_next;
+    }
+
+    return 0;
+}
ihash.h
+/*
+ * Hash table implementation for internal use inside Ruby.
+ *
+ * Notes:
+ *
+ * - Users are expected to embed the rb_ihash_node struct and st_data_t key
+ *   inside a main struct stored by users.  This reduces pointer chasing
+ *   and allows cache-warming even on missed lookups.
+ *   Wrap the RB_CONTAINER_OF macro in a static inline to get the
+ *   stored struct pointer from an st_data_t-compatible key pointer.
+ *
+ * - does no allocation for entires, user is responsible for it
+ *
+ * - this should be safe to use outside of GVL.  Of course users must manage
+ *   their own locking.
+ */
+#ifndef RUBY_IHASH_H
+#define RUBY_IHASH_H 1
+#if defined(__cplusplus)
+extern "C" {
+#if 0
+} /* satisfy cc-mode */
+#endif
+#endif
+
+#include "ruby/ruby.h"
+#include "ruby/st.h" /* we use st_data_t and st_index_t data types */
+
+/*
+ * each struct must have its own rb_ihash_node field inside it,
+ * use CONTAINER_OF to get the original struct ptr
+ */
+struct rb_ihash_node;
+struct rb_ihash_node {
+    struct rb_ihash_node *ihash_next;
+};
+
+enum rb_ihash_next {
+    RB_IHASH_CONTINUE = 0,
+    RB_IHASH_CHECK,
+    RB_IHASH_STOP,
+    RB_IHASH_UNLINKED /* user must deallocate this manually */
+};
+
+typedef int (*rb_ihash_compare)(const struct rb_ihash_node *,
+				const struct rb_ihash_node *);
+typedef st_index_t (*rb_ihash_compute)(const struct rb_ihash_node *);
+typedef enum rb_ihash_next (*rb_ihash_iterate)(struct rb_ihash_node *, void *);
+
+struct rb_ihash_type {
+    rb_ihash_compare cmp;
+    rb_ihash_compute hash;
+};
+
+struct rb_ihash_tbl {
+    const struct rb_ihash_type *type;
+    uint32_t num_entries;
+    uint32_t hash_mask;
+    char flexible_array[1];
+};
+
+struct rb_ihash_tbl *rb_ihash_new(const struct rb_ihash_type *, size_t);
+
+struct rb_ihash_node *
+rb_ihash_lookup(const struct rb_ihash_tbl *, const struct rb_ihash_node *);
+
+struct rb_ihash_node *
+rb_ihash_insert(struct rb_ihash_tbl **, struct rb_ihash_node *);
+
+void rb_ihash_add_direct(struct rb_ihash_tbl **, struct rb_ihash_node *);
+
+struct rb_ihash_node *
+rb_ihash_unlink(struct rb_ihash_tbl *, const struct rb_ihash_node *);
+
+int rb_ihash_foreach(struct rb_ihash_tbl **, rb_ihash_iterate, void *);
+
+static inline void
+rb_ihash_free(struct rb_ihash_tbl *tbl)
+{
+    /*
+     * user must call rb_ihash_foreach with an iterator which frees
+     * and returns RB_IHASH_UNLINK for each entry before calling this
+     */
+    xfree(tbl);
+}
+
+size_t rb_ihash_memsize(const struct rb_ihash_tbl *);
+#if defined(__cplusplus)
+#if 0
+{ /* satisfy cc-mode */
+#endif
+}  /* extern "C" { */
+#endif
+#endif /* RUBY_IHASH_H */
include/ruby/ruby.h
 #define RMODULE_IV_TBL(m) RCLASS_IV_TBL(m)
 #define RMODULE_CONST_TBL(m) RCLASS_CONST_TBL(m)
 #define RMODULE_M_TBL(m) RCLASS_M_TBL(m)
+#define RMODULE_M_TBLP(m) RCLASS_M_TBLP(m)
 #define RMODULE_SUPER(m) RCLASS_SUPER(m)
 #define RMODULE_IS_OVERLAID FL_USER2
 #define RMODULE_IS_REFINEMENT FL_USER3
internal.h

 #define numberof(array) ((int)(sizeof(array) / sizeof((array)[0])))

+#define RB_CONTAINER_OF(ptr,type,field) \
+	((type *)((uint8_t *)(ptr) - offsetof(type,field)))
+
 #define STATIC_ASSERT(name, expr) typedef int static_assert_##name##_check[1 - 2*!(expr)]

 #define GCC_VERSION_SINCE(major, minor, patchlevel) \
......
 };

 struct method_table_wrapper {
-    st_table *tbl;
+    struct rb_ihash_tbl *tbl;
     size_t serial;
 };

......
 #define RCLASS_CONST_TBL(c) (RCLASS_EXT(c)->const_tbl)
 #define RCLASS_M_TBL_WRAPPER(c) (RCLASS(c)->m_tbl_wrapper)
 #define RCLASS_M_TBL(c) (RCLASS_M_TBL_WRAPPER(c) ? RCLASS_M_TBL_WRAPPER(c)->tbl : 0)
+#define RCLASS_M_TBLP(c) (&RCLASS_M_TBL_WRAPPER(c)->tbl)
 #define RCLASS_IV_INDEX_TBL(c) (RCLASS_EXT(c)->iv_index_tbl)
 #define RCLASS_ORIGIN(c) (RCLASS_EXT(c)->origin)
 #define RCLASS_REFINED_CLASS(c) (RCLASS_EXT(c)->refined_class)
 #define RCLASS_SERIAL(c) (RCLASS_EXT(c)->class_serial)

-static inline void
-RCLASS_M_TBL_INIT(VALUE c)
-{
-    struct method_table_wrapper *wrapper;
-    wrapper = ALLOC(struct method_table_wrapper);
-    wrapper->tbl = st_init_numtable();
-    wrapper->serial = 0;
-    RCLASS_M_TBL_WRAPPER(c) = wrapper;
-}
-
 #undef RCLASS_SUPER
 static inline VALUE
 RCLASS_SUPER(VALUE klass)
marshal.c
 #include "ruby/util.h"
 #include "ruby/encoding.h"
 #include "internal.h"
+#include "ihash.h"

 #include <math.h>
 #ifdef HAVE_FLOAT_H
method.h
 #define METHOD_H

 #include "internal.h"
+#include "ihash.h"

 #ifndef END_OF_ENUMERATION
 # if defined(__GNUC__) &&! defined(__STRICT_ANSI__)
......
 } rb_method_definition_t;

 typedef struct rb_method_entry_struct {
+    ID called_id;                    /* key for ihash */
+    struct rb_ihash_node mtbl_node; /* TODO: union for unlinked entries */
     rb_method_flag_t flag;
     char mark;
     rb_method_definition_t *def;
-    ID called_id;
     VALUE klass;                    /* should be mark */
 } rb_method_entry_t;

+static inline rb_method_entry_t *
+rb_method_entry_of(const struct rb_ihash_node *node)
+{
+    return RB_CONTAINER_OF(node, rb_method_entry_t, mtbl_node);
+}
+
 struct unlinked_method_entry_list_entry {
     struct unlinked_method_entry_list_entry *next;
     rb_method_entry_t *me;
......
 void rb_mark_method_entry(const rb_method_entry_t *me);
 void rb_free_method_entry(rb_method_entry_t *me);
 void rb_sweep_method_entry(void *vm);
-void rb_free_m_tbl(st_table *tbl);
+void rb_free_m_tbl(struct rb_ihash_tbl **);
 void rb_free_m_tbl_wrapper(struct method_table_wrapper *wrapper);
+extern struct rb_ihash_type rb_ihash_method_entry;

 #endif /* METHOD_H */
vm.c
     }
 }

-static int
-check_redefined_method(st_data_t key, st_data_t value, st_data_t data)
+static enum rb_ihash_next
+check_redefined_method(struct rb_ihash_node *node, void *arg)
 {
-    ID mid = (ID)key;
-    rb_method_entry_t *me = (rb_method_entry_t *)value;
-    VALUE klass = (VALUE)data;
+    rb_method_entry_t *me = rb_method_entry_of(node);
+    ID mid = me->called_id;
+    VALUE klass = (VALUE)arg;
     rb_method_entry_t *newme = rb_method_entry(klass, mid, NULL);

     if (newme != me)
 	rb_vm_check_redefinition_opt_method(me, me->klass);
-    return ST_CONTINUE;
+    return RB_IHASH_CONTINUE;
 }

 void
 rb_vm_check_redefinition_by_prepend(VALUE klass)
 {
     if (!vm_redefinition_check_flag(klass)) return;
-    st_foreach(RCLASS_M_TBL(RCLASS_ORIGIN(klass)), check_redefined_method,
-	       (st_data_t)klass);
+    rb_ihash_foreach(RCLASS_M_TBLP(RCLASS_ORIGIN(klass)), check_redefined_method,
+	       (void *)klass);
 }

 static void
vm_insnhelper.c
     const int max = (iseq->arg_rest == -1) ? m + opts + iseq->arg_post_len : UNLIMITED_ARGUMENTS;
     const int orig_argc = ci->argc;
     int argc = orig_argc;
+    VALUE *new_argv;
     VALUE *argv = orig_argv;
     VALUE keyword_hash = Qnil;
     rb_num_t opt_pc = 0;
......
     /* post arguments */
     if (iseq->arg_post_len) {
 	if (!(orig_argc < iseq->arg_post_start)) {
-	    VALUE *new_argv = ALLOCA_N(VALUE, argc);
+	    new_argv = ALLOCA_N(VALUE, argc);
 	    MEMCPY(new_argv, argv, VALUE, argc);
 	    argv = new_argv;
 	}
vm_method.c

 static int rb_method_definition_eq(const rb_method_definition_t *d1, const rb_method_definition_t *d2);

+/*
+ * use small struct here to avoid errors from tiny stacks in fiber tests,
+ * TODO: unify this struct into rb_method_entry_t in the future,
+ * this struct only exists to minimize code churn from introducing ihash
+ * into method tables
+ */
+struct rb_method_entry_finder {
+    ID called_id;
+    union {
+	struct rb_ihash_node mtbl_node; /* lookup does not look into this */
+	struct rb_ihash_node *retval;
+    };
+};
+
+STATIC_ASSERT(method_entry_finder_offsets,
+  offsetof(struct rb_method_entry_finder, called_id) ==
+  offsetof(rb_method_entry_t, called_id) &&
+  offsetof(struct rb_method_entry_finder, mtbl_node) ==
+  offsetof(rb_method_entry_t, mtbl_node));
+
 static inline rb_method_entry_t *
 lookup_method_table(VALUE klass, ID id)
 {
-    st_data_t body;
-    st_table *m_tbl = RCLASS_M_TBL(klass);
-    if (st_lookup(m_tbl, id, &body)) {
-	return (rb_method_entry_t *) body;
-    }
-    else {
-	return 0;
-    }
+    struct rb_method_entry_finder finder;
+
+    finder.called_id = id;
+    finder.retval = rb_ihash_lookup(RCLASS_M_TBL(klass), &finder.mtbl_node);
+    return finder.retval ? rb_method_entry_of(finder.retval) : 0;
 }

 static void
......
 		     rb_method_definition_t *def, rb_method_flag_t noex,
 		     VALUE defined_class)
 {
-    rb_method_entry_t *me;
+    rb_method_entry_t *me, *old_me;
+    struct rb_ihash_node *old;
 #if NOEX_NOREDEF
     VALUE rklass;
 #endif
-    st_table *mtbl;
-    st_data_t data;
     int make_refined = 0;

     if (NIL_P(klass)) {
......
 	rb_add_refined_method_entry(refined_class, mid);
     }
     if (type == VM_METHOD_TYPE_REFINED) {
-	rb_method_entry_t *old_me =
-	    lookup_method_table(RCLASS_ORIGIN(klass), mid);
+	old_me = lookup_method_table(RCLASS_ORIGIN(klass), mid);
 	if (old_me) rb_vm_check_redefinition_opt_method(old_me, klass);
     }
     else {
 	klass = RCLASS_ORIGIN(klass);
     }
-    mtbl = RCLASS_M_TBL(klass);

     /* check re-definition */
-    if (st_lookup(mtbl, mid, &data)) {
-	rb_method_entry_t *old_me = (rb_method_entry_t *)data;
+    old_me = lookup_method_table(klass, mid);
+    if (old_me) {
 	rb_method_definition_t *old_def = old_me->def;

 	if (rb_method_definition_eq(old_def, def)) return old_me;
......
 	make_method_entry_refined(me);
     }

-    st_insert(mtbl, mid, (st_data_t) me);
+    old = rb_ihash_insert(RCLASS_M_TBLP(klass), &me->mtbl_node);
+    if (old_me && (old != &old_me->mtbl_node)) {
+	rb_bug("rb_method_entry_make: ihash race between lookup and insert");
+    }

     return me;
 }
......
 static void
 remove_method(VALUE klass, ID mid)
 {
-    st_data_t key, data;
-    rb_method_entry_t *me = 0;
+    rb_method_entry_t *me;
+    struct rb_ihash_node *node;
     VALUE self = klass;

     klass = RCLASS_ORIGIN(klass);
......
 	rb_warn("removing `%s' may cause serious problems", rb_id2name(mid));
     }

-    if (!st_lookup(RCLASS_M_TBL(klass), mid, &data) ||
-	!(me = (rb_method_entry_t *)data) ||
-	(!me->def || me->def->type == VM_METHOD_TYPE_UNDEF)) {
+    me = lookup_method_table(klass, mid);
+    if (!me || !me->def || (me->def->type == VM_METHOD_TYPE_UNDEF)) {
 	rb_name_error(mid, "method `%s' not defined in %s",
 		      rb_id2name(mid), rb_class2name(klass));
     }
-    key = (st_data_t)mid;
-    st_delete(RCLASS_M_TBL(klass), &key, &data);
+
+    node = rb_ihash_unlink(RCLASS_M_TBL(klass), &me->mtbl_node);
+    if (node != &me->mtbl_node) {
+       rb_bug("remove_method: ihash race between lookup and unlink");
+    }

     rb_vm_check_redefinition_opt_method(me, klass);
     rb_clear_method_cache_by_class(klass);
......
 	REPLICATE_METHOD(rb_eException, idRespond_to_missing, NOEX_PUBLIC);
     }
 }
+
+static int
+rb_ihash_me_cmp(const struct rb_ihash_node *a, const struct rb_ihash_node *b)
+{
+    rb_method_entry_t *me1 = rb_method_entry_of(a);
+    rb_method_entry_t *me2 = rb_method_entry_of(b);
+
+    return me1->called_id != me2->called_id;
+}
+
+static st_index_t
+rb_ihash_me_hash(const struct rb_ihash_node *node)
+{
+    rb_method_entry_t *me = rb_method_entry_of(node);
+    ID id = me->called_id;
+
+    /* TODO: tuning try generating id from hash of str associated with sym */
+    return (st_index_t)((id >> 3) ^ (id << 3));
+}
+
+struct rb_ihash_type rb_ihash_method_entry = {
+    rb_ihash_me_cmp,
+    rb_ihash_me_hash
+};
-