Feature #13174 » improve_id_table.patch

id_table.c
#define ID_TABLE_DEBUG 0
#endif

#if ID_TABLE_DEBUG == 0
#define NDEBUG
#endif
#include "ruby_assert.h"

typedef rb_id_serial_t id_key_t;

static inline ID
...
   uses mark-bit on collisions - need extra 1 bit,
   ID is strictly 3 bits larger than rb_id_serial_t */
typedef struct rb_id_item {
    id_key_t key;
#if SIZEOF_VALUE == 8
    int collision;
#endif
    VALUE val;
} item_t;

struct rb_id_table {
    int capa;
    int num;
    int used;
    item_t *items;
    id_key_t *keys;
};

#if SIZEOF_VALUE == 8
#define ITEM_GET_KEY(tbl, i) ((tbl)->items[i].key)
#define ITEM_KEY_ISSET(tbl, i) ((tbl)->items[i].key)
#define ITEM_COLLIDED(tbl, i) ((tbl)->items[i].collision)
#define ITEM_SET_COLLIDED(tbl, i) ((tbl)->items[i].collision = 1)
#define ITEM_GET_KEY(tbl, i) ((tbl)->keys[i] >> 1)
#define ITEM_KEY_ISSET(tbl, i) ((tbl)->keys[i] > 1)
#define ITEM_COLLIDED(tbl, i) ((tbl)->keys[i] & 1)
#define ITEM_SET_COLLIDED(tbl, i) ((tbl)->keys[i] |= 1)
static inline void
ITEM_SET_KEY(struct rb_id_table *tbl, int i, id_key_t key)
{
    tbl->items[i].key = key;
    tbl->keys[i] = (key << 1) | ITEM_COLLIDED(tbl, i);
}
#else
#define ITEM_GET_KEY(tbl, i) ((tbl)->items[i].key >> 1)
#define ITEM_KEY_ISSET(tbl, i) ((tbl)->items[i].key > 1)
#define ITEM_COLLIDED(tbl, i) ((tbl)->items[i].key & 1)
#define ITEM_SET_COLLIDED(tbl, i) ((tbl)->items[i].key |= 1)
#define ID_TABLE_VALUES(tbl) ((VALUE*)((tbl)->keys+(tbl)->capa))
#define ITEM_GET_VALUE(tbl, i) (ID_TABLE_VALUES(tbl)[i])
static inline void
ITEM_SET_KEY(struct rb_id_table *tbl, int i, id_key_t key)
ITEM_SET_VALUE(struct rb_id_table *tbl, int i, VALUE val)
{
    tbl->items[i].key = (key << 1) | ITEM_COLLIDED(tbl, i);
    ID_TABLE_VALUES(tbl)[i] = val;
}
#endif

#define ITEM_SIZE (sizeof(id_key_t) + sizeof(VALUE))
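The macros above are the heart of the change: instead of a separate `collision` field per item, the reworked layout steals the low bit of each stored key, so `ITEM_GET_KEY` shifts it back out while `ITEM_COLLIDED`/`ITEM_SET_COLLIDED` test and set that bit. A minimal standalone sketch of that encoding (illustration only, with `uint32_t` standing in for `rb_id_serial_t`):

```c
/* Illustration only (not part of the patch): how a key and its collision
 * mark share one id_key_t slot, as the ITEM_* macros above do.
 * The serial is shifted left by one; bit 0 is the "a probe passed through
 * here" mark, so an empty, never-collided slot stays exactly 0. */
#include <stdint.h>
#include <stdio.h>
#include <assert.h>

typedef uint32_t id_key_t;          /* stand-in for rb_id_serial_t */

int
main(void)
{
    id_key_t slot = 0;              /* empty slot: no key, no mark         */
    id_key_t key  = 12345;

    slot |= 1;                      /* ITEM_SET_COLLIDED                   */
    slot = (key << 1) | (slot & 1); /* ITEM_SET_KEY keeps the mark         */

    assert((slot >> 1) == key);     /* ITEM_GET_KEY                        */
    assert((slot & 1) == 1);        /* ITEM_COLLIDED                       */
    assert(slot > 1);               /* ITEM_KEY_ISSET: >1 means a real key */
    printf("encoded slot = %u\n", (unsigned)slot);
    return 0;
}
```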
static inline int
round_capa(int capa)
...
    if (capa > 0) {
        capa = round_capa(capa);
        tbl->capa = (int)capa;
        tbl->items = ZALLOC_N(item_t, capa);
        tbl->keys = (id_key_t*)ruby_xcalloc(capa, ITEM_SIZE);
    }
    return tbl;
}
...
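The `ruby_xcalloc(capa, ITEM_SIZE)` call above replaces the array of `item_t` with a single zeroed block sized for `capa` keys plus `capa` values; `ID_TABLE_VALUES(tbl)` is simply a pointer past the end of the key array. A self-contained sketch of that layout, using plain `calloc` and stand-in typedefs rather than the patch's code:

```c
/* Illustration only: one allocation, keys in front, values behind,
 * mirroring ID_TABLE_VALUES(tbl) == (VALUE *)((tbl)->keys + (tbl)->capa). */
#include <stdint.h>
#include <stdlib.h>
#include <stdio.h>

typedef uint32_t  id_key_t;   /* stand-in for rb_id_serial_t */
typedef uintptr_t VALUE;      /* stand-in for Ruby's VALUE   */

int
main(void)
{
    int capa = 8;
    size_t item_size = sizeof(id_key_t) + sizeof(VALUE);
    id_key_t *keys = calloc(capa, item_size);   /* zeroed: every slot empty      */
    VALUE *values = (VALUE *)(keys + capa);     /* value array starts after keys */

    keys[3] = (42 << 1);                        /* encoded key in slot 3         */
    values[3] = (VALUE)0xCAFE;                  /* its value, at the same index  */

    printf("slot 3: key=%u val=%#lx\n",
           (unsigned)(keys[3] >> 1), (unsigned long)values[3]);
    free(keys);
    return 0;
}
```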
void
rb_id_table_free(struct rb_id_table *tbl)
{
    xfree(tbl->items);
    xfree(tbl->keys);
    xfree(tbl);
}
...
{
    tbl->num = 0;
    tbl->used = 0;
    MEMZERO(tbl->items, item_t, tbl->capa);
    memset(tbl->keys, 0, tbl->capa * ITEM_SIZE);
}

size_t
...
size_t
rb_id_table_memsize(const struct rb_id_table *tbl)
{
    return sizeof(item_t) * tbl->capa + sizeof(struct rb_id_table);
    return ITEM_SIZE * (size_t)tbl->capa + sizeof(struct rb_id_table);
}

static int
...
    int mask = tbl->capa - 1;
    int ix = key & mask;
    int d = 1;
    assert(key != 0);
#if ID_TABLE_DEBUG
    assert(key > 0);
#endif
    while (ITEM_KEY_ISSET(tbl, ix)) {
        ITEM_SET_COLLIDED(tbl, ix);
        ix = (ix + d) & mask;
...
        tbl->used++;
    }
    ITEM_SET_KEY(tbl, ix, key);
    tbl->items[ix].val = val;
    ITEM_SET_VALUE(tbl, ix, val);
}
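The insert loop above marks every occupied slot it probes past with `ITEM_SET_COLLIDED`, so a later lookup can stop at the first slot that never had a probe pass through it: if that slot does not hold the key, nothing further along the chain can. Below is a toy table demonstrating that invariant with the same increasing probe step (illustration only, not the patch's code; it never grows, so it must stay well under capacity):

```c
/* Toy open-addressing table illustrating the collision-mark invariant:
 * insertion marks every slot it passes over; lookup may stop at the first
 * unmarked slot that doesn't match.  Illustration only. */
#include <stdint.h>
#include <stdio.h>

#define CAPA 8                      /* power of two, like round_capa() */
static uint32_t slots[CAPA];        /* key<<1 | collided, 0 == empty   */

static void
toy_insert(uint32_t key)            /* keep insertions well under CAPA */
{
    int mask = CAPA - 1, ix = key & mask, d = 1;
    while (slots[ix] > 1) {         /* slot already holds some key     */
        slots[ix] |= 1;             /* mark: a probe passed through    */
        ix = (ix + d++) & mask;
    }
    slots[ix] = (key << 1) | (slots[ix] & 1);
}

static int
toy_lookup(uint32_t key)
{
    int mask = CAPA - 1, ix = key & mask, d = 1;
    while ((slots[ix] >> 1) != key) {
        if (!(slots[ix] & 1)) return -1;  /* never collided: key absent */
        ix = (ix + d++) & mask;
    }
    return ix;
}

int
main(void)
{
    toy_insert(3); toy_insert(11); toy_insert(19);  /* all hash to slot 3 */
    printf("19 found at %d, 27 found at %d\n", toy_lookup(19), toy_lookup(27));
    return 0;
}
```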
static int
...
    }
    tbl->num--;
    ITEM_SET_KEY(tbl, ix, 0);
    tbl->items[ix].val = 0;
    ITEM_SET_VALUE(tbl, ix, 0);
    return TRUE;
} else {
    return FALSE;
...
static void
hash_table_extend(struct rb_id_table* tbl)
{
    /* fill rate 66% */
    if (tbl->used + (tbl->used >> 1) >= tbl->capa) {
        int new_cap = round_capa(tbl->num + (tbl->num >> 1));
        struct rb_id_table tmp_tbl;
        int i;
        item_t* old;
        struct rb_id_table tmp_tbl = {0, 0, 0};
        if (new_cap < tbl->capa) {
            new_cap = round_capa(tbl->used + (tbl->used >> 1));
        }
        tmp_tbl.capa = new_cap;
        tmp_tbl.items = ZALLOC_N(item_t, new_cap);
        id_key_t* old;
        VALUE* values = ID_TABLE_VALUES(tbl);
        rb_id_table_init(&tmp_tbl, tbl->num + (tbl->num >> 1) + 1);
        for (i = 0; i < tbl->capa; i++) {
            id_key_t key = ITEM_GET_KEY(tbl, i);
            if (key != 0) {
                hash_table_raw_insert(&tmp_tbl, key, tbl->items[i].val);
                hash_table_raw_insert(&tmp_tbl, key, values[i]);
            }
        }
        old = tbl->items;
        old = tbl->keys;
        *tbl = tmp_tbl;
        xfree(old);
    }
...
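The `/* fill rate 66% */` condition grows the table once `used + used/2` reaches the capacity, i.e. when roughly two thirds of the slots have ever been occupied, and the replacement table is sized from `num` (live entries) plus 50% headroom. A quick check of the trigger arithmetic (illustration only):

```c
/* Illustration of the 66% fill-rate trigger used above. */
#include <stdio.h>

static int
needs_extend(int used, int capa)
{
    return used + (used >> 1) >= capa;
}

int
main(void)
{
    /* capa = 32: 21 used slots stay put (21 + 10 = 31 < 32),
     * the 22nd crosses the ~2/3 mark (22 + 11 = 33 >= 32).   */
    printf("used=21 -> %d, used=22 -> %d\n",
           needs_extend(21, 32), needs_extend(22, 32));
    return 0;
}
```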
    int index = hash_table_index(tbl, key);
    if (index >= 0) {
        *valp = tbl->items[index].val;
        *valp = ITEM_GET_VALUE(tbl, index);
        return TRUE;
    }
    else {
...
    const int index = hash_table_index(tbl, key);
    if (index >= 0) {
        tbl->items[index].val = val;
        ITEM_SET_VALUE(tbl, index, val);
    }
    else {
        hash_table_extend(tbl);
...
    for (i=0; i<capa; i++) {
        if (ITEM_KEY_ISSET(tbl, i)) {
            const id_key_t key = ITEM_GET_KEY(tbl, i);
            enum rb_id_table_iterator_result ret = (*func)(key2id(key), tbl->items[i].val, data);
            assert(key != 0);
            const VALUE val = ITEM_GET_VALUE(tbl, i);
            enum rb_id_table_iterator_result ret = (*func)(key2id(key), val, data);
            if (ret == ID_TABLE_DELETE)
                hash_delete_index(tbl, i);
...
    for (i=0; i<capa; i++) {
        if (ITEM_KEY_ISSET(tbl, i)) {
            enum rb_id_table_iterator_result ret = (*func)(tbl->items[i].val, data);
            const VALUE val = ITEM_GET_VALUE(tbl, i);
            enum rb_id_table_iterator_result ret = (*func)(val, data);
            if (ret == ID_TABLE_DELETE)
                hash_delete_index(tbl, i);
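These two loops drive the table's foreach iteration: the callback returns an `enum rb_id_table_iterator_result`, and returning `ID_TABLE_DELETE` (seen above) asks the loop to drop the current slot via `hash_delete_index`. A self-contained toy version of that callback-driven protocol, with stand-in names rather than Ruby's actual enum and functions:

```c
/* Self-contained sketch of the iterator-result protocol used above:
 * the callback decides per entry whether iteration continues, stops,
 * or deletes the current entry.  Toy types only, not Ruby's API. */
#include <stdio.h>
#include <stddef.h>

enum iter_result { ITER_CONTINUE, ITER_STOP, ITER_DELETE };

static enum iter_result
drop_odd(unsigned key, void *data)
{
    (void)data;
    return (key & 1) ? ITER_DELETE : ITER_CONTINUE;
}

int
main(void)
{
    unsigned keys[] = {2, 7, 8, 11, 14};
    int n = 5, i;
    for (i = 0; i < n; i++) {
        enum iter_result ret = drop_odd(keys[i], NULL);
        if (ret == ITER_DELETE)
            keys[i] = 0;            /* stand-in for hash_delete_index() */
        else if (ret == ITER_STOP)
            break;
    }
    for (i = 0; i < n; i++) printf("%u ", keys[i]);
    printf("\n");
    return 0;
}
```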
symbol.c
#include "symbol.h"
|
||
#include "gc.h"
|
||
#include "probes.h"
|
||
#include "ruby_assert.h"
|
||
#ifndef SYMBOL_DEBUG
|
||
# define SYMBOL_DEBUG 0
|
||
... | ... | |
{
    rb_id_serial_t next_serial = global_symbols.last_id + 1;

    if (next_serial == 0) {
    if (sizeof(next_serial) >= sizeof(ID) &&
        next_serial >= (rb_id_serial_t)(~(ID)0 >> (ID_SCOPE_SHIFT + RUBY_SPECIAL_SHIFT))) {
        return (ID)-1;
    }
    /* need at least 1 bit for collision mark in id_table */
    else if (sizeof(next_serial) < sizeof(ID) &&
             next_serial >= ~(rb_id_serial_t)0 >> 1) {
        return (ID)-1;
    }
    else {
...
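The two branches above cap the serial counter: when `rb_id_serial_t` is at least as wide as `ID`, a new serial must still fit after the scope and special-tag shifts; when it is narrower, one extra bit is reserved for id_table's collision mark, hence the `~0 >> 1` limit. A sketch of the same arithmetic with stand-in widths and an illustrative shift amount (not Ruby's real constants):

```c
/* Illustration of the two overflow limits checked above, using stand-in
 * widths and shift amounts (not Ruby's real constants). */
#include <stdint.h>
#include <stdio.h>

typedef uint64_t ID_t;        /* stand-in for ID              */
typedef uint32_t serial_t;    /* stand-in for rb_id_serial_t  */
#define TAG_SHIFT 12          /* stand-in for ID_SCOPE_SHIFT + RUBY_SPECIAL_SHIFT */

int
main(void)
{
    /* serial narrower than ID: keep one bit spare for the collision mark */
    serial_t narrow_limit = ~(serial_t)0 >> 1;
    /* serial as wide as ID: the tag bits shifted in later must still fit */
    ID_t wide_limit = ~(ID_t)0 >> TAG_SHIFT;

    printf("narrow limit = %u\n", (unsigned)narrow_limit);
    printf("wide limit   = %llu\n", (unsigned long long)wide_limit);
    return 0;
}
```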
    VALUE fstr = RSYMBOL(sym)->fstr;
    ID num = next_id_base();
    if (num == (ID)-1) {
        fstr = rb_str_ellipsize(fstr, 20);
        rb_raise(rb_eRuntimeError,
                 "symbol table overflow (symbol %"PRIsVALUE")",
                 fstr);
    }
    RSYMBOL(sym)->id = id |= num;
    /* make it permanent object */
    set_id_entry(rb_id_to_serial(num), fstr, sym);
...
ID
rb_make_internal_id(void)
{
    return next_id_base() | ID_INTERNAL | ID_STATIC_SYM;
    ID nid = next_id_base();
    /* make_internal_id is called very rarely, during initialization,
     * so don't bother with an exception */
    assert(nid != (ID)-1);
    return nid | ID_INTERNAL | ID_STATIC_SYM;
}

static int
id_table.c
    if (tbl->capa > 0) {
        int mask = tbl->capa - 1;
        int ix = key & mask;
        id_key_t mix = tbl->capa > 64 ? key : 0;
        int d = 1;
        while (key != ITEM_GET_KEY(tbl, ix)) {
            if (!ITEM_COLLIDED(tbl, ix))
                return -1;
            ix = (ix + d) & mask;
            d++;
            d += 1 + (mix >>= 7);
        }
        return ix;
    }
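This second diff changes the probe step: for tables larger than 64 slots, `mix` starts as the full key and feeds its higher bits into the step via `d += 1 + (mix >>= 7)`, so the intent appears to be that keys sharing the same low bits soon take different probe paths instead of scanning the same chain. A standalone sketch comparing the plain and mixed step rules (illustration only):

```c
/* Illustration: probe sequences with the plain step (d++) vs. the mixed
 * step (d += 1 + (mix >>= 7)) for two keys with identical low bits. */
#include <stdint.h>
#include <stdio.h>

#define CAPA 256   /* > 64, so the real code would enable mixing */

static void
probe(uint32_t key, int mixed)
{
    int mask = CAPA - 1;
    int ix = key & mask;
    uint32_t mix = mixed ? key : 0;
    int d = 1, step;

    printf("key %#010x (%s):", (unsigned)key, mixed ? "mixed" : "plain");
    for (step = 0; step < 5; step++) {
        printf(" %3d", ix);
        ix = (ix + d) & mask;
        d += 1 + (mix >>= 7);
    }
    printf("\n");
}

int
main(void)
{
    /* same low 8 bits (0x42), different high bits */
    probe(0x00000042, 0);  probe(0x12340042, 0);   /* identical sequences      */
    probe(0x00000042, 1);  probe(0x12340042, 1);   /* diverge after a few steps */
    return 0;
}
```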
...
{
    int mask = tbl->capa - 1;
    int ix = key & mask;
    id_key_t mix = tbl->capa > 64 ? key : 0;
    int d = 1;
#if ID_TABLE_DEBUG
    assert(key > 0);
...
    while (ITEM_KEY_ISSET(tbl, ix)) {
        ITEM_SET_COLLIDED(tbl, ix);
        ix = (ix + d) & mask;
        d++;
        d += 1 + (mix >>= 7);
    }
    tbl->num++;
    if (!ITEM_COLLIDED(tbl, ix)) {