Project

General

Profile

Feature #13517 » 0001-reduce-rb_mutex_t-size-from-160-to-80-bytes-on-64-bi.patch

normalperson (Eric Wong), 04/28/2017 02:21 AM

View differences:

thread.c
4940 4940
		    th->self, th, thread_id_str(th), th->interrupt_flag);
4941 4941
	if (th->locking_mutex) {
4942 4942
	    rb_mutex_t *mutex;
4943
	    struct rb_thread_struct volatile *mth;
4944
	    int waiting;
4945 4943
	    GetMutexPtr(th->locking_mutex, mutex);
4946

  
4947
	    native_mutex_lock(&mutex->lock);
4948
	    mth = mutex->th;
4949
	    waiting = mutex->cond_waiting;
4950
	    native_mutex_unlock(&mutex->lock);
4951
	    rb_str_catf(msg, " mutex:%p cond:%d", mth, waiting);
4944
	    rb_str_catf(msg, " mutex:%p cond:%"PRIuSIZE,
4945
			mutex->th, rb_mutex_num_waiting(mutex));
4952 4946
	}
4953 4947
	{
4954 4948
	    rb_thread_list_t *list = th->join_list;
......
4981 4975
	    rb_mutex_t *mutex;
4982 4976
	    GetMutexPtr(th->locking_mutex, mutex);
4983 4977

  
4984
	    native_mutex_lock(&mutex->lock);
4985
	    if (mutex->th == th || (!mutex->th && mutex->cond_waiting)) {
4978
	    if (mutex->th == th || (!mutex->th && !list_empty(&mutex->waitq))) {
4986 4979
		found = 1;
4987 4980
	    }
4988
	    native_mutex_unlock(&mutex->lock);
4989 4981
	}
4990 4982
	if (found)
4991 4983
	    break;
thread_sync.c
1 1
/* included by thread.c */
2
#include "ccan/list/list.h"
2 3

  
3 4
static VALUE rb_cMutex, rb_cQueue, rb_cSizedQueue, rb_cConditionVariable;
4 5
static VALUE rb_eClosedQueueError;
5 6

  
6 7
/* Mutex */
7 8

  
9
/* mutex_waiter is always on-stack */
10
struct mutex_waiter {
11
    rb_thread_t *th;
12
    struct list_node node;
13
};
14

  
8 15
typedef struct rb_mutex_struct {
9
    rb_nativethread_lock_t lock;
10
    rb_nativethread_cond_t cond;
11 16
    struct rb_thread_struct volatile *th;
12 17
    struct rb_mutex_struct *next_mutex;
13
    int cond_waiting;
18
    struct list_head waitq; /* protected by GVL */
14 19
    int allow_trap;
15 20
} rb_mutex_t;
16 21

  
......
51 56

  
52 57
#define mutex_mark NULL
53 58

  
59
static size_t
60
rb_mutex_num_waiting(rb_mutex_t *mutex)
61
{
62
    struct mutex_waiter *w;
63
    size_t n = 0;
64

  
65
    list_for_each(&mutex->waitq, w, node) {
66
	n++;
67
    }
68

  
69
    return n;
70
}
71

  
54 72
static void
55 73
mutex_free(void *ptr)
56 74
{
......
60 78
	const char *err = rb_mutex_unlock_th(mutex, mutex->th);
61 79
	if (err) rb_bug("%s", err);
62 80
    }
63
    native_mutex_destroy(&mutex->lock);
64
    native_cond_destroy(&mutex->cond);
65 81
    ruby_xfree(ptr);
66 82
}
67 83

  
......
95 111
    rb_mutex_t *mutex;
96 112

  
97 113
    obj = TypedData_Make_Struct(klass, rb_mutex_t, &mutex_data_type, mutex);
98
    native_mutex_initialize(&mutex->lock);
99
    native_cond_initialize(&mutex->cond, RB_CONDATTR_CLOCK_MONOTONIC);
114
    list_head_init(&mutex->waitq);
100 115
    return obj;
101 116
}
102 117

  
......
158 173
    VALUE locked = Qfalse;
159 174
    GetMutexPtr(self, mutex);
160 175

  
161
    native_mutex_lock(&mutex->lock);
162 176
    if (mutex->th == 0) {
163 177
	rb_thread_t *th = GET_THREAD();
164 178
	mutex->th = th;
......
166 180

  
167 181
	mutex_locked(th, self);
168 182
    }
169
    native_mutex_unlock(&mutex->lock);
170 183

  
171 184
    return locked;
172 185
}
173 186

  
174
static int
175
lock_func(rb_thread_t *th, rb_mutex_t *mutex, int timeout_ms)
176
{
177
    int interrupted = 0;
178
    int err = 0;
179

  
180
    mutex->cond_waiting++;
181
    for (;;) {
182
	if (!mutex->th) {
183
	    mutex->th = th;
184
	    break;
185
	}
186
	if (RUBY_VM_INTERRUPTED(th)) {
187
	    interrupted = 1;
188
	    break;
189
	}
190
	if (err == ETIMEDOUT) {
191
	    interrupted = 2;
192
	    break;
193
	}
194

  
195
	if (timeout_ms) {
196
	    struct timespec timeout_rel;
197
	    struct timespec timeout;
198

  
199
	    timeout_rel.tv_sec = 0;
200
	    timeout_rel.tv_nsec = timeout_ms * 1000 * 1000;
201
	    timeout = native_cond_timeout(&mutex->cond, timeout_rel);
202
	    err = native_cond_timedwait(&mutex->cond, &mutex->lock, &timeout);
203
	}
204
	else {
205
	    native_cond_wait(&mutex->cond, &mutex->lock);
206
	    err = 0;
207
	}
208
    }
209
    mutex->cond_waiting--;
210

  
211
    return interrupted;
212
}
213

  
214
static void
215
lock_interrupt(void *ptr)
216
{
217
    rb_mutex_t *mutex = (rb_mutex_t *)ptr;
218
    native_mutex_lock(&mutex->lock);
219
    if (mutex->cond_waiting > 0)
220
	native_cond_broadcast(&mutex->cond);
221
    native_mutex_unlock(&mutex->lock);
222
}
223

  
224 187
/*
225 188
 * At maximum, only one thread can use cond_timedwait and watch deadlock
226 189
 * periodically. Multiple polling thread (i.e. concurrent deadlock check)
......
248 211
    }
249 212

  
250 213
    if (rb_mutex_trylock(self) == Qfalse) {
214
	struct mutex_waiter w;
215

  
251 216
	if (mutex->th == th) {
252 217
	    rb_raise(rb_eThreadError, "deadlock; recursive locking");
253 218
	}
254 219

  
220
	w.th = th;
221

  
255 222
	while (mutex->th != th) {
256
	    int interrupted;
257 223
	    enum rb_thread_status prev_status = th->status;
258
	    volatile int timeout_ms = 0;
259
	    struct rb_unblock_callback oldubf;
224
	    struct timeval *timeout = 0;
225
	    struct timeval tv = { 0, 100000 }; /* 100ms */
260 226

  
261
	    set_unblock_function(th, lock_interrupt, mutex, &oldubf, FALSE);
262 227
	    th->status = THREAD_STOPPED_FOREVER;
263 228
	    th->locking_mutex = self;
264

  
265
	    native_mutex_lock(&mutex->lock);
266 229
	    th->vm->sleeper++;
267 230
	    /*
268
	     * Carefully! while some contended threads are in lock_func(),
231
	     * Carefully! while some contended threads are in native_sleep(),
269 232
	     * vm->sleeper is unstable value. we have to avoid both deadlock
270 233
	     * and busy loop.
271 234
	     */
272 235
	    if ((vm_living_thread_num(th->vm) == th->vm->sleeper) &&
273 236
		!patrol_thread) {
274
		timeout_ms = 100;
237
		timeout = &tv;
275 238
		patrol_thread = th;
276 239
	    }
277 240

  
278
	    GVL_UNLOCK_BEGIN();
279
	    interrupted = lock_func(th, mutex, (int)timeout_ms);
280
	    native_mutex_unlock(&mutex->lock);
281
	    GVL_UNLOCK_END();
241
	    list_add_tail(&mutex->waitq, &w.node);
242
	    native_sleep(th, timeout); /* release GVL */
243
	    list_del(&w.node);
244
	    if (!mutex->th) {
245
		mutex->th = th;
246
	    }
282 247

  
283 248
	    if (patrol_thread == th)
284 249
		patrol_thread = NULL;
285 250

  
286
	    reset_unblock_function(th, &oldubf);
287

  
288 251
	    th->locking_mutex = Qfalse;
289
	    if (mutex->th && interrupted == 2) {
252
	    if (mutex->th && timeout && !RUBY_VM_INTERRUPTED(th)) {
290 253
		rb_check_deadlock(th->vm);
291 254
	    }
292 255
	    if (th->status == THREAD_STOPPED_FOREVER) {
......
296 259

  
297 260
	    if (mutex->th == th) mutex_locked(th, self);
298 261

  
299
	    if (interrupted) {
300
		RUBY_VM_CHECK_INTS_BLOCKING(th);
301
	    }
262
	    RUBY_VM_CHECK_INTS_BLOCKING(th);
302 263
	}
303 264
    }
304 265
    return self;
......
330 291
{
331 292
    const char *err = NULL;
332 293

  
333
    native_mutex_lock(&mutex->lock);
334

  
335 294
    if (mutex->th == 0) {
336 295
	err = "Attempt to unlock a mutex which is not locked";
337 296
    }
338 297
    else if (mutex->th != th) {
339 298
	err = "Attempt to unlock a mutex which is locked by another thread";
340
    }
341
    else {
342
	mutex->th = 0;
343
	if (mutex->cond_waiting > 0)
344
	    native_cond_signal(&mutex->cond);
345
    }
346

  
347
    native_mutex_unlock(&mutex->lock);
348

  
349
    if (!err) {
299
    } else {
300
	struct mutex_waiter *cur = 0, *next = 0;
350 301
	rb_mutex_t *volatile *th_mutex = &th->keeping_mutexes;
302

  
303
	mutex->th = 0;
304
	list_for_each_safe(&mutex->waitq, cur, next, node) {
305
	    list_del_init(&cur->node);
306
	    switch (cur->th->state) {
307
	    case THREAD_KILLED:
308
		continue;
309
	    case THREAD_STOPPED:
310
	    case THREAD_RUNNABLE:
311
	    case THREAD_STOPPED_FOREVER:
312
		rb_threadptr_interrupt(cur->th);
313
		goto found;
314
	    }
315
	}
316
found:
351 317
	while (*th_mutex != mutex) {
352 318
	    th_mutex = &(*th_mutex)->next_mutex;
353 319
	}
......
411 377
	mutexes = mutex->next_mutex;
412 378
	mutex->th = 0;
413 379
	mutex->next_mutex = 0;
380
	list_head_init(&mutex->waitq);
414 381
    }
415 382
}
416 383
#endif
417
-