0001-mutex-fix-silly-last_thread-handling.patch

Motohiro KOSAKI, 04/25/2011 12:16 AM

Download (3 KB)

View differences:

thread.c
3208 3208
    return locked;
3209 3209
}
3210 3210

  
/*
 * Build an absolute CLOCK_REALTIME deadline `timeout_ms` milliseconds
 * from now, for use with native_cond_timedwait().
 *
 * Returns the deadline as a struct timespec; raises via rb_sys_fail()
 * if the current time cannot be read.
 *
 * NOTE(review): this assumes the condvar waits on the realtime clock
 * (the pthread default) — confirm native_cond_timedwait() does not use
 * a monotonic condattr.
 */
static struct timespec init_lock_timeout(int timeout_ms)
{
	struct timespec ts;
	struct timeval tv;
	int ret;

	ret = gettimeofday(&tv, NULL);
	if (ret < 0)
	    rb_sys_fail(0);

	/*
	 * Split ms into whole seconds and a sub-second remainder BEFORE
	 * converting to nanoseconds: the previous code computed
	 * timeout_ms * 1000 * 1000 in one go, which (a) overflows int for
	 * timeout_ms > 2147 and (b) could leave tv_nsec more than one
	 * second past 1e9, which the single normalization step below
	 * would not fix (pthread_cond_timedwait returns EINVAL for
	 * tv_nsec outside [0, 1e9)).
	 */
	ts.tv_sec = tv.tv_sec + timeout_ms / 1000;
	ts.tv_nsec = tv.tv_usec * 1000 + (long)(timeout_ms % 1000) * 1000 * 1000;
	if (ts.tv_nsec >= 1000000000) {
	    ts.tv_sec++;
	    ts.tv_nsec -= 1000000000;
	}

	return ts;
}
3230

  
3211 3231
static int
3212
lock_func(rb_thread_t *th, mutex_t *mutex, int last_thread)
3232
lock_func(rb_thread_t *th, mutex_t *mutex, int timeout_ms)
3213 3233
{
3214 3234
    int interrupted = 0;
3215
#if 0 /* for debug */
3216
    native_thread_yield();
3217
#endif
3218 3235

  
3219 3236
    native_mutex_lock(&mutex->lock);
3220 3237
    th->transition_for_lock = 0;
3221 3238
    while (mutex->th || (mutex->th = th, 0)) {
3222
	if (last_thread) {
3223
	    interrupted = 2;
3224
	    break;
3225
	}
3239
	struct timespec ts;
3240
	int ret;
3226 3241

  
3227 3242
	mutex->cond_waiting++;
3228
	native_cond_wait(&mutex->cond, &mutex->lock);
3243
	if (timeout_ms) {
3244
	    ts = init_lock_timeout(timeout_ms);
3245
	    ret = native_cond_timedwait(&mutex->cond, &mutex->lock, &ts);
3246
	    if (ret == ETIMEDOUT) {
3247
		interrupted = 2;
3248
		break;
3249
	    }
3250
	}
3251
	else {
3252
	    native_cond_wait(&mutex->cond, &mutex->lock);
3253
	}
3229 3254
	mutex->cond_notified--;
3230 3255

  
3231 3256
	if (RUBY_VM_INTERRUPTED(th)) {
......
3236 3261
    th->transition_for_lock = 1;
3237 3262
    native_mutex_unlock(&mutex->lock);
3238 3263

  
3239
    if (interrupted == 2) native_thread_yield();
3240
#if 0 /* for debug */
3241
    native_thread_yield();
3242
#endif
3243

  
3244 3264
    return interrupted;
3245 3265
}
3246 3266

  
......
3280 3300
	while (mutex->th != th) {
3281 3301
	    int interrupted;
3282 3302
	    enum rb_thread_status prev_status = th->status;
3283
	    int last_thread = 0;
3303
	    int timeout_ms = 0;
3284 3304
	    struct rb_unblock_callback oldubf;
3285 3305

  
3286 3306
	    set_unblock_function(th, lock_interrupt, mutex, &oldubf);
3287 3307
	    th->status = THREAD_STOPPED_FOREVER;
3288 3308
	    th->vm->sleeper++;
3289 3309
	    th->locking_mutex = self;
3310

  
3311
	    /*
3312
	     * Careful: while some contended threads are in lock_func(),
3313
	     * vm->sleeper is an unstable value. We have to avoid both
3314
	     * deadlock and busy looping.
3315
	     */
3290 3316
	    if (vm_living_thread_num(th->vm) == th->vm->sleeper) {
3291
		last_thread = 1;
3317
		timeout_ms = 100;
3292 3318
	    }
3293 3319

  
3294 3320
	    th->transition_for_lock = 1;
3295 3321
	    BLOCKING_REGION_CORE({
3296
		interrupted = lock_func(th, mutex, last_thread);
3322
		interrupted = lock_func(th, mutex, timeout_ms);
3297 3323
	    });
3298 3324
	    th->transition_for_lock = 0;
3299 3325
	    remove_signal_thread_list(th);
3300
-