Bug #14867 » 0001-hijack-SIGCHLD-handler-for-internal-use.patch

normalperson (Eric Wong), 06/23/2018 04:02 PM


mjit.c
 #include "constant.h"
 #include "id_table.h"
 #include "ruby_assert.h"
+#include "ruby/thread.h"
 #include "ruby/util.h"
 #include "ruby/version.h"
 
......
 
 extern int rb_thread_create_mjit_thread(void (*child_hook)(void), void (*worker_func)(void));
 
+
+pid_t ruby_waitpid_locked(rb_vm_t *, rb_pid_t, int *status, int options);
+
 #define RB_CONDATTR_CLOCK_MONOTONIC 1
 
 #ifdef _WIN32
......
 {
     int stat, exit_code;
     pid_t pid;
+    rb_vm_t *vm = GET_VM();
 
+    rb_nativethread_lock_lock(&vm->waitpid_lock);
     pid = start_process(path, argv);
-    if (pid <= 0)
+    if (pid <= 0) {
+        rb_nativethread_lock_unlock(&vm->waitpid_lock);
         return -2;
-
+    }
     for (;;) {
-        waitpid(pid, &stat, 0);
-        if (WIFEXITED(stat)) {
-            exit_code = WEXITSTATUS(stat);
-            break;
-        } else if (WIFSIGNALED(stat)) {
-            exit_code = -1;
+        pid_t r = ruby_waitpid_locked(vm, pid, &stat, 0);
+        if (r == -1) {
+            if (errno == EINTR) continue;
+            fprintf(stderr, "waitpid: %s\n", strerror(errno));
             break;
         }
+        else if (r == pid) {
+            if (WIFEXITED(stat)) {
+                exit_code = WEXITSTATUS(stat);
+                break;
+            } else if (WIFSIGNALED(stat)) {
+                exit_code = -1;
+                break;
+            }
+        }
     }
+    rb_nativethread_lock_unlock(&vm->waitpid_lock);
     return exit_code;
 }
 
......
         CRITICAL_SECTION_START(3, "in stop_worker");
         rb_native_cond_broadcast(&mjit_worker_wakeup);
         CRITICAL_SECTION_FINISH(3, "in stop_worker");
+        RUBY_VM_CHECK_INTS(GET_EC());
     }
 }
 
......
     return Qtrue;
 }
 
+static void *
+wait_pch(void *ignored)
+{
+    rb_native_cond_wait(&mjit_pch_wakeup, &mjit_engine_mutex);
+    return 0;
+}
+
+static void
+ubf_pch(void *ignored)
+{
+    rb_native_mutex_lock(&mjit_engine_mutex);
+    rb_native_cond_signal(&mjit_pch_wakeup);
+    rb_native_mutex_unlock(&mjit_engine_mutex);
+}
+
 /* Finish the threads processing units and creating PCH, finalize
    and free MJIT data.  It should be called last during MJIT
    life.  */
......
    absence.  So wait for a clean finish of the threads.  */
     while (pch_status == PCH_NOT_READY) {
         verbose(3, "Waiting wakeup from make_pch");
-        rb_native_cond_wait(&mjit_pch_wakeup, &mjit_engine_mutex);
+        /* release GVL to handle interrupts */
+        rb_thread_call_without_gvl(wait_pch, 0, ubf_pch, 0);
     }
     CRITICAL_SECTION_FINISH(3, "in mjit_finish to wakeup from pch");
 
process.c
 #endif
 }
 
-struct waitpid_arg {
-    rb_pid_t pid;
-    int flags;
-    int *st;
-};
-
 static rb_pid_t
 do_waitpid(rb_pid_t pid, int *st, int flags)
 {
......
 #endif
 }
 
+struct waitpid_state {
+    struct list_node wnode;
+    rb_nativethread_cond_t cond;
+    rb_pid_t ret;
+    rb_pid_t pid;
+    int status;
+    int options;
+    int errnum;
+    rb_vm_t *vm;
+};
+
+void rb_native_cond_signal(rb_nativethread_cond_t *);
+void rb_native_cond_wait(rb_nativethread_cond_t *, rb_nativethread_lock_t *);
+void rb_native_cond_initialize(rb_nativethread_cond_t *);
+void rb_native_cond_destroy(rb_nativethread_cond_t *);
+
+/* only called by vm->main_thread */
+void
+rb_sigchld(rb_vm_t *vm)
+{
+    struct waitpid_state *w = 0, *next;
+
+    rb_nativethread_lock_lock(&vm->waitpid_lock);
+    list_for_each_safe(&vm->waiting_pids, w, next, wnode) {
+        w->ret = do_waitpid(w->pid, &w->status, w->options | WNOHANG);
+        if (w->ret == 0) continue;
+        if (w->ret == -1) w->errnum = errno;
+        list_del_init(&w->wnode);
+        rb_native_cond_signal(&w->cond);
+    }
+    rb_nativethread_lock_unlock(&vm->waitpid_lock);
+}
+
+static void
+waitpid_state_init(struct waitpid_state *w, rb_vm_t *vm, pid_t pid, int options)
+{
+    rb_native_cond_initialize(&w->cond);
+    w->ret = 0;
+    w->pid = pid;
+    w->status = 0;
+    w->options = options;
+    w->vm = vm;
+    list_node_init(&w->wnode);
+}
+
+/* must be called with vm->waitpid_lock held, this is not interruptible */
+pid_t
+ruby_waitpid_locked(rb_vm_t *vm, rb_pid_t pid, int *status, int options)
+{
+    struct waitpid_state w;
+
+    assert(!ruby_thread_has_gvl_p() && "must not have GVL");
+
+    waitpid_state_init(&w, vm, pid, options);
+    w.ret = do_waitpid(w.pid, &w.status, w.options | WNOHANG);
+    if (w.ret) {
+        if (w.ret == -1) {
+            w.errnum = errno;
+        }
+    }
+    else {
+        list_add(&vm->waiting_pids, &w.wnode);
+        while (!w.ret) {
+            rb_native_cond_wait(&w.cond, &vm->waitpid_lock);
+        }
+        list_del(&w.wnode);
+    }
+    if (status) {
+        *status = w.status;
+    }
+    rb_native_cond_destroy(&w.cond);
+    errno = w.errnum;
+    return w.ret;
+}
+
+static void
+waitpid_ubf(void *x)
+{
+    struct waitpid_state *w = x;
+    rb_nativethread_lock_lock(&w->vm->waitpid_lock);
+    if (!w->ret) {
+        w->errnum = EINTR;
+        w->ret = -1;
+    }
+    rb_native_cond_signal(&w->cond);
+    rb_nativethread_lock_unlock(&w->vm->waitpid_lock);
+}
+
 static void *
-rb_waitpid_blocking(void *data)
+waitpid_nogvl(void *x)
 {
-    struct waitpid_arg *arg = data;
-    rb_pid_t result = do_waitpid(arg->pid, arg->st, arg->flags);
-    return (void *)(VALUE)result;
+    struct waitpid_state *w = x;
+
+    /* let rb_sigchld handle it */
+    rb_native_cond_wait(&w->cond, &w->vm->waitpid_lock);
+
+    return 0;
 }
 
-static rb_pid_t
-do_waitpid_nonblocking(rb_pid_t pid, int *st, int flags)
+static VALUE
+waitpid_wait(VALUE x)
+{
+    struct waitpid_state *w = (struct waitpid_state *)x;
+
+    rb_nativethread_lock_lock(&w->vm->waitpid_lock);
+    w->ret = do_waitpid(w->pid, &w->status, w->options | WNOHANG);
+    if (w->ret) {
+        if (w->ret == -1) {
+            w->errnum = errno;
+        }
+    }
+    else {
+        rb_execution_context_t *ec = GET_EC();
+
+        list_add(&w->vm->waiting_pids, &w->wnode);
+        do {
+            rb_thread_call_without_gvl2(waitpid_nogvl, w, waitpid_ubf, w);
+            if (RUBY_VM_INTERRUPTED_ANY(ec) ||
+                    (w->ret == -1 && w->errnum == EINTR)) {
+                rb_nativethread_lock_unlock(&w->vm->waitpid_lock);
+
+                RUBY_VM_CHECK_INTS(ec);
+
+                rb_nativethread_lock_lock(&w->vm->waitpid_lock);
+                if (w->ret == -1 && w->errnum == EINTR) {
+                    w->ret = do_waitpid(w->pid, &w->status, w->options|WNOHANG);
+                    if (w->ret == -1)
+                        w->errnum = errno;
+                }
+            }
+        } while (!w->ret);
+    }
+    rb_nativethread_lock_unlock(&w->vm->waitpid_lock);
+    return Qfalse;
+}
+
+static VALUE
+waitpid_ensure(VALUE x)
 {
-    void *result;
-    struct waitpid_arg arg;
-    arg.pid = pid;
-    arg.st = st;
-    arg.flags = flags;
-    result = rb_thread_call_without_gvl(rb_waitpid_blocking, &arg,
-					RUBY_UBF_PROCESS, 0);
-    return (rb_pid_t)(VALUE)result;
+    struct waitpid_state *w = (struct waitpid_state *)x;
+
+    if (w->ret <= 0) {
+        rb_nativethread_lock_lock(&w->vm->waitpid_lock);
+        list_del_init(&w->wnode);
+        rb_nativethread_lock_unlock(&w->vm->waitpid_lock);
+    }
+
+    rb_native_cond_destroy(&w->cond);
+    return Qfalse;
 }
 
 rb_pid_t
......
 	result = do_waitpid(pid, st, flags);
     }
     else {
-	while ((result = do_waitpid_nonblocking(pid, st, flags)) < 0 &&
-	       (errno == EINTR)) {
-	    RUBY_VM_CHECK_INTS(GET_EC());
-	}
+        struct waitpid_state w;
+
+        waitpid_state_init(&w, GET_VM(), pid, flags);
+        rb_ensure(waitpid_wait, (VALUE)&w, waitpid_ensure, (VALUE)&w);
+        if (st) {
+            *st = w.status;
+        }
+        result = w.ret;
     }
     if (result > 0) {
 	rb_last_status_set(*st, result);
......
     VALUE execarg_obj;
     struct rb_execarg *eargp;
 
-#if defined(SIGCLD) && !defined(SIGCHLD)
-# define SIGCHLD SIGCLD
-#endif
-
-#ifdef SIGCHLD
-    RETSIGTYPE (*chfunc)(int);
-
-    rb_last_status_clear();
-    chfunc = signal(SIGCHLD, SIG_DFL);
-#endif
     execarg_obj = rb_execarg_new(argc, argv, TRUE, TRUE);
     pid = rb_execarg_spawn(execarg_obj, NULL, 0);
 #if defined(HAVE_WORKING_FORK) || defined(HAVE_SPAWNV)
......
         if (ret == (rb_pid_t)-1)
             rb_sys_fail("Another thread waited the process started by system().");
     }
-#endif
-#ifdef SIGCHLD
-    signal(SIGCHLD, chfunc);
 #endif
     TypedData_Get_Struct(execarg_obj, struct rb_execarg, &exec_arg_data_type, eargp);
     if (pid < 0) {
signal.c
     }
 }
 
+static int
+sig_is_chld(int sig)
+{
+#if defined(SIGCLD)
+    return (sig == SIGCLD);
+#elif defined(SIGCHLD)
+    return (sig == SIGCHLD);
+#endif
+    return 0;
+}
+
+void rb_sigchld(rb_vm_t *); /* process.c */
+
 void
 rb_signal_exec(rb_thread_t *th, int sig)
 {
......
     VALUE cmd = vm->trap_list.cmd[sig];
     int safe = vm->trap_list.safe[sig];
 
+    if (sig_is_chld(sig)) {
+	rb_sigchld(vm);
+    }
     if (cmd == 0) {
 	switch (sig) {
 	  case SIGINT:
......
 #endif
 #ifdef SIGUSR2
       case SIGUSR2:
+#endif
+#ifdef SIGCLD
+      case SIGCLD:
+#elif defined(SIGCHLD)
+      case SIGCHLD:
 #endif
         func = sighandler;
         break;
......
     VALUE command;
 
     if (NIL_P(*cmd)) {
+	if (sig_is_chld(sig)) {
+	    goto sig_dfl;
+	}
 	func = SIG_IGN;
     }
     else {
......
 		break;
               case 14:
 		if (memcmp(cptr, "SYSTEM_DEFAULT", 14) == 0) {
+		    if (sig_is_chld(sig)) {
+			goto sig_dfl;
+		    }
                     func = SIG_DFL;
                     *cmd = 0;
 		}
......
 	      case 7:
 		if (memcmp(cptr, "SIG_IGN", 7) == 0) {
 sig_ign:
+		    if (sig_is_chld(sig)) {
+			goto sig_dfl;
+		    }
                     func = SIG_IGN;
                     *cmd = Qtrue;
 		}
......
 init_sigchld(int sig)
 {
     sighandler_t oldfunc;
+    sighandler_t func = sighandler;
 
     oldfunc = ruby_signal(sig, SIG_DFL);
     if (oldfunc == SIG_ERR) return -1;
-    if (oldfunc != SIG_DFL && oldfunc != SIG_IGN) {
-	ruby_signal(sig, oldfunc);
-    }
-    else {
-	GET_VM()->trap_list.cmd[sig] = 0;
-    }
+    ruby_signal(sig, func);
+    GET_VM()->trap_list.cmd[sig] = 0;
+
     return 0;
 }
 
thread.c
     gvl_release(vm);
     gvl_destroy(vm);
     rb_native_mutex_destroy(&vm->thread_destruct_lock);
+    rb_native_mutex_destroy(&vm->waitpid_lock);
 }
 
 void
......
 	    gvl_init(th->vm);
 	    gvl_acquire(th->vm, th);
             rb_native_mutex_initialize(&th->vm->thread_destruct_lock);
+            rb_native_mutex_initialize(&th->vm->waitpid_lock);
             rb_native_mutex_initialize(&th->interrupt_lock);
 
 	    th->pending_interrupt_queue = rb_ary_tmp_new(0);
vm_core.h
 #endif
 
     rb_serial_t fork_gen;
+    rb_nativethread_lock_t waitpid_lock;
+    struct list_head waiting_pids; /* <=> struct waitpid_state */
     struct list_head waiting_fds; /* <=> struct waiting_fd */
     struct list_head living_threads;
     VALUE thgroup_default;
......
 rb_vm_living_threads_init(rb_vm_t *vm)
 {
     list_head_init(&vm->waiting_fds);
+    list_head_init(&vm->waiting_pids);
     list_head_init(&vm->living_threads);
     vm->living_thread_num = 0;
 }