@@ -87,7 +87,7 @@ __msabi extern typeof(VirtualProtectEx) *const __imp_VirtualProtectEx;
 __msabi extern typeof(VirtualQuery) *const __imp_VirtualQuery;
 __msabi extern typeof(WriteFile) *const __imp_WriteFile;

-extern pthread_mutex_t __sig_worker_lock;
+atomic_int __sig_worker_state;

 textwindows static bool __sig_ignored_by_default(int sig) {
   return sig == SIGURG ||  //
@@ -742,74 +742,77 @@ HAIRY static uint32_t __sig_worker(void *arg) {
                STKSZ, PROT_READ | PROT_WRITE,
                MAP_PRIVATE | MAP_ANONYMOUS | MAP_NOFORK);
   for (;;) {
-    _pthread_mutex_lock(&__sig_worker_lock);
-
-    // dequeue all pending signals and fire them off. if there's no
-    // thread that can handle them then __sig_generate will requeue
-    // those signals back to __sig.process; hence the need for xchg
-    unsigned long sigs =
-        atomic_exchange_explicit(__sig.process, 0, memory_order_acq_rel);
-    while (sigs) {
-      int sig = bsfl(sigs) + 1;
-      sigs &= ~(1ull << (sig - 1));
-      __sig_generate(sig, SI_KERNEL);
-    }
-
-    // unblock stalled i/o signals in threads
-    _pthread_lock();
-    for (struct Dll *e = dll_first(_pthread_list); e;
-         e = dll_next(_pthread_list, e)) {
-      struct PosixThread *pt = POSIXTHREAD_CONTAINER(e);
-      if (atomic_load_explicit(&pt->pt_status, memory_order_acquire) >=
-          kPosixThreadTerminated)
-        break;
-      if (atomic_load_explicit(&pt->pt_blocker, memory_order_acquire) &&
-          (atomic_load_explicit(&pt->tib->tib_sigpending,
-                                memory_order_acquire) &
-           ~atomic_load_explicit(&pt->pt_blkmask, memory_order_acquire)))
-        __sig_wake(pt, 0);
-    }
-    _pthread_unlock();
+    // ok sys_execve_nt() might disable this worker
+    if (~__sig_worker_state & 2) {
+
+      // dequeue all pending signals and fire them off. if there's no
+      // thread that can handle them then __sig_generate will requeue
+      // those signals back to __sig.process; hence the need for xchg
+      unsigned long sigs =
+          atomic_exchange_explicit(__sig.process, 0, memory_order_acq_rel);
+      while (sigs) {
+        int sig = bsfl(sigs) + 1;
+        sigs &= ~(1ull << (sig - 1));
+        __sig_generate(sig, SI_KERNEL);
+      }

-    // unblock stalled asynchronous signals in threads
-    for (;;) {
-      sigset_t pending, mask;
-      struct PosixThread *mark = 0;
+      // unblock stalled i/o signals in threads
       _pthread_lock();
       for (struct Dll *e = dll_first(_pthread_list); e;
            e = dll_next(_pthread_list, e)) {
         struct PosixThread *pt = POSIXTHREAD_CONTAINER(e);
         if (atomic_load_explicit(&pt->pt_status, memory_order_acquire) >=
             kPosixThreadTerminated)
           break;
-        pending = atomic_load_explicit(&pt->tib->tib_sigpending,
-                                       memory_order_acquire);
-        mask =
-            atomic_load_explicit(&pt->tib->tib_sigmask, memory_order_acquire);
-        if (pending & ~mask) {
-          _pthread_ref(pt);
-          mark = pt;
-          break;
-        }
+        if (atomic_load_explicit(&pt->pt_blocker, memory_order_acquire) &&
+            (atomic_load_explicit(&pt->tib->tib_sigpending,
+                                  memory_order_acquire) &
+             ~atomic_load_explicit(&pt->pt_blkmask, memory_order_acquire)))
+          __sig_wake(pt, 0);
       }
       _pthread_unlock();
-      if (!mark)
-        break;
-      while (!atomic_compare_exchange_weak_explicit(
-          &mark->tib->tib_sigpending, &pending, pending & ~mask,
-          memory_order_acq_rel, memory_order_relaxed)) {
-      }
-      while ((pending = pending & ~mask)) {
-        int sig = bsfl(pending) + 1;
-        pending &= ~(1ull << (sig - 1));
-        __sig_killer(mark, sig, SI_KERNEL);
+
+      // unblock stalled asynchronous signals in threads
+      for (;;) {
+        sigset_t pending, mask;
+        struct PosixThread *mark = 0;
+        _pthread_lock();
+        for (struct Dll *e = dll_first(_pthread_list); e;
+             e = dll_next(_pthread_list, e)) {
+          struct PosixThread *pt = POSIXTHREAD_CONTAINER(e);
+          if (atomic_load_explicit(&pt->pt_status, memory_order_acquire) >=
+              kPosixThreadTerminated)
+            break;
+          pending = atomic_load_explicit(&pt->tib->tib_sigpending,
+                                         memory_order_acquire);
+          mask =
+              atomic_load_explicit(&pt->tib->tib_sigmask, memory_order_acquire);
+          if (pending & ~mask) {
+            _pthread_ref(pt);
+            mark = pt;
+            break;
+          }
+        }
+        _pthread_unlock();
+        if (!mark)
+          break;
+        while (!atomic_compare_exchange_weak_explicit(
+            &mark->tib->tib_sigpending, &pending, pending & ~mask,
+            memory_order_acq_rel, memory_order_relaxed)) {
+        }
+        while ((pending = pending & ~mask)) {
+          int sig = bsfl(pending) + 1;
+          pending &= ~(1ull << (sig - 1));
+          __sig_killer(mark, sig, SI_KERNEL);
+        }
+        _pthread_unref(mark);
       }
-      _pthread_unref(mark);
     }

     // wait until next scheduler quantum
-    _pthread_mutex_unlock(&__sig_worker_lock);
+    __sig_worker_state |= 1;
     Sleep(POLL_INTERVAL_MS);
+    __sig_worker_state &= ~1;
   }
   __builtin_unreachable();
 }
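The diff replaces the __sig_worker_lock mutex with an atomic_int whose two low bits appear to form a small handshake: bit 1 (value 2) is a stand-down request, which the new comment attributes to sys_execve_nt(), and bit 0 (value 1) is published by the worker only while it sleeps between polls. Below is a minimal standalone C sketch of that handshake under those assumptions; the names, the pause_worker() helper, and the timing constants are invented for illustration and are not part of this commit.

// handshake.c - illustrative sketch of the two-bit worker handshake.
// Bit 0 is set by the worker while it naps between polls; bit 1 asks
// the worker to stop polling. None of these names exist in the source.
#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

#define WORKER_SLEEPING 1 /* bit 0: worker parked in its sleep call     */
#define WORKER_DISABLED 2 /* bit 1: another thread asked it to stand by */

static atomic_int worker_state;
static atomic_bool worker_quit;

static void *signal_worker(void *arg) {
  (void)arg;
  while (!atomic_load(&worker_quit)) {
    // mirrors `if (~__sig_worker_state & 2)` in the diff: skip the poll
    // body whenever a stand-down has been requested
    if (~atomic_load(&worker_state) & WORKER_DISABLED) {
      /* ... dequeue pending signals, wake stalled threads, etc ... */
    }
    // mirrors `|= 1` / Sleep / `&= ~1`: advertise the nap window
    atomic_fetch_or(&worker_state, WORKER_SLEEPING);
    usleep(20 * 1000); /* stand-in for Sleep(POLL_INTERVAL_MS) */
    atomic_fetch_and(&worker_state, ~WORKER_SLEEPING);
  }
  return 0;
}

// hypothetical caller side, loosely modeled on what sys_execve_nt() might
// do: request a stand-down, then wait until the worker is observed napping
// so that no poll iteration is still in flight.
static void pause_worker(void) {
  atomic_fetch_or(&worker_state, WORKER_DISABLED);
  while (!(atomic_load(&worker_state) & WORKER_SLEEPING))
    usleep(1000);
}

int main(void) {
  pthread_t th;
  pthread_create(&th, 0, signal_worker, 0);
  usleep(50 * 1000);
  pause_worker();
  puts("worker parked; safe to do exclusive work");
  atomic_store(&worker_quit, true);
  pthread_join(th, 0);
}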