@@ -136,21 +136,22 @@ static textwindows wontreturn void __sig_terminate(int sig) {
   TerminateThisProcess(sig);
 }
 
-textwindows static void __sig_wake(struct PosixThread *pt, int sig) {
+textwindows static bool __sig_wake(struct PosixThread *pt, int sig) {
   atomic_int *blocker;
   blocker = atomic_load_explicit(&pt->pt_blocker, memory_order_acquire);
   if (!blocker)
-    return;
+    return false;
   // threads can create semaphores on an as-needed basis
   if (blocker == PT_BLOCKER_EVENT) {
     STRACE("%G set %d's event object", sig, _pthread_tid(pt));
     SetEvent(pt->pt_event);
-    return;
+    return !!atomic_load_explicit(&pt->pt_blocker, memory_order_acquire);
   }
   // all other blocking ops that aren't overlap should use futexes
   // we force restartable futexes to churn by waking w/o releasing
   STRACE("%G waking %d's futex", sig, _pthread_tid(pt));
   WakeByAddressSingle(blocker);
+  return !!atomic_load_explicit(&pt->pt_blocker, memory_order_acquire);
 }
 
 textwindows static bool __sig_start(struct PosixThread *pt, int sig,
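Aside: a minimal standalone sketch of the contract the new bool return creates, under simplified assumptions (hypothetical Sleeper type, not the real PosixThread/tib structs). The waker reports whether a blocker is still installed after the wake, which the caller later in this patch treats as "the blocked i/o routine will deliver the pending signal itself".

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct Sleeper {
  _Atomic(int *) blocker;  // nonzero while parked on a futex-style wait
  atomic_ulong pending;    // bitmask of queued signals
};

static bool wake(struct Sleeper *s, int sig) {
  (void)sig;  // only used for tracing in the real code
  int *b = atomic_load_explicit(&s->blocker, memory_order_acquire);
  if (!b)
    return false;  // not blocked: caller must preempt or queue instead
  // a real implementation would WakeByAddressSingle(b) here
  // if the blocker is still installed, the interrupted wait owns delivery
  return !!atomic_load_explicit(&s->blocker, memory_order_acquire);
}

int main(void) {
  int waitword = 0;
  struct Sleeper s = {.blocker = &waitword};
  atomic_fetch_or(&s.pending, 1ul << (2 - 1));          // queue signal #2
  printf("wait will self-deliver: %d\n", wake(&s, 2));  // prints 1
}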
@@ -302,17 +303,48 @@ static textwindows int __sig_killer(struct PosixThread *pt, int sig, int sic) {
     return 0;
   }
 
-  // we can't preempt threads that masked sigs or are blocked. we also
-  // need to ensure we don't overflow the target thread's stack if many
-  // signals need to be delivered at once. we also need to make sure two
-  // threads can't deadlock by killing each other at the same time.
-  if ((atomic_load_explicit(&pt->tib->tib_sigmask, memory_order_acquire) &
-       (1ull << (sig - 1))) ||
-      atomic_exchange_explicit(&pt->pt_intoff, 1, memory_order_acquire)) {
-    atomic_fetch_or_explicit(&pt->tib->tib_sigpending, 1ull << (sig - 1),
-                             memory_order_relaxed);
-    __sig_wake(pt, sig);
-    return 0;
+  // we can't preempt threads that masked sigs or are blocked on i/o
+  while ((atomic_load_explicit(&pt->tib->tib_sigmask, memory_order_acquire) &
+          (1ull << (sig - 1)))) {
+    if (atomic_fetch_or_explicit(&pt->tib->tib_sigpending, 1ull << (sig - 1),
+                                 memory_order_acq_rel) &
+        (1ull << (sig - 1)))
+      // we believe signal was already enqueued
+      return 0;
+    if (__sig_wake(pt, sig))
+      // we believe i/o routine will handle signal
+      return 0;
+    if (atomic_load_explicit(&pt->tib->tib_sigmask, memory_order_acquire) &
+        (1ull << (sig - 1)))
+      // we believe ALLOW_SIGNALS will handle signal
+      return 0;
+    if (!(atomic_fetch_and_explicit(&pt->tib->tib_sigpending,
+                                    ~(1ull << (sig - 1)),
+                                    memory_order_acq_rel) &
+          (1ull << (sig - 1))))
+      // we believe another thread sniped our signal
+      return 0;
+    break;
+  }
+
+  // avoid race conditions and deadlocks with thread suspend process
+  if (atomic_exchange_explicit(&pt->pt_intoff, 1, memory_order_acquire)) {
+    // we believe another thread is asynchronously waking the mark
+    if (atomic_fetch_or_explicit(&pt->tib->tib_sigpending, 1ull << (sig - 1),
+                                 memory_order_acq_rel) &
+        (1ull << (sig - 1)))
+      // we believe our signal is already being delivered
+      return 0;
+    if (atomic_load_explicit(&pt->pt_intoff, memory_order_acquire) ||
+        atomic_exchange_explicit(&pt->pt_intoff, 1, memory_order_acquire))
+      // we believe __sig_tramp will deliver our signal
+      return 0;
+    if (!(atomic_fetch_and_explicit(&pt->tib->tib_sigpending,
+                                    ~(1ull << (sig - 1)),
+                                    memory_order_acq_rel) &
+          (1ull << (sig - 1))))
+      // we believe another thread sniped our signal
+      return 0;
   }
 
   // if there's no handler then killing a thread kills the process
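Aside: both new blocks lean on the same lock-free enqueue/roll-back idiom. Here is a compilable sketch of just that idiom under simplified assumptions (a plain atomic_ulong instead of tib_sigpending; the helper names are made up): fetch_or reveals whether the bit was already queued, and fetch_and lets the sender take the bit back when it discovers nobody will consume it.

#include <assert.h>
#include <stdatomic.h>
#include <stdbool.h>

// returns true iff this caller is the one who queued the signal
static bool enqueue_signal(atomic_ulong *pending, int sig) {
  return !(atomic_fetch_or_explicit(pending, 1ul << (sig - 1),
                                    memory_order_acq_rel) &
           (1ul << (sig - 1)));
}

// returns true iff the bit was still ours to reclaim (nobody sniped it)
static bool reclaim_signal(atomic_ulong *pending, int sig) {
  return !!(atomic_fetch_and_explicit(pending, ~(1ul << (sig - 1)),
                                      memory_order_acq_rel) &
            (1ul << (sig - 1)));
}

int main(void) {
  atomic_ulong pending = 0;
  assert(enqueue_signal(&pending, 9));   // first sender wins the enqueue
  assert(!enqueue_signal(&pending, 9));  // second sender sees it queued already
  assert(reclaim_signal(&pending, 9));   // roll back succeeds exactly once
  assert(!reclaim_signal(&pending, 9));
}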
@@ -321,17 +353,10 @@ static textwindows int __sig_killer(struct PosixThread *pt, int sig, int sic) {
     __sig_terminate(sig);
   }
 
-  // ignore signals already pending
-  uintptr_t th = _pthread_syshand(pt);
-  if (atomic_load_explicit(&pt->tib->tib_sigpending, memory_order_acquire) &
-      (1ull << (sig - 1))) {
-    atomic_store_explicit(&pt->pt_intoff, 0, memory_order_release);
-    return 0;
-  }
-
   // take control of thread
   // suspending the thread happens asynchronously
   // however getting the context blocks until it's frozen
+  uintptr_t th = _pthread_syshand(pt);
   if (SuspendThread(th) == -1u) {
     STRACE("SuspendThread failed w/ %d", GetLastError());
     atomic_store_explicit(&pt->pt_intoff, 0, memory_order_release);
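Aside: a hedged sketch of the suspend-and-inspect pattern this section relies on, written against the documented Win32 calls rather than cosmopolitan's wrappers, with error handling trimmed. SuspendThread only requests the suspension; GetThreadContext blocks until the target is actually frozen, which is why the code above only trusts the context once both calls succeed.

#include <windows.h>

// freeze a peer thread and capture its registers; caller resumes it later
static int freeze_and_inspect(HANDLE th, CONTEXT *ctx) {
  if (SuspendThread(th) == (DWORD)-1)
    return -1;                       // couldn't suspend; caller backs off
  ctx->ContextFlags = CONTEXT_FULL;  // request control + integer registers
  if (!GetThreadContext(th, ctx)) {  // blocks until the thread is frozen
    ResumeThread(th);
    return -1;
  }
  return 0;  // ctx->Rip etc. may now be examined, then ResumeThread(th)
}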
@@ -349,9 +374,7 @@ static textwindows int __sig_killer(struct PosixThread *pt, int sig, int sic) {
   // we can't preempt threads that masked sig or are blocked
   // we can't preempt threads that are running in win32 code
   // so we shall unblock the thread and let it signal itself
-  if ((atomic_load_explicit(&pt->tib->tib_sigmask, memory_order_acquire) &
-       (1ull << (sig - 1))) ||
-      !((uintptr_t)__executable_start <= nc.Rip &&
+  if (!((uintptr_t)__executable_start <= nc.Rip &&
         nc.Rip < (uintptr_t)__privileged_start)) {
     atomic_fetch_or_explicit(&pt->tib->tib_sigpending, 1ull << (sig - 1),
                              memory_order_relaxed);
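Aside: with the mask test gone, the only gate left here is an instruction-pointer range check. A small sketch of that test (__executable_start is the usual linker-provided symbol; __privileged_start is cosmopolitan's own bound and assumed here): preemption is attempted only when the frozen RIP lies inside the application's own text, otherwise the signal stays pending for the thread to deliver to itself.

#include <stdbool.h>
#include <stdint.h>

extern char __executable_start[];  // start of application text (linker symbol)
extern char __privileged_start[];  // start of code that must not be preempted

// true when it is safe to redirect the thread at this instruction pointer
static bool may_preempt_at(uintptr_t rip) {
  return (uintptr_t)__executable_start <= rip &&
         rip < (uintptr_t)__privileged_start;
}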
@@ -634,6 +657,7 @@ textwindows dontinstrument static uint32_t __sig_worker(void *arg) {
   __maps_track((char *)(((uintptr_t)sp + __pagesize - 1) & -__pagesize) - STKSZ,
                STKSZ);
   for (;;) {
+
     // dequeue all pending signals and fire them off. if there's no
     // thread that can handle them then __sig_generate will requeue
     // those signals back to __sig.process; hence the need for xchg
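Aside: a standalone sketch of the xchg-style dequeue that comment describes, under simplified assumptions (a bare atomic_ulong queue word; ffsl standing in for cosmo's bsfl): exchanging the whole bitmask for zero claims every queued signal atomically, so a racing producer can only ever set bits in the freshly zeroed word and nothing is lost.

#include <stdatomic.h>
#include <strings.h>  // ffsl(), already 1-based unlike bsfl()

static void drain(atomic_ulong *queue, void fire(int sig)) {
  unsigned long sigs =
      atomic_exchange_explicit(queue, 0, memory_order_acq_rel);
  while (sigs) {
    int sig = ffsl((long)sigs);  // lowest queued signal number
    sigs &= ~(1ul << (sig - 1));
    fire(sig);  // may re-queue the signal if no thread can take it
  }
}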
@@ -644,6 +668,39 @@ textwindows dontinstrument static uint32_t __sig_worker(void *arg) {
       sigs &= ~(1ull << (sig - 1));
       __sig_generate(sig, SI_KERNEL);
     }
+
+    // unblock stalled asynchronous signals in threads
+    _pthread_lock();
+    for (struct Dll *e = dll_first(_pthread_list); e;
+         e = dll_next(_pthread_list, e)) {
+      struct PosixThread *pt = POSIXTHREAD_CONTAINER(e);
+      if (atomic_load_explicit(&pt->pt_status, memory_order_acquire) >=
+          kPosixThreadTerminated) {
+        break;
+      }
+      sigset_t pending =
+          atomic_load_explicit(&pt->tib->tib_sigpending, memory_order_acquire);
+      sigset_t mask =
+          atomic_load_explicit(&pt->tib->tib_sigmask, memory_order_acquire);
+      if (pending & ~mask) {
+        _pthread_ref(pt);
+        _pthread_unlock();
+        while (!atomic_compare_exchange_weak_explicit(
+            &pt->tib->tib_sigpending, &pending, pending & ~mask,
+            memory_order_acq_rel, memory_order_relaxed)) {
+        }
+        while ((pending = pending & ~mask)) {
+          int sig = bsfl(pending) + 1;
+          pending &= ~(1ull << (sig - 1));
+          __sig_killer(pt, sig, SI_KERNEL);
+        }
+        _pthread_lock();
+        _pthread_unref(pt);
+      }
+    }
+    _pthread_unlock();
+
+    // wait until next scheduler quantum
     Sleep(POLL_INTERVAL_MS);
   }
   return 0;
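Aside: the worker's unblock pass edits tib_sigpending without holding the pthread lock by using the standard compare_exchange_weak retry idiom. A compilable sketch of just that idiom (standalone word, made-up helper name): on failure the expected value is refreshed automatically, so the update is always recomputed from a current snapshot.

#include <stdatomic.h>

// atomically clear `bits` from *word; returns the bits that were actually set
static unsigned long clear_bits(atomic_ulong *word, unsigned long bits) {
  unsigned long old = atomic_load_explicit(word, memory_order_acquire);
  while (!atomic_compare_exchange_weak_explicit(
      word, &old, old & ~bits, memory_order_acq_rel, memory_order_relaxed)) {
    // `old` now holds the current value; the loop recomputes old & ~bits
  }
  return old & bits;
}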