std/sys/pal/unix/stack_overflow.rs

#![cfg_attr(test, allow(dead_code))]

pub use self::imp::{cleanup, init};
use self::imp::{drop_handler, make_handler};

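/// Owns the alternate signal stack installed by `make_handler`: `data` points
/// at the usable base of that stack (one guard page above the start of its
/// mapping), or is null if no alternate stack was installed.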
pub struct Handler {
    data: *mut libc::c_void,
}

impl Handler {
    pub unsafe fn new() -> Handler {
        make_handler(false)
    }

    fn null() -> Handler {
        Handler { data: crate::ptr::null_mut() }
    }
}

impl Drop for Handler {
    fn drop(&mut self) {
        unsafe {
            drop_handler(self.data);
        }
    }
}

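// Per-thread metadata (guard page range, thread id, name) consulted by the
// signal handler; populated via `set_current_info` and read via
// `with_current_info`.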
#[cfg(all(
    not(miri),
    any(
        target_os = "linux",
        target_os = "freebsd",
        target_os = "hurd",
        target_os = "macos",
        target_os = "netbsd",
        target_os = "openbsd",
        target_os = "solaris",
        target_os = "illumos",
    ),
))]
mod thread_info;

// miri doesn't model signals nor stack overflows and this code has some
// synchronization properties that we don't want to expose to user code,
// hence we disable it on miri.
#[cfg(all(
    not(miri),
    any(
        target_os = "linux",
        target_os = "freebsd",
        target_os = "hurd",
        target_os = "macos",
        target_os = "netbsd",
        target_os = "openbsd",
        target_os = "solaris",
        target_os = "illumos",
    )
))]
mod imp {
    use libc::{
        MAP_ANON, MAP_FAILED, MAP_FIXED, MAP_PRIVATE, PROT_NONE, PROT_READ, PROT_WRITE, SA_ONSTACK,
        SA_SIGINFO, SIG_DFL, SIGBUS, SIGSEGV, SS_DISABLE, sigaction, sigaltstack, sighandler_t,
    };
    #[cfg(not(all(target_os = "linux", target_env = "gnu")))]
    use libc::{mmap as mmap64, mprotect, munmap};
    #[cfg(all(target_os = "linux", target_env = "gnu"))]
    use libc::{mmap64, mprotect, munmap};

    use super::Handler;
    use super::thread_info::{delete_current_info, set_current_info, with_current_info};
    use crate::ops::Range;
    use crate::sync::atomic::{Atomic, AtomicBool, AtomicPtr, AtomicUsize, Ordering};
    use crate::sys::pal::unix::os;
    use crate::{io, mem, ptr};

    // Signal handler for SIGSEGV and SIGBUS. We've got guard pages (unmapped
    // pages) at the end of every thread's stack, so if a thread ends up
    // running into the guard page it'll trigger this handler. We want to
    // detect these cases and print out a helpful error saying that the stack
    // has overflowed. All other signals, however, should go back to what they
    // were originally supposed to do.
    //
    // This handler currently exists purely to print an informative message
    // whenever a thread overflows its stack. We then abort to exit and
    // indicate a crash; aborting avoids a misleading SIGSEGV that might lead
    // users to believe that unsafe code has accessed an invalid pointer,
    // whereas the SIGSEGV encountered when overflowing the stack is expected
    // and well-defined.
    //
    // If this is not a stack overflow, the handler un-registers itself and
    // then returns (to allow the original signal to be delivered again).
    // Returning from this kind of signal handler is technically not defined
    // to work when reading the POSIX spec strictly, but in practice many
    // large systems and all known implementations allow returning from a
    // signal handler. For a more detailed explanation see the comments on
    // #26458.
    /// SIGSEGV/SIGBUS entry point
    /// # Safety
    /// Rust doesn't call this, it *gets called*.
    #[forbid(unsafe_op_in_unsafe_fn)]
    unsafe extern "C" fn signal_handler(
        signum: libc::c_int,
        info: *mut libc::siginfo_t,
        _data: *mut libc::c_void,
    ) {
        // SAFETY: this pointer is provided by the system and will always point to a valid `siginfo_t`.
        let fault_addr = unsafe { (*info).si_addr().addr() };

        // `with_current_info` expects that the process aborts after it is
        // called. If the signal was not caused by a memory access, this might
        // not be true. We detect this by noticing that the `si_addr` field is
        // zero if the signal is synthetic.
        if fault_addr != 0 {
            with_current_info(|thread_info| {
                // If the faulting address is within the guard page, then we print a
                // message saying so and abort.
                if let Some(thread_info) = thread_info
                    && thread_info.guard_page_range.contains(&fault_addr)
                {
                    // Hey you! Yes, you modifying the stack overflow message!
                    // Please make sure that all functions called here are
                    // actually async-signal-safe. If they're not, try retrieving
                    // the information beforehand and storing it in `ThreadInfo`.
                    // Thank you!
                    // - says Jonas after having had to watch his carefully
                    //   written code get made unsound again.
                    let tid = thread_info.tid;
                    let name = thread_info.name.as_deref().unwrap_or("<unknown>");
                    rtprintpanic!("\nthread '{name}' ({tid}) has overflowed its stack\n");
                    rtabort!("stack overflow");
                }
            })
        }

        // Unregister ourselves by reverting back to the default behavior.
        // SAFETY: assuming all platforms define struct sigaction as "zero-initializable"
        let mut action: sigaction = unsafe { mem::zeroed() };
        action.sa_sigaction = SIG_DFL;
        // SAFETY: pray this is a well-behaved POSIX implementation of fn sigaction
        unsafe { sigaction(signum, &action, ptr::null_mut()) };

        // See comment above for why this function returns.
    }

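    // Shared state: the page size cached in `init`, the main thread's alternate
    // signal stack (freed again in `cleanup`), and whether `init` decided that
    // alternate signal stacks are needed at all.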
    static PAGE_SIZE: Atomic<usize> = AtomicUsize::new(0);
    static MAIN_ALTSTACK: Atomic<*mut libc::c_void> = AtomicPtr::new(ptr::null_mut());
    static NEED_ALTSTACK: Atomic<bool> = AtomicBool::new(false);

    /// # Safety
    /// Must be called only once
    #[forbid(unsafe_op_in_unsafe_fn)]
    pub unsafe fn init() {
        PAGE_SIZE.store(os::page_size(), Ordering::Relaxed);

        let mut guard_page_range = unsafe { install_main_guard() };

        // Even for panic=immediate-abort, installing the guard pages is important for soundness.
        // That said, we do not care about giving nice stack overflow messages via our custom
        // signal handler, so just exit early and let the user enjoy the segfault.
        if cfg!(panic = "immediate-abort") {
            return;
        }

        // SAFETY: assuming all platforms define struct sigaction as "zero-initializable"
        let mut action: sigaction = unsafe { mem::zeroed() };
        for &signal in &[SIGSEGV, SIGBUS] {
            // SAFETY: just fetches the current signal handler into action
            unsafe { sigaction(signal, ptr::null_mut(), &mut action) };
            // Configure our signal handler if one is not already set.
            if action.sa_sigaction == SIG_DFL {
                if !NEED_ALTSTACK.load(Ordering::Relaxed) {
                    // haven't set up our sigaltstack yet
                    NEED_ALTSTACK.store(true, Ordering::Release);
                    let handler = unsafe { make_handler(true) };
                    MAIN_ALTSTACK.store(handler.data, Ordering::Relaxed);
                    mem::forget(handler);

                    if let Some(guard_page_range) = guard_page_range.take() {
                        set_current_info(guard_page_range);
                    }
                }

                action.sa_flags = SA_SIGINFO | SA_ONSTACK;
                action.sa_sigaction = signal_handler
                    as unsafe extern "C" fn(i32, *mut libc::siginfo_t, *mut libc::c_void)
                    as sighandler_t;
                // SAFETY: only overriding signals if the default is set
                unsafe { sigaction(signal, &action, ptr::null_mut()) };
            }
        }
    }

    /// # Safety
    /// Must be called only once
    #[forbid(unsafe_op_in_unsafe_fn)]
    pub unsafe fn cleanup() {
        if cfg!(panic = "immediate-abort") {
            return;
        }
        // FIXME: I probably cause more bugs than I'm worth!
        // see https://github.com/rust-lang/rust/issues/111272
        unsafe { drop_handler(MAIN_ALTSTACK.load(Ordering::Relaxed)) };
    }

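    /// Maps a fresh alternate signal stack consisting of one PROT_NONE guard
    /// page followed by `sigstack_size()` usable bytes; the returned `ss_sp`
    /// points just past the guard page.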
    unsafe fn get_stack() -> libc::stack_t {
        // OpenBSD requires this flag for stack mapping
        // otherwise the said mapping will fail as a no-op on most systems
        // and has a different meaning on FreeBSD
        #[cfg(any(
            target_os = "openbsd",
            target_os = "netbsd",
            target_os = "linux",
            target_os = "dragonfly",
        ))]
        let flags = MAP_PRIVATE | MAP_ANON | libc::MAP_STACK;
        #[cfg(not(any(
            target_os = "openbsd",
            target_os = "netbsd",
            target_os = "linux",
            target_os = "dragonfly",
        )))]
        let flags = MAP_PRIVATE | MAP_ANON;

        let sigstack_size = sigstack_size();
        let page_size = PAGE_SIZE.load(Ordering::Relaxed);

        let stackp = mmap64(
            ptr::null_mut(),
            sigstack_size + page_size,
            PROT_READ | PROT_WRITE,
            flags,
            -1,
            0,
        );
        if stackp == MAP_FAILED {
            panic!("failed to allocate an alternative stack: {}", io::Error::last_os_error());
        }
        let guard_result = libc::mprotect(stackp, page_size, PROT_NONE);
        if guard_result != 0 {
            panic!("failed to set up alternative stack guard page: {}", io::Error::last_os_error());
        }
        let stackp = stackp.add(page_size);

        libc::stack_t { ss_sp: stackp, ss_flags: 0, ss_size: sigstack_size }
    }

    /// # Safety
    /// Mutates the alternate signal stack
    #[forbid(unsafe_op_in_unsafe_fn)]
    pub unsafe fn make_handler(main_thread: bool) -> Handler {
        if cfg!(panic = "immediate-abort") || !NEED_ALTSTACK.load(Ordering::Acquire) {
            return Handler::null();
        }

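        // For non-main threads, record the guard page range now so the signal
        // handler can distinguish a stack overflow from other faults. (The main
        // thread's range was already recorded in `init`.)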
        if !main_thread {
            if let Some(guard_page_range) = unsafe { current_guard() } {
                set_current_info(guard_page_range);
            }
        }

        // SAFETY: assuming stack_t is zero-initializable
        let mut stack = unsafe { mem::zeroed() };
        // SAFETY: reads current stack_t into stack
        unsafe { sigaltstack(ptr::null(), &mut stack) };
        // Configure alternate signal stack, if one is not already set.
        if stack.ss_flags & SS_DISABLE != 0 {
            // SAFETY: We warned our caller this would happen!
            unsafe {
                stack = get_stack();
                sigaltstack(&stack, ptr::null_mut());
            }
            Handler { data: stack.ss_sp as *mut libc::c_void }
        } else {
            Handler::null()
        }
    }

    /// # Safety
    /// Must be called
    /// - only with our handler or nullptr
    /// - only when done with our altstack
    ///
    /// This disables the alternate signal stack!
    #[forbid(unsafe_op_in_unsafe_fn)]
    pub unsafe fn drop_handler(data: *mut libc::c_void) {
        if !data.is_null() {
            let sigstack_size = sigstack_size();
            let page_size = PAGE_SIZE.load(Ordering::Relaxed);
            let disabling_stack = libc::stack_t {
                ss_sp: ptr::null_mut(),
                ss_flags: SS_DISABLE,
                // Workaround for bug in macOS implementation of sigaltstack
                // UNIX2003 which returns ENOMEM when disabling a stack while
                // passing ss_size smaller than MINSIGSTKSZ. According to POSIX
                // both ss_sp and ss_size should be ignored in this case.
                ss_size: sigstack_size,
            };
            // SAFETY: we warned the caller this disables the alternate signal stack!
            unsafe { sigaltstack(&disabling_stack, ptr::null_mut()) };
            // SAFETY: We know from `get_stack` that the alternate stack we installed is part of
            // a mapping that started one page earlier, so walk back a page and unmap from there.
            unsafe { munmap(data.sub(page_size), sigstack_size + page_size) };
        }

        delete_current_info();
    }

    /// Modern kernels on modern hardware can have dynamic signal stack sizes.
    #[cfg(any(target_os = "linux", target_os = "android"))]
    fn sigstack_size() -> usize {
        let dynamic_sigstksz = unsafe { libc::getauxval(libc::AT_MINSIGSTKSZ) };
        // If getauxval couldn't find the entry, it returns 0,
        // so take the higher of the "constant" and auxval.
        // This transparently supports older kernels which don't provide AT_MINSIGSTKSZ
        libc::SIGSTKSZ.max(dynamic_sigstksz as _)
    }

    /// Not all OS support hardware where this is needed.
    #[cfg(not(any(target_os = "linux", target_os = "android")))]
    fn sigstack_size() -> usize {
        libc::SIGSTKSZ
    }

    #[cfg(any(target_os = "solaris", target_os = "illumos"))]
    unsafe fn get_stack_start() -> Option<*mut libc::c_void> {
        let mut current_stack: libc::stack_t = crate::mem::zeroed();
        assert_eq!(libc::stack_getbounds(&mut current_stack), 0);
        Some(current_stack.ss_sp)
    }

    #[cfg(target_os = "macos")]
    unsafe fn get_stack_start() -> Option<*mut libc::c_void> {
        let th = libc::pthread_self();
        let stackptr = libc::pthread_get_stackaddr_np(th);
        Some(stackptr.map_addr(|addr| addr - libc::pthread_get_stacksize_np(th)))
    }

    #[cfg(target_os = "openbsd")]
    unsafe fn get_stack_start() -> Option<*mut libc::c_void> {
        let mut current_stack: libc::stack_t = crate::mem::zeroed();
        assert_eq!(libc::pthread_stackseg_np(libc::pthread_self(), &mut current_stack), 0);

        let stack_ptr = current_stack.ss_sp;
        let stackaddr = if libc::pthread_main_np() == 1 {
            // main thread
            stack_ptr.addr() - current_stack.ss_size + PAGE_SIZE.load(Ordering::Relaxed)
        } else {
            // new thread
            stack_ptr.addr() - current_stack.ss_size
        };
        Some(stack_ptr.with_addr(stackaddr))
    }

    #[cfg(any(
        target_os = "android",
        target_os = "freebsd",
        target_os = "netbsd",
        target_os = "hurd",
        target_os = "linux",
        target_os = "l4re"
    ))]
    unsafe fn get_stack_start() -> Option<*mut libc::c_void> {
        let mut ret = None;
        let mut attr: mem::MaybeUninit<libc::pthread_attr_t> = mem::MaybeUninit::uninit();
        if !cfg!(target_os = "freebsd") {
            attr = mem::MaybeUninit::zeroed();
        }
        #[cfg(target_os = "freebsd")]
        assert_eq!(libc::pthread_attr_init(attr.as_mut_ptr()), 0);
        #[cfg(target_os = "freebsd")]
        let e = libc::pthread_attr_get_np(libc::pthread_self(), attr.as_mut_ptr());
        #[cfg(not(target_os = "freebsd"))]
        let e = libc::pthread_getattr_np(libc::pthread_self(), attr.as_mut_ptr());
        if e == 0 {
            let mut stackaddr = crate::ptr::null_mut();
            let mut stacksize = 0;
            assert_eq!(
                libc::pthread_attr_getstack(attr.as_ptr(), &mut stackaddr, &mut stacksize),
                0
            );
            ret = Some(stackaddr);
        }
        if e == 0 || cfg!(target_os = "freebsd") {
            assert_eq!(libc::pthread_attr_destroy(attr.as_mut_ptr()), 0);
        }
        ret
    }

    fn stack_start_aligned(page_size: usize) -> Option<*mut libc::c_void> {
        let stackptr = unsafe { get_stack_start()? };
        let stackaddr = stackptr.addr();

        // Ensure stackaddr is page aligned! A parent process might
        // have reset RLIMIT_STACK to be non-page aligned. The
        // pthread_attr_getstack() reports the usable stack area
        // stackaddr < stackaddr + stacksize, so if stackaddr is not
        // page-aligned, calculate the fix such that stackaddr <
        // new_page_aligned_stackaddr < stackaddr + stacksize
        let remainder = stackaddr % page_size;
        Some(if remainder == 0 {
            stackptr
        } else {
            stackptr.with_addr(stackaddr + page_size - remainder)
        })
    }

    #[forbid(unsafe_op_in_unsafe_fn)]
    unsafe fn install_main_guard() -> Option<Range<usize>> {
        let page_size = PAGE_SIZE.load(Ordering::Relaxed);

        unsafe {
            // this way someone on any unix-y OS can check that all these compile
            if cfg!(all(target_os = "linux", not(target_env = "musl"))) {
                install_main_guard_linux(page_size)
            } else if cfg!(all(target_os = "linux", target_env = "musl")) {
                install_main_guard_linux_musl(page_size)
            } else if cfg!(target_os = "freebsd") {
                // The FreeBSD code cannot be checked on non-BSDs.
                #[cfg(not(target_os = "freebsd"))]
                return None;
                #[cfg(target_os = "freebsd")]
                install_main_guard_freebsd(page_size)
            } else if cfg!(any(target_os = "netbsd", target_os = "openbsd")) {
                install_main_guard_bsds(page_size)
            } else {
                install_main_guard_default(page_size)
            }
        }
    }

    #[forbid(unsafe_op_in_unsafe_fn)]
    unsafe fn install_main_guard_linux(page_size: usize) -> Option<Range<usize>> {
        // Linux doesn't allocate the whole stack right away, and
        // the kernel has its own stack-guard mechanism to fault
        // when growing too close to an existing mapping. If we map
        // our own guard, then the kernel starts enforcing a rather
        // large gap above that, rendering much of the possible
        // stack space useless. See #43052.
        //
        // Instead, we'll just note where we expect rlimit to start
        // faulting, so our handler can report "stack overflow", and
        // trust that the kernel's own stack guard will work.
        let stackptr = stack_start_aligned(page_size)?;
        let stackaddr = stackptr.addr();
        Some(stackaddr - page_size..stackaddr)
    }

    #[forbid(unsafe_op_in_unsafe_fn)]
    unsafe fn install_main_guard_linux_musl(_page_size: usize) -> Option<Range<usize>> {
        // For the main thread, musl's pthread_attr_getstack returns the
        // current stack size rather than the maximum size the stack can
        // eventually grow to, so it cannot be used to determine the
        // position of the kernel's stack guard.
        None
    }

    #[forbid(unsafe_op_in_unsafe_fn)]
    #[cfg(target_os = "freebsd")]
    unsafe fn install_main_guard_freebsd(page_size: usize) -> Option<Range<usize>> {
        // FreeBSD's stack autogrows, and optionally includes a guard page
        // at the bottom. If we try to remap the bottom of the stack
        // ourselves, FreeBSD's guard page moves upwards. So we'll just use
        // the builtin guard page.
        let stackptr = stack_start_aligned(page_size)?;
        let guardaddr = stackptr.addr();
        // Technically the number of guard pages is tunable and controlled
        // by the security.bsd.stack_guard_page sysctl.
        // By default it is 1, checking once is enough since it is
        // a boot time config value.
        static PAGES: crate::sync::OnceLock<usize> = crate::sync::OnceLock::new();

        let pages = PAGES.get_or_init(|| {
            let mut guard: usize = 0;
            let mut size = size_of_val(&guard);
            let oid = c"security.bsd.stack_guard_page";

            let r = unsafe {
                libc::sysctlbyname(
                    oid.as_ptr(),
                    (&raw mut guard).cast(),
                    &raw mut size,
                    ptr::null_mut(),
                    0,
                )
            };
            if r == 0 { guard } else { 1 }
        });
        Some(guardaddr..guardaddr + pages * page_size)
    }

    #[forbid(unsafe_op_in_unsafe_fn)]
    unsafe fn install_main_guard_bsds(page_size: usize) -> Option<Range<usize>> {
        // OpenBSD stack already includes a guard page, and stack is
        // immutable.
        // NetBSD stack includes the guard page.
        //
        // We'll just note where we expect rlimit to start
        // faulting, so our handler can report "stack overflow", and
        // trust that the kernel's own stack guard will work.
        let stackptr = stack_start_aligned(page_size)?;
        let stackaddr = stackptr.addr();
        Some(stackaddr - page_size..stackaddr)
    }

    #[forbid(unsafe_op_in_unsafe_fn)]
    unsafe fn install_main_guard_default(page_size: usize) -> Option<Range<usize>> {
        // Reallocate the last page of the stack.
        // This ensures SIGBUS will be raised on
        // stack overflow.
        // Systems which enforce strict PAX MPROTECT do not allow
        // mprotect()ing a mapping with less restrictive permissions
        // than the initial mmap() used, so we mmap() here with
        // read/write permissions and only then mprotect() it to
        // no permissions at all. See issue #50313.
        let stackptr = stack_start_aligned(page_size)?;
        let result = unsafe {
            mmap64(
                stackptr,
                page_size,
                PROT_READ | PROT_WRITE,
                MAP_PRIVATE | MAP_ANON | MAP_FIXED,
                -1,
                0,
            )
        };
        if result != stackptr || result == MAP_FAILED {
            panic!("failed to allocate a guard page: {}", io::Error::last_os_error());
        }

        let result = unsafe { mprotect(stackptr, page_size, PROT_NONE) };
        if result != 0 {
            panic!("failed to protect the guard page: {}", io::Error::last_os_error());
        }

        let guardaddr = stackptr.addr();

        Some(guardaddr..guardaddr + page_size)
    }

    #[cfg(any(
        target_os = "macos",
        target_os = "openbsd",
        target_os = "solaris",
        target_os = "illumos",
    ))]
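    // Assume the guard page sits directly below the reported stack start.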
    // FIXME: I am probably not unsafe.
    unsafe fn current_guard() -> Option<Range<usize>> {
        let stackptr = get_stack_start()?;
        let stackaddr = stackptr.addr();
        Some(stackaddr - PAGE_SIZE.load(Ordering::Relaxed)..stackaddr)
    }

    #[cfg(any(
        target_os = "android",
        target_os = "freebsd",
        target_os = "hurd",
        target_os = "linux",
        target_os = "netbsd",
        target_os = "l4re"
    ))]
    // FIXME: I am probably not unsafe.
    unsafe fn current_guard() -> Option<Range<usize>> {
        let mut ret = None;

        let mut attr: mem::MaybeUninit<libc::pthread_attr_t> = mem::MaybeUninit::uninit();
        if !cfg!(target_os = "freebsd") {
            attr = mem::MaybeUninit::zeroed();
        }
        #[cfg(target_os = "freebsd")]
        assert_eq!(libc::pthread_attr_init(attr.as_mut_ptr()), 0);
        #[cfg(target_os = "freebsd")]
        let e = libc::pthread_attr_get_np(libc::pthread_self(), attr.as_mut_ptr());
        #[cfg(not(target_os = "freebsd"))]
        let e = libc::pthread_getattr_np(libc::pthread_self(), attr.as_mut_ptr());
        if e == 0 {
            let mut guardsize = 0;
            assert_eq!(libc::pthread_attr_getguardsize(attr.as_ptr(), &mut guardsize), 0);
            if guardsize == 0 {
                if cfg!(all(target_os = "linux", target_env = "musl")) {
                    // musl versions before 1.1.19 always reported guard
                    // size obtained from pthread_attr_get_np as zero.
                    // Use page size as a fallback.
                    guardsize = PAGE_SIZE.load(Ordering::Relaxed);
                } else {
                    panic!("there is no guard page");
                }
            }
            let mut stackptr = crate::ptr::null_mut::<libc::c_void>();
            let mut size = 0;
            assert_eq!(libc::pthread_attr_getstack(attr.as_ptr(), &mut stackptr, &mut size), 0);

            let stackaddr = stackptr.addr();
            ret = if cfg!(any(target_os = "freebsd", target_os = "netbsd", target_os = "hurd")) {
                Some(stackaddr - guardsize..stackaddr)
            } else if cfg!(all(target_os = "linux", target_env = "musl")) {
                Some(stackaddr - guardsize..stackaddr)
            } else if cfg!(all(target_os = "linux", any(target_env = "gnu", target_env = "uclibc")))
            {
                // glibc used to include the guard area within the stack, as noted in the BUGS
                // section of `man pthread_attr_getguardsize`. This has been corrected starting
                // with glibc 2.27, and in some distro backports, so the guard is now placed at
                // the end of (below) the stack. There's no easy way for us to know which layout
                // we have at runtime, so we treat any fault in the range right above or below
                // the stack base as a stack overflow.
                Some(stackaddr - guardsize..stackaddr + guardsize)
            } else {
                Some(stackaddr..stackaddr + guardsize)
            };
        }
        if e == 0 || cfg!(target_os = "freebsd") {
            assert_eq!(libc::pthread_attr_destroy(attr.as_mut_ptr()), 0);
        }
        ret
    }
}

// This is intentionally not enabled on iOS/tvOS/watchOS/visionOS, as it uses
// several symbols that might lead to rejections from the App Store, namely
// `sigaction`, `sigaltstack`, `sysctlbyname`, `mmap`, `munmap` and `mprotect`.
//
// This might be overly cautious, though it is also what Swift does (and they
// usually have fewer qualms about forwards compatibility, since the runtime
// is shipped with the OS):
// <https://github.com/apple/swift/blob/swift-5.10-RELEASE/stdlib/public/runtime/CrashHandlerMacOS.cpp>
#[cfg(any(
    miri,
    not(any(
        target_os = "linux",
        target_os = "freebsd",
        target_os = "hurd",
        target_os = "macos",
        target_os = "netbsd",
        target_os = "openbsd",
        target_os = "solaris",
        target_os = "illumos",
        target_os = "cygwin",
    ))
))]
mod imp {
    pub unsafe fn init() {}

    pub unsafe fn cleanup() {}

    pub unsafe fn make_handler(_main_thread: bool) -> super::Handler {
        super::Handler::null()
    }

    pub unsafe fn drop_handler(_data: *mut libc::c_void) {}
}

#[cfg(target_os = "cygwin")]
mod imp {
    mod c {
        pub type PVECTORED_EXCEPTION_HANDLER =
            Option<unsafe extern "system" fn(exceptioninfo: *mut EXCEPTION_POINTERS) -> i32>;
        pub type NTSTATUS = i32;
        pub type BOOL = i32;

        unsafe extern "system" {
            pub fn AddVectoredExceptionHandler(
                first: u32,
                handler: PVECTORED_EXCEPTION_HANDLER,
            ) -> *mut core::ffi::c_void;
            pub fn SetThreadStackGuarantee(stacksizeinbytes: *mut u32) -> BOOL;
        }

        pub const EXCEPTION_STACK_OVERFLOW: NTSTATUS = 0xC00000FD_u32 as _;
        pub const EXCEPTION_CONTINUE_SEARCH: i32 = 1i32;

        #[repr(C)]
        #[derive(Clone, Copy)]
        pub struct EXCEPTION_POINTERS {
            pub ExceptionRecord: *mut EXCEPTION_RECORD,
            // We don't need this field here
            // pub Context: *mut CONTEXT,
        }
        #[repr(C)]
        #[derive(Clone, Copy)]
        pub struct EXCEPTION_RECORD {
            pub ExceptionCode: NTSTATUS,
            pub ExceptionFlags: u32,
            pub ExceptionRecord: *mut EXCEPTION_RECORD,
            pub ExceptionAddress: *mut core::ffi::c_void,
            pub NumberParameters: u32,
            pub ExceptionInformation: [usize; 15],
        }
    }

    /// Reserve stack space for use in stack overflow exceptions.
    fn reserve_stack() {
        let result = unsafe { c::SetThreadStackGuarantee(&mut 0x5000) };
        // Reserving stack space is not critical so we allow it to fail in the released build of libstd.
        // We still use debug assert here so that CI will test that we haven't made a mistake calling the function.
        debug_assert_ne!(result, 0, "failed to reserve stack space for exception handling");
    }

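    // Vectored exception handler: prints the stack overflow message and then
    // returns EXCEPTION_CONTINUE_SEARCH so that normal exception handling
    // continues.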
    unsafe extern "system" fn vectored_handler(ExceptionInfo: *mut c::EXCEPTION_POINTERS) -> i32 {
        // SAFETY: It's up to the caller (which in this case is the OS) to ensure that `ExceptionInfo` is valid.
        unsafe {
            let rec = &(*(*ExceptionInfo).ExceptionRecord);
            let code = rec.ExceptionCode;

            if code == c::EXCEPTION_STACK_OVERFLOW {
                crate::thread::with_current_name(|name| {
                    let name = name.unwrap_or("<unknown>");
                    let tid = crate::thread::current_os_id();
                    rtprintpanic!("\nthread '{name}' ({tid}) has overflowed its stack\n");
                });
            }
            c::EXCEPTION_CONTINUE_SEARCH
        }
    }

    pub unsafe fn init() {
        // SAFETY: `vectored_handler` has the correct ABI and is safe to call during exception handling.
        unsafe {
            let result = c::AddVectoredExceptionHandler(0, Some(vectored_handler));
            // Similar to the above, adding the stack overflow handler is allowed to fail
            // but a debug assert is used so CI will still test that it normally works.
            debug_assert!(!result.is_null(), "failed to install exception handler");
        }
        // Set the thread stack guarantee for the main thread.
        reserve_stack();
    }

    pub unsafe fn cleanup() {}

    pub unsafe fn make_handler(main_thread: bool) -> super::Handler {
        if !main_thread {
            reserve_stack();
        }
        super::Handler::null()
    }

    pub unsafe fn drop_handler(_data: *mut libc::c_void) {}
}