// rustc_driver_impl/signal_handler.rs

//! Signal handler for rustc
//! Primarily used to extract a backtrace from stack overflow

use std::alloc::{Layout, alloc};
use std::{fmt, mem, ptr, slice};

use rustc_interface::util::{DEFAULT_STACK_SIZE, STACK_SIZE};

9/// Signals that represent that we have a bug, and our prompt termination has
10/// been ordered.
11#[rustfmt::skip]
12const KILL_SIGNALS: [(libc::c_int, &str); 3] = [
13    (libc::SIGILL, "SIGILL"),
14    (libc::SIGBUS, "SIGBUS"),
15    (libc::SIGSEGV, "SIGSEGV")
16];
17
// `backtrace_symbols_fd` comes from the platform C library's execinfo
// interface: it resolves the return addresses in `buffer` into symbol names
// and writes them directly to the file descriptor `fd`. Unlike
// `backtrace_symbols`, it does not allocate, which is what makes it safe to
// call from inside a signal handler.
unsafe extern "C" {
    fn backtrace_symbols_fd(buffer: *const *mut libc::c_void, size: libc::c_int, fd: libc::c_int);
}

22fn backtrace_stderr(buffer: &[*mut libc::c_void]) {
23    let size = buffer.len().try_into().unwrap_or_default();
24    unsafe { backtrace_symbols_fd(buffer.as_ptr(), size, libc::STDERR_FILENO) };
25}
26
/// Unbuffered, unsynchronized writer to stderr.
///
/// Only acceptable because everything will end soon anyways.
// Zero-sized marker type: holds no state; every write goes directly to fd 2.
struct RawStderr(());

32impl fmt::Write for RawStderr {
33    fn write_str(&mut self, s: &str) -> Result<(), fmt::Error> {
34        let ret = unsafe { libc::write(libc::STDERR_FILENO, s.as_ptr().cast(), s.len()) };
35        if ret == -1 { Err(fmt::Error) } else { Ok(()) }
36    }
37}
38
/// We don't really care how many bytes we actually get out. SIGSEGV comes for our head.
/// Splash stderr with letters of our own blood to warn our friends about the monster.
// Formats the arguments straight through `RawStderr` and appends a newline.
// Write errors are deliberately discarded (`let _`): there is no way to
// report them, and the process is about to terminate anyway.
macro raw_errln($tokens:tt) {
    let _ = ::core::fmt::Write::write_fmt(&mut RawStderr(()), format_args!($tokens));
    let _ = ::core::fmt::Write::write_char(&mut RawStderr(()), '\n');
}

/// Signal handler installed for SIGSEGV
///
/// Prints a symbolized backtrace to stderr, collapsing recursion cycles so a
/// stack overflow doesn't dump hundreds of lines of the same few frames, then
/// appends explanatory notes (stack-overflow hint, RUST_MIN_STACK help).
///
/// # Safety
///
/// Caller must ensure that this function is not re-entered.
unsafe extern "C" fn print_stack_trace(signum: libc::c_int) {
    // Upper bound on captured frames; also the capacity of the static buffer below.
    const MAX_FRAMES: usize = 256;

    // Map the raw signal number back to a printable name via a linear scan
    // over the (tiny) KILL_SIGNALS table.
    let signame = {
        let mut signame = "<unknown>";
        for sig in KILL_SIGNALS {
            if sig.0 == signum {
                signame = sig.1;
            }
        }
        signame
    };

    let stack = unsafe {
        // Reserve data segment so we don't have to malloc in a signal handler, which might fail
        // in incredibly undesirable and unexpected ways due to e.g. the allocator deadlocking
        static mut STACK_TRACE: [*mut libc::c_void; MAX_FRAMES] = [ptr::null_mut(); MAX_FRAMES];
        // Collect return addresses
        let depth = libc::backtrace(&raw mut STACK_TRACE as _, MAX_FRAMES as i32);
        if depth == 0 {
            return;
        }
        // SAFETY-relevant: only the first `depth` entries were written by
        // `backtrace`; slice exactly that prefix.
        slice::from_raw_parts(&raw const STACK_TRACE as _, depth as _)
    };

    // Just a stack trace is cryptic. Explain what we're doing.
    raw_errln!("error: rustc interrupted by {signame}, printing backtrace\n");

    // `written` counts lines emitted to stderr so far; used at the very end to
    // decide whether the opening message has likely scrolled off the terminal.
    // `consumed` counts frames already printed (or collapsed into a cycle).
    let mut written = 1;
    let mut consumed = 0;
    // Begin elaborating return addrs into symbols and writing them directly to stderr
    // Most backtraces are stack overflow, most stack overflows are from recursion
    // Check for cycles before writing 250 lines of the same ~5 symbols
    let cycled = |(runner, walker)| runner == walker;
    let mut cyclic = false;
    // Floyd-style (tortoise and hare) cycle detection: `runner` visits frames
    // 1, 3, 5, … (skip(1).step_by(2)) while `walker` visits 0, 1, 2, …; a
    // match at walker index k means the trace repeats with some period
    // dividing k + 1.
    if let Some(period) = stack.iter().skip(1).step_by(2).zip(stack).position(cycled) {
        let period = period.saturating_add(1); // avoid "what if wrapped?" branches
        // Second pass: the first index where stack[i + period] == stack[i] is
        // where the repeating region begins (the cycle entry offset).
        let Some(offset) = stack.iter().skip(period).zip(stack).position(cycled) else {
            // impossible.
            return;
        };

        // Count matching trace slices, else we could miscount "biphasic cycles"
        // with the same period + loop entry but a different inner loop
        let next_cycle = stack[offset..].chunks_exact(period).skip(1);
        let cycles = 1 + next_cycle
            .zip(stack[offset..].chunks_exact(period))
            .filter(|(next, prev)| next == prev)
            .count();
        // Print the non-repeating prefix leading into the cycle.
        backtrace_stderr(&stack[..offset]);
        written += offset;
        consumed += offset;
        if cycles > 1 {
            // Print one representative iteration of the cycle, bracketed by
            // the two "###" marker messages (which add 4 lines including the
            // surrounding blanks), then skip over all remaining iterations.
            raw_errln!("\n### cycle encountered after {offset} frames with period {period}");
            backtrace_stderr(&stack[consumed..consumed + period]);
            raw_errln!("### recursed {cycles} times\n");
            written += period + 4;
            consumed += period * cycles;
            cyclic = true;
        };
    }
    // Whatever frames remain after the (possibly collapsed) cycle.
    let rem = &stack[consumed..];
    backtrace_stderr(rem);
    raw_errln!("");
    written += rem.len() + 1;

    let random_depth = || 8 * 16; // chosen by random diceroll (2d20)
    // A detected cycle or a very deep trace on SIGSEGV is almost certainly a
    // stack overflow inside rustc itself.
    if (cyclic || stack.len() > random_depth()) && signum == libc::SIGSEGV {
        // technically speculation, but assert it with confidence anyway.
        // rustc only arrived in this signal handler because bad things happened
        // and this message is for explaining it's not the programmer's fault
        raw_errln!("note: rustc unexpectedly overflowed its stack! this is a bug");
        written += 1;
    }
    if stack.len() == MAX_FRAMES {
        raw_errln!("note: maximum backtrace depth reached, frames may have been lost");
        written += 1;
    }
    raw_errln!("note: we would appreciate a report at https://github.com/rust-lang/rust");
    written += 1;
    if signum == libc::SIGSEGV {
        // get the current stack size WITHOUT blocking and double it
        let new_size = STACK_SIZE.get().copied().unwrap_or(DEFAULT_STACK_SIZE) * 2;
        raw_errln!(
            "help: you can increase rustc's stack size by setting RUST_MIN_STACK={new_size}"
        );
        written += 1;
    }
    if written > 24 {
        // We probably just scrolled the earlier "interrupted by {signame}" message off the terminal
        raw_errln!("note: backtrace dumped due to {signame}! resuming signal");
    };
}

145/// When one of the KILL signals is delivered to the process, print a stack trace and then exit.
146pub(super) fn install() {
147    unsafe {
148        let alt_stack_size: usize = min_sigstack_size() + 64 * 1024;
149        let mut alt_stack: libc::stack_t = mem::zeroed();
150        alt_stack.ss_sp = alloc(Layout::from_size_align(alt_stack_size, 1).unwrap()).cast();
151        alt_stack.ss_size = alt_stack_size;
152        libc::sigaltstack(&alt_stack, ptr::null_mut());
153
154        let mut sa: libc::sigaction = mem::zeroed();
155        sa.sa_sigaction = print_stack_trace as libc::sighandler_t;
156        sa.sa_flags = libc::SA_NODEFER | libc::SA_RESETHAND | libc::SA_ONSTACK;
157        libc::sigemptyset(&mut sa.sa_mask);
158        for (signum, _signame) in KILL_SIGNALS {
159            libc::sigaction(signum, &sa, ptr::null_mut());
160        }
161    }
162}
163
164/// Modern kernels on modern hardware can have dynamic signal stack sizes.
165#[cfg(any(target_os = "linux", target_os = "android"))]
166fn min_sigstack_size() -> usize {
167    const AT_MINSIGSTKSZ: core::ffi::c_ulong = 51;
168    let dynamic_sigstksz = unsafe { libc::getauxval(AT_MINSIGSTKSZ) };
169    // If getauxval couldn't find the entry, it returns 0,
170    // so take the higher of the "constant" and auxval.
171    // This transparently supports older kernels which don't provide AT_MINSIGSTKSZ
172    libc::MINSIGSTKSZ.max(dynamic_sigstksz as _)
173}
174
/// Not all OS support hardware where this is needed.
#[cfg(not(any(target_os = "linux", target_os = "android")))]
fn min_sigstack_size() -> usize {
    // No dynamic signal-stack sizing available: the platform's static
    // minimum is the best we can do.
    libc::MINSIGSTKSZ
}