miri/shims/native_lib/trace/parent.rs
use std::sync::atomic::{AtomicPtr, AtomicUsize};

use ipc_channel::ipc;
use nix::sys::{ptrace, signal, wait};
use nix::unistd;

use super::CALLBACK_STACK_SIZE;
use super::messages::{Confirmation, StartFfiInfo, TraceRequest};
use crate::shims::native_lib::{AccessEvent, AccessRange, MemEvents};

/// The flags to use when calling `waitid()`.
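/// (`WUNTRACED` is an alias of `WSTOPPED` as far as `waitid()` is concerned:
/// it reports children that stop, while `WEXITED` reports ones that terminate.)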
const WAIT_FLAGS: wait::WaitPidFlag =
    wait::WaitPidFlag::WUNTRACED.union(wait::WaitPidFlag::WEXITED);

/// The default word size on a given platform, in bytes.
#[cfg(target_arch = "x86")]
const ARCH_WORD_SIZE: usize = 4;
#[cfg(target_arch = "x86_64")]
const ARCH_WORD_SIZE: usize = 8;

/// The address of the page set to be edited, initialised to a sentinel null
/// pointer.
static PAGE_ADDR: AtomicPtr<u8> = AtomicPtr::new(std::ptr::null_mut());
/// The host pagesize, initialised to a sentinel zero value.
pub static PAGE_SIZE: AtomicUsize = AtomicUsize::new(0);
/// How many consecutive pages to unprotect. 1 by default, unlikely to be set
/// higher than 2.
static PAGE_COUNT: AtomicUsize = AtomicUsize::new(1);
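// Note: these statics live at the same addresses in the supervisor and the
// child, since the supervisor is forked off from Miri. That is what lets
// `handle_segfault` below update the child's copies via `ptrace::write`,
// which `mempr_off`/`mempr_on` then read on the child side.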

/// Allows us to get common arguments from the `user_regs_t` across architectures.
/// Normally this would land us in ABI hell, but thankfully all of our use cases
/// consist of functions with a small number of register-sized integer arguments.
/// See <https://man7.org/linux/man-pages/man2/syscall.2.html> for sources.
trait ArchIndependentRegs {
    /// Gets the address of the instruction pointer.
    fn ip(&self) -> usize;
    /// Set the instruction pointer; remember to also set the stack pointer, or
    /// else the stack might get messed up!
    fn set_ip(&mut self, ip: usize);
    /// Set the stack pointer, ideally to a zeroed-out area.
    fn set_sp(&mut self, sp: usize);
}

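// A sketch of how these accessors get used in `handle_segfault` below
// (identifiers here are illustrative only):
//
//     let saved = ptrace::getregs(pid).unwrap();
//     let mut regs = saved;
//     regs.set_ip(helper_fn as usize); // redirect execution into a helper...
//     regs.set_sp(scratch_stack);      // ...running on a scratch stack
//     ptrace::setregs(pid, regs).unwrap();
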
// It's fine / desirable behaviour for values to wrap here, we care about just
// preserving the bit pattern.
#[cfg(target_arch = "x86_64")]
#[rustfmt::skip]
impl ArchIndependentRegs for libc::user_regs_struct {
    #[inline]
    fn ip(&self) -> usize { self.rip.try_into().unwrap() }
    #[inline]
    fn set_ip(&mut self, ip: usize) { self.rip = ip.try_into().unwrap() }
    #[inline]
    fn set_sp(&mut self, sp: usize) { self.rsp = sp.try_into().unwrap() }
}

#[cfg(target_arch = "x86")]
#[rustfmt::skip]
impl ArchIndependentRegs for libc::user_regs_struct {
    #[inline]
    fn ip(&self) -> usize { self.eip.cast_unsigned().try_into().unwrap() }
    #[inline]
    fn set_ip(&mut self, ip: usize) { self.eip = ip.cast_signed().try_into().unwrap() }
    #[inline]
    fn set_sp(&mut self, sp: usize) { self.esp = sp.cast_signed().try_into().unwrap() }
}

/// A unified event representing something happening on the child process. Wraps
/// `nix`'s `WaitStatus` and our custom signals so it can all be done with one
/// `match` statement.
pub enum ExecEvent {
    /// Child process requests that we begin monitoring it.
    Start(StartFfiInfo),
    /// Child requests that we stop monitoring and pass over the events we
    /// detected.
    End,
    /// The child process with the specified pid was stopped by the given signal.
    Status(unistd::Pid, signal::Signal),
    /// The child process with the specified pid entered or exited a syscall.
    Syscall(unistd::Pid),
    /// A child process exited or was killed; if we have a return code, it is
    /// specified.
    Died(Option<i32>),
}

/// A listener for the FFI start info channel along with relevant state.
pub struct ChildListener {
    /// The matching channel for the child's `Supervisor` struct.
    message_rx: ipc::IpcReceiver<TraceRequest>,
    /// The channel over which requests from the child are acknowledged.
    confirm_tx: ipc::IpcSender<Confirmation>,
    /// Whether an FFI call is currently ongoing.
    attached: bool,
    /// If `Some`, overrides the return code with the given value.
    override_retcode: Option<i32>,
    /// Last code obtained from a child exiting.
    last_code: Option<i32>,
}

impl ChildListener {
    pub fn new(
        message_rx: ipc::IpcReceiver<TraceRequest>,
        confirm_tx: ipc::IpcSender<Confirmation>,
    ) -> Self {
        Self { message_rx, confirm_tx, attached: false, override_retcode: None, last_code: None }
    }
}
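
// For orientation, a sketch of how a listener could be wired up (hypothetical
// setup; the real plumbing lives on the child side):
//
//     let (_message_tx, message_rx) = ipc::channel::<TraceRequest>().unwrap();
//     let (confirm_tx, _confirm_rx) = ipc::channel::<Confirmation>().unwrap();
//     let listener = ChildListener::new(message_rx, confirm_tx);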

impl Iterator for ChildListener {
    type Item = ExecEvent;

    // Allows us to monitor the child process by just iterating over the listener.
    // NB: This should never return None!
    fn next(&mut self) -> Option<Self::Item> {
        // Do not block if the child has nothing to report for `waitid`.
        let opts = WAIT_FLAGS | wait::WaitPidFlag::WNOHANG;
        loop {
            // Listen to any child, not just the main one. Important if we want
            // to allow the C code to fork further, along with being a bit of
            // defensive programming since Linux sometimes assigns threads of
            // the same process different PIDs with unpredictable rules...
            match wait::waitid(wait::Id::All, opts) {
                Ok(stat) =>
                    match stat {
                        // Child exited normally with a specific code set.
                        wait::WaitStatus::Exited(_, code) => self.last_code = Some(code),
                        // Child was killed by a signal, without giving a code.
                        wait::WaitStatus::Signaled(_, _, _) => self.last_code = None,
                        // Child entered or exited a syscall.
                        wait::WaitStatus::PtraceSyscall(pid) =>
                            if self.attached {
                                return Some(ExecEvent::Syscall(pid));
                            },
                        // Child with the given pid was stopped by the given signal.
                        // It's somewhat dubious when this is returned instead of
                        // WaitStatus::Stopped, but for our purposes they are the
                        // same thing.
                        wait::WaitStatus::PtraceEvent(pid, signal, _) =>
                            if self.attached {
                                // This is our end-of-FFI signal!
                                if signal == signal::SIGUSR1 {
                                    self.attached = false;
                                    return Some(ExecEvent::End);
                                } else {
                                    return Some(ExecEvent::Status(pid, signal));
                                }
                            } else {
                                // Just pass along the signal.
                                ptrace::cont(pid, signal).unwrap();
                            },
                        // Child was stopped at the given signal. Same logic as for
                        // WaitStatus::PtraceEvent.
                        wait::WaitStatus::Stopped(pid, signal) =>
                            if self.attached {
                                if signal == signal::SIGUSR1 {
                                    self.attached = false;
                                    return Some(ExecEvent::End);
                                } else {
                                    return Some(ExecEvent::Status(pid, signal));
                                }
                            } else {
                                ptrace::cont(pid, signal).unwrap();
                            },
                        _ => (),
                    },
                // This case should only trigger when all children died.
                Err(_) => return Some(ExecEvent::Died(self.override_retcode.or(self.last_code))),
            }

            // Similarly, do a non-blocking poll of the IPC channel.
            if let Ok(req) = self.message_rx.try_recv() {
                match req {
                    TraceRequest::StartFfi(info) =>
                        // Should never trigger - but better to panic explicitly than deadlock!
                        if self.attached {
                            panic!("Attempting to begin FFI multiple times!");
                        } else {
                            self.attached = true;
                            return Some(ExecEvent::Start(info));
                        },
                    TraceRequest::OverrideRetcode(code) => {
                        self.override_retcode = Some(code);
                        self.confirm_tx.send(Confirmation).unwrap();
                    }
                }
            }

            // Not ideal, but doing anything else might sacrifice performance.
            std::thread::yield_now();
        }
    }
}

/// An error came up while waiting on the child process to do something.
/// It likely died, with this return code if we have one.
#[derive(Debug)]
pub struct ExecEnd(pub Option<i32>);

/// Whether to call `ptrace::cont()` immediately. Used exclusively by `wait_for_signal`.
enum InitialCont {
    Yes,
    No,
}

/// This is the main loop of the supervisor process. It runs in a separate
/// process from the rest of Miri (but because we fork, addresses for anything
/// created before the fork - like statics - are the same).
pub fn sv_loop(
    listener: ChildListener,
    init_pid: unistd::Pid,
    event_tx: ipc::IpcSender<MemEvents>,
    confirm_tx: ipc::IpcSender<Confirmation>,
) -> Result<!, ExecEnd> {
    // Get the pagesize set and make sure it isn't still on the zero sentinel value!
    let page_size = PAGE_SIZE.load(std::sync::atomic::Ordering::Relaxed);
    assert_ne!(page_size, 0);
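    // (A non-zero value can only mean the static was stored before the fork,
    // as that is the sole way the supervisor's copy gets initialised.)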

    // Things that we return to the child process.
    let mut acc_events = Vec::new();

    // Memory allocated for the MiriMachine.
    let mut ch_pages = Vec::new();
    let mut ch_stack = None;

    // An instance of the Capstone disassembler, so we don't spawn one on every access.
    let cs = get_disasm();

    // The pid of the last process we interacted with, used by default if we don't have a
    // reason to use a different one.
    let mut curr_pid = init_pid;

    // There's an initial sigstop we need to deal with.
    wait_for_signal(Some(curr_pid), signal::SIGSTOP, InitialCont::No)?;
    ptrace::cont(curr_pid, None).unwrap();

    for evt in listener {
        match evt {
            // Child started ffi, so prep memory.
            ExecEvent::Start(ch_info) => {
                // All the pages that the child process is "allowed to" access.
                ch_pages = ch_info.page_ptrs;
                // And the temporary callback stack it allocated for us to use later.
                ch_stack = Some(ch_info.stack_ptr);

                // We received the signal and are no longer in the main listener loop,
                // so we can let the child move on to the end of the ffi prep where it will
                // raise a SIGSTOP. We need it to be signal-stopped *and waited for* in
                // order to do most ptrace operations!
                confirm_tx.send(Confirmation).unwrap();
                // We can't trust simply calling `Pid::this()` in the child process to give the right
                // PID for us, so we get it this way.
                curr_pid = wait_for_signal(None, signal::SIGSTOP, InitialCont::No).unwrap();

                ptrace::syscall(curr_pid, None).unwrap();
            }
            // Child wants to end tracing.
            ExecEvent::End => {
                // Hand over the access info we traced.
                event_tx.send(MemEvents { acc_events }).unwrap();
                // And reset our values.
                acc_events = Vec::new();
                ch_stack = None;

                // No need to monitor syscalls anymore, they'd just be ignored.
                ptrace::cont(curr_pid, None).unwrap();
            }
            // Child process was stopped by a signal
            ExecEvent::Status(pid, signal) =>
                match signal {
                    // If it was a segfault, check if it was an artificial one
                    // caused by it trying to access the MiriMachine memory.
                    signal::SIGSEGV =>
                        handle_segfault(
                            pid,
                            &ch_pages,
                            ch_stack.unwrap(),
                            page_size,
                            &cs,
                            &mut acc_events,
                        )?,
                    // Something weird happened.
                    _ => {
                        eprintln!("Process unexpectedly got {signal}; continuing...");
                        // In case we're not tracing
                        if ptrace::syscall(pid, None).is_err() {
                            // If *this* fails too, something really weird happened
                            // and it's probably best to just panic.
                            signal::kill(pid, signal::SIGCONT).unwrap();
                        }
                    }
                },
            // Child entered a syscall; we wait for exits inside of this, so it
            // should never trigger on return from a syscall we care about.
            ExecEvent::Syscall(pid) => {
                ptrace::syscall(pid, None).unwrap();
            }
            ExecEvent::Died(code) => {
                return Err(ExecEnd(code));
            }
        }
    }

    unreachable!()
}

/// Spawns a Capstone disassembler for the host architecture.
#[rustfmt::skip]
fn get_disasm() -> capstone::Capstone {
    use capstone::prelude::*;
    let cs_pre = Capstone::new();
    {
        #[cfg(target_arch = "x86_64")]
        {cs_pre.x86().mode(arch::x86::ArchMode::Mode64)}
        #[cfg(target_arch = "x86")]
        {cs_pre.x86().mode(arch::x86::ArchMode::Mode32)}
    }
    .detail(true)
    .build()
    .unwrap()
}

/// Waits for `wait_signal`. If `init_cont`, it will first do a `ptrace::cont`.
/// We want to avoid that in some cases, like at the beginning of FFI.
///
/// If `pid` is `None`, only one wait will be done and `init_cont` should be
/// `InitialCont::No`.
fn wait_for_signal(
    pid: Option<unistd::Pid>,
    wait_signal: signal::Signal,
    init_cont: InitialCont,
) -> Result<unistd::Pid, ExecEnd> {
    if matches!(init_cont, InitialCont::Yes) {
        ptrace::cont(pid.unwrap(), None).unwrap();
    }
    // Repeatedly call `waitid` until we get the signal we want, or the process dies.
    loop {
        let wait_id = match pid {
            Some(pid) => wait::Id::Pid(pid),
            None => wait::Id::All,
        };
        let stat = wait::waitid(wait_id, WAIT_FLAGS).map_err(|_| ExecEnd(None))?;
        let (signal, pid) = match stat {
            // Report the cause of death, if we know it.
            wait::WaitStatus::Exited(_, code) => {
                return Err(ExecEnd(Some(code)));
            }
            wait::WaitStatus::Signaled(_, _, _) => return Err(ExecEnd(None)),
            wait::WaitStatus::Stopped(pid, signal) => (signal, pid),
            wait::WaitStatus::PtraceEvent(pid, signal, _) => (signal, pid),
            // This covers PtraceSyscall and variants that are impossible with
            // the flags set (e.g. WaitStatus::StillAlive).
            _ => {
                ptrace::cont(pid.unwrap(), None).unwrap();
                continue;
            }
        };
        if signal == wait_signal {
            return Ok(pid);
        } else {
            ptrace::cont(pid, signal).map_err(|_| ExecEnd(None))?;
        }
    }
}

/// Add the memory events from `op` being executed while there is a memory access at `addr` to
/// `acc_events`. Return whether this was a memory operand.
fn capstone_find_events(
    addr: usize,
    op: &capstone::arch::ArchOperand,
    acc_events: &mut Vec<AccessEvent>,
) -> bool {
    use capstone::prelude::*;
    match op {
        #[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
        arch::ArchOperand::X86Operand(x86_operand) => {
            match x86_operand.op_type {
                // We only care about memory accesses
                arch::x86::X86OperandType::Mem(_) => {
                    let push = AccessRange { addr, size: x86_operand.size.into() };
                    // It's called a "RegAccessType" but it also applies to memory
                    let acc_ty = x86_operand.access.unwrap();
                    // The same instruction might do both reads and writes, so potentially add both.
                    // We do not know the order in which they happened, but writing and then reading
                    // makes little sense so we put the read first. That is also the more
                    // conservative choice.
                    if acc_ty.is_readable() {
                        acc_events.push(AccessEvent::Read(push.clone()));
                    }
                    if acc_ty.is_writable() {
                        // FIXME: This could be made certain; either determine all cases where
                        // only reads happen, or have an intermediate mempr_* function to first
                        // map the page(s) as readonly and check if a segfault occurred.

                        // Per https://docs.rs/iced-x86/latest/iced_x86/enum.OpAccess.html,
                        // we know that the possible access types are Read, CondRead, Write,
                        // CondWrite, ReadWrite, and ReadCondWrite. Since we got a segfault
                        // we know some kind of access happened so Cond{Read, Write}s are
                        // certain reads and writes; the only uncertainty is with an RW op
                        // as it might be a ReadCondWrite with the write condition unmet.
                        acc_events.push(AccessEvent::Write(push, !acc_ty.is_readable()));
                    }

                    return true;
                }
                _ => (),
            }
        }
        // FIXME: arm64
        _ => unimplemented!(),
    }

    false
}

/// Extract the events from the given instruction.
fn capstone_disassemble(
    instr: &[u8],
    addr: usize,
    cs: &capstone::Capstone,
    acc_events: &mut Vec<AccessEvent>,
) -> capstone::CsResult<()> {
    // The arch_detail is what we care about, but it relies on these temporaries
    // that we can't drop. 0x1000 is the default base address for Capstone, and
    // we're expecting 1 instruction.
    let insns = cs.disasm_count(instr, 0x1000, 1)?;
    let ins_detail = cs.insn_detail(&insns[0])?;
    let arch_detail = ins_detail.arch_detail();

    let mut found_mem_op = false;

    for op in arch_detail.operands() {
        if capstone_find_events(addr, &op, acc_events) {
            if found_mem_op {
                panic!("more than one memory operand found; we don't know which one accessed what");
            }
            found_mem_op = true;
        }
    }

    Ok(())
}
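
// For instance (a sketch; `0x48 0x8b 0x07` is the x86-64 encoding of
// `mov rax, qword ptr [rdi]`, and the faulting address is made up):
//
//     let mut events = Vec::new();
//     capstone_disassemble(&[0x48, 0x8b, 0x07], 0x7f00_0000_b000, &cs, &mut events)?;
//     // `events` now contains an 8-byte read at 0x7f00_0000_b000.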

/// Grabs the access that caused a segfault and logs it down if it's to our memory,
/// or kills the child and returns the appropriate error otherwise.
fn handle_segfault(
    pid: unistd::Pid,
    ch_pages: &[usize],
    ch_stack: usize,
    page_size: usize,
    cs: &capstone::Capstone,
    acc_events: &mut Vec<AccessEvent>,
) -> Result<(), ExecEnd> {
    // Get information on what caused the segfault. This contains the address
    // that triggered it.
    let siginfo = ptrace::getsiginfo(pid).unwrap();
    // All x86 instructions only have at most one memory operand (thankfully!)
    // SAFETY: si_addr is safe to call.
    let addr = unsafe { siginfo.si_addr().addr() };
    let page_addr = addr.strict_sub(addr.strict_rem(page_size));

    if !ch_pages.iter().any(|pg| (*pg..pg.strict_add(page_size)).contains(&addr)) {
        // This was a real segfault (not one of the Miri memory pages), so print some debug info and
        // quit.
        let regs = ptrace::getregs(pid).unwrap();
        eprintln!("Segfault occurred during FFI at {addr:#018x}");
        eprintln!("Expected access on pages: {ch_pages:#018x?}");
        eprintln!("Register dump: {regs:#x?}");
        ptrace::kill(pid).unwrap();
        return Err(ExecEnd(None));
    }

    // Overall structure:
    // - Get the address that caused the segfault
    // - Unprotect the memory: we force the child to execute `mempr_off`, passing parameters via
    //   global atomic variables. This is what we use the temporary callback stack for.
    // - Step 1 instruction
    // - Parse executed code to estimate size & type of access
    // - Reprotect the memory by executing `mempr_on` in the child, using the callback stack again.
    // - Continue

    // Ensure the stack is properly zeroed out!
    for a in (ch_stack..ch_stack.strict_add(CALLBACK_STACK_SIZE)).step_by(ARCH_WORD_SIZE) {
        ptrace::write(pid, std::ptr::with_exposed_provenance_mut(a), 0).unwrap();
    }

    // Guard against both architectures with upwards and downwards-growing stacks.
    let stack_ptr = ch_stack.strict_add(CALLBACK_STACK_SIZE / 2);
    let regs_bak = ptrace::getregs(pid).unwrap();
    let mut new_regs = regs_bak;
    let ip_prestep = regs_bak.ip();

    // Move the instr ptr into the deprotection code.
    #[expect(clippy::as_conversions)]
    new_regs.set_ip(mempr_off as usize);
    // Don't mess up the stack by accident!
    new_regs.set_sp(stack_ptr);

    // Modify the PAGE_ADDR global on the child process to point to the page
    // that we want unprotected.
    ptrace::write(
        pid,
        (&raw const PAGE_ADDR).cast_mut().cast(),
        libc::c_long::try_from(page_addr.cast_signed()).unwrap(),
    )
    .unwrap();

    // Check if we also own the next page, and if so unprotect it in case
    // the access spans the page boundary.
    let flag = if ch_pages.contains(&page_addr.strict_add(page_size)) { 2 } else { 1 };
    ptrace::write(pid, (&raw const PAGE_COUNT).cast_mut().cast(), flag).unwrap();

    ptrace::setregs(pid, new_regs).unwrap();

    // Our mempr_* functions end with a raise(SIGSTOP).
    wait_for_signal(Some(pid), signal::SIGSTOP, InitialCont::Yes)?;

    // Step 1 instruction.
    ptrace::setregs(pid, regs_bak).unwrap();
    ptrace::step(pid, None).unwrap();
    // Don't use wait_for_signal here since 1 instruction doesn't give room
    // for any uncertainty + we don't want it `cont()`ing randomly by accident.
    // Also, don't let it continue with unprotected memory if something errors!
    let _ = wait::waitid(wait::Id::Pid(pid), WAIT_FLAGS).map_err(|_| ExecEnd(None))?;

    // Zero out again to be safe
    for a in (ch_stack..ch_stack.strict_add(CALLBACK_STACK_SIZE)).step_by(ARCH_WORD_SIZE) {
        ptrace::write(pid, std::ptr::with_exposed_provenance_mut(a), 0).unwrap();
    }

    // Save registers and grab the bytes that were executed. This would
    // be really nasty if it was a jump or similar but those thankfully
    // won't do memory accesses and so can't trigger this!
    let regs_bak = ptrace::getregs(pid).unwrap();
    new_regs = regs_bak;
    let ip_poststep = regs_bak.ip();

    // Ensure that we've actually gone forwards.
    assert!(ip_poststep > ip_prestep);
    // But not by too much. 64 bytes should be "big enough" on ~any architecture.
    assert!(ip_prestep.strict_add(64) > ip_poststep);

    // We need to do reads/writes in word-sized chunks.
    let diff = (ip_poststep.strict_sub(ip_prestep)).div_ceil(ARCH_WORD_SIZE);
    let instr = (ip_prestep..ip_prestep.strict_add(diff)).fold(vec![], |mut ret, ip| {
        // This only needs to be a valid pointer in the child process, not ours.
        ret.append(
            &mut ptrace::read(pid, std::ptr::without_provenance_mut(ip))
                .unwrap()
                .to_ne_bytes()
                .to_vec(),
        );
        ret
    });

    // Now figure out the size + type of access and log it down.
    capstone_disassemble(&instr, addr, cs, acc_events).expect("Failed to disassemble instruction");

    // Reprotect everything and continue.
    #[expect(clippy::as_conversions)]
    new_regs.set_ip(mempr_on as usize);
    new_regs.set_sp(stack_ptr);
    ptrace::setregs(pid, new_regs).unwrap();
    wait_for_signal(Some(pid), signal::SIGSTOP, InitialCont::Yes)?;

    ptrace::setregs(pid, regs_bak).unwrap();
    ptrace::syscall(pid, None).unwrap();
    Ok(())
}

// We only get dropped into these functions via offsetting the instr pointer
// manually, so we *must not ever* unwind from them.

/// Disables protections on the page whose address is currently in `PAGE_ADDR`.
///
/// SAFETY: `PAGE_ADDR` should be set to a page-aligned pointer to an owned page,
/// `PAGE_SIZE` should be the host pagesize, and the range from `PAGE_ADDR` to
/// `PAGE_SIZE` * `PAGE_COUNT` must be owned and allocated memory. No other threads
/// should be running.
pub unsafe extern "C" fn mempr_off() {
    use std::sync::atomic::Ordering;

    // Again, cannot allow unwinds to happen here.
    let len = PAGE_SIZE.load(Ordering::Relaxed).saturating_mul(PAGE_COUNT.load(Ordering::Relaxed));
    // SAFETY: Upheld by "caller".
    unsafe {
        // It's up to the caller to make sure this doesn't actually overflow, but
        // we mustn't unwind from here, so...
        if libc::mprotect(
            PAGE_ADDR.load(Ordering::Relaxed).cast(),
            len,
            libc::PROT_READ | libc::PROT_WRITE,
        ) != 0
        {
            // Can't return or unwind, but we can do this.
            std::process::exit(-1);
        }
    }
    // If this fails somehow we're doomed.
    if signal::raise(signal::SIGSTOP).is_err() {
        std::process::exit(-1);
    }
}

/// Reenables protection on the page set by `PAGE_ADDR`.
///
/// SAFETY: See `mempr_off()`.
pub unsafe extern "C" fn mempr_on() {
    use std::sync::atomic::Ordering;

    let len = PAGE_SIZE.load(Ordering::Relaxed).wrapping_mul(PAGE_COUNT.load(Ordering::Relaxed));
    // SAFETY: Upheld by "caller".
    unsafe {
        if libc::mprotect(PAGE_ADDR.load(Ordering::Relaxed).cast(), len, libc::PROT_NONE) != 0 {
            std::process::exit(-1);
        }
    }
    if signal::raise(signal::SIGSTOP).is_err() {
        std::process::exit(-1);
    }
}