1use std::fs::File;
2use std::io::{self, Write};
3use std::marker::PhantomData;
4use std::ops::Range;
5use std::path::{Path, PathBuf};
67// This code is very hot and uses lots of arithmetic, avoid overflow checks for performance.
8// See https://github.com/rust-lang/rust/pull/119440#issuecomment-1874255727
9use crate::int_overflow::DebugStrictAdd;
10use crate::leb128;
11use crate::serialize::{Decodable, Decoder, Encodable, Encoder};
1213pub mod mem_encoder;
1415// -----------------------------------------------------------------------------
16// Encoder
17// -----------------------------------------------------------------------------
// On success, carries the total number of bytes written; on failure, the path
// of the output file paired with the underlying I/O error.
pub type FileEncodeResult = Result<usize, (PathBuf, io::Error)>;

// Trailer appended by `FileEncoder::finish` and required (then stripped) by
// `MemDecoder::new`, so truncated files are detected up front.
pub const MAGIC_END_BYTES: &[u8] = b"rust-end-file";

/// The size of the buffer in `FileEncoder`.
const BUF_SIZE: usize = 64 * 1024;
/// `FileEncoder` encodes data to file via fixed-size buffer.
///
/// There used to be a `MemEncoder` type that encoded all the data into a
/// `Vec`. `FileEncoder` is better because its memory use is determined by the
/// size of the buffer, rather than the full length of the encoded data, and
/// because it doesn't need to reallocate memory along the way.
pub struct FileEncoder {
    // The input buffer. For adequate performance, we need to be able to write
    // directly to the unwritten region of the buffer, without calling copy_from_slice.
    // Note that our buffer is always initialized so that we can do that direct access
    // without unsafe code. Users of this type write many more than BUF_SIZE bytes, so the
    // initialization is approximately free.
    buf: Box<[u8; BUF_SIZE]>,
    // Number of bytes in `buf` that have been written but not yet flushed to `file`.
    // Invariant: `buffered <= BUF_SIZE`.
    buffered: usize,
    // Number of bytes already flushed to `file`; `flushed + buffered` is the
    // logical stream position (see `position`).
    flushed: usize,
    file: File,
    // This is used to implement delayed error handling, as described in the
    // comment on `trait Encoder`.
    res: Result<(), io::Error>,
    // Path the file was opened at; returned alongside the error by `finish`.
    path: PathBuf,
    // Set by `finish`, cleared by every write; checked by the debug-only `Drop`
    // impl to catch encoders dropped without being finished.
    #[cfg(debug_assertions)]
    finished: bool,
}
4950impl FileEncoder {
51pub fn new<P: AsRef<Path>>(path: P) -> io::Result<Self> {
52// File::create opens the file for writing only. When -Zmeta-stats is enabled, the metadata
53 // encoder rewinds the file to inspect what was written. So we need to always open the file
54 // for reading and writing.
55let file =
56File::options().read(true).write(true).create(true).truncate(true).open(&path)?;
5758Ok(FileEncoder {
59 buf: ::alloc::vec::from_elem(0u8, BUF_SIZE)vec![0u8; BUF_SIZE].into_boxed_slice().try_into().unwrap(),
60 path: path.as_ref().into(),
61 buffered: 0,
62 flushed: 0,
63file,
64 res: Ok(()),
65#[cfg(debug_assertions)]
66finished: false,
67 })
68 }
6970#[inline]
71pub fn position(&self) -> usize {
72// Tracking position this way instead of having a `self.position` field
73 // means that we only need to update `self.buffered` on a write call,
74 // as opposed to updating `self.position` and `self.buffered`.
75self.flushed.debug_strict_add(self.buffered)
76 }
7778#[cold]
79 #[inline(never)]
80pub fn flush(&mut self) {
81#[cfg(debug_assertions)]
82{
83self.finished = false;
84 }
85if self.res.is_ok() {
86self.res = self.file.write_all(&self.buf[..self.buffered]);
87 }
88self.flushed += self.buffered;
89self.buffered = 0;
90 }
9192#[inline]
93pub fn file(&self) -> &File {
94&self.file
95 }
9697#[inline]
98pub fn path(&self) -> &Path {
99&self.path
100 }
101102#[inline]
103fn buffer_empty(&mut self) -> &mut [u8] {
104// SAFETY: self.buffered is inbounds as an invariant of the type
105unsafe { self.buf.get_unchecked_mut(self.buffered..) }
106 }
107108#[cold]
109 #[inline(never)]
110fn write_all_cold_path(&mut self, buf: &[u8]) {
111self.flush();
112if let Some(dest) = self.buf.get_mut(..buf.len()) {
113dest.copy_from_slice(buf);
114self.buffered += buf.len();
115 } else {
116if self.res.is_ok() {
117self.res = self.file.write_all(buf);
118 }
119self.flushed += buf.len();
120 }
121 }
122123#[inline]
124fn write_all(&mut self, buf: &[u8]) {
125#[cfg(debug_assertions)]
126{
127self.finished = false;
128 }
129if let Some(dest) = self.buffer_empty().get_mut(..buf.len()) {
130dest.copy_from_slice(buf);
131self.buffered = self.buffered.debug_strict_add(buf.len());
132 } else {
133self.write_all_cold_path(buf);
134 }
135 }
136137/// Write up to `N` bytes to this encoder.
138 ///
139 /// This function can be used to avoid the overhead of calling memcpy for writes that
140 /// have runtime-variable length, but are small and have a small fixed upper bound.
141 ///
142 /// This can be used to do in-place encoding as is done for leb128 (without this function
143 /// we would need to write to a temporary buffer then memcpy into the encoder), and it can
144 /// also be used to implement the varint scheme we use for rmeta and dep graph encoding,
145 /// where we only want to encode the first few bytes of an integer. Copying in the whole
146 /// integer then only advancing the encoder state for the few bytes we care about is more
147 /// efficient than calling [`FileEncoder::write_all`], because variable-size copies are
148 /// always lowered to `memcpy`, which has overhead and contains a lot of logic we can bypass
149 /// with this function. Note that common architectures support fixed-size writes up to 8 bytes
150 /// with one instruction, so while this does in some sense do wasted work, we come out ahead.
151#[inline]
152pub fn write_with<const N: usize>(&mut self, visitor: impl FnOnce(&mut [u8; N]) -> usize) {
153#[cfg(debug_assertions)]
154{
155self.finished = false;
156 }
157let flush_threshold = const { BUF_SIZE.checked_sub(N).unwrap() };
158if std::intrinsics::unlikely(self.buffered > flush_threshold) {
159self.flush();
160 }
161// SAFETY: We checked above that N < self.buffer_empty().len(),
162 // and if isn't, flush ensures that our empty buffer is now BUF_SIZE.
163 // We produce a post-mono error if N > BUF_SIZE.
164let buf = unsafe { self.buffer_empty().first_chunk_mut::<N>().unwrap_unchecked() };
165let written = visitor(buf);
166// We have to ensure that an errant visitor cannot cause self.buffered to exceed BUF_SIZE.
167if written > N {
168Self::panic_invalid_write::<N>(written);
169 }
170self.buffered = self.buffered.debug_strict_add(written);
171 }
172173#[cold]
174 #[inline(never)]
175fn panic_invalid_write<const N: usize>(written: usize) {
176{
::core::panicking::panic_fmt(format_args!("FileEncoder::write_with::<{0}> cannot be used to write {1} bytes",
N, written));
};panic!("FileEncoder::write_with::<{N}> cannot be used to write {written} bytes");
177 }
178179/// Helper for calls where [`FileEncoder::write_with`] always writes the whole array.
180#[inline]
181pub fn write_array<const N: usize>(&mut self, buf: [u8; N]) {
182self.write_with(|dest| {
183*dest = buf;
184N185 })
186 }
187188pub fn finish(&mut self) -> FileEncodeResult {
189self.write_all(MAGIC_END_BYTES);
190self.flush();
191#[cfg(debug_assertions)]
192{
193self.finished = true;
194 }
195match std::mem::replace(&mut self.res, Ok(())) {
196Ok(()) => Ok(self.position()),
197Err(e) => Err((self.path.clone(), e)),
198 }
199 }
200}
201202#[cfg(debug_assertions)]
203impl Dropfor FileEncoder {
204fn drop(&mut self) {
205if !std::thread::panicking() {
206if !self.finished {
::core::panicking::panic("assertion failed: self.finished")
};assert!(self.finished);
207 }
208 }
209}
// Generates one `Encoder` integer method that forwards to `write_with`,
// letting the leb128 routine encode directly into the encoder's buffer
// (avoiding a temporary buffer plus memcpy).
macro_rules! write_leb128 {
    ($this_fn:ident, $int_ty:ty, $write_leb_fn:ident) => {
        #[inline]
        fn $this_fn(&mut self, v: $int_ty) {
            self.write_with(|buf| leb128::$write_leb_fn(buf, v))
        }
    };
}
219220impl Encoderfor FileEncoder {
221self
v
self.write_with(|buf| leb128::write_usize_leb128(buf, v));write_leb128!(emit_usize, usize, write_usize_leb128);
222self
v
self.write_with(|buf| leb128::write_u128_leb128(buf, v));write_leb128!(emit_u128, u128, write_u128_leb128);
223self
v
self.write_with(|buf| leb128::write_u64_leb128(buf, v));write_leb128!(emit_u64, u64, write_u64_leb128);
224self
v
self.write_with(|buf| leb128::write_u32_leb128(buf, v));write_leb128!(emit_u32, u32, write_u32_leb128);
225226#[inline]
227fn emit_u16(&mut self, v: u16) {
228self.write_array(v.to_le_bytes());
229 }
230231#[inline]
232fn emit_u8(&mut self, v: u8) {
233self.write_array([v]);
234 }
235236self
v
self.write_with(|buf| leb128::write_isize_leb128(buf, v));write_leb128!(emit_isize, isize, write_isize_leb128);
237self
v
self.write_with(|buf| leb128::write_i128_leb128(buf, v));write_leb128!(emit_i128, i128, write_i128_leb128);
238self
v
self.write_with(|buf| leb128::write_i64_leb128(buf, v));write_leb128!(emit_i64, i64, write_i64_leb128);
239self
v
self.write_with(|buf| leb128::write_i32_leb128(buf, v));write_leb128!(emit_i32, i32, write_i32_leb128);
240241#[inline]
242fn emit_i16(&mut self, v: i16) {
243self.write_array(v.to_le_bytes());
244 }
245246#[inline]
247fn emit_raw_bytes(&mut self, s: &[u8]) {
248self.write_all(s);
249 }
250}
251252// -----------------------------------------------------------------------------
253// Decoder
254// -----------------------------------------------------------------------------
// Conceptually, `MemDecoder` wraps a `&[u8]` with a cursor into it that is always valid.
// This is implemented with three pointers, two which represent the original slice and a
// third that is our cursor.
// It is an invariant of this type that start <= current <= end.
// Additionally, the implementation of this type never modifies start and end.
pub struct MemDecoder<'a> {
    start: *const u8,
    current: *const u8,
    end: *const u8,
    // Ties the decoder's lifetime to the borrowed byte slice it was built from,
    // since the raw pointers above carry no lifetime of their own.
    _marker: PhantomData<&'a u8>,
}
267268impl<'a> MemDecoder<'a> {
269#[inline]
270pub fn new(data: &'a [u8], position: usize) -> Result<MemDecoder<'a>, ()> {
271let data = data.strip_suffix(MAGIC_END_BYTES).ok_or(())?;
272let Range { start, end } = data.as_ptr_range();
273Ok(MemDecoder { start, current: data[position..].as_ptr(), end, _marker: PhantomData })
274 }
275276#[inline]
277pub fn split_at(&self, position: usize) -> MemDecoder<'a> {
278if !(position <= self.len()) {
::core::panicking::panic("assertion failed: position <= self.len()")
};assert!(position <= self.len());
279// SAFETY: We checked above that this offset is within the original slice
280let current = unsafe { self.start.add(position) };
281MemDecoder { start: self.start, current, end: self.end, _marker: PhantomData }
282 }
283284#[inline]
285pub fn len(&self) -> usize {
286// SAFETY: This recovers the length of the original slice, only using members we never modify.
287unsafe { self.end.offset_from_unsigned(self.start) }
288 }
289290#[inline]
291pub fn remaining(&self) -> usize {
292// SAFETY: This type guarantees current <= end.
293unsafe { self.end.offset_from_unsigned(self.current) }
294 }
295296#[cold]
297 #[inline(never)]
298fn decoder_exhausted() -> ! {
299{ ::core::panicking::panic_fmt(format_args!("MemDecoder exhausted")); }panic!("MemDecoder exhausted")300 }
301302#[inline]
303pub fn read_array<const N: usize>(&mut self) -> [u8; N] {
304self.read_raw_bytes(N).try_into().unwrap()
305 }
306307/// While we could manually expose manipulation of the decoder position,
308 /// all current users of that method would need to reset the position later,
309 /// incurring the bounds check of set_position twice.
310#[inline]
311pub fn with_position<F, T>(&mut self, pos: usize, func: F) -> T
312where
313F: Fn(&mut MemDecoder<'a>) -> T,
314 {
315struct SetOnDrop<'a, 'guarded> {
316 decoder: &'guarded mut MemDecoder<'a>,
317 current: *const u8,
318 }
319impl Dropfor SetOnDrop<'_, '_> {
320fn drop(&mut self) {
321self.decoder.current = self.current;
322 }
323 }
324325if pos >= self.len() {
326Self::decoder_exhausted();
327 }
328let previous = self.current;
329// SAFETY: We just checked if this add is in-bounds above.
330unsafe {
331self.current = self.start.add(pos);
332 }
333let guard = SetOnDrop { current: previous, decoder: self };
334func(guard.decoder)
335 }
336}
// Generates one `Decoder` integer method that forwards to the matching leb128
// read routine, which decodes directly from the decoder's cursor.
macro_rules! read_leb128 {
    ($this_fn:ident, $int_ty:ty, $read_leb_fn:ident) => {
        #[inline]
        fn $this_fn(&mut self) -> $int_ty {
            leb128::$read_leb_fn(self)
        }
    };
}
346347impl<'a> Decoderfor MemDecoder<'a> {
348self
leb128::read_usize_leb128(self);read_leb128!(read_usize, usize, read_usize_leb128);
349self
leb128::read_u128_leb128(self);read_leb128!(read_u128, u128, read_u128_leb128);
350self
leb128::read_u64_leb128(self);read_leb128!(read_u64, u64, read_u64_leb128);
351self
leb128::read_u32_leb128(self);read_leb128!(read_u32, u32, read_u32_leb128);
352353#[inline]
354fn read_u16(&mut self) -> u16 {
355u16::from_le_bytes(self.read_array())
356 }
357358#[inline]
359fn read_u8(&mut self) -> u8 {
360if self.current == self.end {
361Self::decoder_exhausted();
362 }
363// SAFETY: This type guarantees current <= end, and we just checked current == end.
364unsafe {
365let byte = *self.current;
366self.current = self.current.add(1);
367byte368 }
369 }
370371self
leb128::read_isize_leb128(self);read_leb128!(read_isize, isize, read_isize_leb128);
372self
leb128::read_i128_leb128(self);read_leb128!(read_i128, i128, read_i128_leb128);
373self
leb128::read_i64_leb128(self);read_leb128!(read_i64, i64, read_i64_leb128);
374self
leb128::read_i32_leb128(self);read_leb128!(read_i32, i32, read_i32_leb128);
375376#[inline]
377fn read_i16(&mut self) -> i16 {
378i16::from_le_bytes(self.read_array())
379 }
380381#[inline]
382fn read_raw_bytes(&mut self, bytes: usize) -> &'a [u8] {
383if bytes > self.remaining() {
384Self::decoder_exhausted();
385 }
386// SAFETY: We just checked if this range is in-bounds above.
387unsafe {
388let slice = std::slice::from_raw_parts(self.current, bytes);
389self.current = self.current.add(bytes);
390slice391 }
392 }
393394#[inline]
395fn peek_byte(&self) -> u8 {
396if self.current == self.end {
397Self::decoder_exhausted();
398 }
399// SAFETY: This type guarantees current is inbounds or one-past-the-end, which is end.
400 // Since we just checked current == end, the current pointer must be inbounds.
401unsafe { *self.current }
402 }
403404#[inline]
405fn position(&self) -> usize {
406// SAFETY: This type guarantees start <= current
407unsafe { self.current.offset_from_unsigned(self.start) }
408 }
409}
410411// Specializations for contiguous byte sequences follow. The default implementations for slices
412// encode and decode each element individually. This isn't necessary for `u8` slices when using
413// opaque encoders and decoders, because each `u8` is unchanged by encoding and decoding.
414// Therefore, we can use more efficient implementations that process the entire sequence at once.
415416// Specialize encoding byte slices. This specialization also applies to encoding `Vec<u8>`s, etc.,
417// since the default implementations call `encode` on their slices internally.
418impl Encodable<FileEncoder> for [u8] {
419fn encode(&self, e: &mut FileEncoder) {
420 Encoder::emit_usize(e, self.len());
421e.emit_raw_bytes(self);
422 }
423}
424425// Specialize decoding `Vec<u8>`. This specialization also applies to decoding `Box<[u8]>`s, etc.,
426// since the default implementations call `decode` to produce a `Vec<u8>` internally.
427impl<'a> Decodable<MemDecoder<'a>> for Vec<u8> {
428fn decode(d: &mut MemDecoder<'a>) -> Self {
429let len = Decoder::read_usize(d);
430d.read_raw_bytes(len).to_owned()
431 }
432}
/// An integer that will always encode to 8 bytes.
pub struct IntEncodedWithFixedSize(pub u64);

impl IntEncodedWithFixedSize {
    // Fixed on-disk size in bytes; asserted by the `Encodable` impl and used
    // by the `Decodable` impl to read back exactly this many bytes.
    pub const ENCODED_SIZE: usize = 8;
}
440441impl Encodable<FileEncoder> for IntEncodedWithFixedSize {
442#[inline]
443fn encode(&self, e: &mut FileEncoder) {
444let start_pos = e.position();
445e.write_array(self.0.to_le_bytes());
446let end_pos = e.position();
447if true {
match (&(end_pos - start_pos), &IntEncodedWithFixedSize::ENCODED_SIZE) {
(left_val, right_val) => {
if !(*left_val == *right_val) {
let kind = ::core::panicking::AssertKind::Eq;
::core::panicking::assert_failed(kind, &*left_val,
&*right_val, ::core::option::Option::None);
}
}
};
};debug_assert_eq!((end_pos - start_pos), IntEncodedWithFixedSize::ENCODED_SIZE);
448 }
449}
450451impl<'a> Decodable<MemDecoder<'a>> for IntEncodedWithFixedSize {
452#[inline]
453fn decode(decoder: &mut MemDecoder<'a>) -> IntEncodedWithFixedSize {
454let bytes = decoder.read_array::<{ IntEncodedWithFixedSize::ENCODED_SIZE }>();
455IntEncodedWithFixedSize(u64::from_le_bytes(bytes))
456 }
457}
458459#[cfg(test)]
460mod tests;