std/io/buffered/bufreader/buffer.rs
//! An encapsulation of `BufReader`'s buffer management logic.
//!
//! This module factors out the basic functionality of `BufReader` in order to protect two core
//! invariants:
//!
//! * the first `filled` bytes of `buf` are always initialized
//! * `pos` is always <= `filled`
//!
//! Since this module encapsulates the buffer management logic, we can ensure that the range
//! `pos..filled` is always a valid index into the initialized region of the buffer. This means
//! that user code which wants to do reads from a `BufReader` via `buffer` + `consume` can do so
//! without encountering any runtime bounds checks.
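//!
//! A minimal usage sketch (illustrative only; the real callers are the `BufReader` methods, and
//! `reader`/`dst` here stand in for any `Read` source and destination slice):
//!
//! ```ignore (illustrative-sketch)
//! let mut buffer = Buffer::with_capacity(8 * 1024);
//!
//! // Refill when empty, then copy out however many bytes we need.
//! let available = buffer.fill_buf(&mut reader)?;
//! let n = available.len().min(dst.len());
//! dst[..n].copy_from_slice(&available[..n]);
//!
//! // Mark those bytes as read; `pos` advances but never past `filled`.
//! buffer.consume(n);
//! ```
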
use crate::cmp;
use crate::io::{self, BorrowedBuf, ErrorKind, Read};
use crate::mem::MaybeUninit;

pub struct Buffer {
    // The buffer.
    buf: Box<[MaybeUninit<u8>]>,
    // The current seek offset into `buf`, must always be <= `filled`.
    pos: usize,
    // Each call to `fill_buf` sets `filled` to indicate how many bytes at the start of `buf` are
    // initialized with bytes from a read.
    filled: usize,
    // This is the max number of bytes returned across all `fill_buf` calls. We track this so that
    // we can accurately tell `read_buf` how many bytes of `buf` are initialized, to bypass as much
    // of its defensive initialization as possible. Note that while this is often the same as
    // `filled`, it doesn't need to be: calls to `fill_buf` are not required to actually fill the
    // buffer, and omitting this tracking is a huge perf regression for `Read` impls that do not.
    initialized: usize,
}

impl Buffer {
    #[inline]
    pub fn with_capacity(capacity: usize) -> Self {
        let buf = Box::new_uninit_slice(capacity);
        Self { buf, pos: 0, filled: 0, initialized: 0 }
    }

    #[inline]
    pub fn try_with_capacity(capacity: usize) -> io::Result<Self> {
        match Box::try_new_uninit_slice(capacity) {
            Ok(buf) => Ok(Self { buf, pos: 0, filled: 0, initialized: 0 }),
            Err(_) => {
                Err(io::const_io_error!(ErrorKind::OutOfMemory, "failed to allocate read buffer"))
            }
        }
    }

    #[inline]
    pub fn buffer(&self) -> &[u8] {
        // SAFETY: self.pos and self.filled are valid indices, self.filled >= self.pos, and
        // that region is initialized because those are all invariants of this type.
        unsafe { MaybeUninit::slice_assume_init_ref(self.buf.get_unchecked(self.pos..self.filled)) }
    }

    #[inline]
    pub fn capacity(&self) -> usize {
        self.buf.len()
    }

    #[inline]
    pub fn filled(&self) -> usize {
        self.filled
    }

    #[inline]
    pub fn pos(&self) -> usize {
        self.pos
    }

    // This is only used by a test which asserts that the initialization-tracking is correct.
    #[cfg(test)]
    pub fn initialized(&self) -> usize {
        self.initialized
    }

    #[inline]
    pub fn discard_buffer(&mut self) {
        self.pos = 0;
        self.filled = 0;
        // Note: `initialized` is deliberately not reset; the discarded bytes remain initialized
        // memory, so later refills can still skip `read_buf`'s defensive re-initialization.
    }
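
    /// Advance `pos` by `amt`, clamping to `filled` so that the `pos <= filled` invariant holds
    /// even if a caller passes an overlarge `amt`.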
    #[inline]
    pub fn consume(&mut self, amt: usize) {
        self.pos = cmp::min(self.pos + amt, self.filled);
    }

    /// If there are `amt` bytes available in the buffer, pass a slice containing those bytes to
    /// `visitor` and return true. If there are not enough bytes available, return false.
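    ///
    /// A sketch of the intended calling pattern (illustrative only; `buffer` here is any
    /// `Buffer` value):
    ///
    /// ```ignore (illustrative-sketch)
    /// let mut header = [0u8; 4];
    /// if buffer.consume_with(4, |bytes| header.copy_from_slice(bytes)) {
    ///     // Four bytes were buffered: they have been copied out and marked consumed.
    /// } else {
    ///     // Fewer than four bytes were buffered: refill and try again.
    /// }
    /// ```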
    #[inline]
    pub fn consume_with<V>(&mut self, amt: usize, mut visitor: V) -> bool
    where
        V: FnMut(&[u8]),
    {
        if let Some(claimed) = self.buffer().get(..amt) {
            visitor(claimed);
            // If the indexing into self.buffer() succeeds, amt must be a valid increment.
            self.pos += amt;
            true
        } else {
            false
        }
    }
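
    /// Move `pos` back by `amt`, saturating at zero; this "un-reads" buffered bytes so they will
    /// be returned again (e.g. after a small backward `seek_relative` within the buffer).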
    #[inline]
    pub fn unconsume(&mut self, amt: usize) {
        self.pos = self.pos.saturating_sub(amt);
    }

    /// Read more bytes into the buffer without discarding any of its contents.
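    ///
    /// New bytes are appended starting at `filled`, so the unread bytes in `pos..filled` are
    /// preserved; returns how many bytes were read. A sketch (illustrative only):
    ///
    /// ```ignore (illustrative-sketch)
    /// // Suppose pos == 3 and filled == 8, i.e. bytes 3..8 are still unread.
    /// let n = buffer.read_more(&mut reader)?;
    /// // The unread bytes are untouched and `filled` has grown by `n`.
    /// assert_eq!(buffer.filled(), 8 + n);
    /// ```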
    pub fn read_more(&mut self, mut reader: impl Read) -> io::Result<usize> {
        // Append after the existing data: slicing from `self.filled` (not `self.pos`) keeps the
        // unread bytes in `pos..filled` intact.
        let mut buf = BorrowedBuf::from(&mut self.buf[self.filled..]);
        let old_init = self.initialized - self.filled;
        // SAFETY: the first `old_init` bytes of this slice were initialized by earlier reads,
        // since `self.initialized >= self.filled` always holds.
        unsafe {
            buf.set_init(old_init);
        }
        reader.read_buf(buf.unfilled())?;
        self.filled += buf.len();
        self.initialized += buf.init_len() - old_init;
        Ok(buf.len())
    }

    /// Remove bytes that have already been read from the buffer.
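    ///
    /// This copies the bytes from `pos` onward down to the start of `buf`, freeing space at the
    /// end so that a subsequent `read_more` can append to it.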
    pub fn backshift(&mut self) {
        self.buf.copy_within(self.pos.., 0);
        self.initialized -= self.pos;
        self.filled -= self.pos;
        self.pos = 0;
    }
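
    /// Return the buffered bytes, first refilling from `reader` (via a single `read_buf` call)
    /// if the buffer is currently empty.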
    #[inline]
    pub fn fill_buf(&mut self, mut reader: impl Read) -> io::Result<&[u8]> {
        // If we've reached the end of our internal buffer then we need to fetch
        // some more data from the reader.
        // Branch using `>=` instead of the more correct `==`
        // to tell the compiler that the pos..filled slice is always valid.
        if self.pos >= self.filled {
            debug_assert!(self.pos == self.filled);

            let mut buf = BorrowedBuf::from(&mut *self.buf);
            // SAFETY: the first `self.initialized` bytes of `buf` have always been initialized,
            // which is an invariant of this type.
            unsafe {
                buf.set_init(self.initialized);
            }

            let result = reader.read_buf(buf.unfilled());

            // Update our state to match what was actually read before propagating any error, so
            // the buffer never advertises stale contents.
            self.pos = 0;
            self.filled = buf.len();
            self.initialized = buf.init_len();

            result?;
        }
        Ok(self.buffer())
    }
}