Auto merge of #46485 - khuey:cursor-read_exact, r=Manishearth

Add a specialization of read_exact for Cursor.

The read_exact implementation for &[u8] is optimized and usually allows LLVM to reduce a read_exact call for a small number of bytes to a bounds check and a register load instead of a generic memcpy. On a workload of mine that decompresses, deserializes (via bincode), and processes some data, this leads to a 40% speedup by essentially eliminating the deserialization overhead.
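
For context (this is not part of the commit itself), the pattern that benefits is a sequence of small, fixed-size reads from an in-memory buffer, which is roughly what a bincode-style deserializer issues. A minimal sketch of such a workload, with made-up data and field sizes:

    use std::io::{Cursor, Read};

    fn main() -> std::io::Result<()> {
        // Pretend this buffer came from decompressing some serialized data.
        let data: Vec<u8> = vec![1, 0, 0, 0, 8, 0, 0, 0, 0, 0, 0, 0];
        let mut cursor = Cursor::new(data);

        // A deserializer issues many small, fixed-size reads like these.
        // With the specialized Cursor::read_exact, each call can usually be
        // reduced to a bounds check plus a register-sized load instead of a
        // generic memcpy.
        let mut tag = [0u8; 4];
        cursor.read_exact(&mut tag)?;
        let mut len = [0u8; 8];
        cursor.read_exact(&mut len)?;

        assert_eq!(tag, [1, 0, 0, 0]);
        assert_eq!(len, [8, 0, 0, 0, 0, 0, 0, 0]);
        Ok(())
    }

Delegating through fill_buf() to the optimized &[u8] read_exact is what the new specialization does; the self.pos bookkeeping keeps subsequent reads starting at the right offset.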
Commit c16f480fa6, authored by bors, 2017-12-04 10:02:48 +00:00


@@ -230,6 +230,13 @@ impl<T> Read for Cursor<T> where T: AsRef<[u8]> {
        Ok(n)
    }

    fn read_exact(&mut self, buf: &mut [u8]) -> io::Result<()> {
        let n = buf.len();
        Read::read_exact(&mut self.fill_buf()?, buf)?;
        self.pos += n as u64;
        Ok(())
    }

    #[inline]
    unsafe fn initializer(&self) -> Initializer {
        Initializer::nop()
@@ -475,6 +482,24 @@ mod tests {
        assert_eq!(reader.read(&mut buf).unwrap(), 0);
    }

    #[test]
    fn test_read_exact() {
        let in_buf = vec![0, 1, 2, 3, 4, 5, 6, 7];
        let reader = &mut &in_buf[..];
        let mut buf = [];
        assert!(reader.read_exact(&mut buf).is_ok());
        let mut buf = [8];
        assert!(reader.read_exact(&mut buf).is_ok());
        assert_eq!(buf[0], 0);
        assert_eq!(reader.len(), 7);
        let mut buf = [0, 0, 0, 0, 0, 0, 0];
        assert!(reader.read_exact(&mut buf).is_ok());
        assert_eq!(buf, [1, 2, 3, 4, 5, 6, 7]);
        assert_eq!(reader.len(), 0);
        let mut buf = [0];
        assert!(reader.read_exact(&mut buf).is_err());
    }

    #[test]
    fn test_buf_reader() {
        let in_buf = vec![0, 1, 2, 3, 4, 5, 6, 7];