use crate::{
memory_units::{Bytes, Pages, RoundUpTo},
value::LittleEndianConvert,
Error,
};
use alloc::{format, rc::Rc, string::ToString, vec::Vec};
use core::{
cell::{Cell, Ref, RefCell, RefMut},
cmp,
fmt,
ops::Range,
};
use parity_wasm::elements::ResizableLimits;
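// Backend selection for the byte buffer that backs linear memory: an
// mmap-based implementation on 64-bit targets with the `virtual_memory`
// feature enabled, and a `Vec`-based implementation everywhere else.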
#[cfg(all(feature = "virtual_memory", target_pointer_width = "64"))]
#[path = "mmap_bytebuf.rs"]
mod bytebuf;
#[cfg(not(all(feature = "virtual_memory", target_pointer_width = "64")))]
#[path = "vec_bytebuf.rs"]
mod bytebuf;
use self::bytebuf::ByteBuf;
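/// Size of a page of linear memory: 64 KiB.
///
/// The size of a memory is always an integer multiple of the page size.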
pub const LINEAR_MEMORY_PAGE_SIZE: Bytes = Bytes(65536);
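/// Reference to a [`MemoryInstance`] with reference-counting semantics.
///
/// Cloning a `MemoryRef` only bumps a reference count; all clones point to
/// the same underlying memory instance.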
#[derive(Clone, Debug)]
pub struct MemoryRef(Rc<MemoryInstance>);
impl ::core::ops::Deref for MemoryRef {
type Target = MemoryInstance;
fn deref(&self) -> &MemoryInstance {
&self.0
}
}
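/// Runtime representation of a linear memory (or `memory` for short).
///
/// A memory is a contiguous, mutable array of raw bytes. It is created with
/// an initial size in pages and can be grown, but never shrunk; an optional
/// maximum caps how far it may grow. All reads and writes are bounds-checked
/// and return an `Error` on out-of-bounds access.
///
/// A minimal usage sketch, assuming the crate re-exports `MemoryInstance`
/// and `memory_units::Pages` under the `wasmi` name:
///
/// ```no_run
/// use wasmi::{memory_units::Pages, MemoryInstance};
///
/// let memory = MemoryInstance::alloc(Pages(1), Some(Pages(4))).unwrap();
/// memory.set(0, &[1, 2, 3]).unwrap();
/// assert_eq!(memory.get_value::<u8>(1).unwrap(), 2);
/// ```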
pub struct MemoryInstance {
/// The declared limits of this memory: initial and optional maximum pages.
limits: ResizableLimits,
/// The backing byte buffer, provided by the backend selected above.
buffer: RefCell<ByteBuf>,
/// The number of pages this memory was created with.
initial: Pages,
/// Cached length of `buffer`, in bytes; updated on `grow`.
current_size: Cell<usize>,
/// The maximum number of pages this memory is allowed to grow to, if any.
maximum: Option<Pages>,
}
impl fmt::Debug for MemoryInstance {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
f.debug_struct("MemoryInstance")
.field("limits", &self.limits)
.field("buffer.len", &self.buffer.borrow().len())
.field("maximum", &self.maximum)
.field("initial", &self.initial)
.finish()
}
}
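/// A byte region that has already been bounds-checked against the backing
/// buffer, so its `range` can be used to index the buffer directly.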
struct CheckedRegion {
offset: usize,
size: usize,
}
impl CheckedRegion {
fn range(&self) -> Range<usize> {
self.offset..self.offset + self.size
}
fn intersects(&self, other: &Self) -> bool {
let low = cmp::max(self.offset, other.offset);
let high = cmp::min(self.offset + self.size, other.offset + other.size);
low < high
}
}
impl MemoryInstance {
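/// Allocates a new memory instance with the given `initial` size and
/// optional `maximum`, both in pages, and returns a reference to it.
///
/// # Errors
///
/// Returns an `Err` if the limits don't fit into `u32` or fail validation
/// (for example, when `initial` exceeds `maximum`).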
pub fn alloc(initial: Pages, maximum: Option<Pages>) -> Result<MemoryRef, Error> {
{
// `TryInto` is only in the prelude from the 2021 edition onward.
use core::convert::TryInto;
let initial_u32: u32 = initial.0.try_into().map_err(|_| {
Error::Memory(format!("initial ({}) can't be coerced to u32", initial.0))
})?;
let maximum_u32: Option<u32> = maximum
.map(|maximum_pages| {
maximum_pages.0.try_into().map_err(|_| {
Error::Memory(format!(
"maximum ({}) can't be coerced to u32",
maximum_pages.0
))
})
})
.transpose()?;
validation::validate_memory(initial_u32, maximum_u32).map_err(Error::Memory)?;
}
let memory = MemoryInstance::new(initial, maximum)?;
Ok(MemoryRef(Rc::new(memory)))
}
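/// Creates a memory instance from limits that are assumed to be already
/// validated; `alloc` is the public entry point that validates first.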
fn new(initial: Pages, maximum: Option<Pages>) -> Result<Self, Error> {
let limits = ResizableLimits::new(initial.0 as u32, maximum.map(|p| p.0 as u32));
let initial_size: Bytes = initial.into();
Ok(MemoryInstance {
limits,
buffer: RefCell::new(ByteBuf::new(initial_size.0).map_err(Error::Memory)?),
initial,
current_size: Cell::new(initial_size.0),
maximum,
})
}
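/// Returns the resizable limits this memory was created with.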
pub(crate) fn limits(&self) -> &ResizableLimits {
&self.limits
}
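/// Returns the number of pages this memory was created with.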
pub fn initial(&self) -> Pages {
self.initial
}
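/// Returns the maximum number of pages this memory may grow to, if any.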
pub fn maximum(&self) -> Option<Pages> {
self.maximum
}
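/// Returns the current size of this memory, in pages.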
pub fn current_size(&self) -> Pages {
Bytes(self.buffer.borrow().len()).round_up_to()
}
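/// Reads a value of type `T` at the given `offset`, decoding it from
/// little-endian bytes.
///
/// # Errors
///
/// Returns an `Err` if the value's byte range is out of bounds.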
pub fn get_value<T: LittleEndianConvert>(&self, offset: u32) -> Result<T, Error> {
let mut bytes = <<T as LittleEndianConvert>::Bytes as Default>::default();
self.get_into(offset, bytes.as_mut())?;
let value = T::from_le_bytes(bytes);
Ok(value)
}
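/// Copies `size` bytes starting at `offset` into a freshly allocated
/// `Vec<u8>`.
///
/// # Errors
///
/// Returns an `Err` if the requested region is out of bounds.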
#[deprecated(since = "0.10.0", note = "use the get_into or get_value methods instead")]
pub fn get(&self, offset: u32, size: usize) -> Result<Vec<u8>, Error> {
let mut buffer = self.buffer.borrow_mut();
let region = self.checked_region(&mut buffer, offset as usize, size)?;
Ok(buffer.as_slice_mut()[region.range()].to_vec())
}
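/// Copies memory contents starting at `offset` into `target`, filling it
/// entirely.
///
/// # Errors
///
/// Returns an `Err` if the region `offset..offset + target.len()` is out
/// of bounds.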
pub fn get_into(&self, offset: u32, target: &mut [u8]) -> Result<(), Error> {
let mut buffer = self.buffer.borrow_mut();
let region = self.checked_region(&mut buffer, offset as usize, target.len())?;
target.copy_from_slice(&buffer.as_slice_mut()[region.range()]);
Ok(())
}
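/// Writes the bytes of `value` into memory starting at `offset`.
///
/// # Errors
///
/// Returns an `Err` if the region `offset..offset + value.len()` is out
/// of bounds.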
pub fn set(&self, offset: u32, value: &[u8]) -> Result<(), Error> {
let mut buffer = self.buffer.borrow_mut();
let range = self
.checked_region(&mut buffer, offset as usize, value.len())?
.range();
buffer.as_slice_mut()[range].copy_from_slice(value);
Ok(())
}
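/// Encodes `value` as little-endian bytes and writes them at `offset`.
///
/// # Errors
///
/// Returns an `Err` if the value's byte range is out of bounds.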
pub fn set_value<T: LittleEndianConvert>(&self, offset: u32, value: T) -> Result<(), Error> {
let bytes = T::into_le_bytes(value);
self.set(offset, bytes.as_ref())?;
Ok(())
}
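/// Grows this memory by `additional` pages and returns the size, in pages,
/// the memory had before growing. Growing by zero pages succeeds without
/// reallocating.
///
/// # Errors
///
/// Returns an `Err` if the new size would exceed the declared maximum, or
/// the validation limit of 65536 pages when no maximum is declared.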
pub fn grow(&self, additional: Pages) -> Result<Pages, Error> {
let size_before_grow: Pages = self.current_size();
if additional == Pages(0) {
return Ok(size_before_grow);
}
if additional > Pages(65536) {
return Err(Error::Memory(
"Trying to grow memory by more than 65536 pages".to_string(),
));
}
let new_size: Pages = size_before_grow + additional;
let maximum = self
.maximum
.unwrap_or(Pages(validation::LINEAR_MEMORY_MAX_PAGES as usize));
if new_size > maximum {
return Err(Error::Memory(format!(
"Trying to grow memory by {} pages when already have {}",
additional.0, size_before_grow.0,
)));
}
let new_buffer_length: Bytes = new_size.into();
self.buffer
.borrow_mut()
.realloc(new_buffer_length.0)
.map_err(Error::Memory)?;
self.current_size.set(new_buffer_length.0);
Ok(size_before_grow)
}
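/// Bounds-checks the region `offset..offset + size` against `buffer`,
/// rejecting both overflowing and out-of-range accesses.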
fn checked_region(
&self,
buffer: &mut ByteBuf,
offset: usize,
size: usize,
) -> Result<CheckedRegion, Error> {
let end = offset.checked_add(size).ok_or_else(|| {
Error::Memory(format!(
"trying to access memory block of size {} from offset {}",
size, offset
))
})?;
if end > buffer.len() {
return Err(Error::Memory(format!(
"trying to access region [{}..{}] in memory [0..{}]",
offset,
end,
buffer.len()
)));
}
Ok(CheckedRegion { offset, size })
}
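/// Bounds-checks two regions against `buffer` in one call; used by the
/// copy routines, which need a validated source and destination pair.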
fn checked_region_pair(
&self,
buffer: &mut ByteBuf,
offset1: usize,
size1: usize,
offset2: usize,
size2: usize,
) -> Result<(CheckedRegion, CheckedRegion), Error> {
let end1 = offset1.checked_add(size1).ok_or_else(|| {
Error::Memory(format!(
"trying to access memory block of size {} from offset {}",
size1, offset1
))
})?;
let end2 = offset2.checked_add(size2).ok_or_else(|| {
Error::Memory(format!(
"trying to access memory block of size {} from offset {}",
size2, offset2
))
})?;
if end1 > buffer.len() {
return Err(Error::Memory(format!(
"trying to access region [{}..{}] in memory [0..{}]",
offset1,
end1,
buffer.len()
)));
}
if end2 > buffer.len() {
return Err(Error::Memory(format!(
"trying to access region [{}..{}] in memory [0..{}]",
offset2,
end2,
buffer.len()
)));
}
Ok((
CheckedRegion {
offset: offset1,
size: size1,
},
CheckedRegion {
offset: offset2,
size: size2,
},
))
}
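/// Copies `len` bytes inside this memory from `src_offset` to
/// `dst_offset`. The two regions may overlap.
///
/// # Errors
///
/// Returns an `Err` if either region is out of bounds.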
pub fn copy(&self, src_offset: usize, dst_offset: usize, len: usize) -> Result<(), Error> {
let mut buffer = self.buffer.borrow_mut();
let (read_region, write_region) =
self.checked_region_pair(&mut buffer, src_offset, len, dst_offset, len)?;
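// SAFETY: both regions were bounds-checked against `buffer` above, so the
// pointers are valid for `len` bytes, and `ptr::copy` permits overlapping
// source and destination.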
unsafe {
::core::ptr::copy(
buffer.as_slice()[read_region.range()].as_ptr(),
buffer.as_slice_mut()[write_region.range()].as_mut_ptr(),
len,
)
}
Ok(())
}
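/// Copies `len` bytes inside this memory from `src_offset` to
/// `dst_offset`, requiring the two regions to be disjoint.
///
/// # Errors
///
/// Returns an `Err` if either region is out of bounds or if the regions
/// overlap.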
pub fn copy_nonoverlapping(
&self,
src_offset: usize,
dst_offset: usize,
len: usize,
) -> Result<(), Error> {
let mut buffer = self.buffer.borrow_mut();
let (read_region, write_region) =
self.checked_region_pair(&mut buffer, src_offset, len, dst_offset, len)?;
if read_region.intersects(&write_region) {
return Err(Error::Memory(
"non-overlapping copy is used for overlapping regions".to_string(),
));
}
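// SAFETY: both regions were bounds-checked above and verified disjoint,
// which satisfies `ptr::copy_nonoverlapping`'s aliasing requirement.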
unsafe {
::core::ptr::copy_nonoverlapping(
buffer.as_slice()[read_region.range()].as_ptr(),
buffer.as_slice_mut()[write_region.range()].as_mut_ptr(),
len,
)
}
Ok(())
}
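/// Copies `len` bytes from `src` at `src_offset` into `dst` at
/// `dst_offset`.
///
/// When `src` and `dst` are the same instance this delegates to `copy`,
/// since mutably borrowing the same buffer twice would panic.
///
/// # Errors
///
/// Returns an `Err` if either region is out of bounds.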
pub fn transfer(
src: &MemoryRef,
src_offset: usize,
dst: &MemoryRef,
dst_offset: usize,
len: usize,
) -> Result<(), Error> {
if Rc::ptr_eq(&src.0, &dst.0) {
return src.copy(src_offset, dst_offset, len);
}
let mut src_buffer = src.buffer.borrow_mut();
let mut dst_buffer = dst.buffer.borrow_mut();
let src_range = src
.checked_region(&mut src_buffer, src_offset, len)?
.range();
let dst_range = dst
.checked_region(&mut dst_buffer, dst_offset, len)?
.range();
dst_buffer.as_slice_mut()[dst_range].copy_from_slice(&src_buffer.as_slice()[src_range]);
Ok(())
}
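/// Fills the `len` bytes starting at `offset` with `new_val`.
///
/// # Errors
///
/// Returns an `Err` if the region is out of bounds.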
pub fn clear(&self, offset: usize, new_val: u8, len: usize) -> Result<(), Error> {
let mut buffer = self.buffer.borrow_mut();
let range = self.checked_region(&mut buffer, offset, len)?.range();
for val in &mut buffer.as_slice_mut()[range] {
*val = new_val
}
Ok(())
}
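/// Fills the `len` bytes starting at `offset` with zero; shorthand for
/// `clear(offset, 0, len)`.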
pub fn zero(&self, offset: usize, len: usize) -> Result<(), Error> {
self.clear(offset, 0, len)
}
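/// Erases the contents of this memory, resetting every byte to zero while
/// keeping the current size.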
pub fn erase(&self) -> Result<(), Error> {
self.buffer.borrow_mut().erase().map_err(Error::Memory)
}
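/// Provides direct read-only access to the underlying byte buffer.
///
/// # Panics
///
/// Mutating this memory from within `f` (e.g. via `set` or `grow`) panics,
/// because the buffer is already borrowed.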
pub fn with_direct_access<R, F: FnOnce(&[u8]) -> R>(&self, f: F) -> R {
let buf = self.buffer.borrow();
f(buf.as_slice())
}
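/// Provides direct mutable access to the underlying byte buffer.
///
/// # Panics
///
/// Accessing this memory in any way from within `f` panics, because the
/// buffer is already mutably borrowed.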
pub fn with_direct_access_mut<R, F: FnOnce(&mut [u8]) -> R>(&self, f: F) -> R {
let mut buf = self.buffer.borrow_mut();
f(buf.as_slice_mut())
}
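/// Returns a read-only guard over the underlying buffer; the memory must
/// not be mutated while the guard is alive, or the borrow will panic.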
pub fn direct_access(&self) -> impl AsRef<[u8]> + '_ {
struct Buffer<'a>(Ref<'a, ByteBuf>);
impl<'a> AsRef<[u8]> for Buffer<'a> {
fn as_ref(&self) -> &[u8] {
self.0.as_slice()
}
}
Buffer(self.buffer.borrow())
}
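/// Returns a mutable guard over the underlying buffer; the memory must not
/// be otherwise accessed while the guard is alive, or the borrow will
/// panic.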
pub fn direct_access_mut(&self) -> impl AsMut<[u8]> + '_ {
struct Buffer<'a>(RefMut<'a, ByteBuf>);
impl<'a> AsMut<[u8]> for Buffer<'a> {
fn as_mut(&mut self) -> &mut [u8] {
self.0.as_slice_mut()
}
}
Buffer(self.buffer.borrow_mut())
}
}
#[cfg(test)]
mod tests {
use super::{MemoryInstance, MemoryRef, LINEAR_MEMORY_PAGE_SIZE};
use crate::{memory_units::Pages, Error};
use alloc::{rc::Rc, vec, vec::Vec};
#[test]
fn alloc() {
let mut fixtures = vec![
(0, None, true),
(0, Some(0), true),
(1, None, true),
(1, Some(1), true),
(0, Some(1), true),
(1, Some(0), false),
];
#[cfg(target_pointer_width = "64")]
fixtures.extend(&[
(65536, Some(65536), true),
(65536, Some(0), false),
(65536, None, true),
]);
for (index, &(initial, maybe_max, expected_ok)) in fixtures.iter().enumerate() {
let initial: Pages = Pages(initial);
let maximum: Option<Pages> = maybe_max.map(Pages);
let result = MemoryInstance::alloc(initial, maximum);
if result.is_ok() != expected_ok {
panic!(
"unexpected error at {}, initial={:?}, max={:?}, expected={}, result={:?}",
index, initial, maybe_max, expected_ok, result,
);
}
}
}
#[test]
fn ensure_page_size() {
use crate::memory_units::ByteSize;
assert_eq!(LINEAR_MEMORY_PAGE_SIZE, Pages::BYTE_SIZE);
}
fn create_memory(initial_content: &[u8]) -> MemoryInstance {
let mem = MemoryInstance::new(Pages(1), Some(Pages(1))).unwrap();
mem.set(0, initial_content)
.expect("Successful initialize the memory");
mem
}
fn get_into_vec(mem: &MemoryInstance, offset: u32, size: usize) -> Vec<u8> {
let mut buffer = vec![0x00; size];
mem.get_into(offset, &mut buffer[..])
.unwrap_or_else(|error| {
panic!(
"failed to retrieve data from linear memory at offset {} with size {}: {}",
offset, size, error
)
});
buffer
}
#[test]
fn copy_overlaps_1() {
let mem = create_memory(&[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]);
mem.copy(0, 4, 6).expect("Successfully copy the elements");
let result = get_into_vec(&mem, 0, 10);
assert_eq!(result, &[0, 1, 2, 3, 0, 1, 2, 3, 4, 5]);
}
#[test]
fn copy_overlaps_2() {
let mem = create_memory(&[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]);
mem.copy(4, 0, 6).expect("Successfully copy the elements");
let result = get_into_vec(&mem, 0, 10);
assert_eq!(result, &[4, 5, 6, 7, 8, 9, 6, 7, 8, 9]);
}
#[test]
fn copy_nonoverlapping() {
let mem = create_memory(&[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]);
mem.copy_nonoverlapping(0, 10, 10)
.expect("Successfully copy the elements");
let result = get_into_vec(&mem, 10, 10);
assert_eq!(result, &[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]);
}
#[test]
fn copy_nonoverlapping_overlaps_1() {
let mem = create_memory(&[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]);
let result = mem.copy_nonoverlapping(0, 4, 6);
match result {
Err(Error::Memory(_)) => {}
_ => panic!("Expected Error::Memory(_) result, but got {:?}", result),
}
}
#[test]
fn copy_nonoverlapping_overlaps_2() {
let mem = create_memory(&[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]);
let result = mem.copy_nonoverlapping(4, 0, 6);
match result {
Err(Error::Memory(_)) => {}
_ => panic!("Expected Error::Memory(_), but got {:?}", result),
}
}
#[test]
fn transfer_works() {
let src = MemoryRef(Rc::new(create_memory(&[0, 1, 2, 3, 4, 5, 6, 7, 8, 9])));
let dst = MemoryRef(Rc::new(create_memory(&[
10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
])));
MemoryInstance::transfer(&src, 4, &dst, 0, 3).unwrap();
assert_eq!(get_into_vec(&src, 0, 10), &[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]);
assert_eq!(
get_into_vec(&dst, 0, 10),
&[4, 5, 6, 13, 14, 15, 16, 17, 18, 19]
);
}
#[test]
fn transfer_still_works_with_same_memory() {
let src = MemoryRef(Rc::new(create_memory(&[0, 1, 2, 3, 4, 5, 6, 7, 8, 9])));
MemoryInstance::transfer(&src, 4, &src, 0, 3).unwrap();
assert_eq!(get_into_vec(&src, 0, 10), &[4, 5, 6, 3, 4, 5, 6, 7, 8, 9]);
}
#[test]
fn transfer_oob_with_same_memory_errors() {
let src = MemoryRef(Rc::new(create_memory(&[0, 1, 2, 3, 4, 5, 6, 7, 8, 9])));
assert!(MemoryInstance::transfer(&src, 65535, &src, 0, 3).is_err());
assert_eq!(get_into_vec(&src, 0, 10), &[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]);
}
#[test]
fn transfer_oob_errors() {
let src = MemoryRef(Rc::new(create_memory(&[0, 1, 2, 3, 4, 5, 6, 7, 8, 9])));
let dst = MemoryRef(Rc::new(create_memory(&[
10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
])));
assert!(MemoryInstance::transfer(&src, 65535, &dst, 0, 3).is_err());
assert_eq!(get_into_vec(&src, 0, 10), &[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]);
assert_eq!(
get_into_vec(&dst, 0, 10),
&[10, 11, 12, 13, 14, 15, 16, 17, 18, 19]
);
}
#[test]
fn clear() {
let mem = create_memory(&[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]);
mem.clear(0, 0x4A, 10)
.expect("To successfully clear the memory");
let result = get_into_vec(&mem, 0, 10);
assert_eq!(result, &[0x4A; 10]);
}
#[test]
fn get_into() {
let mem = MemoryInstance::new(Pages(1), None).unwrap();
mem.set(6, &[13, 17, 129])
.expect("memory set should not fail");
let mut data = [0u8; 2];
mem.get_into(7, &mut data[..])
.expect("get_into should not fail");
assert_eq!(data, [17, 129]);
}
#[test]
fn zero_copy() {
let mem = MemoryInstance::alloc(Pages(1), None).unwrap();
mem.set(100, &[0]).expect("memory set should not fail");
mem.with_direct_access_mut(|buf| {
assert_eq!(
buf.len(),
65536,
"the buffer length is expected to be 1 page long"
);
buf[..10].copy_from_slice(&[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]);
});
mem.with_direct_access(|buf| {
assert_eq!(
buf.len(),
65536,
"the buffer length is expected to be 1 page long"
);
assert_eq!(&buf[..10], &[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]);
});
}
#[should_panic]
#[test]
fn zero_copy_panics_on_nested_access() {
let mem = MemoryInstance::alloc(Pages(1), None).unwrap();
let mem_inner = mem.clone();
mem.with_direct_access(move |_| {
let _ = mem_inner.set(0, &[11, 12, 13]);
});
}
}